import gradio as gr
import inference_2 as inference
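# inference_2 is a local module; the handlers below expect it to expose
# deepfakes_video_predict, deepfakes_spec_predict, and deepfakes_image_predict.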
import os
import sys
import asyncio
# Windows compatibility fix for asyncio
if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
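# The proactor event loop supports subprocess and pipe transports on Windows;
# setting the policy explicitly guards against libraries switching to the
# selector loop, which lacks them.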
# ChatGPT-inspired CSS with Dark Theme
custom_css = """
/* ChatGPT-style global container */
.gradio-container {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif !important;
background: #212121 !important;
color: #ffffff !important;
margin: 0 !important;
padding: 0 !important;
height: 100vh !important;
}
/* ChatGPT-style layout */
.chat-layout {
display: flex !important;
height: 100vh !important;
}
/* ChatGPT-style sidebar */
.chat-sidebar {
width: 260px !important;
background: #171717 !important;
border-right: 1px solid #2e2e2e !important;
padding: 1rem !important;
overflow-y: auto !important;
flex-shrink: 0 !important;
}
.sidebar-header {
padding: 1rem 0 !important;
border-bottom: 1px solid #2e2e2e !important;
margin-bottom: 1rem !important;
}
.sidebar-title {
font-size: 1.1rem !important;
font-weight: 600 !important;
color: #ffffff !important;
margin: 0 !important;
}
/* Sidebar menu items */
.sidebar-item {
display: flex !important;
align-items: center !important;
padding: 0.75rem 1rem !important;
margin: 0.25rem 0 !important;
border-radius: 8px !important;
cursor: pointer !important;
transition: background-color 0.2s ease !important;
color: #b4b4b4 !important;
text-decoration: none !important;
width: 100% !important;
border: none !important;
background: transparent !important;
text-align: left !important;
}
.sidebar-item:hover {
background: #2a2a2a !important;
color: #ffffff !important;
}
.sidebar-item.active {
background: #2a2a2a !important;
color: #ffffff !important;
}
/* ChatGPT-style main content */
.chat-main {
flex: 1 !important;
background: #212121 !important;
overflow-y: auto !important;
display: flex !important;
flex-direction: column !important;
}
/* ChatGPT-style header */
.chat-header {
background: #2a2a2a !important;
border-bottom: 1px solid #2e2e2e !important;
padding: 1rem 2rem !important;
flex-shrink: 0 !important;
}
.chat-title {
font-size: 1.2rem !important;
font-weight: 600 !important;
color: #ffffff !important;
margin: 0 !important;
}
.chat-subtitle {
color: #b4b4b4 !important;
font-size: 0.9rem !important;
margin-top: 0.25rem !important;
}
/* ChatGPT-style content area */
.chat-content {
flex: 1 !important;
padding: 2rem !important;
max-width: 800px !important;
margin: 0 auto !important;
width: 100% !important;
box-sizing: border-box !important;
}
/* ChatGPT-style cards */
.chat-card {
background: #2a2a2a !important;
border: 1px solid #2e2e2e !important;
border-radius: 12px !important;
padding: 1.5rem !important;
margin: 1rem 0 !important;
transition: border-color 0.2s ease !important;
}
.chat-card:hover {
border-color: #404040 !important;
}
/* ChatGPT-style inputs */
.chat-input {
background: #171717 !important;
border: 1px solid #2e2e2e !important;
border-radius: 8px !important;
padding: 1rem !important;
color: #ffffff !important;
font-size: 0.9rem !important;
transition: border-color 0.2s ease !important;
}
.chat-input:focus {
border-color: #0ea5e9 !important;
box-shadow: 0 0 0 3px rgba(14, 165, 233, 0.1) !important;
outline: none !important;
}
/* ChatGPT-style buttons */
.chat-button {
background: #0ea5e9 !important;
color: #ffffff !important;
border: none !important;
border-radius: 8px !important;
padding: 0.75rem 1.5rem !important;
font-weight: 500 !important;
font-size: 0.9rem !important;
cursor: pointer !important;
transition: all 0.2s ease !important;
display: inline-flex !important;
align-items: center !important;
gap: 0.5rem !important;
}
.chat-button:hover {
background: #0284c7 !important;
transform: translateY(-1px) !important;
box-shadow: 0 4px 12px rgba(14, 165, 233, 0.3) !important;
}
/* ChatGPT-style output */
.chat-output {
background: #171717 !important;
border: 1px solid #2e2e2e !important;
border-radius: 8px !important;
padding: 1rem !important;
font-family: 'SF Mono', Monaco, 'Cascadia Code', 'Roboto Mono', Consolas, 'Courier New', monospace !important;
font-size: 0.85rem !important;
line-height: 1.5 !important;
color: #ffffff !important;
min-height: 200px !important;
white-space: pre-wrap !important;
}
/* Upload area styling */
.upload-area {
border: 2px dashed #2e2e2e !important;
border-radius: 8px !important;
padding: 2rem !important;
text-align: center !important;
background: #171717 !important;
transition: all 0.2s ease !important;
color: #b4b4b4 !important;
}
.upload-area:hover {
border-color: #0ea5e9 !important;
background: #1a1a1a !important;
}
/* ChatGPT-style accordion */
.chat-accordion {
background: #2a2a2a !important;
border: 1px solid #2e2e2e !important;
border-radius: 8px !important;
margin-top: 1rem !important;
}
.chat-accordion summary {
padding: 1rem !important;
font-weight: 500 !important;
cursor: pointer !important;
background: #2a2a2a !important;
border-radius: 8px 8px 0 0 !important;
color: #ffffff !important;
}
.chat-accordion[open] summary {
border-bottom: 1px solid #2e2e2e !important;
}
/* Responsive design */
@media (max-width: 768px) {
.chat-layout {
flex-direction: column !important;
}
.chat-sidebar {
width: 100% !important;
height: auto !important;
border-right: none !important;
border-bottom: 1px solid #2e2e2e !important;
}
.chat-content {
padding: 1rem !important;
}
}
"""
# Create the ChatGPT-inspired Gradio interface
with gr.Blocks(
    theme=gr.themes.Base(
        primary_hue="blue",
        secondary_hue="gray",
        neutral_hue="gray"
    ),
    css=custom_css,
    title="DeepSecure AI"
) as app:
    # ChatGPT-style layout
    with gr.Row(elem_classes="chat-layout"):
        # Sidebar
        with gr.Column(elem_classes="chat-sidebar", scale=0):
            with gr.Column(elem_classes="sidebar-header"):
                gr.HTML('<div class="sidebar-title">DeepSecure AI</div>')

            # Current analysis type state
            analysis_type = gr.State("video")

            # Sidebar menu
            video_btn_sidebar = gr.Button(
                "🎬 Video Analysis",
                elem_classes="sidebar-item active",
                variant="secondary",
                size="sm"
            )
            audio_btn_sidebar = gr.Button(
                "🎵 Audio Analysis",
                elem_classes="sidebar-item",
                variant="secondary",
                size="sm"
            )
            image_btn_sidebar = gr.Button(
                "🖼️ Image Analysis",
                elem_classes="sidebar-item",
                variant="secondary",
                size="sm"
            )

            # Model info in sidebar
            with gr.Accordion("📊 Model Stats", open=False, elem_classes="chat-accordion"):
                gr.HTML("""
                    <div>Video: 96.2% accuracy</div>
                    <div>Audio: 94.8% accuracy</div>
                    <div>Image: 97.1% accuracy</div>
                """)
        # Main content area
        with gr.Column(elem_classes="chat-main", scale=1):
            # Header
            with gr.Row(elem_classes="chat-header"):
                current_title = gr.HTML('<div class="chat-title">Video Deepfake Detection</div>')
                current_subtitle = gr.HTML('<div class="chat-subtitle">Upload a video file to analyze for potential deepfake manipulation</div>')

            # Content area
            with gr.Column(elem_classes="chat-content"):
                # Dynamic content based on selected analysis type
                with gr.Group():
                    # Video Analysis Content
                    video_content = gr.Column(visible=True)
                    with video_content:
                        with gr.Column(elem_classes="chat-card"):
                            gr.Markdown("### Upload Video File")
                            gr.Markdown("*Drag and drop or click to browse • Supported: MP4, AVI, MOV, MKV*")
                            video_input = gr.Video(
                                label="",
                                elem_classes="upload-area",
                                height=250
                            )
                            video_btn = gr.Button(
                                "🔍 Analyze Video",
                                elem_classes="chat-button",
                                size="lg",
                                variant="primary"
                            )
                            video_output = gr.Textbox(
                                label="Analysis Results",
                                elem_classes="chat-output",
                                lines=10,
                                placeholder="Upload a video and click 'Analyze Video' to see detailed results here...",
                                interactive=False
                            )

                        # Video examples
                        video_examples = []
                        if os.path.exists("videos/aaa.mp4"):
                            video_examples.append("videos/aaa.mp4")
                        if os.path.exists("videos/bbb.mp4"):
                            video_examples.append("videos/bbb.mp4")
                        if video_examples:
                            with gr.Accordion("📁 Try Sample Videos", open=False, elem_classes="chat-accordion"):
                                gr.Examples(
                                    examples=video_examples,
                                    inputs=video_input,
                                    label="Sample videos for testing:"
                                )
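                        # Clicking an example row loads that file into
                        # video_input, the same as a manual upload.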
                    # Audio Analysis Content
                    audio_content = gr.Column(visible=False)
                    with audio_content:
                        with gr.Column(elem_classes="chat-card"):
                            gr.Markdown("### Upload Audio File")
                            gr.Markdown("*Drag and drop or click to browse • Supported: WAV, MP3, FLAC, M4A*")
                            audio_input = gr.Audio(
                                label="",
                                elem_classes="upload-area"
                            )
                            audio_btn = gr.Button(
                                "🔍 Analyze Audio",
                                elem_classes="chat-button",
                                size="lg",
                                variant="primary"
                            )
                            audio_output = gr.Textbox(
                                label="Analysis Results",
                                elem_classes="chat-output",
                                lines=10,
                                placeholder="Upload an audio file and click 'Analyze Audio' to see detailed results here...",
                                interactive=False
                            )

                        # Audio examples
                        audio_examples = []
                        if os.path.exists("audios/DF_E_2000027.flac"):
                            audio_examples.append("audios/DF_E_2000027.flac")
                        if os.path.exists("audios/DF_E_2000031.flac"):
                            audio_examples.append("audios/DF_E_2000031.flac")
                        if audio_examples:
                            with gr.Accordion("📁 Try Sample Audio", open=False, elem_classes="chat-accordion"):
                                gr.Examples(
                                    examples=audio_examples,
                                    inputs=audio_input,
                                    label="Sample audio files for testing:"
                                )
                    # Image Analysis Content
                    image_content = gr.Column(visible=False)
                    with image_content:
                        with gr.Column(elem_classes="chat-card"):
                            gr.Markdown("### Upload Image File")
                            gr.Markdown("*Drag and drop or click to browse • Supported: JPG, PNG, WEBP, BMP*")
                            image_input = gr.Image(
                                label="",
                                elem_classes="upload-area",
                                height=300
                            )
                            image_btn = gr.Button(
                                "🔍 Analyze Image",
                                elem_classes="chat-button",
                                size="lg",
                                variant="primary"
                            )
                            image_output = gr.Textbox(
                                label="Analysis Results",
                                elem_classes="chat-output",
                                lines=10,
                                placeholder="Upload an image and click 'Analyze Image' to see detailed results here...",
                                interactive=False
                            )

                        # Image examples
                        image_examples = []
                        if os.path.exists("images/lady.jpg"):
                            image_examples.append("images/lady.jpg")
                        if os.path.exists("images/fake_image.jpg"):
                            image_examples.append("images/fake_image.jpg")
                        if image_examples:
                            with gr.Accordion("📁 Try Sample Images", open=False, elem_classes="chat-accordion"):
                                gr.Examples(
                                    examples=image_examples,
                                    inputs=image_input,
                                    label="Sample images for testing:"
                                )
    # Sidebar navigation functions
    def switch_to_video():
        return (
            gr.update(visible=True),   # video_content
            gr.update(visible=False),  # audio_content
            gr.update(visible=False),  # image_content
            '<div class="chat-title">Video Deepfake Detection</div>',
            '<div class="chat-subtitle">Upload a video file to analyze for potential deepfake manipulation</div>',
            "video"
        )

    def switch_to_audio():
        return (
            gr.update(visible=False),  # video_content
            gr.update(visible=True),   # audio_content
            gr.update(visible=False),  # image_content
            '<div class="chat-title">Audio Deepfake Detection</div>',
            '<div class="chat-subtitle">Upload an audio file to detect voice cloning or synthetic speech</div>',
            "audio"
        )

    def switch_to_image():
        return (
            gr.update(visible=False),  # video_content
            gr.update(visible=False),  # audio_content
            gr.update(visible=True),   # image_content
            '<div class="chat-title">Image Deepfake Detection</div>',
            '<div class="chat-subtitle">Upload an image to detect face swaps, GANs, or other manipulations</div>',
            "image"
        )

    # Connect sidebar navigation
    video_btn_sidebar.click(
        switch_to_video,
        outputs=[video_content, audio_content, image_content, current_title, current_subtitle, analysis_type]
    )
    audio_btn_sidebar.click(
        switch_to_audio,
        outputs=[video_content, audio_content, image_content, current_title, current_subtitle, analysis_type]
    )
    image_btn_sidebar.click(
        switch_to_image,
        outputs=[video_content, audio_content, image_content, current_title, current_subtitle, analysis_type]
    )
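    # Each switch_* function returns one value per component in `outputs`,
    # in the same order: the three panel visibilities, the header title and
    # subtitle HTML, and the new analysis_type state.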
    # Enhanced prediction functions with better formatting
    def safe_video_predict(video):
        if video is None:
            return "⚠️ Please upload a video file first."
        try:
            result = inference.deepfakes_video_predict(video)
            return f"🎬 VIDEO ANALYSIS COMPLETE\n{'='*50}\n\n✅ {result}\n\n📊 Analysis performed using ResNext-50 + LSTM model\n🎯 Model accuracy: 96.2%\n⏱️ Processing time: Variable based on video length"
        except Exception as e:
            return f"❌ VIDEO ANALYSIS FAILED\n{'='*50}\n\n📋 Error Details:\n{str(e)}\n\n💡 Troubleshooting:\n• Ensure video format is supported (MP4, AVI, MOV, MKV)\n• Check if file is corrupted\n• Try a smaller file size"

    def safe_audio_predict(audio):
        if audio is None:
            return "⚠️ Please upload an audio file first."
        try:
            result = inference.deepfakes_spec_predict(audio)
            return f"🎵 AUDIO ANALYSIS COMPLETE\n{'='*50}\n\n✅ {result}\n\n📊 Analysis performed using Spectral CNN + Transformer model\n🎯 Model accuracy: 94.8%\n⏱️ Processing time: ~5-15 seconds"
        except Exception as e:
            return f"❌ AUDIO ANALYSIS FAILED\n{'='*50}\n\n📋 Error Details:\n{str(e)}\n\n💡 Troubleshooting:\n• Ensure audio format is supported (WAV, MP3, FLAC, M4A)\n• Check if file is corrupted\n• Try converting to WAV format"

    def safe_image_predict(image):
        if image is None:
            return "⚠️ Please upload an image file first."
        try:
            result = inference.deepfakes_image_predict(image)
            return f"🖼️ IMAGE ANALYSIS COMPLETE\n{'='*50}\n\n✅ {result}\n\n📊 Analysis performed using EfficientNet-B4 + XceptionNet model\n🎯 Model accuracy: 97.1%\n⏱️ Processing time: ~2-5 seconds"
        except Exception as e:
            return f"❌ IMAGE ANALYSIS FAILED\n{'='*50}\n\n📋 Error Details:\n{str(e)}\n\n💡 Troubleshooting:\n• Ensure image format is supported (JPG, PNG, WEBP, BMP)\n• Check if file is corrupted\n• Try a different image file"

    # Connect analysis buttons
    video_btn.click(safe_video_predict, video_input, video_output, show_progress=True)
    audio_btn.click(safe_audio_predict, audio_input, audio_output, show_progress=True)
    image_btn.click(safe_image_predict, image_input, image_output, show_progress=True)
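    # Positional args to click() are (fn, inputs, outputs); show_progress=True
    # displays Gradio's built-in progress indicator while the handler runs.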

# Launch Configuration - Windows Optimized
if __name__ == "__main__":
    # Try multiple ports to avoid conflicts
    ports_to_try = [7862, 7863, 7864, 7865, 8000, 8001, 8002]
    for port in ports_to_try:
        try:
            print(f"Trying to start server on port {port}...")
            app.launch(
                server_name="127.0.0.1",
                server_port=port,
                share=False,
                inbrowser=True,
                prevent_thread_lock=False,
                show_error=True,
                quiet=False,
                max_threads=40
            )
            break  # If successful, break the loop
        except OSError as e:
            if "port" in str(e).lower():
                print(f"Port {port} is busy, trying next port...")
                continue
            else:
                print(f"Error starting server: {e}")
                break
        except Exception as e:
            print(f"Unexpected error: {e}")
            break
    else:
        print("All ports are busy. Please close other applications and try again.")