Spaces:
Build error
Update app.py
app.py CHANGED
@@ -54,20 +54,26 @@ if __name__ == "__main__":
         with gr.Row():
             with gr.Column(variant='panel'):
 
-                gr.Markdown('## Select input video and audio', sanitize_html=False)
-                # Define
+                gr.Markdown('## Select or Upload input video and audio', sanitize_html=False)
+                # Define preview slots
                 sample_video = gr.Video(interactive=False, label="Input Video")
                 sample_audio = gr.Audio(interactive=False, label="Input Audio")
 
+                # New upload inputs
+                video_upload = gr.Video(source="upload", type="filepath", label="Upload Video")
+                audio_upload = gr.Audio(source="upload", type="filepath", label="Upload Audio")
+
                 # Define radio inputs
-                video_selection = gr.
-
-                audio_selection = gr.
-
+                video_selection = gr.Radio(video_label_dict,
+                                           type='value', label="Select an input video:")
+                audio_selection = gr.Radio(audio_label_dict,
+                                           type='value', label="Select an input audio:")
+
                 # Define button inputs
                 with gr.Row(equal_height=True):
                     generate_original_button = gr.Button(value="Generate with Original Model", variant="primary")
                     generate_compressed_button = gr.Button(value="Generate with Compressed Model", variant="primary")
+
             with gr.Column(variant='panel'):
                 # Define original model output components
                 gr.Markdown('## Original Wav2Lip')
@@ -87,19 +93,34 @@ if __name__ == "__main__":
                 compressed_model_fps = gr.Textbox(value="", label="FPS")
                 compressed_model_params = gr.Textbox(value=servicer.params['nota_wav2lip'], label="# Parameters")
 
-        # Switch video and audio samples when selecting the
+        # Switch video and audio samples when selecting the radio button
         video_selection.change(fn=servicer.switch_video_samples, inputs=video_selection, outputs=sample_video)
         audio_selection.change(fn=servicer.switch_audio_samples, inputs=audio_selection, outputs=sample_audio)
 
+        # Update preview when uploading
+        video_upload.change(fn=lambda x: x, inputs=video_upload, outputs=sample_video)
+        audio_upload.change(fn=lambda x: x, inputs=audio_upload, outputs=sample_audio)
+
+        # Helper: decide whether to use uploaded or selected
+        def resolve_inputs(video_choice, audio_choice, video_file, audio_file):
+            video_path = video_file if video_file else video_label_dict.get(video_choice)
+            audio_path = audio_file if audio_file else audio_label_dict.get(audio_choice)
+            return video_path, audio_path
+
         # Click the generate button for original model
-        generate_original_button.click(
-
-
+        generate_original_button.click(
+            fn=lambda v, a, vu, au: servicer.generate_original_model(*resolve_inputs(v, a, vu, au)),
+            inputs=[video_selection, audio_selection, video_upload, audio_upload],
+            outputs=[original_model_output, original_model_inference_time, original_model_fps]
+        )
+
         # Click the generate button for compressed model
-        generate_compressed_button.click(
-
-
+        generate_compressed_button.click(
+            fn=lambda v, a, vu, au: servicer.generate_compressed_model(*resolve_inputs(v, a, vu, au)),
+            inputs=[video_selection, audio_selection, video_upload, audio_upload],
+            outputs=[compressed_model_output, compressed_model_inference_time, compressed_model_fps]
+        )
 
         gr.Markdown(Path('docs/footer.md').read_text())
 
-    demo.queue().launch()
+    demo.queue().launch()
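
For reference, the fallback that the new resolve_inputs helper implements (prefer an uploaded file, otherwise fall back to the sample chosen in the radio group) can be exercised on its own. The sketch below is a minimal, self-contained illustration: the label dictionaries and file paths are placeholders standing in for video_label_dict and audio_label_dict from app.py, not the Space's actual data.

# Minimal sketch of the upload-over-selection fallback (placeholder data, not from the Space).
video_label_dict = {"Sample video": "sample/video.mp4"}
audio_label_dict = {"Sample audio": "sample/audio.wav"}

def resolve_inputs(video_choice, audio_choice, video_file, audio_file):
    # An uploaded file (a non-empty path) wins; otherwise look up the selected sample.
    video_path = video_file if video_file else video_label_dict.get(video_choice)
    audio_path = audio_file if audio_file else audio_label_dict.get(audio_choice)
    return video_path, audio_path

# Nothing uploaded: the radio selections resolve to the sample paths.
print(resolve_inputs("Sample video", "Sample audio", None, None))
# -> ('sample/video.mp4', 'sample/audio.wav')

# An uploaded video takes precedence over the radio selection; the audio still falls back.
print(resolve_inputs("Sample video", "Sample audio", "/tmp/upload.mp4", None))
# -> ('/tmp/upload.mp4', 'sample/audio.wav')

Because the upload components in the diff are created with type="filepath", they hand the click lambdas a path string (or None when empty), which is why the simple truthiness check in resolve_inputs is enough here.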