Spaces:
Sleeping
Salman11223
committed on
Update app.py
app.py
CHANGED
@@ -2,42 +2,33 @@ import os
 import requests
 import gradio as gr
 import moviepy.editor as mp
+from TTS.api import TTS
 import torch
 import assemblyai as aai
[... removed old lines 7-26 were not preserved in the extracted diff view; the surviving tail of the removed synthesis code follows ...]
-            text,
-            config,
-            speaker_wav=speaker_wav,
-            gpt_cond_len=3,
-            language=language
-        )
-        return outputs
-    except Exception as e:
-        print(f"Error during synthesis: {e}")
-        raise
+
+# Download necessary models if not already present
+model_files = {
+    "wav2lip.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/wav2lip.pth",
+    "wav2lip_gan.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/wav2lip_gan.pth",
+    "resnet50.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/resnet50.pth",
+    "mobilenet.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/mobilenet.pth",
+    "s3fd.pth": "https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth"
+}
+
+for filename, url in model_files.items():
+    file_path = os.path.join("checkpoints" if "pth" in filename else "face_detection", filename)
+    if not os.path.exists(file_path):
+        print(f"Downloading {filename}...")
+        r = requests.get(url)
+        with open(file_path, 'wb') as f:
+            f.write(r.content)
+
+# Initialize TTS model without prompts
+tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True, progress_bar=False)

 # Translation class
-class Translation:
-    def __init__(self, ...):
+class translation:
+    def __init__(self, video_path, original_language, target_language):
         self.video_path = video_path
         self.original_language = original_language
         self.target_language = target_language
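A note on the new download block above: every key in model_files ends in ".pth", so the check "pth" in filename is always true and all five files, including the s3fd face-detection weights, end up under checkpoints/, and neither target directory is created before open() runs. The following is a hedged sketch of a more defensive variant, not part of the commit; the face_detection/detection/sfd/ location for s3fd.pth is an assumption taken from the stock Wav2Lip repository layout.

import os
import requests

def download_models(model_files):
    # Sketch: create the target directory first and route the s3fd weights
    # to the face-detection folder (assumed path, mirrors the Wav2Lip repo layout).
    for filename, url in model_files.items():
        target_dir = "face_detection/detection/sfd" if filename.startswith("s3fd") else "checkpoints"
        os.makedirs(target_dir, exist_ok=True)
        file_path = os.path.join(target_dir, filename)
        if os.path.exists(file_path):
            continue
        print(f"Downloading {filename}...")
        with requests.get(url, stream=True, timeout=120) as r:
            r.raise_for_status()
            with open(file_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=1 << 20):
                    f.write(chunk)

Called as download_models(model_files), this reuses the same dictionary defined in the commit.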
@@ -78,18 +69,8 @@ class Translation:
         return translation

     def generate_audio(self, translated_text):
[... removed old lines 81-82 were not preserved in the extracted diff view; the rest of the removed block follows ...]
-                translated_text,
-                speaker_wav='output_audio.wav',
-                language=self.tran_code
-            )
-            with open("output_synth.wav", "wb") as f:
-                f.write(synthesized_audio)
-            return "output_synth.wav"
-        except Exception as e:
-            print(f"Error generating audio: {e}")
-            raise
+        tts.tts_to_file(text=translated_text, speaker_wav='output_audio.wav', file_path="output_synth.wav", language=self.tran_code)
+        return "output_synth.wav"

     def translate_video(self):
         audio_path = self.extract_audio()
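The module-level TTS(..., gpu=True) call in the first hunk assumes a CUDA device, and the rewritten generate_audio drops the try/except that the old version had around synthesis. Below is a minimal, hedged sketch of a device-aware setup with that error handling restored; the COQUI_TOS_AGREED environment variable (used by Coqui TTS to skip the interactive XTTS license prompt) and the standalone function shape are assumptions, not part of this commit.

import os
import torch
from TTS.api import TTS

# Assumption: accept the Coqui XTTS license non-interactively so the model download does not block.
os.environ.setdefault("COQUI_TOS_AGREED", "1")

# Fall back to CPU when no GPU is available (e.g. on a CPU-only Space).
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2",
          gpu=torch.cuda.is_available(), progress_bar=False)

def generate_audio(translated_text, tran_code, speaker_wav="output_audio.wav"):
    # Same tts_to_file call as in the commit, wrapped in the error handling
    # that the previous version of generate_audio used.
    try:
        tts.tts_to_file(
            text=translated_text,
            speaker_wav=speaker_wav,
            language=tran_code,
            file_path="output_synth.wav",
        )
        return "output_synth.wav"
    except Exception as e:
        print(f"Error generating audio: {e}")
        raise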
@@ -106,7 +87,7 @@ class Translation:

 # Gradio Interface
 def app(video_path, original_language, target_language):
-    translator = ...
+    translator = translation(video_path, original_language, target_language)
     video_file = translator.translate_video()
     return video_file
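For a quick check of this wrapper outside Gradio, app() can be called directly; the file name and language labels below are placeholders, since the interface's input choices are not visible in this hunk.

# Hypothetical local test; "sample.mp4" and the language labels are made-up values.
output_path = app("sample.mp4", "English", "Spanish")
print(output_path)  # path to the translated video returned by translate_video()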
@@ -120,4 +101,4 @@ interface = gr.Interface(
     outputs=gr.Video(label="Translated Video")
 )

-interface.launch(
+interface.launch()
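Only the tail of the gr.Interface(...) call appears in this last hunk. A minimal sketch of how such an interface is typically wired to app() follows; the input components, their labels and the language choices are assumptions, only the outputs line and interface.launch() come from the diff.

import gradio as gr

interface = gr.Interface(
    fn=app,
    inputs=[
        gr.Video(label="Video"),                                          # assumed upload component
        gr.Dropdown(["English", "Spanish"], label="Original Language"),   # assumed choices
        gr.Dropdown(["English", "Spanish"], label="Target Language"),     # assumed choices
    ],
    outputs=gr.Video(label="Translated Video"),
)

interface.launch()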