Spaces: Salman11223
committed on
Update app.py
app.py CHANGED
@@ -0,0 +1,104 @@
import os
import requests
import gradio as gr
import moviepy.editor as mp
from TTS.api import TTS
import torch
import assemblyai as aai

# Download the required model weights if they are not already present
model_files = {
    "wav2lip.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/wav2lip.pth",
    "wav2lip_gan.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/wav2lip_gan.pth",
    "resnet50.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/resnet50.pth",
    "mobilenet.pth": "https://github.com/justinjohn0306/Wav2Lip/releases/download/models/mobilenet.pth",
    "s3fd.pth": "https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth"
}

for filename, url in model_files.items():
    # Every filename ends in ".pth", so a substring test cannot separate them;
    # route the s3fd face detector to its own directory explicitly.
    target_dir = "face_detection" if filename == "s3fd.pth" else "checkpoints"
    os.makedirs(target_dir, exist_ok=True)
    file_path = os.path.join(target_dir, filename)
    if not os.path.exists(file_path):
        print(f"Downloading {filename}...")
        r = requests.get(url)
        with open(file_path, 'wb') as f:
            f.write(r.content)

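The Wav2Lip checkpoints are hundreds of megabytes, and requests.get(url) buffers each whole file in memory before writing it out. A minimal streaming variant of the same download, using only the requests API already imported above; the chunk size and timeout are illustrative assumptions, not values from the original app:

# Sketch: stream large checkpoint downloads instead of buffering them in RAM.
def download_file(url, file_path, chunk_size=1 << 20, timeout=60):
    with requests.get(url, stream=True, timeout=timeout) as r:
        r.raise_for_status()  # fail loudly on a 404 instead of saving an HTML error page
        with open(file_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
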
# Initialize the XTTS v2 model (fall back to CPU when no GPU is available)
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=torch.cuda.is_available())

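One caveat for a headless Space: recent Coqui TTS releases prompt interactively for agreement to the CPML license the first time XTTS v2 is downloaded, which can hang startup where no terminal is attached. Whether this applies depends on the installed TTS version (an assumption worth checking against its release notes); if it does, pre-accepting via the environment avoids the prompt:

# Assumption: the installed Coqui TTS version honors this variable; it must be
# set before the TTS("tts_models/multilingual/multi-dataset/xtts_v2", ...) call runs.
os.environ["COQUI_TOS_AGREED"] = "1"
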
# Translation pipeline: extract audio, transcribe, translate,
# re-synthesize speech, and lip-sync the result onto the source video.
class Translation:
    def __init__(self, video_path, original_language, target_language):
        self.video_path = video_path
        self.original_language = original_language
        self.target_language = target_language

    # Shared mapping from display names to ISO 639-1 codes
    LANGUAGE_CODES = {'English': 'en', 'German': 'de', 'Italian': 'it', 'Spanish': 'es'}

    def org_language_parameters(self, original_language):
        self.lan_code = self.LANGUAGE_CODES.get(original_language, '')

    def target_language_parameters(self, target_language):
        self.tran_code = self.LANGUAGE_CODES.get(target_language, '')

    def extract_audio(self):
        # Pull the audio track out of the uploaded video as a WAV file;
        # it doubles as the speaker reference for XTTS voice cloning.
        video = mp.VideoFileClip(self.video_path)
        audio_path = "output_audio.wav"
        video.audio.write_audiofile(audio_path)
        video.close()  # release the underlying ffmpeg reader
        return audio_path

    def transcribe_audio(self, audio_path):
        # Transcribe with AssemblyAI in the source language
        aai.settings.api_key = os.getenv("ASSEMBLYAI_API_KEY")
        config = aai.TranscriptionConfig(language_code=self.lan_code)
        transcriber = aai.Transcriber(config=config)
        transcript = transcriber.transcribe(audio_path)
        return transcript.text

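When an AssemblyAI job fails, transcript.text can come back as None, which only surfaces later as a confusing translator error. The SDK exposes the job status, so a guard like this sketch (using the SDK's TranscriptStatus enum) fails early with the real cause, placed just before the return:

# Sketch: fail fast when transcription errors instead of passing None along.
if transcript.status == aai.TranscriptStatus.error:
    raise RuntimeError(f"Transcription failed: {transcript.error}")
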
    def translate_text(self, transcript_text):
        # Translate via the Microsoft Translator v3 REST API; the region
        # header must match the Azure resource that issued the key.
        base_url = "https://api.cognitive.microsofttranslator.com/translate"
        headers = {
            "Ocp-Apim-Subscription-Key": os.getenv("MICROSOFT_TRANSLATOR_API_KEY"),
            "Content-Type": "application/json",
            "Ocp-Apim-Subscription-Region": "southeastasia"
        }
        params = {"api-version": "3.0", "from": self.lan_code, "to": self.tran_code}
        body = [{"text": transcript_text}]
        response = requests.post(base_url, headers=headers, params=params, json=body)
        translated_text = response.json()[0]["translations"][0]["text"]
        return translated_text

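Indexing response.json()[0] assumes the call succeeded; on an auth or quota failure the Translator endpoint returns an error object rather than a list, and the line above dies with an opaque KeyError. A sketch of the same request with an explicit check before touching the success-shaped body:

# Sketch: surface HTTP/auth errors before indexing into the response.
response = requests.post(base_url, headers=headers, params=params, json=body)
response.raise_for_status()  # 401/403/429 raise here with the status code
translated_text = response.json()[0]["translations"][0]["text"]
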
    def generate_audio(self, translated_text):
        # Synthesize the translated text with XTTS, cloning the original
        # speaker's voice from the extracted audio track.
        tts.tts_to_file(text=translated_text, speaker_wav='output_audio.wav',
                        file_path="output_synth.wav", language=self.tran_code)
        return "output_synth.wav"

    def translate_video(self):
        audio_path = self.extract_audio()
        self.org_language_parameters(self.original_language)
        self.target_language_parameters(self.target_language)
        transcript_text = self.transcribe_audio(audio_path)
        translated_text = self.translate_text(transcript_text)
        translated_audio_path = self.generate_audio(translated_text)

        # Run Wav2Lip inference to sync the lips to the translated audio
        # (paths are quoted so uploads with spaces in their names survive the shell)
        os.system(f"python inference.py --checkpoint_path 'checkpoints/wav2lip_gan.pth' --face '{self.video_path}' --audio '{translated_audio_path}' --outfile 'output_video.mp4'")
        return 'output_video.mp4'

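Even with quoting, os.system leaves the command exposed to shell interpretation and silently ignores the exit code, so a failed inference still "returns" a video path. A sketch of a drop-in replacement for that line using subprocess.run with an argument list, assuming the same inference.py CLI flags as above:

# Sketch: shell-free invocation of the same Wav2Lip CLI; raises if inference fails.
import subprocess
subprocess.run(
    ["python", "inference.py",
     "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
     "--face", self.video_path,
     "--audio", translated_audio_path,
     "--outfile", "output_video.mp4"],
    check=True,
)
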
# Gradio interface
def app(video_path, original_language, target_language):
    translator = Translation(video_path, original_language, target_language)
    return translator.translate_video()

interface = gr.Interface(
    fn=app,
    inputs=[
        gr.Video(label="Input Video"),
        gr.Dropdown(["English", "German", "Italian", "Spanish"], label="Original Language"),
        gr.Dropdown(["English", "German", "Italian", "Spanish"], label="Target Language"),
    ],
    outputs=gr.Video(label="Translated Video")
)

interface.launch()
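The full pipeline (transcription, translation, synthesis, lip sync) can run for minutes per request. On Spaces, enabling Gradio's queue before launching avoids request timeouts and serializes access to the single shared TTS model; exact behavior depends on the installed Gradio version, so treat this as a sketch:

# Sketch: queue long-running jobs instead of serving them as plain HTTP requests.
interface.queue().launch()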