Update app.py
app.py CHANGED
@@ -15,36 +15,30 @@ def extract_audio(video_path):
         return str(e)
 
 def transcribe_audio_to_srt(audio_path, srt_file="out_put.srt"):
-
-
     model = whisper.load_model("base.en")
     result = model.transcribe(audio_path)
     # Create a list to hold subtitle entries
     subtitles = []
     start_time = 0.0
-
     for i, segment in enumerate(result['segments']):
         start_time = segment['start']
         end_time = segment['end']
         content = segment['text'].strip()
-
         # Create a subtitle entry
         subtitle = srt.Subtitle(index=i+1,
                                 start=timedelta(seconds=start_time),
                                 end=timedelta(seconds=end_time),
                                 content=content)
         subtitles.append(subtitle)
-
     # Write the subtitles to an SRT file
     with open(srt_file, 'w', encoding='utf-8') as f:
         f.write(srt.compose(subtitles))
     return srt_file
 
 def process_video(video):
-    video_path =
-    video.save(video_path)
+    video_path = video
     audio_path = extract_audio(video_path)
-    processed_audio_path =
+    processed_audio_path = transcribe_audio_to_srt(audio_path)
     with open(processed_audio_path, "r") as f:
         srt_content = f.read()
     return srt_content
@@ -55,5 +49,7 @@ iface = gr.Interface(
     outputs=gr.Textbox(label="Generated SRT File Content"),
     title="Extract and Process Audio from Video",
     description="Upload a video file to extract and process the audio, and view the generated SRT file content.",
-    allow_flagging="never"
+    allow_flagging="never"
+)
+
 iface.launch()
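For context on what this commit changes: Gradio's video/file inputs hand the wrapped function a path to a temporary file on disk, not an upload object, so the old video.save(video_path) call (and the half-written assignments around it) could not work; the new process_video simply forwards that path to extract_audio and then to transcribe_audio_to_srt, and the second hunk closes the gr.Interface(...) call that was previously left unterminated. Below is a minimal end-to-end sketch of the post-commit wiring. It is an assumption-laden illustration rather than the Space's exact code: the body of extract_audio and the inputs= component are outside this diff, so the moviepy-based extraction and the gr.Video input are placeholders.

# Hedged sketch of the post-commit app.py flow. The moviepy-based
# extract_audio body and the gr.Video input are assumptions; only the
# functions shown in the diff above are confirmed.
from datetime import timedelta

import gradio as gr
import srt
import whisper


def extract_audio(video_path):
    # Hypothetical body: the diff only shows this function's "return str(e)".
    try:
        from moviepy.editor import VideoFileClip
        audio_path = "extracted_audio.wav"
        VideoFileClip(video_path).audio.write_audiofile(audio_path)
        return audio_path
    except Exception as e:
        return str(e)


def transcribe_audio_to_srt(audio_path, srt_file="out_put.srt"):
    # Mirrors the function in the diff: Whisper segments -> srt.Subtitle entries.
    model = whisper.load_model("base.en")
    result = model.transcribe(audio_path)
    subtitles = [
        srt.Subtitle(index=i + 1,
                     start=timedelta(seconds=seg["start"]),
                     end=timedelta(seconds=seg["end"]),
                     content=seg["text"].strip())
        for i, seg in enumerate(result["segments"])
    ]
    with open(srt_file, "w", encoding="utf-8") as f:
        f.write(srt.compose(subtitles))
    return srt_file


def process_video(video):
    video_path = video                      # Gradio passes a temp filepath string
    audio_path = extract_audio(video_path)  # no .save() call needed
    srt_path = transcribe_audio_to_srt(audio_path)
    with open(srt_path, "r", encoding="utf-8") as f:
        return f.read()


iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Video"),         # assumed input component
    outputs=gr.Textbox(label="Generated SRT File Content"),
    title="Extract and Process Audio from Video",
    description="Upload a video file to extract and process the audio, and view the generated SRT file content.",
    allow_flagging="never",
)

iface.launch()

Reading the generated SRT file back as text and returning it is what lets the gr.Textbox output display the result directly in the Space.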