# Whisper Small Sindhi — Gradio Space: transcribes audio and emits an .srt subtitle file.
from transformers import pipeline
import gradio as gr
import os
import torch
import srt
from datetime import timedelta
# Prefer the GPU when one is visible to this process; otherwise run on CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# The model repo requires authentication: the token is injected as the
# `hf_token` secret of the Space. Fail fast with a clear message if absent.
auth_token = os.environ.get("hf_token")
if auth_token is None or auth_token == "":
    raise ValueError("Hugging Face token is missing! Add it as a secret.")

# Build the ASR pipeline once at import time so every request reuses it.
pipe = pipeline(model="fahadqazi/whisper-small-sindhi", device=device, token=auth_token)  # change to "your-username/the-name-you-picked"
def transcribe(audio):
    """Transcribe an audio file and write an SRT subtitle track.

    Args:
        audio: Path to the input audio file (Gradio's ``type="filepath"``).

    Returns:
        A ``(transcription, srt_file)`` tuple: the full recognized text and
        the path of the generated ``output.srt``.
    """
    # `return_timestamps=True` is required for the pipeline to include
    # per-segment `chunks`; without it the result only contains `text`.
    result = pipe(audio, return_timestamps=True)
    transcription = result["text"]

    # Each chunk is {"timestamp": (start_s, end_s), "text": ...}.
    segments = result.get("chunks", [])

    subtitles = []
    prev_end = timedelta(seconds=0)
    for i, segment in enumerate(segments):
        start_s, end_s = segment["timestamp"]
        # The final chunk's end (and rarely start) can be None when the
        # model could not determine a boundary — fall back gracefully.
        start = timedelta(seconds=start_s) if start_s is not None else prev_end
        end = timedelta(seconds=end_s) if end_s is not None else start
        subtitles.append(
            srt.Subtitle(index=i + 1, start=start, end=end, content=segment["text"])
        )
        prev_end = end

    # Write subtitles to .srt file (UTF-8 so Sindhi text survives round-trip).
    srt_file = "output.srt"
    with open(srt_file, "w", encoding="utf-8") as f:
        f.write(srt.compose(subtitles))

    return transcription, srt_file
# Gradio UI: audio file in, recognized text plus the downloadable .srt out.
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs=["text", "file"],
    title="Whisper Small Sindhi",
    description="Realtime demo for Sindhi speech recognition using a fine-tuned Whisper small model.",
)

# NOTE: removed stray " |" scrape residue that followed launch() — it was a
# syntax error in the captured source.
iface.launch()