Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -25,10 +25,10 @@ dict_languages = {"English": "en",
|
|
| 25 |
"Hindi": "hi"}
|
| 26 |
|
| 27 |
@spaces.GPU
|
| 28 |
-
def process_transcript(
|
| 29 |
"""Process audio with selected Voxtral model and return the generated response"""
|
| 30 |
-
|
| 31 |
-
inputs = processor.apply_transcrition_request(language=
|
| 32 |
inputs = inputs.to(device, dtype=torch.bfloat16)
|
| 33 |
|
| 34 |
outputs = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
|
|
@@ -62,8 +62,8 @@ with gr.Blocks(title="Transcription") as transcript:
|
|
| 62 |
|
| 63 |
|
| 64 |
submit_transcript.click(
|
| 65 |
-
fn=process_transcript,
|
| 66 |
-
inputs=[
|
| 67 |
outputs=text_transcript
|
| 68 |
)
|
| 69 |
|
|
|
|
| 25 |
"Hindi": "hi"}
|
| 26 |
|
| 27 |
@spaces.GPU
|
| 28 |
+
def process_transcript(model, processor, audio_path, language):
|
| 29 |
"""Process audio with selected Voxtral model and return the generated response"""
|
| 30 |
+
id_language = dict_languages[language]
|
| 31 |
+
inputs = processor.apply_transcrition_request(language=id_language, audio=audio_path, model_id=model_name)
|
| 32 |
inputs = inputs.to(device, dtype=torch.bfloat16)
|
| 33 |
|
| 34 |
outputs = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
|
|
|
|
| 62 |
|
| 63 |
|
| 64 |
submit_transcript.click(
|
| 65 |
+
fn=lambda language, audio: process_transcript(model, processor, audio, language),
|
| 66 |
+
inputs=[sel_language, sel_audio],
|
| 67 |
outputs=text_transcript
|
| 68 |
)
|
| 69 |
|