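# Gradio demo for the sarvamai/shuka_v1 audio language model: records
# microphone audio, sends it to the model with a "repeat exactly" system
# prompt, and displays the generated text.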
import transformers
import gradio as gr
import librosa
import torch
import spaces

# Request a ZeroGPU allocation for up to 60 seconds per call (Hugging Face Spaces).
@spaces.GPU(duration=60)
def transcribe_and_respond(audio_file):
    try:
        # Build the Shuka v1 pipeline inside the handler so the model is
        # instantiated while the GPU is attached. This reloads the model on
        # every request; cache it globally if latency matters.
        pipe = transformers.pipeline(
            model='sarvamai/shuka_v1',
            trust_remote_code=True,
            device=0,
            torch_dtype=torch.bfloat16
        )

        # Load the audio and resample to the 16 kHz rate the model expects
        audio, sr = librosa.load(audio_file, sr=16000)

        # Print audio properties for debugging
        print(f"Audio dtype: {audio.dtype}, Audio shape: {audio.shape}, Sample rate: {sr}")

        # Conversation turns: the <|audio|> placeholder marks where the
        # audio clip is injected into the user turn.
        turns = [
            {'role': 'system', 'content': 'Repeat the following text exactly, without any changes'},
            {'role': 'user', 'content': '<|audio|>'}
        ]

        # Debug: Print the initial turns
        print(f"Initial turns: {turns}")

        # Call the model with the audio and prompt
        output = pipe({'audio': audio, 'turns': turns, 'sampling_rate': sr}, max_new_tokens=512)

        # Debug: Print the final output from the model
        print(f"Model output: {output}")

        return output

    except Exception as e:
        return f"Error: {str(e)}"

# live=True re-runs the function whenever the recorded audio changes.
iface = gr.Interface(
    fn=transcribe_and_respond,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
    title="Live Transcription and Response",
    description="Speak into your microphone; the model will repeat your words back as text.",
    live=True
)

if __name__ == "__main__":
    iface.launch()
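
# Note: outside a Hugging Face Space the @spaces.GPU decorator is a no-op,
# so this script should also run locally with `python app.py`, assuming a
# CUDA GPU (device=0) and the transformers, gradio, librosa, torch, and
# spaces packages are available.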