import torch
from flask import Flask, render_template, request, jsonify
import os
from transformers import pipeline
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from waitress import serve

app = Flask(__name__)

# Ensure the static directory exists before prompt files are written into it
os.makedirs("static", exist_ok=True)

# ✅ Load the Whisper ASR model
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3", device=0 if device == "cuda" else -1)
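# whisper-large-v3 is a heavyweight checkpoint; on CPU-only hosts a smaller one
# such as "openai/whisper-small" (same pipeline API) keeps latency tolerable.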

# Function to generate voice prompts
def generate_audio_prompt(text, filename):
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))

# Generate required voice prompts
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration."
}

for key, text in prompts.items():
    # gTTS performs a network request, so skip files that already exist
    if not os.path.exists(os.path.join("static", f"{key}.mp3")):
        generate_audio_prompt(text, f"{key}.mp3")

# ✅ Ensure proper audio format (Whisper expects 16 kHz mono)
def convert_to_wav(input_path, output_path):
    try:
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)  # ✅ convert to 16 kHz, mono
        audio.export(output_path, format="wav")
    except Exception as e:
        # pydub shells out to ffmpeg; a missing ffmpeg install is the usual culprit here
        raise RuntimeError(f"Audio conversion failed: {e}") from e

# ✅ Check for silence
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
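    # Heuristic thresholds: anything 16 dB below the clip's average loudness (dBFS)
    # counts as silence, and nonsilent stretches shorter than 500 ms are ignored.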
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS-16)
    return len(nonsilent_parts) == 0

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]
    input_audio_path = os.path.join("static", "temp_input.wav")
    output_audio_path = os.path.join("static", "temp.wav")
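    # NOTE: fixed temp filenames mean concurrent requests can overwrite each other;
    # tempfile.NamedTemporaryFile would be safer under a multithreaded server.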
    audio_file.save(input_audio_path)

    try:
        # ✅ Convert audio to proper format
        convert_to_wav(input_audio_path, output_audio_path)

        # ✅ Check for silent audio
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400

        # ✅ Transcribe using Whisper ASR
        result = asr_model(output_audio_path, generate_kwargs={"language": "en"})
        # Whisper already returns cased, punctuated text; .capitalize() would
        # lowercase everything after the first character, so only strip whitespace
        transcribed_text = result["text"].strip()

        return jsonify({"text": transcribed_text})
    except Exception as e:
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
    finally:
        # Remove temp files so repeated requests don't accumulate on disk
        for path in (input_audio_path, output_audio_path):
            if os.path.exists(path):
                os.remove(path)

# ✅ Start production server (waitress)
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)
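
# Quick smoke test once the server is running (sample.wav is a placeholder for
# any short speech recording; the field name must be "audio" to match request.files):
#   curl -F "audio=@sample.wav" http://localhost:7860/transcribe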