import os
import re
import uuid

import torch
import whisper  # openai-whisper
from flask import Flask, render_template, request, jsonify
from gtts import gTTS
from pydub import AudioSegment  # needs the ffmpeg binary on PATH for non-WAV input
from pydub.silence import detect_nonsilent
from waitress import serve

app = Flask(__name__)

# Load the Whisper speech-to-text model once at startup, on GPU if available.
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = whisper.load_model("large-v3", device=device)
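# Note: "large-v3" is Whisper's most accurate checkpoint but needs roughly
# 10 GB of VRAM; on CPU-only or low-memory machines a smaller checkpoint
# such as "base" or "small" is a more practical choice.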

# Function to generate audio prompts
def generate_audio_prompt(text, filename):
    os.makedirs("static", exist_ok=True)  # gTTS cannot create the directory itself
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))

# Generate required voice prompts
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration."
}

for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")
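# Note: gTTS synthesizes speech through Google's online TTS endpoint, so this
# startup step needs internet access; an offline engine such as pyttsx3 could
# be swapped in if the host has no network.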

# Symbol mapping for better recognition
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " "
}
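# Multi-word keys such as "at the rate" must take precedence over "at";
# clean_transcription below handles this by matching the longest phrases first.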

# Function to convert audio to WAV format (pydub shells out to ffmpeg)
def convert_to_wav(input_path, output_path):
    try:
        audio = AudioSegment.from_file(input_path)
        audio.export(output_path, format="wav")
    except Exception as e:
        raise RuntimeError(f"Audio conversion failed: {e}") from e

# Function to clean transcribed text
def clean_transcription(text):
    text = text.lower().strip()
    ignore_phrases = ["my name is", "this is", "i am", "it's", "name"]
    for phrase in ignore_phrases:
        text = text.replace(phrase, "").strip()

    # Replace spoken symbol names on word boundaries, longest phrase first,
    # so "at" neither shadows "at the rate" nor corrupts words like "data".
    for word in sorted(SYMBOL_MAPPING, key=len, reverse=True):
        text = re.sub(rf"\b{re.escape(word)}\b", SYMBOL_MAPPING[word], text)

    # Drop whitespace around mapped symbols so addresses come out contiguous.
    text = re.sub(r"\s*([@._#+,\-])\s*", r"\1", text)

    return text.strip().capitalize()
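# Illustrative example (hypothetical input): "my name is john dot doe at the
# rate gmail dot com" -> "John.doe@gmail.com" after phrase stripping, symbol
# mapping, and whitespace cleanup.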

# Function to check if audio contains actual speech
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
    if audio.dBFS == float("-inf"):  # all-zero samples: nothing to detect
        return True
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS - 16)
    return len(nonsilent_parts) == 0
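# silence_thresh is set 16 dB below the clip's average loudness and
# min_silence_len to 500 ms; both are heuristics and may need tuning for a
# given microphone and room.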

@app.route("/")
def index():
    return render_template("index.html")
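# Flask resolves index.html from the app's templates/ directory, which must
# ship alongside this file.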

@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]
    input_audio_path = os.path.join("static", "temp_input.wav")
    output_audio_path = os.path.join("static", "temp.wav")
    audio_file.save(input_audio_path)

    try:
        # Convert to WAV
        convert_to_wav(input_audio_path, output_audio_path)

        # Check for silence
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400
        
        # Transcribe using Whisper
        result = asr_model.transcribe(output_audio_path)
        transcribed_text = clean_transcription(result["text"])
        
        return jsonify({"text": transcribed_text})
    except Exception as e:
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500

# Start the Waitress production server
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)
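
# Example client call (illustrative; assumes a local recording.webm):
#   curl -X POST -F "audio=@recording.webm" http://localhost:7860/transcribe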