"""Biryani Hub voice-registration backend.

Flask app that:
  * pre-generates gTTS voice prompts into ``static/``,
  * accepts audio uploads on ``/transcribe``,
  * converts them to WAV, rejects silent clips,
  * transcribes with OpenAI Whisper and post-processes the text
    (filler-phrase removal, spoken-symbol substitution for emails).

NOTE(review): the original file contained this entire script twice,
concatenated — the second copy loaded ``whisper_timestamped`` "medium"
instead of ``whisper`` "large-v3".  The first copy is the one that runs
when the file is executed as a script, so it is the one kept here.
"""

from flask import Flask, render_template, request, jsonify
import os
import torch
import re
import ffmpeg  # noqa: F401 — kept from original; ensures FFmpeg bindings are installed
from transformers import pipeline  # noqa: F401 — kept from original (unused here)
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from waitress import serve
import whisper  # Improved Whisper ASR Model

app = Flask(__name__)

# Load Whisper model for speech-to-text; use the GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = whisper.load_model("large-v3", device=device)

# The prompt generator and /transcribe both write into static/; make sure
# it exists before the first write.
os.makedirs("static", exist_ok=True)


def generate_audio_prompt(text, filename):
    """Synthesize *text* with gTTS and save it as static/<filename>."""
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))


# Pre-generate the voice prompts the front end plays back.
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration.",
}
for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")

# Spoken-word -> symbol substitutions applied to transcriptions (mainly for
# dictated email addresses).  Insertion order matters: the longer phrase
# "at the rate" must be replaced before its prefix "at".
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " ",
}


def convert_to_wav(input_path, output_path):
    """Convert any audio file pydub/FFmpeg can read into a WAV file.

    Raises:
        Exception: if decoding or export fails (message includes the cause).
    """
    try:
        audio = AudioSegment.from_file(input_path)
        audio.export(output_path, format="wav")
    except Exception as e:
        raise Exception(f"Audio conversion failed: {str(e)}")


def clean_transcription(text):
    """Normalize a raw transcription: drop filler phrases, map spoken
    symbols ("dot" -> ".", "at" -> "@", ...) and capitalize the result.
    """
    text = text.lower().strip()
    ignore_phrases = ["my name is", "this is", "i am", "it's", "name"]
    for phrase in ignore_phrases:
        text = text.replace(phrase, "").strip()
    # Replace whole words only.  The original used substring str.replace,
    # which turned e.g. "what" into "wh@" via the "at" mapping.
    for word, symbol in SYMBOL_MAPPING.items():
        text = re.sub(rf"\b{re.escape(word)}\b", symbol, text)
    return text.capitalize()


def is_silent_audio(audio_path):
    """Return True when the WAV at *audio_path* contains no non-silent span.

    A span counts as non-silent when it is at least 500 ms above a threshold
    16 dB below the clip's average loudness.
    """
    audio = AudioSegment.from_wav(audio_path)
    nonsilent_parts = detect_nonsilent(
        audio, min_silence_len=500, silence_thresh=audio.dBFS - 16
    )
    return len(nonsilent_parts) == 0  # True if silence detected


@app.route("/")
def index():
    """Serve the single-page front end."""
    return render_template("index.html")


@app.route("/transcribe", methods=["POST"])
def transcribe():
    """Accept an uploaded ``audio`` file and return its cleaned transcription.

    Returns JSON: ``{"text": ...}`` on success, ``{"error": ...}`` with
    HTTP 400 (bad/silent input) or 500 (recognition failure) otherwise.
    """
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]
    input_audio_path = os.path.join("static", "temp_input")
    output_audio_path = os.path.join("static", "temp.wav")
    audio_file.save(input_audio_path)

    try:
        # Convert whatever the browser sent into WAV for Whisper/pydub.
        convert_to_wav(input_audio_path, output_audio_path)

        # Reject clips with no detectable speech before invoking the model.
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400

        # Transcribe using Whisper.
        result = asr_model.transcribe(output_audio_path, language="en")
        transcribed_text = clean_transcription(result["text"])
        return jsonify({"text": transcribed_text})

    except Exception as e:
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500


# Use Waitress as the production WSGI server.
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)