from flask import Flask, render_template, request, jsonify
import torch
from transformers import pipeline
from gtts import gTTS
import os
import re
import tempfile
from waitress import serve  # Waitress is used as the production WSGI server

app = Flask(__name__)

# Load the Whisper model for English transcription
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base",
    device=0 if device == "cuda" else -1,
)

# Make sure the static directory exists before writing audio files into it
os.makedirs("static", exist_ok=True)


def generate_audio_prompt(text, filename):
    """Synthesize a spoken prompt with gTTS and save it under static/."""
    path = os.path.join("static", filename)
    # Skip regeneration if the prompt already exists (gTTS needs network access)
    if not os.path.exists(path):
        tts = gTTS(text=text, lang="en")
        tts.save(path)


# Generate the audio prompts used by the front end
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registering.",
}
for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")

# Mapping for converting spoken symbol names into the symbols themselves
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " ",
}


def clean_transcription(text):
    """Lowercase the transcript and replace spoken symbol names with symbols."""
    text = text.lower()
    # Replace longer phrases first and match on word boundaries so that
    # words such as "rate" or "update" are not mangled by the "at" rule
    for word in sorted(SYMBOL_MAPPING, key=len, reverse=True):
        text = re.sub(rf"\b{re.escape(word)}\b", SYMBOL_MAPPING[word], text)
    # Collapse whitespace around substituted symbols ("a @ b" -> "a@b")
    text = re.sub(r"\s*([@._#+\-,])\s*", r"\1", text)
    return re.sub(r"\s+", " ", text).strip()


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    # Save the upload to a unique temporary file so that concurrent
    # requests do not overwrite each other's audio
    audio_file = request.files["audio"]
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        audio_path = tmp.name
    audio_file.save(audio_path)

    try:
        # Transcribe the audio to English text with Whisper
        result = asr_model(audio_path, generate_kwargs={"language": "en"})
        transcribed_text = clean_transcription(result["text"])
        return jsonify({"text": transcribed_text})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    finally:
        # Remove the temporary upload regardless of the outcome
        if os.path.exists(audio_path):
            os.remove(audio_path)


# Run the Waitress server for production
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)
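
# --- Usage sketch ---
# A minimal client-side check of the /transcribe endpoint; this is an
# illustrative assumption, not part of the app itself. It assumes the server
# is running locally on port 7860, that the `requests` package is installed,
# and that "sample.wav" is a hypothetical placeholder for a real recording.
#
#   import requests
#
#   with open("sample.wav", "rb") as f:
#       resp = requests.post(
#           "http://localhost:7860/transcribe",
#           files={"audio": ("sample.wav", f, "audio/wav")},
#       )
#   print(resp.json())  # e.g. {"text": "john@gmail.com"} on success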