|
import os
import re
import tempfile

from flask import Flask, render_template, request, jsonify

import torch
from gtts import gTTS
from transformers import pipeline
|
|
|
# Flask application instance; templates are resolved from ./templates.
app = Flask(__name__)

# Run Whisper on the GPU when one is visible, otherwise fall back to CPU.
# transformers pipelines take a CUDA device index, or -1 for CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base",
    device=0 if device == "cuda" else -1,
)
|
|
|
|
|
def generate_audio_prompt(text, filename):
    """Synthesize *text* to speech and save it as static/<filename>.

    NOTE: gTTS performs a network call to Google's TTS endpoint, so this
    requires internet access at startup.
    """
    # Create the output directory if needed so tts.save() doesn't fail
    # with FileNotFoundError on a fresh checkout without a static/ folder.
    os.makedirs("static", exist_ok=True)
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))
|
|
|
|
|
# Canned voice prompts synthesized to MP3 once and served from static/.
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration."
}

# Only synthesize prompts whose MP3 is missing: avoids one network
# round-trip per prompt to the TTS service on every restart.
for key, text in prompts.items():
    if not os.path.exists(os.path.join("static", f"{key}.mp3")):
        generate_audio_prompt(text, f"{key}.mp3")
|
|
|
|
|
def clean_transcription(text):
    """Strip every character except letters, digits, '@', '.', and whitespace.

    Keeps just enough punctuation for names and email addresses to survive
    the ASR output cleanup.
    """
    disallowed = re.compile(r"[^a-zA-Z0-9@.\s]")
    return disallowed.sub("", text)
|
|
|
@app.route("/")
def index():
    """Serve the single-page UI for the voice registration flow."""
    return render_template("index.html")
|
|
|
@app.route("/transcribe", methods=["POST"])
def transcribe():
    """Transcribe an uploaded audio clip to cleaned English text.

    Expects a multipart/form-data POST with the clip under the "audio" field.
    Returns JSON {"text": ...} on success, or {"error": ...} with status
    400 (no file) / 500 (transcription failure).
    """
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]

    # A unique temp file per request: the previous fixed "static/temp.wav"
    # path was clobbered by concurrent uploads and was never deleted.
    fd, audio_path = tempfile.mkstemp(suffix=".wav", dir="static")
    os.close(fd)  # mkstemp opens the file; we only need the path
    try:
        audio_file.save(audio_path)
        result = asr_model(audio_path, generate_kwargs={"language": "en"})
        transcribed_text = clean_transcription(result["text"])
        return jsonify({"text": transcribed_text})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    finally:
        # Always remove the upload, whether transcription succeeded or not.
        os.remove(audio_path)
|
|
|
if __name__ == "__main__":
    # WARNING(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution — combined with host="0.0.0.0"
    # this must never run on a publicly reachable machine. Disable debug
    # (or use a real WSGI server) for anything beyond local development.
    app.run(host="0.0.0.0", port=5000, debug=True)
|
|