import os
import re

import torch
import whisper
from flask import Flask, render_template, request, jsonify
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from waitress import serve

app = Flask(__name__)

# Run Whisper on the GPU when one is available; on CPU it falls back to FP32.
device = "cuda" if torch.cuda.is_available() else "cpu"
whisper_model = whisper.load_model("medium", device=device)


def generate_audio_prompt(text, filename):
    """Synthesize a spoken prompt with gTTS and save it under static/."""
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))


# Pre-generate the spoken prompts. gTTS calls an online TTS service, so skip
# files that already exist rather than re-fetching them on every restart.
os.makedirs("static", exist_ok=True)
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration.",
}
for key, text in prompts.items():
    if not os.path.exists(os.path.join("static", f"{key}.mp3")):
        generate_audio_prompt(text, f"{key}.mp3")


# Spoken words to render as symbols. Insertion order matters: the longer
# phrase "at the rate" must be replaced before the bare "at".
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " ",
}


def clean_transcription(text):
    """Lowercase the transcription and turn spoken symbol names into symbols."""
    text = text.lower().strip()
    for word, symbol in SYMBOL_MAPPING.items():
        # Match whole words only, so "at" does not fire inside "what"
        # and "dot" does not fire inside "dotted".
        text = re.sub(rf"\b{re.escape(word)}\b", symbol, text)
    return text.capitalize()
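
# A hand-traced example (hypothetical input):
#   clean_transcription("john underscore doe at the rate gmail dot com")
#   -> "John _ doe @ gmail . com"
# The spoken words are substituted, but the spaces around them survive;
# strip whitespace afterwards if a compact "john_doe@gmail.com" is needed.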


def trim_silence(audio_path):
    """Trim leading and trailing silence from a WAV file in place."""
    audio = AudioSegment.from_wav(audio_path)
    # Treat anything at least 16 dB quieter than the clip's average loudness,
    # sustained for 500 ms or more, as silence.
    nonsilent_parts = detect_nonsilent(
        audio, min_silence_len=500, silence_thresh=audio.dBFS - 16
    )

    if nonsilent_parts:
        start_trim = nonsilent_parts[0][0]
        end_trim = nonsilent_parts[-1][1]
        trimmed_audio = audio[start_trim:end_trim]
        trimmed_audio.export(audio_path, format="wav")
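
# detect_nonsilent returns [start_ms, end_ms] pairs, so the slice above keeps
# everything from the first non-silent onset to the last non-silent offset.
# If the recording is entirely silent, the list is empty and the file is
# left as-is.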


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    # Persist the upload so both pydub and Whisper can read it from disk.
    audio_file = request.files["audio"]
    audio_path = os.path.join("static", "temp.wav")
    audio_file.save(audio_path)

    try:
        trim_silence(audio_path)
        result = whisper_model.transcribe(audio_path, language="en")
        transcribed_text = clean_transcription(result["text"])
        return jsonify({"text": transcribed_text})
    except Exception as e:
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500


if __name__ == "__main__":
    # Serve through Waitress, a production WSGI server, on all interfaces.
    serve(app, host="0.0.0.0", port=7860)