import torch
from flask import Flask, render_template, request, jsonify
import os
from transformers import pipeline
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from waitress import serve
from simple_salesforce import Salesforce

app = Flask(__name__)
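
# Ensure the static/ directory exists before any prompt MP3s are written into it
os.makedirs("static", exist_ok=True)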

# Use whisper-small: a reasonable speed/accuracy trade-off for short clips
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if device == "cuda" else -1)

# Function to generate audio prompts
def generate_audio_prompt(text, filename):
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))

# Generate required voice prompts
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration."
}

for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")

# Map spoken words to the symbols they stand for (useful for email addresses)
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " "
}
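
# NOTE: SYMBOL_MAPPING is not applied anywhere else in this file as written.
# A minimal sketch of how it could normalize an email-like utterance. This
# joins tokens without spaces, so it only suits email/username fields, not
# free-form text; the multi-word phrase is handled before tokenizing.
def normalize_spoken_symbols(text):
    text = text.lower().replace("at the rate", "@")
    return "".join(SYMBOL_MAPPING.get(token, token) for token in text.split())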

# Function to convert audio to WAV format
def convert_to_wav(input_path, output_path):
    try:
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)  # Convert to 16kHz, mono
        audio.export(output_path, format="wav")
    except Exception as e:
        raise RuntimeError(f"Audio conversion failed: {e}") from e

# Function to check if audio contains actual speech
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
    # Anything quieter than 16 dB below the clip's average loudness counts as
    # silence; gaps shorter than 500 ms do not break up a speech segment
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS - 16)
    return len(nonsilent_parts) == 0  # True when no speech was detected

# Salesforce connection details; credentials are read from the environment
# instead of being hardcoded in source (variable names are deployment-specific)
sf = Salesforce(
    username=os.environ["SALESFORCE_USERNAME"],
    password=os.environ["SALESFORCE_PASSWORD"],
    security_token=os.environ["SALESFORCE_SECURITY_TOKEN"],
)

# Function to create Salesforce record
def create_salesforce_record(name, email, phone_number):
    try:
        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone_number
        })
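        # simple_salesforce returns a dict such as
        # {'id': '...', 'success': True, 'errors': []}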
        return customer_login
    except Exception as e:
        return {"error": f"Failed to create record in Salesforce: {str(e)}"}

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]
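    # Fixed temp paths are fine for a single-user demo but would collide under
    # concurrent requests; per-request names (e.g. uuid4) would be safer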
    input_audio_path = os.path.join("static", "temp_input.wav")
    output_audio_path = os.path.join("static", "temp.wav")
    audio_file.save(input_audio_path)

    try:
        # Convert to WAV
        convert_to_wav(input_audio_path, output_audio_path)

        # Check for silence
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400

        # Use Whisper ASR model for transcription
        result = asr_model(output_audio_path, generate_kwargs={"language": "en"})
        transcribed_text = result["text"].strip().capitalize()

        # Naive parse of the transcript into name / email / phone. A real
        # deployment would prompt for each field separately (matching the
        # generated voice prompts) rather than split a single utterance.
        parts = transcribed_text.split()
        name = parts[0] if parts else "Unknown"
        email = parts[1] if len(parts) > 1 and "@" in parts[1] else "[email protected]"
        phone_number = parts[2] if len(parts) > 2 else "0000000000"

        # Create record in Salesforce
        salesforce_response = create_salesforce_record(name, email, phone_number)
        
        if "error" in salesforce_response:
            return jsonify(salesforce_response), 500
        
        return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})

    except Exception as e:
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
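    finally:
        # Best-effort cleanup of the per-request temporary audio files
        for path in (input_audio_path, output_audio_path):
            if os.path.exists(path):
                os.remove(path)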

# Start the production server via waitress
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)
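
# Example request against the /transcribe endpoint (file name is illustrative):
#   curl -F "audio=@recording.wav" http://localhost:7860/transcribe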