import torch
from flask import Flask, render_template, request, jsonify
import os
from transformers import pipeline
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from simple_salesforce import Salesforce  # Salesforce API client used by get_salesforce_connection() below
import re
from waitress import serve
from dotenv import load_dotenv

app = Flask(__name__)

# Use whisper-small: a reasonable trade-off between transcription speed and accuracy
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if device == "cuda" else -1)

# Function to generate audio prompts
def generate_audio_prompt(text, filename):
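    # Note: gTTS synthesizes speech via Google's online TTS service,
    # so this call needs network access when the prompts are generated.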
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))

# Generate required voice prompts
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration."
}

for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")

# Map spoken symbol words to characters (mainly for dictated email addresses)
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " "
}
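
# SYMBOL_MAPPING is not applied anywhere below. A minimal, hypothetical helper
# (normalize_spoken_symbols is not part of the original code) could replace
# spoken symbol words with their characters before extracting an email, e.g.
# "john at the rate gmail dot com" -> "john @ gmail . com".
def normalize_spoken_symbols(text):
    # Replace longer phrases first so "at the rate" wins over the bare word "at".
    for spoken, symbol in sorted(SYMBOL_MAPPING.items(), key=lambda kv: -len(kv[0])):
        text = re.sub(rf"\b{re.escape(spoken)}\b", symbol, text, flags=re.IGNORECASE)
    return text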

# Function to convert audio to WAV format
def convert_to_wav(input_path, output_path):
    try:
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)  # Convert to 16kHz, mono
        audio.export(output_path, format="wav")
    except Exception as e:
        raise Exception(f"Audio conversion failed: {str(e)}")

# Function to check if audio contains actual speech
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS-16)  # Silence = stretches >500 ms that are 16 dB below the clip's average loudness
    return len(nonsilent_parts) == 0  # If no speech detected

# Extract name, email, and phone number from transcribed text
def extract_name_email_phone(text):
    # Regex for basic email and phone number
    email = re.search(r'\S+@\S+', text)
    phone = re.search(r'\+?\d{10,15}', text)  # Matches 10-15 digit numbers with an optional leading +

    name = text.split(' ')[0]  # Simplified assumption that name is the first word
    email = email.group(0) if email else "[email protected]"
    phone = phone.group(0) if phone else "0000000000"

    return name, email, phone

# Load environment variables from the .env file before reading Salesforce credentials
load_dotenv()

def get_salesforce_connection():
    # Load Salesforce credentials from environment variables
    sf_username = os.getenv('SF_USERNAME')
    sf_password = os.getenv('SF_PASSWORD')
    sf_token = os.getenv('SF_SECURITY_TOKEN')

    # Check if credentials are available
    if not sf_username or not sf_password or not sf_token:
        raise ValueError("Salesforce credentials are missing from environment variables.")

    # Connect to Salesforce
    sf = Salesforce(username=sf_username, password=sf_password, security_token=sf_token)
    print("Connected to Salesforce successfully!")

    return sf

# Establish the Salesforce connection once at startup
sf = get_salesforce_connection()

# Function to create Salesforce record
def create_salesforce_record(name, email, phone_number):
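    # Customer_Login__c (with its Email__c and Phone_Number__c custom fields)
    # is assumed to already exist in the connected Salesforce org.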
    try:
        # Create the record in Salesforce
        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone_number
        })
        
        # Log the response from Salesforce
        if customer_login.get('id'):
            print(f"Record created successfully with ID: {customer_login['id']}")
            return customer_login
        else:
            print("Record creation failed: No ID returned")
            return {"error": "Record creation failed: No ID returned"}
    except Exception as e:
        print(f"Error creating Salesforce record: {str(e)}")
        return {"error": f"Failed to create record in Salesforce: {str(e)}"}

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]
    input_audio_path = os.path.join("static", "temp_input.wav")
    output_audio_path = os.path.join("static", "temp.wav")
    audio_file.save(input_audio_path)

    try:
        # Convert to WAV
        convert_to_wav(input_audio_path, output_audio_path)

        # Check for silence
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400

        # Use Whisper ASR model for transcription
        result = asr_model(output_audio_path, generate_kwargs={"language": "en"})
        transcribed_text = result["text"].strip().capitalize()

        # Extract name, email, and phone number from the transcribed text
        name, email, phone_number = extract_name_email_phone(transcribed_text)

        # Create record in Salesforce
        salesforce_response = create_salesforce_record(name, email, phone_number)

        # Check if the response contains an error
        if "error" in salesforce_response:
            print(f"Error creating record in Salesforce: {salesforce_response['error']}")
            return jsonify(salesforce_response), 500
        
        print(f"Salesforce Response: {salesforce_response}")
        return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})

    except Exception as e:
        print(f"Error in transcribing or processing: {str(e)}")
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500

# Start Production Server
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)