import torch  # <-- Import the torch module
import os  # Needed for os.urandom and os.path.join below
from simple_salesforce import Salesforce
from flask import Flask, render_template, request, jsonify
import json
import time
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from transformers import pipeline
from transformers import AutoConfig # Import AutoConfig for the config object
from waitress import serve
# Initialize Flask App
app = Flask(__name__, template_folder="templates")
app.secret_key = os.urandom(24)
# Use whisper-small for faster processing
device = "cuda" if torch.cuda.is_available() else "cpu"
# Create config object to set timeout and other parameters
config = AutoConfig.from_pretrained("openai/whisper-small")
config.update({"timeout": 60}) # Set timeout to 60 seconds
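# NOTE: this config object is later passed to the Whisper ASR pipeline built in the /transcribe route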
# Function to generate audio prompts
def generate_audio_prompt(text, filename):
    try:
        tts = gTTS(text)
        tts.save(os.path.join("static", filename))
    except Exception as e:
        print(f"Error: {e}")
        time.sleep(5)  # Wait before retrying
        generate_audio_prompt(text, filename)
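# NOTE: generate_audio_prompt retries indefinitely on failure (e.g., no network for gTTS), so startup can block until TTS succeeds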
# Generate required voice prompts
prompts = {
"welcome": "Welcome to Biryani Hub.",
"ask_name": "Tell me your name.",
"ask_email": "Please provide your email address.",
"thank_you": "Thank you for registration."
}
for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")
# Function to convert audio to WAV format
def convert_to_wav(input_path, output_path):
    try:
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)  # Convert to 16kHz, mono
        audio.export(output_path, format="wav")
    except Exception as e:
        raise Exception(f"Audio conversion failed: {str(e)}")
# Function to check if audio contains actual speech
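# (pydub treats anything quieter than 16 dB below the clip's average loudness, for 500 ms or more, as silence;
#  detect_nonsilent returns the remaining spans, so an empty list means no speech was picked up)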
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS - 16)
    return len(nonsilent_parts) == 0
# Salesforce connection details (hardcoded)
username = '[email protected]'
password = 'Sati@1020'
security_token = 'sSSjyhInIsUohKpG8sHzty2q'
try:
    print("Attempting to connect to Salesforce...")
    sf = Salesforce(username=username, password=password, security_token=security_token)
    print("Connected to Salesforce successfully!")
except Exception as e:
    print(f"Failed to connect to Salesforce: {str(e)}")
# ✅ HOME ROUTE (Loads `index.html`)
@app.route("/", methods=["GET"])
def index():
return render_template("index.html")
# ✅ DASHBOARD ROUTE
@app.route("/dashboard", methods=["GET"])
def dashboard():
return render_template("dashboard.html")
# ✅ MENU PAGE ROUTE
@app.route("/menu_page", methods=["GET"])
def menu_page():
return render_template("menu_page.html")
# ✅ LOGIN API
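# Expects a JSON body: {"name": ..., "email": ..., "phone_number": ...}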
@app.route('/login', methods=['POST'])
def login():
    data = request.json
    name = data.get('name')
    email = data.get('email')
    phone_number = data.get('phone_number')
    if not name or not email or not phone_number:
        return jsonify({'error': 'Missing required fields'}), 400
    try:
        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone_number
        })
        return jsonify({'success': True, 'id': customer_login['id']}), 200
    except Exception as e:
        return jsonify({'error': f'Failed to create record in Salesforce: {str(e)}'}), 500
# ✅ REGISTER API
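# Creates the same Customer_Login__c object as /login, but reads the phone field from "phone" rather than "phone_number"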
@app.route("/submit", methods=["POST"])
def submit():
    data = request.json
    name = data.get('name')
    email = data.get('email')
    phone = data.get('phone')
    if not name or not email or not phone:
        return jsonify({'error': 'Missing data'}), 400
    try:
        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone
        })
        return jsonify({'success': True}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ✅ TRANSCRIBE AUDIO API
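# Accepts a multipart upload named "audio", converts it to 16 kHz mono WAV, rejects silent clips,
# transcribes it with Whisper, then parses the text into name/email/phone and saves them to Salesforce.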
@app.route("/transcribe", methods=["POST"])
def transcribe():
if "audio" not in request.files:
return jsonify({"error": "No audio file provided"}), 400
audio_file = request.files["audio"]
input_audio_path = os.path.join("static", "temp_input.wav")
output_audio_path = os.path.join("static", "temp.wav")
audio_file.save(input_audio_path)
try:
convert_to_wav(input_audio_path, output_audio_path)
if is_silent_audio(output_audio_path):
return jsonify({"error": "No speech detected. Please try again."}), 400
asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1, config=config)
result = asr_pipeline(output_audio_path)
transcribed_text = result["text"].strip().capitalize()
parts = transcribed_text.split()
name = parts[0] if len(parts) > 0 else "Unknown Name"
email = parts[1] if '@' in parts[1] else "[email protected]"
phone_number = parts[2] if len(parts) > 2 else "0000000000"
confirmation = f"Is this correct? Name: {name}, Email: {email}, Phone: {phone_number}"
generate_audio_prompt(confirmation, "confirmation.mp3")
salesforce_response = sf.Customer_Login__c.create({
'Name': name,
'Email__c': email,
'Phone_Number__c': phone_number
})
return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})
except Exception as e:
return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
# ✅ MENU API
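# Queries Menu_Item__c records from Salesforce and renders them into menu_page.html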
@app.route("/menu", methods=["GET"])
def get_menu():
    try:
        # Fetch menu items from Salesforce
        query = "SELECT Name, Price__c, Ingredients__c, Category__c FROM Menu_Item__c"
        result = sf.query(query)
        menu_items = []
        for item in result["records"]:
            menu_items.append({
                "name": item["Name"],
                "price": item["Price__c"],
                "ingredients": item["Ingredients__c"],
                "category": item["Category__c"]
            })
        # Pass the menu items to the template
        return render_template("menu_page.html", menu=menu_items)
    except Exception as e:
        return jsonify({"error": f"Failed to fetch menu: {str(e)}"}), 500
# ✅ START PRODUCTION SERVER
if __name__ == "__main__":
print("β
Starting Flask API Server on port 7860...")
serve(app, host="0.0.0.0", port=7860)