import torch
from flask import Flask, render_template, request, jsonify, redirect, session
import os
from transformers import pipeline
from gtts import gTTS
from gtts.tts import gTTSError
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from transformers import AutoConfig
import time
from waitress import serve
from simple_salesforce import Salesforce
import requests
# Initialize Flask app
app = Flask(__name__)
# Secret key for session management
app.secret_key = os.urandom(24) # For session management (secure)
# Set the device for processing
device = "cuda" if torch.cuda.is_available() else "cpu"
# Whisper ASR model configuration for speech recognition
config = AutoConfig.from_pretrained("openai/whisper-small")
config.update({"timeout": 60}) # Set timeout to 60 seconds
# Salesforce Connection Setup
try:
    print("Attempting to connect to Salesforce...")
    sf = Salesforce(username='[email protected]', password='Sati@1020', security_token='sSSjyhInIsUohKpG8sHzty2q')
    print("Connected to Salesforce successfully!")
    print("User Info:", sf.UserInfo)  # Log the user info to verify the connection
except Exception as e:
    print(f"Failed to connect to Salesforce: {str(e)}")
# Functions for Salesforce Operations
def create_salesforce_record(sf, name, email, phone_number):
    try:
        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone_number
        })
        return customer_login
    except Exception as e:
        raise Exception(f"Failed to create record: {str(e)}")
def get_menu_items(sf):
query = "SELECT Name, Price__c, Ingredients__c, Category__c FROM Menu_Item__c"
result = sf.query(query)
return result['records']
# Voice-related function to generate audio prompts
def generate_audio_prompt(text, filename):
    try:
        tts = gTTS(text)
        tts.save(os.path.join("static", filename))
    except gTTSError as e:
        print(f"Error: {e}")
        print("Retrying after 5 seconds...")
        time.sleep(5)  # Wait for 5 seconds before retrying
        generate_audio_prompt(text, filename)
# Utility function to convert audio files to WAV format
def convert_to_wav(input_path, output_path):
    try:
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)  # Convert to 16kHz, mono
        audio.export(output_path, format="wav")
    except Exception as e:
        print(f"Error: {str(e)}")
        raise Exception(f"Audio conversion failed: {str(e)}")
# Utility function to check for silence in audio
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS - 16)
    print(f"Detected nonsilent parts: {nonsilent_parts}")
    return len(nonsilent_parts) == 0  # If no speech detected
# Routes for Handling Views
@app.route("/")
def index():
return render_template("index.html")
@app.route("/dashboard", methods=["GET"])
def dashboard():
return render_template("dashboard.html")
@app.route('/login', methods=['POST'])
def login():
    # Get data from the voice bot (name, email, phone number)
    data = request.json  # Assuming the voice bot sends JSON data
    name = data.get('name')
    email = data.get('email')
    phone_number = data.get('phone_number')
    if not name or not email or not phone_number:
        return jsonify({'error': 'Missing required fields'}), 400
    try:
        # Create a Salesforce record for the customer
        customer_login = create_salesforce_record(sf, name, email, phone_number)
        session['customer_id'] = customer_login['id']  # Store customer ID in session
        return redirect("/menu")  # Redirect to the menu page after successful login
    except Exception as e:
        return jsonify({'error': f'Failed to create record in Salesforce: {str(e)}'}), 500
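# Illustrative /login request body from the voice bot (hypothetical values; the
# route only requires the keys name, email and phone_number):
#   {"name": "Jane Doe", "email": "jane@example.com", "phone_number": "5551234567"}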
@app.route("/menu", methods=["GET"])
def menu_page():
    # Fetch menu items from Salesforce
    menu_items = get_menu_items(sf)
    menu_data = [{"name": item['Name'], "price": item['Price__c'], "ingredients": item['Ingredients__c'], "category": item['Category__c']} for item in menu_items]
    return render_template("menu_page.html", menu_items=menu_data)
@app.route("/cart", methods=["GET"])
def cart():
    # Retrieve cart items from session
    cart_items = session.get('cart_items', [])
    return render_template("cart_page.html", cart_items=cart_items)
@app.route("/order-summary", methods=["GET"])
def order_summary():
    # Retrieve the order details from session
    order_details = session.get('cart_items', [])
    total_price = sum(item['price'] * item['quantity'] for item in order_details)
    return render_template("order_summary.html", order_details=order_details, total_price=total_price)
@app.route("/final_order", methods=["GET"])
def final_order():
    # Clear cart items from session after confirming order
    session.pop('cart_items', None)
    return render_template("final_order.html")
@app.route("/order", methods=["POST"])
def place_order():
    # Adding items to the cart
    item_name = request.json.get('item_name')
    quantity = request.json.get('quantity')
    # Store the item in the session cart
    cart_items = session.get('cart_items', [])
    cart_items.append({"name": item_name, "quantity": quantity, "price": 10})  # Example with fixed price
    session['cart_items'] = cart_items  # Save the updated cart to the session
    return jsonify({"success": True, "message": f"Added {item_name} to cart."})
@app.route("/transcribe", methods=["POST"])
def transcribe():
if "audio" not in request.files:
return jsonify({"error": "No audio file provided"}), 400
audio_file = request.files["audio"]
input_audio_path = os.path.join("static", "temp_input.wav")
output_audio_path = os.path.join("static", "temp.wav")
audio_file.save(input_audio_path)
try:
# Convert audio to WAV format
convert_to_wav(input_audio_path, output_audio_path)
# Check if audio contains silence
if is_silent_audio(output_audio_path):
return jsonify({"error": "No speech detected. Please try again."}), 400
else:
print("Audio contains speech, proceeding with transcription.")
# Use Whisper ASR model for transcription
result = None
retry_attempts = 3
for attempt in range(retry_attempts):
try:
result = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1, config=config)
print(f"Transcribed text: {result['text']}")
break
except requests.exceptions.ReadTimeout:
print(f"Timeout occurred, retrying attempt {attempt + 1}/{retry_attempts}...")
time.sleep(5)
if result is None:
return jsonify({"error": "Unable to transcribe audio after retries."}), 500
transcribed_text = result["text"].strip().capitalize()
print(f"Transcribed text: {transcribed_text}")
# Extract name, email, and phone number from the transcribed text
parts = transcribed_text.split()
name = parts[0] if len(parts) > 0 else "Unknown Name"
email = parts[1] if '@' in parts[1] else "[email protected]"
phone_number = parts[2] if len(parts) > 2 else "0000000000"
print(f"Parsed data - Name: {name}, Email: {email}, Phone Number: {phone_number}")
# Confirm details before submission
confirmation = f"Is this correct? Name: {name}, Email: {email}, Phone: {phone_number}"
generate_audio_prompt(confirmation, "confirmation.mp3")
# Simulate confirmation via user action
user_confirms = True # Assuming the user confirms, replace with actual logic
if user_confirms:
# Create a record in Salesforce
salesforce_response = create_salesforce_record(name, email, phone_number)
return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})
except Exception as e:
print(f"Error in transcribing or processing: {str(e)}")
return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
# Start Production Server
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)
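# Example client call for the /transcribe endpoint (a minimal sketch with a
# hypothetical file name, assuming the server is running locally on port 7860):
#
#   import requests
#   with open("sample.wav", "rb") as f:
#       resp = requests.post("http://localhost:7860/transcribe", files={"audio": f})
#   print(resp.json())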