import os
import time
import logging
import json
import requests
import torch  # used to pick GPU vs CPU for the Whisper pipeline in /transcribe
from flask import Flask, render_template, request, jsonify, session
from flask_session import Session
from simple_salesforce import Salesforce
from gtts import gTTS
from gtts.tts import gTTSError
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from transformers import pipeline  # ASR pipeline used in /transcribe
from waitress import serve
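
# The imports above imply roughly the following dependencies (a sketch of a
# requirements list, not taken from the repo itself): Flask, Flask-Session,
# simple-salesforce, gTTS, pydub, waitress, transformers, torch.
# pydub additionally needs the ffmpeg binary available on the PATH.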
app = Flask(__name__)
# Configure Flask session
app.secret_key = os.getenv("SECRET_KEY", "sSSjyhInIsUohKpG8sHzty2q")
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up logging
logging.basicConfig(level=logging.INFO)
# Connect to Salesforce
try:
    sf = Salesforce(username='[email protected]', password='Sati@1020', security_token='sSSjyhInIsUohKpG8sHzty2q')
    print("✅ Connected to Salesforce successfully!")
except Exception as e:
    print(f"❌ Failed to connect to Salesforce: {str(e)}")
# Voice prompts
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registering."
}
# Function to generate voice prompts
def generate_audio_prompt(text, filename):
    try:
        tts = gTTS(text)
        tts.save(os.path.join("static", filename))
    except gTTSError:
        # gTTS can be rate-limited; wait briefly and retry
        time.sleep(5)
        generate_audio_prompt(text, filename)

for key, text in prompts.items():
    generate_audio_prompt(text, f"{key}.mp3")
# Function to convert audio to 16 kHz mono WAV (pydub needs ffmpeg for non-WAV inputs)
def convert_to_wav(input_path, output_path):
    audio = AudioSegment.from_file(input_path)
    audio = audio.set_frame_rate(16000).set_channels(1)
    audio.export(output_path, format="wav")
# Function to check if audio contains actual speech
def is_silent_audio(audio_path):
    audio = AudioSegment.from_wav(audio_path)
    nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS - 16)
    return len(nonsilent_parts) == 0
@app.route("/")
def index():
    return render_template("index.html")
# ✅ LOGIN ENDPOINT
@app.route('/login', methods=['POST'])
def login():
    data = request.json
    email = data.get('email', '').strip().lower()
    phone_number = data.get('phone_number', '').strip()

    if not email or not phone_number:
        return jsonify({'error': 'Missing email or phone number'}), 400

    try:
        # SOQL has no LOWER() function; standard text comparisons are already case-insensitive.
        # NOTE: interpolating user input directly into SOQL like this is injection-prone.
        query = f"SELECT Id, Name FROM Customer_Login__c WHERE Email__c = '{email}' AND Phone_Number__c = '{phone_number}' LIMIT 1"
        result = sf.query(query)

        if result['totalSize'] == 0:
            return jsonify({'error': 'Invalid email or phone number. User not found'}), 401

        user_data = result['records'][0]
        session['user_id'] = user_data['Id']
        session['name'] = user_data['Name']
        return jsonify({'success': True, 'message': 'Login successful', 'user_id': user_data['Id'], 'name': user_data['Name']}), 200
    except Exception as e:
        return jsonify({'error': f'Unexpected error: {str(e)}'}), 500
# ✅ REGISTRATION ENDPOINT
@app.route("/register", methods=["POST"])
def register():
    data = request.json
    name = data.get('name', '').strip()
    email = data.get('email', '').strip().lower()
    phone = data.get('phone', '').strip()

    if not name or not email or not phone:
        return jsonify({'error': 'Missing data'}), 400

    try:
        # Check for an existing user (SOQL text comparison is case-insensitive; LOWER() is not supported)
        query = f"SELECT Id FROM Customer_Login__c WHERE Email__c = '{email}' AND Phone_Number__c = '{phone}' LIMIT 1"
        existing_user = sf.query(query)

        if existing_user['totalSize'] > 0:
            return jsonify({'error': 'User already exists'}), 409

        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone
        })

        if customer_login.get('id'):
            return jsonify({'success': True, 'user_id': customer_login['id']}), 200
        else:
            return jsonify({'error': 'Failed to create record'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ✅ TRANSCRIPTION ENDPOINT
@app.route("/transcribe", methods=["POST"])
def transcribe():
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files["audio"]
    input_audio_path = os.path.join("static", "temp_input.wav")
    output_audio_path = os.path.join("static", "temp.wav")
    audio_file.save(input_audio_path)

    try:
        convert_to_wav(input_audio_path, output_audio_path)
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400

        # Whisper ASR pipeline (loaded on every request; hoisting it to module level would avoid reloading the model)
        asr = pipeline("automatic-speech-recognition", model="openai/whisper-small",
                       device=0 if torch.cuda.is_available() else -1)
        transcribed_text = asr(output_audio_path)["text"].strip().capitalize()
        return jsonify({"text": transcribed_text})
    except Exception as e:
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
# Start Production Server
if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=7860)
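
# Example client calls (a sketch only; the base URL and the test values are
# assumptions, not taken from the app):
#
#   import requests
#   base = "http://localhost:7860"
#   requests.post(f"{base}/register", json={"name": "Test User", "email": "test@example.com", "phone": "9999999999"})
#   requests.post(f"{base}/login", json={"email": "test@example.com", "phone_number": "9999999999"})
#   with open("sample.wav", "rb") as f:
#       requests.post(f"{base}/transcribe", files={"audio": f})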