# NOTE: non-Python residue from a web scrape (Hugging Face Spaces page header,
# git blame hashes, and a blame line-number column) was removed here so the
# file parses as Python.
import torch
from flask import Flask, render_template, request, jsonify
import os
from transformers import pipeline
from gtts import gTTS
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
from waitress import serve
from simple_salesforce import Salesforce
# Flask application instance; routes are registered on it below.
app = Flask(__name__)
# Use whisper-small for faster processing and better speed
# Prefer GPU when available; transformers' pipeline expects a CUDA device
# index (0) or -1 for CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
asr_model = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if device == "cuda" else -1)
# Function to generate audio prompts
def generate_audio_prompt(text, filename):
    """Synthesize *text* to speech with gTTS and save it under ``static/``.

    Args:
        text: English sentence to synthesize.
        filename: Target file name inside the static directory (e.g. "welcome.mp3").

    Raises:
        Whatever gTTS raises on synthesis/network failure (requires internet).
    """
    # Ensure the output directory exists so tts.save() does not fail on a
    # fresh deployment where static/ has not been created yet.
    os.makedirs("static", exist_ok=True)
    tts = gTTS(text=text, lang="en")
    tts.save(os.path.join("static", filename))
# Generate required voice prompts
# Pre-generate the fixed voice prompts played by the UI; each key becomes
# static/<key>.mp3.
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration.",
}
for prompt_key in prompts:
    generate_audio_prompt(prompts[prompt_key], f"{prompt_key}.mp3")
# Symbol mapping for proper recognition
# Spoken-word -> symbol substitutions for dictated email addresses.
# NOTE(review): this table is not referenced anywhere in this file — presumably
# intended for post-processing transcripts; confirm before removing. Also note
# the bare word "at" maps to "@", which would clobber ordinary uses of "at".
SYMBOL_MAPPING = {
    spoken: symbol
    for spoken, symbol in [
        ("at the rate", "@"),
        ("at", "@"),
        ("dot", "."),
        ("underscore", "_"),
        ("hash", "#"),
        ("plus", "+"),
        ("dash", "-"),
        ("comma", ","),
        ("space", " "),
    ]
}
# Function to convert audio to WAV format
def convert_to_wav(input_path, output_path):
    """Convert any ffmpeg-readable audio file to 16 kHz mono WAV.

    Args:
        input_path: Path of the uploaded source audio file.
        output_path: Path where the normalized WAV is written.

    Raises:
        Exception: wrapping the underlying pydub/ffmpeg error, with the
            original exception chained as ``__cause__``.
    """
    try:
        audio = AudioSegment.from_file(input_path)
        # 16 kHz mono matches the input format Whisper was trained on.
        audio = audio.set_frame_rate(16000).set_channels(1)
        audio.export(output_path, format="wav")
    except Exception as e:
        # Chain the original exception so the root cause stays visible in
        # tracebacks instead of being flattened into a message string.
        raise Exception(f"Audio conversion failed: {str(e)}") from e
# Function to check if audio contains actual speech
def is_silent_audio(audio_path):
    """Return True when the WAV file at *audio_path* contains no detectable speech."""
    segment = AudioSegment.from_wav(audio_path)
    # Treat anything 16 dB below the clip's average loudness, sustained for at
    # least 500 ms, as silence; non-silent spans are what's left over.
    threshold = segment.dBFS - 16
    voiced_spans = detect_nonsilent(segment, min_silence_len=500, silence_thresh=threshold)
    return not voiced_spans
# Salesforce connection details
# SECURITY NOTE(review): live username, password, and security token are
# hard-coded and committed to source. Rotate these secrets and load them from
# environment variables or a secrets manager instead of the repository.
sf = Salesforce(username='[email protected]', password='Sati@1020', security_token='sSSjyhInIsUohKpG8sHzty2q')
# Debugging - Confirming Salesforce connection
print("Connected to Salesforce, checking user info...")
# NOTE(review): sf.UserInfo is dynamic SObject attribute access — printing it
# presumably serves as a connectivity smoke test rather than showing the
# logged-in user's details; confirm this is the intended output.
print(sf.UserInfo) # This will give you the user info if the connection is successful
# Function to create Salesforce record
def create_salesforce_record(name, email, phone_number):
    """Insert a Customer_Login__c record in Salesforce.

    Returns the raw Salesforce ``create()`` response on success, or a dict
    containing an "error" key describing the failure.
    """
    payload = {
        'Name': name,
        'Email__c': email,
        'Phone_Number__c': phone_number
    }
    try:
        customer_login = sf.Customer_Login__c.create(payload)
        # Log the raw response so failed inserts are easy to diagnose.
        print(f"Salesforce response: {customer_login}")
    except Exception as e:
        # Report the failure to the caller instead of raising.
        error_message = str(e)
        print(f"Error creating Salesforce record: {error_message}")
        return {"error": f"Failed to create record in Salesforce: {error_message}"}
    if customer_login.get('id'):
        print(f"Record created successfully with ID: {customer_login['id']}")
    else:
        print("No ID returned, record creation may have failed.")
    return customer_login
@app.route("/")
def index():
    # Landing page hosting the voice-registration front end.
    template_name = "index.html"
    return render_template(template_name)
def _parse_contact_details(transcribed_text):
    """Naively split a transcript into (name, email, phone_number).

    Heuristic: first word is the name, second word is the email if it contains
    '@', third word is the phone number. Missing pieces fall back to
    placeholder values instead of raising, so short transcripts no longer
    crash the request. A real parser/NER step would be more robust.
    """
    parts = transcribed_text.split()
    name = parts[0] if parts else "Unknown"
    # BUGFIX: guard len(parts) > 1 — previously a one-word transcript raised
    # IndexError on parts[1], which surfaced to the client as a 500.
    email = parts[1] if len(parts) > 1 and '@' in parts[1] else "[email protected]"
    phone_number = parts[2] if len(parts) > 2 else "0000000000"
    return name, email, phone_number

@app.route("/transcribe", methods=["POST"])
def transcribe():
    """Accept an uploaded audio file, transcribe it, and create a Salesforce record.

    Returns JSON with the transcript and the Salesforce response, or an
    error payload with status 400 (bad/silent input) or 500 (processing or
    Salesforce failure).
    """
    if "audio" not in request.files:
        return jsonify({"error": "No audio file provided"}), 400
    audio_file = request.files["audio"]
    # NOTE(review): fixed temp paths are shared across requests — concurrent
    # uploads can clobber each other; consider tempfile.NamedTemporaryFile.
    input_audio_path = os.path.join("static", "temp_input.wav")
    output_audio_path = os.path.join("static", "temp.wav")
    audio_file.save(input_audio_path)
    try:
        # Normalize to 16 kHz mono WAV for Whisper.
        convert_to_wav(input_audio_path, output_audio_path)
        # Reject recordings with no detectable speech before running ASR.
        if is_silent_audio(output_audio_path):
            return jsonify({"error": "No speech detected. Please try again."}), 400
        # Use Whisper ASR model for transcription (forced English decoding).
        result = asr_model(output_audio_path, generate_kwargs={"language": "en"})
        transcribed_text = result["text"].strip().capitalize()
        name, email, phone_number = _parse_contact_details(transcribed_text)
        # Create record in Salesforce
        salesforce_response = create_salesforce_record(name, email, phone_number)
        # create_salesforce_record returns {"error": ...} on failure.
        if "error" in salesforce_response:
            print(f"Error creating record in Salesforce: {salesforce_response['error']}")
            return jsonify(salesforce_response), 500
        print(f"Salesforce Response: {salesforce_response}")
        return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})
    except Exception as e:
        print(f"Error in transcribing or processing: {str(e)}")
        return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
# Start Production Server
if __name__ == "__main__":
    # Waitress is a production-grade WSGI server; bind all interfaces on
    # port 7860 (the port Hugging Face Spaces expects apps to listen on).
    serve(app, host="0.0.0.0", port=7860)