Update app.py
app.py
CHANGED
@@ -73,91 +73,135 @@ def is_silent_audio(audio_path):
-     #
-         query = f"SELECT Id, Name FROM Customer_Login__c WHERE Email__c = '{email}' AND Phone_Number__c = '{mobile}'"
-         result = sf.query(query)
-         if result['totalSize'] > 0:
-             return jsonify({'success': True, 'message': 'User authenticated successfully.'}), 200
-         else:
-             return jsonify({'success': False, 'error': 'Invalid email or mobile number.'}), 400
-         return jsonify({'error': 'Something went wrong. Please try again later.'}), 500
- if __name__ == "__main__":
-     app.run(host="0.0.0.0", port=7860, debug=True)
- # Set up logging
- logging.basicConfig(level=logging.INFO)
-     # Serve the HTML page for the voice-based login
- @app.route("/
- def
-     email = data.get("email")
-     mobile = data.get("mobile")
-     # For simplicity, we'll assume the capture was successful.
-     return jsonify({"success": True, "message": "Email and mobile captured successfully."}), 200
-     app
    print(f"Detected nonsilent parts: {nonsilent_parts}")
    return len(nonsilent_parts) == 0  # If no speech detected

+ # Salesforce connection details
+ try:
+     print("Attempting to connect to Salesforce...")
+     sf = Salesforce(username='[email protected]', password='Sati@1020', security_token='sSSjyhInIsUohKpG8sHzty2q')
+     print("Connected to Salesforce successfully!")
+     print("User Info:", sf.UserInfo)  # Log the user info to verify the connection
+ except Exception as e:
+     print(f"Failed to connect to Salesforce: {str(e)}")
+
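Note: the block above hardcodes a live username, password, and security token in source. A safer variant (a sketch only, assuming environment variables named SF_USERNAME, SF_PASSWORD, and SF_SECURITY_TOKEN, which are not part of this commit) would read the credentials at startup:

```python
import os
from simple_salesforce import Salesforce

# Credentials read from the environment instead of being hardcoded (variable names are illustrative).
sf = Salesforce(
    username=os.environ["SF_USERNAME"],
    password=os.environ["SF_PASSWORD"],
    security_token=os.environ["SF_SECURITY_TOKEN"],
)
```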
+ # Function to create Salesforce record
+ # API endpoint to receive data from voice bot
+ @app.route('/login', methods=['POST'])
+ def login():
+     # Get data from voice bot (name, email, phone number)
+     data = request.json  # Assuming voice bot sends JSON data
+
+     name = data.get('name')
+     email = data.get('email')
+     phone_number = data.get('phone_number')
+
+     if not name or not email or not phone_number:
+         return jsonify({'error': 'Missing required fields'}), 400
+
+     # Create a record in Salesforce
    try:
+         customer_login = sf.Customer_Login__c.create({
+             'Name': name,
+             'Email__c': email,
+             'Phone_Number__c': phone_number
+         })
+         return jsonify({'success': True, 'id': customer_login['id']}), 200
    except Exception as e:
+         return jsonify({'error': f'Failed to create record in Salesforce: {str(e)}'}), 500

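For reference, a request against the /login route above could look like the following sketch (the host and port follow the serve() call at the bottom of this file; the payload values are placeholders):

```python
import requests

# Hypothetical client call exercising the /login route defined above.
resp = requests.post(
    "http://localhost:7860/login",
    json={"name": "Test User", "email": "user@example.com", "phone_number": "9999999999"},
)
print(resp.status_code, resp.json())
```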
+ @app.route("/submit", methods=["POST"])
+ def submit():
+     data = request.json
+     name = data.get('name')
+     email = data.get('email')
+     phone = data.get('phone')

+     if not name or not email or not phone:
+         return jsonify({'error': 'Missing data'}), 400

+     try:
+         # Create Salesforce record
+         customer_login = sf.Customer_Login__c.create({
+             'Name': name,
+             'Email__c': email,
+             'Phone_Number__c': phone
+         })
+
+         if customer_login.get('id'):
+             return jsonify({'success': True})
+         else:
+             return jsonify({'error': 'Failed to create record'}), 500

+     except Exception as e:
+         return jsonify({'error': str(e)}), 500


@app.route("/")
def index():
    return render_template("index.html")

+ @app.route("/transcribe", methods=["POST"])
+ def transcribe():
+     if "audio" not in request.files:
+         print("No audio file provided")
+         return jsonify({"error": "No audio file provided"}), 400

+     audio_file = request.files["audio"]
+     input_audio_path = os.path.join("static", "temp_input.wav")
+     output_audio_path = os.path.join("static", "temp.wav")
+     audio_file.save(input_audio_path)

+     try:
+         # Convert to WAV
+         convert_to_wav(input_audio_path, output_audio_path)

+         # Check for silence
+         if is_silent_audio(output_audio_path):
+             return jsonify({"error": "No speech detected. Please try again."}), 400
+         else:
+             print("Audio contains speech, proceeding with transcription.")
+
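convert_to_wav() is called here but defined outside this hunk. A typical pydub-based helper with this signature might look like the sketch below; this is an assumption about its behavior, not code taken from app.py:

```python
from pydub import AudioSegment

def convert_to_wav(input_path, output_path):
    # Decode whatever format the browser uploaded and re-export it as WAV.
    audio = AudioSegment.from_file(input_path)
    audio.export(output_path, format="wav")
```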
+         # Use Whisper ASR model for transcription
+         result = None
+         retry_attempts = 3
+         for attempt in range(retry_attempts):
+             try:
+                 result = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1, config=config)
+                 print(f"Transcribed text: {result['text']}")
+                 break
+             except requests.exceptions.ReadTimeout:
+                 print(f"Timeout occurred, retrying attempt {attempt + 1}/{retry_attempts}...")
+                 time.sleep(5)
+
+         if result is None:
+             return jsonify({"error": "Unable to transcribe audio after retries."}), 500
+
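A likely bug in the retry loop above: transformers' pipeline(...) call only builds the ASR pipeline and is never applied to the converted audio, so result would be a Pipeline object rather than a dict with a "text" key (the config argument passed there is also not defined anywhere in this hunk). A minimal corrected sketch, keeping the same openai/whisper-small checkpoint and the output_audio_path produced earlier:

```python
import torch
from transformers import pipeline

# Build the ASR pipeline once, then run it on the converted audio file.
asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
    device=0 if torch.cuda.is_available() else -1,
)
result = asr(output_audio_path)  # e.g. {"text": "..."}
print(result["text"])
```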
+         transcribed_text = result["text"].strip().capitalize()
+         print(f"Transcribed text: {transcribed_text}")
+
+         # Extract name, email, and phone number from the transcribed text
+         parts = transcribed_text.split()
+         name = parts[0] if len(parts) > 0 else "Unknown Name"
+         email = parts[1] if '@' in parts[1] else "[email protected]"
+         phone_number = parts[2] if len(parts) > 2 else "0000000000"
+         print(f"Parsed data - Name: {name}, Email: {email}, Phone Number: {phone_number}")
+
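The email line above indexes parts[1] before checking that a second token exists, so a one-word transcription raises IndexError. A guarded version of the same split-based parsing (a sketch; the fallback strings are placeholders):

```python
# Defensive version of the whitespace-split parsing used above.
parts = transcribed_text.split()
name = parts[0] if len(parts) > 0 else "Unknown Name"
email = parts[1] if len(parts) > 1 and "@" in parts[1] else "unknown@example.com"  # placeholder fallback
phone_number = parts[2] if len(parts) > 2 else "0000000000"
```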
+         # Create record in Salesforce
+         salesforce_response = create_salesforce_record(name, email, phone_number)
+
+         # Log the Salesforce response
+         print(f"Salesforce record creation response: {salesforce_response}")
+
+         # Check if the response contains an error
+         if "error" in salesforce_response:
+             print(f"Error creating record in Salesforce: {salesforce_response['error']}")
+             return jsonify(salesforce_response), 500
+
+         # If creation was successful, return the details
+         return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})

+     except Exception as e:
+         print(f"Error in transcribing or processing: {str(e)}")
+         return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500

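create_salesforce_record() is called in the /transcribe handler but never defined in this diff; only the placeholder comment near the top of the hunk hints at it. Below is a sketch of a helper consistent with the create() calls in /login and /submit, offered as an assumption about the intended implementation rather than code from this commit:

```python
def create_salesforce_record(name, email, phone_number):
    # Mirrors the create() calls in /login and /submit and returns a dict that
    # the /transcribe handler can check for an "error" key.
    try:
        record = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone_number
        })
        return {'success': True, 'id': record.get('id')}
    except Exception as e:
        return {'error': str(e)}
```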
# Start Production Server
if __name__ == "__main__":
+     serve(app, host="0.0.0.0", port=7860)
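serve(app, host=..., port=...) matches waitress's signature; the corresponding import is not visible in this hunk, so unless it already exists earlier in app.py it would need to be added, e.g.:

```python
from waitress import serve  # assumed: waitress provides serve(app, host=..., port=...)
```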