lokesh341 commited on
Commit
635ad5b
·
verified ·
1 Parent(s): 027fc0b

Update templates/index.html

Browse files
Files changed (1) hide show
  1. templates/index.html +211 -209
templates/index.html CHANGED
@@ -1,210 +1,212 @@
1
 
2
- import torch
3
- from flask import Flask, render_template, request, jsonify
4
- import json
5
- import os
6
- from transformers import pipeline
7
- from gtts import gTTS
8
- from pydub import AudioSegment
9
- from pydub.silence import detect_nonsilent
10
- from transformers import AutoConfig # Import AutoConfig for the config object
11
- import time
12
- from waitress import serve
13
- from simple_salesforce import Salesforce
14
- import requests # Import requests for exception handling
15
-
16
- app = Flask(__name__)
17
-
18
# Use whisper-small for faster processing and better speed
# NOTE(review): `device` is computed here but the /transcribe route re-derives
# the device itself (`0 if torch.cuda.is_available() else -1`), so this value
# appears unused in this file — confirm whether it can be removed or reused.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Create config object to set timeout and other parameters
# NOTE(review): "timeout" is not a standard Whisper model-config attribute;
# presumably intended to bound request/download time — confirm it has any effect.
config = AutoConfig.from_pretrained("openai/whisper-small")
config.update({"timeout": 60})  # Set timeout to 60 seconds
24
-
25
# Your function where you generate and save the audio
def generate_audio_prompt(text, filename, retries=3):
    """Synthesize `text` with gTTS and save it to static/<filename>.

    Args:
        text: Sentence to synthesize.
        filename: Output file name, written inside the "static" directory.
        retries: Additional attempts to make on failure (default keeps the
            original retry-forever behavior bounded).
    """
    for attempt in range(retries + 1):
        try:
            tts = gTTS(text)
            tts.save(os.path.join("static", filename))
            return
        # BUG FIX: the original caught `gtts.tts.gTTSError`, but only `gTTS`
        # was imported (`from gtts import gTTS`), so the except clause itself
        # raised NameError. Catch Exception (gTTSError subclasses it).
        # Also replaced unbounded recursion with a bounded retry loop.
        except Exception as e:
            print(f"Error: {e}")
            if attempt < retries:
                print("Retrying after 5 seconds...")
                time.sleep(5)  # Wait for 5 seconds before retrying
35
-
36
# Voice prompts played back to the user; each entry is rendered once at
# startup to static/<key>.mp3 by generate_audio_prompt().
prompts = {
    "welcome": "Welcome to Biryani Hub.",
    "ask_name": "Tell me your name.",
    "ask_email": "Please provide your email address.",
    "thank_you": "Thank you for registration."
}

for prompt_key, prompt_text in prompts.items():
    generate_audio_prompt(prompt_text, f"{prompt_key}.mp3")
46
-
47
# Symbol mapping for proper recognition
# Maps spoken words to the symbols they stand for when a user dictates an
# email address or phone number.
# NOTE(review): nothing in this file reads SYMBOL_MAPPING — presumably it was
# meant to post-process transcripts; confirm the intended caller or remove.
# NOTE(review): if applied by sequential text replacement, "at the rate" must
# be handled before the bare "at", as ordered here.
SYMBOL_MAPPING = {
    "at the rate": "@",
    "at": "@",
    "dot": ".",
    "underscore": "_",
    "hash": "#",
    "plus": "+",
    "dash": "-",
    "comma": ",",
    "space": " "
}
59
-
60
# Function to convert audio to WAV format
def convert_to_wav(input_path, output_path):
    """Convert any ffmpeg-readable audio file to 16 kHz mono WAV.

    Raises:
        Exception: if decoding or exporting fails; the original error is
            chained as ``__cause__`` for debugging.
    """
    try:
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)  # Convert to 16kHz, mono
        audio.export(output_path, format="wav")
    except Exception as e:
        print(f"Error: {str(e)}")
        # BUG FIX: re-raise with `from e` so the original traceback is
        # preserved instead of being discarded.
        raise Exception(f"Audio conversion failed: {str(e)}") from e
69
-
70
# Function to check if audio contains actual speech
def is_silent_audio(audio_path):
    """Return True when no non-silent span is detected in the WAV file."""
    segment = AudioSegment.from_wav(audio_path)
    speech_spans = detect_nonsilent(
        segment,
        min_silence_len=500,  # Reduced silence duration
        silence_thresh=segment.dBFS - 16,
    )
    print(f"Detected nonsilent parts: {speech_spans}")
    return not speech_spans  # If no speech detected
76
-
77
# Salesforce connection details
# SECURITY(review): credentials were hard-coded here and committed in plain
# text — they must be rotated. Read them from the environment, falling back
# to the previous literals so existing deployments keep working.
sf = None  # Routes must tolerate a failed connection (sf stays None).
try:
    print("Attempting to connect to Salesforce...")
    sf = Salesforce(
        username=os.environ.get("SF_USERNAME", "[email protected]"),
        password=os.environ.get("SF_PASSWORD", "Sati@1020"),
        security_token=os.environ.get("SF_SECURITY_TOKEN", "sSSjyhInIsUohKpG8sHzty2q"),
    )
    print("Connected to Salesforce successfully!")
    print("User Info:", sf.UserInfo)  # Log the user info to verify the connection
except Exception as e:
    print(f"Failed to connect to Salesforce: {str(e)}")
85
-
86
# API endpoint to receive data from voice bot
@app.route('/login', methods=['POST'])
def login():
    """Create a Customer_Login__c record from the voice bot's JSON payload.

    Expects: ``{"name": ..., "email": ..., "phone_number": ...}``.
    Returns 400 when a field is missing, 500 when Salesforce rejects it.
    """
    # BUG FIX: request.json errors out (or returns None) when the body is not
    # valid JSON; get_json(silent=True) lets us return a clean 400 instead.
    data = request.get_json(silent=True) or {}

    name = data.get('name')
    email = data.get('email')
    phone_number = data.get('phone_number')

    if not name or not email or not phone_number:
        return jsonify({'error': 'Missing required fields'}), 400

    # Create a record in Salesforce
    try:
        customer_login = sf.Customer_Login__c.create({
            'Name': name,
            'Email__c': email,
            'Phone_Number__c': phone_number
        })
        return jsonify({'success': True, 'id': customer_login['id']}), 200
    except Exception as e:
        print(f"Error creating Salesforce record: {str(e)}")
        return jsonify({'error': f'Failed to create record in Salesforce: {str(e)}'}), 500
111
-
112
- @app.route("/submit", methods=["POST"])
113
- def submit():
114
- data = request.json
115
- name = data.get('name')
116
- email = data.get('email')
117
- phone = data.get('phone')
118
-
119
- if not name or not email or not phone:
120
- return jsonify({'error': 'Missing data'}), 400
121
-
122
- try:
123
- # Create Salesforce record
124
- customer_login = sf.Customer_Login__c.create({
125
- 'Name': name,
126
- 'Email__c': email,
127
- 'Phone_Number__c': phone
128
- })
129
-
130
- if customer_login.get('id'):
131
- return jsonify({'success': True, 'id': customer_login['id']})
132
- else:
133
- return jsonify({'error': 'Failed to create record'}), 500
134
-
135
- except Exception as e:
136
- print(f"Error during Salesforce record creation: {str(e)}")
137
- return jsonify({'error': str(e)}), 500
138
-
139
@app.route("/")
def index():
    """Serve the voice-bot UI (templates/index.html)."""
    return render_template("index.html")
143
-
144
- @app.route("/transcribe", methods=["POST"])
145
- def transcribe():
146
- if "audio" not in request.files:
147
- print("No audio file provided")
148
- return jsonify({"error": "No audio file provided"}), 400
149
-
150
- audio_file = request.files["audio"]
151
- input_audio_path = os.path.join("static", "temp_input.wav")
152
- output_audio_path = os.path.join("static", "temp.wav")
153
- audio_file.save(input_audio_path)
154
-
155
- try:
156
- # Convert to WAV
157
- convert_to_wav(input_audio_path, output_audio_path)
158
-
159
- # Check for silence
160
- if is_silent_audio(output_audio_path):
161
- return jsonify({"error": "No speech detected. Please try again."}), 400
162
- else:
163
- print("Audio contains speech, proceeding with transcription.")
164
-
165
- # Use Whisper ASR model for transcription
166
- result = None
167
- retry_attempts = 3
168
- for attempt in range(retry_attempts):
169
- try:
170
- result = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1, config=config)
171
- print(f"Transcribed text: {result['text']}")
172
- break
173
- except requests.exceptions.ReadTimeout:
174
- print(f"Timeout occurred, retrying attempt {attempt + 1}/{retry_attempts}...")
175
- time.sleep(5)
176
-
177
- if result is None:
178
- return jsonify({"error": "Unable to transcribe audio after retries."}), 500
179
-
180
- transcribed_text = result["text"].strip().capitalize()
181
- print(f"Transcribed text: {transcribed_text}")
182
-
183
- # Extract name, email, and phone number from the transcribed text
184
- parts = transcribed_text.split()
185
- name = parts[0] if len(parts) > 0 else "Unknown Name"
186
- email = parts[1] if '@' in parts[1] else "unknown@domain.com"
187
- phone_number = parts[2] if len(parts) > 2 else "0000000000"
188
- print(f"Parsed data - Name: {name}, Email: {email}, Phone Number: {phone_number}")
189
-
190
- # Create record in Salesforce
191
- salesforce_response = create_salesforce_record(name, email, phone_number)
192
-
193
- # Log the Salesforce response
194
- print(f"Salesforce record creation response: {salesforce_response}")
195
-
196
- # Check if the response contains an error
197
- if "error" in salesforce_response:
198
- print(f"Error creating record in Salesforce: {salesforce_response['error']}")
199
- return jsonify(salesforce_response), 500
200
-
201
- # If creation was successful, return the details
202
- return jsonify({"text": transcribed_text, "salesforce_record": salesforce_response})
203
-
204
- except Exception as e:
205
- print(f"Error in transcribing or processing: {str(e)}")
206
- return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
207
-
208
# Start Production Server
if __name__ == "__main__":
    # Waitress is a production WSGI server; 0.0.0.0:7860 is the port
    # convention for Hugging Face Spaces deployments.
    serve(app, host="0.0.0.0", port=7860)
 
 
 
1
 
2
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Biryani Hub Login & Registration</title>
    <link href="https://fonts.googleapis.com/css2?family=Roboto:wght@400;500&display=swap" rel="stylesheet">
    <link rel="stylesheet" href="{{ url_for('static', filename='styles.css') }}">
    <style>
        /* Styling remains unchanged */
    </style>
</head>
<body>
    <div class="container">
        <h1>Welcome to Biryani Hub 🍽 🍗</h1>

        <!-- Form Section: the inline script toggles the two forms and fills
             the readonly inputs from speech recognition results. -->
        <div class="form-container">
            <!-- Registration Form (visible by default) -->
            <div class="form-section" id="registrationForm" style="display: block;">
                <h2 class="header">Register</h2>
                <label for="name">Your Name</label>
                <input type="text" id="name" placeholder="Your name will appear here..." readonly>

                <label for="email">Your Email</label>
                <input type="text" id="email" placeholder="Your email will appear here..." readonly>

                <label for="mobile">Your Mobile Number</label>
                <input type="text" id="mobile" placeholder="Your mobile number will appear here..." readonly>

                <p class="info" id="infoMessage">Listening 🗣🎙️...</p>
                <p class="status" id="status">🔊...</p>
            </div>

            <!-- Login Form (hidden until the user says "existing") -->
            <div class="form-section" id="loginForm" style="display: none;">
                <h2 class="header">Login</h2>
                <label for="loginEmail">Your Email</label>
                <input type="text" id="loginEmail" placeholder="Your email will appear here..." readonly>

                <label for="loginMobile">Your Mobile Number</label>
                <input type="text" id="loginMobile" placeholder="Your mobile number will appear here..." readonly>

                <p class="info" id="infoMessageLogin">Listening 🗣🎙️...</p>
                <p class="status" id="statusLogin">🔊...</p>
            </div>
        </div>
        <!-- NOTE(review): the script's autoConfirm() writes to #confirmName,
             #confirmEmail, #confirmPhone and #confirmation, none of which
             exist in this markup — confirm whether a confirmation section
             was dropped from the template. -->
    </div>
51
    <script>
        // Speech-recognition handle plus the captured registration fields.
        let recognition;
        let nameCaptured = "";
        let emailCaptured = "";
        let mobileCaptured = "";

        // webkitSpeechRecognition is Chrome/WebKit-only; other browsers are
        // told the page won't work. `recognition` stays undefined in that case.
        if ('webkitSpeechRecognition' in window) {
            recognition = new webkitSpeechRecognition();
            recognition.continuous = false;      // stop after one utterance
            recognition.interimResults = false;  // deliver final results only
            recognition.lang = 'en-US';
        } else {
            alert("Speech Recognition API is not supported in this browser.");
        }
65
+
66
+ function speak(text, callback) {
67
+ const speech = new SpeechSynthesisUtterance(text);
68
+ speech.onend = callback;
69
+ window.speechSynthesis.speak(speech);
70
+ }
71
+
72
+ // Start by asking if the user is a new or existing customer
73
+ function askLoginOrRegister() {
74
+ speak("Are you a new customer or an existing customer? Say 'new' for registration or 'existing' for login.", function() {
75
+ recognition.start();
76
+ recognition.onresult = function(event) {
77
+ let response = event.results[0][0].transcript.trim().toLowerCase();
78
+ recognition.stop();
79
+ if (response.includes("new")) {
80
+ showRegistrationForm();
81
+ } else if (response.includes("existing")) {
82
+ showLoginForm();
83
+ } else {
84
+ speak("Sorry, I didn't understand. Please say 'new' for registration or 'existing' for login.", askLoginOrRegister);
85
+ }
86
+ };
87
+ });
88
+ }
89
+
90
+ function showRegistrationForm() {
91
+ document.getElementById('registrationForm').style.display = 'block';
92
+ document.getElementById('loginForm').style.display = 'none';
93
+ speak("Please tell me your name to begin the registration.", startListeningForName);
94
+ }
95
+
96
+ function showLoginForm() {
97
+ document.getElementById('loginForm').style.display = 'block';
98
+ document.getElementById('registrationForm').style.display = 'none';
99
+ speak("Please tell me your email to begin the login process.", startListeningForLoginEmail);
100
+ }
101
+
102
+ // Capture the name for registration
103
+ function startListeningForName() {
104
+ recognition.start();
105
+ recognition.onresult = function(event) {
106
+ nameCaptured = event.results[0][0].transcript.trim();
107
+ document.getElementById('name').value = nameCaptured;
108
+ recognition.stop();
109
+ speak("You said " + nameCaptured + ". Is it correct?", confirmName);
110
+ };
111
+ }
112
+
113
+ function confirmName() {
114
+ recognition.start();
115
+ recognition.onresult = function(event) {
116
+ let confirmation = event.results[0][0].transcript.trim().toLowerCase();
117
+ recognition.stop();
118
+ if (confirmation.includes("ok")) {
119
+ speak("Great! Now, tell me your email.", startListeningForEmail);
120
+ } else {
121
+ speak("Let's try again. Tell me your name.", startListeningForName);
122
+ }
123
+ };
124
+ }
125
+
126
+ // Capture email for registration
127
+ function startListeningForEmail() {
128
+ recognition.start();
129
+ recognition.onresult = function(event) {
130
+ emailCaptured = event.results[0][0].transcript.trim().replace(/\bat\b/g, '@').replace(/\s+/g, '');
131
+ document.getElementById('email').value = emailCaptured;
132
+ recognition.stop();
133
+ speak("You said " + emailCaptured + ". Is it correct?", confirmEmail);
134
+ };
135
+ }
136
+
137
+ function confirmEmail() {
138
+ recognition.start();
139
+ recognition.onresult = function(event) {
140
+ let confirmation = event.results[0][0].transcript.trim().toLowerCase();
141
+ recognition.stop();
142
+ if (confirmation.includes("ok")) {
143
+ speak("Great! Now, tell me your mobile number.", startListeningForMobile);
144
+ } else {
145
+ speak("Let's try again. Tell me your email.", startListeningForEmail);
146
+ }
147
+ };
148
+ }
149
+
150
+ // Capture mobile number for registration
151
+ function startListeningForMobile() {
152
+ recognition.start();
153
+ recognition.onresult = function(event) {
154
+ mobileCaptured = event.results[0][0].transcript.trim().replace(/\s+/g, '');
155
+ document.getElementById('mobile').value = mobileCaptured;
156
+ recognition.stop();
157
+ speak("You said " + mobileCaptured + ". Is it correct?", confirmMobile);
158
+ };
159
+ }
160
+
161
+ function confirmMobile() {
162
+ recognition.start();
163
+ recognition.onresult = function(event) {
164
+ let confirmation = event.results[0][0].transcript.trim().toLowerCase();
165
+ recognition.stop();
166
+ if (confirmation.includes("ok")) {
167
+ autoConfirm();
168
+ } else {
169
+ speak("Let's try again. Tell me your mobile number.", startListeningForMobile);
170
+ }
171
+ };
172
+ }
173
+
174
+ // Confirm details and submit automatically
175
+ function autoConfirm() {
176
+ document.getElementById('confirmName').textContent = document.getElementById('name').value;
177
+ document.getElementById('confirmEmail').textContent = document.getElementById('email').value;
178
+ document.getElementById('confirmPhone').textContent = document.getElementById('mobile').value;
179
+ document.getElementById('confirmation').style.display = 'block';
180
+ setTimeout(autoSubmit, 3000);
181
+ }
182
+
183
+ // Submit details to Salesforce
184
+ function autoSubmit() {
185
+ var name = document.getElementById('name').value;
186
+ var email = document.getElementById('email').value;
187
+ var phone = document.getElementById('mobile').value;
188
+ fetch('/submit', {
189
+ method: 'POST',
190
+ headers: { 'Content-Type': 'application/json' },
191
+ body: JSON.stringify({ name: name, email: email, phone: phone })
192
+ })
193
+ .then(response => response.json())
194
+ .then(data => {
195
+ if (data.success) {
196
+ document.getElementById('status').textContent = 'Your details were submitted successfully!';
197
+ document.getElementById('confirmation').style.display = 'none';
198
+ speak("Your registration is complete. Thank you for registering.");
199
+ setTimeout(() => location.reload(), 5000);
200
+ } else {
201
+ document.getElementById('status').textContent = 'There was an error submitting your details.';
202
+ speak("There was an error submitting your details. Please try again.");
203
+ }
204
+ });
205
+ }
206
+
207
        // Kick off the voice dialog as soon as the page loads.
        window.onload = function () {
            askLoginOrRegister();
        };
    </script>
</body>
</html>