wookimchye committed on
Commit
859f5d5
·
verified ·
1 Parent(s): 70ace2b

Upload 5 files

Browse files
Files changed (5) hide show
  1. .env +6 -0
  2. Dockerfile +17 -0
  3. Web-Chatbot-Speech-En-app.py +64 -0
  4. requirements.txt +7 -0
  5. templates/index.html +99 -0
.env ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ AI_SERVICE_ENDPOINT=https://iti109-sectionb.cognitiveservices.azure.com/
2
+ AI_SERVICE_KEY=2ou0CMAjUutj0D4In8U8AkxEIXtCrvYFOBMhqSW4rZ7x6yZ033GdJQQJ99ALACqBBLyXJ3w3AAAaACOGtVJj
3
+ QA_PROJECT_NAME=ITI109-SectionB-FAQ
4
+ QA_DEPLOYMENT_NAME=production
5
+ SPEECH_KEY=BHMIZNZ8xH7JQHXaGAoaOlwdx3bjxvhyuLxpHumSiRPXxpo1Rpb5JQQJ99BAACqBBLyXJ3w3AAAYACOGvOBV
6
+ SPEECH_REGION=southeastasia
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use an official Python runtime as the base image
FROM python:3.9

# Set the working directory in the container
WORKDIR /app

# Copy only the dependency manifest first so the expensive pip-install layer
# is cached by Docker until requirements.txt itself changes (copying the whole
# tree first invalidated the cache on every source edit).
COPY requirements.txt /app/

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the project files to the container
COPY . /app

# Expose the port the app will listen on
EXPOSE 7860

# Command to start the application under gunicorn
CMD ["gunicorn", "--bind", "0.0.0.0:7860", "Web-Chatbot-Speech-En-app:app"]
Web-Chatbot-Speech-En-app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, render_template, request, jsonify, send_file
2
+ import requests
3
+ from dotenv import load_dotenv
4
+ import os
5
+
6
+ # import namespaces
7
+ from azure.core.credentials import AzureKeyCredential
8
+ from azure.ai.language.questionanswering import QuestionAnsweringClient
9
+ from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, AudioConfig
10
+ from azure.cognitiveservices.speech.audio import AudioOutputConfig
11
+
12
app = Flask(__name__)

# Get configuration settings from the environment (.env) — never hard-code
# credentials in source.
load_dotenv()
ai_endpoint = os.getenv('AI_SERVICE_ENDPOINT')
ai_key = os.getenv('AI_SERVICE_KEY')
ai_project_name = os.getenv('QA_PROJECT_NAME')
ai_deployment_name = os.getenv('QA_DEPLOYMENT_NAME')
speech_key = os.getenv('SPEECH_KEY')
speech_region = os.getenv('SPEECH_REGION')

# SECURITY FIX: the endpoint and subscription key were previously embedded here
# as string literals (AZURE_BOT_ENDPOINT / AZURE_BOT_KEY), leaking a live key in
# source control (it is also committed in .env — rotate both keys). The names are
# kept for backward compatibility but now resolve to the same env-driven values.
AZURE_BOT_ENDPOINT = ai_endpoint
AZURE_BOT_KEY = ai_key

# Question Answering client used by the /ask route.
credential = AzureKeyCredential(ai_key)
ai_client = QuestionAnsweringClient(endpoint=ai_endpoint, credential=credential)
30
+
31
@app.route('/')
def home():
    """Serve the chat web interface (templates/index.html)."""
    return render_template('index.html')
34
+
35
@app.route('/ask', methods=['POST'])
def ask_bot():
    """Answer a user question via Azure Question Answering and synthesize speech.

    Expects JSON ``{"question": "..."}``; returns ``{"answer": ..., "audio":
    "/response.wav"}`` on success, or ``{"error": ...}`` with 400/500 status.
    """
    user_question = request.json.get("question", "")

    if not user_question:
        return jsonify({"error": "No question provided"}), 400

    try:
        response = ai_client.get_answers(question=user_question,
                                         project_name=ai_project_name,
                                         deployment_name=ai_deployment_name)

        # Take the top-ranked answer if the project returned any.
        bot_response = response.answers[0].answer if response.answers else "No response from bot"

        # Text-to-Speech: write the spoken answer to response.wav, served by
        # the /response.wav route below.
        # NOTE(review): a single shared filename is racy under concurrent
        # requests (gunicorn workers/threads) — consider a per-request temp file.
        speech_config = SpeechConfig(subscription=speech_key, region=speech_region)
        audio_config = AudioConfig(filename="response.wav")
        synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
        synthesizer.speak_text(bot_response)

        return jsonify({"answer": bot_response, "audio": "/response.wav"})
    except Exception as e:
        # BUG FIX: this previously caught requests.exceptions.RequestException,
        # but the Azure SDKs raise azure.core.exceptions types (e.g.
        # HttpResponseError), so the handler never fired and SDK failures
        # escaped as unhandled errors. Catch broadly at this route boundary
        # and report the failure as a 500.
        return jsonify({"error": str(e)}), 500
58
+
59
@app.route('/response.wav')
def get_audio():
    """Stream the most recently synthesized answer as a WAV file."""
    return send_file("response.wav", mimetype="audio/wav")
62
+
63
if __name__ == '__main__':
    # Local development entry point only; the Docker image runs the app under
    # gunicorn instead (see Dockerfile CMD).
    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # must never be exposed in production.
    app.run(debug=True)
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ Flask
2
+ requests
3
+ azure-ai-language-questionanswering
4
+ azure-cognitiveservices-speech
5
+ azure-core
6
+ python-dotenv
7
+ gunicorn
templates/index.html ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Asian Food QnA Chatbot</title>
7
+
8
+ <style>
9
+ #user-input {
10
+ width: 50%;
11
+ max-width: 600px;
12
+ }
13
+ </style>
14
+
15
+ <script>
16
+ let recognition;
17
+
18
function startSpeechRecognition() {
    // Launch browser speech-to-text (WebKit-prefixed Web Speech API) and feed
    // the final transcript into askBot().
    if (!('webkitSpeechRecognition' in window)) {
        alert('Speech Recognition not supported in this browser.');
        return;
    }

    recognition = new webkitSpeechRecognition();
    recognition.continuous = false;      // stop after a single utterance
    recognition.interimResults = false;  // deliver only the final result
    recognition.lang = 'en-US';

    const statusEl = document.getElementById("speech-status");

    recognition.onstart = () => {
        statusEl.textContent = "Listening...";
    };

    recognition.onresult = (event) => {
        const transcript = event.results[0][0].transcript;
        document.getElementById("user-input").value = transcript;
        askBot(transcript);
    };

    recognition.onerror = (event) => {
        alert("Speech Recognition Error: " + event.error);
    };

    recognition.onend = () => {
        statusEl.textContent = "";
    };

    recognition.start();
}
49
+
50
async function askBot(question) {
    // POST the question to the Flask /ask endpoint, render the answer, and
    // play the synthesized speech the server wrote to data.audio.
    const responseBox = document.getElementById("response-box");
    responseBox.textContent = "Waiting for response...";

    try {
        const response = await fetch('/ask', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ question })
        });
        const data = await response.json();

        if (data.error) {
            responseBox.textContent = "Error: " + data.error;
            return;
        }

        // NOTE(review): innerHTML with unescaped text — acceptable for trusted
        // KB answers, but escape if answers may ever contain HTML.
        responseBox.innerHTML = "You: " + question + "<br>" + "Bot: " + data.answer;

        // Cache-bust with a timestamp so the browser fetches the fresh WAV.
        const audioUrl = data.audio + "?t=" + new Date().getTime();
        new Audio(audioUrl).play();

        document.getElementById("user-input").value = "";
    } catch (error) {
        responseBox.textContent = "Error: Unable to process the request.";
    }
}
79
+
80
function handleFormSubmit(event) {
    // Intercept the form submit so the page does not reload, then forward any
    // non-blank question to the bot.
    event.preventDefault();
    const text = document.getElementById("user-input").value;
    if (text.trim()) {
        askBot(text);
    }
}
87
+ </script>
88
+ </head>
89
+ <body>
90
+ <h1>Asian Food QnA Chatbot - English</h1>
91
+ <form onsubmit="handleFormSubmit(event)">
92
+ <input type="text" id="user-input" placeholder="Type your question here..." required>
93
+ <button type="submit">Ask</button>
94
+ </form>
95
+ <button onclick="startSpeechRecognition()">🎤 Speak</button>
96
+ <span id="speech-status" style="color: green; margin-left: 10px;"></span>
97
+ <div id="response-box" style="margin-top: 20px; font-weight: bold;">Response:</div>
98
+ </body>
99
+ </html>