app.py
CHANGED
@@ -1,3 +1,188 @@
  1   from groq import Groq
  2   import gradio as gr
  3   from gtts import gTTS
@@ -21,11 +206,6 @@ logger.addHandler(file_handler)
 21   # Initialize Groq Client
 22   client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
 23
 24 - # client = Groq(
 25 - #     api_key="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",
 26 - # )
 27 -
 28 -
 29   # Function to encode the image
 30   def encode_image(uploaded_image):
 31       try:
@@ -43,9 +223,14 @@ def customLLMBot(user_input, uploaded_image, chat_history):
 43       try:
 44           logger.info("Processing input...")
 45
 46           if uploaded_image is not None:
 47               # Encode the image to base64
 48               base64_image = encode_image(uploaded_image)
 49               logger.debug(f"Image received, size: {len(base64_image)} bytes")
 50
 51               # Create a message specifically for image prompts
@@ -79,24 +264,30 @@ def customLLMBot(user_input, uploaded_image, chat_history):
 79               logger.info("Text processed successfully.")
 80
 81           # Extract the reply
 82 -         LLM_reply = response.choices[0].message.content
 83           logger.debug(f"LLM reply: {LLM_reply}")
 84
 85 -         # Append
 86 -         chat_history.append((
 87
 88 -
 89
 90       except Exception as e:
 91           logger.error(f"Error in customLLMBot function: {e}")
 92 -         return
 93 -
 94 -
 95
 96
 97   # Gradio Interface
 98   def chatbot_ui():
 99 -     chat_history = []  # Initialize empty chat history
100
101       with gr.Blocks() as demo:
102           gr.Markdown("# Healthcare Chatbot Doctor")
@@ -120,36 +311,8 @@ def chatbot_ui():
120           # Define actions
121           def handle_submit(user_query, image):
122               logger.info("User submitted a query.")
123 -
124 -             return
125 -
126 -
127 -     def chatbot_ui():
128 -         chat_history = []  # Initialize empty chat history
129 -
130 -         with gr.Blocks() as demo:
131 -             gr.Markdown("# Healthcare Chatbot Doctor")
132 -
133 -             # Layout for chatbot and input box alignment
134 -             with gr.Row():
135 -                 with gr.Column(scale=3):  # Main column for chatbot
136 -                     chatbot = gr.Chatbot(label="Responses", elem_id="chatbot")
137 -                     user_input = gr.Textbox(
138 -                         label="Ask a health-related question",
139 -                         placeholder="Describe your symptoms...",
140 -                         elem_id="user-input",
141 -                         lines=1,
142 -                     )
143 -                 with gr.Column(scale=1):  # Side column for image and buttons
144 -                     uploaded_image = gr.Image(label="Upload an Image", type="pil")
145 -                     submit_btn = gr.Button("Submit")
146 -                     clear_btn = gr.Button("Clear")
147 -                     audio_output = gr.Audio(label="Audio Response")
148 -
149 -             # Define actions
150 -             def handle_submit(user_query, image):
151 -                 response = customLLMBot(user_query, image, chat_history)
152 -                 return response, None, ""  # Return chat history and reset input
153
154           # Submit on pressing Enter key
155           user_input.submit(
@@ -165,20 +328,16 @@ def chatbot_ui():
165               outputs=[chatbot, audio_output, user_input],
166           )
167
168 -         #
169 -
170 -
171 -
172 -
173 -
174
175       return demo
176
177
178   # Launch the interface
179   chatbot_ui().launch(share=True,server_name="0.0.0.0", server_port=7860)
180 -
181 - #chatbot_ui().launch(server_name="localhost", server_port=7860)
182 -
183 -
184 -
  1 + # from groq import Groq
  2 + # import gradio as gr
  3 + # from gtts import gTTS
  4 + # import uuid
  5 + # import base64
  6 + # from io import BytesIO
  7 + # import os
  8 + # import logging
  9 +
 10 + # # Set up logger
 11 + # logger = logging.getLogger(__name__)
 12 + # logger.setLevel(logging.DEBUG)
 13 + # console_handler = logging.StreamHandler()
 14 + # file_handler = logging.FileHandler('chatbot_log.log')
 15 + # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 16 + # console_handler.setFormatter(formatter)
 17 + # file_handler.setFormatter(formatter)
 18 + # logger.addHandler(console_handler)
 19 + # logger.addHandler(file_handler)
 20 +
 21 + # # Initialize Groq Client
 22 + # #client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
 23 +
 24 + # client = Groq(
 25 + #     api_key="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",
 26 + # )
 27 +
 28 +
 29 + # # Function to encode the image
 30 + # def encode_image(uploaded_image):
 31 + #     try:
 32 + #         logger.debug("Encoding image...")
 33 + #         buffered = BytesIO()
 34 + #         uploaded_image.save(buffered, format="PNG")  # Ensure the correct format
 35 + #         logger.debug("Image encoding complete.")
 36 + #         return base64.b64encode(buffered.getvalue()).decode("utf-8")
 37 + #     except Exception as e:
 38 + #         logger.error(f"Error encoding image: {e}")
 39 + #         raise
 40 +
 41 + # # Function to handle text and image inputs
 42 + # def customLLMBot(user_input, uploaded_image, chat_history):
 43 + #     try:
 44 + #         logger.info("Processing input...")
 45 +
 46 + #         if uploaded_image is not None:
 47 + #             # Encode the image to base64
 48 + #             base64_image = encode_image(uploaded_image)
 49 + #             logger.debug(f"Image received, size: {len(base64_image)} bytes")
 50 +
 51 + #             # Create a message specifically for image prompts
 52 + #             messages = [
 53 + #                 {
 54 + #                     "role": "user",
 55 + #                     "content": [
 56 + #                         {"type": "text", "text": "What's in this image?"},
 57 + #                         {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}]
 58 + #                 }
 59 + #             ]
 60 +
 61 + #             logger.info("Sending image to Groq API for processing...")
 62 + #             # Send the image message to the Groq API
 63 + #             response = client.chat.completions.create(
 64 + #                 model="llama-3.2-11b-vision-preview",
 65 + #                 messages=messages,
 66 + #             )
 67 + #             logger.info("Image processed successfully.")
 68 + #         else:
 69 + #             # Process text input
 70 + #             logger.info("Processing text input...")
 71 + #             messages = [
 72 + #                 {"role": "system", "content": "You are Dr. HealthBuddy, a professional virtual doctor chatbot."},
 73 + #                 {"role": "user", "content": user_input},
 74 + #             ]
 75 + #             response = client.chat.completions.create(
 76 + #                 model="llama-3.2-11b-vision-preview",
 77 + #                 messages=messages,
 78 + #             )
 79 + #             logger.info("Text processed successfully.")
 80 +
 81 + #         # Extract the reply
 82 + #         LLM_reply = response.choices[0].message.content.strip()
 83 + #         logger.debug(f"LLM reply: {LLM_reply}")
 84 +
 85 + #         # Append user input and bot reply as a tuple to the chat history
 86 + #         chat_history.append((user_input, LLM_reply))
 87 +
 88 + #         return chat_history
 89 +
 90 + #     except Exception as e:
 91 + #         logger.error(f"Error in customLLMBot function: {e}")
 92 + #         return chat_history
 93 +
 94 + # # Log Viewer Function
 95 + # def get_logs():
 96 + #     try:
 97 + #         with open('/tmp/chatbot_log.log', 'r') as f:
 98 + #             return f.read()[-3000:]  # Return the last 3000 characters of the log
 99 + #     except Exception as e:
100 + #         return f"Error reading logs: {e}"
101 +
102 +
103 +
104 +
105 + # def chatbot_ui():
106 +
107 + #     chat_history = []  # Initialize empty chat history
108 +
109 + #     with gr.Blocks() as demo:
110 + #         gr.Markdown("# Healthcare Chatbot Doctor")
111 +
112 + #         # Layout for chatbot and input box alignment
113 + #         with gr.Row():
114 + #             with gr.Column(scale=3):  # Main column for chatbot
115 + #                 chatbot = gr.Chatbot(label="Responses", elem_id="chatbot")
116 + #                 user_input = gr.Textbox(
117 + #                     label="Ask a health-related question",
118 + #                     placeholder="Describe your symptoms...",
119 + #                     elem_id="user-input",
120 + #                     lines=1,
121 + #                 )
122 + #             with gr.Column(scale=1):  # Side column for image and buttons
123 + #                 uploaded_image = gr.Image(label="Upload an Image", type="pil")
124 + #                 submit_btn = gr.Button("Submit")
125 + #                 clear_btn = gr.Button("Clear")
126 + #                 audio_output = gr.Audio(label="Audio Response")
127 + #         # Log Viewer
128 + #         with gr.Row():
129 + #             log_view = gr.Textbox(label="Logs", lines=10, interactive=False)
130 + #             view_logs_btn = gr.Button("View Logs")
131 + #             download_logs_btn = gr.Button("Download Logs")
132 +
133 + #         # Define actions
134 + #         def handle_submit(user_query, image):
135 + #             response = customLLMBot(user_query, image, chat_history)
136 + #             return response, None, ""  # Return chat history and reset input
137 +
138 + #         # Submit on pressing Enter key
139 + #         user_input.submit(
140 + #             handle_submit,
141 + #             inputs=[user_input, uploaded_image],
142 + #             outputs=[chatbot, audio_output, user_input],
143 + #         )
144 +
145 + #         # Submit on button click
146 + #         submit_btn.click(
147 + #             handle_submit,
148 + #             inputs=[user_input, uploaded_image],
149 + #             outputs=[chatbot, audio_output, user_input],
150 + #         )
151 +
152 + #         # Clear button action
153 + #         clear_btn.click(
154 + #             lambda: ([], None, ""),
155 + #             inputs=[],
156 + #             outputs=[chatbot, uploaded_image, user_input],
157 + #         )
158 +
159 + #         # View Logs Button
160 + #         view_logs_btn.click(
161 + #             get_logs,
162 + #             inputs=[],
163 + #             outputs=[log_view],
164 + #         )
165 +
166 + #         # Download Logs Button
167 + #         def download_logs():
168 + #             return '/tmp/chatbot_log.log'
169 +
170 + #         download_logs_btn.click(
171 + #             download_logs,
172 + #             inputs=[],
173 + #             outputs=[gr.File()],
174 + #         )
175 +
176 + #     return demo
177 +
178 +
179 + # # Launch the interface
180 + # #chatbot_ui().launch(share=True,server_name="0.0.0.0", server_port=7860)
181 +
182 + # chatbot_ui().launch(share=True,server_name="localhost", server_port=7860)
183 +
184 +
185 +
186   from groq import Groq
187   import gradio as gr
188   from gtts import gTTS
206   # Initialize Groq Client
207   client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
208
209   # Function to encode the image
210   def encode_image(uploaded_image):
211       try:
223       try:
224           logger.info("Processing input...")
225
226 +         # Append user input to the chat history
227 +         chat_history.append(("User", user_input))
228 +
229           if uploaded_image is not None:
230               # Encode the image to base64
231               base64_image = encode_image(uploaded_image)
232 +
233 +             # Log the image size and type
234               logger.debug(f"Image received, size: {len(base64_image)} bytes")
235
236               # Create a message specifically for image prompts
264               logger.info("Text processed successfully.")
265
266           # Extract the reply
267 +         LLM_reply = response.choices[0].message.content
268           logger.debug(f"LLM reply: {LLM_reply}")
269
270 +         # Append the bot's response to the chat history
271 +         chat_history.append(("Bot", LLM_reply))
272
273 +         # Generate audio for response
274 +         audio_file = f"response_{uuid.uuid4().hex}.mp3"
275 +         tts = gTTS(LLM_reply, lang='en')
276 +         tts.save(audio_file)
277 +         logger.info(f"Audio response saved as {audio_file}")
278 +
279 +         # Return the chat history (all Q&A) and the audio file
280 +         return [(entry[0], entry[1]) for entry in chat_history], audio_file
281
282       except Exception as e:
283 +         # Handle errors gracefully
284           logger.error(f"Error in customLLMBot function: {e}")
285 +         return [("User", f"An error occurred: {e}")], None
286
287
288   # Gradio Interface
289   def chatbot_ui():
290 +     chat_history = []  # Initialize empty chat history for the session
291
292       with gr.Blocks() as demo:
293           gr.Markdown("# Healthcare Chatbot Doctor")
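For orientation, here is a small illustrative sketch of the values the reworked customLLMBot now hands back and what handle_submit forwards to the Gradio outputs. The history entries and mp3 name below are made-up placeholders, not real output.

# Illustrative only (hypothetical values): the (history, audio) pair returned by
# customLLMBot after a text query, given the chat_history.append(...) calls above.
example_history = [
    ("User", "I have a mild headache, what should I do?"),
    ("Bot", "Stay hydrated, rest, and consider a mild analgesic if symptoms persist."),
]
example_audio = "response_3f9c1a2b4d5e6f708192a3b4c5d6e7f8.mp3"  # name pattern from uuid.uuid4().hex
# handle_submit would then return (example_history, example_audio, "") to
# outputs=[chatbot, audio_output, user_input], clearing the textbox after each turn.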
311           # Define actions
312           def handle_submit(user_query, image):
313               logger.info("User submitted a query.")
314 +             response, audio = customLLMBot(user_query, image, chat_history)
315 +             return response, audio, ""
316
317           # Submit on pressing Enter key
318           user_input.submit(
328               outputs=[chatbot, audio_output, user_input],
329           )
330
331 +         # Action for clearing all fields and resetting chat history
332 +         def clear_chat():
333 +             nonlocal chat_history
334 +             chat_history = []  # Reset chat history for new session
335 +             return [], "", None, None
336 +
337 +         clear_btn.click(clear_chat, inputs=[], outputs=[chatbot, user_input, uploaded_image, audio_output])
338
339       return demo
340
341
342   # Launch the interface
343   chatbot_ui().launch(share=True,server_name="0.0.0.0", server_port=7860)
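For running the app outside the Space, a minimal local-launch sketch (an assumption, echoing the localhost variant left commented out above) could replace the unconditional launch on line 343. It assumes GROQ_API_KEY_2 is exported in the shell before starting.

# Hypothetical local entry point: bind to localhost only and skip the public share link.
if __name__ == "__main__":
    chatbot_ui().launch(server_name="127.0.0.1", server_port=7860, share=False)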