app.py
CHANGED
@@ -43,67 +43,54 @@ def customLLMBot(user_input, uploaded_image, chat_history):
     try:
         logger.info("Processing input...")
 
-        # Append user input to the chat history
-        if user_input:
-            chat_history.append((user_input, None))
-
         if uploaded_image is not None:
             # Encode the image to base64
             base64_image = encode_image(uploaded_image)
+            logger.debug(f"Image received, size: {len(base64_image)} bytes")
 
-
-            # Create a message specifically for the image
+            # Create a message specifically for image prompts
             messages = [
                 {
                     "role": "user",
                     "content": [
                         {"type": "text", "text": "What's in this image?"},
-                        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
-                    ]
+                        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}]
                 }
             ]
 
-
+            logger.info("Sending image to Groq API for processing...")
+            # Send the image message to the Groq API
             response = client.chat.completions.create(
                 model="llama-3.2-11b-vision-preview",
                 messages=messages,
             )
             logger.info("Image processed successfully.")
-
-            #
-
-            chat_history[-1] = (chat_history[-1][0], LLM_reply)
-            logger.debug(f"LLM reply for image: {LLM_reply}")
-
-            # Return the updated chat history and clear uploaded image after processing
-            return [(q, r) for q, r in chat_history if r], None
-
-        if user_input:
-            # Handle text input
-            logger.debug("Processing text input...")
+        else:
+            # Process text input
+            logger.info("Processing text input...")
             messages = [
                 {"role": "system", "content": "You are Dr. HealthBuddy, a professional virtual doctor chatbot."},
                 {"role": "user", "content": user_input},
             ]
-
             response = client.chat.completions.create(
                 model="llama-3.2-11b-vision-preview",
                 messages=messages,
             )
             logger.info("Text processed successfully.")
 
-
-
-
-            logger.debug(f"LLM reply for text: {LLM_reply}")
+        # Extract the reply
+        LLM_reply = response.choices[0].message.content.strip()
+        logger.debug(f"LLM reply: {LLM_reply}")
 
-
+        # Append user input and bot reply as a tuple to the chat history
+        chat_history.append((user_input, LLM_reply))
 
-        return chat_history
+        return chat_history
 
     except Exception as e:
         logger.error(f"Error in customLLMBot function: {e}")
-        return chat_history
+        return chat_history
+
 
 
 
@@ -136,8 +123,35 @@ def chatbot_ui():
         def handle_submit(user_query, image):
             logger.info("User submitted a query.")
             updated_chat_history = customLLMBot(user_query, image, chat_history)
-            return updated_chat_history, None, ""  #
+            return updated_chat_history, None, ""  # Return the updated chat history
+
+
+def chatbot_ui():
+    chat_history = []  # Initialize empty chat history
+
+    with gr.Blocks() as demo:
+        gr.Markdown("# Healthcare Chatbot Doctor")
 
+        # Layout for chatbot and input box alignment
+        with gr.Row():
+            with gr.Column(scale=3):  # Main column for chatbot
+                chatbot = gr.Chatbot(label="Responses", elem_id="chatbot")
+                user_input = gr.Textbox(
+                    label="Ask a health-related question",
+                    placeholder="Describe your symptoms...",
+                    elem_id="user-input",
+                    lines=1,
+                )
+            with gr.Column(scale=1):  # Side column for image and buttons
+                uploaded_image = gr.Image(label="Upload an Image", type="pil")
+                submit_btn = gr.Button("Submit")
+                clear_btn = gr.Button("Clear")
+                audio_output = gr.Audio(label="Audio Response")
+
+        # Define actions
+        def handle_submit(user_query, image):
+            response = customLLMBot(user_query, image, chat_history)
+            return response, None, ""  # Return chat history and reset input
 
         # Submit on pressing Enter key
         user_input.submit(
@@ -153,11 +167,11 @@ def chatbot_ui():
             outputs=[chatbot, audio_output, user_input],
         )
 
-        #
+        # Clear button action
         clear_btn.click(
-            lambda: ([],
+            lambda: ([], None, ""),
             inputs=[],
-            outputs=[chatbot,
+            outputs=[chatbot, uploaded_image, user_input],
         )
 
     return demo
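
For reference, the hunks above call an encode_image helper that is defined elsewhere in app.py and is not part of this diff. A minimal sketch of what such a helper might look like, assuming the upload arrives as a PIL image (matching the gr.Image(type="pil") component added in the second hunk) and is serialized to a base64-encoded PNG string, as the data:image/png;base64 URL in the image message implies:

import base64
import io

from PIL import Image


def encode_image(image: Image.Image) -> str:
    # Hypothetical sketch only: the real helper in app.py is not shown in this diff.
    # Write the PIL image to an in-memory PNG buffer and base64-encode the bytes,
    # matching the f"data:image/png;base64,{base64_image}" URL built in customLLMBot.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")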
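
The hunks also rely on a module-level client whose chat.completions.create(...) calls target the Groq-hosted llama-3.2-11b-vision-preview model. Assuming the app uses the official groq Python SDK, the initialization (which sits outside the changed lines) would look roughly like the sketch below; the GROQ_API_KEY variable name is an assumption, not taken from this diff:

import os

from groq import Groq

# Hypothetical client setup for customLLMBot; the real initialization
# lives in a part of app.py that this diff does not show.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Minimal usage example mirroring the text branch of customLLMBot.
response = client.chat.completions.create(
    model="llama-3.2-11b-vision-preview",
    messages=[{"role": "user", "content": "I have a mild headache, what should I do?"}],
)
print(response.choices[0].message.content.strip())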
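
Finally, the last two hunks show user_input.submit( and the clear_btn.click(...) wiring, but the submit arguments themselves fall outside the changed lines (only outputs=[chatbot, audio_output, user_input] is visible as context). A sketch of how handle_submit would plausibly be connected to the components defined above; the exact inputs list in app.py may differ:

# Hypothetical event wiring inside chatbot_ui(), consistent with the
# components and the outputs shown as context in the diff.
user_input.submit(
    fn=handle_submit,
    inputs=[user_input, uploaded_image],
    outputs=[chatbot, audio_output, user_input],
)
submit_btn.click(
    fn=handle_submit,
    inputs=[user_input, uploaded_image],
    outputs=[chatbot, audio_output, user_input],
)

# The clear button resets the chat, the uploaded image, and the text box,
# matching the lambda added in the final hunk.
clear_btn.click(
    lambda: ([], None, ""),
    inputs=[],
    outputs=[chatbot, uploaded_image, user_input],
)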