Reshmarb committed on
Commit
a43bc22
·
1 Parent(s): c15e606
Files changed (1) hide show
  1. app.py +30 -15
app.py CHANGED
@@ -43,53 +43,68 @@ def customLLMBot(user_input, uploaded_image, chat_history):
43
  try:
44
  logger.info("Processing input...")
45
 
 
 
 
 
46
  if uploaded_image is not None:
47
  # Encode the image to base64
48
  base64_image = encode_image(uploaded_image)
49
- logger.debug(f"Image received, size: {len(base64_image)} bytes")
50
 
51
- # Create a message specifically for image prompts
 
52
  messages = [
53
  {
54
  "role": "user",
55
  "content": [
56
  {"type": "text", "text": "What's in this image?"},
57
- {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}]
 
58
  }
59
  ]
60
 
61
- logger.info("Sending image to Groq API for processing...")
62
- # Send the image message to the Groq API
63
  response = client.chat.completions.create(
64
  model="llama-3.2-11b-vision-preview",
65
  messages=messages,
66
  )
67
  logger.info("Image processed successfully.")
68
- else:
69
- # Process text input
70
- logger.info("Processing text input...")
 
 
 
 
 
 
 
 
 
71
  messages = [
72
  {"role": "system", "content": "You are Dr. HealthBuddy, a professional virtual doctor chatbot."},
73
  {"role": "user", "content": user_input},
74
  ]
 
75
  response = client.chat.completions.create(
76
  model="llama-3.2-11b-vision-preview",
77
  messages=messages,
78
  )
79
  logger.info("Text processed successfully.")
80
 
81
- # Extract the reply
82
- LLM_reply = response.choices[0].message.content.strip()
83
- logger.debug(f"LLM reply: {LLM_reply}")
 
84
 
85
- # Append user input and bot reply as a tuple to the chat history
86
- chat_history.append((user_input, LLM_reply))
87
 
88
- return chat_history
89
 
90
  except Exception as e:
91
  logger.error(f"Error in customLLMBot function: {e}")
92
- return chat_history
 
93
 
94
 
95
 
 
43
  try:
44
  logger.info("Processing input...")
45
 
46
+ # Append user input to the chat history
47
+ if user_input:
48
+ chat_history.append((user_input, None))
49
+
50
  if uploaded_image is not None:
51
  # Encode the image to base64
52
  base64_image = encode_image(uploaded_image)
 
53
 
54
+ logger.debug("Image uploaded. Processing...")
55
+ # Create a message specifically for the image
56
  messages = [
57
  {
58
  "role": "user",
59
  "content": [
60
  {"type": "text", "text": "What's in this image?"},
61
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
62
+ ]
63
  }
64
  ]
65
 
66
+ # Send image query to Groq API
 
67
  response = client.chat.completions.create(
68
  model="llama-3.2-11b-vision-preview",
69
  messages=messages,
70
  )
71
  logger.info("Image processed successfully.")
72
+
73
+ # Extract and append the bot's response
74
+ LLM_reply = response.choices[0].message.content
75
+ chat_history[-1] = (chat_history[-1][0], LLM_reply)
76
+ logger.debug(f"LLM reply for image: {LLM_reply}")
77
+
78
+ # Return the updated chat history and clear uploaded image after processing
79
+ return [(q, r) for q, r in chat_history if r], None
80
+
81
+ if user_input:
82
+ # Handle text input
83
+ logger.debug("Processing text input...")
84
  messages = [
85
  {"role": "system", "content": "You are Dr. HealthBuddy, a professional virtual doctor chatbot."},
86
  {"role": "user", "content": user_input},
87
  ]
88
+
89
  response = client.chat.completions.create(
90
  model="llama-3.2-11b-vision-preview",
91
  messages=messages,
92
  )
93
  logger.info("Text processed successfully.")
94
 
95
+ # Extract and append the bot's response
96
+ LLM_reply = response.choices[0].message.content
97
+ chat_history[-1] = (chat_history[-1][0], LLM_reply)
98
+ logger.debug(f"LLM reply for text: {LLM_reply}")
99
 
100
+ return [(q, r) for q, r in chat_history if r], None
 
101
 
102
+ return chat_history, None
103
 
104
  except Exception as e:
105
  logger.error(f"Error in customLLMBot function: {e}")
106
+ return chat_history + [(None, f"An error occurred: {e}")], None
107
+
108
 
109
 
110