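"""Healthcare chatbot Space: a Gradio UI that sends text questions or uploaded
images to the Groq API (llama-3.2-11b-vision-preview) and returns the model's
reply along with a gTTS-generated audio version of the answer.

Assumes the groq, gradio, and gtts packages are installed and that a
GROQ_API_KEY_2 environment variable (e.g. a Space secret) is set.
"""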
from groq import Groq
import gradio as gr
from gtts import gTTS
import uuid
import base64
from io import BytesIO
import os
import logging
# Set up logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler('chatbot_log.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
# Initialize Groq Client
client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
# Function to encode the image
def encode_image(uploaded_image):
    try:
        logger.debug("Encoding image...")
        buffered = BytesIO()
        uploaded_image.save(buffered, format="PNG")  # Ensure the correct format
        logger.debug("Image encoding complete.")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception as e:
        logger.error(f"Error encoding image: {e}")
        raise
# Function to handle text and image inputs
def customLLMBot(user_input, uploaded_image, chat_history):
    try:
        logger.info("Processing input...")
        # Append user input to the chat history
        chat_history.append((user_input, None))
        if uploaded_image is not None:
            # Encode the image to base64
            base64_image = encode_image(uploaded_image)
            # Log the size of the encoded image
            logger.debug(f"Image received, size: {len(base64_image)} bytes")
            # Create a message specifically for image prompts
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What's in this image?"},
                        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
                    ]
                }
            ]
            logger.info("Sending image to Groq API for processing...")
            # Send the image message to the Groq API
            response = client.chat.completions.create(
                model="llama-3.2-11b-vision-preview",
                messages=messages,
            )
            logger.info("Image processed successfully.")
        else:
            # Process text input
            logger.info("Processing text input...")
            messages = [
                {"role": "system", "content": "You are Dr. HealthBuddy, a professional virtual doctor chatbot."},
                {"role": "user", "content": user_input},
            ]
            response = client.chat.completions.create(
                model="llama-3.2-11b-vision-preview",
                messages=messages,
            )
            logger.info("Text processed successfully.")
        # Extract the reply
        LLM_reply = response.choices[0].message.content
        logger.debug(f"LLM reply: {LLM_reply}")
        # Append the bot's response to the chat history
        chat_history[-1] = (user_input, LLM_reply)
        # Generate audio for the response
        audio_file = f"response_{uuid.uuid4().hex}.mp3"
        tts = gTTS(LLM_reply, lang='en')
        tts.save(audio_file)
        logger.info(f"Audio response saved as {audio_file}")
        # Return chat history and audio file
        return chat_history, audio_file
    except Exception as e:
        # Handle errors gracefully
        logger.error(f"Error in customLLMBot function: {e}")
        return [(user_input, f"An error occurred: {e}")], None
# Gradio Interface
def chatbot_ui():
    chat_history = []  # Initialize empty chat history
    with gr.Blocks() as demo:
        gr.Markdown("# Healthcare Chatbot Doctor")
        # Layout for chatbot and input box alignment
        with gr.Row():
            with gr.Column(scale=3):  # Main column for chatbot
                chatbot = gr.Chatbot(label=None, elem_id="chatbot")
                user_input = gr.Textbox(
                    label="Ask a health-related question",
                    placeholder="Describe your symptoms...",
                    elem_id="user-input",
                    lines=1,
                )
            with gr.Column(scale=1):  # Side column for image and buttons
                uploaded_image = gr.Image(label="Upload an Image", type="pil")
                submit_btn = gr.Button("Submit")
                clear_btn = gr.Button("Clear")
                audio_output = gr.Audio(label="Audio Response")
        # Define actions
        def handle_submit(user_query, image):
            logger.info("User submitted a query.")
            response, audio = customLLMBot(user_query, image, chat_history)
            return response, audio, ""
        # Submit on pressing Enter key
        user_input.submit(
            handle_submit,
            inputs=[user_input, uploaded_image],
            outputs=[chatbot, audio_output, user_input],
        )
        # Submit on button click
        submit_btn.click(
            handle_submit,
            inputs=[user_input, uploaded_image],
            outputs=[chatbot, audio_output, user_input],
        )
        # Action for clearing all fields
        def handle_clear():
            chat_history.clear()  # Reset the stored history so old turns don't reappear
            return [], "", None, None
        clear_btn.click(
            handle_clear,
            inputs=[],
            outputs=[chatbot, user_input, uploaded_image, audio_output],
        )
    return demo
# Launch the interface
chatbot_ui().launch(server_name="0.0.0.0", server_port=7860)