# Healthcare chatbot (Gradio app): text/image medical Q&A via the Groq API,
# with spoken replies generated by gTTS.
from groq import Groq
import gradio as gr
from gtts import gTTS
import uuid
import base64
from io import BytesIO
import os
import logging
# Configure the module logger: DEBUG level, mirrored to the console and to
# 'chatbot_log.log', both using the same timestamped format.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
_log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Console handler first, file handler second (order preserved from original setup).
for _handler in (logging.StreamHandler(), logging.FileHandler('chatbot_log.log')):
    _handler.setFormatter(_log_format)
    logger.addHandler(_handler)
# Initialize Groq Client.
# NOTE(review): the key is read from the GROQ_API_KEY_2 environment variable;
# if it is unset, Groq receives api_key=None and requests will presumably fail
# at call time — confirm deployment sets this variable.
client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
# Function to encode the image
def encode_image(uploaded_image):
    """Encode an image as a base64 PNG string.

    Args:
        uploaded_image: an object with a PIL-style ``save(fp, format=...)``
            method (e.g. ``PIL.Image.Image`` from the Gradio image input).

    Returns:
        str: the PNG bytes of the image, base64-encoded as ASCII text.

    Raises:
        Exception: re-raises whatever the underlying ``save`` call raises.
    """
    try:
        logger.debug("Encoding image...")
        buffered = BytesIO()
        # Force PNG so the "data:image/png" URL built by the caller is accurate.
        uploaded_image.save(buffered, format="PNG")
        logger.debug("Image encoding complete.")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception:
        # logger.exception records the full traceback, not just the message,
        # which logger.error(f"...{e}") loses.
        logger.exception("Error encoding image")
        raise
# Function to handle text and image inputs
def customLLMBot(user_input, uploaded_image):
    """Run one chat turn: describe an uploaded image, or answer a text query.

    Args:
        user_input: the user's question (str).
        uploaded_image: optional PIL image; when provided, the model is asked
            to describe the image and ``user_input`` is not sent to the model.

    Returns:
        tuple: ``(chat_pairs, audio_path)`` where ``chat_pairs`` is a list of
        ``(user_message, bot_message)`` tuples in the format gr.Chatbot
        renders, and ``audio_path`` is an mp3 file with the spoken reply
        (``None`` when an error occurred).
    """
    try:
        logger.info("Processing input...")
        if uploaded_image is not None:
            # Encode the image to base64 for the data-URL message payload.
            base64_image = encode_image(uploaded_image)
            # Size of the base64 text, not of the raw image bytes.
            logger.debug(f"Image received, size: {len(base64_image)} bytes")
            # Vision-style message: text part plus inline image part.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What's in this image?"},
                        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}},
                    ],
                }
            ]
            logger.info("Sending image to Groq API for processing...")
            response = client.chat.completions.create(
                model="llama-3.2-11b-vision-preview",
                messages=messages,
            )
            logger.info("Image processed successfully.")
        else:
            # Plain text input with the doctor-persona system prompt.
            logger.info("Processing text input...")
            messages = [
                {"role": "system", "content": "You are Dr. HealthBuddy, a professional virtual doctor chatbot."},
                {"role": "user", "content": user_input},
            ]
            response = client.chat.completions.create(
                model="llama-3.2-11b-vision-preview",
                messages=messages,
            )
            logger.info("Text processed successfully.")

        # Extract the reply text.
        LLM_reply = response.choices[0].message.content
        logger.debug(f"LLM reply: {LLM_reply}")

        # Generate spoken audio; a uuid filename avoids clobbering responses
        # from concurrent sessions.
        audio_file = f"response_{uuid.uuid4().hex}.mp3"
        tts = gTTS(LLM_reply, lang='en')
        tts.save(audio_file)
        logger.info(f"Audio response saved as {audio_file}")

        # BUG FIX: gr.Chatbot expects (user_message, bot_message) pairs;
        # the previous [("User", ...), ("Bot", ...)] rows rendered the literal
        # labels as messages.
        return [(user_input, LLM_reply)], audio_file
    except Exception as e:
        # Handle errors gracefully; logger.exception keeps the traceback.
        logger.exception("Error in customLLMBot function")
        return [(user_input, f"An error occurred: {e}")], None
# Gradio Interface
def chatbot_ui():
    """Build and return the Gradio Blocks UI for the healthcare chatbot."""
    with gr.Blocks() as demo:
        gr.Markdown("# Healthcare Chatbot Doctor")

        # Two-column layout: conversation on the left, image/controls on the right.
        with gr.Row():
            with gr.Column(scale=3):  # main column for chatbot
                chat_display = gr.Chatbot(label="Responses", elem_id="chatbot")
                question_box = gr.Textbox(
                    label="Ask a health-related question",
                    placeholder="Describe your symptoms...",
                    elem_id="user-input",
                    lines=1,
                )
            with gr.Column(scale=1):  # side column for image and buttons
                image_input = gr.Image(label="Upload an Image", type="pil")
                send_button = gr.Button("Submit")
                reset_button = gr.Button("Clear")
                audio_player = gr.Audio(label="Audio Response")

        def _on_submit(query, image):
            """Forward the query to the LLM helper; third output clears the textbox."""
            logger.info("User submitted a query.")
            chat_pairs, audio_path = customLLMBot(query, image)
            return chat_pairs, audio_path, ""

        def _on_clear():
            """Reset chat, textbox, image, and audio to their empty states."""
            return [], "", None, None

        # Same handler fires on Enter in the textbox and on the Submit button.
        submit_inputs = [question_box, image_input]
        submit_outputs = [chat_display, audio_player, question_box]
        question_box.submit(_on_submit, inputs=submit_inputs, outputs=submit_outputs)
        send_button.click(_on_submit, inputs=submit_inputs, outputs=submit_outputs)
        reset_button.click(
            _on_clear,
            inputs=[],
            outputs=[chat_display, question_box, image_input, audio_player],
        )
    return demo
# Launch the interface.
# Binds to all interfaces on port 7860 — the convention for containerized /
# Hugging Face Spaces deployments.
chatbot_ui().launch(server_name="0.0.0.0", server_port=7860)
# Local-development alternative, intentionally kept commented out:
#chatbot_ui().launch(server_name="localhost", server_port=7860)