# chatbot2/app.py
from groq import Groq
import gradio as gr
from gtts import gTTS
import uuid
import os
from PIL import Image
import pytesseract
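# Note (environment assumptions): pytesseract is only a wrapper, so the Tesseract OCR
# binary must be installed on the host (e.g. listed in the Space's packages.txt), and
# gTTS needs outbound internet access because it calls Google's text-to-speech endpoint.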
# Initialize the Groq client
client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
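# The API key is read from the GROQ_API_KEY_2 environment variable (typically set as a
# secret on the hosting platform); an invalid key usually only surfaces when the first
# chat completion request is made.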
# Function to initialize conversation messages
def initialize_messages():
    return [{
        "role": "system",
        "content": '''You are Dr. HealthBuddy, a highly experienced and professional virtual doctor chatbot...'''
    }]
# Function to handle user input, an optional image, and the conversation history
def customLLMBot(user_input, image_path, history):
    messages_prmt = history or initialize_messages()
    image_analysis = ""
    # Process the uploaded image with OCR, if one was provided
    if image_path:
        try:
            img = Image.open(image_path)
            extracted_text = pytesseract.image_to_string(img)
            image_analysis = f"Analyzed Image: {extracted_text.strip()}"
        except Exception as e:
            image_analysis = f"Error processing the image: {str(e)}"
    # Add user input and image analysis to the conversation
    messages_prmt.append({"role": "user", "content": user_input})
    if image_analysis:
        messages_prmt.append({"role": "user", "content": image_analysis})
    # Generate a response using Groq
    try:
        response = client.chat.completions.create(
            messages=messages_prmt,
            model="llama3-8b-8192",
        )
        LLM_reply = response.choices[0].message.content
        messages_prmt.append({"role": "assistant", "content": LLM_reply})
    except Exception as e:
        LLM_reply = f"Error generating response: {str(e)}"
    # Generate an audio version of the reply
    audio_file = f"response_{uuid.uuid4().hex}.mp3"
    try:
        tts = gTTS(LLM_reply, lang='en')
        tts.save(audio_file)
    except Exception as e:
        audio_file = None
        LLM_reply += f"\n\nError generating audio: {str(e)}"
    # Only the latest exchange is returned for display; the full message history
    # is carried in messages_prmt (held in gr.State by the UI below).
    return [(user_input, LLM_reply)], audio_file, messages_prmt
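# Quick smoke test (hypothetical, outside the Gradio flow): call the bot once with no
# image and no prior history, then inspect the reply text and the generated MP3 path.
#   pairs, audio_path, history = customLLMBot("I have a mild headache", None, None)
#   print(pairs[-1][1], audio_path)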
# Chatbot UI
def chatbot_ui():
    with gr.Blocks() as demo:
        gr.Markdown("# Healthcare Chatbot Doctor")
        chatbot = gr.Chatbot(label="English Responses")
        user_input = gr.Textbox(
            label="Ask anything related to your health condition",
            placeholder="Enter your symptoms here...",
            lines=1
        )
        image_input = gr.Image(label="Upload an image", type="filepath")
        audio_output = gr.Audio(label="Audio Response")
        submit_btn = gr.Button("Submit")
        clear_btn = gr.Button("Clear")
        # Conversation history (the messages list) is kept per session
        history_state = gr.State([])
        # Combine submit button and Enter key functionality
        submit_action = submit_btn.click(
            customLLMBot,
            inputs=[user_input, image_input, history_state],
            outputs=[chatbot, audio_output, history_state],
        )
        user_input_action = user_input.submit(
            customLLMBot,
            inputs=[user_input, image_input, history_state],
            outputs=[chatbot, audio_output, history_state],
        )
        # Reset the textbox after submission
        for action in [submit_action, user_input_action]:
            action.then(
                lambda: "",  # Clear input box
                inputs=[],
                outputs=user_input,
            )
        # Clear button functionality: reset chat, input box, audio, and history
        clear_btn.click(
            lambda: ([], "", None, []),
            inputs=[],
            outputs=[chatbot, user_input, audio_output, history_state],
        )
    return demo
# Launch the chatbot UI
chatbot_ui().launch(server_name="0.0.0.0", server_port=7860)
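# Binding to 0.0.0.0 on port 7860 matches the defaults expected by Hugging Face Spaces
# and most container setups. If LLM or TTS calls are slow, calling .queue() before
# .launch() is an option (assumption: Gradio's request queue fits this use case).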