# Poultry Management Chatbot — Gradio Space
# Author: Emmanuel Frimpong Asante
# app.py
# Import necessary libraries
import logging
import os
from datetime import datetime, timezone

import dotenv
import gradio as gr
import numpy as np
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer
from werkzeug.security import generate_password_hash, check_password_hash

from utils import PoultryFarmBot, llama3_response
# Load environment variables from .env file
dotenv.load_dotenv()
# Setup logging for better monitoring
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# MongoDB Setup for logging and audit
# NOTE(review): if MONGO_URI is unset, MongoClient(None) falls back to the
# driver's default host — confirm that is intended outside local development.
MONGO_URI = os.getenv("MONGO_URI")
logger.info("Connecting to MongoDB.")
client = MongoClient(MONGO_URI)
db = client.poultry_farm # Connect to the 'poultry_farm' database
enquiries_collection = db.enquiries # Collection to store farmer enquiries
users_collection = db.users # Collection to store user credentials
logs_collection = db.logs # Collection to store application logs
def log_to_db(level, message):
    """Persist a single log record to the MongoDB 'logs' collection.

    Args:
        level (str): Log level name (e.g. "INFO", "WARNING").
        message (str): Formatted log message text.
    """
    log_entry = {
        "level": level,
        "message": message,
        # Timezone-aware UTC timestamp: datetime.utcnow() is deprecated
        # since Python 3.12 and returned a naive datetime.
        "timestamp": datetime.now(timezone.utc),
    }
    logs_collection.insert_one(log_entry)
# Override logger methods to also log to MongoDB
class MongoHandler(logging.Handler):
    """Logging handler that mirrors each emitted record into MongoDB."""

    def emit(self, record):
        """Format the record and persist it along with its level name."""
        log_to_db(record.levelname, self.format(record))
# Attach the Mongo-backed handler so logger output is also persisted to the DB.
mongo_handler = MongoHandler()
mongo_handler.setLevel(logging.INFO)  # mirror INFO and above only
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
mongo_handler.setFormatter(formatter)
logger.addHandler(mongo_handler)
# Hugging Face token setup
tok = os.getenv('HF_TOKEN')
if tok:
    # NOTE(review): this only logs a message — no authentication call
    # (e.g. huggingface_hub.login) is actually made here. Gated models
    # such as meta-llama may fail to download unless HF_TOKEN is honored
    # elsewhere (e.g. via the HF_TOKEN env var picked up by transformers).
    logger.info("Logging in to Hugging Face.")
else:
    logger.warning("Hugging Face token not found in environment variables.")
# Initialize the bot instance (shares the module-level MongoDB handle)
logger.info("Initializing PoultryFarmBot instance.")
bot = PoultryFarmBot(db)
# Load Llama 3.2 model and tokenizer for text generation
# NOTE(review): downloads occur at import time; meta-llama checkpoints are
# gated and need an authenticated HF session — confirm token handling above.
logger.info("Loading Llama 3.2 model and tokenizer.")
model_name = "meta-llama/Llama-3.2-3B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Ensure a padding token exists: add a dedicated [PAD] token if missing
if tokenizer.pad_token is None:
    logger.info("Adding padding token to tokenizer.")
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    # Adding a token grows the vocabulary, so the embedding matrix must match
    model.resize_token_embeddings(len(tokenizer))
def chatbot_response(image, text, username, password):
    """
    Handle user input and generate appropriate responses.

    Authenticates the user first; on success, dispatches to image-based
    disease diagnosis when an image is supplied, otherwise to Llama 3.2
    text generation. Every handled enquiry is logged via the bot.

    Args:
        image (numpy.ndarray | None): Image input for disease detection.
        text (str): Text input for general queries.
        username (str): Username for authentication.
        password (str): Password for authentication.

    Returns:
        str: Response generated by the chatbot.
    """
    user = bot.authenticate_user(username, password)
    if not user:
        return "Authentication failed. Please check your username and password."
    user_id = user['_id']

    # If an image is provided, diagnose the disease
    if image is not None:
        logger.info("Image input detected. Proceeding with disease diagnosis.")
        diagnosis, name, status, recom = bot.diagnose_disease(image)
        # Both outcomes log the enquiry and return the diagnosis text;
        # only the log level differs when the diagnosis is incomplete.
        if name and status and recom:
            logger.info("Diagnosis complete.")
        else:
            logger.warning("Diagnosis incomplete.")
        bot.log_enquiry("image", "Image Enquiry", diagnosis, user_id)
        return diagnosis

    # Generate a response using Llama 3.2 for general text input
    logger.info("Text input detected. Generating response.")
    response = llama3_response(text, tokenizer, model)
    bot.log_enquiry("text", text, response, user_id)
    return response
# Gradio interface
def build_gradio_interface():
    """
    Build the Gradio interface for the chatbot.

    Returns:
        gr.Blocks: Gradio Blocks object representing the chatbot interface.
    """
    logger.info("Building Gradio interface.")
    # Bug fix: the Blocks object must be bound to a name (`as chatbot_interface`);
    # the original returned an undefined name, raising NameError at runtime.
    with gr.Blocks(theme=gr.themes.Base()) as chatbot_interface:
        gr.Markdown("# 🐔 Poultry Management Chatbot")
        gr.Markdown("Welcome! This chatbot helps you manage your poultry with ease. You can upload an image for disease diagnosis or ask any questions about poultry management.")
        # NOTE(review): displayed in the layout but not wired into the
        # submit callback — conversation history is never populated.
        chat_history = gr.Chatbot()
        with gr.Row():
            with gr.Column(scale=1):
                fecal_image = gr.Image(
                    label="Upload Image of Poultry Feces (Optional)",
                    type="numpy",
                    elem_id="image-upload",
                    show_label=True,
                )
            with gr.Column(scale=2):
                user_input = gr.Textbox(
                    label="Ask a question",
                    placeholder="Ask about poultry management...",
                    lines=3,
                    elem_id="user-input",
                )
                username = gr.Textbox(
                    label="Username",
                    placeholder="Enter your username",
                    lines=1,
                    elem_id="username-input",
                )
                password = gr.Textbox(
                    label="Password",
                    placeholder="Enter your password",
                    type="password",
                    lines=1,
                    elem_id="password-input",
                )
        output_box = gr.Textbox(
            label="Response",
            placeholder="Response will appear here...",
            interactive=False,
            lines=10,
            elem_id="output-box",
        )
        submit_button = gr.Button(
            "Submit",
            variant="primary",
            elem_id="submit-button"
        )
        # Connect the submit button to the chatbot response function
        submit_button.click(
            fn=chatbot_response,
            inputs=[fecal_image, user_input, username, password],
            outputs=[output_box]
        )
    logger.info("Gradio interface built successfully.")
    return chatbot_interface
# Launch the Gradio interface
# Script entry point: build the UI and serve it.
if __name__ == "__main__":
    logger.info("Launching Gradio interface.")
    # Queue requests so concurrent users are handled without overlapping work.
    build_gradio_interface().queue().launch(debug=True, share=True)