|
|
|
|
|
|
|
import logging
import os
from datetime import datetime, timezone

import dotenv
import gradio as gr
import numpy as np
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer
from werkzeug.security import generate_password_hash, check_password_hash

from utils import PoultryFarmBot, llama3_response
|
|
|
|
|
dotenv.load_dotenv() |
|
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') |
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
MONGO_URI = os.getenv("MONGO_URI") |
|
logger.info("Connecting to MongoDB.") |
|
client = MongoClient(MONGO_URI) |
|
db = client.poultry_farm |
|
enquiries_collection = db.enquiries |
|
users_collection = db.users |
|
logs_collection = db.logs |
|
|
|
def log_to_db(level, message): |
|
log_entry = { |
|
"level": level, |
|
"message": message, |
|
"timestamp": datetime.utcnow() |
|
} |
|
logs_collection.insert_one(log_entry) |
|
|
|
|
|
class MongoHandler(logging.Handler): |
|
def emit(self, record): |
|
log_entry = self.format(record) |
|
log_to_db(record.levelname, log_entry) |
|
|
|
mongo_handler = MongoHandler() |
|
mongo_handler.setLevel(logging.INFO) |
|
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') |
|
mongo_handler.setFormatter(formatter) |
|
logger.addHandler(mongo_handler) |
|
|
|
|
|
tok = os.getenv('HF_TOKEN') |
|
if tok: |
|
|
|
logger.info("Logging in to Hugging Face.") |
|
else: |
|
logger.warning("Hugging Face token not found in environment variables.") |
|
|
|
|
|
logger.info("Initializing PoultryFarmBot instance.") |
|
bot = PoultryFarmBot(db) |
|
|
|
|
|
logger.info("Loading Llama 3.2 model and tokenizer.") |
|
model_name = "meta-llama/Llama-3.2-3B" |
|
tokenizer = AutoTokenizer.from_pretrained(model_name) |
|
model = AutoModelForCausalLM.from_pretrained(model_name) |
|
|
|
|
|
if tokenizer.pad_token is None: |
|
logger.info("Adding padding token to tokenizer.") |
|
tokenizer.add_special_tokens({'pad_token': '[PAD]'}) |
|
model.resize_token_embeddings(len(tokenizer)) |
|
|
|
def chatbot_response(image, text, username, password): |
|
""" |
|
Handle user input and generate appropriate responses. |
|
|
|
Args: |
|
image (numpy.ndarray): Image input for disease detection. |
|
text (str): Text input for general queries. |
|
username (str): Username for authentication. |
|
password (str): Password for authentication. |
|
|
|
Returns: |
|
str: Response generated by the chatbot. |
|
""" |
|
user = bot.authenticate_user(username, password) |
|
if not user: |
|
return "Authentication failed. Please check your username and password." |
|
|
|
user_id = user['_id'] |
|
|
|
|
|
if image is not None: |
|
logger.info("Image input detected. Proceeding with disease diagnosis.") |
|
diagnosis, name, status, recom = bot.diagnose_disease(image) |
|
if name and status and recom: |
|
logger.info("Diagnosis complete.") |
|
bot.log_enquiry("image", "Image Enquiry", diagnosis, user_id) |
|
return diagnosis |
|
else: |
|
logger.warning("Diagnosis incomplete.") |
|
bot.log_enquiry("image", "Image Enquiry", diagnosis, user_id) |
|
return diagnosis |
|
else: |
|
|
|
logger.info("Text input detected. Generating response.") |
|
response = llama3_response(text, tokenizer, model) |
|
bot.log_enquiry("text", text, response, user_id) |
|
return response |
|
|
|
|
|
def build_gradio_interface(): |
|
""" |
|
Build the Gradio interface for the chatbot. |
|
|
|
Returns: |
|
gr.Blocks: Gradio Blocks object representing the chatbot interface. |
|
""" |
|
logger.info("Building Gradio interface.") |
|
with gr.Blocks(theme=gr.themes.Base()): |
|
gr.Markdown("# π Poultry Management Chatbot") |
|
gr.Markdown("Welcome! This chatbot helps you manage your poultry with ease. You can upload an image for disease diagnosis or ask any questions about poultry management.") |
|
|
|
chat_history = gr.Chatbot() |
|
with gr.Row(): |
|
with gr.Column(scale=1): |
|
fecal_image = gr.Image( |
|
label="Upload Image of Poultry Feces (Optional)", |
|
type="numpy", |
|
elem_id="image-upload", |
|
show_label=True, |
|
) |
|
with gr.Column(scale=2): |
|
user_input = gr.Textbox( |
|
label="Ask a question", |
|
placeholder="Ask about poultry management...", |
|
lines=3, |
|
elem_id="user-input", |
|
) |
|
username = gr.Textbox( |
|
label="Username", |
|
placeholder="Enter your username", |
|
lines=1, |
|
elem_id="username-input", |
|
) |
|
password = gr.Textbox( |
|
label="Password", |
|
placeholder="Enter your password", |
|
type="password", |
|
lines=1, |
|
elem_id="password-input", |
|
) |
|
|
|
output_box = gr.Textbox( |
|
label="Response", |
|
placeholder="Response will appear here...", |
|
interactive=False, |
|
lines=10, |
|
elem_id="output-box", |
|
) |
|
|
|
submit_button = gr.Button( |
|
"Submit", |
|
variant="primary", |
|
elem_id="submit-button" |
|
) |
|
|
|
submit_button.click( |
|
fn=chatbot_response, |
|
inputs=[fecal_image, user_input, username, password], |
|
outputs=[output_box] |
|
) |
|
logger.info("Gradio interface built successfully.") |
|
return chatbot_interface |
|
|
|
|
|
if __name__ == "__main__": |
|
logger.info("Launching Gradio interface.") |
|
interface = build_gradio_interface() |
|
|
|
interface.queue().launch(debug=True, share=True) |