import os
import tensorflow as tf
from keras.models import load_model
import gradio as gr
import cv2
import numpy as np
from huggingface_hub import login
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer
# Log in to the Hugging Face Hub if a token is provided
tok = os.getenv('HF_Token')
if tok:
login(token=tok, add_to_git_credential=True)
else:
print("Warning: Hugging Face token not found in environment variables.")
# MongoDB Setup (for inventory, record-keeping, etc.)
MONGO_URI = os.getenv("MONGO_URI")
client = MongoClient(MONGO_URI)
db = client.poultry_farm # Database
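# The handle is unused so far; a hypothetical sketch of recording a diagnosis
# (the "diagnoses" collection name and fields are assumptions, not defined elsewhere):
#   from datetime import datetime, timezone
#   db.diagnoses.insert_one({"disease": name, "status": status,
#                            "recorded_at": datetime.now(timezone.utc)})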
# Check GPU availability for TensorFlow
print("TensorFlow version:", tf.__version__)
print("Eager execution:", tf.executing_eagerly())
print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
# Enable mixed precision when a GPU is available
from tensorflow.keras import mixed_precision
if len(tf.config.list_physical_devices('GPU')) > 0:
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_global_policy(policy)
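    # mixed_float16 runs compute in float16 while keeping variables in float32 for numerical stability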
print("Using mixed precision with GPU")
else:
print("Using CPU without mixed precision")
# Load TensorFlow/Keras models with GPU support if available, otherwise use CPU
try:
device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
with tf.device(device_name):
my_model = load_model('models/Final_Chicken_disease_model.h5', compile=True)
auth_model = load_model('models/auth_model.h5', compile=True)
print(f"Models loaded successfully on {device_name}.")
except Exception as e:
print(f"Error loading models: {e}")
# Updated Disease names and recommendations based on fecal analysis
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'Newcastle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {
    0: 'Panadol',
    1: 'No medication needed',
    2: 'Paracetamol',
    3: 'Ponstan'
}
class PoultryFarmBot:
def __init__(self):
self.db = db # MongoDB database for future use
# Image Preprocessing for Fecal Disease Detection
def preprocess_image(self, image):
try:
image_check = cv2.resize(image, (224, 224))
image_check = np.expand_dims(image_check, axis=0) # Add batch dimension
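            # NOTE: no pixel scaling/normalization is applied here; this assumes
            # the Keras model was trained on raw 0-255 inputs (an assumption about the .h5 file)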
return image_check
except Exception as e:
print(f"Error in image preprocessing: {e}")
return None
# Predict Disease from Fecal Image
def predict(self, image):
image_check = self.preprocess_image(image)
if image_check is None:
return "Image preprocessing failed.", None, None, None
# Predict using the fecal disease detection model
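        # predict() returns a probability vector per class; argmax picks the most likely class index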
indx = my_model.predict(image_check).argmax()
name = name_disease.get(indx, "Unknown disease")
status = result.get(indx, "unknown condition")
recom = recommend.get(indx, "no recommendation available")
        # Generate additional information about the disease using the Llama 3.2 text model
detailed_response = self.generate_disease_response(name, status, recom)
return detailed_response, name, status, recom
    # Generate a detailed response using Llama 3.2 for disease information and recommendations
def generate_disease_response(self, disease_name, status, recommendation):
prompt = (
f"The disease detected is {disease_name}, classified as {status}. "
f"Recommended action: {recommendation}. "
f"Here is some information about {disease_name}: causes, symptoms, and treatment methods "
"to effectively manage this condition on a poultry farm."
)
response = llama2_response(prompt)
        # Defensive: strip the prompt if the model echoed it in the response
return response.replace(prompt, "").strip()
# Diagnose Disease Using Fecal Image
def diagnose_disease(self, image):
if image is not None and image.size > 0: # Ensure the image is valid and has elements
return self.predict(image)
return "Please provide an image of poultry faecal matter for disease detection.", None, None, None
# Initialize the bot instance
bot = PoultryFarmBot()
# Load Llama 3.2 model and tokenizer for text generation
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Llama tokenizers ship without a padding token; add one if it is missing
if tokenizer.pad_token is None:
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
model.resize_token_embeddings(len(tokenizer))
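    # Resizing grows the embedding matrix so the new [PAD] token id maps to a valid embedding row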
# Define Llama 3.2 response generation (the function keeps its original llama2_response name)
def llama2_response(user_input):
try:
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=150, padding=True)
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=150,  # cap new tokens; max_length would count the prompt and could leave no room to generate
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.pad_token_id,  # use the padding token set above
        )
        # Decode only the newly generated tokens; generate() returns prompt + continuation
        response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
        return response
except Exception as e:
return f"Error generating response: {str(e)}"
# Main chatbot function: handles both generative AI and disease detection
def chatbot_response(image, text):
    # If an image is provided, run fecal disease detection
    if image is not None:
        diagnosis, _name, _status, _recom = bot.diagnose_disease(image)
        return diagnosis  # the diagnosis string already carries the full answer
    # Otherwise answer the text query with the Llama 3.2 model
    return llama2_response(text)
# Gradio interface styling and layout with ChatGPT-like theme
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
gr.Markdown("# 🐔 Poultry Management Chatbot")
gr.Markdown(
"This chatbot can help you manage your poultry with conversational AI. Upload an image of poultry fecal matter for disease detection or just ask questions!"
)
with gr.Row():
with gr.Column(scale=1):
fecal_image = gr.Image(
label="Upload Image of Poultry Feces (Optional)",
type="numpy",
elem_id="image-upload",
show_label=True,
)
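            # type="numpy" hands the upload to the callback as an ndarray,
            # which is what preprocess_image expects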
with gr.Column(scale=2):
user_input = gr.Textbox(
label="Type your question or chat with the assistant",
placeholder="Ask a question about poultry management...",
lines=3,
elem_id="user-input",
)
output_box = gr.Textbox(
label="Response",
placeholder="The response will appear here...",
interactive=False,
lines=10,
elem_id="output-box",
)
submit_button = gr.Button(
"Submit",
variant="primary",
elem_id="submit-button"
)
submit_button.click(
fn=chatbot_response,
inputs=[fecal_image, user_input],
outputs=[output_box]
)
# Launch the Gradio interface
if __name__ == "__main__":
chatbot_interface.queue().launch(debug=True, share=True)