import os
import tensorflow as tf
from keras.models import load_model
import gradio as gr
import cv2
import numpy as np
from huggingface_hub import login
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer
# Ensure the Hugging Face token is set
tok = os.getenv('HF_Token')
if tok:
    login(token=tok, add_to_git_credential=True)
else:
    print("Warning: Hugging Face token not found in environment variables.")
# MongoDB Setup (for inventory, record-keeping, etc.)
MONGO_URI = os.getenv("MONGO_URI")
client = MongoClient(MONGO_URI)
db = client.poultry_farm # Database
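
# A minimal sketch of how the database could be used for record-keeping.
# The `diagnoses` collection and its fields are illustrative assumptions,
# not part of the original schema:
#
#   db.diagnoses.insert_one({
#       "disease": "Coccidiosis",
#       "status": "Critical",
#       "recommendation": "Panadol",
#   })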
# Check GPU availability for TensorFlow
print("TensorFlow version:", tf.__version__)
print("Eager execution:", tf.executing_eagerly())
print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
# Use mixed precision when a GPU is available
from tensorflow.keras import mixed_precision

if len(tf.config.list_physical_devices('GPU')) > 0:
    policy = mixed_precision.Policy('mixed_float16')
    mixed_precision.set_global_policy(policy)
    print("Using mixed precision with GPU")
else:
    print("Using CPU without mixed precision")
# Load TensorFlow/Keras models with GPU support if available, otherwise use CPU
try:
    device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
    with tf.device(device_name):
        my_model = load_model('models/Final_Chicken_disease_model.h5', compile=True)
        auth_model = load_model('models/auth_model.h5', compile=True)
    print(f"Models loaded successfully on {device_name}.")
except Exception as e:
    print(f"Error loading models: {e}")
# Disease names and recommendations based on fecal analysis
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'Newcastle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {
    0: 'Panadol',
    1: 'No medication needed',
    2: 'Paracetamol',
    3: 'Ponston'
}
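
# Note: the integer keys above are assumed to match the class order the
# classifier was trained with; if the model used a different label order,
# these mappings must be updated accordingly.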
class PoultryFarmBot:
    def __init__(self):
        self.db = db  # MongoDB database for future use

    # Image preprocessing for fecal disease detection
    def preprocess_image(self, image):
        try:
            image_check = cv2.resize(image, (224, 224))
            image_check = np.expand_dims(image_check, axis=0)  # Add batch dimension
            return image_check
        except Exception as e:
            print(f"Error in image preprocessing: {e}")
            return None
    # Predict disease from a fecal image
    def predict(self, image):
        image_check = self.preprocess_image(image)
        if image_check is None:
            return "Image preprocessing failed.", None, None, None
        # Predict using the fecal disease detection model
        indx = my_model.predict(image_check).argmax()
        name = name_disease.get(indx, "Unknown disease")
        status = result.get(indx, "unknown condition")
        recom = recommend.get(indx, "no recommendation available")
        # Generate additional information about the disease using Llama 3.2
        detailed_response = self.generate_disease_response(name, status, recom)
        return detailed_response, name, status, recom
    # Generate a detailed response using Llama 3.2 for disease information and recommendations
    def generate_disease_response(self, disease_name, status, recommendation):
        prompt = (
            f"The disease detected is {disease_name}, classified as {status}. "
            f"Recommended action: {recommendation}. "
            f"Here is some information about {disease_name}: causes, symptoms, and treatment methods "
            "to effectively manage this condition on a poultry farm."
        )
        response = llama_response(prompt)
        # Strip the prompt if the model echoed it back in the response
        return response.replace(prompt, "").strip()
    # Diagnose disease from a fecal image
    def diagnose_disease(self, image):
        if image is not None and image.size > 0:  # Ensure the image is valid and non-empty
            return self.predict(image)
        return "Please provide an image of poultry faecal matter for disease detection.", None, None, None
# Initialize the bot instance
bot = PoultryFarmBot()
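
# A minimal usage sketch for the bot (the sample path is hypothetical):
#
#   img = cv2.imread("samples/fecal_sample.jpg")  # BGR ndarray from disk
#   detailed, name, status, recom = bot.predict(img)
#   print(name, status, recom)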
# Load the Llama 3.2 model and tokenizer for text generation
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Llama tokenizers ship without a padding token, so add one and resize the embeddings to match
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    model.resize_token_embeddings(len(tokenizer))
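
# Note: a common alternative (an assumption, not what this script does) is to
# reuse the EOS token as the padding token, which avoids resizing the
# embedding matrix:
#
#   tokenizer.pad_token = tokenizer.eos_token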
# Generate a response with the Llama 3.2 model
def llama_response(user_input):
    try:
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=150, padding=True)
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=150,  # max_length would count prompt tokens and could leave no room to generate
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.pad_token_id,  # Use the newly added padding token
        )
        # Decode and return the generated text
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response
    except Exception as e:
        return f"Error generating response: {str(e)}"
# Main chatbot function: handles both disease detection and generative chat
def chatbot_response(image, text):
    # If an image is provided, run disease detection
    if image is not None:
        diagnosis, name, status, recom = bot.diagnose_disease(image)
        # Either the detailed response or the fallback message asking for a valid image
        return diagnosis
    # Otherwise, answer the text query with the Llama model
    return llama_response(text)
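
# chatbot_response dispatches on whether an image was supplied, e.g.
# (both calls are illustrative):
#
#   chatbot_response(None, "What causes Newcastle Disease?")  # text-only chat
#   chatbot_response(img, "")                                 # image diagnosis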
# Gradio interface styling and layout with a ChatGPT-like theme
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
    gr.Markdown("# 🐔 Poultry Management Chatbot")
    gr.Markdown(
        "This chatbot can help you manage your poultry with conversational AI. "
        "Upload an image of poultry fecal matter for disease detection, or just ask questions!"
    )
    with gr.Row():
        with gr.Column(scale=1):
            fecal_image = gr.Image(
                label="Upload Image of Poultry Feces (Optional)",
                type="numpy",
                elem_id="image-upload",
                show_label=True,
            )
        with gr.Column(scale=2):
            user_input = gr.Textbox(
                label="Type your question or chat with the assistant",
                placeholder="Ask a question about poultry management...",
                lines=3,
                elem_id="user-input",
            )
    output_box = gr.Textbox(
        label="Response",
        placeholder="The response will appear here...",
        interactive=False,
        lines=10,
        elem_id="output-box",
    )
    submit_button = gr.Button(
        "Submit",
        variant="primary",
        elem_id="submit-button",
    )
    submit_button.click(
        fn=chatbot_response,
        inputs=[fecal_image, user_input],
        outputs=[output_box],
    )
# Launch the Gradio interface
if __name__ == "__main__":
    chatbot_interface.queue().launch(debug=True, share=True)