Update app.py
app.py
CHANGED
@@ -7,11 +7,9 @@ import cv2
 import numpy as np
 from huggingface_hub import login
 from pymongo import MongoClient
-from …
-import json
-import requests
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

-# Ensure the …
+# Ensure the Hugging Face token is set
 tok = os.getenv('HF_Token')
 if tok:
     login(token=tok, add_to_git_credential=True)
@@ -21,7 +19,7 @@ else:
 # Set your OpenAI API key
 openai.api_key = os.getenv("OPENAI_API_KEY")

-# MongoDB Setup
+# MongoDB Setup (for inventory, record-keeping, etc.)
 MONGO_URI = os.getenv("MONGO_URI")
 client = MongoClient(MONGO_URI)
 db = client.poultry_farm  # Database
@@ -31,9 +29,8 @@ print("TensorFlow version:", tf.__version__)
 print("Eager execution:", tf.executing_eagerly())
 print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))

-# Set TensorFlow to use mixed precision
+# Set TensorFlow to use mixed precision with available GPU
 from tensorflow.keras import mixed_precision
-
 if len(tf.config.list_physical_devices('GPU')) > 0:
     policy = mixed_precision.Policy('mixed_float16')
     mixed_precision.set_global_policy(policy)
@@ -44,19 +41,27 @@ else:
 # Load TensorFlow/Keras models with GPU support if available, otherwise use CPU
 try:
     device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
-    with tf.device(device_name):
-        …
-        …
-        print(f"Models loaded successfully on {device_name}.")
+    with tf.device(device_name):
+        fecal_model = load_model('models/Fecal_Disease_Model.h5', compile=True)
+        print(f"Fecal disease detection model loaded successfully on {device_name}.")
 except Exception as e:
-    print(f"Error loading …
+    print(f"Error loading fecal disease model: {e}")
     raise

+# Disease names and recommendations based on fecal analysis
+name_disease = {0: "Coccidiosis", 1: "Salmonella", 2: "Worm Infestation"}  # Update with actual classes
+result = {0: "critical", 1: "serious", 2: "moderate"}  # Update with relevant statuses
+recommend = {
+    0: "Use anticoccidial medication and improve litter management.",
+    1: "Administer antibiotics as prescribed by a veterinarian.",
+    2: "Deworm chickens using an appropriate anti-parasitic drug."
+}  # Update with relevant recommendations
+
 class PoultryFarmBot:
     def __init__(self):
-        self.db = db  # MongoDB database
+        self.db = db  # MongoDB database for future use

-    # …
+    # Image Preprocessing for Fecal Disease Detection
     def preprocess_image(self, image):
         try:
             image_check = cv2.resize(image, (224, 224))
@@ -66,187 +71,118 @@ class PoultryFarmBot:
             print(f"Error in image preprocessing: {e}")
             return None

+    # Predict Disease from Fecal Image
     def predict(self, image):
         image_check = self.preprocess_image(image)
         if image_check is None:
             return "Image preprocessing failed.", None, None, None

-        …
+        # Predict using the fecal disease detection model
+        indx = fecal_model.predict(image_check).argmax()
+        name = name_disease.get(indx, "Unknown disease")
+        status = result.get(indx, "unknown condition")
+        recom = recommend.get(indx, "no recommendation available")

-        …
-        …
-            name = name_disease.get(indx)
-            status = result.get(indx)
-            recom = recommend.get(indx)
-
-            diagnosis = f"The chicken is in a {status} condition, diagnosed with {name}. The recommended medication is {recom}."
-            return diagnosis, name, status, recom
-        else:  # If the image is not recognized as a chicken disease image
-            return (
-                "The uploaded image is not recognized as a chicken or does not appear to be related to any known chicken diseases. "
-                "Please ensure the image is clear and shows a chicken or its symptoms to receive a proper diagnosis."
-            ), None, None, None
+        diagnosis = f"The fecal sample indicates the poultry animal is in a {status} condition, diagnosed with {name}. The recommended action is {recom}."
+        return diagnosis, name, status, recom

+    # Diagnose Disease Using Fecal Image or Symptoms
     def diagnose_disease(self, image=None, symptoms=None):
         if image is not None and image.size > 0:  # Ensure image is valid and has elements
             return self.predict(image)
         elif symptoms:
-            # Generate diagnosis using …
+            # Generate diagnosis using GPT-based model based on the provided symptoms
             context = f"Symptoms: {symptoms}."
             response = openai.ChatCompletion.create(
-                model="gpt-3.5-turbo",
+                model="gpt-3.5-turbo",
                 messages=[
-                    {"role": "system",
-                     "content": "You are an advanced poultry farm management system, helping poultry farmers manage their flocks efficiently."},
+                    {"role": "system", "content": "You are an advanced poultry farm management system."},
                     {"role": "user", "content": context}
                 ],
                 max_tokens=150
             )
             diagnosis = response['choices'][0]['message']['content'].strip()
             return diagnosis, None, None, None
-        return "Please provide an image or describe the symptoms.", None, None, None
+        return "Please provide an image of poultry fecal matter or describe the symptoms.", None, None, None
-
-    # Inventory Management
-    def track_inventory(self, item, usage):
-        collection = self.db.inventory
-        inventory_item = collection.find_one({"item": item})
-        if inventory_item:
-            new_quantity = inventory_item["quantity"] - usage
-            collection.update_one({"item": item}, {"$set": {"quantity": new_quantity}})
-            if new_quantity < 10:
-                return f"{item} inventory is low, please reorder."
-            return f"{item} inventory updated. Current inventory: {new_quantity} units."
-        else:
-            return f"Item {item} not recognized in inventory."
-
-    def add_inventory_item(self, item, quantity):
-        collection = self.db.inventory
-        if collection.find_one({"item": item}):
-            collection.update_one({"item": item}, {"$inc": {"quantity": quantity}})
-        else:
-            collection.insert_one({"item": item, "quantity": quantity})
-        return f"Added {quantity} units of {item} to the inventory."
-
-    def view_inventory(self):
-        collection = self.db.inventory
-        inventory_list = list(collection.find({}))
-        if not inventory_list:
-            return "Inventory is empty."
-        return json.dumps(inventory_list, indent=4)
-
-    # Chicken and Egg Management
-    def add_chicken(self, chicken_id, breed, age):
-        collection = self.db.chickens
-        collection.insert_one({"chicken_id": chicken_id, "breed": breed, "age": age})
-        return f"Chicken {chicken_id} added to the database."
-
-    def update_chicken(self, chicken_id, update_data):
-        collection = self.db.chickens
-        collection.update_one({"chicken_id": chicken_id}, {"$set": update_data})
-        return f"Chicken {chicken_id} updated."
-
-    def add_eggs(self, quantity):
-        collection = self.db.eggs
-        collection.insert_one({"date": datetime.now(), "quantity": quantity})
-        return f"Added {quantity} eggs to the database."
-
-    # Reporting and Analytics
-    def generate_report(self):
-        report = {
-            "date": str(datetime.now()),
-            "feed_inventory": list(self.db.inventory.find({})),
-            "chickens": list(self.db.chickens.find({})),
-            "eggs_collected": list(self.db.eggs.find({})),
-            "health_reports": list(self.db.health_reports.find({}))
-        }
-        self.db.reports.insert_one(report)
-        return json.dumps(report, indent=4)
-
-    # Integration with External Systems
-    def integrate_with_external_system(self, system_url, data):
-        try:
-            response = requests.post(system_url, json=data)
-            if response.status_code == 200:
-                return "Data successfully sent to external system."
-            else:
-                return f"Failed to send data. Status code: {response.status_code}"
-
-        except Exception as e:
-            return f"Integration failed with error: {str(e)}"
-
-    # Emergency Handling
-    def handle_emergency(self, emergency_type):
-        if emergency_type == "disease_outbreak":
-            return "Disease outbreak detected. Isolate affected chickens and contact a veterinarian immediately."
-        elif emergency_type == "equipment_failure":
-            return "Equipment failure detected. Check the equipment immediately and perform necessary repairs."
-        else:
-            return "Unknown emergency type."
-

 # Initialize the bot instance
 bot = PoultryFarmBot()

-# …
-… (the remaining 9 removed lines are cut off in the diff view)
+# Load the Mistral model and tokenizer for text generation
+model_name = "mistralai/mistral-7b-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# Define Mistral-based response generation
+def mistral_response(user_input):
+    try:
+        responses = generator(
+            user_input,
+            max_length=150,
+            num_return_sequences=1,
+            temperature=0.7
+        )
+        return responses[0]["generated_text"]
+    except Exception as e:
+        return f"Error generating response: {str(e)}"

-# …
+# Gradio interface for disease diagnosis and chatbot response
 def generate_combined_response(image, text):
     diagnosis, name, status, recom = bot.diagnose_disease(image=image, symptoms=text)
-    …
-    …
-        context = f"The chicken is in a {status} condition, diagnosed with {name}. The recommended medication is {recom}. "
+    if name and status and recom:
+        context = f"The fecal sample indicates the poultry animal is in a {status} condition, diagnosed with {name}. The recommended action is {recom}. "
         if text:
             context += f"Additionally, the user asked: '{text}'"
-
-        # Use OpenAI's GPT model to generate additional advice
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",  # Use GPT-4 or gpt-3.5-turbo based on your API access
-            messages=[
-                {"role": "system", "content": "You are an expert poultry farm management assistant."},
-                {"role": "user", "content": context}
-            ],
-            max_tokens=150
-        )
-        advice = response['choices'][0]['message']['content'].strip()
+        advice = mistral_response(context)
         return diagnosis + "\n\nAdditional Advice: " + advice
     else:
         return diagnosis

-# Gradio …
-… (the remaining 28 removed lines are cut off in the diff view)
+# Gradio interface styling and layout
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as diagnosis_interface:
+    gr.Markdown("# 🐔 Poultry Fecal Diagnostic Assistant")
+    gr.Markdown(
+        "Upload an image of poultry fecal matter or describe symptoms to get a diagnosis and advice."
+    )
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            fecal_image = gr.Image(
+                label="Upload Image of Poultry Feces",
+                type="numpy",
+                tool="editor",
+                elem_id="image-upload",
+                show_label=True,
+            )
+        with gr.Column(scale=2):
+            symptom_text = gr.Textbox(
+                label="Describe Symptoms or Ask a Question",
+                placeholder="Enter symptoms of the poultry...",
+                lines=3,
+                elem_id="symptom-textbox",
+            )
+
+    output_box = gr.Textbox(
+        label="Diagnosis and Advice",
+        placeholder="The diagnosis and advice will appear here...",
+        interactive=False,
+        lines=6,
+        elem_id="output-box",
+    )
+
+    submit_button = gr.Button(
+        "Submit",
+        variant="primary",
+        elem_id="submit-button",
+        style={"width": "100%", "margin-top": "20px"},
+    )
+    submit_button.click(
+        fn=generate_combined_response,
+        inputs=[fecal_image, symptom_text],
+        outputs=[output_box],
+    )
+
+# Launch the Gradio interface
+if __name__ == "__main__":
+    diagnosis_interface.launch(debug=True, share=True)