Emmanuel Frimpong Asante
committed on
Commit
·
be57017
1
Parent(s):
2924899
update space
Browse files- README.md +4 -4
- services/disease_detection_service.py +8 -13
README.md
CHANGED
@@ -54,10 +54,10 @@ This project integrates both a **Poultry Farming Assistance System** and a **Pou
|
|
54 |
- [x] Implement session management and token validation.
|
55 |
|
56 |
### 3. Poultry Farming Assistance System
|
57 |
-
- [
|
58 |
-
- [
|
59 |
-
- [
|
60 |
-
- [
|
61 |
- [ ] Implement real-time health monitoring and alert system for farmers.
|
62 |
|
63 |
### 4. To-Do List Management
|
|
|
54 |
- [x] Implement session management and token validation.
|
55 |
|
56 |
### 3. Poultry Farming Assistance System
|
57 |
+
- [x] Integrate poultry disease detection model (`Final_Chicken_disease_model.h5`).
|
58 |
+
- [x] Create an image preprocessing pipeline for disease detection.
|
59 |
+
- [x] Build routes to upload and analyze poultry fecal images.
|
60 |
+
- [x] Develop health-related notifications and treatment suggestions.
|
61 |
- [ ] Implement real-time health monitoring and alert system for farmers.
|
62 |
|
63 |
### 4. To-Do List Management
|
services/disease_detection_service.py
CHANGED
@@ -16,20 +16,14 @@ logger = logging.getLogger(__name__)
|
|
16 |
|
17 |
# Load environment variables
|
18 |
MONGO_URI = os.getenv("MONGO_URI")
|
19 |
-
HF_TOKEN = os.getenv('HF_Token')
|
20 |
db_client = MongoClient(MONGO_URI)
|
21 |
db = db_client.poultry_farm # MongoDB for record-keeping
|
22 |
|
23 |
-
# Hugging Face login for Llama 3.2
|
24 |
-
if HF_TOKEN:
|
25 |
-
login(token=HF_TOKEN, add_to_git_credential=True)
|
26 |
-
else:
|
27 |
-
logger.warning("Hugging Face token not found in environment variables.")
|
28 |
|
29 |
# Mixed precision and GPU configuration
|
30 |
-
|
31 |
-
if
|
32 |
-
for gpu in
|
33 |
tf.config.experimental.set_memory_growth(gpu, True)
|
34 |
from tensorflow.keras import mixed_precision
|
35 |
policy = mixed_precision.Policy('mixed_float16')
|
@@ -38,7 +32,6 @@ if gpus:
|
|
38 |
else:
|
39 |
logger.info("Using CPU without mixed precision.")
|
40 |
|
41 |
-
|
42 |
# Model loading functions
|
43 |
def load_disease_model():
|
44 |
"""Load the disease detection model."""
|
@@ -53,7 +46,6 @@ def load_disease_model():
|
|
53 |
logger.error(f"Error loading disease model: {e}")
|
54 |
raise RuntimeError("Failed to load disease detection model")
|
55 |
|
56 |
-
|
57 |
def load_llama_model():
|
58 |
"""Load the Llama 3.2 model for text generation."""
|
59 |
try:
|
@@ -71,6 +63,9 @@ def load_llama_model():
|
|
71 |
logger.error(f"Error loading Llama model: {e}")
|
72 |
raise RuntimeError("Failed to load Llama model")
|
73 |
|
|
|
|
|
|
|
74 |
|
75 |
# Disease mapping and treatment guidelines
|
76 |
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'New Castle Disease', 3: 'Salmonella'}
|
@@ -133,9 +128,9 @@ class PoultryFarmBot:
|
|
133 |
def llama_response(self, prompt):
|
134 |
"""Generate a response from the Llama 3.2 model based on a given prompt."""
|
135 |
try:
|
136 |
-
inputs =
|
137 |
outputs = llama_model.generate(inputs["input_ids"], max_length=150, do_sample=True, temperature=0.7)
|
138 |
-
return
|
139 |
except Exception as e:
|
140 |
logger.error(f"Llama model response error: {e}")
|
141 |
return "Error generating detailed response."
|
|
|
16 |
|
17 |
# Load environment variables
|
18 |
MONGO_URI = os.getenv("MONGO_URI")
|
|
|
19 |
db_client = MongoClient(MONGO_URI)
|
20 |
db = db_client.poultry_farm # MongoDB for record-keeping
|
21 |
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
# Mixed precision and GPU configuration
|
24 |
+
gpu_devices = tf.config.list_physical_devices('GPU')
|
25 |
+
if gpu_devices:
|
26 |
+
for gpu in gpu_devices:
|
27 |
tf.config.experimental.set_memory_growth(gpu, True)
|
28 |
from tensorflow.keras import mixed_precision
|
29 |
policy = mixed_precision.Policy('mixed_float16')
|
|
|
32 |
else:
|
33 |
logger.info("Using CPU without mixed precision.")
|
34 |
|
|
|
35 |
# Model loading functions
|
36 |
def load_disease_model():
|
37 |
"""Load the disease detection model."""
|
|
|
46 |
logger.error(f"Error loading disease model: {e}")
|
47 |
raise RuntimeError("Failed to load disease detection model")
|
48 |
|
|
|
49 |
def load_llama_model():
|
50 |
"""Load the Llama 3.2 model for text generation."""
|
51 |
try:
|
|
|
63 |
logger.error(f"Error loading Llama model: {e}")
|
64 |
raise RuntimeError("Failed to load Llama model")
|
65 |
|
66 |
+
# Load models at startup
|
67 |
+
disease_model = load_disease_model()
|
68 |
+
llama_model, llama_tokenizer = load_llama_model()
|
69 |
|
70 |
# Disease mapping and treatment guidelines
|
71 |
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'New Castle Disease', 3: 'Salmonella'}
|
|
|
128 |
def llama_response(self, prompt):
    """Generate a response from the Llama 3.2 model based on a given prompt."""
    try:
        # Encode the prompt as PyTorch tensors, truncating/padding to 150 tokens.
        encoded = llama_tokenizer(
            prompt,
            return_tensors="pt",
            max_length=150,
            truncation=True,
            padding=True,
        )
        # Sample a completion; temperature 0.7 trades determinism for variety.
        generated = llama_model.generate(
            encoded["input_ids"],
            max_length=150,
            do_sample=True,
            temperature=0.7,
        )
        return llama_tokenizer.decode(generated[0], skip_special_tokens=True)
    except Exception as e:
        logger.error(f"Llama model response error: {e}")
        return "Error generating detailed response."
|