Emmanuel Frimpong Asante committed · Commit e88e97b · 1 Parent(s): f6bbd69

update space

Browse files:
- .idea/workspace.xml (+10 -12)
- services/convert_model.py (+0 -12)
- services/disease_detection_service.py (+100 -35)
.idea/workspace.xml CHANGED

@@ -5,10 +5,8 @@
     </component>
     <component name="ChangeListManager">
       <list default="true" id="27c9ae1a-a6fa-4472-8bcd-a7087620894b" name="Changes" comment="update space">
-        <change afterPath="$PROJECT_DIR$/models/auth.h5" afterDir="false" />
         <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
-        <change beforePath="$PROJECT_DIR$/…
-        <change beforePath="$PROJECT_DIR$/services/convert_model.py" beforeDir="false" afterPath="$PROJECT_DIR$/services/convert_model.py" afterDir="false" />
+        <change beforePath="$PROJECT_DIR$/services/convert_model.py" beforeDir="false" />
         <change beforePath="$PROJECT_DIR$/services/disease_detection_service.py" beforeDir="false" afterPath="$PROJECT_DIR$/services/disease_detection_service.py" afterDir="false" />
       </list>
       <option name="SHOW_DIALOG" value="false" />
@@ -131,14 +129,6 @@
       <workItem from="1730397485849" duration="22781000" />
       <workItem from="1730454506390" duration="12672000" />
     </task>
-    <task id="LOCAL-00076" summary="update space">
-      <option name="closed" value="true" />
-      <created>1730402280098</created>
-      <option name="number" value="00076" />
-      <option name="presentableId" value="LOCAL-00076" />
-      <option name="project" value="LOCAL" />
-      <updated>1730402280098</updated>
-    </task>
     <task id="LOCAL-00077" summary="update space">
       <option name="closed" value="true" />
       <created>1730402500058</created>
@@ -523,7 +513,15 @@
       <option name="project" value="LOCAL" />
       <updated>1730490013545</updated>
     </task>
-    <…
+    <task id="LOCAL-00125" summary="update space">
+      <option name="closed" value="true" />
+      <created>1730491745530</created>
+      <option name="number" value="00125" />
+      <option name="presentableId" value="LOCAL-00125" />
+      <option name="project" value="LOCAL" />
+      <updated>1730491745530</updated>
+    </task>
+    <option name="localTasksCounter" value="126" />
     <servers />
   </component>
   <component name="TypeScriptGeneratedFilesManager">
services/convert_model.py DELETED

@@ -1,12 +0,0 @@
-# convert_model.py
-
-import tensorflow as tf
-
-MODEL_PATH = "models/diseases.h5"
-NEW_MODEL_PATH = "models/Final_Chicken_disease_model"
-
-# Load the .h5 model
-model = tf.keras.models.load_model(MODEL_PATH)
-
-# Save it in the SavedModel format
-model.save(NEW_MODEL_PATH)
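For context, the removed script was a one-shot converter from the legacy Keras .h5 format to a TensorFlow SavedModel directory. A minimal sketch of loading the converted artifact back to verify the round trip, assuming the SavedModel directory produced by the deleted script still exists under models/:

    import tensorflow as tf

    # Load the SavedModel directory written by the deleted convert_model.py
    # (the path is an assumption carried over from that script).
    restored = tf.keras.models.load_model("models/Final_Chicken_disease_model")
    restored.summary()  # confirm the architecture survived the conversion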
services/disease_detection_service.py CHANGED

@@ -4,6 +4,7 @@ import os
 import logging
 import threading
 import tensorflow as tf
+import torch
 import cv2
 import numpy as np
 from keras.models import load_model
@@ -19,6 +20,7 @@ logger = logging.getLogger(__name__)
 # Load Hugging Face API token for secure model access
 HF_TOKEN = os.environ.get('HF_Token')
 if HF_TOKEN:
+    # Login to Hugging Face using the token from environment variables
     login(token=HF_TOKEN, add_to_git_credential=True)
     logger.info("Hugging Face login successful.")
 else:
@@ -32,8 +34,12 @@ logger.info("Configuration loaded from config.ini.")
 # Configure TensorFlow for GPU and mixed precision if supported
 gpu_devices = tf.config.list_physical_devices('GPU')
 if gpu_devices:
+    logger.info(f"Number of GPUs found: {len(gpu_devices)}")
     for gpu in gpu_devices:
+        # Enable memory growth to prevent TensorFlow from allocating all GPU memory at once
         tf.config.experimental.set_memory_growth(gpu, True)
+        logger.info(f"Enabled memory growth for GPU: {gpu.name}")
+    # Enable mixed precision if GPU has sufficient compute capability
     if tf.config.experimental.get_device_details(gpu_devices[0]).get('compute_capability', (0, 0))[0] >= 7:
         from tensorflow.keras import mixed_precision
         policy = mixed_precision.Policy('mixed_float16')
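Side note: tf.config.experimental.get_device_details returns a dict whose 'compute_capability' entry is a (major, minor) tuple on CUDA GPUs, so the check above turns mixed_float16 on only for Volta-class (7.x) or newer hardware. A standalone probe, as a sketch assuming a CUDA build of TensorFlow:

    import tensorflow as tf

    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        details = tf.config.experimental.get_device_details(gpus[0])
        major, minor = details.get('compute_capability', (0, 0))
        print(f"Compute capability {major}.{minor}: mixed_float16 {'enabled' if major >= 7 else 'skipped'}")
    else:
        print("No GPU detected.")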
@@ -42,36 +48,38 @@ if gpu_devices:
 else:
     logger.info("No GPU detected, using CPU without mixed precision.")
 
+# Check if GPU is available for PyTorch
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+logger.info(f"Using device for PyTorch: {device}")
+
 # Global model variables and threading event for synchronization
 disease_model, auth_model = None, None
 llama_model, llama_tokenizer = None, None
 model_loading_event = threading.Event()
 
-# …
-def …
-    """Load …
+# Utility function to load models
+def load_model_util(model_path, model_name):
+    """Load and compile a Keras model from the provided path."""
     try:
-        …
-        …
-        …
-        model = load_model(model_path, compile=True)
-        logger.info(f"Disease detection model loaded successfully on {device.name}.")
+        logger.info(f"Loading {model_name} from path: {model_path}")
+        model = load_model(model_path, compile=True)
+        logger.info(f"{model_name} loaded successfully.")
         return model
     except Exception as e:
-        logger.error("Error loading …
-        raise RuntimeError("Failed to load …
+        logger.error(f"Error loading {model_name}", exc_info=True)
+        raise RuntimeError(f"Failed to load {model_name}") from e
+
+# Function to load the disease detection model
+def load_disease_model():
+    # Load the disease detection model from the configured path
+    model_path = config.get('MODELS', 'DISEASE_MODEL_PATH', fallback="models/diseases.h5")
+    return load_model_util(model_path, "Disease detection model")
 
 # Function to load the authentication model
 def load_auth_model():
-    …
-    …
-    …
-        auth_model = load_model(auth_model_path, compile=True)
-        logger.info("Authentication model loaded successfully.")
-        return auth_model
-    except Exception as e:
-        logger.error("Error loading authentication model", exc_info=True)
-        raise RuntimeError("Failed to load authentication model") from e
+    # Load the authentication model from the configured path
+    auth_model_path = config.get('MODELS', 'AUTH_MODEL_PATH', fallback="models/auth.h5")
+    return load_model_util(auth_model_path, "Authentication model")
 
 # Function to load the Llama 3.2 model for language generation
 def load_llama_model():
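The new load_model_util helper collapses the two near-identical try/except loaders into one code path, so any additional Keras model slots in the same way. A hypothetical example (the 'SEGMENTATION_MODEL_PATH' config key and file name are invented for illustration):

    # Hypothetical third loader built on the new helper.
    def load_segmentation_model():
        model_path = config.get('MODELS', 'SEGMENTATION_MODEL_PATH', fallback="models/segmentation.h5")
        return load_model_util(model_path, "Segmentation model")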
@@ -81,8 +89,11 @@ def load_llama_model():
     logger.info("Loading Llama 3.2 model and tokenizer using PyTorch-compatible model.")
     model_name = "meta-llama/Llama-3.2-1B"
 
+    # Load the tokenizer and model from Hugging Face
     llama_tokenizer = AutoTokenizer.from_pretrained(model_name)
-    …
+    logger.info("Tokenizer loaded successfully.")
+    llama_model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
+    logger.info("Model loaded successfully.")
 
     # Add a padding token if it's missing
     if llama_tokenizer.pad_token is None:
@@ -91,11 +102,11 @@ def load_llama_model():
         llama_model.resize_token_embeddings(len(llama_tokenizer))
 
         logger.info("Llama model and tokenizer loaded successfully.")
-        model_loading_event.set()
+        model_loading_event.set()  # Signal that the model has been loaded
         return llama_model, llama_tokenizer
     except Exception as e:
         logger.error("Failed to load Llama model", exc_info=True)
-        model_loading_event.set()
+        model_loading_event.set()  # Signal that model loading failed
         raise RuntimeError("Failed to load the Llama 3.2 model. Verify compatibility.")
 
 # Disease mappings and recommended treatments
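The pad-token branch visible in the context lines follows the usual Hugging Face pattern for Llama-family tokenizers, which ship without a padding token. The statements between the two context lines are not shown in this diff, so the following is a sketch of the standard idiom rather than the commit's exact code:

    # Common fallback when a causal-LM tokenizer lacks a pad token.
    if llama_tokenizer.pad_token is None:
        llama_tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        llama_model.resize_token_embeddings(len(llama_tokenizer))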
@@ -108,79 +119,133 @@ recommendations = {
     3: 'Administer antibiotics as prescribed by a veterinarian and ensure biosecurity.'
 }
 
-
 class PoultryFarmBot:
     def __init__(self, disease_model, auth_model):
         self.disease_model = disease_model
         self.auth_model = auth_model
-        logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(__name__)
+        self.logger.info("PoultryFarmBot initialized with provided models.")
 
     def preprocess_image(self, image: np.ndarray) -> np.ndarray:
         """Preprocess the input image for model prediction."""
         try:
+            self.logger.info(f"Original image shape: {image.shape}")
+            # Resize the image to the required input size for the model
             image = cv2.resize(image, (224, 224))
-            …
+            self.logger.info(f"Image resized to: {image.shape}")
+            # Normalize pixel values to be between 0 and 1
+            image = image / 255.0
+            self.logger.info("Image normalized for model input.")
+            return image
         except Exception as e:
             self.logger.error("Error in image preprocessing", exc_info=True)
             raise ValueError("Invalid image format or empty image provided.")
 
     def generate_detailed_response(self, disease_name: str, status: str, recommendation: str) -> str:
         """Generate detailed response using Llama model based on prediction."""
+        # Create a prompt with detailed information about the disease
         prompt = (
             f"The detected disease is {disease_name}, classified as {status}. "
             f"Suggested action: {recommendation}. "
             f"Here is additional information on {disease_name}: causes, symptoms, and effective management."
         )
+        self.logger.info(f"Generated prompt for Llama model: {prompt}")
+        # Use the Llama model to generate a more detailed response
         response = self.llama_response(prompt)
+        self.logger.info("Generated detailed response from Llama model.")
+        # Remove the original prompt from the response and return only the generated text
        return response.replace(prompt, "").strip()
 
     def predict_disease(self, image: np.ndarray):
         """Predict disease from preprocessed image and provide detailed results."""
         try:
+            # Preprocess the image for prediction
+            self.logger.info("Starting image preprocessing for disease prediction.")
             preprocessed_image = self.preprocess_image(image)
 
             # Use auth_model to verify the image is poultry-related
+            self.logger.info("Verifying if the image is poultry-related.")
             is_poultry = self.auth_model.predict(preprocessed_image.reshape(1, 224, 224, 3)).argmax()
+            self.logger.info(f"Auth model prediction result: {is_poultry}")
             if is_poultry != 0:
                 self.logger.info("Image not recognized as poultry.")
-                return …
-                …
-                …
+                return {
+                    "message": "Image not recognized as poultry.",
+                    "disease_name": "N/A",
+                    "status": "N/A",
+                    "recommendation": "N/A",
+                    "confidence": None
+                }
+
+            # Predict disease if image is verified as poultry
+            self.logger.info("Predicting disease from the image.")
             prediction = self.disease_model.predict(preprocessed_image.reshape(1, 224, 224, 3))
             predicted_class = prediction.argmax()
+            self.logger.info(f"Disease model prediction result: {predicted_class}")
             disease_name = name_disease.get(predicted_class, "Unknown disease")
             disease_status = status_map.get(predicted_class, "Unknown status")
             recommendation = recommendations.get(predicted_class, "No recommendation available")
-            confidence = prediction[0][predicted_class] * 100
+            confidence = float(prediction[0][predicted_class] * 100)
             self.logger.info(f"Disease Prediction: {disease_name} with {confidence:.2f}% confidence.")
 
             # Generate a detailed response using Llama model
             detailed_response = self.generate_detailed_response(disease_name, disease_status, recommendation)
-            …
+            self.logger.info("Generated detailed response for disease prediction.")
+            return {
+                "message": detailed_response,
+                "disease_name": disease_name,
+                "status": disease_status,
+                "recommendation": recommendation,
+                "confidence": confidence
+            }
         except Exception as e:
             self.logger.error("Error in disease prediction", exc_info=True)
-            return …
+            return {
+                "message": "Prediction failed.",
+                "disease_name": None,
+                "status": None,
+                "recommendation": None,
+                "confidence": None
+            }
 
     def llama_response(self, prompt: str) -> str:
         """Generate a response from the Llama model based on a prompt."""
         try:
+            # Limit the maximum length of the generated response
             max_length = min(len(prompt) + 50, 512)
-            …
+            self.logger.info(f"Generating response with max length: {max_length}")
+            # Tokenize the input prompt
+            inputs = llama_tokenizer(prompt, return_tensors="pt", max_length=max_length, truncation=True, padding=True).to(device)
+            self.logger.info("Input prompt tokenized successfully.")
+            # Generate a response using the Llama model
             outputs = llama_model.generate(inputs["input_ids"], max_length=max_length, do_sample=True, temperature=0.7, top_p=0.9)
-            …
+            self.logger.info("Response generated by Llama model.")
+            # Decode the generated response into a readable string
+            response = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
+            self.logger.info("Response decoded successfully.")
+            return response
         except Exception as e:
             logger.error("Error generating response from Llama model", exc_info=True)
             return "Error generating detailed response."
 
     def diagnose_and_respond(self, image: np.ndarray):
         """Diagnose disease and provide comprehensive response."""
+        # Check if the provided image is valid
+        self.logger.info("Starting diagnosis process.")
         if image is None or image.size == 0:
-            …
+            self.logger.warning("Invalid image provided for diagnosis.")
+            return {
+                "message": "Provide a valid poultry fecal image.",
+                "disease_name": None,
+                "status": None,
+                "recommendation": None,
+                "confidence": None
+            }
+        # Predict disease from the provided image
         return self.predict_disease(image)
 
-
 # Load models upon service startup
+logger.info("Loading models for disease detection service.")
 disease_model = load_disease_model()
 auth_model = load_auth_model()
 llama_model, llama_tokenizer = load_llama_model()
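Taken together, the service now returns structured dictionaries instead of bare strings or None. A minimal usage sketch of the revised API, assuming the module is importable as services.disease_detection_service and that a poultry fecal image exists at the invented path sample.jpg:

    import cv2
    from services.disease_detection_service import PoultryFarmBot, disease_model, auth_model

    bot = PoultryFarmBot(disease_model, auth_model)
    image = cv2.imread("sample.jpg")          # BGR ndarray, or None if the path is bad
    result = bot.diagnose_and_respond(image)  # dict with message/disease_name/status/recommendation/confidence
    print(result["disease_name"], result["confidence"])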