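"""Poultry farm assistant: combines two Keras image classifiers (an
authenticity check and a chicken-disease classifier) with a LLaMA 3
instruct model for follow-up advice, exposed through a Gradio UI.
Also includes inventory tracking, reporting, environment monitoring,
and simple external-system integration."""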
import os
import tensorflow as tf
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from keras.models import load_model
import gradio as gr
import cv2
import numpy as np
from huggingface_hub import login
from datetime import datetime
import json
import requests

# Ensure the HF token is set
tok = os.getenv('HF_Token')
if tok:
    login(token=tok, add_to_git_credential=True)
else:
    print("Warning: Hugging Face token not found in environment variables.")

# Check GPU availability for both TensorFlow and PyTorch
print("Torch GPU available:", torch.cuda.is_available())
print("Number of GPUs:", torch.cuda.device_count())
print("TensorFlow version:", tf.__version__)
print("Eager execution:", tf.executing_eagerly())
print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))

# Set TensorFlow to use mixed precision to leverage the T4 GPU's capabilities when available
from tensorflow.keras import mixed_precision

if len(tf.config.list_physical_devices('GPU')) > 0:
    policy = mixed_precision.Policy('mixed_float16')
    mixed_precision.set_global_policy(policy)
    print("Using mixed precision with GPU")
else:
    print("Using CPU without mixed precision")

# Load TensorFlow/Keras models with GPU support if available, otherwise use CPU
try:
    device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
    with tf.device(device_name):  # Use GPU if available, otherwise CPU
        my_model = load_model('models/Final_Chicken_disease_model.h5', compile=True)
        auth_model = load_model('models/auth_model.h5', compile=True)
        print(f"Models loaded successfully on {device_name}.")
except Exception as e:
    print(f"Error loading models: {e}")
    raise

# Set PyTorch device to GPU if available, otherwise CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load the tokenizer and LLaMA model, ensuring they run on the correct device
llama_tokenizer = AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B-Instruct')
llama_model = AutoModelForCausalLM.from_pretrained(
    'meta-llama/Meta-Llama-3-8B-Instruct',
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32  # Use mixed precision if on GPU
).to(device)

# Explicitly set the pad token if not set
if llama_tokenizer.pad_token_id is None:
    llama_tokenizer.pad_token = llama_tokenizer.eos_token

# Class-index lookups: the disease classifier outputs an index (0-3) that maps
# to a disease name, a severity level, and a recommended medication
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'Newcastle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {0: 'Panadol', 1: 'No medication needed', 2: 'Paracetamol', 3: 'Ponston'}


class PoultryFarmBot:
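    """Farm-management helper bundling disease diagnosis, inventory
    tracking, reporting, environment checks, and emergency handling."""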
    def __init__(self):
        self.feed_inventory = 1000  # kg
        self.medicine_inventory = {"Panadol": 100, "Paracetamol": 50}
        self.chicken_health = {}
        self.reports = []

    # Health Monitoring and Disease Diagnosis
    def preprocess_image(self, image):
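        """Resize to the 224x224 input the Keras models expect and add a batch dimension."""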
        try:
            image_check = cv2.resize(image, (224, 224))
            image_check = np.expand_dims(image_check, axis=0)  # Add batch dimension
            return image_check
        except Exception as e:
            print(f"Error in image preprocessing: {e}")
            return None

    def predict(self, image):
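        """Gate the image through auth_model first; if it looks like a chicken
        image, classify the disease and return (message, name, status, recommendation)."""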
        image_check = self.preprocess_image(image)
        if image_check is None:
            return "Image preprocessing failed.", None, None, None

        indx = auth_model.predict(image_check).argmax()

        if indx == 0:  # If the image is recognized as a chicken disease image
            indx = my_model.predict(image_check).argmax()
            name = name_disease.get(indx)
            status = result.get(indx)
            recom = recommend.get(indx)

            if indx == 1:  # Healthy: skip the medication phrasing
                diagnosis = "The chicken appears to be healthy; no medication is needed."
            else:
                diagnosis = f"The chicken is in a {status} condition, diagnosed with {name}. The recommended medication is {recom}."
            return diagnosis, name, status, recom
        else:  # If the image is not recognized as a chicken disease image
            return (
                "The uploaded image is not recognized as a chicken or does not appear to be related to any known chicken diseases. "
                "Please ensure the image is clear and shows a chicken or its symptoms to receive a proper diagnosis."
            ), None, None, None

    def diagnose_disease(self, image=None, symptoms=None):
        """Diagnose from an image when one is supplied, otherwise fall back to a
        placeholder symptom-based answer. Always returns a 4-tuple of
        (message, name, status, recommendation) so callers can unpack safely."""
        if image is not None:  # `if image:` is ambiguous for NumPy arrays
            return self.predict(image)
        elif symptoms:
            # Simulate symptom-based diagnosis
            return "Based on the symptoms, the chicken might have Newcastle Disease.", None, None, None
        return "Please provide an image or describe the symptoms.", None, None, None

    # Inventory Management
    def track_inventory(self, item, usage):
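        """Deduct `usage` from the named item and warn when stock runs low
        (below 10 units for medicine, below 100 kg for feed)."""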
        if item in self.medicine_inventory:
            self.medicine_inventory[item] -= usage
            if self.medicine_inventory[item] < 10:
                return f"{item} inventory is low, please reorder."
            return f"{item} inventory updated. Current inventory: {self.medicine_inventory[item]} units."
        elif item == "feed":
            self.feed_inventory -= usage
            if self.feed_inventory < 100:
                return "Feed inventory is low, please reorder."
            return f"Feed inventory updated. Current inventory: {self.feed_inventory} kg."
        return "Item not recognized in inventory."

    # Reporting and Analytics
    def generate_report(self):
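        """Snapshot the current inventory and health data as a JSON report."""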
        report = {
            "date": str(datetime.now()),
            "feed_inventory": self.feed_inventory,
            "medicine_inventory": self.medicine_inventory,
            "chickens_monitored": len(self.chicken_health),
            "health_reports": self.chicken_health
        }
        self.reports.append(report)
        return json.dumps(report, indent=4)

    # IoT Device Integration (Temperature and Humidity Monitoring)
    def monitor_environment(self, temperature, humidity):
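        """Flag out-of-range readings (above 30 degrees C or below 40% humidity)."""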
        if temperature > 30:
            return "Temperature too high, increase ventilation."
        if humidity < 40:
            return "Humidity too low, consider using a humidifier."
        return "Environmental conditions are optimal."

    # Integration with External Systems
    def integrate_with_external_system(self, system_url, data):
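        """POST `data` as JSON to `system_url` and report the outcome."""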
        try:
            response = requests.post(system_url, json=data, timeout=10)  # timeout avoids hanging indefinitely
            if response.status_code == 200:
                return "Data successfully sent to external system."
            else:
                return f"Failed to send data. Status code: {response.status_code}"
        except Exception as e:
            return f"Integration failed with error: {str(e)}"

    # Emergency Handling
    def handle_emergency(self, emergency_type):
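        """Return a canned response for known emergency types."""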
        if emergency_type == "disease_outbreak":
            return "Disease outbreak detected. Isolate affected chickens and contact a veterinarian immediately."
        elif emergency_type == "equipment_failure":
            return "Equipment failure detected. Check the equipment immediately and perform necessary repairs."
        else:
            return "Unknown emergency type."


# Example usage of the chatbot with integrated Health Monitoring and Disease Diagnosis
bot = PoultryFarmBot()

# Health Monitoring and Disease Diagnosis
image = None  # Replace with actual image input
symptoms = "coughing and sneezing"
diagnosis_message, _, _, _ = bot.diagnose_disease(image=image, symptoms=symptoms)
print(diagnosis_message)

# Inventory Management
print(bot.track_inventory("feed", 50))
print(bot.track_inventory("Panadol", 10))

# Reporting and Analytics
print(bot.generate_report())

# IoT Device Integration (Temperature and Humidity Monitoring)
print(bot.monitor_environment(32, 35))

# Integration with External Systems
data_to_send = {"temperature": 32, "humidity": 35}
print(bot.integrate_with_external_system("https://api.external-system.com/data", data_to_send))

# Emergency Handling
print(bot.handle_emergency("disease_outbreak"))


# Gradio Interface for Health Monitoring
def generate_combined_response(image, text):
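    """Run the image diagnosis, then condition the LLaMA model on the result
    (plus any user question) to generate additional advice."""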
    diagnosis, name, status, recom = bot.diagnose_disease(image=image, symptoms=text)

    if name and status and recom:  # If the disease is recognized
        context = f"The chicken is in a {status} condition, diagnosed with {name}. The recommended medication is {recom}. "
        if text:
            context += f"Additionally, the user asked: '{text}'"
        inputs = llama_tokenizer(context, return_tensors='pt', padding=True).to(device)
        outputs = llama_model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],  # Pass attention mask
            max_length=500,
            do_sample=True
        )
        advice = llama_tokenizer.decode(
            outputs[0][inputs['input_ids'].shape[-1]:],  # decode only the new tokens, not the echoed prompt
            skip_special_tokens=True
        )
        return diagnosis + "\n\nAdditional Advice: " + advice
    else:
        return diagnosis


# Gradio Interface
interface = gr.Interface(
    fn=generate_combined_response,
    inputs=[gr.Image(label='Upload Image'), gr.Textbox(label='Describe symptoms or ask a question')],
    outputs=gr.Textbox(label="Response")
)

# Launch the interface
interface.launch(debug=True)