import cv2
import numpy as np
import torch
import gradio as gr
from keras.models import load_model
from transformers import AutoModelForCausalLM, AutoTokenizer
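
# Load the two Keras classifiers (the disease model and an image-authenticity
# check) and the chat model used for free-text questions.
# NOTE: 'huggingface/llama3' looks like a placeholder id; point it at the actual
# Llama checkpoint (and local .h5 paths) available in your environment.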
my_model = load_model('models/Final_Chicken_disease_model.h5', compile=True)
auth_model = load_model('models/auth_model.h5', compile=True)
llama_tokenizer = AutoTokenizer.from_pretrained('huggingface/llama3')
llama_model = AutoModelForCausalLM.from_pretrained('huggingface/llama3')
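
# Class-index to label maps for the four-class disease model: disease name,
# severity, and the suggested medication.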
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'New Castle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {0: 'Panadol', 1: 'You have no need of Medicine', 2: 'Paracetamol', 3: 'Ponston'}

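
# predict(): gate the upload with auth_model (class 0 is treated as a valid
# poultry image), then run the disease classifier and look up the label,
# severity, and medication for the predicted class.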
def predict(image):
    # Resize to the 224x224 input both models expect and add a batch dimension.
    batch = np.expand_dims(cv2.resize(image, (224, 224)), axis=0)

    # Only classify the disease if the authenticity model accepts the image.
    if auth_model.predict(batch).argmax() == 0:
        indx = my_model.predict(batch).argmax()
        name = name_disease.get(indx)
        status = result.get(indx)
        recom = recommend.get(indx)
    else:
        name = 'Unknown Image'
        status = 'N/A'
        recom = 'N/A'

    return f"Chicken is {status}, the disease it has is {name}, the recommended medication is {recom}"

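
# chat_response(): answer a free-text question by sampling from the language model.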
def chat_response(user_input):
    # Tokenize the question and sample a completion of at most 500 tokens.
    inputs = llama_tokenizer(user_input, return_tensors='pt')
    outputs = llama_model.generate(inputs['input_ids'], max_length=500, do_sample=True)
    return llama_tokenizer.decode(outputs[0], skip_special_tokens=True)

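
# combined_interface(): route to the image classifier when an image is supplied,
# otherwise to the chat model.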
def combined_interface(image, text):
    if image is not None:
        return predict(image)
    elif text:
        return chat_response(text)
    else:
        return "Please provide an image or ask a question."

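
# Gradio interface: an optional image upload, an optional question box, and one
# text response. The first example row assumes a local file named disease.jpg.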
interface = gr.Interface(
    fn=combined_interface,
    inputs=[gr.Image(label='Upload Image'),
            gr.Textbox(label='Ask a question')],
    outputs=gr.Textbox(label="Response"),
    examples=[['disease.jpg', ''], [None, 'What should I do if my chicken is sick?']]
)

interface.launch(debug=True)