Emmanuel Frimpong Asante committed
Commit 4a029da · 1 Parent(s): 0db5898

update space

Files changed (1)
  1. app.py +29 -11
app.py CHANGED
@@ -7,6 +7,7 @@ import numpy as np
 from huggingface_hub import login
 from pymongo import MongoClient
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from concurrent.futures import ThreadPoolExecutor
 
 # Ensure the Hugging Face token is set
 tok = os.environ.get('HF_Token')
@@ -32,11 +33,18 @@ print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
 
 # Set TensorFlow to use mixed precision with available GPU
 from tensorflow.keras import mixed_precision
+
 if len(tf.config.list_physical_devices('GPU')) > 0:
-    # Set mixed precision policy to use float16 for better performance on GPU
-    policy = mixed_precision.Policy('mixed_float16')
-    mixed_precision.set_global_policy(policy)
-    print("Using mixed precision with GPU")
+    # Ensure the GPU supports mixed precision
+    gpu_device = tf.config.list_physical_devices('GPU')[0]
+    gpu_info = tf.config.experimental.get_device_details(gpu_device)
+    if 'compute_capability' in gpu_info and gpu_info['compute_capability'][0] >= 7:
+        # Set mixed precision policy to use float16 for better performance on supported GPUs
+        policy = mixed_precision.Policy('mixed_float16')
+        mixed_precision.set_global_policy(policy)
+        print("Using mixed precision with GPU")
+    else:
+        print("GPU does not support mixed precision or may not provide significant benefits. Using default precision.")
 else:
     print("Using CPU without mixed precision")
 
@@ -46,11 +54,16 @@ try:
     device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
     print(f"Loading models on {device_name}...")
     with tf.device(device_name):
-        # Load the poultry disease detection model
-        my_model = load_model('models/disease_model.h5', compile=True)
+        def load_models():
+            with ThreadPoolExecutor() as executor:
+                future_disease_model = executor.submit(load_model, 'models/disease_model.h5', compile=True)
+                future_auth_model = executor.submit(load_model, 'models/auth_model.h5', compile=True)
+                return future_disease_model.result(), future_auth_model.result()
+
+
+        # Load models concurrently
+        my_model, auth_model = load_models()
         print("Disease detection model loaded successfully.")
-        # Load the authentication model
-        auth_model = load_model('models/auth_model.h5', compile=True)
         print("Authentication model loaded successfully.")
         print(f"Models loaded successfully on {device_name}.")
 except Exception as e:
@@ -66,6 +79,7 @@ recommend = {
     3: 'Ponston'
 }
 
+
 class PoultryFarmBot:
     def __init__(self):
         self.db = db  # MongoDB database for future use
@@ -114,7 +128,7 @@ class PoultryFarmBot:
     # Generate a detailed response using Llama 2 for disease information and recommendations
     def generate_disease_response(self, disease_name, status, recommendation):
        print("Generating detailed disease response...")
-        # Create a prompt for Llama 3 to generate detailed disease information
+        # Create a prompt for Llama 2 to generate detailed disease information
        prompt = (
            f"The disease detected is {disease_name}, classified as {status}. "
            f"Recommended action: {recommendation}. "
@@ -136,6 +150,7 @@ class PoultryFarmBot:
        print("Invalid image provided.")
        return "Please provide an image of poultry fecal matter for disease detection.", None, None, None
 
+
 # Initialize the bot instance
 print("Initializing PoultryFarmBot instance...")
 bot = PoultryFarmBot()
@@ -154,7 +169,8 @@ if tokenizer.pad_token is None:
     model.resize_token_embeddings(len(tokenizer))
     print("Pad token added and model resized.")
 
-# Define Llama 2 response generation
+
+# Define Llama 3.2 response generation
 def llama2_response(user_input):
     try:
         print("Generating response using Llama 2...")
@@ -178,6 +194,7 @@ def llama2_response(user_input):
         print(f"Error generating response: {e}")
         return f"Error generating response: {str(e)}"
 
+
 # Main chatbot function: handles both generative AI and disease detection
 def chatbot_response(image, text):
     print("Received user input for chatbot response...")
@@ -193,9 +210,10 @@ def chatbot_response(image, text):
         return diagnosis  # Return only the diagnostic message if no disease found
     else:
         print("No image provided, using Llama 3.2 for text response...")
-        # Use Llama 2 for more accurate responses to user text queries
+        # Use Llama 3.2 for more accurate responses to user text queries
         return llama2_response(text)
 
+
 # Gradio interface styling and layout with ChatGPT-like theme
 print("Setting up Gradio interface...")
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
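A side note on the mixed-precision guard introduced above: on CUDA builds, tf.config.experimental.get_device_details reports compute_capability as a (major, minor) tuple, and mixed_float16 generally only pays off on compute capability 7.0 or newer (Volta and later), where float16 matmuls run on Tensor Cores. A minimal standalone sketch of the same check, using only the TensorFlow APIs already present in the diff:

import tensorflow as tf
from tensorflow.keras import mixed_precision

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    # On CUDA GPUs this returns e.g. {'device_name': ..., 'compute_capability': (8, 6)}
    details = tf.config.experimental.get_device_details(gpus[0])
    major, _minor = details.get('compute_capability', (0, 0))
    if major >= 7:  # Volta (7.0) and newer have float16 Tensor Cores
        mixed_precision.set_global_policy('mixed_float16')

# Under mixed_float16, layers compute in float16 while variables stay in float32
policy = mixed_precision.global_policy()
print(policy.name, policy.compute_dtype, policy.variable_dtype)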
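On the concurrent model loading: future.result() re-raises any exception from load_model inside the surrounding try block, so the existing except clause still catches load failures. One caveat: tf.device scopes are generally thread-local, so worker threads spawned by the executor may not inherit the outer with tf.device(device_name) scope. A sketch of one way to make the placement explicit; the helper name _load_on is hypothetical and not part of this commit:

from concurrent.futures import ThreadPoolExecutor

import tensorflow as tf
from tensorflow.keras.models import load_model


def _load_on(device, path):
    # Re-enter the device scope inside the worker thread; a scope opened in the
    # main thread does not automatically apply to executor threads.
    with tf.device(device):
        return load_model(path, compile=True)


device_name = '/GPU:0' if tf.config.list_physical_devices('GPU') else '/CPU:0'
with ThreadPoolExecutor() as executor:
    disease_future = executor.submit(_load_on, device_name, 'models/disease_model.h5')
    auth_future = executor.submit(_load_on, device_name, 'models/auth_model.h5')
    # result() blocks until each load finishes and propagates worker exceptions
    my_model, auth_model = disease_future.result(), auth_future.result()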