Emmanuel Frimpong Asante committed
Commit b8c7a7f · 1 Parent(s): 287eae5

update space
Files changed (1):
  1. app.py (+19 -33)
app.py CHANGED
@@ -1,4 +1,3 @@
-
 import os
 import tensorflow as tf
 from keras.models import load_model
@@ -9,8 +8,6 @@ from huggingface_hub import login
 from pymongo import MongoClient
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from concurrent.futures import ThreadPoolExecutor
-import torch
-import random

 # Ensure the Hugging Face token is set
 tok = os.environ.get('HF_Token')
@@ -38,19 +35,16 @@ print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
 from tensorflow.keras import mixed_precision

 if len(tf.config.list_physical_devices('GPU')) > 0:
-    try:
-        # Ensure the GPU supports mixed precision
-        gpu_device = tf.config.list_physical_devices('GPU')[0]
-        gpu_info = tf.config.experimental.get_device_details(gpu_device)
-        if 'compute_capability' in gpu_info and gpu_info['compute_capability'][0] >= 7:
-            # Set mixed precision policy to use float16 for better performance on supported GPUs
-            policy = mixed_precision.Policy('mixed_float16')
-            mixed_precision.set_global_policy(policy)
-            print("Using mixed precision with GPU")
-        else:
-            print("GPU does not support mixed precision or may not provide significant benefits. Using default precision.")
-    except Exception as e:
-        print(f"Error during mixed precision setup: {e}")
+    # Ensure the GPU supports mixed precision
+    gpu_device = tf.config.list_physical_devices('GPU')[0]
+    gpu_info = tf.config.experimental.get_device_details(gpu_device)
+    if 'compute_capability' in gpu_info and gpu_info['compute_capability'][0] >= 7:
+        # Set mixed precision policy to use float16 for better performance on supported GPUs
+        policy = mixed_precision.Policy('mixed_float16')
+        mixed_precision.set_global_policy(policy)
+        print("Using mixed precision with GPU")
+    else:
+        print("GPU does not support mixed precision or may not provide significant benefits. Using default precision.")
 else:
     print("Using CPU without mixed precision")
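A note on this hunk: dropping the try/except means a failure in the capability check now surfaces instead of being swallowed. tf.config.experimental.get_device_details reports compute_capability as a (major, minor) tuple, so the >= 7 test selects Volta-class GPUs and newer, where float16 tensor cores actually pay off. A minimal standalone probe, as a sketch assuming TensorFlow 2.x (not part of this commit):

    import tensorflow as tf

    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        details = tf.config.experimental.get_device_details(gpus[0])
        # Typical result: {'device_name': 'Tesla T4', 'compute_capability': (7, 5)}
        major, minor = details.get('compute_capability', (0, 0))
        print(f"GPU compute capability: {major}.{minor}")
    else:
        print("No GPU visible to TensorFlow")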
 
@@ -67,7 +61,8 @@ try:
     print(f"Models loaded successfully on {device_name}.")
 except Exception as e:
     print(f"Error loading models: {e}")
-    print("Falling back to CPU for model loading.")
+    if 'weight_decay' in str(e):
+        print("Invalid argument 'weight_decay' found. Please adjust optimizer settings.")
     my_model, auth_model = None, None

 # Updated Disease names and recommendations based on fecal analysis
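The new weight_decay branch points at a Keras version mismatch: the saved model carries an optimizer argument the running build does not recognize. A common workaround, shown as an assumption rather than what this commit does (the 'disease_model.h5' path is a placeholder), is to load with compile=False and recompile by hand:

    from keras.models import load_model

    # compile=False skips restoring the saved optimizer config, which is
    # where unknown arguments such as weight_decay would otherwise fail.
    model = load_model('disease_model.h5', compile=False)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])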
@@ -93,7 +88,7 @@ class PoultryFarmBot:
             # Resize the image to match model input size (224x224)
             image_check = cv2.resize(image, (224, 224))
             # Add batch dimension to the image array
-            image_check = np.expand_dims(image_check, axis=0).astype('float16')
+            image_check = np.expand_dims(image_check, axis=0)
             print("Image preprocessing successful.")
             return image_check
         except Exception as e:
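Dropping .astype('float16') is consistent with the mixed-precision change above: under a mixed_float16 policy Keras keeps variables in float32 and casts layer inputs itself, so a hand-cast float16 array mainly risks dtype mismatches. The resulting preprocessing, as a self-contained sketch:

    import cv2
    import numpy as np

    def preprocess(image: np.ndarray) -> np.ndarray:
        # Resize to the model's 224x224 input and add a batch dimension;
        # leave dtype casting to Keras and its precision policy.
        resized = cv2.resize(image, (224, 224))
        return np.expand_dims(resized, axis=0)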
@@ -112,8 +107,7 @@ class PoultryFarmBot:
         # Predict using the fecal disease detection model
         try:
             print("Running model prediction...")
-            with tf.device('/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'):
-                indx = my_model.predict(image_check).argmax()
+            indx = my_model.predict(image_check).argmax()
             print(f"Prediction complete. Predicted index: {indx}")
             name = name_disease.get(indx, "Unknown disease")
             status = result.get(indx, "unknown condition")
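Removing the explicit tf.device(...) wrapper is safe: TensorFlow places predict() on a visible GPU automatically. If placement ever needs verifying, logging it is less brittle than pinning; a sketch, not part of this commit:

    import tensorflow as tf

    # Print the device each op actually runs on instead of pinning by hand.
    tf.debugging.set_log_device_placement(True)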
@@ -161,32 +155,23 @@ bot = PoultryFarmBot()
 print("Loading Llama 3.2 model and tokenizer...")
 model_name = "meta-llama/Llama-3.2-1B"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-try:
-    # Use a random seed to avoid potential CUDA random_device error
-    torch.manual_seed(random.randint(0, 10000))
-    model = AutoModelForCausalLM.from_pretrained(model_name).to('cuda' if torch.cuda.is_available() else 'cpu')
-    print("Llama 3.2 model and tokenizer loaded successfully.")
-except Exception as e:
-    print(f"Error loading Llama 3.2 model: {e}")
-    model = None
+model = AutoModelForCausalLM.from_pretrained(model_name)
+print("Llama 3.2 model and tokenizer loaded successfully.")

 # Set the padding token to EOS token or add a new padding token
 if tokenizer.pad_token is None:
     print("Adding pad token to tokenizer...")
     tokenizer.add_special_tokens({'pad_token': '[PAD]'})
-    if model:
-        model.resize_token_embeddings(len(tokenizer))
+    model.resize_token_embeddings(len(tokenizer))
     print("Pad token added and model resized.")


 # Define Llama 3.2 response generation
 def llama2_response(user_input):
-    if model is None:
-        return "Model is not available for generating response."
     try:
         print("Generating response using Llama 2...")
         # Tokenize user input for the Llama 2 model
-        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=500, padding=True).to('cuda' if torch.cuda.is_available() else 'cpu')
+        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=500, padding=True)
         # Generate a response using the Llama 2 model
         outputs = model.generate(
             inputs["input_ids"],
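Two notes on this hunk. The helper is still named llama2_response and logs "Llama 2" even though it now serves Llama-3.2-1B. Also, because generate() receives padded input, passing the attention mask and pad token id avoids warnings and degraded output; reusing the EOS token as the pad token is a common alternative to adding '[PAD]' that skips the embedding resize. A self-contained sketch of both ideas, as assumptions rather than what this commit does:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "meta-llama/Llama-3.2-1B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Reuse EOS as the pad token: no new embedding row, no resize needed.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    inputs = tokenizer("How do I treat coccidiosis?",  # illustrative prompt
                       return_tensors="pt", truncation=True,
                       max_length=500, padding=True)
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # mark real vs. padded tokens
        pad_token_id=tokenizer.pad_token_id,
        max_new_tokens=150,  # illustrative cap; the hunk cuts off the original arguments
    )
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))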
@@ -224,6 +209,7 @@ def chatbot_response(image, text):
     # Use Llama 3.2 for more accurate responses to user text queries
     return llama2_response(text)

+
 # Gradio interface styling and layout with ChatGPT-like theme
 print("Setting up Gradio interface...")
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
 