Emmanuel Frimpong Asante committed on
Commit 9597b4b · 1 Parent(s): af09ede

"Update space"


Signed-off-by: Emmanuel Frimpong Asante <[email protected]>

Files changed (2)
  1. app.py +13 -27
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,7 +1,7 @@
 import os
+import openai  # Import the OpenAI library
 import tensorflow as tf
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
 from keras.models import load_model
 import gradio as gr
 import cv2
@@ -18,9 +18,10 @@ if tok:
 else:
     print("Warning: Hugging Face token not found in environment variables.")
 
-# Check GPU availability for both TensorFlow and PyTorch
-print("Torch GPU available:", torch.cuda.is_available())
-print("Number of GPUs:", torch.cuda.device_count())
+# Set your OpenAI API key
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+# Check GPU availability for TensorFlow
 print("TensorFlow version:", tf.__version__)
 print("Eager execution:", tf.executing_eagerly())
 print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
@@ -46,21 +47,6 @@ except Exception as e:
     print(f"Error loading models: {e}")
     raise
 
-# Set PyTorch device to GPU if available, otherwise CPU
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print(f"Using device: {device}")
-
-# Load the tokenizer and LLaMA model, ensuring they run on the correct device
-llama_tokenizer = AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B-Instruct')
-llama_model = AutoModelForCausalLM.from_pretrained(
-    'meta-llama/Meta-Llama-3-8B-Instruct',
-    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32  # Use mixed precision if on GPU
-).to(device)
-
-# Explicitly set the pad token if not set
-if llama_tokenizer.pad_token_id is None:
-    llama_tokenizer.pad_token = llama_tokenizer.eos_token
-
 # Dictionaries for disease names, results, and recommendations
 name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'New Castle Disease', 3: 'Salmonella'}
 result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
@@ -201,7 +187,7 @@ print(bot.integrate_with_external_system("https://api.external-system.com/data",
 print(bot.handle_emergency("disease_outbreak"))
 
 
-# Gradio Interface for Health Monitoring
+# Function to generate a response using OpenAI's GPT model
 def generate_combined_response(image, text):
     diagnosis, name, status, recom = bot.diagnose_disease(image=image, symptoms=text)
 
@@ -209,14 +195,14 @@ def generate_combined_response(image, text):
         context = f"The chicken is in a {status} condition, diagnosed with {name}. The recommended medication is {recom}. "
         if text:
             context += f"Additionally, the user asked: '{text}'"
-        inputs = llama_tokenizer(context, return_tensors='pt', padding=True).to(device)
-        outputs = llama_model.generate(
-            inputs['input_ids'],
-            attention_mask=inputs['attention_mask'],  # Pass attention mask
-            max_length=500,
-            do_sample=True
+
+        # Use OpenAI's GPT model to generate additional advice
+        response = openai.Completion.create(
+            model="gpt-3.5-turbo",  # Use GPT-4 or gpt-3.5-turbo based on your API access
+            prompt=context,
+            max_tokens=150
         )
-        advice = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
+        advice = response.choices[0].text.strip()
         return diagnosis + "\n\nAdditional Advice: " + advice
     else:
         return diagnosis
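
Note: `gpt-3.5-turbo` is a chat model, and the legacy `openai.Completion.create` endpoint used above rejects chat models at runtime, so this code path is likely to fail once exercised. Below is a minimal sketch of the equivalent call through the Chat Completions API, staying with the pre-1.0 module-level SDK style (`openai.api_key`) that app.py already uses; the helper name and system prompt are illustrative, not part of the commit:

import os
import openai  # pre-1.0 SDK, matching the module-level openai.api_key style above

openai.api_key = os.getenv("OPENAI_API_KEY")

def generate_advice(context: str) -> str:
    """Hypothetical helper: generate follow-up advice for a diagnosis summary."""
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # chat models go through ChatCompletion, not Completion
        messages=[
            {"role": "system", "content": "You are a poultry health assistant."},  # illustrative prompt
            {"role": "user", "content": context},
        ],
        max_tokens=150,
    )
    return response.choices[0].message["content"].strip()

With this endpoint the generated text comes back on `choices[0].message` rather than `choices[0].text`, so the `advice = response.choices[0].text.strip()` line in the diff would need the same adjustment.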
requirements.txt CHANGED
@@ -6,4 +6,4 @@ transformers~=4.43.3
 numpy~=1.23.5
 torchvision
 accelerate
-
+openai
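
Note: the new `openai` requirement is unpinned, so a fresh build of the Space will install the 1.x SDK, which removed the module-level `openai.Completion` and `openai.ChatCompletion` interfaces that app.py relies on. Either pin the legacy SDK (e.g. `openai~=0.28` in requirements.txt) or move to the 1.x client interface; a minimal sketch of the latter, carrying over the model name and token budget from the diff (the helper name is again illustrative):

from openai import OpenAI  # 1.x SDK client interface

client = OpenAI()  # reads OPENAI_API_KEY from the environment by default

def generate_advice(context: str) -> str:
    """Hypothetical helper: the same advice call expressed against the 1.x SDK."""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": context}],
        max_tokens=150,
    )
    return response.choices[0].message.content.strip()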