Emmanuel Frimpong Asante committed
Commit 01bc5c3 · 1 Parent(s): e61f474

"Update space"


Signed-off-by: Emmanuel Frimpong Asante <[email protected]>

Files changed (1): app.py +3 -1
app.py CHANGED

@@ -7,6 +7,7 @@ import torch  # Import torch here
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from huggingface_hub import login
 import os
+import spaces
 
 # Ensure the HF token is set
 tok = os.getenv('HF_Token')
@@ -53,6 +54,7 @@ def predict(image):
     return f"Chicken is {status}, the disease it has is {name}, the recommended medication is {recom}"
 
 
+@spaces.GPU(duration=200)
 def chat_response(user_input):
     inputs = llama_tokenizer(user_input, return_tensors='pt')
     outputs = llama_model.generate(inputs['input_ids'], max_length=500, do_sample=True)
@@ -88,4 +90,4 @@ interface = gr.Interface(
 )
 
 # Launch the interface
-interface.launch(debug=True, share=True)
+interface.launch(debug=True)
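
For context, a minimal sketch of how the spaces.GPU decorator added in this commit is typically wired into a Gradio app running on ZeroGPU hardware. The model id, the HF_Token secret name, and the generation settings below are placeholder assumptions for illustration, not the Space's actual configuration.

    # Minimal ZeroGPU sketch (assumed setup; not this Space's actual code).
    import os

    import gradio as gr
    import spaces                      # Hugging Face Spaces helper for ZeroGPU
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from huggingface_hub import login

    # Authenticate with a token stored as a Space secret (secret name is an assumption).
    login(token=os.getenv("HF_Token"))

    MODEL_ID = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model id
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16)

    @spaces.GPU(duration=200)          # request a GPU for up to 200 seconds per call
    def chat_response(user_input):
        # Move the model and inputs to the GPU inside the decorated function,
        # since ZeroGPU attaches the device only while the call runs.
        model.to("cuda")
        inputs = tokenizer(user_input, return_tensors="pt").to("cuda")
        outputs = model.generate(inputs["input_ids"], max_length=500, do_sample=True)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

    interface = gr.Interface(fn=chat_response, inputs="text", outputs="text")
    interface.launch(debug=True)       # no share=True: the Space already serves a public URL

Dropping share=True from launch() is consistent with running inside a Space: the app is already hosted at a public URL, so a Gradio share tunnel adds nothing there.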