Emmanuel Frimpong Asante committed
Commit 300aded · 1 Parent(s): fa011fc

"Update space"

Signed-off-by: Emmanuel Frimpong Asante <[email protected]>

Files changed (1):
  1. app.py +16 -20
app.py CHANGED
@@ -102,22 +102,18 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-# Define Mistral-based response generation
-def mistral_response(user_input):
+
+# Define Mistral-based response generation with streaming support
+def mistral_response_stream(user_input):
     try:
-        responses = generator(
-            user_input,
-            max_length=150,
-            num_return_sequences=1,
-            truncation=True,  # Explicitly set truncation
-            temperature=0.7,
-            do_sample=True,  # Enable sampling for temperature setting
-            pad_token_id=tokenizer.eos_token_id  # Set padding token ID to EOS token
-        )
-        return responses[0]["generated_text"]
+        inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
+        # Stream the response token by token
+        for output in model.generate(inputs["input_ids"], max_length=150, do_sample=True, temperature=0.7,
+                                     pad_token_id=tokenizer.eos_token_id, early_stopping=True,
+                                     return_dict_in_generate=True, output_scores=True):
+            yield tokenizer.decode(output.tolist(), skip_special_tokens=True)
     except Exception as e:
-        return f"Error generating response: {str(e)}"
-
+        yield f"Error generating response: {str(e)}"
 
 # Main chatbot function: handles both generative AI and disease detection
 def chatbot_response(image, text):
@@ -125,13 +121,12 @@ def chatbot_response(image, text):
     if image is not None:
         diagnosis, name, status, recom = bot.diagnose_disease(image)
         if name and status and recom:
-            return diagnosis
+            yield diagnosis
         else:
-            return diagnosis  # Return only the diagnostic message if no disease found
-
-    # If no image is provided, proceed with generative AI response
-    return mistral_response(text)
-
+            yield diagnosis  # Return only the diagnostic message if no disease found
+    else:
+        # Stream the generative AI response
+        yield from mistral_response_stream(text)
 
 # Gradio interface styling and layout with ChatGPT-like theme
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
@@ -173,6 +168,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
         fn=chatbot_response,
         inputs=[fecal_image, user_input],
         outputs=[output_box],
+        stream=True  # Enable streaming
     )
 
 # Launch the Gradio interface
 
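Note on the new streaming function: `model.generate(...)` runs the full generation before returning, and with `return_dict_in_generate=True` it returns a single output object, so the committed `for` loop iterates over that object's fields rather than over tokens; the `except` branch may end up catching the resulting error. If true token-by-token streaming is the goal, a minimal sketch using transformers' `TextIteratorStreamer` could look like the following. It reuses the module-level `model` and `tokenizer` from app.py; `max_new_tokens=150` is an assumption meant to mirror the committed `max_length=150`.

from threading import Thread

from transformers import TextIteratorStreamer


def mistral_response_stream(user_input):
    try:
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        # generate() blocks until generation finishes, so run it in a
        # background thread and read text chunks off the streamer as
        # they are produced.
        thread = Thread(target=model.generate, kwargs=dict(
            **inputs,
            max_new_tokens=150,        # assumption: stands in for the committed max_length=150
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
            streamer=streamer,
        ))
        thread.start()
        partial = ""
        for new_text in streamer:
            partial += new_text
            yield partial  # yield the running text so the UI shows progress
        thread.join()
    except Exception as e:
        yield f"Error generating response: {str(e)}"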
 
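For context on the `yield`-based rewrite of `chatbot_response`: once a Gradio event handler contains a `yield`, the whole function becomes a generator (which is why the image branch also switches from `return` to `yield`), and Gradio streams each yielded value into the output component, replacing its current value. A tiny self-contained illustration, with hypothetical names not taken from app.py:

import time

import gradio as gr


def count_up(n):
    shown = ""
    for i in range(int(n)):
        shown += f"{i} "
        time.sleep(0.2)
        yield shown  # each yield re-renders the Textbox with the running text


demo = gr.Interface(fn=count_up, inputs=gr.Number(value=5), outputs=gr.Textbox())

if __name__ == "__main__":
    demo.queue().launch()  # older Gradio versions need the queue enabled for streaming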
 
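One caveat on the last hunk: `stream` is not a documented keyword of Gradio's event listeners in the versions I am aware of, so depending on the installed version the `.click(...)` call may raise a TypeError for an unexpected keyword argument. Streaming is driven by the handler being a generator, so the event wiring can stay as before; in this sketch, `submit_btn` is an assumption about the component that owns the event elsewhere in app.py:

# Hedged sketch: the generator handler alone drives streaming.
# `submit_btn` is hypothetical; `fecal_image`, `user_input`, and
# `output_box` are the components named in the diff.
submit_btn.click(
    fn=chatbot_response,  # generator function -> streamed output
    inputs=[fecal_image, user_input],
    outputs=[output_box],
)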