Emmanuel Frimpong Asante committed
Commit fbbb3f7 · 1 Parent(s): 300aded

"Update space"


Signed-off-by: Emmanuel Frimpong Asante <[email protected]>

Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -102,16 +102,17 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-
 # Define Mistral-based response generation with streaming support
 def mistral_response_stream(user_input):
     try:
         inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
+        outputs = model.generate(inputs["input_ids"], max_length=150, do_sample=True, temperature=0.7,
+                                 pad_token_id=tokenizer.eos_token_id)
+
         # Stream the response token by token
-        for output in model.generate(inputs["input_ids"], max_length=150, do_sample=True, temperature=0.7,
-                                     pad_token_id=tokenizer.eos_token_id, early_stopping=True,
-                                     return_dict_in_generate=True, output_scores=True):
-            yield tokenizer.decode(output.tolist(), skip_special_tokens=True)
+        for output in outputs:
+            response = tokenizer.decode(output, skip_special_tokens=True)
+            yield response
     except Exception as e:
         yield f"Error generating response: {str(e)}"
 
@@ -167,10 +168,9 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) a
     submit_button.click(
         fn=chatbot_response,
         inputs=[fecal_image, user_input],
-        outputs=[output_box],
-        stream=True  # Enable streaming
+        outputs=[output_box]
     )
 
 # Launch the Gradio interface
 if __name__ == "__main__":
-    chatbot_interface.launch(debug=True, share=True)
+    chatbot_interface.queue().launch(debug=True, share=True)
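
For context, a minimal, self-contained sketch of the streaming pattern this commit moves toward: a generator event handler whose successive yields Gradio pushes into the output textbox, which is why the launch call gains queue(). The Blocks layout below is a simplified stand-in for the real interface (it omits the fecal_image input), and fake_response_stream is a hypothetical placeholder for mistral_response_stream so the example runs without loading the model.

import time

import gradio as gr


def fake_response_stream(user_input):
    # Stand-in for mistral_response_stream: yields a growing partial response.
    words = f"Echoing: {user_input}".split()
    text = ""
    for word in words:
        time.sleep(0.1)  # simulate per-chunk generation latency
        text += word + " "
        yield text       # each yield updates the output component


with gr.Blocks() as chatbot_interface:
    user_input = gr.Textbox(label="Your question")
    output_box = gr.Textbox(label="Response")
    submit_button = gr.Button("Submit")
    # A generator handler is enough for streaming output; click() only
    # needs the inputs and outputs lists.
    submit_button.click(fn=fake_response_stream, inputs=[user_input], outputs=[output_box])

if __name__ == "__main__":
    chatbot_interface.queue().launch(debug=True, share=True)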