Emmanuel Frimpong Asante committed
Commit · 300aded · 1 Parent(s): fa011fc
"Update space"
Signed-off-by: Emmanuel Frimpong Asante <[email protected]>
app.py CHANGED
@@ -102,22 +102,18 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-
-
+
+# Define Mistral-based response generation with streaming support
+def mistral_response_stream(user_input):
     try:
-
-
-
-
-
-
-            do_sample=True,  # Enable sampling for temperature setting
-            pad_token_id=tokenizer.eos_token_id  # Set padding token ID to EOS token
-        )
-        return responses[0]["generated_text"]
+        inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
+        # Stream the response token by token
+        for output in model.generate(inputs["input_ids"], max_length=150, do_sample=True, temperature=0.7,
+                                     pad_token_id=tokenizer.eos_token_id, early_stopping=True,
+                                     return_dict_in_generate=True, output_scores=True):
+            yield tokenizer.decode(output.tolist(), skip_special_tokens=True)
     except Exception as e:
-
-
+        yield f"Error generating response: {str(e)}"
 
 # Main chatbot function: handles both generative AI and disease detection
 def chatbot_response(image, text):
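Review note on the new mistral_response_stream: model.generate() runs to completion before it returns, so the for-loop above iterates over the finished generation output rather than receiving tokens as they are produced (and early_stopping only applies to beam search). A minimal sketch of true token-level streaming with transformers' TextIteratorStreamer, reusing the names from this diff; the background thread and the skip_prompt flag are assumptions, not part of this commit:

from threading import Thread
from transformers import TextIteratorStreamer

def mistral_response_stream(user_input):
    try:
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
        # The streamer receives tokens from generate() as they are sampled
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        generation_kwargs = dict(inputs, max_new_tokens=150, do_sample=True, temperature=0.7,
                                 pad_token_id=tokenizer.eos_token_id, streamer=streamer)
        # generate() blocks, so run it in a worker thread and consume the streamer here
        Thread(target=model.generate, kwargs=generation_kwargs).start()
        text = ""
        for chunk in streamer:  # yields decoded text incrementally
            text += chunk
            yield text          # emit the accumulated reply so far
    except Exception as e:
        yield f"Error generating response: {str(e)}"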
@@ -125,13 +121,12 @@ def chatbot_response(image, text):
     if image is not None:
         diagnosis, name, status, recom = bot.diagnose_disease(image)
         if name and status and recom:
-
+            yield diagnosis
         else:
-
-
-
-
-
+            yield diagnosis  # Return only the diagnostic message if no disease found
+    else:
+        # Stream the generative AI response
+        yield from mistral_response_stream(text)
 
 # Gradio interface styling and layout with ChatGPT-like theme
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
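Review note on chatbot_response: because it now yields instead of returning, Gradio treats it as a generator and re-renders the output component on every yield; no extra event flag is needed, though Gradio 3.x requires the queue to be enabled for generator handlers. A self-contained sketch of the same pattern (component names here are illustrative, not taken from app.py):

import gradio as gr

def respond(text):
    partial = ""
    for word in text.split():  # stand-in for mistral_response_stream
        partial += word + " "
        yield partial          # each yield updates the output textbox

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    reply = gr.Textbox(label="Reply")
    prompt.submit(fn=respond, inputs=prompt, outputs=reply)

demo.queue()  # needed for generator (streaming) handlers on older Gradio
demo.launch()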
@@ -173,6 +168,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) a
     fn=chatbot_response,
     inputs=[fecal_image, user_input],
     outputs=[output_box],
+    stream=True  # Enable streaming
 )
 
 # Launch the Gradio interface
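Review note on the event wiring: to my knowledge, Gradio event listeners such as Button.click() do not accept a stream= keyword (streaming is inferred from the handler being a generator), so this kwarg may raise a TypeError at startup. Assuming the truncated call above is a button's click event, the wiring without the flag should suffice, plus an enabled queue:

submit_btn.click(          # submit_btn is assumed; the diff truncates the call's opening
    fn=chatbot_response,
    inputs=[fecal_image, user_input],
    outputs=[output_box],
)
chatbot_interface.queue()  # enables streaming of generator outputs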