Update app.py

app.py CHANGED
@@ -111,9 +111,10 @@ custom_css = """
 """
 
 with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, css=custom_css) as demo:
-    # …
-    …
-    …
+    # Separate states for display and model context
+    display_history_state = gr.State([])   # For Gradio chatbot display (with HTML formatting)
+    model_history_state = gr.State([])     # Clean history for the model (plain text only)
+    is_generating_state = gr.State(False)  # To prevent multiple submissions
 
     # Model info and CTA section
     gr.HTML("""
@@ -154,7 +155,7 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
         bubble_full_width=False,
         height=500,
         show_copy_button=True,
-        type="messages"
+        type="messages"
     )
 
     with gr.Row():
@@ -236,17 +237,19 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
             info="Maximum response length"
         )
 
-    def handle_user_message(user_message: str, …
-                            …
+    def handle_user_message(user_message: str, display_history: list, model_history: list,
+                            system_prompt_text: str, is_generating: bool,
+                            temp: float, top_p_val: float, top_k_val: int,
                             min_p_val: float, max_tokens: int):
         """
-        Handles user input, updates …
+        Handles user input, updates histories, and generates the model's response.
         """
         # Prevent multiple submissions
         if is_generating or not user_message.strip():
             return {
-                chatbot: …
-                …
+                chatbot: display_history,
+                display_history_state: display_history,
+                model_history_state: model_history,
                 is_generating_state: is_generating,
                 user_input: user_message,
                 submit_btn: gr.update(interactive=not is_generating)
@@ -255,13 +258,17 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
         # Set generating state
         is_generating = True
 
-        # …
-        …
+        # Update model history (clean format for the model - PLAIN TEXT ONLY)
+        model_history.append({"role": "user", "content": user_message.strip()})
+
+        # Update display history (for the Gradio chatbot)
+        display_history.append({"role": "user", "content": user_message.strip()})
 
         # Yield intermediate state to show user message and disable input
         yield {
-            chatbot: …
-            …
+            chatbot: display_history,
+            display_history_state: display_history,
+            model_history_state: model_history,
             is_generating_state: is_generating,
             user_input: "",
             submit_btn: gr.update(interactive=False, value="🔄 Generating...")
@@ -271,9 +278,7 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
         messages_for_model = []
         if system_prompt_text.strip():
             messages_for_model.append({"role": "system", "content": system_prompt_text.strip()})
-
-        # Add conversation history (excluding system messages for model input)
-        messages_for_model.extend([msg for msg in conversation if msg["role"] != "system"])
+        messages_for_model.extend(model_history)
 
         try:
             # Generate response with hyperparameters
@@ -286,7 +291,10 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
                 max_new_tokens=max_tokens
             )
 
-            # …
+            # Update model history with the CLEAN answer (no HTML formatting)
+            model_history.append({"role": "assistant", "content": answer})
+
+            # Format response for display (with HTML formatting)
             if thinking and thinking.strip():
                 formatted_response = f"""<details>
 <summary><b>🤔 Show Reasoning Process</b></summary>
@@ -299,72 +307,78 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
             else:
                 formatted_response = answer
 
-            # …
-            …
+            # Update display history with the formatted response
+            display_history.append({"role": "assistant", "content": formatted_response})
 
         except Exception as e:
             error_msg = f"❌ Error generating response: {str(e)}"
-            …
+            display_history.append({"role": "assistant", "content": error_msg})
+            # Don't add the error to model history to avoid confusing the model
 
         # Reset generating state
         is_generating = False
 
         # Final yield with complete response
         yield {
-            chatbot: …
-            …
+            chatbot: display_history,
+            display_history_state: display_history,
+            model_history_state: model_history,
             is_generating_state: is_generating,
             user_input: "",
             submit_btn: gr.update(interactive=True, value="Send")
         }
 
     def clear_history():
-        """Clear …"""
+        """Clear both display and model histories"""
         return {
             chatbot: [],
-            …
+            display_history_state: [],
+            model_history_state: [],
             is_generating_state: False,
             user_input: "",
             submit_btn: gr.update(interactive=True, value="Send")
         }
 
-    def retry_last(…
+    def retry_last(display_history: list, model_history: list, system_prompt_text: str,
                    temp: float, top_p_val: float, top_k_val: int,
                    min_p_val: float, max_tokens: int):
         """
-        Retry the last user message.
+        Retry the last user message with corrected history and generator handling.
         """
-        # Safety check: ensure there is a …
-        if not …
+        # Safety check: ensure there is a history and the last message was from the assistant
+        if not model_history or model_history[-1]["role"] != "assistant":
             # If nothing to retry, yield the current state and stop
             yield {
-                chatbot: …
-                …
+                chatbot: display_history,
+                display_history_state: display_history,
+                model_history_state: model_history,
                 is_generating_state: False
             }
             return
 
-        # Remove the last assistant message …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        # Remove the last assistant message from both histories
+        model_history.pop()    # Remove assistant's clean message from model history
+        display_history.pop()  # Remove assistant's formatted message from display history
+
+        # Get the last user message to resubmit it, then remove it from both histories
+        if model_history and model_history[-1]["role"] == "user":
+            last_user_msg = model_history[-1]["content"]
+            model_history.pop()    # Remove user message from model history
+            display_history.pop()  # Remove user message from display history
         else:
             # If no user message found, just return current state
             yield {
-                chatbot: …
-                …
+                chatbot: display_history,
+                display_history_state: display_history,
+                model_history_state: model_history,
                 is_generating_state: False
            }
            return
 
         # Use 'yield from' to properly call the generator and pass its updates
         yield from handle_user_message(
-            last_user_msg, …
-            temp, top_p_val, top_k_val, min_p_val, max_tokens
+            last_user_msg, display_history, model_history,
+            system_prompt_text, False, temp, top_p_val, top_k_val, min_p_val, max_tokens
         )
 
     def on_input_change(text, is_generating):
@@ -374,31 +388,35 @@ Think using bullet points and short sentences to simulate thoughts and emoticons
     # Event listeners
     submit_event = submit_btn.click(
         handle_user_message,
-        inputs=[user_input, …
-                temperature, top_p, top_k, min_p, max_new_tokens],
-        outputs=[chatbot, …
+        inputs=[user_input, display_history_state, model_history_state, system_prompt,
+                is_generating_state, temperature, top_p, top_k, min_p, max_new_tokens],
+        outputs=[chatbot, display_history_state, model_history_state, is_generating_state,
+                 user_input, submit_btn],
         show_progress=True
     )
 
     submit_event_enter = user_input.submit(
         handle_user_message,
-        inputs=[user_input, …
-                temperature, top_p, top_k, min_p, max_new_tokens],
-        outputs=[chatbot, …
+        inputs=[user_input, display_history_state, model_history_state, system_prompt,
+                is_generating_state, temperature, top_p, top_k, min_p, max_new_tokens],
+        outputs=[chatbot, display_history_state, model_history_state, is_generating_state,
+                 user_input, submit_btn],
         show_progress=True
     )
 
     # Clear button event
     clear_btn.click(
         clear_history,
-        outputs=[chatbot, …
+        outputs=[chatbot, display_history_state, model_history_state, is_generating_state,
+                 user_input, submit_btn]
    )
 
-    # Retry button event
+    # Retry button event - FIXED OUTPUTS
     retry_btn.click(
         retry_last,
-        inputs=[…
-        …
+        inputs=[display_history_state, model_history_state, system_prompt,
+                temperature, top_p, top_k, min_p, max_new_tokens],
+        outputs=[chatbot, display_history_state, model_history_state, is_generating_state],
         show_progress=True
     )
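
The pattern this commit lands is reusable beyond this Space: keep one gr.State holding plain-text turns for the model and another holding formatted turns for the chatbot, and implement retry by popping the last exchange from both before delegating back to the submit handler with yield from. Below is a minimal runnable sketch of that pattern, assuming Gradio 4+ with a messages-type Chatbot; fake_reply and the component names (box, retry) are illustrative stand-ins, not the app's own code.

import gradio as gr

def fake_reply(history: list) -> str:
    # Hypothetical stand-in for the Space's real generation call.
    return f"Echo: {history[-1]['content']}"

with gr.Blocks() as demo:
    display_state = gr.State([])  # formatted turns shown in the chatbot
    model_state = gr.State([])    # plain-text turns sent to the model

    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox(label="Message")
    retry = gr.Button("Retry")

    def submit(msg, display, model):
        # Append the user turn to both histories, show it, then answer.
        model.append({"role": "user", "content": msg})
        display.append({"role": "user", "content": msg})
        yield display, display, model, ""  # show the user turn immediately
        answer = fake_reply(model)
        model.append({"role": "assistant", "content": answer})                 # clean text
        display.append({"role": "assistant", "content": f"<b>{answer}</b>"})   # formatted
        yield display, display, model, ""

    def retry_last(display, model):
        # Pop the assistant turn from both histories, recover the user
        # turn, and delegate back to the submit generator.
        if not model or model[-1]["role"] != "assistant":
            yield display, display, model, ""
            return
        model.pop()
        display.pop()
        last_user = model.pop()["content"]
        display.pop()
        yield from submit(last_user, display, model)

    box.submit(submit, inputs=[box, display_state, model_state],
               outputs=[chatbot, display_state, model_state, box])
    retry.click(retry_last, inputs=[display_state, model_state],
                outputs=[chatbot, display_state, model_state, box])

demo.launch()

Keeping HTML such as the <details> reasoning block out of the model-side history is the point of the split: whatever lands in model_history is fed back to the model on the next turn, and markup there tends to leak into subsequent answers.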