UX Layout adjustment
app.py
CHANGED
@@ -46,23 +46,21 @@ class DynamicState:
     def ui_state_controller(self):
         """生成动态UI组件状态"""
         print("UPDATE UI!!")
-        # [control_button,
+        # [control_button, thought_editor, reset_button]
         lang_data = LANGUAGE_CONFIG[self.current_language]
         control_value = (
             lang_data["pause_btn"] if self.should_stream else lang_data["generate_btn"]
         )
         control_variant = "secondary" if self.should_stream else "primary"
-        status_value = (
+        status_suffix = (
             lang_data["completed"]
             if self.stream_completed
             else lang_data["interrupted"]
         )
+        editor_label = f"{lang_data['editor_label']} - {status_suffix}"
         return (
             gr.update(value=control_value, variant=control_variant),
-            gr.update(
-                value=status_value,
-            ),
-            gr.update(),
+            gr.update(label=editor_label),
             gr.update(interactive=not self.should_stream),
         )
 
@@ -186,7 +184,10 @@ class ConvoState:
             if dynamic_state.in_cot
             else lang_data["loading_output"]
         )
-        yield full_response, status, self.flatten_output()
+        editor_label = f"{lang_data['editor_label']} - {status}"
+        yield full_response, gr.update(
+            label=editor_label
+        ), self.flatten_output()
 
         interval = 1.0 / self.throughput
         start_time = time.time()
@@ -198,7 +199,10 @@
         except Exception as e:
             error_msg = LANGUAGE_CONFIG[self.current_language].get("error", "Error")
             full_response += f"\n\n[{error_msg}: {str(e)}]"
-            yield full_response, error_msg, self.flatten_output() + [
+            editor_label = f"{lang_data['editor_label']} - {error_msg}"
+            yield full_response, gr.update(
+                label=editor_label
+            ), self.flatten_output() + [
                 {
                     "role": "assistant",
                     "content": error_msg,
@@ -208,11 +212,15 @@ class ConvoState:
 
         finally:
             dynamic_state.should_stream = False
-            if "status" not in locals():
-                status = "Whoops... ERROR"
             if "response_stream" in locals():
                 response_stream.close()
-            yield full_response, status, self.flatten_output()
+            final_status = (
+                lang_data["completed"]
+                if dynamic_state.stream_completed
+                else lang_data["interrupted"]
+            )
+            editor_label = f"{lang_data['editor_label']} - {final_status}"
+            yield full_response, gr.update(label=editor_label), self.flatten_output()
 
 
 def update_interface_language(selected_lang, convo_state, dynamic_state):
@@ -220,14 +228,19 @@ def update_interface_language(selected_lang, convo_state, dynamic_state):
     convo_state.current_language = selected_lang
     dynamic_state.current_language = selected_lang
     lang_data = LANGUAGE_CONFIG[selected_lang]
+    base_editor_label = lang_data["editor_label"]
+    status_suffix = (
+        lang_data["completed"]
+        if dynamic_state.stream_completed
+        else lang_data["interrupted"]
+    )
+    editor_label = f"{base_editor_label} - {status_suffix}"
     return [
         gr.update(value=f"{lang_data['title']}"),
         gr.update(
             label=lang_data["prompt_label"], placeholder=lang_data["prompt_placeholder"]
         ),
-        gr.update(
-            label=lang_data["editor_label"], placeholder=lang_data["editor_placeholder"]
-        ),
+        gr.update(label=editor_label, placeholder=lang_data["editor_placeholder"]),
         gr.update(
             label=lang_data["sync_threshold_label"],
             info=lang_data["sync_threshold_info"],
@@ -256,7 +269,7 @@ theme = gr.themes.Base(font="system-ui", primary_hue="stone")
 
 with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
     convo_state = gr.State(ConvoState)
-    dynamic_state = gr.State(DynamicState)
+    dynamic_state = gr.State(DynamicState)
 
     with gr.Row(variant=""):
         title_md = gr.Markdown(
@@ -273,15 +286,38 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
 
     with gr.Row(equal_height=True):
 
-        # 思考编辑面板
         with gr.Column(scale=1, min_width=400):
+            prompt_input = gr.Textbox(
+                label=LANGUAGE_CONFIG["en"]["prompt_label"],
+                lines=2,
+                placeholder=LANGUAGE_CONFIG["en"]["prompt_placeholder"],
+                max_lines=5,
+            )
             thought_editor = gr.Textbox(
-                label=LANGUAGE_CONFIG["en"]["editor_label"],
+                label=f"{LANGUAGE_CONFIG['en']['editor_label']} - {LANGUAGE_CONFIG['en']['editor_default']}",
                 lines=16,
                 placeholder=LANGUAGE_CONFIG["en"]["editor_placeholder"],
                 autofocus=True,
                 elem_id="editor",
             )
+            with gr.Row():
+                control_button = gr.Button(
+                    value=LANGUAGE_CONFIG["en"]["generate_btn"], variant="primary"
+                )
+                next_turn_btn = gr.Button(
+                    value=LANGUAGE_CONFIG["en"]["clear_btn"], interactive=True
+                )
+
+        with gr.Column(scale=1, min_width=500):
+            chatbot = gr.Chatbot(
+                type="messages",
+                height=300,
+                value=LANGUAGE_CONFIG["en"]["bot_default"],
+                group_consecutive_messages=False,
+                show_copy_all_button=True,
+                show_share_button=True,
+                label=LANGUAGE_CONFIG["en"]["bot_label"],
+            )
         with gr.Row():
             sync_threshold_slider = gr.Slider(
                 minimum=0,
@@ -300,36 +336,10 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
                 info=LANGUAGE_CONFIG["en"]["throughput_info"],
             )
 
-        # 对话面板
-        with gr.Column(scale=1, min_width=500):
-            chatbot = gr.Chatbot(
-                type="messages",
-                height=300,
-                value=LANGUAGE_CONFIG["en"]["bot_default"],
-                group_consecutive_messages=False,
-                show_copy_all_button=True,
-                show_share_button=True,
-                label=LANGUAGE_CONFIG["en"]["bot_label"],
-            )
-            prompt_input = gr.Textbox(
-                label=LANGUAGE_CONFIG["en"]["prompt_label"],
-                lines=2,
-                placeholder=LANGUAGE_CONFIG["en"]["prompt_placeholder"],
-                max_lines=5,
-            )
-            with gr.Row():
-                control_button = gr.Button(
-                    value=LANGUAGE_CONFIG["en"]["generate_btn"], variant="primary"
-                )
-                next_turn_btn = gr.Button(
-                    value=LANGUAGE_CONFIG["en"]["clear_btn"], interactive=True
-                )
-            status_indicator = gr.Markdown(AppConfig.LOADING_DEFAULT)
     intro_md = gr.Markdown(LANGUAGE_CONFIG["en"]["introduction"], visible=False)
 
     # 交互逻辑
-
-    stateful_ui = (control_button, status_indicator, thought_editor, next_turn_btn)
+    stateful_ui = (control_button, thought_editor, next_turn_btn)
 
     throughput_control.change(
         lambda val, s: setattr(s, "throughput", val),
@@ -345,43 +355,39 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
         queue=False,
     )
 
-    def wrap_stream_generator(
-        convo_state, dynamic_state, prompt, content
-    ):  # Pass dynamic_state here
+    def wrap_stream_generator(convo_state, dynamic_state, prompt, content):
         for response in convo_state.generate_ai_response(
             prompt, content, dynamic_state
-        ):
+        ):
             yield response
 
-    gr.on(
+    gr.on(
         [control_button.click, prompt_input.submit, thought_editor.submit],
-        lambda d: d.control_button_handler(),
+        lambda d: d.control_button_handler(),
         [dynamic_state],
         stateful_ui,
         show_progress=False,
-    ).then(
-        wrap_stream_generator,
+    ).then(
+        wrap_stream_generator,
         [convo_state, dynamic_state, prompt_input, thought_editor],
-        [thought_editor, status_indicator, chatbot],
+        [thought_editor, thought_editor, chatbot],
        concurrency_limit=100,
-    ).then(
-        lambda d: d.ui_state_controller(),
+    ).then(
+        lambda d: d.ui_state_controller(),
        [dynamic_state],
        stateful_ui,
        show_progress=False,
    )
 
    next_turn_btn.click(
-        lambda d: d.reset_workspace(),
+        lambda d: d.reset_workspace(),
        [dynamic_state],
        stateful_ui + (thought_editor, prompt_input, chatbot),
        queue=False,
    )
 
    lang_selector.change(
-        lambda lang, s, d: update_interface_language(
-            lang, s, d
-        ),  # Pass dynamic_state to update_interface_language
+        lambda lang, s, d: update_interface_language(lang, s, d),
        [lang_selector, convo_state, dynamic_state],
        [
            title_md,
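Reviewer note on the new event chain: the separate status Markdown is gone, and every yield from generate_ai_response now carries three values — the streamed text, a gr.update() that rewrites the thought editor's label with the current status, and the flattened chat history — wired to [thought_editor, thought_editor, chatbot]. Below is a minimal, self-contained Python sketch of that streaming pattern; it is not this Space's code: fake_stream, LABELS and the label strings are illustrative stand-ins, and it folds value and label into a single gr.update() per yield instead of listing the Textbox twice in the outputs.

# Minimal sketch (not from this repo): stream text into a Textbox while folding the
# status into that same Textbox's label, with a Chatbot receiving the final message.
# fake_stream, LABELS and the label strings are hypothetical stand-ins.
import time

import gradio as gr

LABELS = {"editor_label": "Thought editor"}


def fake_stream(prompt):
    acc = ""
    for word in f"Thinking about: {prompt}".split():
        acc += word + " "
        # value and label travel in one gr.update(), so no separate status widget is needed
        yield gr.update(value=acc, label=f"{LABELS['editor_label']} - streaming"), []
        time.sleep(0.05)
    yield (
        gr.update(value=acc, label=f"{LABELS['editor_label']} - completed"),
        [{"role": "assistant", "content": acc.strip()}],
    )


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    editor = gr.Textbox(label=LABELS["editor_label"], lines=8)
    chat = gr.Chatbot(type="messages")
    gr.Button("Generate").click(fake_stream, [prompt], [editor, chat])

if __name__ == "__main__":
    demo.launch()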
lang.py
CHANGED
@@ -13,10 +13,10 @@ LANGUAGE_CONFIG = {
         "throughput_label": "⏱ Sync Rate",
         "throughput_info": "Tokens/s - 5:Learn, 10:Follow, 50:Skim",
         "language_label": "Language",
-        "loading_thinking": "🤖 AI Thinking",
-        "loading_output": "🖨️ Result Writing",
-        "interrupted": "🤔 Pause, Human thinking time",
-        "completed": "✅ Completed",
+        "loading_thinking": "🤖 AI Thinking ↓ Shift+Enter to Pause",
+        "loading_output": "🖨️ Result Writing ↓ Shift+Enter to Pause",
+        "interrupted": "🤔 Pause, Human thinking time - **EDIT THOUGHTS BELOW**",
+        "completed": "✅ Completed → Check overview",
         "error": "Error",
         "api_config_label": "API Configuration",
         "api_key_label": "API Key",
@@ -32,6 +32,7 @@ LANGUAGE_CONFIG = {
                 "content": "Welcome to our co-thinking space! Ready to synchronize our cognitive rhythms? \n Shall we start by adjusting the throughput slider to match your reading pace? \n Enter your task below, edit my thinking process when I pause, and let's begin weaving thoughts together →",
             },
         ],
+        "editor_default": "AI thought will start with this, leave blank to think freely",
     },
     "zh": {
         "title": "CoT-Lab: 人机协同思维实验室\n在一轮对话中跟随、学习、迭代思维链",
@@ -46,10 +47,10 @@ LANGUAGE_CONFIG = {
         "throughput_label": "⏱ 同步思考速度",
         "throughput_info": "词元/秒 - 5:学习, 10:跟读, 50:跳读",
         "language_label": "界面语言",
-        "loading_thinking": "🤖 AI思考中...",
-        "loading_output": "🖨️ 结果输出中...",
-        "interrupted": "🤔",
-        "completed": "✅ 已完成",
+        "loading_thinking": "🤖 AI思考中... **Shift+Enter**可暂停",
+        "loading_output": "🖨️ 结果输出中... **Shift+Enter**可暂停",
+        "interrupted": "🤔 暂停,人类思考回合 **下面的思考过程可以编辑**",
+        "completed": "✅ 已完成 → 查看完整对话",
         "error": "错误",
         "api_config_label": "API配置",
         "api_key_label": "API密钥",
@@ -66,5 +67,6 @@ LANGUAGE_CONFIG = {
             },
             {"role": "assistant", "content": "**Shift+Enter** 可以暂停/继续AI生成"},
         ],
+        "editor_default": "AI思维会以此开头,留空即为默认思考",
     },
 }
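The new editor_default, loading_*, interrupted and completed strings are meant to be joined onto the editor label as "<editor_label> - <status>", matching the app.py hunks above. A small hedged sketch of that composition follows; the trimmed LANGUAGE_CONFIG stand-in and the editor_label() helper are hypothetical, since the real editor_label base string is not shown in this diff.

# Hypothetical helper showing how the new lang.py keys compose the Textbox label.
# The trimmed LANGUAGE_CONFIG below is a stand-in, not the real dictionary.
LANGUAGE_CONFIG = {
    "en": {
        "editor_label": "Thought editor",
        "editor_default": "AI thought will start with this, leave blank to think freely",
        "completed": "✅ Completed → Check overview",
        "interrupted": "🤔 Pause, Human thinking time - **EDIT THOUGHTS BELOW**",
    }
}


def editor_label(lang: str, completed: bool) -> str:
    data = LANGUAGE_CONFIG[lang]
    suffix = data["completed"] if completed else data["interrupted"]
    return f"{data['editor_label']} - {suffix}"


if __name__ == "__main__":
    print(editor_label("en", completed=True))
    # Thought editor - ✅ Completed → Check overview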