openfree committed on
Commit 275364c · verified · 1 Parent(s): f932d41

Update app.py

Files changed (1):
  app.py +15 -27
app.py CHANGED
@@ -290,7 +290,6 @@ def convert_chat_messages_to_gradio_format(messages):
     """
     gradio_chat = []
     user_text, assistant_text = None, None
-
     for msg in messages:
         if msg.role == "user":
             if user_text is not None or assistant_text is not None:
@@ -304,10 +303,8 @@ def convert_chat_messages_to_gradio_format(messages):
                 assistant_text = msg.content
             else:
                 assistant_text += msg.content
-
     if user_text is not None or assistant_text is not None:
         gradio_chat.append((user_text or "", assistant_text or ""))
-
     return gradio_chat
 
 def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
@@ -316,17 +313,14 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
     """
     if not user_message.strip():
         user_message = "...(No content from user)..."
-
     try:
         print(f"\n=== [Gemini] New Request ===\nUser message: '{user_message}'")
         chat_history = format_chat_history(messages)
         chat = model.start_chat(history=chat_history)
         response = chat.send_message(user_message, stream=True)
-
         thought_buffer = ""
         response_buffer = ""
         thinking_complete = False
-
         # Add the "Thinking" role message
         messages.append(
             ChatMessage(
@@ -336,11 +330,9 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
             )
         )
         yield convert_chat_messages_to_gradio_format(messages)
-
         for chunk in response:
             parts = chunk.candidates[0].content.parts
             current_chunk = parts[0].text
-
             if len(parts) == 2 and not thinking_complete:
                 thought_buffer += current_chunk
                 messages[-1] = ChatMessage(
@@ -349,7 +341,6 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                 )
                 yield convert_chat_messages_to_gradio_format(messages)
-
                 response_buffer = parts[1].text
                 messages.append(ChatMessage(role="assistant", content=response_buffer))
                 thinking_complete = True
@@ -363,11 +354,8 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
                     content=thought_buffer,
                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                 )
-
                 yield convert_chat_messages_to_gradio_format(messages)
-
         print(f"\n=== [Gemini] Final Response ===\n{response_buffer}")
-
     except Exception as e:
         print(f"\n=== [Gemini] Error ===\n{str(e)}")
         messages.append(ChatMessage(role="assistant", content=f"I encountered an error: {str(e)}"))
@@ -381,15 +369,14 @@ def user_message(msg: str, history: list, doc_text: str) -> tuple[str, list]:
         user_query = f"Answer with reference to the following document:\n\n{doc_text}\n\nQuestion: {msg}"
     else:
         user_query = msg
-
     history.append(ChatMessage(role="user", content=user_query))
     return "", history
 
 def reset_states(file_a, file_b):
     """
-    Reset chat_history and md_state when a new file is uploaded (the Chatbot stays hidden)
+    Reset chat_history and md_state when a new file is uploaded
     """
-    return [], "", []
+    return [], "", ""
 
 ###############################
 # UI integration
@@ -405,27 +392,29 @@ if __name__ == "__main__":
         </div>
         """)
 
-        md_state = gr.State("")  # converted markdown text (combined result of both files)
+        # Visible Markdown component that shows the conversion result
+        conversion_md = gr.Markdown(label="Conversion result", visible=True)
+        md_state = gr.State("")  # internal state (document conversion result)
         chat_history = gr.State([])  # list of ChatMessage objects
 
-        # Single (hidden) Chatbot component: not shown on screen
-        hidden_chatbot = gr.Chatbot(visible=False)
+        # Visible Chatbot component
+        chatbot = gr.Chatbot(visible=True)
 
         with gr.Row():
             file_a = gr.File(label="Upload document A", file_types=[".pdf", ".png", ".jpeg", ".jpg", ".txt", ".csv"], interactive=True)
             file_b = gr.File(label="Upload document B", file_types=[".pdf", ".png", ".jpeg", ".jpg", ".txt", ".csv"], interactive=True)
             convert_btn = gr.Button("Convert for comparison")
 
-        # Reset state when a file is uploaded (Chatbot stays hidden)
+        # Reset state when a file is uploaded
         file_a.change(
             fn=reset_states,
             inputs=[file_a, file_b],
-            outputs=[chat_history, md_state, hidden_chatbot]
+            outputs=[chat_history, md_state, chatbot]
         )
         file_b.change(
             fn=reset_states,
             inputs=[file_a, file_b],
-            outputs=[chat_history, md_state, hidden_chatbot]
+            outputs=[chat_history, md_state, chatbot]
         )
 
         max_pages = gr.Slider(1, 20, 10, visible=False)
@@ -438,7 +427,7 @@ if __name__ == "__main__":
         convert_btn.click(
             fn=to_markdown_comparison,
             inputs=[file_a, file_b, max_pages, is_ocr, layout_mode, formula_enable, table_enable, language],
-            outputs=md_state,
+            outputs=conversion_md,
             show_progress=True
         )
 
@@ -455,24 +444,23 @@ if __name__ == "__main__":
455
  chat_input = gr.Textbox(lines=1, placeholder="์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”...")
456
  clear_btn = gr.Button("๋Œ€ํ™” ์ดˆ๊ธฐํ™”")
457
 
458
- # ์ฑ„ํŒ… ์ž…๋ ฅ ํ›„ LLM ์‘๋‹ต์€ hidden_chatbot์— ์ €์žฅ(ํ™”๋ฉด์— ๋ณด์ด์ง€ ์•Š์Œ)
459
  chat_input.submit(
460
  fn=user_message,
461
- inputs=[chat_input, chat_history, md_state],
462
  outputs=[chat_input, chat_history]
463
  ).then(
464
  fn=stream_gemini_response,
465
  inputs=[chat_input, chat_history],
466
- outputs=hidden_chatbot
467
  )
468
 
469
  def clear_all():
470
- return [], "", []
471
 
472
  clear_btn.click(
473
  fn=clear_all,
474
  inputs=[],
475
- outputs=[chat_history, md_state, hidden_chatbot]
476
  )
477
 
478
  demo.launch(server_name="0.0.0.0", server_port=7860, debug=True, ssr_mode=True)
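
Note: the streaming handlers above yield lists of (user, assistant) tuples, which is what a tuple-format gr.Chatbot renders, and every yield re-renders the now-visible Chatbot. Below is a minimal self-contained sketch of that submit/then streaming pattern, assuming Gradio 4.x tuple-format defaults; add_user_turn, stream_reply, and the component names are illustrative stand-ins, not taken from app.py.

import time
import gradio as gr

def add_user_turn(msg: str, history: list):
    # Step 1 (mirrors user_message): record the user turn and clear the box.
    return "", history + [(msg, "")]

def stream_reply(history: list):
    # Step 2 (mirrors stream_gemini_response): a generator that re-yields
    # the whole (user, assistant) tuple list; each yield re-renders the
    # visible Chatbot, which is what produces the streaming effect.
    user_msg = history[-1][0]
    reply = f"You said: {user_msg}"  # illustrative reply, no model call
    for i in range(1, len(reply) + 1):
        history[-1] = (user_msg, reply[:i])
        time.sleep(0.02)
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(value=[], visible=True)  # tuple-format history display
    box = gr.Textbox(placeholder="Enter your question...")
    # .submit() fires on Enter; .then() chains the streaming step after the
    # user turn has been recorded, the same split the commit uses.
    box.submit(add_user_turn, [box, chatbot], [box, chatbot]) \
       .then(stream_reply, [chatbot], [chatbot])

if __name__ == "__main__":
    demo.launch()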
 
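One design note on the new wiring: conversion_md serves double duty. convert_btn writes the comparison markdown into it as an output, and chat_input.submit reads the same component back as an input, so the visible conversion result is also the chat context. A small sketch of that read-back pattern follows, with hypothetical convert and ask handlers standing in for to_markdown_comparison and user_message.

import gradio as gr

def convert(text_a: str, text_b: str) -> str:
    # Hypothetical stand-in for to_markdown_comparison: merge both inputs
    # into one markdown string for the Markdown component to display.
    return f"## Document A\n{text_a}\n\n## Document B\n{text_b}"

def ask(question: str, doc_md: str) -> str:
    # The Markdown component's current value arrives as a plain string,
    # just as conversion_md is passed into user_message in app.py.
    return f"Answering {question!r} against {len(doc_md)} chars of context."

with gr.Blocks() as demo:
    a = gr.Textbox(label="Document A")
    b = gr.Textbox(label="Document B")
    conversion_md = gr.Markdown(value="", visible=True)
    gr.Button("Convert").click(convert, inputs=[a, b], outputs=conversion_md)

    q = gr.Textbox(label="Question")
    answer = gr.Textbox(label="Answer")
    # conversion_md appears here as an *input*, so the handler receives
    # the markdown produced by the click handler above.
    q.submit(ask, inputs=[q, conversion_md], outputs=answer)

if __name__ == "__main__":
    demo.launch()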