add PDF reader component, avatar, and clear button
Changed files:
- data/avatar-bot.png     +0 -0
- data/avatar-user.png    +0 -0
- data/sample.txt         +0 -1
- pyproject.toml          +1 -0
- requirements-dev.lock   +2 -0
- requirements.lock       +2 -0
- src/pdfchat/app.py      +33 -22
data/avatar-bot.png
ADDED (binary file)

data/avatar-user.png
ADDED (binary file)

data/sample.txt
DELETED
@@ -1 +0,0 @@
-hello
pyproject.toml
CHANGED
@@ -9,6 +9,7 @@ dependencies = [
     "gradio>=4.19.2",
     "langchain>=0.1.9",
     "gradio-pdf>=0.0.5",
+    "loguru>=0.7.2",
 ]
 readme = "README.md"
 requires-python = ">= 3.8"
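The only functional addition here is the loguru runtime dependency, which the two lock files below pin at 0.7.2; src/pdfchat/app.py further down imports its logger and calls logger.info(history) inside bot(). As a quick standalone illustration (not part of the diff), loguru needs no setup before its first call:

    from loguru import logger

    # The default sink writes a timestamped, leveled line to stderr;
    # logger.info() accepts any object and formats it with str().
    logger.info({"query": "hello", "response": "world"})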
requirements-dev.lock
CHANGED
@@ -122,6 +122,8 @@ langsmith==0.1.8
     # via langchain
     # via langchain-community
     # via langchain-core
+loguru==0.7.2
+    # via pdfchat
 markdown-it-py==3.0.0
     # via rich
 markupsafe==2.1.5
requirements.lock
CHANGED
@@ -109,6 +109,8 @@ langsmith==0.1.8
     # via langchain
     # via langchain-community
     # via langchain-core
+loguru==0.7.2
+    # via pdfchat
 markdown-it-py==3.0.0
     # via rich
 markupsafe==2.1.5
src/pdfchat/app.py
CHANGED
@@ -1,9 +1,9 @@
-import time
 from dataclasses import dataclass
 from pathlib import Path
 
 import gradio as gr
-from icecream import ic
+from gradio_pdf import PDF
+from loguru import logger
 
 MODEL_CALM2 = "cyberagent/calm2"
 
@@ -56,31 +56,23 @@ def bot(history: ChatHistory, query: str, file_path: str) -> ChatHistory:
         return history
     document = open_file(file_path)
     history.add_chat(Chat(query=query, response=document))
-
+    logger.info(history)
 
     # TODO: use streaming inference
     return history
 
 
 with gr.Blocks() as app:
+    gr.Markdown("# Chat with PDF")
     with gr.Row():
-        with gr.Column(scale=
+        with gr.Column(scale=35):
             model_name = gr.Dropdown(
                 choices=[MODEL_CALM2],
                 value=MODEL_CALM2,
                 label="Model",
             )
-            file_box = gr.File(
+            file_box = PDF(
                 label="Document",
-                file_types=[".pdf", ".txt"],
-                file_count="single",
-                container=False,
-            )
-            gr.Examples(
-                examples=[["data/sample.txt"], ["data/sample.pdf"]],
-                inputs=[file_box],
-                outputs=[],
-                fn=lambda model_name, document: None,
             )
         with gr.Accordion("Parameters", open=False):
             temperature_slider = gr.Slider(
@@ -91,24 +83,43 @@ with gr.Blocks() as app:
             minimum=0.1, maximum=1.0, value=0.5, label="Top P"
         )
         top_p_slider.change(lambda x: x, [top_p_slider])
-        with gr.Column(scale=
+        with gr.Column(scale=65):
             chatbot = gr.Chatbot(
                 bubble_full_width=False,
                 height=650,
+                show_copy_button=True,
+                avatar_images=(
+                    Path("data/avatar-user.png"),
+                    Path("data/avatar-bot.png"),
+                ),
+            )
+            text_box = gr.Textbox(
+                lines=2,
+                label="Chat message",
+                show_label=False,
+                placeholder="Type your message here",
+                container=False,
             )
-            ic(chatbot)
             with gr.Row():
-                text_box = gr.Textbox(
-
-                    show_label=False,
-                    placeholder="Type your message here",
-                    container=False,
+                clear_button = gr.ClearButton(
+                    [text_box, chatbot, file_box], variant="secondary", size="sm"
                 )
-                submit_button = gr.Button("Submit",
+                submit_button = gr.Button("Submit", variant="primary", size="sm")
             submit = submit_button.click(
                 fn=bot,
                 inputs=[chatbot, text_box, file_box],
                 outputs=chatbot,
             )
+            examples = gr.Examples(
+                examples=[
+                    [
+                        "data/sample.pdf",
+                        "胃がん手術の説明書の要点を箇条書きで要約してください",
+                    ]
+                ],
+                inputs=[file_box, text_box],
+                outputs=[],
+                fn=lambda model_name, document: None,
+            )
 
 app.queue().launch(debug=True)
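For reference, a minimal self-contained sketch of the three UI pieces this commit introduces (the gradio_pdf PDF viewer, gr.ClearButton, and Chatbot avatar images), assuming the versions pinned above (gradio>=4.19.2, gradio-pdf>=0.0.5). The echo() callback and component names are illustrative stand-ins, not the app's actual bot() wiring:

    from pathlib import Path

    import gradio as gr
    from gradio_pdf import PDF

    def echo(history, message, pdf_path):
        # Stand-in for bot(): append a (user, assistant) pair to the history.
        history = history or []
        history.append((message, f"got {len(message)} chars, file={pdf_path}"))
        return history

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=35):
                pdf_box = PDF(label="Document")  # renders an uploaded PDF inline
            with gr.Column(scale=65):
                chat = gr.Chatbot(
                    avatar_images=(
                        Path("data/avatar-user.png"),  # icon for user turns
                        Path("data/avatar-bot.png"),   # icon for bot turns
                    ),
                )
                msg = gr.Textbox(placeholder="Type your message here")
                with gr.Row():
                    # ClearButton resets every component handed to it in one click
                    gr.ClearButton([msg, chat, pdf_box], size="sm")
                    send = gr.Button("Submit", variant="primary", size="sm")
                send.click(echo, inputs=[chat, msg, pdf_box], outputs=chat)

    demo.queue().launch()

Passing the PDF component in the ClearButton's component list is what lets one click reset the conversation, the message box, and the loaded document together.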