binh99 committed
Commit
374c4cd
·
1 Parent(s): a16851f

First commit

.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
+ # Editor-based HTTP Client requests
+ /httpRequests/
+ # Datasource local storage ignored files
+ /dataSources/
+ /dataSources.local.xml
.idea/Chat_QnA_v2.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,34 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="14">
+             <item index="0" class="java.lang.String" itemvalue="scikit-image" />
+             <item index="1" class="java.lang.String" itemvalue="scipy" />
+             <item index="2" class="java.lang.String" itemvalue="thop" />
+             <item index="3" class="java.lang.String" itemvalue="tabulate" />
+             <item index="4" class="java.lang.String" itemvalue="opencv-python" />
+             <item index="5" class="java.lang.String" itemvalue="torch" />
+             <item index="6" class="java.lang.String" itemvalue="numpy" />
+             <item index="7" class="java.lang.String" itemvalue="loguru" />
+             <item index="8" class="java.lang.String" itemvalue="torchvision" />
+             <item index="9" class="java.lang.String" itemvalue="tqdm" />
+             <item index="10" class="java.lang.String" itemvalue="matplotlib" />
+             <item index="11" class="java.lang.String" itemvalue="pycocotools" />
+             <item index="12" class="java.lang.String" itemvalue="opencv_python" />
+             <item index="13" class="java.lang.String" itemvalue="Pillow" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+     <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+       <option name="ignoredErrors">
+         <list>
+           <option value="N806" />
+         </list>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/Chat_QnA_v2.iml" filepath="$PROJECT_DIR$/.idea/Chat_QnA_v2.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="$PROJECT_DIR$" vcs="Git" />
+   </component>
+ </project>
app.py ADDED
@@ -0,0 +1,256 @@
+ import os
+ import gradio as gr
+ import openai
+ import re
+ import numpy as np
+ from sklearn.neighbors import NearestNeighbors
+ import tensorflow_hub as hub
+ import fitz
+
+ my_api_key = os.getenv("OPENAI_API_KEY")  # read the key from the environment; do not hardcode secrets
+ openai.api_key = my_api_key
+
+
+ def add_source_numbers(lst, source_name="Source", use_source=True):
+     if use_source:
+         return [f'[{idx + 1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)]
+     else:
+         return [f'[{idx + 1}]\t "{item}"' for idx, item in enumerate(lst)]
+
+
+ def add_details(lst):
+     nodes = []
+     for index, txt in enumerate(lst):
+         brief = txt[:25].replace("\n", "")
+         nodes.append(
+             f"<details><summary>{brief}...</summary><p>{txt}</p></details>"
+         )
+     return nodes
+
+
+ prompt_template = "Instructions: Compose a comprehensive reply to the query using the search results given. " \
+                   "Make sure to cite results using [number] notation after the reference. " \
+                   "If the search results mention multiple subjects " \
+                   "with the same name, create separate answers for each. Only include information found in the results and " \
+                   "don't add any additional information. Make sure the answer is correct and don't output false content. " \
+                   "Ignore outlier search results which have nothing to do with the question. Only answer what is asked. " \
+                   "The answer should be short and concise.\n\nQuery: {question}\nAnswer: "
+
+ # MODELS = ["universal-sentence-encoder", "instructor-large"]
+ MODELS = ["text-davinci-001", "text-davinci-002", "text-davinci-003"]
+
+
+ def add_source_numbers(lst):  # NOTE: overrides the earlier add_source_numbers defined above
+     return [item[:3] + '\t' + item[3:] for item in lst]
+
+
+ def add_details(lst):  # NOTE: identical to the earlier add_details; this definition is the one in effect
+     nodes = []
+     for index, txt in enumerate(lst):
+         brief = txt[:25].replace("\n", "")
+         nodes.append(
+             f"<details><summary>{brief}...</summary><p>{txt}</p></details>"
+         )
+     return nodes
+
+
+ def preprocess(text):
+     text = text.replace('\n', ' ')
+     text = re.sub(r'\s+', ' ', text)
+     return text
+
+
+ def pdf_to_text(files_src, start_page=1, end_page=None):
+     text_list = []
+     for file in files_src:
+         if os.path.splitext(file.name)[1] == ".pdf":
+             doc = fitz.open(file.name)
+             total_pages = doc.page_count
+             if end_page is None:
+                 end_page = total_pages
+             for i in range(start_page - 1, end_page):
+                 text = doc.load_page(i).get_text("text")
+                 text = preprocess(text)
+                 text_list.append(text)
+             doc.close()
+
+     return text_list
+
+
+ def text_to_chunks(texts, word_length=150, start_page=1):
+     text_toks = [t.split(' ') for t in texts]
+     page_nums = []
+     chunks = []
+
+     for idx, words in enumerate(text_toks):
+         for i in range(0, len(words), word_length):
+             chunk = words[i:i + word_length]
+             if (i + word_length) > len(words) and (len(chunk) < word_length) and (
+                     len(text_toks) != (idx + 1)):
+                 text_toks[idx + 1] = chunk + text_toks[idx + 1]
+                 continue
+             chunk = ' '.join(chunk).strip()
+             chunk = f'[{idx + start_page}]' + ' ' + '"' + chunk + '"'
+             chunks.append(chunk)
+     return chunks
+
+
+ def embedding(model, files_src, batch=1000):
+     name_file = '_'.join([os.path.basename(file.name).split('.')[0] for file in files_src])
+     embeddings_file = f"{name_file}.npy"
+     texts = pdf_to_text(files_src)
+     chunks = text_to_chunks(texts)
+     if os.path.isfile(embeddings_file):
+         embeddings = np.load(embeddings_file)
+         return embeddings, chunks
+     # texts = pdf_to_text(files_src)
+     # chunks = text_to_chunks(texts)
+     data = chunks
+     embeddings = []
+     for i in range(0, len(data), batch):
+         text_batch = data[i:(i + batch)]
+         emb_batch = model(text_batch)
+         embeddings.append(emb_batch)
+     embeddings = np.vstack(embeddings)
+     np.save(embeddings_file, embeddings)
+     return embeddings, chunks
+
+
+ def get_top_chunks(inp_emb, data, n_neighbors=5):
+     n_neighbors = min(n_neighbors, len(data))
+     nn = NearestNeighbors(n_neighbors=n_neighbors)
+     nn.fit(data)
+     neighbors = nn.kneighbors(inp_emb, return_distance=False)[0]
+     return neighbors
+
+
+ def predict(
+         history,
+         chatbot,
+         inputs,
+         temperature,
+         selected_model=MODELS[0],
+         files=None
+ ):
+     old_inputs = None
+     display_reference = []
+     if files:
+         old_inputs = inputs
+         emb_model = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
+
+         vector_emb, chunks = embedding(emb_model, files)
+
+         input_emb = emb_model([inputs])
+         index_top_chunks = get_top_chunks(input_emb, vector_emb)
+         topn_chunks = [chunks[i] for i in index_top_chunks]
+         prompt = ""
+         prompt += 'search results:\n\n'
+         for c in topn_chunks:
+             prompt += c + '\n\n'
+         prompt += prompt_template
+         prompt += f"Query: {inputs}\nAnswer:"
+         inputs = prompt
+         reference_results = add_source_numbers(topn_chunks)
+         display_reference = add_details(reference_results)
+         display_reference = "\n\n" + "".join(display_reference)
+     else:
+         display_reference = ""
+
+     history.append(inputs)
+     if old_inputs:
+         chatbot.append((old_inputs, ""))
+     else:
+         chatbot.append((inputs, ""))
+     completions = openai.Completion.create(
+         engine=selected_model,
+         prompt=inputs,
+         max_tokens=256,
+         stop=None,
+         temperature=temperature,
+     )
+     message = completions.choices[0].text
+     # print(message)
+     if old_inputs is not None:
+         history[-1] = old_inputs
+     chatbot[-1] = (chatbot[-1][0], message + display_reference)
+     return chatbot, history
+
+
+ # Create theme
+ with open("custom.css", "r", encoding="utf-8") as f:
+     customCSS = f.read()
+ beautiful_theme = gr.themes.Soft(
+     primary_hue=gr.themes.Color(
+         c50="#02C160",
+         c100="rgba(2, 193, 96, 0.2)",
+         c200="#02C160",
+         c300="rgba(2, 193, 96, 0.32)",
+         c400="rgba(2, 193, 96, 0.32)",
+         c500="rgba(2, 193, 96, 1.0)",
+         c600="rgba(2, 193, 96, 1.0)",
+         c700="rgba(2, 193, 96, 0.32)",
+         c800="rgba(2, 193, 96, 0.32)",
+         c900="#02C160",
+         c950="#02C160",
+     ),
+     radius_size=gr.themes.sizes.radius_sm,
+ ).set(
+     button_primary_background_fill="#06AE56",
+     button_primary_background_fill_dark="#06AE56",
+     button_primary_background_fill_hover="#07C863",
+     button_primary_border_color="#06AE56",
+     button_primary_border_color_dark="#06AE56",
+     button_primary_text_color="#FFFFFF",
+     button_primary_text_color_dark="#FFFFFF",
+     block_title_text_color="*primary_500",
+     block_title_background_fill="*primary_100",
+     input_background_fill="#F6F6F6",
+ )
+
+ # Gradio app
+ title = """<h1 align="left" style="min-width:200px; margin-top:6px; white-space: nowrap;">ChatGPT 🚀</h1>"""
+ with gr.Blocks(css=customCSS, theme=beautiful_theme) as demo:
+     history = gr.State([])
+     user_question = gr.State("")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.HTML(title)
+
+     with gr.Row().style(equal_height=True):
+         with gr.Column(scale=5):
+             with gr.Row():
+                 chatbot = gr.Chatbot(elem_id="chatbot").style(height="100%")
+             with gr.Row():
+                 with gr.Column(scale=12):
+                     user_input = gr.Textbox(
+                         show_label=False, placeholder="Enter here"
+                     ).style(container=False)
+                 with gr.Column(min_width=70, scale=1):
+                     submitBtn = gr.Button("Send", variant="primary")
+
+         with gr.Column():
+             with gr.Column(min_width=50, scale=1):
+                 with gr.Tab(label="ChatGPT"):
+                     model_select_dropdown = gr.Dropdown(
+                         label="Select model", choices=MODELS, multiselect=False, value=MODELS[0]
+                     )
+                     index_files = gr.Files(label="Files", type="file", multiple=True)
+                     gr.Markdown(
+                         "⚠️ Be careful when changing these settings ⚠️\n\nIf the app stops working, restore the default settings.")
+                     with gr.Accordion("Parameter", open=False):
+                         temperature = gr.Slider(
+                             minimum=0,
+                             maximum=1.0,
+                             value=0.0,
+                             step=0.1,
+                             interactive=True,
+                             label="Temperature",
+                         )
+     user_input.submit(predict, inputs=[history, chatbot, user_input, temperature, model_select_dropdown, index_files],
+                       outputs=[chatbot, history])
+     user_input.submit(lambda: "", None, user_input)
+     submitBtn.click(predict, inputs=[history, chatbot, user_input, temperature, model_select_dropdown, index_files],
+                     outputs=[chatbot, history])
+     submitBtn.click(lambda: "", None, user_input)
+ demo.queue(concurrency_count=10).launch(server_name="0.0.0.0", server_port=7860, debug=True)
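
The retrieval path in `predict` chains `pdf_to_text` → `text_to_chunks` → `embedding` → `get_top_chunks` before the assembled prompt is sent to `openai.Completion`. A minimal sketch of that flow outside the Gradio UI, assuming the helpers have been made importable from `app` (as committed, app.py builds and launches the interface at import time, so they would first need to be factored out or guarded behind `if __name__ == "__main__":`) and assuming a hypothetical local `paper.pdf`:

```python
import tensorflow_hub as hub

from app import embedding, get_top_chunks  # assumes the helpers were factored into an importable module


class LocalFile:
    """Hypothetical stand-in for a Gradio file object; app.py only reads .name."""

    def __init__(self, name):
        self.name = name


emb_model = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
files = [LocalFile("paper.pdf")]  # hypothetical local PDF

# Split the PDF into ~150-word chunks and embed them (cached to <name>.npy on disk).
vectors, chunks = embedding(emb_model, files)

# Embed the question and retrieve the nearest chunks, exactly as predict() does.
query_emb = emb_model(["What is the main contribution?"])
top_ids = get_top_chunks(query_emb, vectors, n_neighbors=3)
for i in top_ids:
    print(chunks[i])  # these chunks are what predict() pastes into the completion prompt
```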
custom.css ADDED
@@ -0,0 +1,239 @@
+ :root {
+     --chatbot-color-light: #F3F3F3;
+     --chatbot-color-dark: #121111;
+ }
+
+ /* Hide the default gradio footer */
+ footer {
+     display: none !important;
+ }
+ #footer {
+     text-align: center;
+ }
+ #footer div {
+     display: inline-block;
+ }
+ #footer .versions {
+     font-size: 85%;
+     opacity: 0.85;
+ }
+
+ /* status_display */
+ #status_display {
+     display: flex;
+     min-height: 2.5em;
+     align-items: flex-end;
+     justify-content: flex-end;
+ }
+ #status_display p {
+     font-size: .85em;
+     font-family: monospace;
+     color: var(--body-text-color-subdued);
+ }
+
+ #chuanhu_chatbot, #status_display {
+     transition: all 0.6s;
+ }
+
+ /* usage_display */
+ #usage_display {
+     position: relative;
+     margin: 0;
+     box-shadow: var(--block-shadow);
+     border-width: var(--block-border-width);
+     border-color: var(--block-border-color);
+     border-radius: var(--block-radius);
+     background: var(--block-background-fill);
+     width: 100%;
+     line-height: var(--line-sm);
+     min-height: 2em;
+ }
+ #usage_display p, #usage_display span {
+     margin: 0;
+     padding: .5em 1em;
+     font-size: .85em;
+     color: var(--body-text-color-subdued);
+ }
+ .progress-bar {
+     background-color: var(--input-background-fill);
+     margin: 0 1em;
+     height: 20px;
+     border-radius: 10px;
+     overflow: hidden;
+ }
+ .progress {
+     background-color: var(--block-title-background-fill);
+     height: 100%;
+     border-radius: 10px;
+     text-align: right;
+     transition: width 0.5s ease-in-out;
+ }
+ .progress-text {
+     /* color: white; */
+     color: var(--color-accent) !important;
+     font-size: 1em !important;
+     font-weight: bold;
+     padding-right: 10px;
+     line-height: 20px;
+ }
+ /* list */
+ ol:not(.options), ul:not(.options) {
+     padding-inline-start: 2em !important;
+ }
+
+ /* Light mode */
+ @media (prefers-color-scheme: light) {
+     #chuanhu_chatbot {
+         background-color: var(--chatbot-color-light) !important;
+         color: #000000 !important;
+     }
+     [data-testid = "bot"] {
+         background-color: #FFFFFF !important;
+     }
+     [data-testid = "user"] {
+         background-color: #95EC69 !important;
+     }
+ }
+ /* Dark mode */
+ @media (prefers-color-scheme: dark) {
+     #chuanhu_chatbot {
+         background-color: var(--chatbot-color-dark) !important;
+         color: #FFFFFF !important;
+     }
+     [data-testid = "bot"] {
+         background-color: #2C2C2C !important;
+     }
+     [data-testid = "user"] {
+         background-color: #26B561 !important;
+     }
+     body {
+         background-color: var(--neutral-950) !important;
+     }
+ }
+
+ /* Chat bubbles */
+ [class *= "message"] {
+     border-radius: var(--radius-xl) !important;
+     border: none;
+     padding: var(--spacing-xl) !important;
+     font-size: var(--text-md) !important;
+     line-height: var(--line-md) !important;
+     min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
+     min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
+ }
+ [data-testid = "bot"] {
+     max-width: 85%;
+     border-bottom-left-radius: 0 !important;
+ }
+ [data-testid = "user"] {
+     max-width: 85%;
+     width: auto !important;
+     border-bottom-right-radius: 0 !important;
+ }
+ /* Tables */
+ table {
+     margin: 1em 0;
+     border-collapse: collapse;
+     empty-cells: show;
+ }
+ td, th {
+     border: 1.2px solid var(--border-color-primary) !important;
+     padding: 0.2em;
+ }
+ thead {
+     background-color: rgba(175,184,193,0.2);
+ }
+ thead th {
+     padding: .5em .2em;
+ }
+ /* Inline code */
+ code {
+     display: inline;
+     white-space: break-spaces;
+     border-radius: 6px;
+     margin: 0 2px 0 2px;
+     padding: .2em .4em .1em .4em;
+     background-color: rgba(175,184,193,0.2);
+ }
+ /* Code blocks */
+ pre code {
+     display: block;
+     overflow: auto;
+     white-space: pre;
+     background-color: hsla(0, 0%, 0%, 80%) !important;
+     border-radius: 10px;
+     padding: 1.4em 1.2em 0em 1.4em;
+     margin: 1.2em 2em 1.2em 0.5em;
+     color: #FFF;
+     box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
+ }
+ /* Syntax highlighting styles */
+ .highlight .hll { background-color: #49483e }
+ .highlight .c { color: #75715e } /* Comment */
+ .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
+ .highlight .k { color: #66d9ef } /* Keyword */
+ .highlight .l { color: #ae81ff } /* Literal */
+ .highlight .n { color: #f8f8f2 } /* Name */
+ .highlight .o { color: #f92672 } /* Operator */
+ .highlight .p { color: #f8f8f2 } /* Punctuation */
+ .highlight .ch { color: #75715e } /* Comment.Hashbang */
+ .highlight .cm { color: #75715e } /* Comment.Multiline */
+ .highlight .cp { color: #75715e } /* Comment.Preproc */
+ .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
+ .highlight .c1 { color: #75715e } /* Comment.Single */
+ .highlight .cs { color: #75715e } /* Comment.Special */
+ .highlight .gd { color: #f92672 } /* Generic.Deleted */
+ .highlight .ge { font-style: italic } /* Generic.Emph */
+ .highlight .gi { color: #a6e22e } /* Generic.Inserted */
+ .highlight .gs { font-weight: bold } /* Generic.Strong */
+ .highlight .gu { color: #75715e } /* Generic.Subheading */
+ .highlight .kc { color: #66d9ef } /* Keyword.Constant */
+ .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
+ .highlight .kn { color: #f92672 } /* Keyword.Namespace */
+ .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
+ .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
+ .highlight .kt { color: #66d9ef } /* Keyword.Type */
+ .highlight .ld { color: #e6db74 } /* Literal.Date */
+ .highlight .m { color: #ae81ff } /* Literal.Number */
+ .highlight .s { color: #e6db74 } /* Literal.String */
+ .highlight .na { color: #a6e22e } /* Name.Attribute */
+ .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
+ .highlight .nc { color: #a6e22e } /* Name.Class */
+ .highlight .no { color: #66d9ef } /* Name.Constant */
+ .highlight .nd { color: #a6e22e } /* Name.Decorator */
+ .highlight .ni { color: #f8f8f2 } /* Name.Entity */
+ .highlight .ne { color: #a6e22e } /* Name.Exception */
+ .highlight .nf { color: #a6e22e } /* Name.Function */
+ .highlight .nl { color: #f8f8f2 } /* Name.Label */
+ .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
+ .highlight .nx { color: #a6e22e } /* Name.Other */
+ .highlight .py { color: #f8f8f2 } /* Name.Property */
+ .highlight .nt { color: #f92672 } /* Name.Tag */
+ .highlight .nv { color: #f8f8f2 } /* Name.Variable */
+ .highlight .ow { color: #f92672 } /* Operator.Word */
+ .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
+ .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
+ .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
+ .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
+ .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
+ .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
+ .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
+ .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
+ .highlight .sc { color: #e6db74 } /* Literal.String.Char */
+ .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
+ .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
+ .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
+ .highlight .se { color: #ae81ff } /* Literal.String.Escape */
+ .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
+ .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
+ .highlight .sx { color: #e6db74 } /* Literal.String.Other */
+ .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
+ .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
+ .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
+ .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
+ .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
+ .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
+ .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
+ .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
+ .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
+ .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ gradio
+ markdown
+ PyMuPDF
+ openai
+ tensorflow_hub
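
Note that app.py also imports `numpy` and `scikit-learn` (for `NearestNeighbors`), and `tensorflow_hub` needs TensorFlow installed to load the encoder, none of which are listed above, so an environment built from this file alone may fail at import. A quick sanity check, with module names taken from the imports in app.py:

```python
# Verify that everything app.py imports is actually installed.
import importlib

required = ["gradio", "openai", "numpy", "sklearn", "tensorflow_hub", "fitz"]
missing = []
for name in required:
    try:
        importlib.import_module(name)
    except ImportError:
        missing.append(name)

print("missing modules:", missing or "none")
```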