Update app.py
app.py
CHANGED
@@ -31,11 +31,18 @@ prompt_template = "Instructions: Compose a comprehensive reply to the query usin
     "with the same name, create separate answers for each. Only include information found in the results and " \
     "don't add any additional information. Make sure the answer is correct and don't output false content. " \
     "Ignore outlier search results which has nothing to do with the question. Only answer what is asked. " \
-    "The answer should be short and concise. \n\nQuery: {question}\nAnswer: "
+    "The answer should be short and concise. Reply in {reply_language}. \n\nQuery: {question}\nAnswer: "
 
 # MODELS = ["universal-sentence-encoder", "instructor-large"]
 MODELS = ["text-davinci-001", "text-davinci-002", "text-davinci-003"]
-
+LANGUAGES = [
+    "English",
+    "简体中文",
+    "日本語",
+    "Deutsch",
+    "Vietnamese",
+    "跟随问题语言(不稳定)"
+]
 def set_openai_api_key(my_api_key):
     openai.api_key = my_api_key
     return gr.update(visible = True)
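For orientation, here is a small, self-contained sketch of how the new {reply_language} placeholder and the LANGUAGES list are meant to work together. The shortened template and the fill_template helper are illustrative only and are not part of the commit.

```python
# Illustrative only: a shortened template with the same placeholders as app.py's prompt_template.
prompt_template = (
    "Only include information found in the results. "
    "The answer should be short and concise. Reply in {reply_language}. "
    "\n\nQuery: {question}\nAnswer: "
)

LANGUAGES = [
    "English",
    "简体中文",
    "日本語",
    "Deutsch",
    "Vietnamese",
    "跟随问题语言(不稳定)",  # "follow the question's language (unstable)"
]

def fill_template(question: str, lang: str = LANGUAGES[0]) -> str:
    # str.replace returns a new string; the module-level template is left untouched.
    return prompt_template.replace("{reply_language}", lang).replace("{question}", question)

print(fill_template("What is the refund policy?", "Deutsch"))
```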
@@ -127,6 +134,7 @@ def predict(
     chatbot,
     inputs,
     temperature,
+    lang = LANGUAGES[0],
     selected_model=MODELS[0],
     files=None
 ):
@@ -142,6 +150,7 @@ def predict(
     topn_chunks = [chunks[i] for i in index_top_chunks]
     prompt = ""
     prompt += 'search results:\n\n'
+    prompt_template.replace("{reply_language}", lang)
     for c in topn_chunks:
         prompt += c + '\n\n'
     prompt += prompt_template
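One detail worth spelling out: str.replace returns a new string, so prompt_template.replace("{reply_language}", lang) on its own leaves prompt_template unchanged. Below is a minimal sketch of the prompt assembly that captures the substituted template; build_prompt and its arguments are hypothetical names, not code from app.py.

```python
def build_prompt(topn_chunks, prompt_template, lang):
    # Hypothetical helper mirroring the assembly in predict():
    # start with the retrieved chunks, then append the instruction template
    # with the reply language substituted in.
    prompt = "search results:\n\n"
    for c in topn_chunks:
        prompt += c + "\n\n"
    # Capture the result of replace(); the original template string stays unchanged.
    prompt += prompt_template.replace("{reply_language}", lang)
    return prompt

chunks = ["chunk one ...", "chunk two ..."]
template = "Answer briefly. Reply in {reply_language}.\n\nQuery: {question}\nAnswer: "
print(build_prompt(chunks, template, "English"))
```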
@@ -234,6 +243,9 @@ with gr.Blocks(css=customCSS, theme=beautiful_theme) as demo:
                 model_select_dropdown = gr.Dropdown(
                     label="Select model", choices=MODELS, multiselect=False, value=MODELS[0]
                 )
+                language_select_dropdown = gr.Dropdown(
+                    label="Select reply language", choices=LANGUAGES, multiselect=False, value=LANGUAGES[0]
+                )
                 index_files = gr.Files(label="Files", type="file", multiple=True)
             with gr.Tab(label="Advanced"):
                 gr.Markdown(
@@ -248,10 +260,10 @@ with gr.Blocks(css=customCSS, theme=beautiful_theme) as demo:
         label="Temperature",
     )
     openAI_key.submit(set_openai_api_key, [openAI_key], [input_raws])
-    user_input.submit(predict, inputs=[history, chatbot, user_input, temperature, model_select_dropdown, index_files],
+    user_input.submit(predict, inputs=[history, chatbot, user_input, temperature, language_select_dropdown, model_select_dropdown, index_files],
                       outputs=[chatbot, history])
     user_input.submit(lambda: "", None, user_input)
-    submitBtn.click(predict, inputs=[history, chatbot, user_input, temperature, model_select_dropdown, index_files],
+    submitBtn.click(predict, inputs=[history, chatbot, user_input, temperature, language_select_dropdown, model_select_dropdown, index_files],
                     outputs=[chatbot, history])
     submitBtn.click(lambda: "", None, user_input)
     demo.queue(concurrency_count=10).launch(server_name="0.0.0.0", server_port=7860, debug=True)
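Gradio passes the values of the inputs components to the callback positionally, so language_select_dropdown has to sit between temperature and model_select_dropdown in the event wiring to line up with the new lang parameter of predict. A stripped-down sketch of that pattern, assuming Gradio 3.x; the component names and the echo callback are illustrative, and keyword arguments such as multiselect may vary between Gradio versions.

```python
import gradio as gr

LANGUAGES = ["English", "简体中文", "日本語", "Deutsch", "Vietnamese"]

def echo(message, temperature, lang=LANGUAGES[0]):
    # Values arrive in the same order as the components listed in `inputs`.
    return f"[{lang} @ T={temperature}] {message}"

with gr.Blocks() as demo:
    message = gr.Textbox(label="Message")
    temperature = gr.Slider(0.0, 1.0, value=0.7, label="Temperature")
    language_select_dropdown = gr.Dropdown(
        label="Select reply language", choices=LANGUAGES, multiselect=False, value=LANGUAGES[0]
    )
    output = gr.Textbox(label="Reply")
    # The positions in `inputs` must match echo's positional parameters.
    message.submit(echo, inputs=[message, temperature, language_select_dropdown], outputs=[output])

if __name__ == "__main__":
    demo.launch()
```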