Update app.py
app.py CHANGED
@@ -355,6 +355,16 @@ def create_picture(history, prompt):
 def invoke (prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
     global splittet
     print(splittet)
+
+    if (openai_api_key == "" or openai_api_key == "sk-"):
+        #raise gr.Error("OpenAI API Key is required.")
+        #use our own OpenAI key
+        openai_api_key= OAI_API_KEY
+    if (rag_option is None):
+        raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
+    if (prompt == ""):
+        raise gr.Error("Prompt ist erforderlich.")
+
     #append the prompt to the history and turn both into a single text
     if (file == None):
         history_text_und_prompt = generate_prompt_with_history(prompt, history)
@@ -368,14 +378,7 @@ def invoke (prompt, file, history, rag_option, model_option, openai_api_key, k=3
     #format the history for Langchain
     #history_text_und_prompt = generate_prompt_with_history_langchain(prompt, history)
 
-
-        #raise gr.Error("OpenAI API Key is required.")
-        #use our own OpenAI key
-        openai_api_key= OAI_API_KEY
-    if (rag_option is None):
-        raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
-    if (prompt == ""):
-        raise gr.Error("Prompt ist erforderlich.")
+
     try:
         ###########################
         #select the LLM (OpenAI or HF)
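
Taken together, the first two hunks move the input validation from the middle of invoke to its top, so a missing key, RAG option, or empty prompt is rejected before any prompt/history assembly happens. A minimal stand-alone sketch of the resulting guard pattern follows; validate_inputs is a hypothetical helper name, and the OAI_API_KEY value here is a stand-in for the fallback key the app configures elsewhere:

import gradio as gr

OAI_API_KEY = "sk-..."  # hypothetical stand-in for the app's configured fallback key

def validate_inputs(prompt, rag_option, openai_api_key):
    # An empty or placeholder key silently falls back to the app's own key.
    if openai_api_key == "" or openai_api_key == "sk-":
        openai_api_key = OAI_API_KEY
    # A missing RAG option or an empty prompt is surfaced as an error in the Gradio UI.
    if rag_option is None:
        raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
    if prompt == "":
        raise gr.Error("Prompt ist erforderlich.")
    return openai_api_key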
@@ -545,7 +548,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
                 max_context_length_tokens,
                 repetition_penalty
             ],
-            outputs=[
+            outputs=[chatbot, status_display], #[ chatbot, history, status_display],
             show_progress=True,
         )
 
@@ -556,12 +559,12 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 
     # Chatbot
     transfer_input_args = dict(
-        fn=add_text, inputs=[
+        fn=add_text, inputs=[chatbot, user_input, upload], outputs=[chatbot, user_input, user_question], show_progress=True
     )
 
     predict_event1 = user_input.submit(**transfer_input_args, queue=False,).then(**predict_args)
     predict_event2 = submitBtn.click(**transfer_input_args, queue=False,).then(**predict_args)
-    predict_event3 = upload.upload(file_anzeigen, [
+    predict_event3 = upload.upload(file_anzeigen, [upload], [file_display] ) #.then(**predict_args)
 
     cancelBtn.click(
         cancels=[predict_event1,predict_event2, predict_event3 ]
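
The last two hunks complete the Gradio event wiring: predict_args now writes its results to the chatbot and status_display components only (history was dropped from the outputs list), transfer_input_args is a plain dict splatted into both the Enter-key and send-button events, each chained into the prediction via .then(), and file uploads merely refresh the file display instead of triggering a prediction. A self-contained sketch of this wiring shape follows; the handler bodies and component types are invented for illustration, and only the names and the wiring mirror app.py:

import gradio as gr

# Stand-in handlers, invented for illustration.
def add_text(history, text, file):
    # Move the typed text into the chat history and clear the textbox.
    return history + [(text, None)], "", text

def predict_stub(history):
    # Placeholder "model" answer plus a status message for status_display.
    history[-1] = (history[-1][0], "(answer)")
    return history, "Done"

def file_anzeigen(file):
    # Show the uploaded file's name in the display box.
    return file.name if file else ""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    status_display = gr.Markdown()
    user_input = gr.Textbox()
    user_question = gr.Textbox(visible=False)
    upload = gr.File()
    file_display = gr.Textbox()
    submitBtn = gr.Button("Send")
    cancelBtn = gr.Button("Cancel")

    predict_args = dict(fn=predict_stub, inputs=[chatbot],
                        outputs=[chatbot, status_display], show_progress=True)
    transfer_input_args = dict(fn=add_text, inputs=[chatbot, user_input, upload],
                               outputs=[chatbot, user_input, user_question], show_progress=True)

    # One argument dict drives both the Enter key and the send button,
    # each chained into the prediction step.
    predict_event1 = user_input.submit(**transfer_input_args, queue=False).then(**predict_args)
    predict_event2 = submitBtn.click(**transfer_input_args, queue=False).then(**predict_args)
    # Uploads only refresh the file display; they no longer chain into a prediction.
    predict_event3 = upload.upload(file_anzeigen, [upload], [file_display])
    # Cancel aborts any of the three in-flight event chains.
    cancelBtn.click(fn=None, inputs=None, outputs=None,
                    cancels=[predict_event1, predict_event2, predict_event3])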