Spaces: Runtime error
Update app.py

app.py CHANGED
@@ -132,16 +132,16 @@ os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
 
 ##############################################
 #History - die Frage oder das File eintragen...
-def add_text(history, prompt, file):
+def add_text(chatbot, history, prompt, file):
     if (file == None):
-
+        chatbot = chatbot + [(prompt, None)]
     else:
         if (prompt == ""):
-
+            chatbot = chatbot + [((file.name,), "Prompt fehlt!")]
         else:
-
+            chatbot = chatbot + [((file.name,), None), (prompt, None)]
 
-    return
+    return chatbot, history, prompt, "", gr.File( label=None, interactive=False, height=20, min_width=20, visible=False, scale=2) #gr.Textbox(value="", interactive=False)
 
 def add_file(history, file, prompt):
     if (prompt == ""):
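
For orientation, the rewritten add_text targets Gradio's classic tuple-based Chatbot value: each entry is a (user, bot) pair, None marks a still-pending bot reply, and a (filepath,) tuple makes the Chatbot render the file itself. Below is a minimal, hedged sketch of that behavior; the file handling is simplified to a plain path string, which is an assumption for illustration rather than the Space's actual File object.

# Minimal sketch of the tuple-based Chatbot value used above (illustrative names,
# not the Space's full code; file_name stands in for the uploaded File object).
def add_text(chatbot, history, prompt, file_name=None):
    if file_name is None:
        chatbot = chatbot + [(prompt, None)]                    # user turn, bot reply pending
    elif prompt == "":
        chatbot = chatbot + [((file_name,), "Prompt fehlt!")]   # (filepath,) renders the file in the chat
    else:
        chatbot = chatbot + [((file_name,), None), (prompt, None)]
    return chatbot, history, prompt, ""                         # "" clears the input textbox

print(add_text([], [], "Hello"))
# ([('Hello', None)], [], 'Hello', '')
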
@@ -390,17 +390,19 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
         #Bild ausgeben
         image = Image.open(io.BytesIO(result))
         image_64 = umwandeln_fuer_anzeige(image)
-
+        chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(b64encode(image_64).decode('utf-8'))
+        history[-1][1] = b64encode(image_64).decode('utf-8')
         print("history zeichnen......................")
-        print(
-        return history, "Success"
+        print(chatbot)
+        return chatbot, history, "Success"
     else:
         result = generate_text(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
         #Antwort als Stream ausgeben... wenn Textantwort gefordert
+        chatbot[-1][1] = result
         history[-1][1] = result
         print("history nach Zusatz und mit KI Antwort...........")
-        print(
-        return
+        print(chatbot)
+        return chatbot, history, "Success"
         """
         for character in result:
             history[-1][1] += character
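
The image branch in the hunk above inlines the generated picture into the chat as a base64 data URI, while the plain history keeps only the raw base64 string. A hedged sketch of that conversion follows; umwandeln_fuer_anzeige is not part of this diff, so the PNG-bytes helper below is an assumption about what it returns, not the Space's implementation.

# Sketch of the base64 image embedding shown above. The helper is a stand-in
# for umwandeln_fuer_anzeige (assumed to return PNG-encoded bytes).
import io
from base64 import b64encode
from PIL import Image

def umwandeln_fuer_anzeige(image: Image.Image) -> bytes:
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")            # serialize the PIL image for display
    return buffer.getvalue()

image = Image.new("RGB", (64, 64), "white")     # stands in for the generated image
image_64 = umwandeln_fuer_anzeige(image)
img_tag = "<img src='data:image/png;base64,{0}'/>".format(b64encode(image_64).decode("utf-8"))
# chatbot[-1][1] = img_tag                              -> rendered as an image by the Chatbot
# history[-1][1] = b64encode(image_64).decode("utf-8")  -> raw string kept for the model history
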
@@ -438,6 +440,7 @@ def generate_text (prompt, file, chatbot, history, rag_option, model_option, ope
         history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
     else:
         prompt_neu = prompt + file.path, #b64encode(file).decode("utf-8")
+        print("prompt_neu............................")
         print(prompt_neu)
         history_text_und_prompt = generate_prompt_with_history_openai(prompt_neu, history)
 
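
One side note on this hunk: the trailing comma in prompt_neu = prompt + file.path, makes prompt_neu a one-element tuple rather than a concatenated string, which is exactly what the newly added debug print will show. A tiny illustration with made-up values:

# Hypothetical values, for illustration only (not from the Space).
prompt = "Describe the image"
file_path = "/tmp/image.png"        # stands in for file.path
prompt_neu = prompt + file_path,    # trailing comma -> one-element tuple
print(prompt_neu)                   # ('Describe the image/tmp/image.png',)
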
@@ -612,7 +615,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
 
     # Chatbot
     transfer_input_args = dict(
-        fn=add_text, inputs=[chatbot, user_input, upload], outputs=[chatbot, history, user_question, user_input, file_display], show_progress=True
+        fn=add_text, inputs=[chatbot, history, user_input, upload], outputs=[chatbot, history, user_question, user_input, file_display], show_progress=True
     )
 
     predict_event1 = user_input.submit(**transfer_input_args, queue=False,).then(**predict_args)
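
For readers less familiar with this Gradio pattern: transfer_input_args is a plain dict of event-listener arguments that gets unpacked into .submit(), and .then() chains the prediction step after the input has been moved into the chat. A condensed, hedged sketch of that wiring follows; the handler bodies, predict_args, and the omitted file_display output are placeholders, only the dict-unpacking and the .submit(...).then(...) chaining come from the diff.

# Condensed sketch of the event wiring pattern above; add_text/predict are stubs,
# not the Space's real handlers.
import gradio as gr

def add_text(chatbot, history, text, file):
    chatbot = chatbot + [(text, None)]
    return chatbot, history, text, ""                      # last output clears the textbox

def predict(question, chatbot, history):
    chatbot[-1] = (chatbot[-1][0], "Echo: " + question)    # placeholder for the model call
    return chatbot, history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    history = gr.State([])
    user_question = gr.State("")
    user_input = gr.Textbox()
    upload = gr.File()

    transfer_input_args = dict(
        fn=add_text,
        inputs=[chatbot, history, user_input, upload],
        outputs=[chatbot, history, user_question, user_input],
        show_progress=True,      # boolean form carried over from the diff (Gradio 3.x style)
    )
    predict_args = dict(fn=predict, inputs=[user_question, chatbot, history], outputs=[chatbot, history])

    # First move the user input into the chat, then run the (stubbed) model step.
    predict_event1 = user_input.submit(**transfer_input_args, queue=False).then(**predict_args)

# demo.launch()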