Update app.py
app.py CHANGED
@@ -384,6 +384,10 @@ def vote(data: gr.LikeData):
 
 def read_image(image, size=512):
     return np.array(Image.fromarray(image).resize((size, size)))
+
+def add_file(history, file):
+    history = history + [((file.name,), None)]
+    return history
 
 
 additional_inputs = [
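Note on the first hunk: the new add_file helper follows Gradio's multimodal-chatbot pattern, where an uploaded file is appended to the history as a one-element tuple so that gr.Chatbot renders it as media rather than text. A minimal, self-contained sketch of that pattern (the layout below is illustrative, not the Space's actual UI):

    import gradio as gr

    def add_file(history, file):
        # Append the upload as a (filepath,) tuple; gr.Chatbot shows tuple
        # entries as media (image/video/audio) instead of plain text.
        history = history + [((file.name,), None)]
        return history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        upload = gr.UploadButton("📁", file_types=["image", "video", "audio"])
        # When a file is uploaded, extend the chat history with it.
        upload.upload(add_file, [chatbot, upload], [chatbot])

    if __name__ == "__main__":
        demo.launch()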
@@ -397,11 +401,16 @@ additional_inputs = [
     gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
 ]
 
-reference_image = gr.Image(label="Reference Image")
 
-chatbot_stream = gr.Chatbot()
 
-chat_interface_stream = gr.ChatInterface(fn=invoke,
+
+
+with gr.Blocks() as demo:
+    reference_image = gr.Image(label="Reference Image")
+
+    chatbot_stream = gr.Chatbot()
+
+    chat_interface_stream = gr.ChatInterface(fn=invoke,
                 additional_inputs = additional_inputs,
                 title = "ChatGPT vom LI",
                 theme="soft",
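The second hunk moves the existing components inside a gr.Blocks context. The additional_inputs list passed to gr.ChatInterface delivers each slider's value to the chat function as an extra argument after the message and history; invoke itself is not part of this diff, so the signature below is an assumption used only to show the mechanism:

    import gradio as gr

    # Hypothetical chat function: Gradio calls it with (message, history)
    # followed by one value per component in additional_inputs, in list order.
    def invoke(message, history, temperature, repetition_penalty):
        return f"(temperature={temperature}, repetition_penalty={repetition_penalty}) {message}"

    additional_inputs = [
        gr.Slider(label="Temperature", value=0.7, minimum=0.0, maximum=1.0, step=0.05),
        gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05),
    ]

    demo = gr.ChatInterface(
        fn=invoke,
        additional_inputs=additional_inputs,
        title="ChatGPT vom LI",
        theme="soft",
    )

    if __name__ == "__main__":
        demo.queue().launch()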
@@ -412,10 +421,7 @@ chat_interface_stream = gr.ChatInterface(fn=invoke,
                 submit_btn = "Abschicken",
                 description = description,
                 )
-
-
-
-with gr.Blocks() as demo:
+
     gr.HTML(
         """
         <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
@@ -429,6 +435,24 @@ with gr.Blocks() as demo:
         </div>
     </div>
     """)
+
+    with gr.Row():
+        prompt = gr.Textbox(
+            scale=4,
+            show_label=False,
+            placeholder="Gib einen Text ein oder lade eine Datei (Bild, File, Audio) hoch",
+            container=False,
+        )
+        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+
+    txt_msg = txt.submit(invoke, [chatbot_stream, prompt], [chatbot_stream, prompt], queue=False).then(bot, chatbot_stream, chatbot_stream, api_name="bot_response")
+    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [prompt], queue=False)
+    file_msg = btn.upload(add_file, [chatbot_stream, btn], [chatbot_stream], queue=False).then(bot, chatbot_stream, chatbot_stream)
+
+    chatbot_stream.like(print_like_dislike, None, None)
+
+
+    """
     with gr.Tab("Chatbot"):
         iface = gr.Interface(
             fn=chatbot_response,
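The fourth hunk wires a prompt row and an upload button into the streaming chatbot. Two things worth noting: the committed code submits via txt, while the Textbox created in this hunk is named prompt, and it chains into bot and print_like_dislike callbacks that do not appear in the visible hunks. A sketch of the intended event chain, using stand-in handlers and the name prompt:

    import gradio as gr

    def add_user_message(history, message):
        # Stand-in for the Space's user-message handler.
        return history + [(message, None)], ""

    def bot(history):
        # Stand-in for the Space's (streaming) bot callback.
        history[-1] = (history[-1][0], "...")
        return history

    def add_file(history, file):
        return history + [((file.name,), None)]

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        with gr.Row():
            prompt = gr.Textbox(scale=4, show_label=False, container=False,
                                placeholder="Type a message or upload a file")
            btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

        # Text path: record the user turn, then let the bot reply.
        txt_msg = prompt.submit(add_user_message, [chatbot, prompt], [chatbot, prompt], queue=False)
        txt_msg.then(bot, chatbot, chatbot)
        # File path: append the upload to the history, then let the bot reply.
        btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(bot, chatbot, chatbot)

    if __name__ == "__main__":
        demo.queue().launch()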
@@ -440,7 +464,7 @@ with gr.Blocks() as demo:
 
         iface.launch()
 
-
+
     with gr.Row():
         chatbot_stream.like(vote, None, None)
         chat_interface_stream.queue().launch()
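The fifth hunk keeps the chatbot_stream.like(vote, None, None) hook; the first hunk header shows that vote takes a gr.LikeData event. A minimal sketch of that feedback pattern (the print body is illustrative):

    import gradio as gr

    def vote(data: gr.LikeData):
        # gr.LikeData exposes the index of the message, its value, and
        # whether the user liked (True) or disliked (False) it.
        action = "upvoted" if data.liked else "downvoted"
        print(f"Message {data.index} was {action}: {data.value}")

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        # No inputs/outputs: the handler only reads the event payload.
        chatbot.like(vote, None, None)

    if __name__ == "__main__":
        demo.launch()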
@@ -455,6 +479,6 @@ with gr.Blocks() as demo:
             reference_image,
             queue=False
         )
+    """
 
-
-#demo.queue().launch() """
+demo.queue().launch()
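Taken together, the hunks move the UI into a single with gr.Blocks() as demo: scope, leave the old gr.Tab("Chatbot") section inside a triple-quoted string (effectively disabled), and switch the entry point from the commented-out call to a live demo.queue().launch(). A reduced, runnable outline of the resulting structure, assuming a Gradio version that allows gr.ChatInterface to be placed inside a gr.Blocks context and using a stand-in invoke:

    import gradio as gr

    def invoke(message, history):
        # Stand-in for the Space's chat function.
        return f"echo: {message}"

    with gr.Blocks() as demo:
        reference_image = gr.Image(label="Reference Image")
        chatbot_stream = gr.Chatbot()
        chat_interface_stream = gr.ChatInterface(fn=invoke, title="ChatGPT vom LI", theme="soft")

    if __name__ == "__main__":
        # queue() enables request queuing, which streamed responses typically rely on.
        demo.queue().launch()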