Update app.py
app.py CHANGED
@@ -13,19 +13,17 @@ def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, to
-        gr.inputs.Slider(minimum=1, maximum=100, step=1, default=50, label="Max New Tokens"),
-        gr.inputs.Slider(minimum=0, maximum=1, step=0.01, default=0.95, label="Top-p"),
-        gr.inputs.Slider(minimum=0, maximum=100, step=1, default=50, label="Top-k")
-    ],
-    outputs=[gr.outputs.Textbox(label="Generated Text")],
-    title="Text Generation with Grammarly Model"
-)
+additional_inputs=[
+    gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
+    gr.Slider( label="Max new tokens", value=150, minimum=0, maximum=250, step=64, interactive=True, info="The maximum number of new tokens", ),
+    gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
+    gr.Slider( label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
+]
+gr.ChatInterface(
+    fn=generate_text,
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    additional_inputs=additional_inputs,
+    title="My Grammarly Space",
+    concurrency_limit=20,
+).launch(show_api=False)
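For reference, gr.ChatInterface calls its fn as fn(message, history, *additional_inputs), so after this change generate_text must accept the chat message and history before the four slider values. Below is a minimal sketch of a compatible generate_text, assuming a seq2seq Grammarly checkpoint; the model name grammarly/coedit-large is used only as an illustration, since the Space's actual model and function body are not shown in this hunk.

# Hypothetical sketch, not the Space's actual app.py.
# "grammarly/coedit-large" is an assumed checkpoint for illustration.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("grammarly/coedit-large")
model = AutoModelForSeq2SeqLM.from_pretrained("grammarly/coedit-large")

def generate_text(message, history, temperature, max_new_tokens, top_p, top_k):
    # gr.ChatInterface passes (message, history) first, then the
    # additional_inputs sliders in the order they are listed above.
    inputs = tokenizer(message, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        max_new_tokens=int(max_new_tokens),
        top_p=top_p,
        top_k=int(top_k),
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)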