ColeGuion committed on
Commit
3e83dc6
·
verified ·
1 Parent(s): c475c70

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -15
app.py CHANGED
@@ -13,19 +13,17 @@ def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, to
13
 
14
 
15
 
16
- # Define your Gradio interface
17
- iface = gr.Interface(
18
- fn=generate_text,
19
- inputs=[
20
- gr.inputs.Textbox(lines=2, label="Input Text"),
21
- gr.inputs.Slider(minimum=0, maximum=1, step=0.01, default=0.9, label="Temperature"),
22
- gr.inputs.Slider(minimum=1, maximum=100, step=1, default=50, label="Max New Tokens"),
23
- gr.inputs.Slider(minimum=0, maximum=1, step=0.01, default=0.95, label="Top-p"),
24
- gr.inputs.Slider(minimum=0, maximum=100, step=1, default=50, label="Top-k")
25
- ],
26
- outputs=[gr.outputs.Textbox(label="Generated Text")],
27
- title="Text Generation with Grammarly Model"
28
- )
29
 
30
- # Launch the interface
31
- iface.launch()
 
 
 
 
 
 
13
 
14
 
15
 
16
+ additional_inputs=[
17
+ gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
18
+ gr.Slider( label="Max new tokens", value=150, minimum=0, maximum=250, step=64, interactive=True, info="The maximum numbers of new tokens", ),
19
+ gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
20
+ gr.Slider( label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
21
+ ]
 
 
 
 
 
 
 
22
 
23
+ gr.ChatInterface(
24
+ fn=generate_text,
25
+ chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
26
+ additional_inputs=additional_inputs,
27
+ title="My Grammarly Space",
28
+ concurrency_limit=20,
29
+ ).launch(show_api=False)