import gradio as gr
from transformers import pipeline, GenerationConfig

# Placeholder left over from the default template; the interface below calls
# language_model instead, so this function is never used.
def greet(prompt, temperature, max_length, top_p):
    return "Hello, World!"

def language_model(prompt, temperature, max_length, top_p, model):
    # Build a text-generation pipeline for the selected model
    generator = pipeline("text-generation", model=model)

    # Load the model's default generation config and apply the UI parameters
    config = GenerationConfig.from_pretrained(model)
    config.top_p = top_p
    config.temperature = temperature
    config.do_sample = True
    if temperature == 0:
        # Fall back to greedy decoding when temperature is zero
        config.do_sample = False

    # Generate text and return the first completion
    data = generator(prompt, max_new_tokens=max_length, generation_config=config)
    return data[0]["generated_text"]
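
# A minimal sketch of calling language_model directly (illustrative values; the
# selected model is downloaded on first use), e.g. for a quick check without the UI:
#
#   print(language_model("Once upon a time", temperature=0.7,
#                        max_length=32, top_p=0.95, model="distilgpt2"))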

demo = gr.Interface(
    fn=language_model,
    inputs=[
        gr.Textbox(placeholder="Write a nice poem!", lines=5, label="Input Text"),
        gr.Slider(minimum=0, maximum=2, value=1, label="Temperature",
                  info="Controls randomness: lowering results in less random completions."),
        gr.Slider(minimum=1, maximum=256, value=16, label="Maximum Tokens",
                  info="The maximum number of tokens to generate."),
        gr.Slider(minimum=0, maximum=1, value=1, label="Top-p",
                  info="Controls diversity via nucleus sampling."),
        gr.Dropdown(choices=["distilgpt2", "gpt2", "gpt2-large"], value="gpt2",
                    label="Model", info="Model to choose from the Hugging Face Hub"),
    ],
    outputs=[gr.Textbox(placeholder="Hello, World!")],
)

demo.launch(share=True)
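
# Once the app is running, share=True prints a public *.gradio.live URL. A minimal
# sketch (run from a separate script; the URL below is a placeholder) of querying
# the app programmatically with the gradio_client package:
#
#   from gradio_client import Client
#   client = Client("https://your-share-link.gradio.live")  # hypothetical share URL
#   result = client.predict(
#       "Write a nice poem!",   # prompt
#       0.7,                    # temperature
#       64,                     # maximum tokens
#       0.95,                   # top-p
#       "gpt2",                 # model
#       api_name="/predict",    # default endpoint name for gr.Interface
#   )
#   print(result)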