import gradio as gr

from transformers import T5Tokenizer, AutoModelForCausalLM

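# The rinna Japanese GPT-2 models ship a SentencePiece tokenizer that is loaded via T5Tokenizer.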
tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt2-small")

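# Load the fine-tuned lyrics model from the local ./models directory and keep it on CPU.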
model3 = AutoModelForCausalLM.from_pretrained("./models")
model3.to("cpu")

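# Gradio UI description (Japanese): "Enter some text, and Southern All Stars style lyrics will be generated to follow it."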
descriptions = "文書を入力すると、その後に、サザンオールスターズ風の歌詞を生成します"


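# Continue the seed sentence with sampled text: top-k/top-p sampling with a slightly
# high temperature keeps the lyrics varied, and bad_words_ids blocks the <unk> token.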
def generate_sentences3(seed_sentence):
    x = tokenizer.encode(seed_sentence, return_tensors="pt", add_special_tokens=False)
    x = x.cpu()
    y = model3.generate(x,
                        min_length=50,
                        max_length=100,
                        do_sample=True,
                        top_k=50,
                        top_p=0.95,
                        temperature=1.2,
                        num_return_sequences=1,
                        pad_token_id=tokenizer.pad_token_id,
                        bos_token_id=tokenizer.bos_token_id,
                        eos_token_id=tokenizer.eos_token_id,
                        bad_words_ids=[[tokenizer.unk_token_id]]
                        )
    generated_sentences = tokenizer.batch_decode(y, skip_special_tokens=True)
    return generated_sentences[0]


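# Gradio UI: a three-line textbox for the seed text (placeholder: "Please enter some text"),
# plain-text output showing the generated lyrics.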
demo = gr.Interface(fn=generate_sentences3,
                    inputs=gr.Textbox(lines=3, placeholder="文章を入力してください"),
                    outputs="text",
                    title="Southern All Stars style lyrics",
                    description=descriptions)


demo.launch()