Update app.py
Browse files
app.py
CHANGED
@@ -4,6 +4,7 @@ from transformers import T5Tokenizer,AutoModelForCausalLM
|
|
4 |
tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt2-small")
|
5 |
model3 = AutoModelForCausalLM.from_pretrained("./models")
|
6 |
model3.to("cpu")
|
|
|
7 |
|
8 |
def getarate_sentences3(seed_sentence):
|
9 |
x = tokenizer.encode(seed_sentence, return_tensors="pt", add_special_tokens=False) # 入力
|
@@ -24,6 +25,7 @@ def getarate_sentences3(seed_sentence):
|
|
24 |
generated_sentences = tokenizer.batch_decode(y, skip_special_tokens=True) # 特殊トークンをスキップして文章に変換
|
25 |
return generated_sentences[0]
|
26 |
|
27 |
-
demo = gr.Interface(fn=getarate_sentences3, inputs="
|
|
|
28 |
|
29 |
demo.launch()
|
|
|
# Load the Japanese GPT-2 tokenizer and the locally fine-tuned model,
# then pin the model to CPU for inference.
tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt2-small")
model3 = AutoModelForCausalLM.from_pretrained("./models")
model3.to("cpu")

# UI description (Japanese): "Enter some text and it will generate
# Southern All Stars style lyrics that follow it."
description = "文書を入力すると、その後に、サザンオールスターズ風の歌詞を生成します"
|
8 |
|
9 |
def getarate_sentences3(seed_sentence):
|
10 |
x = tokenizer.encode(seed_sentence, return_tensors="pt", add_special_tokens=False) # 入力
|
|
|
25 |
generated_sentences = tokenizer.batch_decode(y, skip_special_tokens=True) # 特殊トークンをスキップして文章に変換
|
26 |
return generated_sentences[0]
|
# Build and launch the Gradio UI: a 3-line textbox in, generated lyrics out.
# BUG FIX: the original passed `description=descriptions` — an undefined name
# (the variable defined above is `description`), which raises NameError at
# import time and prevents the app from starting.
demo = gr.Interface(
    fn=getarate_sentences3,
    inputs=gr.Textbox(lines=3, placeholder="文章を入力してください"),
    outputs="text",
    title="Southern All Stars style lyrics",
    description=description,
)

demo.launch()