Update README.md
README.md
@@ -32,10 +32,6 @@ model = AutoModelForCausalLM.from_pretrained(
 
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-
-def create_summarization_prompt(text_to_sum):
-    return "記事:\n" + text_to_sum + "\n\n要約:\n"
-
 def do_closed_qa(context, question):
     return context + "\n\n" + question
 
@@ -52,7 +48,6 @@ test_article = """ モノマネのレパートリーに「リーチ・マイ
 
 test_question = " リーチ・マイケルは何を送ってきましたか?"
 
-pipe(create_summarization_prompt(test_article), max_new_tokens=256, temperature=0)[0]["generated_text"]
 pipe(do_closed_qa(test_article, test_question), max_new_tokens=128, temperature=0)[0]["generated_text"]
 ```
 
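For reference, below is a minimal, self-contained sketch of the snippet this commit leaves in the README. The checkpoint name is a placeholder, since the diff truncates the real `from_pretrained(` argument, and the final call passes `test_question` (the snippet otherwise references an undefined `question`); `temperature=0` is kept as the README has it. The test question roughly translates to "What did Leitch Michael send?"

```python
# Sketch of the README snippet after this commit. The checkpoint name is a
# placeholder, NOT taken from the diff; substitute the model the README loads.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "example-org/example-japanese-llm"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def do_closed_qa(context, question):
    # Closed QA prompt: the model should answer from the given context only.
    return context + "\n\n" + question

test_article = "..."  # the README's sample article (truncated in the diff)
test_question = " リーチ・マイケルは何を送ってきましたか?"  # "What did Leitch Michael send?"

# Greedy decoding, answer capped at 128 new tokens, as in the README.
print(pipe(do_closed_qa(test_article, test_question),
           max_new_tokens=128, temperature=0)[0]["generated_text"])
```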