emotionanalysis commited on
Commit
f646ab4
·
verified ·
1 Parent(s): 9c29abb

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +23 -14
README.md CHANGED
@@ -1,24 +1,33 @@
1
  ---
2
  license: mit
 
 
 
3
  ---
4
 
5
  paust/pko-t5-base model based
6
 
7
- Since this model based on paust/pko-t5-base tokenizer, you need to import it.
8
 
9
- '''
10
- import torch
11
- from transformers import T5Model, T5Tokenizer, T5ForConditionalGeneration
12
 
13
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
- model.to(device)
15
 
16
- tokenizer = T5Tokenizer.from_pretrained("KETI-AIR/ke-t5-base-ko")
 
17
 
18
- inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding="max_length")
19
- inputs = {key: value.to(device) for key, value in inputs.items()}
20
- outputs = model.generate(input_ids=inputs["input_ids"], max_length=128, num_beams=4, early_stopping=True)
21
-
22
- generated_comment = tokenizer.decode(outputs[0], skip_special_tokens=True)
23
- print(generated_comment)
24
- '''
 
 
 
 
 
 
 
 
1
  ---
2
  license: mit
3
+ base_model:
4
+ - paust/pko-t5-base
5
+ pipeline_tag: text2text-generation
6
  ---
7
 
8
  paust/pko-t5-base model based
9
 
10
+ Since this model is based on the paust/pko-t5-base tokenizer, you need to import it.
11
 
12
+ from transformers import T5TokenizerFast, T5ForConditionalGeneration
13
+ tokenizer = T5TokenizerFast.from_pretrained("paust/pko-t5-base")
14
+ model = T5ForConditionalGeneration.from_pretrained("emotionanalysis/diaryempathizer-t5-ko")
15
 
 
 
16
 
17
+ import torch
18
+ from transformers import T5TokenizerFast, T5ForConditionalGeneration
19
 
20
+ model = T5ForConditionalGeneration.from_pretrained("emotionanalysis/diaryempathizer-t5-ko")
21
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
22
+ model.to(device)
23
+
24
+ tokenizer = T5TokenizerFast.from_pretrained("paust/pko-t5-base")
25
+ input_text = """
26
+ """
27
+
28
+ inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding="max_length")
29
+ inputs = {key: value.to(device) for key, value in inputs.items()}
30
+ outputs = model.generate(input_ids=inputs["input_ids"], max_length=128, num_beams=4, early_stopping=True)
31
+
32
+ generated_comment = tokenizer.decode(outputs[0], skip_special_tokens=True)
33
+ print(generated_comment)