NebulasBellum committed
Commit 4d3d630 · verified · 1 Parent(s): f5ce029

update function with cpu

Files changed (1)
sumar_hf_space.py +22 -22
sumar_hf_space.py CHANGED
@@ -1,22 +1,22 @@
- import torch
- from transformers import GPT2Tokenizer, T5ForConditionalGeneration
-
-
- tokenizer = GPT2Tokenizer.from_pretrained('RussianNLP/FRED-T5-Summarizer', eos_token='</s>')
- model = T5ForConditionalGeneration.from_pretrained('RussianNLP/FRED-T5-Summarizer')
- device = 'cuda'
- model.to(device)
-
- input_text = "<LM> Сократи текст.\n "
-
- def make_summarization(user_text):
-     processing_text = input_text + user_text
-     input_ids = torch.tensor([tokenizer.encode(processing_text)]).to(device)
-     outputs = model.generate(input_ids, eos_token_id=tokenizer.eos_token_id,
-                              num_beams=3,
-                              min_new_tokens=17,
-                              max_new_tokens=200,
-                              do_sample=True,
-                              no_repeat_ngram_size=4,
-                              top_p=0.9)
-     return tokenizer.decode(outputs[0][1:])

+ import torch
+ from transformers import GPT2Tokenizer, T5ForConditionalGeneration
+
+
+ tokenizer = GPT2Tokenizer.from_pretrained('RussianNLP/FRED-T5-Summarizer', eos_token='</s>')
+ model = T5ForConditionalGeneration.from_pretrained('RussianNLP/FRED-T5-Summarizer')
+ device = 'cpu'
+ model.to(device)
+
+ input_text = "<LM> Сократи текст.\n "
+
+ def make_summarization(user_text):
+     processing_text = input_text + user_text
+     input_ids = torch.tensor([tokenizer.encode(processing_text)]).to(device)
+     outputs = model.generate(input_ids, eos_token_id=tokenizer.eos_token_id,
+                              num_beams=3,
+                              min_new_tokens=17,
+                              max_new_tokens=200,
+                              do_sample=True,
+                              no_repeat_ngram_size=4,
+                              top_p=0.9)
+     return tokenizer.decode(outputs[0][1:])
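
For quick sanity-checking of the CPU-only path, a minimal usage sketch could look like the following. Importing from sumar_hf_space and the sample Russian input are assumptions for illustration, not part of the commit.

# Minimal usage sketch, assuming sumar_hf_space.py is on the import path and the
# RussianNLP/FRED-T5-Summarizer weights can be downloaded on a CPU-only machine.
# Importing the module also loads the tokenizer and model at module level.
from sumar_hf_space import make_summarization

# Illustrative Russian input text (not from the repository).
sample_text = (
    "Москва является столицей России и крупнейшим городом страны. "
    "Она расположена на реке Москве и служит политическим, "
    "экономическим и культурным центром."
)

summary = make_summarization(sample_text)
print(summary)

Hard-coding device = 'cpu' keeps the Space runnable on CPU-only hardware; selecting the device via torch.cuda.is_available() would be a common alternative if GPU hardware might later be attached.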