Mr-Vicky-01 committed on
Commit
9d41ff0
·
verified ·
1 Parent(s): 670bde4

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -8,12 +8,12 @@ pip install transformers[torch]
8
  ```
9
 
10
  ```python
11
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
12
  import torch
13
  import time
14
 
15
  tokenizer = AutoTokenizer.from_pretrained("AquilaX-AI/DB-Summarizer")
16
- model = AutoModelForSeq2SeqLM.from_pretrained("AquilaX-AI/DB-Summarizer")
17
 
18
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
19
 
@@ -28,7 +28,7 @@ inputs = tokenizer(summ_inp, return_tensors="pt")
28
  model.to(device)
29
  inputs = inputs.to(device)
30
  outputs = model.generate(**inputs, max_length=526)
31
- answer = summ_tokenizer.decode(outputs[0], skip_special_tokens=True)
32
  print(answer)
33
 
34
  end = time.time()
 
8
  ```
9
 
10
  ```python
11
+ from transformers import AutoTokenizer, AutoModelForCausalLM
12
  import torch
13
  import time
14
 
15
  tokenizer = AutoTokenizer.from_pretrained("AquilaX-AI/DB-Summarizer")
16
+ model = AutoModelForCausalLM.from_pretrained("AquilaX-AI/DB-Summarizer")
17
 
18
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
19
 
 
28
  model.to(device)
29
  inputs = inputs.to(device)
30
  outputs = model.generate(**inputs, max_length=526)
31
+ answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
32
  print(answer)
33
 
34
  end = time.time()