---
license: apache-2.0
---
|
|
|
## INFERENCE CODE |
|
```bash
pip install transformers[torch]
```
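The snippet below builds a lowercased prompt of the form `Summarize the following Questions and Response: Question: <question> Response: <db result>`, then generates a short natural-language summary of the database result: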
|
|
|
```python
import time

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the summarizer tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("AquilaX-AI/DB-Summarizer")
model = AutoModelForSeq2SeqLM.from_pretrained("AquilaX-AI/DB-Summarizer")

# Run on GPU if available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# Example question and the raw database result to be summarized
question = "How many Vulnerability found today"
db_result = "243"
summ_inp = f"Summarize the following Questions and Response: Question: {question} Response: {db_result}".lower()

start = time.time()

# Tokenize the prompt, generate the summary, and decode it back to text
inputs = tokenizer(summ_inp, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_length=526)
answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(answer)

end = time.time()
print(f"Time taken: {end - start:.2f} seconds")
```
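
For repeated queries it can be convenient to wrap the generation step in a small helper. The sketch below reuses the `tokenizer`, `model`, and `device` objects loaded above; the `summarize` name and its signature are illustrative, not part of the model's API.

```python
def summarize(question: str, db_result: str, max_length: int = 526) -> str:
    """Hypothetical convenience wrapper around the inference snippet above."""
    # Build the same lowercased prompt format used in the example
    prompt = (
        f"Summarize the following Questions and Response: "
        f"Question: {question} Response: {db_result}"
    ).lower()
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Example call, mirroring the inputs used above
print(summarize("How many Vulnerability found today", "243"))
```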