Einmalumdiewelt committed
Commit 8d7ce6b · 1 Parent(s): 8cb95e9

Update app.py

Files changed (1)
  1. app.py +3 -8
app.py CHANGED
@@ -1,8 +1,5 @@
 import gradio as gr
 
-import torch
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 tokenizer = AutoTokenizer.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")
@@ -11,11 +8,9 @@ model.to(device)
 
 
 def summarize(inputs):
-    with torch.no_grad():
-        # with a max_length of 200, the model has a chance to encapsule lots of information
-        preds = model.generate(**inputs,max_length=200,min_length=100)
-        # we decode the predictions to store them
-        decoded_predictions = tokenizer.batch_decode(preds, skip_special_tokens=True)
+    preds = model.generate(**inputs,max_length=200,min_length=100)
+    # we decode the predictions to store them
+    decoded_predictions = tokenizer.batch_decode(preds, skip_special_tokens=True)
     # each batches predictions are appended to the list
     return decoded_predictions
 
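For context, a minimal sketch of how the updated summarize() could be exercised outside the Gradio interface. The model name, the generate() arguments, and the batch_decode() call come from the diff; the model-loading line, the tokenizer call with return_tensors="pt", and the example text are illustrative assumptions, since the rest of app.py is not shown in this commit.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")
# assumption: the model is loaded from the same checkpoint as the tokenizer;
# only the tokenizer line appears in the diff shown above
model = AutoModelForSeq2SeqLM.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")


def summarize(inputs):
    # generate summaries of 100-200 tokens, as in the committed version
    preds = model.generate(**inputs, max_length=200, min_length=100)
    # decode the predicted token ids back into text, dropping special tokens
    decoded_predictions = tokenizer.batch_decode(preds, skip_special_tokens=True)
    return decoded_predictions


if __name__ == "__main__":
    # hypothetical German input; T5-Base_GNAD is a German news summarization model
    text = "Die Regierung hat am Montag ein neues Klimapaket vorgestellt."
    # tokenize to PyTorch tensors so **inputs matches what generate() expects
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    print(summarize(inputs)[0])
```

Dropping the explicit torch.no_grad() block should not change behavior here, since transformers' generate() already disables gradient tracking internally.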