Einmalumdiewelt commited on
Commit
2241007
·
1 Parent(s): c38dbf5

Update app.py

Browse files

Added a summary-length slider and a dropdown for choosing between different summarization models.

Files changed (1) hide show
  1. app.py +30 -15
app.py CHANGED
@@ -3,14 +3,22 @@ import torch
3
 
4
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
5
 
6
- tokenizer = AutoTokenizer.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")
7
- model = AutoModelForSeq2SeqLM.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")
8
- device = "cpu"
9
- #"cuda" if torch.cuda.is_available() else "cpu"
10
- model.to(device)
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
- def summarize(inputs):
14
  #define model inputs
15
  inputs = tokenizer(
16
  inputs,
@@ -19,7 +27,7 @@ def summarize(inputs):
19
  padding="max_length",
20
  return_tensors='pt').to(device)
21
  #generate preds
22
- preds = model.generate(**inputs,max_length=200,min_length=100)
23
  #we decode the predictions to store them
24
  decoded_predictions = tokenizer.batch_decode(preds, skip_special_tokens=True)
25
  #return
@@ -35,17 +43,24 @@ examples = [["summarize: Maschinelles Lernen ist ein Oberbegriff für die „kü
35
  # title=title,
36
  # description=description,
37
  # examples=examples)
38
- txt=gr.Textbox(lines=30, label="German", placeholder="Paste your German text in here")
39
- out=gr.Textbox(lines=10, label="Summary")
40
 
41
  interface = gr.Interface(summarize,
 
 
42
  inputs=txt,
43
- outputs=out,
 
 
 
 
 
 
44
  title=title,
45
  description=description,
46
- examples=examples)
47
 
48
- interface.launch(share=True)
49
-
50
-
51
-
 
3
 
4
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
5
 
 
 
 
 
 
6
 
7
+ def summarize(inputs,model=model,summary_length=200):
8
+ if model=="T5-base":
9
+ tokenizer = AutoTokenizer.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")
10
+ model = AutoModelForSeq2SeqLM.from_pretrained("Einmalumdiewelt/T5-Base_GNAD")
11
+ elif model =="Google pegasus":
12
+ tokenizer = AutoTokenizer.from_pretrained("Einmalumdiewelt/PegasusXSUM_GNAD")
13
+ model = AutoModelForSeq2SeqLM.from_pretrained("Einmalumdiewelt/PegasusXSUM_GNAD")
14
+ elif model =="Facebook bart-large":
15
+ tokenizer = AutoTokenizer.from_pretrained("Einmalumdiewelt/BART_large_CNN_GNAD")
16
+ model = AutoModelForSeq2SeqLM.from_pretrained("Einmalumdiewelt/BART_large_CNN_GNAD")
17
+
18
+ device = "CPU"
19
+ #"cuda" if torch.cuda.is_available() else "CPU"
20
+ model.to(device)
21
 
 
22
  #define model inputs
23
  inputs = tokenizer(
24
  inputs,
 
27
  padding="max_length",
28
  return_tensors='pt').to(device)
29
  #generate preds
30
+ preds = model.generate(**inputs,max_length=summary_length,min_length=30)
31
  #we decode the predictions to store them
32
  decoded_predictions = tokenizer.batch_decode(preds, skip_special_tokens=True)
33
  #return
 
43
  # title=title,
44
  # description=description,
45
  # examples=examples)
46
+ txt=gr.Textbox(lines=15, label="I want to summarize this:", placeholder="Paste your German text in here. Don't forget to add the prefix "summarize: " for T5-base architecture.")
47
+ out=gr.Textbox(lines=5, label="Here's your summary:")
48
 
49
  interface = gr.Interface(summarize,
50
+ [
51
+ # input
52
  inputs=txt,
53
+ # Selection of models for inference
54
+ gr.Dropdown(["T5-base", "Google pegasus", "Facebook bart-large"]),
55
+ # Length of summaries
56
+ gr.Slider(50, 250, step=50, label="summary length", value=150),
57
+ # ouptut
58
+ outputs=out
59
+ ],
60
  title=title,
61
  description=description,
62
+ examples=examples)
63
 
64
+ # launch interface
65
+ if __name__ == "__main__":
66
+ interface.launch(share=True)