Amitontheweb committed · verified
Commit f32f4fc · 1 Parent(s): e2eec3b

Upload 2 files

Files changed (2):
  1. Keywords-to-Title-Generator.py +150 -0
  2. requirements.txt +3 -0
Keywords-to-Title-Generator.py ADDED
@@ -0,0 +1,150 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # ### Keywords to Title Generator
+ # - https://huggingface.co/EnglishVoice/t5-base-keywords-to-headline?text=diabetic+diet+plan
+ # - Apache 2.0
+
+ # In[1]:
+
+
+ import torch
+ from transformers import T5ForConditionalGeneration,T5Tokenizer
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ model = T5ForConditionalGeneration.from_pretrained("EnglishVoice/t5-base-keywords-to-headline")
+ tokenizer = T5Tokenizer.from_pretrained("EnglishVoice/t5-base-keywords-to-headline", clean_up_tokenization_spaces=True, legacy=False)
+ model = model.to(device)
+
+
+
+ # In[55]:
+
+
+ keywords = "music, sleep, night"
+
+ text = "headline: " + keywords
+ encoding = tokenizer.encode_plus(text, return_tensors = "pt")
+ input_ids = encoding["input_ids"].to(device)
+ attention_masks = encoding["attention_mask"].to(device)
+ beam_outputs = model.generate(
+     input_ids = input_ids,
+     attention_mask = attention_masks,
+     max_new_tokens = 25,
+     do_sample = True,
+     num_return_sequences = 5,
+     temperature = 1.2,
+     #num_beams = 20,
+     #num_beam_groups = 20,
+     #diversity_penalty=0.8,
+     no_repeat_ngram_size = 3,
+     penalty_alpha = 0.8,
+     #early_stopping = True,
+     top_k = 15,
+     #top_p = 0.60,
+ )
+
+ for i in range(len(beam_outputs)):
+     result = tokenizer.decode(beam_outputs[i], skip_special_tokens=True)
+     print(result)
+
+
+ # In[1]:
+
+
+ import gradio as gr
+
+
+ # In[ ]:
+
+
+
+
+
+ # In[ ]:
+
+
+
+
+
+ # In[ ]:
+
+
+
+
+
+ # In[ ]:
+
+
+
+
+
+ # In[ ]:
+
+
+ '''
+ #Create a four button panel for changing parameters with one click
+
+ def fn(text):
+     return ("Hello gradio!")
+
+ with gr.Blocks () as demo:
+
+     with gr.Row(variant='compact') as PanelRow1:    #first row: top
+
+         with gr.Column(scale=0, min_width=180) as PanelCol5:
+             gr.HTML("")
+         with gr.Column(scale=0) as PanelCol4:
+             submit = gr.Button("Temp++", scale=0)
+         with gr.Column(scale=1) as PanelCol5:
+             gr.HTML("")
+
+
+     with gr.Row(variant='compact') as PanelRow2:    #2nd row: left, right, middle
+
+         with gr.Column(min_width=100) as PanelCol1:
+             submit = gr.Button("Contrastive")
+         with gr.Column(min_width=100) as PanelCol2:
+             submit = gr.Button("Re-generate")
+         with gr.Column(min_width=100) as PanelCol3:
+             submit = gr.Button("Diversity Beam")
+
+         with gr.Column(min_width=100) as PanelCol5:
+             gr.HTML("")
+         with gr.Column(min_width=100) as PanelCol5:
+             gr.HTML("")
+         with gr.Column(scale=0) as PanelCol5:
+             gr.HTML("")
+
+     with gr.Row(variant='compact') as PanelRow3:    #last row: down
+         with gr.Column(scale=0, min_width=180) as PanelCol7:
+             gr.HTML("")
+         with gr.Column(scale=1) as PanelCol6:
+             submit = gr.Button("Temp--", scale=0)
+
+         with gr.Column(scale=0) as PanelCol5:
+             gr.HTML("")
+
+ demo.launch()
+ '''
+
+
+ # In[164]:
+
+
+ import gc
+ gc.collect()
+
+
+ # In[166]:
+
+
+ gr.close_all()
+
+
+ # In[ ]:
+
+
+
+
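The button labels in the commented-out panel draft above (Temp++ / Temp--, Contrastive, Diversity Beam, Re-generate) hint at switching decoding strategies for model.generate() and re-running generation, using parameters that are partly commented out in the script. A minimal sketch of how those presets could be grouped is shown below; it reuses the model, tokenizer and device objects loaded by the script, and the preset names, the grouping, and the generate_titles helper are illustrative assumptions, not part of the committed file.

# Hypothetical decoding presets, mirroring the values that appear
# (partly commented out) in the generate() call above.
PRESETS = {
    "sampling":     dict(do_sample=True, temperature=1.2, top_k=15,
                         no_repeat_ngram_size=3, num_return_sequences=5),
    "contrastive":  dict(do_sample=False, penalty_alpha=0.8, top_k=15),
    "diverse_beam": dict(do_sample=False, num_beams=20, num_beam_groups=20,
                         diversity_penalty=0.8, num_return_sequences=5),
}

def generate_titles(keywords, preset="sampling"):
    # The model expects the "headline: " prefix in front of the keyword list.
    encoding = tokenizer("headline: " + keywords, return_tensors="pt").to(device)
    outputs = model.generate(**encoding, max_new_tokens=25, **PRESETS[preset])
    return [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]

With a grouping like this, the four-button panel from the draft would only need to swap the active preset (and nudge temperature up or down for the Temp buttons) before regenerating.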
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ transformers
+ gradio
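gradio is imported in the script and listed in requirements.txt, but the UI exists only as the commented-out draft. A minimal sketch of how the generator could be exposed through Gradio is given below; it builds on the hypothetical generate_titles helper and PRESETS sketched after the script, and the layout and function name are assumptions rather than part of the commit.

import gradio as gr

def keywords_to_titles(keywords, preset):
    # Join the suggested headlines into one text box, one title per line.
    return "\n".join(generate_titles(keywords, preset))

demo = gr.Interface(
    fn=keywords_to_titles,
    inputs=[
        gr.Textbox(label="Keywords", placeholder="music, sleep, night"),
        gr.Radio(["sampling", "contrastive", "diverse_beam"],
                 value="sampling", label="Decoding strategy"),
    ],
    outputs=gr.Textbox(label="Suggested titles"),
    title="Keywords to Title Generator",
)

if __name__ == "__main__":
    demo.launch()

A radio selector stands in for the draft's four-button panel; the Temp++ / Temp-- behaviour could be added later with gr.Blocks once the basic flow works.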