{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "GPU is available\n"
     ]
    }
   ],
   "source": [
    "import gpt_2_simple as gpt2\n",
    "import os\n",
    "import tensorflow as tf\n",
    "import pandas as pd\n",
    "import re\n",
    "\n",
    "# tf.test.is_gpu_available() is deprecated; use the recommended device listing instead.\n",
    "print(\"GPU is\", \"available\" if tf.config.list_physical_devices('GPU') else \"NOT AVAILABLE\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_name = \"124M\"\n",
    "if not os.path.isdir(os.path.join(\"models\", model_name)):\n",
    "\tprint(f\"Downloading {model_name} model...\")\n",
    "\tgpt2.download_gpt2(model_name=model_name)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Project layout: fine-tuned weights and training text files live under AbstractGenerator/.\n",
    "path = 'AbstractGenerator/'\n",
    "checkpoint_dir = path + 'weights/'\n",
    "data_path = path + 'TrainigData/'\n",
    "\n",
    "file_name_en = 'en'\n",
    "file_path_en = data_path + file_name_en\n",
    "\n",
    "file_name_es = 'es'\n",
    "file_path_es = data_path + file_name_es\n",
    "\n",
    "# GPT-2 delimiters that mark the start and end of each training example.\n",
    "prefix = '<|startoftext|>'\n",
    "suffix = '<|endoftext|>'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Create training data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the datasets: each CSV holds the paper body without its abstract ('text_no_abstract') and the abstract itself.\n",
    "en = pd.read_csv('CSV/scientific_paper_en.csv')\n",
    "es = pd.read_csv('CSV/scientific_paper_es.csv')"
   ]
  },
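  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Quick sanity check (a minimal sketch, not part of the original workflow): confirm that both dataframes expose the `text_no_abstract` and `abstract` columns used when building the training files."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the size and columns of the loaded corpora before writing training files.\n",
    "print('en:', en.shape, list(en.columns))\n",
    "print('es:', es.shape, list(es.columns))"
   ]
  },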
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import codecs\n",
    "\n",
    "def createTrainingData(ds, fileName='resumen.txt', path='TrainigData/'):\n",
    "    # Append each paper as one delimited example: prefix, body, ABSTRACT marker, abstract, suffix.\n",
    "    with codecs.open(path + fileName, 'a', 'utf-8') as f:\n",
    "        for i in ds.index:\n",
    "            f.write(prefix + \"\\n\")\n",
    "            f.write(ds.iloc[i]['text_no_abstract'])\n",
    "            f.write(\"ABSTRACT\\n\")\n",
    "            f.write(ds.iloc[i]['abstract'] + \"\\n\")\n",
    "            f.write(suffix + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "createTrainingData(en,'en.txt',data_path)\n",
    "createTrainingData(es,'es.txt',data_path)"
   ]
  },
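  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional check (a minimal sketch): print the beginning of the generated English training file to verify that the `<|startoftext|>` / `ABSTRACT` / `<|endoftext|>` markers are laid out as intended."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preview the first characters of the English training file written above.\n",
    "with codecs.open(file_path_en + '.txt', 'r', 'utf-8') as f:\n",
    "    print(f.read(2000))"
   ]
  },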
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load fine-tuned model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the fine-tuned weights saved under checkpoint_dir (run_name must match the training run).\n",
    "sess = gpt2.start_tf_sess()\n",
    "gpt2.load_gpt2(sess, checkpoint_dir=checkpoint_dir, run_name='run1')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reset the default graph so a fresh session can be used for fine-tuning.\n",
    "tf.compat.v1.reset_default_graph()\n",
    "sess = gpt2.start_tf_sess()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## en"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fine-tune the 124M model on the English corpus; checkpoints are written under checkpoint_dir.\n",
    "gpt2.finetune(sess,\n",
    "              file_path_en + '.txt',\n",
    "              model_name=model_name,\n",
    "              checkpoint_dir=checkpoint_dir,\n",
    "              steps=1000)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## es"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading checkpoint models\\124M\\model.ckpt\n",
      "INFO:tensorflow:Restoring parameters from models\\124M\\model.ckpt\n",
      "Loading dataset...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1/1 [00:51<00:00, 51.03s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dataset has 17511492 tokens\n",
      "Training...\n"
     ]
    },
    {
     "ename": "ResourceExhaustedError",
     "evalue": "Graph execution error:\n\nfailed to allocate memory\n\t [[{{node model/h10/attn/ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_float_Cast_1}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode.",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mResourceExhaustedError\u001b[0m                    Traceback (most recent call last)",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1377\u001b[0m, in \u001b[0;36mBaseSession._do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1375'>1376</a>\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m-> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1376'>1377</a>\u001b[0m   \u001b[39mreturn\u001b[39;00m fn(\u001b[39m*\u001b[39;49margs)\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1377'>1378</a>\u001b[0m \u001b[39mexcept\u001b[39;00m errors\u001b[39m.\u001b[39mOpError \u001b[39mas\u001b[39;00m e:\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1360\u001b[0m, in \u001b[0;36mBaseSession._do_run.<locals>._run_fn\u001b[1;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1358'>1359</a>\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_extend_graph()\n\u001b[1;32m-> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1359'>1360</a>\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_tf_sessionrun(options, feed_dict, fetch_list,\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1360'>1361</a>\u001b[0m                                 target_list, run_metadata)\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1453\u001b[0m, in \u001b[0;36mBaseSession._call_tf_sessionrun\u001b[1;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1450'>1451</a>\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_call_tf_sessionrun\u001b[39m(\u001b[39mself\u001b[39m, options, feed_dict, fetch_list, target_list,\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1451'>1452</a>\u001b[0m                         run_metadata):\n\u001b[1;32m-> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1452'>1453</a>\u001b[0m   \u001b[39mreturn\u001b[39;00m tf_session\u001b[39m.\u001b[39;49mTF_SessionRun_wrapper(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_session, options, feed_dict,\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1453'>1454</a>\u001b[0m                                           fetch_list, target_list,\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1454'>1455</a>\u001b[0m                                           run_metadata)\n",
      "\u001b[1;31mResourceExhaustedError\u001b[0m: failed to allocate memory\n\t [[{{node model/h10/attn/ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_float_Cast_1}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode.\n",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[1;31mResourceExhaustedError\u001b[0m                    Traceback (most recent call last)",
      "\u001b[1;32mc:\\Users\\franz\\OneDrive\\Documentos\\GitHub\\Generador-de-abstracts\\AbstractGenerator.ipynb Cell 15'\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> <a href='vscode-notebook-cell:/c%3A/Users/franz/OneDrive/Documentos/GitHub/Generador-de-abstracts/AbstractGenerator.ipynb#ch0000014?line=0'>1</a>\u001b[0m gpt2\u001b[39m.\u001b[39;49mfinetune(sess,\n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/franz/OneDrive/Documentos/GitHub/Generador-de-abstracts/AbstractGenerator.ipynb#ch0000014?line=1'>2</a>\u001b[0m               file_path_es\u001b[39m+\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39m.txt\u001b[39;49m\u001b[39m'\u001b[39;49m,\n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/franz/OneDrive/Documentos/GitHub/Generador-de-abstracts/AbstractGenerator.ipynb#ch0000014?line=2'>3</a>\u001b[0m               model_name\u001b[39m=\u001b[39;49mmodel_name,\n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/franz/OneDrive/Documentos/GitHub/Generador-de-abstracts/AbstractGenerator.ipynb#ch0000014?line=3'>4</a>\u001b[0m               checkpoint_dir\u001b[39m=\u001b[39;49mcheckpoint_dir,   \n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/franz/OneDrive/Documentos/GitHub/Generador-de-abstracts/AbstractGenerator.ipynb#ch0000014?line=4'>5</a>\u001b[0m               steps\u001b[39m=\u001b[39;49m\u001b[39m1000\u001b[39;49m\n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/franz/OneDrive/Documentos/GitHub/Generador-de-abstracts/AbstractGenerator.ipynb#ch0000014?line=5'>6</a>\u001b[0m               )\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\gpt_2_simple\\gpt_2.py:339\u001b[0m, in \u001b[0;36mfinetune\u001b[1;34m(sess, dataset, steps, model_name, model_dir, combine, batch_size, learning_rate, accumulate_gradients, restore_from, run_name, checkpoint_dir, sample_every, sample_length, sample_num, multi_gpu, save_every, print_every, max_checkpoints, use_memory_saving_gradients, only_train_transformer_layers, optimizer, overwrite, reuse)\u001b[0m\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/gpt_2_simple/gpt_2.py?line=336'>337</a>\u001b[0m     sess\u001b[39m.\u001b[39mrun(opt_reset)\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/gpt_2_simple/gpt_2.py?line=337'>338</a>\u001b[0m     \u001b[39mfor\u001b[39;00m _ \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(accumulate_gradients):\n\u001b[1;32m--> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/gpt_2_simple/gpt_2.py?line=338'>339</a>\u001b[0m         sess\u001b[39m.\u001b[39;49mrun(\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/gpt_2_simple/gpt_2.py?line=339'>340</a>\u001b[0m             opt_compute, feed_dict\u001b[39m=\u001b[39;49m{context: sample_batch()})\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/gpt_2_simple/gpt_2.py?line=340'>341</a>\u001b[0m     (v_loss, v_summary) \u001b[39m=\u001b[39m sess\u001b[39m.\u001b[39mrun((opt_apply, summary_loss))\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/gpt_2_simple/gpt_2.py?line=341'>342</a>\u001b[0m \u001b[39melse\u001b[39;00m:\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:967\u001b[0m, in \u001b[0;36mBaseSession.run\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=963'>964</a>\u001b[0m run_metadata_ptr \u001b[39m=\u001b[39m tf_session\u001b[39m.\u001b[39mTF_NewBuffer() \u001b[39mif\u001b[39;00m run_metadata \u001b[39melse\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=965'>966</a>\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m--> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=966'>967</a>\u001b[0m   result \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run(\u001b[39mNone\u001b[39;49;00m, fetches, feed_dict, options_ptr,\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=967'>968</a>\u001b[0m                      run_metadata_ptr)\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=968'>969</a>\u001b[0m   \u001b[39mif\u001b[39;00m run_metadata:\n\u001b[0;32m    <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=969'>970</a>\u001b[0m     proto_data \u001b[39m=\u001b[39m tf_session\u001b[39m.\u001b[39mTF_GetBuffer(run_metadata_ptr)\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1190\u001b[0m, in \u001b[0;36mBaseSession._run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1186'>1187</a>\u001b[0m \u001b[39m# We only want to really perform the run if fetches or targets are provided,\u001b[39;00m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1187'>1188</a>\u001b[0m \u001b[39m# or if the call is a partial run that specifies feeds.\u001b[39;00m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1188'>1189</a>\u001b[0m \u001b[39mif\u001b[39;00m final_fetches \u001b[39mor\u001b[39;00m final_targets \u001b[39mor\u001b[39;00m (handle \u001b[39mand\u001b[39;00m feed_dict_tensor):\n\u001b[1;32m-> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1189'>1190</a>\u001b[0m   results \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_do_run(handle, final_targets, final_fetches,\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1190'>1191</a>\u001b[0m                          feed_dict_tensor, options, run_metadata)\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1191'>1192</a>\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1192'>1193</a>\u001b[0m   results \u001b[39m=\u001b[39m []\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1370\u001b[0m, in \u001b[0;36mBaseSession._do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1366'>1367</a>\u001b[0m   \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_call_tf_sessionprun(handle, feed_dict, fetch_list)\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1368'>1369</a>\u001b[0m \u001b[39mif\u001b[39;00m handle \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m-> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1369'>1370</a>\u001b[0m   \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1370'>1371</a>\u001b[0m                        run_metadata)\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1371'>1372</a>\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1372'>1373</a>\u001b[0m   \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_do_call(_prun_fn, handle, feeds, fetches)\n",
      "File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1396\u001b[0m, in \u001b[0;36mBaseSession._do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1390'>1391</a>\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39m'\u001b[39m\u001b[39monly supports NHWC tensor format\u001b[39m\u001b[39m'\u001b[39m \u001b[39min\u001b[39;00m message:\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1391'>1392</a>\u001b[0m   message \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m (\u001b[39m'\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39mA possible workaround: Try disabling Grappler optimizer\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1392'>1393</a>\u001b[0m               \u001b[39m'\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39mby modifying the config for creating the session eg.\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1393'>1394</a>\u001b[0m               \u001b[39m'\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39msession_config.graph_options.rewrite_options.\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m   <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1394'>1395</a>\u001b[0m               \u001b[39m'\u001b[39m\u001b[39mdisable_meta_optimizer = True\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m-> <a href='file:///c%3A/Users/franz/.conda/envs/tf-gpu/lib/site-packages/tensorflow/python/client/session.py?line=1395'>1396</a>\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mtype\u001b[39m(e)(node_def, op, message)\n",
      "\u001b[1;31mResourceExhaustedError\u001b[0m: Graph execution error:\n\nfailed to allocate memory\n\t [[{{node model/h10/attn/ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_float_Cast_1}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode."
     ]
    }
   ],
   "source": [
    "# Fine-tune on the Spanish corpus; this run hit the out-of-memory error recorded below.\n",
    "gpt2.finetune(sess,\n",
    "              file_path_es + '.txt',\n",
    "              model_name=model_name,\n",
    "              checkpoint_dir=checkpoint_dir,\n",
    "              steps=1000)"
   ]
  },
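  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The Spanish run above fails with a GPU `ResourceExhaustedError` (out of memory). A possible retry, sketched below but untested here, uses options exposed by `gpt_2_simple.finetune` (its full signature is visible in the traceback above) to keep the batch small and reduce the number of trainable variables; the chosen values are assumptions and may still exceed the available GPU memory."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Untested sketch: retry the Spanish fine-tuning with a smaller memory footprint.\n",
    "# The keyword arguments exist in gpt_2_simple.finetune; the specific values are assumptions.\n",
    "tf.compat.v1.reset_default_graph()\n",
    "sess = gpt2.start_tf_sess()\n",
    "gpt2.finetune(sess,\n",
    "              file_path_es + '.txt',\n",
    "              model_name=model_name,\n",
    "              checkpoint_dir=checkpoint_dir,\n",
    "              steps=1000,\n",
    "              batch_size=1,\n",
    "              only_train_transformer_layers=True)"
   ]
  },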
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## en "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "text = \"\"\"Introduction and preliminaries\n",
    "The focus of this paper is decompositions of (k, `)-sparse graphs into edge-disjoint subgraphs\n",
    "that certify sparsity. We use graph to mean a multigraph, possibly with loops. We say that a\n",
    "graph is (k, `)-sparse if no subset of n′ vertices spans more than kn′− ` edges in the graph; a\n",
    "(k, `)-sparse graph with kn′− ` edges is (k, `)-tight. We call the range k ≤ `≤ 2k−1 the upper\n",
    "range of sparse graphs and 0≤ `≤ k the lower range.\n",
    "In this paper, we present efficient algorithms for finding decompositions that certify sparsity\n",
    "in the upper range of `. Our algorithms also apply in the lower range, which was already ad-\n",
    "dressed by [3, 4, 5, 6, 19]. A decomposition certifies the sparsity of a graph if the sparse graphs\n",
    "and graphs admitting the decomposition coincide.\n",
    "Our algorithms are based on a new characterization of sparse graphs, which we call the\n",
    "pebble game with colors. The pebble game with colors is a simple graph construction rule that\n",
    "produces a sparse graph along with a sparsity-certifying decomposition.\n",
    "We define and study a canonical class of pebble game constructions, which correspond to\n",
    "previously studied decompositions of sparse graphs into edge disjoint trees. Our results provide\n",
    "a unifying framework for all the previously known special cases, including Nash-Williams-\n",
    "Tutte and [7, 24]. Indeed, in the lower range, canonical pebble game constructions capture the\n",
    "properties of the augmenting paths used in matroid union and intersection algorithms[5, 6].\n",
    "Since the sparse graphs in the upper range are not known to be unions or intersections of the\n",
    "matroids for which there are efficient augmenting path algorithms, these do not easily apply in\n",
    "∗ Research of both authors funded by the NSF under grants NSF CCF-0430990 and NSF-DARPA CARGO\n",
    "CCR-0310661 to the first author.\n",
    "2 Ileana Streinu, Louis Theran\n",
    "Term Meaning\n",
    "Sparse graph G Every non-empty subgraph on n′ vertices has ≤ kn′− ` edges\n",
    "Tight graph G G = (V,E) is sparse and |V |= n, |E|= kn− `\n",
    "Block H in G G is sparse, and H is a tight subgraph\n",
    "Component H of G G is sparse and H is a maximal block\n",
    "Map-graph Graph that admits an out-degree-exactly-one orientation\n",
    "(k, `)-maps-and-trees Edge-disjoint union of ` trees and (k− `) map-grpahs\n",
    "`Tk Union of ` trees, each vertex is in exactly k of them\n",
    "Set of tree-pieces of an `Tk induced on V ′ ⊂V Pieces of trees in the `Tk spanned by E(V ′)\n",
    "Proper `Tk Every V ′ ⊂V contains ≥ ` pieces of trees from the `Tk\n",
    "Table 1. Sparse graph and decomposition terminology used in this paper.\n",
    "the upper range. Pebble game with colors constructions may thus be considered a strengthening\n",
    "of augmenting paths to the upper range of matroidal sparse graphs.\n",
    "1.1. Sparse graphs\n",
    "\n",
    "ABSTRACT\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate a continuation of the English prompt, truncated at the end-of-text marker.\n",
    "gpt2.generate(sess, prefix=text, truncate=suffix, checkpoint_dir=checkpoint_dir, nsamples=1)"
   ]
  },
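  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To keep the generated abstract for later use instead of printing it, `generate` can return its samples as a list (the same `return_as_list` option is used in the Gradio cell below). A minimal sketch:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Capture the generated text and strip the prompt to keep only the abstract.\n",
    "samples = gpt2.generate(sess, prefix=text, truncate=suffix, checkpoint_dir=checkpoint_dir, nsamples=1, return_as_list=True)\n",
    "generated_abstract = samples[0][len(text):]\n",
    "print(generated_abstract[:500])"
   ]
  },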
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## es"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "text = \"\"\"El foco de este documento son las descomposicións de (k, `)-sparse gráficos en bordes-disjunto subgraphs\n",
    "que certifique la escasez. Usamos el gráfico para significar un múltiplo, posiblemente con bucles. Nosotros decimos que un\n",
    "grafo es (k, `)-sparse si ningún subconjunto de n′ vértices abarca más de kn ` bordes en el gráfico; a\n",
    "(k, `)-sparse gráfico con kn ` bordes es (k, `)-estrechado. Llamamos al rango k ≤  2k−1 el superior\n",
    "rango de gráficos escasos y 0≤  k el rango inferior.\n",
    "En este artículo, presentamos algoritmos eficientes para encontrar descomposicións que certifiquen la escasez\n",
    "en el rango superior de `. Nuestros algoritmos también se aplican en el rango inferior, que ya era ad-\n",
    "vestido por [3, 4, 5, 6, 19]. Una descomposición certifica la escasez de un gráfico si los gráficos dispersos\n",
    "y los gráficos que admiten la descomposición coinciden.\n",
    "Nuestros algoritmos se basan en una nueva caracterización de gráficos escasos, que llamamos el\n",
    "juego de guijarros con colores. El juego de guijarros con colores es una regla de construcción de gráficos simples que\n",
    "produce un gráfico escaso junto con una descomposición certificadora de la escasez.\n",
    "Definimos y estudiamos una clase canónica de construcciones de juego de guijarros, que corresponden a\n",
    "previamente estudiado las descomposiciones de los gráficos escasos en los árboles disjuntos del borde. Nuestros resultados proporcionan\n",
    "un marco unificador para todos los casos especiales conocidos anteriormente, incluidos Nash-Williams-\n",
    "Tutte y [7, 24]. De hecho, en el rango inferior, las construcciones canónicas de juego de guijarros capturan la\n",
    "propiedades de las rutas de aumento utilizadas en los algoritmos de unión de matroides y de intersección[5, 6].\n",
    "Dado que los gráficos escasos en el rango superior no se sabe que son uniones o intersecciones de la\n",
    "matroides para los que hay algoritmos de ruta de aumento eficiente, estos no se aplican fácilmente en\n",
    "* Investigación de ambos autores financiada por la NSF bajo subvenciones NSF CCF-0430990 y NSF-DARPA CARGO\n",
    "CCR-0310661 al primer autor.\n",
    "2 Ileana Streinu, Louis Theran\n",
    "Significado del término\n",
    "Gráfico escaso G Cada subgrafo no vacío en n′ vértices tiene ≤ kn ` bordes\n",
    "El gráfico ajustado G G = (V,E) es escaso y V = n, E= kn− `\n",
    "El bloque H en G G es escaso, y H es un subgrafo apretado\n",
    "El componente H de G G es escaso y H es un bloqueo máximo\n",
    "Gráfico cartográfico que admite una orientación de grado-exactamente-uno\n",
    "(k, `)-maps-and-trees Edge-disjunt union de ` árboles y (k- `) map-grpahs\n",
    "`Tk Unión de ` árboles, cada vértice está exactamente en k de ellos\n",
    "Conjunto de piezas arbóreas de un `Tk inducido en V ′ ́V Piezas de árboles en el `Tk extendido por E(V ′)\n",
    "`Tk Apropiado Cada V ′ V contiene ≥ ` pedazos de árboles de la `Tk\n",
    "Cuadro 1 Gráfico escaso y terminología de descomposición utilizada en este artículo.\n",
    "el rango superior. Pebble juego con construcciones de colores por lo tanto puede ser considerado un fortalecimiento\n",
    "de caminos de aumento a la gama superior de gráficos de la escasez matroidal.\n",
    "1.1. Gráficos escasos\n",
    "Un gráfico es (k, `)-sparse si para cualquier subgrafo no vacío con bordes m′ y n′ vértices, m′ ≤\n",
    "kn `. Observamos que esta condición implica que 0 ≤ ` ≤ 2k− 1, y a partir de ahora en este\n",
    "Haremos esta suposición. Un gráfico escaso que tiene n vértices y exactamente bordes kn\n",
    "se llama apretado.\n",
    "Para un gráfico G = (V,E), y V ′  V, utilizamos el intervalo de notación (V ′) para el número de bordes\n",
    "en el subgráfico inducido por V ′. En un gráfico dirigido, out(V ′) es el número de bordes con la cola\n",
    "en V ′ y la cabeza en V −V ′; para un subgráfico inducido por V ′, llamamos a tal borde un borde superior.\n",
    "Hay dos tipos importantes de subgrafías de gráficos escasos. Un bloque es un subgrafo apretado de\n",
    "un gráfico escaso. Un componente es un bloque máximo.\n",
    "La Tabla 1 resume la escasa terminología gráfica utilizada en este artículo.\n",
    "1.2. Descomposiciónes de certificación de la sparsidad\n",
    "Un k-arborescencia es un gráfico que admite una descomposición en k borde-desjunto que abarca los árboles.\n",
    "La Figura 1(a) muestra un ejemplo de una 3-arborescencia. Se describen los gráficos k-arborescentes\n",
    "por los conocidos teoremas de Tutte [23] y Nash-Williams [17] como exactamente el (k,k) apretado\n",
    "gráficos.\n",
    "ABSTRACT\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate a continuation of the Spanish prompt, truncated at the end-of-text marker.\n",
    "gpt2.generate(sess, prefix=text, truncate=suffix, checkpoint_dir=checkpoint_dir, nsamples=1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Gradio interface"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gradio as gr\n",
    "\n",
    "def generateAbstract(text):\n",
    "    # Build a fresh graph and session per request, then load the fine-tuned weights.\n",
    "    tf.compat.v1.reset_default_graph()\n",
    "    sess = gpt2.start_tf_sess()\n",
    "    gpt2.load_gpt2(sess, checkpoint_dir=checkpoint_dir, run_name='run1')\n",
    "    txt = gpt2.generate(sess, prefix=str(text) + \"\\nABSTRACT\", return_as_list=True, truncate=suffix, checkpoint_dir=checkpoint_dir, nsamples=1)[0]\n",
    "    # Return only the generated abstract, starting at the ABSTRACT marker.\n",
    "    return txt[txt.find('ABSTRACT'):]\n",
    "\n",
    "iface = gr.Interface(fn=generateAbstract, inputs=gr.inputs.Textbox(lines=10, placeholder=\"text\"), outputs=\"textbox\")\n",
    "iface.launch(debug=True)"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "53fbdc69e3e12c371950068c144423682c30d04ec68c2bd46937202e33e0058d"
  },
  "kernelspec": {
   "display_name": "Python 3.7.11 ('receta')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}