{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{user.username}\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"Salesforce/codet5-base\")\n",
    "model = AutoModelForSeq2SeqLM.from_pretrained(\"Salesforce/codet5-base\")\n",
    "\n",
    "text = \"def greet(user): print(f'hello <extra_id_0>!')\"\n",
    "input_ids = tokenizer(text, return_tensors=\"pt\").input_ids\n",
    "\n",
    "# simply generate a single sequence\n",
    "generated_ids = model.generate(input_ids, max_length=8)\n",
    "print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))\n",
    "# this prints \"{user.username}\""
   ]
  },
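  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "CodeT5 fills the `<extra_id_0>` sentinel span. To see the raw output structure (sentinel and `<pad>`/`</s>` tokens) instead of the cleaned text, decode without `skip_special_tokens` (a quick check, assuming the cell above has run):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# raw decode: the fill is wrapped in sentinel/padding tokens\n",
    "print(tokenizer.decode(generated_ids[0], skip_special_tokens=False))"
   ]
  },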
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ast\n",
    "\n",
    "def filter_codes(codes):\n",
    "    codes = list(set(codes))\n",
    "    new_codes = []\n",
    "    for code in codes:\n",
    "        if ';' in code:\n",
    "            code = code[code.index(';'):]\n",
    "        try:\n",
    "            ast.parse(code)\n",
    "        except Exception:\n",
    "            continue\n",
    "        new_codes.append(code)\n",
    "    return new_codes"
   ]
  },
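  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check of `filter_codes` on hand-written candidates (hypothetical inputs, not model output): duplicates collapse, anything after a `;` is cut off, and unparsable fills are dropped."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "filter_codes(['1, 2', '1, 2', 'i = ]', '0; x = 3', 'g, i'])\n",
    "# expected, in some order: ['1, 2', '0', 'g, i']"
   ]
  },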
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "def temp_value(value):\n",
    "    if value[0] == '[' and value[-1] == ']':\n",
    "        return '[<extra_id_0>]'\n",
    "    if value[0] == '\"' and value[-1] == '\"':\n",
    "        return '\"<extra_id_0>\"'\n",
    "    if value[0] == \"'\" and value[-1] == \"'\":\n",
    "        return \"'<extra_id_0>'\"\n",
    "    if value[0] == '{' and value[-1] == '}':\n",
    "        return '{<extra_id_0>}'\n",
    "    return '<extra_id_0>'\n",
    "\n",
    "def temp_var(var):\n",
    "    value = var[4:]\n",
    "    return var[:4] + temp_value(value)"
   ]
  },
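  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For example, list contents are masked while the brackets stay, and bare values are masked whole:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(temp_var('l = [1, 100, 1]'))  # l = [<extra_id_0>]\n",
    "print(temp_var('g = 100'))          # g = <extra_id_0>"
   ]
  },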
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_code(start, code):\n",
    "    return f'def main(): {\"; \".join(start)}; {code}; return {\", \".join([v.split()[0] for v in start])}'"
   ]
  },
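  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For example, a masked start state plus the code line becomes one snippet for the model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "make_code(['g = 100', 'i = 1', 'l = [<extra_id_0>]'], 'g += l[i]')\n",
    "# \"def main(): g = 100; i = 1; l = [<extra_id_0>]; g += l[i]; return g, i, l\""
   ]
  },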
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "def alt_from_code(code):\n",
    "    input_ids = tokenizer(code, return_tensors=\"pt\").input_ids\n",
    "    generated_ids = model.generate(input_ids, num_return_sequences=100, max_length=20, do_sample=True, temperature=1.0)\n",
    "    return filter_codes(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))"
   ]
  },
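  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Sampling is stochastic, so `alt_from_code` returns a different set of parsable fills on every run; with 100 samples a call takes a few seconds on CPU."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output varies from run to run\n",
    "alt_from_code('def main(): g = 100; i = 1; l = [<extra_id_0>]; g += l[i]; return g, i, l')[:5]"
   ]
  },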
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "import errno\n",
    "import os\n",
    "import signal\n",
    "import functools\n",
    "\n",
    "class TimeoutError(Exception):\n",
    "    pass\n",
    "\n",
    "def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\n",
    "    def decorator(func):\n",
    "        def _handle_timeout(signum, frame):\n",
    "            raise TimeoutError(error_message)\n",
    "\n",
    "        @functools.wraps(func)\n",
    "        def wrapper(*args, **kwargs):\n",
    "            signal.signal(signal.SIGALRM, _handle_timeout)\n",
    "            signal.alarm(seconds)\n",
    "            try:\n",
    "                result = func(*args, **kwargs)\n",
    "            finally:\n",
    "                signal.alarm(0)\n",
    "            return result\n",
    "\n",
    "        return wrapper\n",
    "\n",
    "    return decorator"
   ]
  },
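  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick check of the decorator (SIGALRM-based, so Unix-only): a call that would loop forever is aborted after one second."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@timeout(seconds=1)\n",
    "def spin():\n",
    "    while True:\n",
    "        pass\n",
    "\n",
    "try:\n",
    "    spin()\n",
    "except TimeoutError as e:\n",
    "    print('timed out:', e)"
   ]
  },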
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "def state_dict_to_str(state):\n",
    "    vals = []\n",
    "    for k, v in state.items():\n",
    "        vals.append(\n",
    "            f'{k} = {v}'\n",
    "        )\n",
    "    vals = sorted(vals)\n",
    "    return '; '.join(vals)"
   ]
  },
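  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The rendering is sorted by variable name, so two equal states always produce the same string:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "state_dict_to_str({'l': [1, 2], 'g': 100})  # 'g = 100; l = [1, 2]'"
   ]
  },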
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "@timeout(seconds=3)\n",
    "def trace_code(start_state: str, code: str):\n",
    "    state = {}\n",
    "    try:\n",
    "        exec(start_state, {}, state)\n",
    "    except Exception:\n",
    "        return\n",
    "    start_state = dict(state)\n",
    "    try:\n",
    "        exec(code, {}, state)\n",
    "    except Exception:\n",
    "        return\n",
    "    return state_dict_to_str(start_state), code, state_dict_to_str(state)"
   ]
  },
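  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`trace_code` returns a `(start, code, end)` triple when both the start state and the code execute, and `None` otherwise:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(trace_code('g = 100; i = 1; l = [1, 100, 1]', 'g += l[i]'))\n",
    "# ('g = 100; i = 1; l = [1, 100, 1]', 'g += l[i]', 'g = 200; i = 1; l = [1, 100, 1]')\n",
    "print(trace_code('g = 100', 'g += l[i]'))  # None: l is undefined"
   ]
  },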
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'start': 'g = 100; i = 1; l = [1, 100, 1]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 200; i = 1; l = [1, 100, 1]'},\n",
       " {'start': 'g = 100; i = 1; l = [1, 1]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 101; i = 1; l = [1, 1]'},\n",
       " {'start': 'g = 100; i = 1; l = [1, 1, 1]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 101; i = 1; l = [1, 1, 1]'},\n",
       " {'start': 'g = 100; i = 1; l = [100, 100]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 200; i = 1; l = [100, 100]'},\n",
       " {'start': 'g = 100; i = 1; l = [50, 50, 50, 40]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 150; i = 1; l = [50, 50, 50, 40]'},\n",
       " {'start': 'g = 100; i = 1; l = [0, 10]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 110; i = 1; l = [0, 10]'},\n",
       " {'start': 'g = 100; i = 1; l = [100, 900, 10, 10]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 1000; i = 1; l = [100, 900, 10, 10]'},\n",
       " {'start': 'g = 100; i = 1; l = [1, 1, 2]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 101; i = 1; l = [1, 1, 2]'},\n",
       " {'start': 'g = 100; i = 1; l = [100, 100, 100, 0, 0]',\n",
       "  'code': 'g += l[i]',\n",
       "  'end': 'g = 200; i = 1; l = [100, 100, 100, 0, 0]'}]"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_working_alts(other_vars, var_alts, code):\n",
    "    rows = []\n",
    "    for alt in var_alts:\n",
    "        start = other_vars + [alt]\n",
    "        result = trace_code('; '.join(start), code)\n",
    "        if result:\n",
    "            rows.append({'start': result[0], 'code': result[1], 'end': result[2]})\n",
    "    return rows\n",
    "\n",
    "test_alt_vars = [\n",
    "    'l = [1, 100, 1]',\n",
    "    'l = [1, 1]',\n",
    "    'l = [f]',\n",
    "    'l = [1, 1, 1,]',\n",
    "    'l = [i = 10]',\n",
    "    'l = [100, 100]',\n",
    "    'l = [l[i].max(), l[i].min()]',\n",
    "    'l = [1]',\n",
    "    'l = [50, 50, 50, 40]',\n",
    "    'l = [0, 10]',\n",
    "    'l = [100, 900, 10, 10]',\n",
    "    'l = [i, 1, 2]',\n",
    "    'l = [100, 100, 100, 0, 0]'\n",
    "]\n",
    "get_working_alts(['g = 100', 'i = 1'], test_alt_vars, 'g += l[i]')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['g = 100', 'i = 1'],\n",
       " ['l = [1, 2]',\n",
       "  'l = [g, i, j]',\n",
       "  'l = [i,g]',\n",
       "  'l = [k, j, k2]',\n",
       "  'l = [1.0, 0.01, 0.01, 0.01]',\n",
       "  'l = [k, j]',\n",
       "  'l = [j]',\n",
       "  'l = [r, t, d]',\n",
       "  'l = [g, i, l]',\n",
       "  'l = [1]',\n",
       "  'l = [l]',\n",
       "  'l = [i, 1]',\n",
       "  'l = [g + h*g + i*i]',\n",
       "  'l = [g, i, 1]',\n",
       "  'l = [b[i], b [ j ]]',\n",
       "  'l = [2, 3, 3,]',\n",
       "  'l = [a[g, e, c]]',\n",
       "  'l = [b [ a ] [b[3]]]',\n",
       "  'l = [g - 1, i]',\n",
       "  'l = [2]',\n",
       "  'l = [5]',\n",
       "  'l = [6, 5, 3, 2]',\n",
       "  'l = [b[g], b[i], b[g]]',\n",
       "  'l = [b[i][j]]',\n",
       "  'l = [c[j ], c[j+1 ]]',\n",
       "  'l = [i, g * g]',\n",
       "  'l = [g]',\n",
       "  'l = [g, i, f]',\n",
       "  'l = [a [ i ]]',\n",
       "  'l = [1, 1, 1]',\n",
       "  'l = [1, 4, 4]',\n",
       "  'l = [b [j ]]',\n",
       "  'l = [g, i]',\n",
       "  'l = [1, 0, 0]',\n",
       "  'l = [i, l]',\n",
       "  'l = [0.0]',\n",
       "  'l = [i]',\n",
       "  'l = [g, i, 0]',\n",
       "  'l = [{ i }]',\n",
       "  'l = [i, v[0], v[1],l]',\n",
       "  'l = [c[j ],]',\n",
       "  'l = [0]',\n",
       "  'l = [a [ 0 ]]',\n",
       "  'l = [d, g, i]',\n",
       "  'l = [g, g, i]',\n",
       "  'l = [b[j ]]'])"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_alts_for_var(start_vars, alt_i, code):\n",
    "    start_vars[alt_i] = temp_var(start_vars[alt_i])\n",
    "    code = make_code(start_vars, row['code'])\n",
    "    var_alts = alt_from_code(code)\n",
    "    alt_var_temp = start_vars[alt_i]\n",
    "    del start_vars[alt_i]\n",
    "    return start_vars, [alt_var_temp.replace('<extra_id_0>', alt) for alt in var_alts]\n",
    "\n",
    "alt_start_vars, var_alts = get_alts_for_var(\n",
    "    ['g = 100', 'i = 1', 'l = [100, 100, 0, 0, -100, -100]'], 2, 'g += l[i]'\n",
    ")\n",
    "alt_start_vars, var_alts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(29,\n",
       " [{'start': 'g = 50; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
       "   'code': 'g += l[i]',\n",
       "   'end': 'g = 150; i = 1; l = [100, 100, 0, 0, -100, -100]'},\n",
       "  {'start': 'g = 10; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
       "   'code': 'g += l[i]',\n",
       "   'end': 'g = 110; i = 1; l = [100, 100, 0, 0, -100, -100]'},\n",
       "  {'start': 'g = -3; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
       "   'code': 'g += l[i]',\n",
       "   'end': 'g = 97; i = 1; l = [100, 100, 0, 0, -100, -100]'}])"
      ]
     },
     "execution_count": 68,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def make_alternatives(row):\n",
    "    start_vars = row['start'].split('; ')\n",
    "\n",
    "    alts = []\n",
    "    for i in range(len(start_vars)):\n",
    "        alt_start_vars, var_alts = get_alts_for_var(list(start_vars), i, row['code'])\n",
    "        alts += get_working_alts(alt_start_vars, var_alts, row['code'])\n",
    "\n",
    "    return alts\n",
    "\n",
    "alts = make_alternatives(\n",
    "    {'start': 'g = 100; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
    "    'code': 'g += l[i]',\n",
    "    'end': 'g = 200; i = 1; l = [100, 100, 0, 0, -100, -100]'}\n",
    ")\n",
    "len(alts), alts[:3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 1/8968897 [00:09<24001:13:52,  9.63s/it]<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
      "<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
      "<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
      "  0%|          | 22/8968897 [02:45<14831:12:33,  5.95s/it]<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
      "<string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?\n",
      "<string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?\n",
      "  0%|          | 34/8968897 [04:26<26565:33:36, 10.66s/it]<string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?\n",
      "<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
      "  0%|          | 44/8968897 [10:01<34031:25:54, 13.66s/it] \n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Input \u001b[0;32mIn [69]\u001b[0m, in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m id_, line \u001b[38;5;129;01min\u001b[39;00m tqdm(\u001b[38;5;28menumerate\u001b[39m(f), total\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m8968897\u001b[39m):\n\u001b[1;32m     19\u001b[0m     row \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(line)\n\u001b[0;32m---> 20\u001b[0m     alts \u001b[38;5;241m=\u001b[39m \u001b[43mmake_alternatives\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrow\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     21\u001b[0m     new_rows \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m [row] \u001b[38;5;241m+\u001b[39m alts\n\u001b[1;32m     22\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m new_rows \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(new_rows) \u001b[38;5;241m%\u001b[39m \u001b[38;5;241m10_000\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
      "Input \u001b[0;32mIn [68]\u001b[0m, in \u001b[0;36mmake_alternatives\u001b[0;34m(row)\u001b[0m\n\u001b[1;32m      4\u001b[0m alts \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m      5\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(start_vars)):\n\u001b[0;32m----> 6\u001b[0m     alt_start_vars, var_alts \u001b[38;5;241m=\u001b[39m \u001b[43mget_alts_for_var\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlist\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mstart_vars\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mi\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrow\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mcode\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      7\u001b[0m     alts \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m get_working_alts(alt_start_vars, var_alts, row[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcode\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m      9\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m alts\n",
      "Input \u001b[0;32mIn [67]\u001b[0m, in \u001b[0;36mget_alts_for_var\u001b[0;34m(start_vars, alt_i, code)\u001b[0m\n\u001b[1;32m      2\u001b[0m start_vars[alt_i] \u001b[38;5;241m=\u001b[39m temp_var(start_vars[alt_i])\n\u001b[1;32m      3\u001b[0m code \u001b[38;5;241m=\u001b[39m make_code(start_vars, row[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcode\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[0;32m----> 4\u001b[0m var_alts \u001b[38;5;241m=\u001b[39m \u001b[43malt_from_code\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcode\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      5\u001b[0m alt_var_temp \u001b[38;5;241m=\u001b[39m start_vars[alt_i]\n\u001b[1;32m      6\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m start_vars[alt_i]\n",
      "Input \u001b[0;32mIn [62]\u001b[0m, in \u001b[0;36malt_from_code\u001b[0;34m(code)\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21malt_from_code\u001b[39m(code):\n\u001b[1;32m      2\u001b[0m     input_ids \u001b[38;5;241m=\u001b[39m tokenizer(code, return_tensors\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39minput_ids\n\u001b[0;32m----> 3\u001b[0m     generated_ids \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_return_sequences\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m20\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdo_sample\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemperature\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1.0\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m      4\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m filter_codes(tokenizer\u001b[38;5;241m.\u001b[39mbatch_decode(generated_ids, skip_special_tokens\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m))\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/autograd/grad_mode.py:28\u001b[0m, in \u001b[0;36m_DecoratorContextManager.__call__.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m     25\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m     26\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdecorate_context\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m     27\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m():\n\u001b[0;32m---> 28\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/generation_utils.py:1200\u001b[0m, in \u001b[0;36mGenerationMixin.generate\u001b[0;34m(self, inputs, max_length, min_length, do_sample, early_stopping, num_beams, temperature, top_k, top_p, repetition_penalty, bad_words_ids, bos_token_id, pad_token_id, eos_token_id, length_penalty, no_repeat_ngram_size, encoder_no_repeat_ngram_size, num_return_sequences, max_time, max_new_tokens, decoder_start_token_id, use_cache, num_beam_groups, diversity_penalty, prefix_allowed_tokens_fn, logits_processor, stopping_criteria, output_attentions, output_hidden_states, output_scores, return_dict_in_generate, forced_bos_token_id, forced_eos_token_id, remove_invalid_values, synced_gpus, **model_kwargs)\u001b[0m\n\u001b[1;32m   1192\u001b[0m     input_ids, model_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_expand_inputs_for_generation(\n\u001b[1;32m   1193\u001b[0m         input_ids,\n\u001b[1;32m   1194\u001b[0m         expand_size\u001b[38;5;241m=\u001b[39mnum_return_sequences,\n\u001b[1;32m   1195\u001b[0m         is_encoder_decoder\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mis_encoder_decoder,\n\u001b[1;32m   1196\u001b[0m         \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs,\n\u001b[1;32m   1197\u001b[0m     )\n\u001b[1;32m   1199\u001b[0m     \u001b[38;5;66;03m# 12. run sample\u001b[39;00m\n\u001b[0;32m-> 1200\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1201\u001b[0m \u001b[43m        \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1202\u001b[0m \u001b[43m        \u001b[49m\u001b[43mlogits_processor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlogits_processor\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1203\u001b[0m \u001b[43m        \u001b[49m\u001b[43mlogits_warper\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlogits_warper\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1204\u001b[0m \u001b[43m        \u001b[49m\u001b[43mstopping_criteria\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstopping_criteria\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1205\u001b[0m \u001b[43m        \u001b[49m\u001b[43mpad_token_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpad_token_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1206\u001b[0m \u001b[43m        \u001b[49m\u001b[43meos_token_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43meos_token_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1207\u001b[0m \u001b[43m        \u001b[49m\u001b[43moutput_scores\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_scores\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1208\u001b[0m \u001b[43m        \u001b[49m\u001b[43mreturn_dict_in_generate\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict_in_generate\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1209\u001b[0m \u001b[43m        \u001b[49m\u001b[43msynced_gpus\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msynced_gpus\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1210\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1211\u001b[0m \u001b[43m    
\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1213\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m is_beam_gen_mode:\n\u001b[1;32m   1214\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m num_return_sequences \u001b[38;5;241m>\u001b[39m num_beams:\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/generation_utils.py:1710\u001b[0m, in \u001b[0;36mGenerationMixin.sample\u001b[0;34m(self, input_ids, logits_processor, stopping_criteria, logits_warper, max_length, pad_token_id, eos_token_id, output_attentions, output_hidden_states, output_scores, return_dict_in_generate, synced_gpus, **model_kwargs)\u001b[0m\n\u001b[1;32m   1707\u001b[0m model_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprepare_inputs_for_generation(input_ids, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs)\n\u001b[1;32m   1709\u001b[0m \u001b[38;5;66;03m# forward pass to get next token\u001b[39;00m\n\u001b[0;32m-> 1710\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1711\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1712\u001b[0m \u001b[43m    \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m   1713\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1714\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1715\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1717\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m synced_gpus \u001b[38;5;129;01mand\u001b[39;00m this_peer_finished:\n\u001b[1;32m   1718\u001b[0m     cur_len \u001b[38;5;241m=\u001b[39m cur_len \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1101\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:1616\u001b[0m, in \u001b[0;36mT5ForConditionalGeneration.forward\u001b[0;34m(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, head_mask, decoder_head_mask, cross_attn_head_mask, encoder_outputs, past_key_values, inputs_embeds, decoder_inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m   1613\u001b[0m         decoder_attention_mask \u001b[38;5;241m=\u001b[39m decoder_attention_mask\u001b[38;5;241m.\u001b[39mto(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdecoder\u001b[38;5;241m.\u001b[39mfirst_device)\n\u001b[1;32m   1615\u001b[0m \u001b[38;5;66;03m# Decode\u001b[39;00m\n\u001b[0;32m-> 1616\u001b[0m decoder_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1617\u001b[0m \u001b[43m    \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_input_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1618\u001b[0m \u001b[43m    \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1619\u001b[0m \u001b[43m    \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_inputs_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1620\u001b[0m \u001b[43m    \u001b[49m\u001b[43mpast_key_values\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_values\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1621\u001b[0m \u001b[43m    \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1622\u001b[0m \u001b[43m    \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1623\u001b[0m \u001b[43m    \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1624\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcross_attn_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1625\u001b[0m \u001b[43m    \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1626\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1627\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1628\u001b[0m \u001b[43m    \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1629\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1631\u001b[0m sequence_output \u001b[38;5;241m=\u001b[39m decoder_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m   1633\u001b[0m \u001b[38;5;66;03m# Set device for model parallelism\u001b[39;00m\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1101\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:1011\u001b[0m, in \u001b[0;36mT5Stack.forward\u001b[0;34m(self, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, inputs_embeds, head_mask, cross_attn_head_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m    998\u001b[0m     layer_outputs \u001b[38;5;241m=\u001b[39m checkpoint(\n\u001b[1;32m    999\u001b[0m         create_custom_forward(layer_module),\n\u001b[1;32m   1000\u001b[0m         hidden_states,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m   1008\u001b[0m         \u001b[38;5;28;01mNone\u001b[39;00m,  \u001b[38;5;66;03m# past_key_value is always None with gradient checkpointing\u001b[39;00m\n\u001b[1;32m   1009\u001b[0m     )\n\u001b[1;32m   1010\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1011\u001b[0m     layer_outputs \u001b[38;5;241m=\u001b[39m \u001b[43mlayer_module\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1012\u001b[0m \u001b[43m        \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1013\u001b[0m \u001b[43m        \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mextended_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1014\u001b[0m \u001b[43m        \u001b[49m\u001b[43mposition_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1015\u001b[0m \u001b[43m        \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1016\u001b[0m \u001b[43m        \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_extended_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1017\u001b[0m \u001b[43m        \u001b[49m\u001b[43mencoder_decoder_position_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_decoder_position_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1018\u001b[0m \u001b[43m        \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlayer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1019\u001b[0m \u001b[43m        \u001b[49m\u001b[43mcross_attn_layer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_layer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1020\u001b[0m \u001b[43m        \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1021\u001b[0m \u001b[43m        \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1022\u001b[0m \u001b[43m        \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1023\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1025\u001b[0m \u001b[38;5;66;03m# layer_outputs is a tuple with:\u001b[39;00m\n\u001b[1;32m   1026\u001b[0m \u001b[38;5;66;03m# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)\u001b[39;00m\n\u001b[1;32m   1027\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m 
use_cache \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m:\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1101\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:672\u001b[0m, in \u001b[0;36mT5Block.forward\u001b[0;34m(self, hidden_states, attention_mask, position_bias, encoder_hidden_states, encoder_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, past_key_value, use_cache, output_attentions, return_dict)\u001b[0m\n\u001b[1;32m    669\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    670\u001b[0m     query_length \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m--> 672\u001b[0m cross_attention_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlayer\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    673\u001b[0m \u001b[43m    \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    674\u001b[0m \u001b[43m    \u001b[49m\u001b[43mkey_value_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    675\u001b[0m \u001b[43m    \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    676\u001b[0m \u001b[43m    \u001b[49m\u001b[43mposition_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_decoder_position_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    677\u001b[0m \u001b[43m    \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_layer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    678\u001b[0m \u001b[43m    \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_past_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    679\u001b[0m \u001b[43m    \u001b[49m\u001b[43mquery_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    680\u001b[0m \u001b[43m    \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    681\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    682\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    683\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m cross_attention_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m    685\u001b[0m \u001b[38;5;66;03m# clamp inf values to enable fp16 training\u001b[39;00m\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1101\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:587\u001b[0m, in \u001b[0;36mT5LayerCrossAttention.forward\u001b[0;34m(self, hidden_states, key_value_states, attention_mask, position_bias, layer_head_mask, past_key_value, use_cache, query_length, output_attentions)\u001b[0m\n\u001b[1;32m    574\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m    575\u001b[0m     \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m    576\u001b[0m     hidden_states,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    584\u001b[0m     output_attentions\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m    585\u001b[0m ):\n\u001b[1;32m    586\u001b[0m     normed_hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayer_norm(hidden_states)\n\u001b[0;32m--> 587\u001b[0m     attention_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mEncDecAttention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    588\u001b[0m \u001b[43m        \u001b[49m\u001b[43mnormed_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    589\u001b[0m \u001b[43m        \u001b[49m\u001b[43mmask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    590\u001b[0m \u001b[43m        \u001b[49m\u001b[43mkey_value_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkey_value_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    591\u001b[0m \u001b[43m        \u001b[49m\u001b[43mposition_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    592\u001b[0m \u001b[43m        \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlayer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    593\u001b[0m \u001b[43m        \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    594\u001b[0m \u001b[43m        \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    595\u001b[0m \u001b[43m        \u001b[49m\u001b[43mquery_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    596\u001b[0m \u001b[43m        \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    597\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    598\u001b[0m     layer_output \u001b[38;5;241m=\u001b[39m hidden_states \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdropout(attention_output[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m    599\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m (layer_output,) \u001b[38;5;241m+\u001b[39m attention_output[\u001b[38;5;241m1\u001b[39m:]  \u001b[38;5;66;03m# add attentions if we output them\u001b[39;00m\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1101\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:525\u001b[0m, in \u001b[0;36mT5Attention.forward\u001b[0;34m(self, hidden_states, mask, key_value_states, position_bias, past_key_value, layer_head_mask, query_length, use_cache, output_attentions)\u001b[0m\n\u001b[1;32m    522\u001b[0m     attn_weights \u001b[38;5;241m=\u001b[39m attn_weights \u001b[38;5;241m*\u001b[39m layer_head_mask\n\u001b[1;32m    524\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m unshape(torch\u001b[38;5;241m.\u001b[39mmatmul(attn_weights, value_states))  \u001b[38;5;66;03m# (batch_size, seq_length, dim)\u001b[39;00m\n\u001b[0;32m--> 525\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mo\u001b[49m\u001b[43m(\u001b[49m\u001b[43mattn_output\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    527\u001b[0m present_key_value_state \u001b[38;5;241m=\u001b[39m (key_states, value_states) \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mis_decoder \u001b[38;5;129;01mand\u001b[39;00m use_cache) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m    528\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (attn_output,) \u001b[38;5;241m+\u001b[39m (present_key_value_state,) \u001b[38;5;241m+\u001b[39m (position_bias,)\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1101\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/linear.py:103\u001b[0m, in \u001b[0;36mLinear.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    102\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 103\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/functional.py:1848\u001b[0m, in \u001b[0;36mlinear\u001b[0;34m(input, weight, bias)\u001b[0m\n\u001b[1;32m   1846\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_variadic(\u001b[38;5;28minput\u001b[39m, weight, bias):\n\u001b[1;32m   1847\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(linear, (\u001b[38;5;28minput\u001b[39m, weight, bias), \u001b[38;5;28minput\u001b[39m, weight, bias\u001b[38;5;241m=\u001b[39mbias)\n\u001b[0;32m-> 1848\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_C\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_nn\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import json, gzip\n",
    "from tqdm import  tqdm\n",
    "\n",
    "\n",
    "with open('data.single_start_alts.jsonl.gz', 'w') as f:\n",
    "    f.write('')\n",
    "\n",
    "\n",
    "def write_rows_compressed(rows):\n",
    "    rows = [json.dumps(r) for r in rows]\n",
    "    with gzip.open('data.alts.jsonl.gz', 'ab') as f:\n",
    "        f.write('\\n'.join(rows).encode() + b'\\n')\n",
    "\n",
    "\n",
    "# currently takes ~10 seconds per iteration for 89,68,897 samples so 1k days\n",
    "with open('../data.jsonl', 'r', encoding=\"utf-8\") as f:\n",
    "    new_rows = []\n",
    "    for id_, line in tqdm(enumerate(f), total=8968897):\n",
    "        row = json.loads(line)\n",
    "        alts = make_alternatives(row)\n",
    "        new_rows += [row] + alts\n",
    "        if new_rows and len(new_rows) % 10_000 == 0:\n",
    "            write_rows_compressed(new_rows)\n",
    "            new_rows = []\n",
    "            break\n"
   ]
  },
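  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The rows written so far can be streamed back for inspection (assuming the flush above ran):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gzip, json\n",
    "\n",
    "with gzip.open('data.alts.jsonl.gz', 'rt') as f:\n",
    "    for i, line in enumerate(f):\n",
    "        print(json.loads(line))\n",
    "        if i >= 2:\n",
    "            break"
   ]
  },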
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['1, 2',\n",
       " '1, 0',\n",
       " '1, 1, 1, 1',\n",
       " '1, 1',\n",
       " '\"ab\",i,2',\n",
       " '0, 1',\n",
       " '8',\n",
       " '\"s\", \"m\", \"v\", \"r \"',\n",
       " 'g, - p',\n",
       " '1, 1, 1,',\n",
       " '7, 5, 6',\n",
       " 'g, i, l',\n",
       " '1',\n",
       " '1,1,2,3',\n",
       " '1, 2, 2',\n",
       " '\"ab\", \"aa\", \"ab\", \"aa\"',\n",
       " '1, 2, 3, 4',\n",
       " '\"ab\",\"ace\",\"ae\",\"ad\"',\n",
       " 'i, i',\n",
       " '\"ab\", \"a\", \"e\"',\n",
       " '100, 100, 100',\n",
       " '1,3,3,4,5,6,7,9,0',\n",
       " '\" a\"',\n",
       " '0, 1, 2',\n",
       " '0, 1, 1, 1, 0',\n",
       " '\"ab\", \"bal,ca\"',\n",
       " 'g,i, l [ i ]',\n",
       " '1, 3,4, 6',\n",
       " 'a',\n",
       " '1, 2, 3',\n",
       " '9, 9',\n",
       " '( 1)',\n",
       " '2, - 1, - 1',\n",
       " '0 | 1 | 0|0',\n",
       " '{ 1 }',\n",
       " 'i - 1',\n",
       " 'o, l1, o2, l',\n",
       " '\"ab\"',\n",
       " '1, 1, 2',\n",
       " 'g, i',\n",
       " '0, 0',\n",
       " '\"a\"',\n",
       " 'i, l',\n",
       " 'i',\n",
       " '0,0',\n",
       " '- l [ i ]',\n",
       " '1, 2, 3, 1',\n",
       " 'l[ i - 1 ]',\n",
       " '\"1\",\"2\", \"3\",\"4\", \"5\"',\n",
       " 'g, g, i']"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "code ='def main(): g = \"ab\"; i = 1; l = [<extra_id_0>]; g += l[i]; return g, i, l'\n",
    "\n",
    "input_ids = tokenizer(code, return_tensors=\"pt\").input_ids\n",
    "generated_ids = model.generate(input_ids, num_return_sequences=100, max_length=20, do_sample=True, temperature=1.0)\n",
    "filter_codes(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))\n",
    "\n",
    "# 100 samples -> ~8 valid alternatives, 3.1s on macos CPU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['<pad><s><extra_id_0>5<extra_id_1>g i l [ 0</s><pad><pad>',\n",
       " '<pad><s><extra_id_0>0<extra_id_1>0, 0, 0</s><pad><pad>',\n",
       " '<pad><s><extra_id_0>0<extra_id_1>1 1 2, 1</s><pad><pad>',\n",
       " \"<pad><s><extra_id_0>'<extra_id_1>i</s><pad><pad><pad><pad><pad><pad>\",\n",
       " '<pad><s><extra_id_0>0<extra_id_1>a t</s><pad><pad><pad><pad><pad>',\n",
       " '<pad><s><extra_id_0>0.0<extra_id_1>e. f_i</s>',\n",
       " '<pad><s><extra_id_0>\" \"<extra_id_1>1</s><pad><pad><pad><pad><pad>',\n",
       " '<pad><s><extra_id_0>0<extra_id_1>n = 1 l =</s><pad><pad>',\n",
       " '<pad><s><extra_id_0>0, 0, 1<extra_id_1>1</s><pad><pad>',\n",
       " '<pad><s><extra_id_0>1<extra_id_1>k y y x z</s><pad><pad>']"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "code ='def main(): g = <extra_id_0>; i = 1; l = [<extra_id_1>]; g += l[i]; return g, i, l'\n",
    "\n",
    "input_ids = tokenizer(code, return_tensors=\"pt\").input_ids\n",
    "generated_ids = model.generate(input_ids, num_return_sequences=10, max_length=20, do_sample=True, temperature=1.0)\n",
    "tokenizer.batch_decode(generated_ids)"
   ]
  },
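  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With several sentinels the fills come back interleaved with `<extra_id_*>` markers, so they would have to be split apart before substituting them. A minimal sketch (hypothetical helper, assuming the decode keeps special tokens as above):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "\n",
    "def split_sentinel_fills(decoded):\n",
    "    # drop pad/bos/eos tokens, then split on the <extra_id_N> markers;\n",
    "    # the chunk before the first sentinel is empty, so skip it\n",
    "    decoded = re.sub(r'</?s>|<pad>', '', decoded)\n",
    "    return [p.strip() for p in re.split(r'<extra_id_\\d+>', decoded)[1:]]\n",
    "\n",
    "split_sentinel_fills('<pad><s><extra_id_0>0<extra_id_1>0, 0, 0</s><pad><pad>')\n",
    "# ['0', '0, 0, 0']"
   ]
  },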
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "ced6a873299cbeeefe969ab88294103b352f8c83b6537b9e08e8739795321d60"
  },
  "kernelspec": {
   "display_name": "Python 3.9.9 64-bit ('3.9.9': pyenv)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.9"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}