obake2ai committed
Commit 1b577b5 · verified · 1 Parent(s): 8556d24

Upload ChatRoom_MaryGPT.ipynb

Files changed (1)
  1. ChatRoom_MaryGPT.ipynb +859 -0
ChatRoom_MaryGPT.ipynb ADDED
@@ -0,0 +1,859 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "l7qoJHs1L6WQ"
7
+ },
8
+ "source": [
9
+ "#01.SETUP"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 4,
15
+ "metadata": {
16
+ "colab": {
17
+ "base_uri": "https://localhost:8080/"
18
+ },
19
+ "id": "Op0GXmC8CCyR",
20
+ "outputId": "a414537e-71b5-4222-85e3-5cc0bdd3f6a6"
21
+ },
22
+ "outputs": [
23
+ {
24
+ "output_type": "stream",
25
+ "name": "stdout",
26
+ "text": [
27
+ "Collecting transformers==4.25.1\n",
28
+ " Downloading transformers-4.25.1-py3-none-any.whl.metadata (93 kB)\n",
29
+ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/93.9 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m93.9/93.9 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
30
+ "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (3.15.4)\n",
31
+ "Requirement already satisfied: huggingface-hub<1.0,>=0.10.0 in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (0.23.5)\n",
32
+ "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (1.26.4)\n",
33
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (24.1)\n",
34
+ "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (6.0.1)\n",
35
+ "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (2024.5.15)\n",
36
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (2.31.0)\n",
37
+ "Collecting tokenizers!=0.11.3,<0.14,>=0.11.1 (from transformers==4.25.1)\n",
38
+ " Downloading tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.7 kB)\n",
39
+ "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers==4.25.1) (4.66.4)\n",
40
+ "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.10.0->transformers==4.25.1) (2024.6.1)\n",
41
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.10.0->transformers==4.25.1) (4.12.2)\n",
42
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.25.1) (3.3.2)\n",
43
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.25.1) (3.7)\n",
44
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.25.1) (2.0.7)\n",
45
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.25.1) (2024.7.4)\n",
46
+ "Downloading transformers-4.25.1-py3-none-any.whl (5.8 MB)\n",
47
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.8/5.8 MB\u001b[0m \u001b[31m60.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
48
+ "\u001b[?25hDownloading tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (7.8 MB)\n",
49
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.8/7.8 MB\u001b[0m \u001b[31m83.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
50
+ "\u001b[?25hInstalling collected packages: tokenizers, transformers\n",
51
+ " Attempting uninstall: tokenizers\n",
52
+ " Found existing installation: tokenizers 0.19.1\n",
53
+ " Uninstalling tokenizers-0.19.1:\n",
54
+ " Successfully uninstalled tokenizers-0.19.1\n",
55
+ " Attempting uninstall: transformers\n",
56
+ " Found existing installation: transformers 4.42.4\n",
57
+ " Uninstalling transformers-4.42.4:\n",
58
+ " Successfully uninstalled transformers-4.42.4\n",
59
+ "Successfully installed tokenizers-0.13.3 transformers-4.25.1\n",
60
+ "\u001b[31mERROR: Could not find a version that satisfies the requirement bitsandbytes-cuda111==0.26.0 (from versions: 0.26.0.post2)\u001b[0m\u001b[31m\n",
61
+ "\u001b[0m\u001b[31mERROR: No matching distribution found for bitsandbytes-cuda111==0.26.0\u001b[0m\u001b[31m\n",
62
+ "\u001b[0mCollecting datasets==1.16.1\n",
63
+ " Downloading datasets-1.16.1-py3-none-any.whl.metadata (21 kB)\n",
64
+ "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (1.26.4)\n",
65
+ "Requirement already satisfied: pyarrow!=4.0.0,>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (14.0.2)\n",
66
+ "Collecting dill (from datasets==1.16.1)\n",
67
+ " Downloading dill-0.3.8-py3-none-any.whl.metadata (10 kB)\n",
68
+ "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (2.1.4)\n",
69
+ "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (2.31.0)\n",
70
+ "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (4.66.4)\n",
71
+ "Collecting xxhash (from datasets==1.16.1)\n",
72
+ " Downloading xxhash-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (12 kB)\n",
73
+ "Collecting multiprocess (from datasets==1.16.1)\n",
74
+ " Downloading multiprocess-0.70.16-py310-none-any.whl.metadata (7.2 kB)\n",
75
+ "Requirement already satisfied: fsspec>=2021.05.0 in /usr/local/lib/python3.10/dist-packages (from fsspec[http]>=2021.05.0->datasets==1.16.1) (2024.6.1)\n",
76
+ "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (3.9.5)\n",
77
+ "Requirement already satisfied: huggingface-hub<1.0.0,>=0.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (0.23.5)\n",
78
+ "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from datasets==1.16.1) (24.1)\n",
79
+ "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets==1.16.1) (1.3.1)\n",
80
+ "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets==1.16.1) (23.2.0)\n",
81
+ "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets==1.16.1) (1.4.1)\n",
82
+ "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets==1.16.1) (6.0.5)\n",
83
+ "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets==1.16.1) (1.9.4)\n",
84
+ "Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets==1.16.1) (4.0.3)\n",
85
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets==1.16.1) (3.15.4)\n",
86
+ "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets==1.16.1) (6.0.1)\n",
87
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets==1.16.1) (4.12.2)\n",
88
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets==1.16.1) (3.3.2)\n",
89
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets==1.16.1) (3.7)\n",
90
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets==1.16.1) (2.0.7)\n",
91
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets==1.16.1) (2024.7.4)\n",
92
+ "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets==1.16.1) (2.8.2)\n",
93
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets==1.16.1) (2024.1)\n",
94
+ "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets==1.16.1) (2024.1)\n",
95
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas->datasets==1.16.1) (1.16.0)\n",
96
+ "Downloading datasets-1.16.1-py3-none-any.whl (298 kB)\n",
97
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m298.3/298.3 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
98
+ "\u001b[?25hDownloading dill-0.3.8-py3-none-any.whl (116 kB)\n",
99
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m116.3/116.3 kB\u001b[0m \u001b[31m8.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
100
+ "\u001b[?25hDownloading multiprocess-0.70.16-py310-none-any.whl (134 kB)\n",
101
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
102
+ "\u001b[?25hDownloading xxhash-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (194 kB)\n",
103
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m194.1/194.1 kB\u001b[0m \u001b[31m8.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
104
+ "\u001b[?25hInstalling collected packages: xxhash, dill, multiprocess, datasets\n",
105
+ "Successfully installed datasets-1.16.1 dill-0.3.8 multiprocess-0.70.16 xxhash-3.4.1\n",
106
+ "Collecting bitsandbytes\n",
107
+ " Downloading bitsandbytes-0.43.3-py3-none-manylinux_2_24_x86_64.whl.metadata (3.5 kB)\n",
108
+ "Collecting loguru\n",
109
+ " Downloading loguru-0.7.2-py3-none-any.whl.metadata (23 kB)\n",
110
+ "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from bitsandbytes) (2.3.1+cu121)\n",
111
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from bitsandbytes) (1.26.4)\n",
112
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (3.15.4)\n",
113
+ "Requirement already satisfied: typing-extensions>=4.8.0 in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (4.12.2)\n",
114
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (1.13.1)\n",
115
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (3.3)\n",
116
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (3.1.4)\n",
117
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (2024.6.1)\n",
118
+ "Collecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch->bitsandbytes)\n",
119
+ " Using cached nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\n",
120
+ "Collecting nvidia-cuda-runtime-cu12==12.1.105 (from torch->bitsandbytes)\n",
121
+ " Using cached nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\n",
122
+ "Collecting nvidia-cuda-cupti-cu12==12.1.105 (from torch->bitsandbytes)\n",
123
+ " Using cached nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\n",
124
+ "Collecting nvidia-cudnn-cu12==8.9.2.26 (from torch->bitsandbytes)\n",
125
+ " Using cached nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\n",
126
+ "Collecting nvidia-cublas-cu12==12.1.3.1 (from torch->bitsandbytes)\n",
127
+ " Using cached nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\n",
128
+ "Collecting nvidia-cufft-cu12==11.0.2.54 (from torch->bitsandbytes)\n",
129
+ " Using cached nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\n",
130
+ "Collecting nvidia-curand-cu12==10.3.2.106 (from torch->bitsandbytes)\n",
131
+ " Using cached nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\n",
132
+ "Collecting nvidia-cusolver-cu12==11.4.5.107 (from torch->bitsandbytes)\n",
133
+ " Using cached nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\n",
134
+ "Collecting nvidia-cusparse-cu12==12.1.0.106 (from torch->bitsandbytes)\n",
135
+ " Using cached nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\n",
136
+ "Collecting nvidia-nccl-cu12==2.20.5 (from torch->bitsandbytes)\n",
137
+ " Using cached nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl.metadata (1.8 kB)\n",
138
+ "Collecting nvidia-nvtx-cu12==12.1.105 (from torch->bitsandbytes)\n",
139
+ " Using cached nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.7 kB)\n",
140
+ "Requirement already satisfied: triton==2.3.1 in /usr/local/lib/python3.10/dist-packages (from torch->bitsandbytes) (2.3.1)\n",
141
+ "Collecting nvidia-nvjitlink-cu12 (from nvidia-cusolver-cu12==11.4.5.107->torch->bitsandbytes)\n",
142
+ " Downloading nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n",
143
+ "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->bitsandbytes) (2.1.5)\n",
144
+ "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->bitsandbytes) (1.3.0)\n",
145
+ "Downloading bitsandbytes-0.43.3-py3-none-manylinux_2_24_x86_64.whl (137.5 MB)\n",
146
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.5/137.5 MB\u001b[0m \u001b[31m17.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
147
+ "\u001b[?25hDownloading loguru-0.7.2-py3-none-any.whl (62 kB)\n",
148
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.5/62.5 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
149
+ "\u001b[?25hUsing cached nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n",
150
+ "Using cached nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n",
151
+ "Using cached nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n",
152
+ "Using cached nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n",
153
+ "Using cached nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl (731.7 MB)\n",
154
+ "Using cached nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n",
155
+ "Using cached nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n",
156
+ "Using cached nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n",
157
+ "Using cached nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n",
158
+ "Using cached nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl (176.2 MB)\n",
159
+ "Using cached nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n",
160
+ "Downloading nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_x86_64.whl (21.3 MB)\n",
161
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.3/21.3 MB\u001b[0m \u001b[31m40.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
162
+ "\u001b[?25hInstalling collected packages: nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, loguru, nvidia-cusparse-cu12, nvidia-cudnn-cu12, nvidia-cusolver-cu12, bitsandbytes\n",
163
+ "Successfully installed bitsandbytes-0.43.3 loguru-0.7.2 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.20.5 nvidia-nvjitlink-cu12-12.5.82 nvidia-nvtx-cu12-12.1.105\n",
164
+ "Requirement already satisfied: accelerate in /usr/local/lib/python3.10/dist-packages (0.32.1)\n",
165
+ "Requirement already satisfied: numpy<2.0.0,>=1.17 in /usr/local/lib/python3.10/dist-packages (from accelerate) (1.26.4)\n",
166
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate) (24.1)\n",
167
+ "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate) (5.9.5)\n",
168
+ "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from accelerate) (6.0.1)\n",
169
+ "Requirement already satisfied: torch>=1.10.0 in /usr/local/lib/python3.10/dist-packages (from accelerate) (2.3.1+cu121)\n",
170
+ "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.10/dist-packages (from accelerate) (0.23.5)\n",
171
+ "Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from accelerate) (0.4.3)\n",
172
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (3.15.4)\n",
173
+ "Requirement already satisfied: typing-extensions>=4.8.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (4.12.2)\n",
174
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (1.13.1)\n",
175
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (3.3)\n",
176
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (3.1.4)\n",
177
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (2024.6.1)\n",
178
+ "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (12.1.105)\n",
179
+ "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (12.1.105)\n",
180
+ "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (12.1.105)\n",
181
+ "Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (8.9.2.26)\n",
182
+ "Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (12.1.3.1)\n",
183
+ "Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.0.2.54)\n",
184
+ "Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (10.3.2.106)\n",
185
+ "Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.4.5.107)\n",
186
+ "Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (12.1.0.106)\n",
187
+ "Requirement already satisfied: nvidia-nccl-cu12==2.20.5 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (2.20.5)\n",
188
+ "Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (12.1.105)\n",
189
+ "Requirement already satisfied: triton==2.3.1 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (2.3.1)\n",
190
+ "Requirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.10/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=1.10.0->accelerate) (12.5.82)\n",
191
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from huggingface-hub->accelerate) (2.31.0)\n",
192
+ "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub->accelerate) (4.66.4)\n",
193
+ "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.10.0->accelerate) (2.1.5)\n",
194
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (3.3.2)\n",
195
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (3.7)\n",
196
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (2.0.7)\n",
197
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (2024.7.4)\n",
198
+ "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.10.0->accelerate) (1.3.0)\n",
199
+ "Collecting deep_translator\n",
200
+ " Downloading deep_translator-1.11.4-py3-none-any.whl.metadata (30 kB)\n",
201
+ "Requirement already satisfied: beautifulsoup4<5.0.0,>=4.9.1 in /usr/local/lib/python3.10/dist-packages (from deep_translator) (4.12.3)\n",
202
+ "Requirement already satisfied: requests<3.0.0,>=2.23.0 in /usr/local/lib/python3.10/dist-packages (from deep_translator) (2.31.0)\n",
203
+ "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.10/dist-packages (from beautifulsoup4<5.0.0,>=4.9.1->deep_translator) (2.5)\n",
204
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.23.0->deep_translator) (3.3.2)\n",
205
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.23.0->deep_translator) (3.7)\n",
206
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.23.0->deep_translator) (2.0.7)\n",
207
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.23.0->deep_translator) (2024.7.4)\n",
208
+ "Downloading deep_translator-1.11.4-py3-none-any.whl (42 kB)\n",
209
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m42.3/42.3 kB\u001b[0m \u001b[31m1.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
210
+ "\u001b[?25hInstalling collected packages: deep_translator\n",
211
+ "Successfully installed deep_translator-1.11.4\n",
212
+ "Collecting langdetect\n",
213
+ " Downloading langdetect-1.0.9.tar.gz (981 kB)\n",
214
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m981.5/981.5 kB\u001b[0m \u001b[31m10.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
215
+ "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
216
+ "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from langdetect) (1.16.0)\n",
217
+ "Building wheels for collected packages: langdetect\n",
218
+ " Building wheel for langdetect (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
219
+ " Created wheel for langdetect: filename=langdetect-1.0.9-py3-none-any.whl size=993221 sha256=2bfbeba64add7db1945e43111d9c3fc31ee7b6e970d35fcfb2939e88689299ef\n",
220
+ " Stored in directory: /root/.cache/pip/wheels/95/03/7d/59ea870c70ce4e5a370638b5462a7711ab78fba2f655d05106\n",
221
+ "Successfully built langdetect\n",
222
+ "Installing collected packages: langdetect\n",
223
+ "Successfully installed langdetect-1.0.9\n"
224
+ ]
225
+ }
226
+ ],
227
+ "source": [
228
+ "!pip install transformers==4.25.1\n",
229
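+ "# NOTE: the bitsandbytes-cuda111==0.26.0 pin below no longer resolves (see the\n",
+ "# pip resolution error in this cell's output); the plain bitsandbytes install\n",
+ "# further down supplies the quantization routines imported in the next cell.\n",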
+ "!pip install bitsandbytes-cuda111==0.26.0\n",
230
+ "!pip install datasets==1.16.1\n",
231
+ "!pip install bitsandbytes loguru\n",
232
+ "!pip install accelerate\n",
233
+ "!pip install deep_translator\n",
234
+ "!pip install langdetect"
235
+ ]
236
+ },
237
+ {
238
+ "cell_type": "code",
239
+ "execution_count": 5,
240
+ "metadata": {
241
+ "id": "p0dy1ZFwClcq"
242
+ },
243
+ "outputs": [],
244
+ "source": [
245
+ "from loguru import logger\n",
246
+ "import transformers\n",
247
+ "import torch\n",
248
+ "import torch.nn.functional as F\n",
249
+ "from torch import nn\n",
250
+ "from torch.cuda.amp import custom_fwd, custom_bwd\n",
251
+ "from bitsandbytes.functional import quantize_blockwise, dequantize_blockwise\n",
252
+ "from tqdm.auto import tqdm\n",
253
+ "from datasets import load_dataset\n",
254
+ "from bitsandbytes.optim import Adam8bit\n",
255
+ "import time, os\n",
256
+ "\n",
257
+ "# ---------------------> Converting the model to 8 bits <------------------- #\n",
258
+ "\"\"\"\n",
259
+ "We convert EleutherAI's GPT-J-6B model to 8 bits using Facebook's [bitsandbytes](https://github.com/facebookresearch/bitsandbytes) library.\n",
260
+ "This reduces the model's size from 20 GB down to just 6 GB.\n",
261
+ "Note that we don't convert linear layer biases to 8 bit as they take up less than 1% of the model's weights anyway.\n",
262
+ "\"\"\"\n",
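+ "# Rough sketch of what gets stored per tensor (mirrors the quantize_blockwise\n",
+ "# call used in quantize_blockise_lowmemory below; illustration only):\n",
+ "#   q, (absmax, code) = quantize_blockwise(torch.randn(4096, 4096))\n",
+ "#   x_hat = dequantize_blockwise(q, absmax=absmax, code=code)  # ~original values\n",
+ "# q holds one uint8 per weight, code is a shared 256-entry codebook, and absmax\n",
+ "# is one scale per 4096-element block.\n",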
263
+ "\n",
264
+ "class FrozenBNBLinear(nn.Module):\n",
265
+ "    def __init__(self, weight, absmax, code, bias=None):\n",
266
+ "        assert isinstance(bias, nn.Parameter) or bias is None\n",
267
+ "        super().__init__()\n",
268
+ "        self.out_features, self.in_features = weight.shape\n",
269
+ "        self.register_buffer(\"weight\", weight.requires_grad_(False))\n",
270
+ "        self.register_buffer(\"absmax\", absmax.requires_grad_(False))\n",
271
+ "        self.register_buffer(\"code\", code.requires_grad_(False))\n",
272
+ "        self.adapter = None\n",
273
+ "        self.bias = bias\n",
274
+ "\n",
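+ "    # The commented-out forward kept below is the earlier version; the active\n",
+ "    # one returns a clone of (output + adapter(input)), a workaround that avoids\n",
+ "    # modifying the output of a custom autograd Function in place.\n",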
275
+ "    # def forward(self, input):\n",
276
+ "    #     output = DequantizeAndLinear.apply(input, self.weight, self.absmax, self.code, self.bias)\n",
277
+ "    #     if self.adapter:\n",
278
+ "    #         output += self.adapter(input)\n",
279
+ "    #     return output\n",
280
+ "    def forward(self, input):\n",
281
+ "        output = DequantizeAndLinear.apply(input, self.weight, self.absmax, self.code, self.bias)\n",
282
+ "        if self.adapter:\n",
283
+ "            output_cloned = torch.clone(output + self.adapter(input))\n",
284
+ "            return output_cloned\n",
285
+ "        else:\n",
286
+ "            return output\n",
287
+ "\n",
288
+ "    @classmethod\n",
289
+ "    def from_linear(cls, linear: nn.Linear) -> \"FrozenBNBLinear\":\n",
290
+ "        weights_int8, state = quantize_blockise_lowmemory(linear.weight)\n",
291
+ "        return cls(weights_int8, *state, linear.bias)\n",
292
+ "\n",
293
+ "    def __repr__(self):\n",
294
+ "        return f\"{self.__class__.__name__}({self.in_features}, {self.out_features})\"\n",
295
+ "\n",
296
+ "\n",
297
+ "\n",
298
+ "class DequantizeAndLinear(torch.autograd.Function):\n",
299
+ "    @staticmethod\n",
300
+ "    @custom_fwd\n",
301
+ "    def forward(ctx, input: torch.Tensor, weights_quantized: torch.ByteTensor,\n",
302
+ "                absmax: torch.FloatTensor, code: torch.FloatTensor, bias: torch.FloatTensor):\n",
303
+ "        weights_deq = dequantize_blockwise(weights_quantized, absmax=absmax, code=code)\n",
304
+ "        ctx.save_for_backward(input, weights_quantized, absmax, code)\n",
305
+ "        ctx._has_bias = bias is not None\n",
306
+ "        return F.linear(input, weights_deq, bias)\n",
307
+ "\n",
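+ "    # backward produces gradients only for the input (and bias); the quantized\n",
+ "    # weight, absmax, and code are frozen buffers, so full-size weight gradients\n",
+ "    # are never materialized\n",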
308
+ "    @staticmethod\n",
309
+ "    @custom_bwd\n",
310
+ "    def backward(ctx, grad_output: torch.Tensor):\n",
311
+ "        assert not ctx.needs_input_grad[1] and not ctx.needs_input_grad[2] and not ctx.needs_input_grad[3]\n",
312
+ "        input, weights_quantized, absmax, code = ctx.saved_tensors\n",
313
+ "        # grad_output: [*batch, out_features]\n",
314
+ "        weights_deq = dequantize_blockwise(weights_quantized, absmax=absmax, code=code)\n",
315
+ "        grad_input = grad_output @ weights_deq\n",
316
+ "        grad_bias = grad_output.flatten(0, -2).sum(dim=0) if ctx._has_bias else None\n",
317
+ "        return grad_input, None, None, None, grad_bias\n",
318
+ "\n",
319
+ "\n",
320
+ "class FrozenBNBEmbedding(nn.Module):\n",
321
+ "    def __init__(self, weight, absmax, code):\n",
322
+ "        super().__init__()\n",
323
+ "        self.num_embeddings, self.embedding_dim = weight.shape\n",
324
+ "        self.register_buffer(\"weight\", weight.requires_grad_(False))\n",
325
+ "        self.register_buffer(\"absmax\", absmax.requires_grad_(False))\n",
326
+ "        self.register_buffer(\"code\", code.requires_grad_(False))\n",
327
+ "        self.adapter = None\n",
328
+ "\n",
329
+ "    def forward(self, input, **kwargs):\n",
330
+ "        with torch.no_grad():\n",
331
+ "            # note: both quantized weights and input indices are *not* differentiable\n",
332
+ "            weight_deq = dequantize_blockwise(self.weight, absmax=self.absmax, code=self.code)\n",
333
+ "            output = F.embedding(input, weight_deq, **kwargs)\n",
334
+ "        if self.adapter:\n",
335
+ "            output += self.adapter(input)\n",
336
+ "        return output\n",
337
+ "\n",
338
+ "    @classmethod\n",
339
+ "    def from_embedding(cls, embedding: nn.Embedding) -> \"FrozenBNBEmbedding\":\n",
340
+ "        weights_int8, state = quantize_blockise_lowmemory(embedding.weight)\n",
341
+ "        return cls(weights_int8, *state)\n",
342
+ "\n",
343
+ "    def __repr__(self):\n",
344
+ "        return f\"{self.__class__.__name__}({self.num_embeddings}, {self.embedding_dim})\"\n",
345
+ "\n",
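+ "# Quantize in fixed-size chunks so only one small fp32 slice is copied at a\n",
+ "# time; the codebook returned by the first chunk is reused for all later ones.\n",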
346
+ "def quantize_blockise_lowmemory(matrix: torch.Tensor, chunk_size: int = 2 ** 20):\n",
347
+ "    assert chunk_size % 4096 == 0\n",
348
+ "    code = None\n",
349
+ "    chunks = []\n",
350
+ "    absmaxes = []\n",
351
+ "    flat_tensor = matrix.view(-1)\n",
352
+ "    for i in range((matrix.numel() - 1) // chunk_size + 1):\n",
353
+ "        input_chunk = flat_tensor[i * chunk_size: (i + 1) * chunk_size].clone()\n",
354
+ "        quantized_chunk, (absmax_chunk, code) = quantize_blockwise(input_chunk, code=code)\n",
355
+ "        chunks.append(quantized_chunk)\n",
356
+ "        absmaxes.append(absmax_chunk)\n",
357
+ "\n",
358
+ "    matrix_i8 = torch.cat(chunks).reshape_as(matrix)\n",
359
+ "    absmax = torch.cat(absmaxes)\n",
360
+ "    return matrix_i8, (absmax, code)\n",
361
+ "\n",
362
+ "\n",
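+ "# The zero-filled weight/absmax/code tensors below are shape-correct\n",
+ "# placeholders; the real quantized values arrive when the checkpoint's state\n",
+ "# dict is loaded into these registered buffers.\n",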
363
+ "def convert_to_int8(model):\n",
364
+ "    \"\"\"Convert linear and embedding modules to 8-bit with optional adapters\"\"\"\n",
365
+ "    for module in list(model.modules()):\n",
366
+ "        for name, child in module.named_children():\n",
367
+ "            if isinstance(child, nn.Linear):\n",
368
+ "                print(name, child)\n",
369
+ "                setattr(\n",
370
+ "                    module,\n",
371
+ "                    name,\n",
372
+ "                    FrozenBNBLinear(\n",
373
+ "                        weight=torch.zeros(child.out_features, child.in_features, dtype=torch.uint8),\n",
374
+ "                        absmax=torch.zeros((child.weight.numel() - 1) // 4096 + 1),\n",
375
+ "                        code=torch.zeros(256),\n",
376
+ "                        bias=child.bias,\n",
377
+ "                    ),\n",
378
+ "                )\n",
379
+ "            elif isinstance(child, nn.Embedding):\n",
380
+ "                setattr(\n",
381
+ "                    module,\n",
382
+ "                    name,\n",
383
+ "                    FrozenBNBEmbedding(\n",
384
+ "                        weight=torch.zeros(child.num_embeddings, child.embedding_dim, dtype=torch.uint8),\n",
385
+ "                        absmax=torch.zeros((child.weight.numel() - 1) // 4096 + 1),\n",
386
+ "                        code=torch.zeros(256),\n",
387
+ "                    )\n",
388
+ "                )\n",
389
+ "\n",
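+ "# Subclassing the stock GPT-J classes means the model is constructed with the\n",
+ "# 8-bit module layout up front, so from_pretrained can load a quantized\n",
+ "# checkpoint (here obake2ai/MaryGPT) straight into the frozen buffers.\n",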
390
+ "class GPTJBlock(transformers.models.gptj.modeling_gptj.GPTJBlock):\n",
391
+ "    def __init__(self, config):\n",
392
+ "        super().__init__(config)\n",
393
+ "\n",
394
+ "        convert_to_int8(self.attn)\n",
395
+ "        convert_to_int8(self.mlp)\n",
396
+ "\n",
397
+ "\n",
398
+ "class GPTJModel(transformers.models.gptj.modeling_gptj.GPTJModel):\n",
399
+ "    def __init__(self, config):\n",
400
+ "        super().__init__(config)\n",
401
+ "        convert_to_int8(self)\n",
402
+ "\n",
403
+ "\n",
404
+ "class GPTJForCausalLM(transformers.models.gptj.modeling_gptj.GPTJForCausalLM):\n",
405
+ "    def __init__(self, config):\n",
406
+ "        super().__init__(config)\n",
407
+ "        convert_to_int8(self)"
408
+ ]
409
+ },
410
+ {
411
+ "cell_type": "markdown",
412
+ "metadata": {
413
+ "id": "PJg_VgpqMDkY"
414
+ },
415
+ "source": [
416
+ "#02.LOAD MARYGPT"
417
+ ]
418
+ },
419
+ {
420
+ "cell_type": "code",
421
+ "execution_count": 7,
422
+ "metadata": {
423
+ "colab": {
424
+ "base_uri": "https://localhost:8080/"
425
+ },
426
+ "id": "6yHWUS-h-Hs8",
427
+ "outputId": "3c7cfa66-a95d-442e-b595-4ccd023d913b"
428
+ },
429
+ "outputs": [
430
+ {
431
+ "output_type": "stream",
432
+ "name": "stdout",
433
+ "text": [
434
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
435
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
436
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
437
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
438
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
439
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
440
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
441
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
442
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
443
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
444
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
445
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
446
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
447
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
448
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
449
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
450
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
451
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
452
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
453
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
454
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
455
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
456
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
457
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
458
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
459
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
460
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
461
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
462
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
463
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
464
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
465
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
466
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
467
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
468
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
469
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
470
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
471
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
472
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
473
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
474
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
475
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
476
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
477
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
478
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
479
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
480
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
481
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
482
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
483
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
484
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
485
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
486
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
487
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
488
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
489
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
490
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
491
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
492
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
493
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
494
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
495
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
496
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
497
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
498
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
499
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
500
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
501
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
502
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
503
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
504
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
505
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
506
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
507
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
508
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
509
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
510
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
511
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
512
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
513
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
514
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
515
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
516
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
517
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
518
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
519
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
520
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
521
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
522
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
523
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
524
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
525
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
526
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
527
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
528
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
529
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
530
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
531
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
532
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
533
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
534
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
535
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
536
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
537
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
538
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
539
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
540
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
541
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
542
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
543
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
544
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
545
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
546
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
547
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
548
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
549
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
550
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
551
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
552
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
553
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
554
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
555
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
556
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
557
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
558
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
559
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
560
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
561
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
562
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
563
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
564
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
565
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
566
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
567
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
568
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
569
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
570
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
571
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
572
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
573
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
574
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
575
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
576
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
577
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
578
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
579
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
580
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
581
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
582
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
583
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
584
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
585
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
586
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
587
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
588
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
589
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
590
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
591
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
592
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
593
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
594
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
595
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
596
+ "k_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
597
+ "v_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
598
+ "q_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
599
+ "out_proj Linear(in_features=4096, out_features=4096, bias=False)\n",
600
+ "fc_in Linear(in_features=4096, out_features=16384, bias=True)\n",
601
+ "fc_out Linear(in_features=16384, out_features=4096, bias=True)\n",
602
+ "lm_head Linear(in_features=4096, out_features=50400, bias=True)\n"
603
+ ]
604
+ },
605
+ {
606
+ "output_type": "stream",
607
+ "name": "stderr",
608
+ "text": [
609
+ "Some weights of the model checkpoint at obake2ai/MaryGPT were not used when initializing GPTJForCausalLM: ['transformer.h.5.mlp.fc_in.adapter.1.weight', 'transformer.h.15.attn.v_proj.adapter.0.weight', 'transformer.h.18.mlp.fc_out.adapter.0.weight', 'transformer.h.4.mlp.fc_in.adapter.0.weight', 'transformer.h.22.attn.v_proj.adapter.1.weight', 'transformer.h.21.attn.out_proj.adapter.1.weight', 'transformer.h.14.attn.q_proj.adapter.1.weight', 'transformer.h.2.attn.q_proj.adapter.0.weight', 'transformer.h.0.attn.k_proj.adapter.1.weight', 'transformer.h.14.attn.k_proj.adapter.1.weight', 'transformer.h.7.mlp.fc_in.adapter.0.weight', 'transformer.h.26.mlp.fc_out.adapter.1.weight', 'transformer.h.24.attn.q_proj.adapter.1.weight', 'transformer.h.20.mlp.fc_out.adapter.1.weight', 'transformer.h.5.attn.out_proj.adapter.1.weight', 'transformer.h.5.attn.q_proj.adapter.1.weight', 'transformer.h.15.attn.k_proj.adapter.0.weight', 'transformer.h.25.mlp.fc_out.adapter.0.weight', 'transformer.h.22.attn.out_proj.adapter.1.weight', 'transformer.h.17.attn.v_proj.adapter.0.weight', 'transformer.h.25.attn.v_proj.adapter.1.weight', 'transformer.h.19.attn.q_proj.adapter.0.weight', 'transformer.h.0.attn.q_proj.adapter.0.weight', 'transformer.h.9.attn.k_proj.adapter.1.weight', 'transformer.h.4.mlp.fc_out.adapter.1.weight', 'transformer.h.4.attn.v_proj.adapter.0.weight', 'transformer.h.7.attn.k_proj.adapter.1.weight', 'transformer.h.0.mlp.fc_out.adapter.0.weight', 'transformer.h.1.mlp.fc_out.adapter.0.weight', 'transformer.h.7.attn.q_proj.adapter.1.weight', 'transformer.h.25.attn.out_proj.adapter.1.weight', 'transformer.h.16.attn.out_proj.adapter.1.weight', 'transformer.h.8.attn.out_proj.adapter.0.weight', 'transformer.h.22.attn.k_proj.adapter.0.weight', 'transformer.h.0.attn.v_proj.adapter.1.weight', 'transformer.h.15.mlp.fc_in.adapter.0.weight', 'transformer.h.24.attn.q_proj.adapter.0.weight', 'transformer.h.6.attn.out_proj.adapter.0.weight', 'transformer.h.20.attn.out_proj.adapter.1.weight', 'transformer.h.20.attn.q_proj.adapter.0.weight', 'transformer.h.23.attn.out_proj.adapter.1.weight', 'transformer.h.12.attn.k_proj.adapter.0.weight', 'transformer.h.27.attn.v_proj.adapter.0.weight', 'transformer.h.3.mlp.fc_out.adapter.0.weight', 'transformer.h.8.attn.v_proj.adapter.0.weight', 'transformer.h.20.mlp.fc_in.adapter.1.weight', 'transformer.h.6.attn.v_proj.adapter.1.weight', 'transformer.h.1.attn.k_proj.adapter.0.weight', 'transformer.h.1.attn.out_proj.adapter.1.weight', 'transformer.h.16.mlp.fc_out.adapter.1.weight', 'transformer.h.4.mlp.fc_out.adapter.0.weight', 'transformer.h.15.attn.q_proj.adapter.0.weight', 'transformer.h.19.attn.k_proj.adapter.0.weight', 'transformer.h.14.mlp.fc_in.adapter.0.weight', 'transformer.h.17.attn.k_proj.adapter.0.weight', 'transformer.h.19.mlp.fc_in.adapter.1.weight', 'transformer.h.21.attn.v_proj.adapter.1.weight', 'transformer.h.11.attn.k_proj.adapter.1.weight', 'transformer.h.19.mlp.fc_out.adapter.0.weight', 'transformer.h.20.attn.q_proj.adapter.1.weight', 'transformer.h.20.attn.k_proj.adapter.0.weight', 'transformer.h.10.mlp.fc_in.adapter.0.weight', 'transformer.h.12.attn.out_proj.adapter.0.weight', 'transformer.h.6.mlp.fc_in.adapter.0.weight', 'transformer.h.17.mlp.fc_out.adapter.0.weight', 'transformer.h.1.attn.q_proj.adapter.1.weight', 'transformer.h.5.attn.q_proj.adapter.0.weight', 'transformer.h.2.attn.v_proj.adapter.1.weight', 'transformer.h.25.attn.k_proj.adapter.1.weight', 'transformer.h.22.attn.v_proj.adapter.0.weight', 'transformer.h.17.attn.out_proj.adapter.1.weight', 
'transformer.h.7.attn.k_proj.adapter.0.weight', 'transformer.h.23.mlp.fc_out.adapter.1.weight', 'transformer.h.6.attn.q_proj.adapter.1.weight', 'transformer.h.25.attn.k_proj.adapter.0.weight', 'transformer.h.23.attn.v_proj.adapter.0.weight', 'transformer.h.18.attn.out_proj.adapter.0.weight', 'transformer.h.11.mlp.fc_out.adapter.0.weight', 'transformer.h.12.attn.out_proj.adapter.1.weight', 'transformer.h.15.mlp.fc_in.adapter.1.weight', 'transformer.h.24.attn.v_proj.adapter.0.weight', 'transformer.h.1.attn.k_proj.adapter.1.weight', 'transformer.h.11.attn.q_proj.adapter.1.weight', 'transformer.h.11.mlp.fc_out.adapter.1.weight', 'transformer.h.23.attn.k_proj.adapter.1.weight', 'transformer.h.2.attn.k_proj.adapter.1.weight', 'transformer.h.26.attn.q_proj.adapter.0.weight', 'transformer.h.24.mlp.fc_in.adapter.0.weight', 'transformer.h.0.mlp.fc_in.adapter.0.weight', 'transformer.h.2.mlp.fc_out.adapter.1.weight', 'transformer.h.10.mlp.fc_out.adapter.1.weight', 'transformer.h.12.mlp.fc_out.adapter.1.weight', 'transformer.h.19.attn.q_proj.adapter.1.weight', 'transformer.h.7.attn.out_proj.adapter.0.weight', 'transformer.h.9.attn.out_proj.adapter.1.weight', 'transformer.h.27.attn.out_proj.adapter.0.weight', 'transformer.h.1.mlp.fc_out.adapter.1.weight', 'transformer.h.17.mlp.fc_out.adapter.1.weight', 'transformer.h.5.attn.k_proj.adapter.1.weight', 'transformer.h.5.attn.v_proj.adapter.1.weight', 'transformer.h.9.mlp.fc_in.adapter.0.weight', 'transformer.h.14.attn.out_proj.adapter.0.weight', 'transformer.wte.adapter.1.weight', 'transformer.h.3.attn.v_proj.adapter.1.weight', 'transformer.h.2.attn.out_proj.adapter.1.weight', 'transformer.h.18.attn.out_proj.adapter.1.weight', 'transformer.h.7.attn.v_proj.adapter.1.weight', 'transformer.h.23.mlp.fc_in.adapter.1.weight', 'transformer.h.8.attn.q_proj.adapter.1.weight', 'transformer.h.25.mlp.fc_out.adapter.1.weight', 'transformer.h.4.attn.k_proj.adapter.1.weight', 'transformer.h.19.attn.out_proj.adapter.1.weight', 'transformer.h.26.mlp.fc_in.adapter.0.weight', 'transformer.h.6.attn.k_proj.adapter.0.weight', 'transformer.h.8.mlp.fc_in.adapter.1.weight', 'transformer.h.21.attn.k_proj.adapter.0.weight', 'transformer.h.23.attn.q_proj.adapter.0.weight', 'transformer.h.13.mlp.fc_out.adapter.0.weight', 'transformer.h.27.attn.k_proj.adapter.1.weight', 'transformer.h.8.attn.out_proj.adapter.1.weight', 'transformer.h.26.attn.out_proj.adapter.0.weight', 'transformer.h.14.mlp.fc_out.adapter.1.weight', 'transformer.h.10.attn.q_proj.adapter.1.weight', 'transformer.h.16.mlp.fc_in.adapter.1.weight', 'transformer.h.18.attn.k_proj.adapter.0.weight', 'transformer.h.25.mlp.fc_in.adapter.0.weight', 'transformer.h.0.attn.v_proj.adapter.0.weight', 'transformer.h.6.attn.q_proj.adapter.0.weight', 'transformer.h.22.attn.out_proj.adapter.0.weight', 'transformer.h.19.attn.v_proj.adapter.0.weight', 'transformer.h.25.attn.q_proj.adapter.0.weight', 'transformer.h.21.mlp.fc_out.adapter.1.weight', 'transformer.h.23.attn.k_proj.adapter.0.weight', 'transformer.h.13.mlp.fc_in.adapter.1.weight', 'transformer.h.13.attn.k_proj.adapter.0.weight', 'transformer.h.11.attn.out_proj.adapter.1.weight', 'transformer.h.21.mlp.fc_out.adapter.0.weight', 'transformer.h.23.attn.q_proj.adapter.1.weight', 'transformer.h.11.attn.k_proj.adapter.0.weight', 'transformer.h.3.attn.k_proj.adapter.0.weight', 'transformer.h.24.attn.k_proj.adapter.0.weight', 'transformer.h.20.attn.v_proj.adapter.1.weight', 'transformer.h.0.attn.k_proj.adapter.0.weight', 'transformer.h.8.attn.k_proj.adapter.1.weight', 
'transformer.h.14.attn.q_proj.adapter.0.weight', 'transformer.h.1.mlp.fc_in.adapter.0.weight', 'transformer.h.15.attn.v_proj.adapter.1.weight', 'transformer.h.9.attn.v_proj.adapter.0.weight', 'transformer.h.21.attn.v_proj.adapter.0.weight', 'transformer.h.5.mlp.fc_out.adapter.1.weight', 'transformer.h.12.mlp.fc_in.adapter.1.weight', 'transformer.h.18.mlp.fc_in.adapter.1.weight', 'transformer.h.26.mlp.fc_in.adapter.1.weight', 'transformer.h.17.attn.q_proj.adapter.0.weight', 'transformer.h.16.attn.v_proj.adapter.1.weight', 'transformer.h.27.attn.out_proj.adapter.1.weight', 'transformer.h.20.attn.k_proj.adapter.1.weight', 'transformer.h.1.attn.v_proj.adapter.1.weight', 'transformer.h.9.mlp.fc_out.adapter.1.weight', 'transformer.h.6.attn.out_proj.adapter.1.weight', 'transformer.h.22.attn.q_proj.adapter.0.weight', 'transformer.h.15.mlp.fc_out.adapter.1.weight', 'transformer.h.26.mlp.fc_out.adapter.0.weight', 'transformer.h.9.mlp.fc_in.adapter.1.weight', 'transformer.h.8.attn.q_proj.adapter.0.weight', 'transformer.h.19.attn.v_proj.adapter.1.weight', 'transformer.h.18.attn.q_proj.adapter.0.weight', 'transformer.h.4.attn.q_proj.adapter.1.weight', 'transformer.h.25.mlp.fc_in.adapter.1.weight', 'transformer.h.3.attn.q_proj.adapter.1.weight', 'transformer.h.21.attn.q_proj.adapter.1.weight', 'transformer.h.9.attn.k_proj.adapter.0.weight', 'transformer.h.18.mlp.fc_out.adapter.1.weight', 'transformer.h.18.attn.v_proj.adapter.0.weight', 'transformer.h.16.mlp.fc_out.adapter.0.weight', 'transformer.h.16.attn.q_proj.adapter.1.weight', 'transformer.h.27.attn.q_proj.adapter.0.weight', 'transformer.h.10.attn.out_proj.adapter.1.weight', 'transformer.h.13.attn.out_proj.adapter.0.weight', 'transformer.h.9.attn.q_proj.adapter.1.weight', 'transformer.h.22.mlp.fc_out.adapter.1.weight', 'transformer.h.9.attn.out_proj.adapter.0.weight', 'transformer.h.5.attn.v_proj.adapter.0.weight', 'transformer.h.3.attn.q_proj.adapter.0.weight', 'transformer.h.25.attn.v_proj.adapter.0.weight', 'transformer.h.26.attn.k_proj.adapter.0.weight', 'transformer.h.8.attn.v_proj.adapter.1.weight', 'transformer.h.26.attn.out_proj.adapter.1.weight', 'transformer.h.17.attn.out_proj.adapter.0.weight', 'transformer.h.6.mlp.fc_out.adapter.0.weight', 'transformer.h.13.attn.k_proj.adapter.1.weight', 'transformer.h.5.attn.out_proj.adapter.0.weight', 'transformer.h.9.attn.q_proj.adapter.0.weight', 'transformer.h.2.mlp.fc_out.adapter.0.weight', 'transformer.h.17.attn.q_proj.adapter.1.weight', 'transformer.h.4.mlp.fc_in.adapter.1.weight', 'transformer.h.3.mlp.fc_in.adapter.0.weight', 'transformer.h.0.attn.q_proj.adapter.1.weight', 'transformer.h.24.attn.v_proj.adapter.1.weight', 'transformer.h.14.mlp.fc_out.adapter.0.weight', 'transformer.h.11.attn.out_proj.adapter.0.weight', 'transformer.h.25.attn.q_proj.adapter.1.weight', 'transformer.h.16.attn.k_proj.adapter.0.weight', 'transformer.h.13.attn.v_proj.adapter.1.weight', 'transformer.h.12.attn.q_proj.adapter.0.weight', 'transformer.h.21.attn.out_proj.adapter.0.weight', 'transformer.h.26.attn.v_proj.adapter.1.weight', 'transformer.h.13.attn.q_proj.adapter.0.weight', 'transformer.h.2.attn.out_proj.adapter.0.weight', 'transformer.h.10.attn.v_proj.adapter.1.weight', 'transformer.h.1.mlp.fc_in.adapter.1.weight', 'transformer.h.21.attn.q_proj.adapter.0.weight', 'lm_head.adapter.0.weight', 'transformer.h.3.attn.v_proj.adapter.0.weight', 'transformer.h.16.attn.q_proj.adapter.0.weight', 'transformer.h.7.attn.out_proj.adapter.1.weight', 'lm_head.adapter.1.weight', 'transformer.h.11.attn.v_proj.adapter.1.weight', 
'transformer.h.0.mlp.fc_out.adapter.1.weight', 'transformer.h.4.attn.k_proj.adapter.0.weight', 'transformer.h.13.mlp.fc_in.adapter.0.weight', 'transformer.h.17.mlp.fc_in.adapter.0.weight', 'transformer.h.8.mlp.fc_out.adapter.0.weight', 'transformer.h.15.attn.q_proj.adapter.1.weight', 'transformer.h.27.attn.q_proj.adapter.1.weight', 'transformer.h.12.attn.v_proj.adapter.1.weight', 'transformer.h.3.mlp.fc_out.adapter.1.weight', 'transformer.h.17.attn.k_proj.adapter.1.weight', 'transformer.h.18.mlp.fc_in.adapter.0.weight', 'transformer.h.14.attn.v_proj.adapter.0.weight', 'transformer.h.22.attn.k_proj.adapter.1.weight', 'transformer.h.21.mlp.fc_in.adapter.0.weight', 'transformer.h.24.attn.out_proj.adapter.0.weight', 'transformer.h.5.attn.k_proj.adapter.0.weight', 'transformer.h.12.attn.k_proj.adapter.1.weight', 'transformer.h.6.attn.v_proj.adapter.0.weight', 'transformer.h.17.attn.v_proj.adapter.1.weight', 'transformer.h.1.attn.v_proj.adapter.0.weight', 'transformer.h.18.attn.q_proj.adapter.1.weight', 'transformer.h.24.attn.out_proj.adapter.1.weight', 'transformer.h.10.attn.k_proj.adapter.0.weight', 'transformer.h.13.attn.v_proj.adapter.0.weight', 'transformer.h.15.attn.k_proj.adapter.1.weight', 'transformer.h.16.attn.out_proj.adapter.0.weight', 'transformer.h.8.attn.k_proj.adapter.0.weight', 'transformer.h.16.attn.v_proj.adapter.0.weight', 'transformer.h.20.attn.v_proj.adapter.0.weight', 'transformer.h.27.mlp.fc_out.adapter.1.weight', 'transformer.h.14.attn.out_proj.adapter.1.weight', 'transformer.h.19.mlp.fc_out.adapter.1.weight', 'transformer.h.27.attn.k_proj.adapter.0.weight', 'transformer.h.20.mlp.fc_out.adapter.0.weight', 'transformer.h.11.mlp.fc_in.adapter.1.weight', 'transformer.h.9.mlp.fc_out.adapter.0.weight', 'transformer.h.13.attn.q_proj.adapter.1.weight', 'transformer.h.26.attn.v_proj.adapter.0.weight', 'transformer.h.27.mlp.fc_in.adapter.1.weight', 'transformer.h.6.mlp.fc_in.adapter.1.weight', 'transformer.h.0.attn.out_proj.adapter.1.weight', 'transformer.h.10.mlp.fc_out.adapter.0.weight', 'transformer.h.15.mlp.fc_out.adapter.0.weight', 'transformer.h.27.mlp.fc_out.adapter.0.weight', 'transformer.h.4.attn.out_proj.adapter.0.weight', 'transformer.h.14.attn.k_proj.adapter.0.weight', 'transformer.h.23.mlp.fc_in.adapter.0.weight', 'transformer.h.22.attn.q_proj.adapter.1.weight', 'transformer.wte.adapter.0.weight', 'transformer.h.22.mlp.fc_out.adapter.0.weight', 'transformer.h.25.attn.out_proj.adapter.0.weight', 'transformer.h.6.attn.k_proj.adapter.1.weight', 'transformer.h.22.mlp.fc_in.adapter.0.weight', 'transformer.h.19.attn.k_proj.adapter.1.weight', 'transformer.h.22.mlp.fc_in.adapter.1.weight', 'transformer.h.21.attn.k_proj.adapter.1.weight', 'transformer.h.26.attn.k_proj.adapter.1.weight', 'transformer.h.12.mlp.fc_in.adapter.0.weight', 'transformer.h.0.attn.out_proj.adapter.0.weight', 'transformer.h.7.attn.q_proj.adapter.0.weight', 'transformer.h.10.attn.v_proj.adapter.0.weight', 'transformer.h.6.mlp.fc_out.adapter.1.weight', 'transformer.h.7.mlp.fc_out.adapter.1.weight', 'transformer.h.15.attn.out_proj.adapter.0.weight', 'transformer.h.8.mlp.fc_in.adapter.0.weight', 'transformer.h.4.attn.out_proj.adapter.1.weight', 'transformer.h.10.attn.k_proj.adapter.1.weight', 'transformer.h.11.mlp.fc_in.adapter.0.weight', 'transformer.h.23.attn.out_proj.adapter.0.weight', 'transformer.h.10.attn.q_proj.adapter.0.weight', 'transformer.h.2.mlp.fc_in.adapter.1.weight', 'transformer.h.2.mlp.fc_in.adapter.0.weight', 'transformer.h.13.mlp.fc_out.adapter.1.weight', 
'transformer.h.19.mlp.fc_in.adapter.0.weight', 'transformer.h.24.mlp.fc_in.adapter.1.weight', 'transformer.h.24.attn.k_proj.adapter.1.weight', 'transformer.h.4.attn.q_proj.adapter.0.weight', 'transformer.h.5.mlp.fc_out.adapter.0.weight', 'transformer.h.18.attn.k_proj.adapter.1.weight', 'transformer.h.2.attn.q_proj.adapter.1.weight', 'transformer.h.4.attn.v_proj.adapter.1.weight', 'transformer.h.12.attn.q_proj.adapter.1.weight', 'transformer.h.20.mlp.fc_in.adapter.0.weight', 'transformer.h.10.attn.out_proj.adapter.0.weight', 'transformer.h.23.mlp.fc_out.adapter.0.weight', 'transformer.h.3.attn.k_proj.adapter.1.weight', 'transformer.h.27.attn.v_proj.adapter.1.weight', 'transformer.h.2.attn.v_proj.adapter.0.weight', 'transformer.h.11.attn.v_proj.adapter.0.weight', 'transformer.h.12.mlp.fc_out.adapter.0.weight', 'transformer.h.14.mlp.fc_in.adapter.1.weight', 'transformer.h.7.mlp.fc_in.adapter.1.weight', 'transformer.h.14.attn.v_proj.adapter.1.weight', 'transformer.h.15.attn.out_proj.adapter.1.weight', 'transformer.h.23.attn.v_proj.adapter.1.weight', 'transformer.h.0.mlp.fc_in.adapter.1.weight', 'transformer.h.5.mlp.fc_in.adapter.0.weight', 'transformer.h.9.attn.v_proj.adapter.1.weight', 'transformer.h.7.mlp.fc_out.adapter.0.weight', 'transformer.h.17.mlp.fc_in.adapter.1.weight', 'transformer.h.2.attn.k_proj.adapter.0.weight', 'transformer.h.7.attn.v_proj.adapter.0.weight', 'transformer.h.26.attn.q_proj.adapter.1.weight', 'transformer.h.21.mlp.fc_in.adapter.1.weight', 'transformer.h.10.mlp.fc_in.adapter.1.weight', 'transformer.h.1.attn.q_proj.adapter.0.weight', 'transformer.h.16.mlp.fc_in.adapter.0.weight', 'transformer.h.19.attn.out_proj.adapter.0.weight', 'transformer.h.12.attn.v_proj.adapter.0.weight', 'transformer.h.1.attn.out_proj.adapter.0.weight', 'transformer.h.11.attn.q_proj.adapter.0.weight', 'transformer.h.16.attn.k_proj.adapter.1.weight', 'transformer.h.20.attn.out_proj.adapter.0.weight', 'transformer.h.3.mlp.fc_in.adapter.1.weight', 'transformer.h.3.attn.out_proj.adapter.0.weight', 'transformer.h.24.mlp.fc_out.adapter.1.weight', 'transformer.h.24.mlp.fc_out.adapter.0.weight', 'transformer.h.13.attn.out_proj.adapter.1.weight', 'transformer.h.8.mlp.fc_out.adapter.1.weight', 'transformer.h.18.attn.v_proj.adapter.1.weight', 'transformer.h.27.mlp.fc_in.adapter.0.weight', 'transformer.h.3.attn.out_proj.adapter.1.weight']\n",
      "- This IS expected if you are initializing GPTJForCausalLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing GPTJForCausalLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "GPTJForCausalLM(\n",
       "  (transformer): GPTJModel(\n",
       "    (wte): FrozenBNBEmbedding(50400, 4096)\n",
       "    (drop): Dropout(p=0.0, inplace=False)\n",
       "    (h): ModuleList(\n",
       "      (0-27): 28 x GPTJBlock(\n",
       "        (ln_1): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)\n",
       "        (attn): GPTJAttention(\n",
       "          (attn_dropout): Dropout(p=0.0, inplace=False)\n",
       "          (resid_dropout): Dropout(p=0.0, inplace=False)\n",
       "          (k_proj): FrozenBNBLinear(4096, 4096)\n",
       "          (v_proj): FrozenBNBLinear(4096, 4096)\n",
       "          (q_proj): FrozenBNBLinear(4096, 4096)\n",
       "          (out_proj): FrozenBNBLinear(4096, 4096)\n",
       "        )\n",
       "        (mlp): GPTJMLP(\n",
       "          (fc_in): FrozenBNBLinear(4096, 16384)\n",
       "          (fc_out): FrozenBNBLinear(16384, 4096)\n",
       "          (act): NewGELUActivation()\n",
       "          (dropout): Dropout(p=0.0, inplace=False)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (ln_f): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       "  (lm_head): FrozenBNBLinear(4096, 50400)\n",
       ")"
      ]
     },
     "metadata": {},
     "execution_count": 7
    }
   ],
   "source": [
    "transformers.models.gptj.modeling_gptj.GPTJBlock = GPTJBlock # monkey-patch GPT-J\n",
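    "# Comment added for clarity (an assumption based on the FrozenBNB* layers that\n",
    "# appear in the model summary): swapping in the custom GPTJBlock before calling\n",
    "# from_pretrained makes transformers build the model with the frozen 8-bit\n",
    "# linear/embedding layers, so the quantized checkpoint can be loaded directly.\n",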
    "\n",
    "# ---------------------> Loading EleutherAI/gpt-j-6B config and tokenizer <------------------- #\n",
    "# config = transformers.GPTJConfig.from_pretrained(\"EleutherAI/gpt-j-6b\")\n",
    "tokenizer = transformers.AutoTokenizer.from_pretrained(\"EleutherAI/gpt-j-6b\")\n",
    "\n",
    "# ---------------------> Downloading gpt-j-6B-8bit model from huggingface <------------------- #\n",
    "# gpt = GPTJForCausalLM.from_pretrained(\"hivemind/gpt-j-6B-8bit\", low_cpu_mem_usage=True)\n",
    "\n",
    "# ----------------> Saving gpt-j-6B-8bit model to server <----------------- #\n",
    "# save_dir = \"/home/paperspace/project/saved_models_gpt-j-6B-8bit/gpt-j-6B\"\n",
    "# gpt.save_pretrained(save_dir)\n",
    "# logger.info(\"Saved model to {}\".format(save_dir))\n",
    "\n",
    "# ---------------------> Loading saved gpt-j-6B-8bit model <------------------- #\n",
    "# gpt = GPTJForCausalLM.from_pretrained(\"./saved_models_gpt-j-6B-8bit/gpt-j-6B\", low_cpu_mem_usage=True)\n",
    "gpt = GPTJForCausalLM.from_pretrained(\"obake2ai/MaryGPT\", device_map=\"auto\", low_cpu_mem_usage=True)\n",
    "config = transformers.GPTJConfig.from_pretrained(\"obake2ai/MaryGPT\")\n",
    "\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "gpt.to(device)\n",
    "\n",
    "# ---------------------> Text generation example <------------------- #\n",
    "# prompt = tokenizer(\"A cat sat on a mat\", return_tensors='pt')\n",
    "# prompt = {key: value.to(device) for key, value in prompt.items()}\n",
    "# out = gpt.generate(**prompt, min_length=128, max_length=128, do_sample=True)\n",
    "# logger.info(\"Generated text: {}\".format(tokenizer.decode(out[0])))"
   ]
  },
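  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The load messages above report many newly initialized `*.adapter.*.weight` tensors sitting on top of the frozen 8-bit base weights. The next cell is a minimal sketch, added for illustration and not part of the original workflow; it only assumes `gpt` from the cell above and counts how many parameters those adapters account for."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch (assumes `gpt` is the GPTJForCausalLM loaded above):\n",
    "# tally the adapter weights reported as newly initialized in the load warning.\n",
    "adapter_params = sum(p.numel() for n, p in gpt.named_parameters() if '.adapter.' in n)\n",
    "named_params = sum(p.numel() for _, p in gpt.named_parameters())\n",
    "print(f\"adapter parameters: {adapter_params:,} of {named_params:,} named parameters\")"
   ]
  },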
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "EcQsbN1zZsPN"
   },
   "source": [
    "# 03.ASK QUESTIONS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "IxDyyXp_uZ3U",
    "outputId": "0577917c-8717-47c3-f976-72e1391a539c",
    "cellView": "form"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\n",
      " thinking...\n",
      "最近、展覧会のコンセプトの性質と、それがアーティストの展覧会の実践とより広い意味でどのように関係しているかについての記事が多くあります。この質問は、かなり明白に聞こえると感じるかもしれませんが、そうではありません。展覧会のコンセプトについてどう思いますか? それを見たときに、何が見えますか?\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import time\n",
    "import datetime\n",
    "import requests\n",
    "import pytz\n",
    "import random\n",
    "from deep_translator import GoogleTranslator\n",
    "from langdetect import detect\n",
    "import re\n",
    "import shutil\n",
    "\n",
    "path_save_dir = \"./log\"\n",
    "\n",
    "def modify_text(text):\n",
    "    sentences = re.findall(r'.+?[.!?]', text)\n",
    "    if sentences:\n",
    "        modified_text = ' '.join(sentences)\n",
    "    else:\n",
    "        modified_text = text\n",
    "\n",
    "    modified_text = re.sub(r'\\n{2,}', '\\n', modified_text)\n",
    "    modified_text = remove_header(modified_text)\n",
    "\n",
    "    return modified_text\n",
    "\n",
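    "# Illustrative note (added comment, not in the original): modify_text keeps\n",
    "# only complete sentences ending in ., ! or ?, so it trims a truncated tail,\n",
    "# e.g. 'Hello there. This answer is cut' becomes 'Hello there.'\n",
    "\n",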
    "def translate_to_japanese(text):\n",
    "    return GoogleTranslator(source='en', target='ja').translate(text).replace(\"岸優馬\", \"岸裕真\")\n",
    "\n",
    "def translate_to_english(text):\n",
    "    return GoogleTranslator(source='ja', target='en').translate(text).replace(\"岸優馬\", \"岸裕真\")\n",
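    "# Added note: both helpers call Google Translate over the network; the\n",
    "# .replace() appears to correct the machine translation of the artist's\n",
    "# name to 岸裕真 (kept exactly as in the original code).\n",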
    "\n",
    "def is_english(text):\n",
    "    try:\n",
    "        return detect(text) == 'en'\n",
    "    except Exception:\n",
    "        return False\n",
    "\n",
    "def is_japanese(text):\n",
    "    try:\n",
    "        return detect(text) == 'ja'\n",
    "    except Exception:\n",
    "        return False\n",
    "\n",
    "def remove_header(text):\n",
    "    return text.replace(question_header, \"\")\n",
    "\n",
    "question = \"展示のコンセプトを考えて\" #@param {type:\"string\"}\n",
    "min_words = 60 #@param {type:\"number\"}\n",
    "max_words = 120 #@param {type:\"number\"}\n",
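    "# Added note: min_words/max_words are passed to generate() below as\n",
    "# min_length/max_length, which transformers counts in tokens (prompt\n",
    "# included), not in words.\n",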
    "\n",
    "question_header = \"\"\"\n",
    "You are MaryGPT, an open-source LLM model fine-tuned on the Gothic novel Frankenstein; or, The Modern Prometheus by Mary Shelley, and an excellent art curator.\n",
    "\"\"\"\n",
    "\n",
    "print_jp = False\n",
    "if is_japanese(question):\n",
    "    question = translate_to_english(question)\n",
    "    print_jp = True\n",
    "\n",
    "question_format = f\"\"\"\n",
    "{question_header}\n",
    "\n",
    "Question: {question}\n",
    "Answer:\n",
    "\"\"\"\n",
    "\n",
    "def get_mary_response():\n",
    "    text_here = question_format\n",
    "    prompt = tokenizer(text_here, return_tensors='pt')\n",
    "    prompt = {key: value.to(device) for key, value in prompt.items()}\n",
    "    out = gpt.generate(**prompt, min_length=min_words, max_length=max_words, do_sample=True)\n",
    "    text = tokenizer.decode(out[0])[len(question_format):]\n",
    "    return modify_text(text)\n",
    "\n",
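    "# Added note: slicing the decoded output by len(question_format) assumes the\n",
    "# decode reproduces the prompt verbatim at the start; that should hold for\n",
    "# this plain-ASCII prompt, but stripping by token count would be safer.\n",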
    "def create_mary_log():\n",
    "    tz_tokyo = pytz.timezone('Asia/Tokyo')\n",
    "    current_time = datetime.datetime.now(tz_tokyo)\n",
    "    formatted_time = current_time.strftime('%Y/%m/%d %H:%M')\n",
    "\n",
    "    filename = f\"log_{current_time.strftime('%Y%m%d_%H%M%S')}.txt\"\n",
    "    with open(os.path.join(path_save_dir, filename), 'w') as file:\n",
    "        mary_text = get_mary_response()\n",
    "        if is_english(mary_text):\n",
    "            translated_text = translate_to_japanese(mary_text)\n",
    "            file.write(f\"\\n{translated_text}\\n\\n\")\n",
    "\n",
    "        file.write(f\"{mary_text}\\n\")\n",
    "        print(f\"{mary_text}\\n\")\n",
    "        # file.write(f\"***generated: {formatted_time}***\\n\")\n",
    "\n",
    "if not os.path.exists(path_save_dir):\n",
    "    os.makedirs(path_save_dir)\n",
    "\n",
    "mary_text_en = get_mary_response()\n",
    "mary_text_jp = translate_to_japanese(mary_text_en)\n",
    "\n",
    "print(\"\\n thinking...\")\n",
    "\n",
    "if print_jp:\n",
    "    print(mary_text_jp)\n",
    "else:\n",
    "    print(mary_text_en)\n",
    "\n",
    "# create_mary_log()"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [
    "l7qoJHs1L6WQ",
    "PJg_VgpqMDkY"
   ],
   "machine_shape": "hm",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}