{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9986431478968792,
  "eval_steps": 500,
  "global_step": 368,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.027137042062415198,
      "grad_norm": 1.4497582912445068,
      "learning_rate": 0.0001998659482680456,
      "loss": 3.4336,
      "step": 10
    },
    {
      "epoch": 0.054274084124830396,
      "grad_norm": 1.846526861190796,
      "learning_rate": 0.00019904804439875633,
      "loss": 1.9601,
      "step": 20
    },
    {
      "epoch": 0.0814111261872456,
      "grad_norm": 0.9556183815002441,
      "learning_rate": 0.00019749279121818235,
      "loss": 1.202,
      "step": 30
    },
    {
      "epoch": 0.10854816824966079,
      "grad_norm": 1.0818341970443726,
      "learning_rate": 0.00019521176659107142,
      "loss": 1.1366,
      "step": 40
    },
    {
      "epoch": 0.13568521031207598,
      "grad_norm": 0.9879324436187744,
      "learning_rate": 0.00019222195128618106,
      "loss": 1.0269,
      "step": 50
    },
    {
      "epoch": 0.1628222523744912,
      "grad_norm": 1.1216270923614502,
      "learning_rate": 0.000188545602565321,
      "loss": 1.0766,
      "step": 60
    },
    {
      "epoch": 0.18995929443690637,
      "grad_norm": 1.1110620498657227,
      "learning_rate": 0.00018421008849228118,
      "loss": 0.9947,
      "step": 70
    },
    {
      "epoch": 0.21709633649932158,
      "grad_norm": 0.8764580488204956,
      "learning_rate": 0.00017924768419510904,
      "loss": 1.0024,
      "step": 80
    },
    {
      "epoch": 0.24423337856173677,
      "grad_norm": 1.2004687786102295,
      "learning_rate": 0.00017369533159843369,
      "loss": 1.04,
      "step": 90
    },
    {
      "epoch": 0.27137042062415195,
      "grad_norm": 0.9665369391441345,
      "learning_rate": 0.00016759436441447545,
      "loss": 0.9809,
      "step": 100
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 1.5474181175231934,
      "learning_rate": 0.00016099020044000727,
      "loss": 0.9405,
      "step": 110
    },
    {
      "epoch": 0.3256445047489824,
      "grad_norm": 1.014674186706543,
      "learning_rate": 0.00015393200344991995,
      "loss": 0.9054,
      "step": 120
    },
    {
      "epoch": 0.35278154681139756,
      "grad_norm": 1.013979434967041,
      "learning_rate": 0.00014647231720437686,
      "loss": 0.9754,
      "step": 130
    },
    {
      "epoch": 0.37991858887381275,
      "grad_norm": 1.0418416261672974,
      "learning_rate": 0.0001386666742941419,
      "loss": 0.8711,
      "step": 140
    },
    {
      "epoch": 0.40705563093622793,
      "grad_norm": 0.9280593395233154,
      "learning_rate": 0.0001305731827359753,
      "loss": 0.8358,
      "step": 150
    },
    {
      "epoch": 0.43419267299864317,
      "grad_norm": 1.0268974304199219,
      "learning_rate": 0.00012225209339563145,
      "loss": 0.8568,
      "step": 160
    },
    {
      "epoch": 0.46132971506105835,
      "grad_norm": 0.9812881350517273,
      "learning_rate": 0.00011376535145871684,
      "loss": 0.888,
      "step": 170
    },
    {
      "epoch": 0.48846675712347354,
      "grad_norm": 1.2109171152114868,
      "learning_rate": 0.00010517613528842097,
      "loss": 0.9166,
      "step": 180
    },
    {
      "epoch": 0.5156037991858887,
      "grad_norm": 0.9860134124755859,
      "learning_rate": 9.654838610302923e-05,
      "loss": 0.8357,
      "step": 190
    },
    {
      "epoch": 0.5427408412483039,
      "grad_norm": 1.1094295978546143,
      "learning_rate": 8.79463319744677e-05,
      "loss": 0.928,
      "step": 200
    },
    {
      "epoch": 0.5698778833107191,
      "grad_norm": 0.9386014342308044,
      "learning_rate": 7.943400969140635e-05,
      "loss": 0.9522,
      "step": 210
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 1.0436668395996094,
      "learning_rate": 7.107478804634325e-05,
      "loss": 0.8619,
      "step": 220
    },
    {
      "epoch": 0.6241519674355496,
      "grad_norm": 1.0143946409225464,
      "learning_rate": 6.293089609549325e-05,
      "loss": 0.8046,
      "step": 230
    },
    {
      "epoch": 0.6512890094979648,
      "grad_norm": 1.107064127922058,
      "learning_rate": 5.506295990328385e-05,
      "loss": 0.8532,
      "step": 240
    },
    {
      "epoch": 0.6784260515603799,
      "grad_norm": 0.8748170137405396,
      "learning_rate": 4.75295512200992e-05,
      "loss": 0.8276,
      "step": 250
    },
    {
      "epoch": 0.7055630936227951,
      "grad_norm": 1.0721319913864136,
      "learning_rate": 4.038675145307747e-05,
      "loss": 0.7517,
      "step": 260
    },
    {
      "epoch": 0.7327001356852103,
      "grad_norm": 1.1542731523513794,
      "learning_rate": 3.36877341759205e-05,
      "loss": 0.8542,
      "step": 270
    },
    {
      "epoch": 0.7598371777476255,
      "grad_norm": 0.89125657081604,
      "learning_rate": 2.7482369285662378e-05,
      "loss": 0.8601,
      "step": 280
    },
    {
      "epoch": 0.7869742198100407,
      "grad_norm": 0.9904446601867676,
      "learning_rate": 2.181685175319702e-05,
      "loss": 0.8787,
      "step": 290
    },
    {
      "epoch": 0.8141112618724559,
      "grad_norm": 0.9311710596084595,
      "learning_rate": 1.6733357731279377e-05,
      "loss": 0.7493,
      "step": 300
    },
    {
      "epoch": 0.841248303934871,
      "grad_norm": 1.0216141939163208,
      "learning_rate": 1.2269730580055805e-05,
      "loss": 0.7497,
      "step": 310
    },
    {
      "epoch": 0.8683853459972863,
      "grad_norm": 0.9951556324958801,
      "learning_rate": 8.45919914746337e-06,
      "loss": 0.8324,
      "step": 320
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 1.0728198289871216,
      "learning_rate": 5.3301304017194135e-06,
      "loss": 0.7964,
      "step": 330
    },
    {
      "epoch": 0.9226594301221167,
      "grad_norm": 1.1030397415161133,
      "learning_rate": 2.905818257394799e-06,
      "loss": 0.8166,
      "step": 340
    },
    {
      "epoch": 0.9497964721845319,
      "grad_norm": 0.974777102470398,
      "learning_rate": 1.2043101671253554e-06,
      "loss": 0.7952,
      "step": 350
    },
    {
      "epoch": 0.9769335142469471,
      "grad_norm": 1.078715443611145,
      "learning_rate": 2.382727698752474e-07,
      "loss": 0.8726,
      "step": 360
    }
  ],
  "logging_steps": 10,
  "max_steps": 368,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1620605933346816.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}