{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.8357621110673494,
  "eval_steps": 200,
  "global_step": 1800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15754233950374164,
      "grad_norm": 1.364139199256897,
      "learning_rate": 0.00025,
      "logits/chosen": -19.665550231933594,
      "logits/rejected": -18.70989227294922,
      "logps/chosen": -357.2802734375,
      "logps/rejected": -267.8329162597656,
      "loss": 0.498,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.69869065284729,
      "rewards/margins": 0.8159563541412354,
      "rewards/rejected": -0.1172657385468483,
      "step": 100
    },
    {
      "epoch": 0.31508467900748327,
      "grad_norm": 0.2697848677635193,
      "learning_rate": 0.0005,
      "logits/chosen": -19.965831756591797,
      "logits/rejected": -18.864395141601562,
      "logps/chosen": -340.5800476074219,
      "logps/rejected": -273.5838623046875,
      "loss": 0.242,
      "rewards/accuracies": 0.8662499785423279,
      "rewards/chosen": 1.5408194065093994,
      "rewards/margins": 3.1778359413146973,
      "rewards/rejected": -1.63701593875885,
      "step": 200
    },
    {
      "epoch": 0.31508467900748327,
      "eval_logits/chosen": -16.780941009521484,
      "eval_logits/rejected": -16.480772018432617,
      "eval_logps/chosen": -389.2388000488281,
      "eval_logps/rejected": -293.8738708496094,
      "eval_loss": 0.23645737767219543,
      "eval_rewards/accuracies": 0.9711538553237915,
      "eval_rewards/chosen": 0.6156416535377502,
      "eval_rewards/margins": 1.6693669557571411,
      "eval_rewards/rejected": -1.0537253618240356,
      "eval_runtime": 99.0189,
      "eval_samples_per_second": 1.05,
      "eval_steps_per_second": 0.525,
      "step": 200
    },
    {
      "epoch": 0.4726270185112249,
      "grad_norm": 0.3729426860809326,
      "learning_rate": 0.0004957532446941012,
      "logits/chosen": -19.795560836791992,
      "logits/rejected": -18.869535446166992,
      "logps/chosen": -349.1313781738281,
      "logps/rejected": -293.3199157714844,
      "loss": 0.2098,
      "rewards/accuracies": 0.8824999928474426,
      "rewards/chosen": 1.0055174827575684,
      "rewards/margins": 4.656251907348633,
      "rewards/rejected": -3.6507339477539062,
      "step": 300
    },
    {
      "epoch": 0.6301693580149665,
      "grad_norm": 0.24799980223178864,
      "learning_rate": 0.00048315725822143025,
      "logits/chosen": -19.78338050842285,
      "logits/rejected": -18.67037582397461,
      "logps/chosen": -345.9194641113281,
      "logps/rejected": -296.3516845703125,
      "loss": 0.1816,
      "rewards/accuracies": 0.8987500071525574,
      "rewards/chosen": 1.1964439153671265,
      "rewards/margins": 5.297502517700195,
      "rewards/rejected": -4.101058483123779,
      "step": 400
    },
    {
      "epoch": 0.6301693580149665,
      "eval_logits/chosen": -16.3292293548584,
      "eval_logits/rejected": -16.04510498046875,
      "eval_logps/chosen": -398.3055114746094,
      "eval_logps/rejected": -319.1045227050781,
      "eval_loss": 0.08102616667747498,
      "eval_rewards/accuracies": 0.9807692170143127,
      "eval_rewards/chosen": -0.291032999753952,
      "eval_rewards/margins": 3.2857601642608643,
      "eval_rewards/rejected": -3.576793670654297,
      "eval_runtime": 99.1533,
      "eval_samples_per_second": 1.049,
      "eval_steps_per_second": 0.524,
      "step": 400
    },
    {
      "epoch": 0.7877116975187082,
      "grad_norm": 1.1599677801132202,
      "learning_rate": 0.0004626399771610739,
      "logits/chosen": -19.652454376220703,
      "logits/rejected": -18.740825653076172,
      "logps/chosen": -342.26641845703125,
      "logps/rejected": -312.0155334472656,
      "loss": 0.2049,
      "rewards/accuracies": 0.8799999952316284,
      "rewards/chosen": 0.3767644166946411,
      "rewards/margins": 5.342309951782227,
      "rewards/rejected": -4.965545654296875,
      "step": 500
    },
    {
      "epoch": 0.9452540370224498,
      "grad_norm": 1.1663849353790283,
      "learning_rate": 0.00043489845649067753,
      "logits/chosen": -20.011140823364258,
      "logits/rejected": -19.01030731201172,
      "logps/chosen": -350.619873046875,
      "logps/rejected": -307.1583251953125,
      "loss": 0.1641,
      "rewards/accuracies": 0.8962500095367432,
      "rewards/chosen": 0.45414870977401733,
      "rewards/margins": 5.876157283782959,
      "rewards/rejected": -5.4220075607299805,
      "step": 600
    },
    {
      "epoch": 0.9452540370224498,
      "eval_logits/chosen": -16.64271354675293,
      "eval_logits/rejected": -16.393213272094727,
      "eval_logps/chosen": -397.2878723144531,
      "eval_logps/rejected": -322.0921630859375,
      "eval_loss": 0.04721178486943245,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -0.18926361203193665,
      "eval_rewards/margins": 3.686293125152588,
      "eval_rewards/rejected": -3.8755569458007812,
      "eval_runtime": 99.0866,
      "eval_samples_per_second": 1.05,
      "eval_steps_per_second": 0.525,
      "step": 600
    },
    {
      "epoch": 1.1027963765261914,
      "grad_norm": 0.13236922025680542,
      "learning_rate": 0.000400875187811047,
      "logits/chosen": -19.676679611206055,
      "logits/rejected": -18.6084041595459,
      "logps/chosen": -354.6936340332031,
      "logps/rejected": -311.38226318359375,
      "loss": 0.1031,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.7313293218612671,
      "rewards/margins": 6.36898946762085,
      "rewards/rejected": -5.637660026550293,
      "step": 700
    },
    {
      "epoch": 1.260338716029933,
      "grad_norm": 0.15625274181365967,
      "learning_rate": 0.00036172607909649605,
      "logits/chosen": -19.61475944519043,
      "logits/rejected": -18.85614776611328,
      "logps/chosen": -354.2134704589844,
      "logps/rejected": -341.8444519042969,
      "loss": 0.0554,
      "rewards/accuracies": 0.9825000166893005,
      "rewards/chosen": -0.4523475766181946,
      "rewards/margins": 7.603002548217773,
      "rewards/rejected": -8.055350303649902,
      "step": 800
    },
    {
      "epoch": 1.260338716029933,
      "eval_logits/chosen": -16.010990142822266,
      "eval_logits/rejected": -15.788914680480957,
      "eval_logps/chosen": -408.9882507324219,
      "eval_logps/rejected": -353.1039733886719,
      "eval_loss": 0.01217043399810791,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -1.3593038320541382,
      "eval_rewards/margins": 5.617432594299316,
      "eval_rewards/rejected": -6.976736068725586,
      "eval_runtime": 99.0253,
      "eval_samples_per_second": 1.05,
      "eval_steps_per_second": 0.525,
      "step": 800
    },
    {
      "epoch": 1.4178810555336747,
      "grad_norm": 0.22375567257404327,
      "learning_rate": 0.00031878118382826264,
      "logits/chosen": -19.352890014648438,
      "logits/rejected": -18.583425521850586,
      "logps/chosen": -359.9408264160156,
      "logps/rejected": -340.4473571777344,
      "loss": 0.0582,
      "rewards/accuracies": 0.9762499928474426,
      "rewards/chosen": -0.5718154907226562,
      "rewards/margins": 7.750644683837891,
      "rewards/rejected": -8.322461128234863,
      "step": 900
    },
    {
      "epoch": 1.5754233950374164,
      "grad_norm": 1.0517126321792603,
      "learning_rate": 0.00027349951370107985,
      "logits/chosen": -19.330745697021484,
      "logits/rejected": -18.550424575805664,
      "logps/chosen": -373.06610107421875,
      "logps/rejected": -360.41961669921875,
      "loss": 0.0741,
      "rewards/accuracies": 0.9637500047683716,
      "rewards/chosen": -1.6150453090667725,
      "rewards/margins": 7.987229824066162,
      "rewards/rejected": -9.602275848388672,
      "step": 1000
    },
    {
      "epoch": 1.5754233950374164,
      "eval_logits/chosen": -15.608610153198242,
      "eval_logits/rejected": -15.47758674621582,
      "eval_logps/chosen": -430.04034423828125,
      "eval_logps/rejected": -373.8860778808594,
      "eval_loss": 0.022561371326446533,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -3.464510440826416,
      "eval_rewards/margins": 5.590437889099121,
      "eval_rewards/rejected": -9.054947853088379,
      "eval_runtime": 99.1814,
      "eval_samples_per_second": 1.049,
      "eval_steps_per_second": 0.524,
      "step": 1000
    },
    {
      "epoch": 1.732965734541158,
      "grad_norm": 0.2072581648826599,
      "learning_rate": 0.00022741947009792817,
      "logits/chosen": -19.062545776367188,
      "logits/rejected": -18.211511611938477,
      "logps/chosen": -378.5089111328125,
      "logps/rejected": -351.533203125,
      "loss": 0.0623,
      "rewards/accuracies": 0.9775000214576721,
      "rewards/chosen": -1.9905846118927002,
      "rewards/margins": 7.95173454284668,
      "rewards/rejected": -9.9423189163208,
      "step": 1100
    },
    {
      "epoch": 1.8905080740448996,
      "grad_norm": 0.19075140357017517,
      "learning_rate": 0.00018210657837614962,
      "logits/chosen": -19.65765953063965,
      "logits/rejected": -18.76839256286621,
      "logps/chosen": -361.4976501464844,
      "logps/rejected": -350.03338623046875,
      "loss": 0.052,
      "rewards/accuracies": 0.9800000190734863,
      "rewards/chosen": -0.5509209632873535,
      "rewards/margins": 8.345035552978516,
      "rewards/rejected": -8.895956039428711,
      "step": 1200
    },
    {
      "epoch": 1.8905080740448996,
      "eval_logits/chosen": -16.13081169128418,
      "eval_logits/rejected": -15.851374626159668,
      "eval_logps/chosen": -403.6054992675781,
      "eval_logps/rejected": -352.6169738769531,
      "eval_loss": 0.015062261372804642,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -0.8210276961326599,
      "eval_rewards/margins": 6.107009410858154,
      "eval_rewards/rejected": -6.928036689758301,
      "eval_runtime": 99.1951,
      "eval_samples_per_second": 1.048,
      "eval_steps_per_second": 0.524,
      "step": 1200
    },
    {
      "epoch": 2.048050413548641,
      "grad_norm": 0.04302853345870972,
      "learning_rate": 0.00013910030064250462,
      "logits/chosen": -19.43883514404297,
      "logits/rejected": -18.620824813842773,
      "logps/chosen": -365.1725769042969,
      "logps/rejected": -346.1496887207031,
      "loss": 0.0357,
      "rewards/accuracies": 0.9887499809265137,
      "rewards/chosen": -0.8485901355743408,
      "rewards/margins": 8.334707260131836,
      "rewards/rejected": -9.183298110961914,
      "step": 1300
    },
    {
      "epoch": 2.205592753052383,
      "grad_norm": 0.2691422700881958,
      "learning_rate": 9.986173400221197e-05,
      "logits/chosen": -19.500974655151367,
      "logits/rejected": -18.651748657226562,
      "logps/chosen": -360.99383544921875,
      "logps/rejected": -359.66326904296875,
      "loss": 0.0097,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": -1.2903647422790527,
      "rewards/margins": 9.181097984313965,
      "rewards/rejected": -10.471461296081543,
      "step": 1400
    },
    {
      "epoch": 2.205592753052383,
      "eval_logits/chosen": -15.852449417114258,
      "eval_logits/rejected": -15.591791152954102,
      "eval_logps/chosen": -420.1238708496094,
      "eval_logps/rejected": -374.8275146484375,
      "eval_loss": 0.011651669628918171,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -2.4728617668151855,
      "eval_rewards/margins": 6.676229000091553,
      "eval_rewards/rejected": -9.149091720581055,
      "eval_runtime": 99.2676,
      "eval_samples_per_second": 1.048,
      "eval_steps_per_second": 0.524,
      "step": 1400
    },
    {
      "epoch": 2.3631350925561243,
      "grad_norm": 0.03317335993051529,
      "learning_rate": 6.572397118387572e-05,
      "logits/chosen": -19.316547393798828,
      "logits/rejected": -18.4414119720459,
      "logps/chosen": -368.3520202636719,
      "logps/rejected": -356.59405517578125,
      "loss": 0.0094,
      "rewards/accuracies": 0.9962499737739563,
      "rewards/chosen": -1.4736888408660889,
      "rewards/margins": 8.902289390563965,
      "rewards/rejected": -10.375977516174316,
      "step": 1500
    },
    {
      "epoch": 2.520677432059866,
      "grad_norm": 0.012674962170422077,
      "learning_rate": 3.784680999053808e-05,
      "logits/chosen": -19.28080177307129,
      "logits/rejected": -18.418855667114258,
      "logps/chosen": -384.20587158203125,
      "logps/rejected": -371.5294494628906,
      "loss": 0.0096,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": -1.5619627237319946,
      "rewards/margins": 9.07076644897461,
      "rewards/rejected": -10.632728576660156,
      "step": 1600
    },
    {
      "epoch": 2.520677432059866,
      "eval_logits/chosen": -15.806844711303711,
      "eval_logits/rejected": -15.545206069946289,
      "eval_logps/chosen": -422.5581359863281,
      "eval_logps/rejected": -379.27069091796875,
      "eval_loss": 0.010242895223200321,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -2.716292142868042,
      "eval_rewards/margins": 6.877115249633789,
      "eval_rewards/rejected": -9.59340763092041,
      "eval_runtime": 99.0694,
      "eval_samples_per_second": 1.05,
      "eval_steps_per_second": 0.525,
      "step": 1600
    },
    {
      "epoch": 2.6782197715636076,
      "grad_norm": 0.02596099302172661,
      "learning_rate": 1.7177350279888816e-05,
      "logits/chosen": -19.332918167114258,
      "logits/rejected": -18.499181747436523,
      "logps/chosen": -373.5088195800781,
      "logps/rejected": -373.32061767578125,
      "loss": 0.0073,
      "rewards/accuracies": 0.9975000023841858,
      "rewards/chosen": -1.5841505527496338,
      "rewards/margins": 9.309870719909668,
      "rewards/rejected": -10.894021034240723,
      "step": 1700
    },
    {
      "epoch": 2.8357621110673494,
      "grad_norm": 0.40319836139678955,
      "learning_rate": 4.417817153497928e-06,
      "logits/chosen": -19.361268997192383,
      "logits/rejected": -18.549575805664062,
      "logps/chosen": -375.323486328125,
      "logps/rejected": -373.2767333984375,
      "loss": 0.0085,
      "rewards/accuracies": 0.9962499737739563,
      "rewards/chosen": -1.6672673225402832,
      "rewards/margins": 9.543354988098145,
      "rewards/rejected": -11.210620880126953,
      "step": 1800
    },
    {
      "epoch": 2.8357621110673494,
      "eval_logits/chosen": -15.784783363342285,
      "eval_logits/rejected": -15.527274131774902,
      "eval_logps/chosen": -423.8032531738281,
      "eval_logps/rejected": -381.0099182128906,
      "eval_loss": 0.009673921391367912,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -2.8408048152923584,
      "eval_rewards/margins": 6.926526069641113,
      "eval_rewards/rejected": -9.767330169677734,
      "eval_runtime": 99.5666,
      "eval_samples_per_second": 1.045,
      "eval_steps_per_second": 0.522,
      "step": 1800
    }
  ],
  "logging_steps": 100,
  "max_steps": 1902,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}