Training in progress, step 800, checkpoint
last-checkpoint/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f2f61cccc71965321d5bfa0a82a36d3ce48485b151d51630bed6fa6f1b3e1ef0
 size 2066752
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d24716c68872d0691c8737d29256cdc4c237a8b9f40809eeea9970c38b4f513f
 size 2162798
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3c431bcafebc4c8ee346d130e382b11c81be579ca0bfd3918fae07b16e10b92f
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:40b6b717644e21f80a22ec98694b3a2fd9d62a6467e549d64314725dba905d52
 size 1064
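The four files above are Git LFS pointers: each stores only the spec version, the sha256 oid of the uploaded blob, and its size in bytes, while the binary payload itself lives in LFS storage. Below is a minimal sketch of pulling the updated model weights from this checkpoint and inspecting them, assuming a placeholder repo_id (the repository name is not part of this diff) and the last-checkpoint/ layout shown here:

# Hedged sketch: "user/model-in-training" is a hypothetical repository id.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

model_path = hf_hub_download(
    repo_id="user/model-in-training",              # placeholder, not taken from this commit
    filename="last-checkpoint/model.safetensors",
)
state_dict = load_file(model_path)                 # resolved blob; should match size 2066752 and the new sha256 oid
total_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {total_params} parameters")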
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
-"epoch": 0.
+"epoch": 0.23675643681562591,
 "eval_steps": 200,
-"global_step":
+"global_step": 800,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -311,6 +311,302 @@
 "eval_samples_per_second": 115.48,
 "eval_steps_per_second": 28.909,
 "step": 400
+},
+{
+"epoch": 0.12133767386800828,
+"grad_norm": 0.443359375,
+"learning_rate": 0.00019297764858882514,
+"loss": 8.9547,
+"step": 410
+},
+{
+"epoch": 0.12429712932820361,
+"grad_norm": 0.466796875,
+"learning_rate": 0.00019248258232139388,
+"loss": 8.9394,
+"step": 420
+},
+{
+"epoch": 0.12725658478839894,
+"grad_norm": 0.61328125,
+"learning_rate": 0.00019197133427991436,
+"loss": 8.9748,
+"step": 430
+},
+{
+"epoch": 0.13021604024859426,
+"grad_norm": 0.73046875,
+"learning_rate": 0.00019144399391799043,
+"loss": 8.9198,
+"step": 440
+},
+{
+"epoch": 0.1331754957087896,
+"grad_norm": 1.203125,
+"learning_rate": 0.00019090065350491626,
+"loss": 8.8904,
+"step": 450
+},
+{
+"epoch": 0.1361349511689849,
+"grad_norm": 0.494140625,
+"learning_rate": 0.0001903414081095315,
+"loss": 8.8971,
+"step": 460
+},
+{
+"epoch": 0.13909440662918024,
+"grad_norm": 0.48046875,
+"learning_rate": 0.00018976635558358722,
+"loss": 8.84,
+"step": 470
+},
+{
+"epoch": 0.14205386208937557,
+"grad_norm": 0.55859375,
+"learning_rate": 0.00018917559654462474,
+"loss": 8.838,
+"step": 480
+},
+{
+"epoch": 0.1450133175495709,
+"grad_norm": 0.5703125,
+"learning_rate": 0.00018856923435837022,
+"loss": 8.7761,
+"step": 490
+},
+{
+"epoch": 0.1479727730097662,
+"grad_norm": 0.96875,
+"learning_rate": 0.0001879473751206489,
+"loss": 8.8421,
+"step": 500
+},
+{
+"epoch": 0.15093222846996152,
+"grad_norm": 0.478515625,
+"learning_rate": 0.00018731012763882133,
+"loss": 8.7691,
+"step": 510
+},
+{
+"epoch": 0.15389168393015684,
+"grad_norm": 0.4921875,
+"learning_rate": 0.00018665760341274505,
+"loss": 8.7749,
+"step": 520
+},
+{
+"epoch": 0.15685113939035217,
+"grad_norm": 0.51171875,
+"learning_rate": 0.00018598991661526572,
+"loss": 8.79,
+"step": 530
+},
+{
+"epoch": 0.1598105948505475,
+"grad_norm": 0.58203125,
+"learning_rate": 0.00018530718407223974,
+"loss": 8.8742,
+"step": 540
+},
+{
+"epoch": 0.16277005031074282,
+"grad_norm": 1.234375,
+"learning_rate": 0.00018460952524209355,
+"loss": 8.7845,
+"step": 550
+},
+{
+"epoch": 0.16572950577093815,
+"grad_norm": 0.470703125,
+"learning_rate": 0.00018389706219492147,
+"loss": 8.8165,
+"step": 560
+},
+{
+"epoch": 0.16868896123113347,
+"grad_norm": 0.486328125,
+"learning_rate": 0.00018316991959112716,
+"loss": 8.7024,
+"step": 570
+},
+{
+"epoch": 0.1716484166913288,
+"grad_norm": 0.53515625,
+"learning_rate": 0.00018242822465961176,
+"loss": 8.7764,
+"step": 580
+},
+{
+"epoch": 0.17460787215152412,
+"grad_norm": 0.58984375,
+"learning_rate": 0.00018167210717551224,
+"loss": 8.7501,
+"step": 590
+},
+{
+"epoch": 0.17756732761171945,
+"grad_norm": 1.28125,
+"learning_rate": 0.00018090169943749476,
+"loss": 8.7257,
+"step": 600
+},
+{
+"epoch": 0.17756732761171945,
+"eval_loss": 8.762685775756836,
+"eval_runtime": 18.9408,
+"eval_samples_per_second": 79.3,
+"eval_steps_per_second": 19.851,
+"step": 600
+},
+{
+"epoch": 0.18052678307191478,
+"grad_norm": 0.54296875,
+"learning_rate": 0.00018011713624460608,
+"loss": 8.7709,
+"step": 610
+},
+{
+"epoch": 0.1834862385321101,
+"grad_norm": 0.53515625,
+"learning_rate": 0.00017931855487268782,
+"loss": 8.7334,
+"step": 620
+},
+{
+"epoch": 0.18644569399230543,
+"grad_norm": 0.56640625,
+"learning_rate": 0.0001785060950503568,
+"loss": 8.824,
+"step": 630
+},
+{
+"epoch": 0.18940514945250073,
+"grad_norm": 0.69921875,
+"learning_rate": 0.00017767989893455698,
+"loss": 8.6731,
+"step": 640
+},
+{
+"epoch": 0.19236460491269605,
+"grad_norm": 0.90625,
+"learning_rate": 0.00017684011108568592,
+"loss": 8.7669,
+"step": 650
+},
+{
+"epoch": 0.19532406037289138,
+"grad_norm": 0.49609375,
+"learning_rate": 0.00017598687844230088,
+"loss": 8.6911,
+"step": 660
+},
+{
+"epoch": 0.1982835158330867,
+"grad_norm": 0.44140625,
+"learning_rate": 0.00017512035029540885,
+"loss": 8.6932,
+"step": 670
+},
+{
+"epoch": 0.20124297129328203,
+"grad_norm": 0.52734375,
+"learning_rate": 0.000174240678262345,
+"loss": 8.71,
+"step": 680
+},
+{
+"epoch": 0.20420242675347736,
+"grad_norm": 0.59375,
+"learning_rate": 0.000173348016260244,
+"loss": 8.7219,
+"step": 690
+},
+{
+"epoch": 0.20716188221367268,
+"grad_norm": 1.3515625,
+"learning_rate": 0.00017244252047910892,
+"loss": 8.6973,
+"step": 700
+},
+{
+"epoch": 0.210121337673868,
+"grad_norm": 0.462890625,
+"learning_rate": 0.00017152434935448256,
+"loss": 8.6743,
+"step": 710
+},
+{
+"epoch": 0.21308079313406333,
+"grad_norm": 0.451171875,
+"learning_rate": 0.0001705936635397259,
+"loss": 8.7094,
+"step": 720
+},
+{
+"epoch": 0.21604024859425866,
+"grad_norm": 0.57421875,
+"learning_rate": 0.00016965062587790823,
+"loss": 8.7353,
+"step": 730
+},
+{
+"epoch": 0.218999704054454,
+"grad_norm": 0.5546875,
+"learning_rate": 0.00016869540137331445,
+"loss": 8.6939,
+"step": 740
+},
+{
+"epoch": 0.2219591595146493,
+"grad_norm": 1.0703125,
+"learning_rate": 0.00016772815716257412,
+"loss": 8.7202,
+"step": 750
+},
+{
+"epoch": 0.22491861497484464,
+"grad_norm": 0.51171875,
+"learning_rate": 0.00016674906248541726,
+"loss": 8.6779,
+"step": 760
+},
+{
+"epoch": 0.22787807043503996,
+"grad_norm": 0.671875,
+"learning_rate": 0.00016575828865506245,
+"loss": 8.6627,
+"step": 770
+},
+{
+"epoch": 0.2308375258952353,
+"grad_norm": 0.4375,
+"learning_rate": 0.0001647560090282419,
+"loss": 8.7348,
+"step": 780
+},
+{
+"epoch": 0.2337969813554306,
+"grad_norm": 0.6875,
+"learning_rate": 0.000163742398974869,
+"loss": 8.7236,
+"step": 790
+},
+{
+"epoch": 0.23675643681562591,
+"grad_norm": 1.4140625,
+"learning_rate": 0.0001627176358473537,
+"loss": 8.7416,
+"step": 800
+},
+{
+"epoch": 0.23675643681562591,
+"eval_loss": 8.710856437683105,
+"eval_runtime": 16.7859,
+"eval_samples_per_second": 89.48,
+"eval_steps_per_second": 22.4,
+"step": 800
 }
 ],
 "logging_steps": 10,
@@ -330,7 +626,7 @@
 "attributes": {}
 }
 },
-"total_flos":
+"total_flos": 20509072293888.0,
 "train_batch_size": 4,
 "trial_name": null,
 "trial_params": null
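Taken together, the trainer_state.json delta records the run advancing from step 400 to step 800: the training loss logged every 10 steps drifts from roughly 8.95 down to 8.74, and eval_loss improves from 8.7627 at step 600 to 8.7109 at step 800, with evaluation every 200 steps and a train batch size of 4. A minimal sketch for replaying these logged metrics from the checkpoint, assuming the standard Trainer layout in which the entries sit under a "log_history" key (the key itself lies outside the visible hunks):

# Hedged sketch: reads the trainer_state.json saved alongside this checkpoint.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"])   # 800 in this checkpoint
print("epoch:", state["epoch"])               # ~0.2368

for entry in state.get("log_history", []):    # assumed key; standard in Trainer state files
    if "eval_loss" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]:.4f}')
    elif "loss" in entry:
        print(f'step {entry["step"]}: loss={entry["loss"]:.4f}, lr={entry["learning_rate"]:.2e}')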