diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..2858132b348f44f98a7c0dc84873c8abb569ca4c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model/tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/model/rng_state_0.pth b/model/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b6473612e41c5cfd6973c2e71fa5f3ad2b2bcad1
--- /dev/null
+++ b/model/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:575119a228f98110923ffa2dedcb50e3317251b26054355d015e0b2240d566f2
+size 15984
diff --git a/model/rng_state_1.pth b/model/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8506e00431b6ac7067699c0ea4f59adb6fa0ba20
--- /dev/null
+++ b/model/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0728b56dab7abb5ef8a0d4bae3519c5767c97467bdd886d26bf19cc8599d0312
+size 15984
diff --git a/model/rng_state_10.pth b/model/rng_state_10.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4a20fa592760c3d152c5b80cf5b20ecbe21732e7
--- /dev/null
+++ b/model/rng_state_10.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:532d1ab071177c2e9d0cc3435d6314c1777a0356f9ce1a5d8f10fbe5f8fd8926
+size 15997
diff --git a/model/rng_state_11.pth b/model/rng_state_11.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a0cf51d6bb32b04eaf571e2b02dd08c5ad63bcf9
--- /dev/null
+++ b/model/rng_state_11.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a089513416d3ed447265187782ec1194ac2303155b941284463976c62bf3d9b
+size 15997
diff --git a/model/rng_state_12.pth b/model/rng_state_12.pth
new file mode 100644
index 0000000000000000000000000000000000000000..97aa200507e2626fe0be67e27cc3f3d8588b73af
--- /dev/null
+++ b/model/rng_state_12.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a87db6b16c9956496c45bf5c81750dc2349e157e60e544e5d256bb957be1243
+size 15997
diff --git a/model/rng_state_13.pth b/model/rng_state_13.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c9f4510389d4abf743a127eef0b37a79f4ff3404
--- /dev/null
+++ b/model/rng_state_13.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d5fe5d1b92643a3c3dd35cf2f0cbfa4a92599fe3ccbd8941395c1550f088aa
+size 15997
diff --git a/model/rng_state_14.pth b/model/rng_state_14.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9daf69a8ba370e952feb3caa59945138b687a9
--- /dev/null
+++ b/model/rng_state_14.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4a08278e808af796415aac256bf0c1e8619db1f0f025ff15775cd3b8d02bd59
+size 15997
diff --git a/model/rng_state_15.pth b/model/rng_state_15.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1697bc6bc56f75a7fe7c68e5e3662df1effa6be5
--- /dev/null
+++ b/model/rng_state_15.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88600b3520001a5a7e82c0e184d278b46492fda5424ea22ea243af15b40b2e82
+size 15997
diff --git a/model/rng_state_16.pth b/model/rng_state_16.pth
new file mode 100644
index 0000000000000000000000000000000000000000..480705c8256c0326852cded458d79278ea69be1b
--- /dev/null
+++ b/model/rng_state_16.pth
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a5825e92eb7518856e88ce648529dc0597c32a5f52764f0361a119217a4dfb +size 15997 diff --git a/model/rng_state_17.pth b/model/rng_state_17.pth new file mode 100644 index 0000000000000000000000000000000000000000..d425906b16e046c8945ff2dfc4983f09703767fa --- /dev/null +++ b/model/rng_state_17.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b30bd2cfa327fb55a93d390d852db109e0a2e1f102ecabf520870cbc79ca95ad +size 15997 diff --git a/model/rng_state_18.pth b/model/rng_state_18.pth new file mode 100644 index 0000000000000000000000000000000000000000..a8bc0d6d03dbe1ebf6d4e29ca65a4101a802e473 --- /dev/null +++ b/model/rng_state_18.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77c95f2456fa4473e7231ab94b79ecbc3c126d31d3a94c61f86ee6125b547456 +size 15997 diff --git a/model/rng_state_19.pth b/model/rng_state_19.pth new file mode 100644 index 0000000000000000000000000000000000000000..40fe1885e8f3059e85f9e86b49aaeffcbc6fcd7a --- /dev/null +++ b/model/rng_state_19.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:301f380300f7ae9a581c304ac407794ecff9d66e6c9c55ff6cffcfc8d94690a4 +size 15997 diff --git a/model/rng_state_2.pth b/model/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..ea499e285c97cca07fedd34662c3d4ab44ff6f47 --- /dev/null +++ b/model/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e481d4ef1546694da7337f6bb6c658b866dcb79b85deeb477da0d27ebe851e +size 15984 diff --git a/model/rng_state_20.pth b/model/rng_state_20.pth new file mode 100644 index 0000000000000000000000000000000000000000..4b90c8b7967b43e32281e4da4cce3c262dd52b37 --- /dev/null +++ b/model/rng_state_20.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b2e90afb8f9481396fa1f82913f7fb908e83e63bf2a38a6f6d2b869ca1d06db +size 15997 diff --git a/model/rng_state_21.pth b/model/rng_state_21.pth new file mode 100644 index 0000000000000000000000000000000000000000..fb87d8530e09738114018df827a61f5909b0686a --- /dev/null +++ b/model/rng_state_21.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35fafcbe672a0f62668f219b7ae7b88ddc86330086a2eca82505408964d61498 +size 15997 diff --git a/model/rng_state_22.pth b/model/rng_state_22.pth new file mode 100644 index 0000000000000000000000000000000000000000..ac9beaf79a1a63ee97f7c96c7ab506d4cac1d0e3 --- /dev/null +++ b/model/rng_state_22.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b8f851ca87ff60251262afa155c3ab50fc55615fb72c857120fd7fa3fe3177c +size 15997 diff --git a/model/rng_state_23.pth b/model/rng_state_23.pth new file mode 100644 index 0000000000000000000000000000000000000000..59e574d907feadf5f1866db4507ceae1cc8738c6 --- /dev/null +++ b/model/rng_state_23.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27d8cd100c48bfcfbf5c8a5164b02d5879f69b5a6276dae705a9c60de9a079c1 +size 15997 diff --git a/model/rng_state_24.pth b/model/rng_state_24.pth new file mode 100644 index 0000000000000000000000000000000000000000..5064859e4500f4ff0eca956dd68d11724dda1e9d --- /dev/null +++ b/model/rng_state_24.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78d1a58c82846308cc3df216d63b5388e6910766e8fbc6b4e8b28255bfff9f0b +size 15997 diff --git a/model/rng_state_25.pth b/model/rng_state_25.pth new file mode 100644 index 
0000000000000000000000000000000000000000..c0ea34f20d583f474b10710f5a185a5df623bde4 --- /dev/null +++ b/model/rng_state_25.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdee831f657ca735b9fcf1c4034be6dcc25cc224594a3b2c7488ea6ec175be96 +size 15997 diff --git a/model/rng_state_26.pth b/model/rng_state_26.pth new file mode 100644 index 0000000000000000000000000000000000000000..f7f04e9bee71a47c39adca907b2ed13aa8950fea --- /dev/null +++ b/model/rng_state_26.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e922469f65021a6179e6ecc7c2713db0a41dead9dc08d91e98ed34e65afd14e +size 15997 diff --git a/model/rng_state_27.pth b/model/rng_state_27.pth new file mode 100644 index 0000000000000000000000000000000000000000..f354e1d0d6f1928fe33a1d3a124b4811fe13a246 --- /dev/null +++ b/model/rng_state_27.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11867e8beaaaacbefb60c912ca48d2026f0222b2247dd97d78a6ddd227e75bcf +size 15997 diff --git a/model/rng_state_28.pth b/model/rng_state_28.pth new file mode 100644 index 0000000000000000000000000000000000000000..12c799aef79cf8dea6d92bc29ccd6c1ecd17ceb5 --- /dev/null +++ b/model/rng_state_28.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7664653b5278a30fcc7bd77ee73888988040ecbb6a9cb71fd8a5bb5ccd2c9a31 +size 15997 diff --git a/model/rng_state_29.pth b/model/rng_state_29.pth new file mode 100644 index 0000000000000000000000000000000000000000..6feba0895fc2b8bf1a18f837af2a763009232748 --- /dev/null +++ b/model/rng_state_29.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5007396f0c3c95678e1e62a10e31552d6bfe2303e6e78ab6882752474b2efdc7 +size 15997 diff --git a/model/rng_state_3.pth b/model/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..aeb38f92f106ac3f08bae4f82179a8a12243bccb --- /dev/null +++ b/model/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:353c60be37ea56fc992fca446598ceca5d1fd002aa3bd6dbb9ad740e6f47ebb3 +size 15984 diff --git a/model/rng_state_30.pth b/model/rng_state_30.pth new file mode 100644 index 0000000000000000000000000000000000000000..52254ce3ca964f52816b72f60945dcc808a64064 --- /dev/null +++ b/model/rng_state_30.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f01c58f06cc2a38c9093fadd85def1f922a5e8910e17a0d885f0df5570b13a98 +size 15997 diff --git a/model/rng_state_31.pth b/model/rng_state_31.pth new file mode 100644 index 0000000000000000000000000000000000000000..f6010f3b5f3d7deaa2e73ae0456b8054b44e766a --- /dev/null +++ b/model/rng_state_31.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35e075e15ab066bd7c9405a66e387f2ebf6b302fbd19ba7905da3f8cc8f71009 +size 15997 diff --git a/model/rng_state_32.pth b/model/rng_state_32.pth new file mode 100644 index 0000000000000000000000000000000000000000..1abc7db4e7e1af531008057d93d2fe0a91833a93 --- /dev/null +++ b/model/rng_state_32.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c27074f4ccb5fc4077f4940a43b61907eb102e1cbc07a5a08d6461ceb8c6a7ed +size 15997 diff --git a/model/rng_state_33.pth b/model/rng_state_33.pth new file mode 100644 index 0000000000000000000000000000000000000000..b49abc382a83da1f88b4073d5b617d437d4331a8 --- /dev/null +++ b/model/rng_state_33.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fae59d554c0c2da23a7045d1c62a4354464da2732198a7207afeaf9ab354ea29 +size 15997 diff --git 
a/model/rng_state_34.pth b/model/rng_state_34.pth new file mode 100644 index 0000000000000000000000000000000000000000..be2e72ad512446dac1277ed68b7a184ed52de7a0 --- /dev/null +++ b/model/rng_state_34.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:032d18ce966e3797a2a7ad6003df9c051648a5c862b1d724a639b8d5f6799997 +size 15997 diff --git a/model/rng_state_35.pth b/model/rng_state_35.pth new file mode 100644 index 0000000000000000000000000000000000000000..9510ce8ea8fd193390a4be323a68a97c4f551a94 --- /dev/null +++ b/model/rng_state_35.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe36f8e9ad684634bfa4de2f5c70a7dac62e700b7c380290179370b120854eb +size 15997 diff --git a/model/rng_state_36.pth b/model/rng_state_36.pth new file mode 100644 index 0000000000000000000000000000000000000000..0be6cd4c2e6f0d7e933cbd9dffe3ecfc50669dd6 --- /dev/null +++ b/model/rng_state_36.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d038f60cc47451788e3336c4ddbc332786d8155ddf596409abac316143e33901 +size 15997 diff --git a/model/rng_state_37.pth b/model/rng_state_37.pth new file mode 100644 index 0000000000000000000000000000000000000000..a11c0f384b4ea32889f10fcca0b3fdafd134bcd6 --- /dev/null +++ b/model/rng_state_37.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afc9bc8498e0e69ad3341eb9f234b6ede564671a2a1b08e0b1fb4a0dbf7c4149 +size 15997 diff --git a/model/rng_state_38.pth b/model/rng_state_38.pth new file mode 100644 index 0000000000000000000000000000000000000000..e823e2c2a42345049961e881c0c730ecbc6191be --- /dev/null +++ b/model/rng_state_38.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d4afb59377a18d04041d29ed80a7cd0edb248ada49f3f3a7012d8ebc6227d1c +size 15997 diff --git a/model/rng_state_39.pth b/model/rng_state_39.pth new file mode 100644 index 0000000000000000000000000000000000000000..49a0d03713b13a8870c6e9c727a00e75c2243572 --- /dev/null +++ b/model/rng_state_39.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46edc43c13721ef9e5a382998136530a7338f35fecf5bc192c3b8b25256a5b11 +size 15997 diff --git a/model/rng_state_4.pth b/model/rng_state_4.pth new file mode 100644 index 0000000000000000000000000000000000000000..9d5856cb7a3f15092fa5593507022316916f648e --- /dev/null +++ b/model/rng_state_4.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9107fe964ba7205e354084b85210e5a5ea1c98cfd4d38adb9cd3926945dcae4 +size 15984 diff --git a/model/rng_state_40.pth b/model/rng_state_40.pth new file mode 100644 index 0000000000000000000000000000000000000000..a78ff8ac511579519ba9d49c01b5c49016179490 --- /dev/null +++ b/model/rng_state_40.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47618413ad3ceb42df1301429da1819d4d0a28624a5996be064064e4c2afb9af +size 15997 diff --git a/model/rng_state_41.pth b/model/rng_state_41.pth new file mode 100644 index 0000000000000000000000000000000000000000..64c44812e1ded6535228e26e8271903a8776a593 --- /dev/null +++ b/model/rng_state_41.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0117f247c398b28e68c7a50a77645c242b5a9cb8592142c86546ad05c47a90b0 +size 15997 diff --git a/model/rng_state_42.pth b/model/rng_state_42.pth new file mode 100644 index 0000000000000000000000000000000000000000..60a3d69ebdbdcef9f04fd2cc6931529f6ae2e0f9 --- /dev/null +++ b/model/rng_state_42.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:904a28273d9352805baec2a674efa639e4df2314ad0fafddae9a5559ae13e5b1 +size 15997 diff --git a/model/rng_state_43.pth b/model/rng_state_43.pth new file mode 100644 index 0000000000000000000000000000000000000000..3f9a13ae7d2e8417d0eeaad65588fd67532183b6 --- /dev/null +++ b/model/rng_state_43.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:780fda3c77c5f2ea29269fc6c3cae98910574db4bba24d8a43ed449562e56e7f +size 15997 diff --git a/model/rng_state_44.pth b/model/rng_state_44.pth new file mode 100644 index 0000000000000000000000000000000000000000..dc5d2f46a6c9a168b12bf468403794eab3319fee --- /dev/null +++ b/model/rng_state_44.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5462c21f4bea77f45feec77c523ec9c6035d8739b753f01d1e0833bff42eea3d +size 15997 diff --git a/model/rng_state_45.pth b/model/rng_state_45.pth new file mode 100644 index 0000000000000000000000000000000000000000..7e452cbc724a2a522cd26c71cbcd448495297cca --- /dev/null +++ b/model/rng_state_45.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e95c415ca234c587a61aa484a7f229edd7f49c5e08411ddbae587239930dc4b9 +size 15997 diff --git a/model/rng_state_46.pth b/model/rng_state_46.pth new file mode 100644 index 0000000000000000000000000000000000000000..dfdd8e83a4e223b9de96657a0a23d241f10cd1e9 --- /dev/null +++ b/model/rng_state_46.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34acd0f511547278397f33bec32464c233b11cc7bc000f979a9c737b73263e0c +size 15997 diff --git a/model/rng_state_47.pth b/model/rng_state_47.pth new file mode 100644 index 0000000000000000000000000000000000000000..f30ba3a8c4f27adadccca5cede53c2529cb0fcb1 --- /dev/null +++ b/model/rng_state_47.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8934f1d047d4fb11a01862614c2c323fc20005fcd6dcf40062dd694d82ef8e13 +size 15997 diff --git a/model/rng_state_48.pth b/model/rng_state_48.pth new file mode 100644 index 0000000000000000000000000000000000000000..2a3a1d45eb5fc2e374ccd64aef2d14ecb860161c --- /dev/null +++ b/model/rng_state_48.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f67e452ebcc708df9242a679c303def7af044c48bec3bb0ac3ddc2f65ac7b361 +size 15997 diff --git a/model/rng_state_49.pth b/model/rng_state_49.pth new file mode 100644 index 0000000000000000000000000000000000000000..65a5f6f4644ddc107c08edb4ce561ed92b3f4e89 --- /dev/null +++ b/model/rng_state_49.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f406370ef8dedfbe030bff0cf91b397833b12f06819be814a18a8f4350db0011 +size 15997 diff --git a/model/rng_state_5.pth b/model/rng_state_5.pth new file mode 100644 index 0000000000000000000000000000000000000000..b824ee24d256695aad4a69a62d8e7125f51a17f2 --- /dev/null +++ b/model/rng_state_5.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69d1bb1abee38b92e53f3f23549b642ce0f1edcdccf7b6129847ac61636e96d5 +size 15984 diff --git a/model/rng_state_50.pth b/model/rng_state_50.pth new file mode 100644 index 0000000000000000000000000000000000000000..befdd4cbd047b600d74b26836d4ac409034ba317 --- /dev/null +++ b/model/rng_state_50.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d1f8979b6ea7ba2ec569b9ec88a7089993ed2d89ac4eccef16130222b512d55 +size 15997 diff --git a/model/rng_state_51.pth b/model/rng_state_51.pth new file mode 100644 index 0000000000000000000000000000000000000000..861ecaced33bcf9fea419ddf7454795830280173 --- /dev/null +++ 
b/model/rng_state_51.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24c2fab3048f1684c8b03e770e0dd4df99bc226a1c01aa2312d80865fabf7691 +size 15997 diff --git a/model/rng_state_52.pth b/model/rng_state_52.pth new file mode 100644 index 0000000000000000000000000000000000000000..d158e1c31f9fa951f3e88c18ea219b1da18b8658 --- /dev/null +++ b/model/rng_state_52.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5641536435c61c41ccbd8d13e6004ec6902861ccef3ca09e652339a831e46480 +size 15997 diff --git a/model/rng_state_53.pth b/model/rng_state_53.pth new file mode 100644 index 0000000000000000000000000000000000000000..3b032c42f14eb06fbf7c21fcf4ddf36b34373f3e --- /dev/null +++ b/model/rng_state_53.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b516a8cb28717bd22c3bd554fcba02b6bebe2d575cb5d76f24e7649b6b8ca5c +size 15997 diff --git a/model/rng_state_54.pth b/model/rng_state_54.pth new file mode 100644 index 0000000000000000000000000000000000000000..0cdf076133f84019d7c10921905ea61547b3fa07 --- /dev/null +++ b/model/rng_state_54.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03b7f82e1d5b988b49c47a93d1e8690cf72916236545e59b466ce8ab4312b946 +size 15997 diff --git a/model/rng_state_55.pth b/model/rng_state_55.pth new file mode 100644 index 0000000000000000000000000000000000000000..8a9efd69be6653a8e58632a8036dad1fcd5e290a --- /dev/null +++ b/model/rng_state_55.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f537329348875fd2338704cdae26023d61b8805529562686590b1dbd0c6b40bd +size 15997 diff --git a/model/rng_state_56.pth b/model/rng_state_56.pth new file mode 100644 index 0000000000000000000000000000000000000000..86c1eb104be3713b030779ddabea2f89b7514d11 --- /dev/null +++ b/model/rng_state_56.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:866826a40dca9901537a54f7bb7f1b1ede3a4fdbfaf04d141dda67f43f8f45aa +size 15997 diff --git a/model/rng_state_57.pth b/model/rng_state_57.pth new file mode 100644 index 0000000000000000000000000000000000000000..e7c1c56b4bb1218c41aa8006c5d72446f6783462 --- /dev/null +++ b/model/rng_state_57.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b08c9aa9802f5c4688fadb37129739ce0c76d05948da52eb136dd0930210d071 +size 15997 diff --git a/model/rng_state_58.pth b/model/rng_state_58.pth new file mode 100644 index 0000000000000000000000000000000000000000..ac62d26e4035190d055538ff0b7d2d68b937e7d5 --- /dev/null +++ b/model/rng_state_58.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a53f328f8cf7072305393447ff2f20e46f6d0ee87f440a79466fba1363854db +size 15997 diff --git a/model/rng_state_59.pth b/model/rng_state_59.pth new file mode 100644 index 0000000000000000000000000000000000000000..2e40c99071921204f6b16b68f08726be7f8582b6 --- /dev/null +++ b/model/rng_state_59.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85927d1eadcdf481d5a41b1d59ca8ed231bc2286cd5b9c13c937d234ce21ab8d +size 15997 diff --git a/model/rng_state_6.pth b/model/rng_state_6.pth new file mode 100644 index 0000000000000000000000000000000000000000..a9fd0364bb8f1a8e91eca45be5e1b6672b4d9afd --- /dev/null +++ b/model/rng_state_6.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afd5516048e20f36959601574e29e40106085a7d3cdc7bf425ce5e84633490e6 +size 15984 diff --git a/model/rng_state_60.pth b/model/rng_state_60.pth new file mode 100644 index 
0000000000000000000000000000000000000000..040813eb2a6c3d2cb2389bf9fbfe18a2b74a8781 --- /dev/null +++ b/model/rng_state_60.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:802fccfec77e964c5bf73d4cd98dc7c14476c3a9464805fb030ad7112b240d4f +size 15997 diff --git a/model/rng_state_61.pth b/model/rng_state_61.pth new file mode 100644 index 0000000000000000000000000000000000000000..77d7b36fc7721821a368e65f2b9a206ab4a656dc --- /dev/null +++ b/model/rng_state_61.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7e9fba5ef9547603c4eebe8a40e42d684e9260ea4e802ea10e72bc9c834b6b7 +size 15997 diff --git a/model/rng_state_62.pth b/model/rng_state_62.pth new file mode 100644 index 0000000000000000000000000000000000000000..02b7e9db0b425d74053794ccce3a619eb12e9d4a --- /dev/null +++ b/model/rng_state_62.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86183e98832237d8e9138bc6ca2d13e08f0e33a90357a2993cc6d3d27a89553a +size 15997 diff --git a/model/rng_state_63.pth b/model/rng_state_63.pth new file mode 100644 index 0000000000000000000000000000000000000000..a0333eaa2266f7e70bf83c92fa349e279b99c9df --- /dev/null +++ b/model/rng_state_63.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5afd159f9f5b56042248622f666ca9c9bcbc895845d02415ac41d9e620b3b7c6 +size 15997 diff --git a/model/rng_state_64.pth b/model/rng_state_64.pth new file mode 100644 index 0000000000000000000000000000000000000000..f559c730cef8f8a75acafcec36ab50357c86fc0c --- /dev/null +++ b/model/rng_state_64.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36d36b38fcfd219f00f925fd7a0f95635eba5b93d74563b1a2f6c10730214d37 +size 15997 diff --git a/model/rng_state_65.pth b/model/rng_state_65.pth new file mode 100644 index 0000000000000000000000000000000000000000..a134e9218657fc1f48e5472d63bbc0edaacafa61 --- /dev/null +++ b/model/rng_state_65.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7029c8bdfe778b22b1c01dfe98411e5d1231e710739438f43c5497b6d15c6d56 +size 15997 diff --git a/model/rng_state_66.pth b/model/rng_state_66.pth new file mode 100644 index 0000000000000000000000000000000000000000..f76f12ea8fc7861d407ea55cb6467a41afe7f2ce --- /dev/null +++ b/model/rng_state_66.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:958ba07916e05a028b4de005794d20c68feb47e1522f6ca40641e38e2a9c6252 +size 15997 diff --git a/model/rng_state_67.pth b/model/rng_state_67.pth new file mode 100644 index 0000000000000000000000000000000000000000..aca85a655bc940c61d919b6c535fa5fcd34a8127 --- /dev/null +++ b/model/rng_state_67.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e60e7d3d9ebee1ae244be947b9b6b40bf4bbaa46c99ce3bb3a13622633f3207 +size 15997 diff --git a/model/rng_state_68.pth b/model/rng_state_68.pth new file mode 100644 index 0000000000000000000000000000000000000000..db9f5b2b109da97f1177a7009a754332bf8dbf4d --- /dev/null +++ b/model/rng_state_68.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4637e93c49f93f65c9bdc47826178d81be8083ea997bf4a988324fa39675ad2d +size 15997 diff --git a/model/rng_state_69.pth b/model/rng_state_69.pth new file mode 100644 index 0000000000000000000000000000000000000000..00dc0c208117905ff5cfbe7e1528dc9ff2df4e67 --- /dev/null +++ b/model/rng_state_69.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea87d15d4714f1808d627913fabe41259e068f27469e95db8fe83313a2839510 +size 15997 diff --git 
a/model/rng_state_7.pth b/model/rng_state_7.pth new file mode 100644 index 0000000000000000000000000000000000000000..4e80125fd18efcb1097384319888b699f4dce7e7 --- /dev/null +++ b/model/rng_state_7.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e2c46927fc06939b4c976a01e4b95dec1f8b98ceaea86d31a5d756fc30ff006 +size 15984 diff --git a/model/rng_state_70.pth b/model/rng_state_70.pth new file mode 100644 index 0000000000000000000000000000000000000000..1cf437498358abfdbaca751c00975f4e77a73b13 --- /dev/null +++ b/model/rng_state_70.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b886de24fd85716e00cc7670826b405a9d0e58d878c9a21a33fee96fc17c39ff +size 15997 diff --git a/model/rng_state_71.pth b/model/rng_state_71.pth new file mode 100644 index 0000000000000000000000000000000000000000..60751592615b273c71d85d7f6c442b6bfebae637 --- /dev/null +++ b/model/rng_state_71.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad6d510e6809fda27e463555027179f12d796b0f0018b264dfeaacf9ff8feba1 +size 15997 diff --git a/model/rng_state_72.pth b/model/rng_state_72.pth new file mode 100644 index 0000000000000000000000000000000000000000..1013eb5b4f0cb2290d669b3bd9ca0956608e065b --- /dev/null +++ b/model/rng_state_72.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfd49543ac4404252e03e3fa89f4a066974feeff6c5c43b23913189ab30d93f6 +size 15997 diff --git a/model/rng_state_73.pth b/model/rng_state_73.pth new file mode 100644 index 0000000000000000000000000000000000000000..33ea06f89553d05d199df0e44a07fe58dfc41256 --- /dev/null +++ b/model/rng_state_73.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecefd666a862ff3fa6a1003c4623bddb092eee5569105a1e2dcde402ef3ccf66 +size 15997 diff --git a/model/rng_state_74.pth b/model/rng_state_74.pth new file mode 100644 index 0000000000000000000000000000000000000000..0998746f2f5fa15e0a1d734ea9fa044ba1505184 --- /dev/null +++ b/model/rng_state_74.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8371030d649732ea5d315da3f2a86ce13551935251f4bd07ba48bceec51c69f6 +size 15997 diff --git a/model/rng_state_75.pth b/model/rng_state_75.pth new file mode 100644 index 0000000000000000000000000000000000000000..aca4f314c0bbd7e40db6d47dceabf19208a40d8f --- /dev/null +++ b/model/rng_state_75.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acde597c5e354c9d7c97920645d0a8dbe6e974a3a2e6fb8b2336a70c74c4538b +size 15997 diff --git a/model/rng_state_76.pth b/model/rng_state_76.pth new file mode 100644 index 0000000000000000000000000000000000000000..2216d1112a964a1feeaf9eb1a220f466d3832353 --- /dev/null +++ b/model/rng_state_76.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40e51599a73435e8c9a655397e612a3e5335df3d824150253c24af4be9a14e32 +size 15997 diff --git a/model/rng_state_77.pth b/model/rng_state_77.pth new file mode 100644 index 0000000000000000000000000000000000000000..55715d821ade51145187f2330e278a4194332095 --- /dev/null +++ b/model/rng_state_77.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c15a2ee5949cbf7eb30f204d5f9eb104ef2a78aab5e9da9d83f0f4368ccdb562 +size 15997 diff --git a/model/rng_state_78.pth b/model/rng_state_78.pth new file mode 100644 index 0000000000000000000000000000000000000000..011aa51aad2a0b7596a847bbe3a644898962a023 --- /dev/null +++ b/model/rng_state_78.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:add160ea283e1abe1c851a7f46117eee835b212eae23de320bba1852199ec17f +size 15997 diff --git a/model/rng_state_79.pth b/model/rng_state_79.pth new file mode 100644 index 0000000000000000000000000000000000000000..ddc57a3348b16e6dedcc247a7fe3bdb2354c29ef --- /dev/null +++ b/model/rng_state_79.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6dec88d3f2f0d051154207981d3d333a9820817490cd68ae658c15f866d4807 +size 15997 diff --git a/model/rng_state_8.pth b/model/rng_state_8.pth new file mode 100644 index 0000000000000000000000000000000000000000..b95c8ee87fac85945aa1b015c0678470851b032f --- /dev/null +++ b/model/rng_state_8.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87544a1ecb93bba51fc38c978d8a027be1635c93e80f7f0679c1154bc76e16d3 +size 15984 diff --git a/model/rng_state_80.pth b/model/rng_state_80.pth new file mode 100644 index 0000000000000000000000000000000000000000..b822785d772f1c3a21f854b28b7b239f819f02e9 --- /dev/null +++ b/model/rng_state_80.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c58101a47c3b0fb02ef6f13a983b915c7f0ee7d7f708a426edb1ff93ef80543 +size 15997 diff --git a/model/rng_state_81.pth b/model/rng_state_81.pth new file mode 100644 index 0000000000000000000000000000000000000000..5a55fe16397ffc90c2e34a3ce1daae871f124bc4 --- /dev/null +++ b/model/rng_state_81.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7c7d5598ed39f18e6cc4f4ab4d9bea78f1f6a56e250a4c391cbe3ef92fc460c +size 15997 diff --git a/model/rng_state_82.pth b/model/rng_state_82.pth new file mode 100644 index 0000000000000000000000000000000000000000..be43e9050800563dc0fb403455fd146bfd019450 --- /dev/null +++ b/model/rng_state_82.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbfa857ea9b731b70ac8ae1289a8958ee6a5cccd27dd8fb10299246ba2031bc8 +size 15997 diff --git a/model/rng_state_83.pth b/model/rng_state_83.pth new file mode 100644 index 0000000000000000000000000000000000000000..ec1595917af19a5fe99c3f8f9dd7c1c2daf2cc1f --- /dev/null +++ b/model/rng_state_83.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7293a7cc2b97cfb57af3251fda5ceeccf2041af4f6bfff725648bfef2567783c +size 15997 diff --git a/model/rng_state_84.pth b/model/rng_state_84.pth new file mode 100644 index 0000000000000000000000000000000000000000..e3620a0cdaac8c53208ab063cb17e4d05727c05f --- /dev/null +++ b/model/rng_state_84.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f53a621934c15ffdfd12631eb2485ec3985082caf8f1cc6c2acd903eaac7a5ee +size 15997 diff --git a/model/rng_state_85.pth b/model/rng_state_85.pth new file mode 100644 index 0000000000000000000000000000000000000000..15c1f5fe7369aeff9fbda3c5cae913981c9cbae6 --- /dev/null +++ b/model/rng_state_85.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:541bf428925331b47c908c5aa91f6e5d98bb83b377bd73a979ceea315c5a50ae +size 15997 diff --git a/model/rng_state_86.pth b/model/rng_state_86.pth new file mode 100644 index 0000000000000000000000000000000000000000..3e352033423c7393a86e85f279125ec34faecdf6 --- /dev/null +++ b/model/rng_state_86.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d409da046f5c2c7fdd87656a393268cf4268eb6e295d16f01327cd1c8ef749b +size 15997 diff --git a/model/rng_state_87.pth b/model/rng_state_87.pth new file mode 100644 index 0000000000000000000000000000000000000000..8f87d42334d53d64e2d393c7acb37de7e37d8ad4 --- /dev/null +++ 
b/model/rng_state_87.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:604ed8aa952b7eb5504d1bd3bf41a12f2e8acf41b8cde3c214c1827c7dd0c297 +size 15997 diff --git a/model/rng_state_88.pth b/model/rng_state_88.pth new file mode 100644 index 0000000000000000000000000000000000000000..1ab4ade22529083832d8b088e47c7175e555520c --- /dev/null +++ b/model/rng_state_88.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94a6e5dbbccef183dafd4e8689b6ec0bb2327e26eb80a002d799faa55849ef45 +size 15997 diff --git a/model/rng_state_89.pth b/model/rng_state_89.pth new file mode 100644 index 0000000000000000000000000000000000000000..dc1b9f0415e7e3e3fa9d3867db2b2a949fa51f75 --- /dev/null +++ b/model/rng_state_89.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:196f62dafa7c2b9ca56175e212a6563327dfdfa4c8fc984ead12292692d4bf99 +size 15997 diff --git a/model/rng_state_9.pth b/model/rng_state_9.pth new file mode 100644 index 0000000000000000000000000000000000000000..8ff6eb635d2b7556d5ecc6fdc6cd47867459dac8 --- /dev/null +++ b/model/rng_state_9.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f51e1e81143bcfd2cfd36874b3b7606bb4a14aac945154b167b31a816d9f450 +size 15984 diff --git a/model/rng_state_90.pth b/model/rng_state_90.pth new file mode 100644 index 0000000000000000000000000000000000000000..f4c8c37141b6420e581317a5caa02ad192513755 --- /dev/null +++ b/model/rng_state_90.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06c7eb05dd472f29c9ade32e29cacc9180f28b4e080dd0640763bf6be7bf5501 +size 15997 diff --git a/model/rng_state_91.pth b/model/rng_state_91.pth new file mode 100644 index 0000000000000000000000000000000000000000..efc5fe7bc08eb55fe4f2282a12d3baf1b8f61336 --- /dev/null +++ b/model/rng_state_91.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82c0b03745f89471fad0c937b492b9790d0166e24b1550b23b4d434016540e3f +size 15997 diff --git a/model/rng_state_92.pth b/model/rng_state_92.pth new file mode 100644 index 0000000000000000000000000000000000000000..49beaf9c7746566fdba375e48e45db9a0b464dbe --- /dev/null +++ b/model/rng_state_92.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c44cb42a5ca15ec38367340320891adc6d54636cfd87127d61f54c6268cdeab +size 15997 diff --git a/model/rng_state_93.pth b/model/rng_state_93.pth new file mode 100644 index 0000000000000000000000000000000000000000..d1bab06d7122b1df25e3eea2790f392567eeb7a6 --- /dev/null +++ b/model/rng_state_93.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:124de1711af5956476ea0b4da4b41cb51254260dcac005e97a74a46accc133d1 +size 15997 diff --git a/model/rng_state_94.pth b/model/rng_state_94.pth new file mode 100644 index 0000000000000000000000000000000000000000..fde6b8f32f3c9ca86edd9c090eeec56badadc4b5 --- /dev/null +++ b/model/rng_state_94.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:172b6040713a4f5153582f60c5a50ff089a58d9c46234a222faa78effc477ef5 +size 15997 diff --git a/model/rng_state_95.pth b/model/rng_state_95.pth new file mode 100644 index 0000000000000000000000000000000000000000..82f133262b54c38ded6ed5a2529b55068e1ef861 --- /dev/null +++ b/model/rng_state_95.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:539bf8e104a633abd6188ea3ca1eac62e04ab144d1a1bf2251fdecd2211c9d80 +size 15997 diff --git a/model/scheduler.pt b/model/scheduler.pt new file mode 100644 index 
0000000000000000000000000000000000000000..acdcb760ddf659574d4c5780518c531c0fe54251 --- /dev/null +++ b/model/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97829d75703ded14b7f16ce608a71f0da0d659a2f24478cbe4cfe7565bd33975 +size 1064 diff --git a/model/special_tokens_map.json b/model/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..278b7f0f84be865c4687700ee7b3c63d89a51e18 --- /dev/null +++ b/model/special_tokens_map.json @@ -0,0 +1,23 @@ +{ + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/model/tokenizer.json b/model/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/model/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/model/tokenizer_config.json b/model/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..276a919ff495d21fa400bdb06241dcc81175d88b --- /dev/null +++ b/model/tokenizer_config.json @@ -0,0 +1,2064 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": "<|reserved_special_token_3|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": 
false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + 
"single_word": false, + "special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": true + }, + "128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + 
"128097": { + "content": "<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128117": { + "content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + 
"content": "<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128138": { + "content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + 
"content": "<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128159": { + "content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + 
"content": "<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128180": { + "content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + 
"content": "<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128201": { + "content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + 
"content": "<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128222": { + "content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + 
"content": "<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128243": { + "content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + 
"content": "<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "bos_token": "<|begin_of_text|>", + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|end_of_text|>", + "padding_side": "right", + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/model/trainer_state.json b/model/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..ecfa7a2a3efb4004f7f54d7be8817586a692607d --- /dev/null +++ b/model/trainer_state.json @@ -0,0 +1,1697 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "eval_steps": 40000000000000000, + 
"global_step": 522, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.009578544061302681, + "grad_norm": 19.977911424248134, + "learning_rate": 4.777070063694267e-09, + "logits/chosen": 0.9882557988166809, + "logits/rejected": 0.86724853515625, + "logps/chosen": -0.6835294961929321, + "logps/rejected": -0.7026089429855347, + "loss": 5.255, + "nll_loss": 0.6835293769836426, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.835294246673584, + "rewards/margins": 0.19079475104808807, + "rewards/rejected": -7.026089668273926, + "step": 5 + }, + { + "epoch": 0.019157088122605363, + "grad_norm": 15.532794462607491, + "learning_rate": 9.554140127388535e-09, + "logits/chosen": 0.8198736906051636, + "logits/rejected": 0.7917336225509644, + "logps/chosen": -0.7847039103507996, + "logps/rejected": -0.7573590278625488, + "loss": 5.3866, + "nll_loss": 0.7847039103507996, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.847039222717285, + "rewards/margins": -0.2734483778476715, + "rewards/rejected": -7.573590278625488, + "step": 10 + }, + { + "epoch": 0.028735632183908046, + "grad_norm": 17.18263537036945, + "learning_rate": 1.4331210191082803e-08, + "logits/chosen": 0.6409434080123901, + "logits/rejected": 0.8134855031967163, + "logps/chosen": -0.7896786332130432, + "logps/rejected": -0.7245721220970154, + "loss": 5.1665, + "nll_loss": 0.7896786332130432, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -7.896786689758301, + "rewards/margins": -0.6510653495788574, + "rewards/rejected": -7.245721340179443, + "step": 15 + }, + { + "epoch": 0.038314176245210725, + "grad_norm": 14.59673969815782, + "learning_rate": 1.910828025477707e-08, + "logits/chosen": 0.7096026539802551, + "logits/rejected": 0.8205940127372742, + "logps/chosen": -0.7373861074447632, + "logps/rejected": -0.7078397870063782, + "loss": 5.229, + "nll_loss": 0.7373861074447632, + "rewards/accuracies": 0.0, + "rewards/chosen": -7.373861789703369, + "rewards/margins": -0.29546356201171875, + "rewards/rejected": -7.07839822769165, + "step": 20 + }, + { + "epoch": 0.04789272030651341, + "grad_norm": 15.05129246603728, + "learning_rate": 2.3885350318471336e-08, + "logits/chosen": 0.9928520321846008, + "logits/rejected": 0.9827763438224792, + "logps/chosen": -0.5954749584197998, + "logps/rejected": -0.654929518699646, + "loss": 5.4048, + "nll_loss": 0.595474898815155, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -5.954749584197998, + "rewards/margins": 0.5945457220077515, + "rewards/rejected": -6.549294948577881, + "step": 25 + }, + { + "epoch": 0.05747126436781609, + "grad_norm": 20.985831817061353, + "learning_rate": 2.8662420382165606e-08, + "logits/chosen": 0.7672659158706665, + "logits/rejected": 0.8577300310134888, + "logps/chosen": -0.6465980410575867, + "logps/rejected": -0.7104107737541199, + "loss": 5.2394, + "nll_loss": 0.6465979814529419, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.465980529785156, + "rewards/margins": 0.6381272077560425, + "rewards/rejected": -7.104107856750488, + "step": 30 + }, + { + "epoch": 0.06704980842911877, + "grad_norm": 16.252866127879475, + "learning_rate": 3.343949044585987e-08, + "logits/chosen": 0.8599953651428223, + "logits/rejected": 0.8298212289810181, + "logps/chosen": -0.7009156346321106, + "logps/rejected": -0.7079470157623291, + "loss": 5.3018, + "nll_loss": 0.700915515422821, + "rewards/accuracies": 0.6000000238418579, + 
"rewards/chosen": -7.009156227111816, + "rewards/margins": 0.07031383365392685, + "rewards/rejected": -7.079470157623291, + "step": 35 + }, + { + "epoch": 0.07662835249042145, + "grad_norm": 16.99436823968821, + "learning_rate": 3.821656050955414e-08, + "logits/chosen": 0.6351531744003296, + "logits/rejected": 0.7295518517494202, + "logps/chosen": -0.7221059799194336, + "logps/rejected": -0.7749537825584412, + "loss": 5.2718, + "nll_loss": 0.722105860710144, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.221059322357178, + "rewards/margins": 0.5284790992736816, + "rewards/rejected": -7.749538421630859, + "step": 40 + }, + { + "epoch": 0.08620689655172414, + "grad_norm": 16.474791128586975, + "learning_rate": 4.29936305732484e-08, + "logits/chosen": 0.9291857481002808, + "logits/rejected": 0.83441162109375, + "logps/chosen": -0.6729990243911743, + "logps/rejected": -0.7266682386398315, + "loss": 5.2957, + "nll_loss": 0.6729990243911743, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.729989528656006, + "rewards/margins": 0.5366925001144409, + "rewards/rejected": -7.2666826248168945, + "step": 45 + }, + { + "epoch": 0.09578544061302682, + "grad_norm": 17.51495070571998, + "learning_rate": 4.777070063694267e-08, + "logits/chosen": 0.776635468006134, + "logits/rejected": 0.845638632774353, + "logps/chosen": -0.7210168838500977, + "logps/rejected": -0.6794952154159546, + "loss": 5.2687, + "nll_loss": 0.7210168838500977, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.210168361663818, + "rewards/margins": -0.41521579027175903, + "rewards/rejected": -6.794952392578125, + "step": 50 + }, + { + "epoch": 0.1053639846743295, + "grad_norm": 15.080710084896578, + "learning_rate": 5.2547770700636935e-08, + "logits/chosen": 0.8091619610786438, + "logits/rejected": 0.8544187545776367, + "logps/chosen": -0.7046722769737244, + "logps/rejected": -0.703011691570282, + "loss": 5.276, + "nll_loss": 0.7046722769737244, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.046723365783691, + "rewards/margins": -0.01660604402422905, + "rewards/rejected": -7.030117034912109, + "step": 55 + }, + { + "epoch": 0.11494252873563218, + "grad_norm": 22.50777831402302, + "learning_rate": 5.732484076433121e-08, + "logits/chosen": 0.8281611204147339, + "logits/rejected": 0.8864553570747375, + "logps/chosen": -0.6377026438713074, + "logps/rejected": -0.7030226588249207, + "loss": 5.2442, + "nll_loss": 0.6377025842666626, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.377026557922363, + "rewards/margins": 0.6532003283500671, + "rewards/rejected": -7.030226707458496, + "step": 60 + }, + { + "epoch": 0.12452107279693486, + "grad_norm": 19.79864592347249, + "learning_rate": 6.210191082802548e-08, + "logits/chosen": 1.0290864706039429, + "logits/rejected": 0.7254050374031067, + "logps/chosen": -0.6748142838478088, + "logps/rejected": -0.6928014755249023, + "loss": 5.3121, + "nll_loss": 0.6748142838478088, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.748143196105957, + "rewards/margins": 0.1798720359802246, + "rewards/rejected": -6.928015232086182, + "step": 65 + }, + { + "epoch": 0.13409961685823754, + "grad_norm": 15.639368843476637, + "learning_rate": 6.687898089171974e-08, + "logits/chosen": 0.9622044563293457, + "logits/rejected": 0.9868735074996948, + "logps/chosen": -0.7220475077629089, + "logps/rejected": -0.8194485902786255, + "loss": 5.2694, + "nll_loss": 0.7220475077629089, + "rewards/accuracies": 
0.800000011920929, + "rewards/chosen": -7.220475196838379, + "rewards/margins": 0.9740108251571655, + "rewards/rejected": -8.194485664367676, + "step": 70 + }, + { + "epoch": 0.14367816091954022, + "grad_norm": 15.338789064769195, + "learning_rate": 7.165605095541401e-08, + "logits/chosen": 0.8242565989494324, + "logits/rejected": 0.8403714299201965, + "logps/chosen": -0.8075603246688843, + "logps/rejected": -0.7985510230064392, + "loss": 5.307, + "nll_loss": 0.8075603246688843, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.075602531433105, + "rewards/margins": -0.09009275585412979, + "rewards/rejected": -7.985510349273682, + "step": 75 + }, + { + "epoch": 0.1532567049808429, + "grad_norm": 16.558919395561837, + "learning_rate": 7.643312101910828e-08, + "logits/chosen": 0.852637767791748, + "logits/rejected": 0.6942145228385925, + "logps/chosen": -0.6923746466636658, + "logps/rejected": -0.748778760433197, + "loss": 5.2346, + "nll_loss": 0.6923746466636658, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.923746585845947, + "rewards/margins": 0.564041256904602, + "rewards/rejected": -7.48778772354126, + "step": 80 + }, + { + "epoch": 0.16283524904214558, + "grad_norm": 14.992266742573277, + "learning_rate": 8.121019108280254e-08, + "logits/chosen": 0.8808382749557495, + "logits/rejected": 0.9680411219596863, + "logps/chosen": -0.6730803847312927, + "logps/rejected": -0.8849495649337769, + "loss": 5.2452, + "nll_loss": 0.6730804443359375, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.730803489685059, + "rewards/margins": 2.118691921234131, + "rewards/rejected": -8.849495887756348, + "step": 85 + }, + { + "epoch": 0.1724137931034483, + "grad_norm": 14.696295604697424, + "learning_rate": 8.59872611464968e-08, + "logits/chosen": 0.6036122441291809, + "logits/rejected": 0.6832916140556335, + "logps/chosen": -0.7643388509750366, + "logps/rejected": -0.7602866888046265, + "loss": 5.3007, + "nll_loss": 0.7643388509750366, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.643387794494629, + "rewards/margins": -0.040520571172237396, + "rewards/rejected": -7.602867126464844, + "step": 90 + }, + { + "epoch": 0.18199233716475097, + "grad_norm": 14.017222797723742, + "learning_rate": 9.076433121019108e-08, + "logits/chosen": 0.9275333285331726, + "logits/rejected": 0.7763484120368958, + "logps/chosen": -0.7543958425521851, + "logps/rejected": -0.7292922139167786, + "loss": 5.2406, + "nll_loss": 0.7543958425521851, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.5439581871032715, + "rewards/margins": -0.25103625655174255, + "rewards/rejected": -7.292922019958496, + "step": 95 + }, + { + "epoch": 0.19157088122605365, + "grad_norm": 14.22025157094158, + "learning_rate": 9.554140127388534e-08, + "logits/chosen": 0.9820090532302856, + "logits/rejected": 0.8248282670974731, + "logps/chosen": -0.6709014177322388, + "logps/rejected": -0.7284021973609924, + "loss": 5.461, + "nll_loss": 0.6709014177322388, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.709013938903809, + "rewards/margins": 0.5750080943107605, + "rewards/rejected": -7.284021854400635, + "step": 100 + }, + { + "epoch": 0.20114942528735633, + "grad_norm": 15.684437324553063, + "learning_rate": 1.0031847133757961e-07, + "logits/chosen": 0.8076725006103516, + "logits/rejected": 0.8857797384262085, + "logps/chosen": -0.6815362572669983, + "logps/rejected": -0.6763182878494263, + "loss": 5.3235, + "nll_loss": 0.6815363168716431, + 
"rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.815362453460693, + "rewards/margins": -0.05218010023236275, + "rewards/rejected": -6.763182163238525, + "step": 105 + }, + { + "epoch": 0.210727969348659, + "grad_norm": 24.773788939839015, + "learning_rate": 1.0509554140127387e-07, + "logits/chosen": 0.7703748941421509, + "logits/rejected": 0.747571587562561, + "logps/chosen": -0.6411559581756592, + "logps/rejected": -0.6272796392440796, + "loss": 5.1527, + "nll_loss": 0.6411559581756592, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -6.411559104919434, + "rewards/margins": -0.13876314461231232, + "rewards/rejected": -6.272796630859375, + "step": 110 + }, + { + "epoch": 0.22030651340996169, + "grad_norm": 15.503433446190241, + "learning_rate": 1.0987261146496813e-07, + "logits/chosen": 0.7363082766532898, + "logits/rejected": 0.7384678721427917, + "logps/chosen": -0.7916821241378784, + "logps/rejected": -0.8139774203300476, + "loss": 5.2975, + "nll_loss": 0.7916821241378784, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.916821479797363, + "rewards/margins": 0.2229524552822113, + "rewards/rejected": -8.139774322509766, + "step": 115 + }, + { + "epoch": 0.22988505747126436, + "grad_norm": 14.405865015988635, + "learning_rate": 1.1464968152866242e-07, + "logits/chosen": 0.988644003868103, + "logits/rejected": 0.6871722936630249, + "logps/chosen": -0.8117585182189941, + "logps/rejected": -0.7687948942184448, + "loss": 5.2603, + "nll_loss": 0.8117585182189941, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -8.117586135864258, + "rewards/margins": -0.42963656783103943, + "rewards/rejected": -7.687948703765869, + "step": 120 + }, + { + "epoch": 0.23946360153256704, + "grad_norm": 15.441159671033866, + "learning_rate": 1.194267515923567e-07, + "logits/chosen": 0.9181255102157593, + "logits/rejected": 1.0879169702529907, + "logps/chosen": -0.7296438813209534, + "logps/rejected": -0.7042439579963684, + "loss": 5.3496, + "nll_loss": 0.7296438217163086, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.296438694000244, + "rewards/margins": -0.25399884581565857, + "rewards/rejected": -7.0424394607543945, + "step": 125 + }, + { + "epoch": 0.24904214559386972, + "grad_norm": 14.525160680247224, + "learning_rate": 1.2420382165605095e-07, + "logits/chosen": 1.0101947784423828, + "logits/rejected": 0.9281114339828491, + "logps/chosen": -0.6179158091545105, + "logps/rejected": -0.5991578102111816, + "loss": 5.349, + "nll_loss": 0.6179158091545105, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -6.1791582107543945, + "rewards/margins": -0.1875801980495453, + "rewards/rejected": -5.991578102111816, + "step": 130 + }, + { + "epoch": 0.25862068965517243, + "grad_norm": 14.83309451045206, + "learning_rate": 1.2898089171974521e-07, + "logits/chosen": 0.6288986802101135, + "logits/rejected": 0.8249040842056274, + "logps/chosen": -0.671284019947052, + "logps/rejected": -0.7514477968215942, + "loss": 5.2492, + "nll_loss": 0.671284019947052, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.7128400802612305, + "rewards/margins": 0.801638126373291, + "rewards/rejected": -7.5144782066345215, + "step": 135 + }, + { + "epoch": 0.2681992337164751, + "grad_norm": 15.778143495292198, + "learning_rate": 1.3375796178343948e-07, + "logits/chosen": 0.979997992515564, + "logits/rejected": 0.8755594491958618, + "logps/chosen": -0.5514487028121948, + "logps/rejected": -0.675363302230835, + "loss": 5.3335, + 
"nll_loss": 0.5514487028121948, + "rewards/accuracies": 1.0, + "rewards/chosen": -5.514487266540527, + "rewards/margins": 1.2391456365585327, + "rewards/rejected": -6.75363302230835, + "step": 140 + }, + { + "epoch": 0.2777777777777778, + "grad_norm": 18.462671529719298, + "learning_rate": 1.3853503184713377e-07, + "logits/chosen": 0.8637372851371765, + "logits/rejected": 0.9276704788208008, + "logps/chosen": -0.7762002944946289, + "logps/rejected": -1.0306679010391235, + "loss": 5.2209, + "nll_loss": 0.7762002348899841, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -7.762002468109131, + "rewards/margins": 2.5446763038635254, + "rewards/rejected": -10.306678771972656, + "step": 145 + }, + { + "epoch": 0.28735632183908044, + "grad_norm": 14.528423213748091, + "learning_rate": 1.4331210191082803e-07, + "logits/chosen": 0.9764812588691711, + "logits/rejected": 1.0769740343093872, + "logps/chosen": -0.7632086277008057, + "logps/rejected": -0.7627191543579102, + "loss": 5.1848, + "nll_loss": 0.7632086277008057, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.632086277008057, + "rewards/margins": -0.004894733428955078, + "rewards/rejected": -7.627191066741943, + "step": 150 + }, + { + "epoch": 0.29693486590038315, + "grad_norm": 14.670904215756279, + "learning_rate": 1.480891719745223e-07, + "logits/chosen": 0.7151988744735718, + "logits/rejected": 0.8136464953422546, + "logps/chosen": -0.8410874605178833, + "logps/rejected": -0.6722872853279114, + "loss": 5.322, + "nll_loss": 0.8410874605178833, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -8.410874366760254, + "rewards/margins": -1.688001036643982, + "rewards/rejected": -6.722872734069824, + "step": 155 + }, + { + "epoch": 0.3065134099616858, + "grad_norm": 16.96923463521279, + "learning_rate": 1.5286624203821656e-07, + "logits/chosen": 1.0444055795669556, + "logits/rejected": 0.8398516774177551, + "logps/chosen": -0.7191808819770813, + "logps/rejected": -0.7984825372695923, + "loss": 5.0657, + "nll_loss": 0.7191808819770813, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.191809177398682, + "rewards/margins": 0.7930164337158203, + "rewards/rejected": -7.98482608795166, + "step": 160 + }, + { + "epoch": 0.3160919540229885, + "grad_norm": 14.547279963040138, + "learning_rate": 1.5764331210191082e-07, + "logits/chosen": 0.694339394569397, + "logits/rejected": 0.7881234884262085, + "logps/chosen": -0.6559264063835144, + "logps/rejected": -0.6876171827316284, + "loss": 5.3857, + "nll_loss": 0.6559264063835144, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.559264183044434, + "rewards/margins": 0.3169073164463043, + "rewards/rejected": -6.876172065734863, + "step": 165 + }, + { + "epoch": 0.32567049808429116, + "grad_norm": 15.883172546108769, + "learning_rate": 1.6242038216560508e-07, + "logits/chosen": 0.9939007759094238, + "logits/rejected": 0.9327294230461121, + "logps/chosen": -0.514694094657898, + "logps/rejected": -0.6553934812545776, + "loss": 5.1601, + "nll_loss": 0.514694094657898, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -5.146941184997559, + "rewards/margins": 1.4069939851760864, + "rewards/rejected": -6.5539350509643555, + "step": 170 + }, + { + "epoch": 0.33524904214559387, + "grad_norm": 14.974676446039048, + "learning_rate": 1.6719745222929935e-07, + "logits/chosen": 0.8872886896133423, + "logits/rejected": 0.8021435737609863, + "logps/chosen": -0.7085338830947876, + "logps/rejected": -0.7116855382919312, + "loss": 
5.3339, + "nll_loss": 0.7085338830947876, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.085339546203613, + "rewards/margins": 0.031516265124082565, + "rewards/rejected": -7.116854667663574, + "step": 175 + }, + { + "epoch": 0.3448275862068966, + "grad_norm": 17.49142572813654, + "learning_rate": 1.719745222929936e-07, + "logits/chosen": 0.7162724733352661, + "logits/rejected": 0.6837285757064819, + "logps/chosen": -0.8341668844223022, + "logps/rejected": -0.7984797358512878, + "loss": 5.211, + "nll_loss": 0.8341668248176575, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.341669082641602, + "rewards/margins": -0.35687151551246643, + "rewards/rejected": -7.984797477722168, + "step": 180 + }, + { + "epoch": 0.3544061302681992, + "grad_norm": 15.695305726655395, + "learning_rate": 1.7675159235668787e-07, + "logits/chosen": 0.9962735176086426, + "logits/rejected": 0.8522371053695679, + "logps/chosen": -0.824070930480957, + "logps/rejected": -0.7453502416610718, + "loss": 5.1938, + "nll_loss": 0.824070930480957, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.24070930480957, + "rewards/margins": -0.7872062921524048, + "rewards/rejected": -7.453502655029297, + "step": 185 + }, + { + "epoch": 0.36398467432950193, + "grad_norm": 15.933522154415847, + "learning_rate": 1.8152866242038216e-07, + "logits/chosen": 0.7882771492004395, + "logits/rejected": 0.7340660691261292, + "logps/chosen": -0.6636666059494019, + "logps/rejected": -0.7198070287704468, + "loss": 5.3231, + "nll_loss": 0.6636666655540466, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.636666774749756, + "rewards/margins": 0.5614040493965149, + "rewards/rejected": -7.198070526123047, + "step": 190 + }, + { + "epoch": 0.3735632183908046, + "grad_norm": 14.263964254379374, + "learning_rate": 1.8630573248407643e-07, + "logits/chosen": 0.6639770269393921, + "logits/rejected": 0.7358517050743103, + "logps/chosen": -0.7163713574409485, + "logps/rejected": -0.7314590215682983, + "loss": 5.2454, + "nll_loss": 0.7163712978363037, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.1637139320373535, + "rewards/margins": 0.1508767157793045, + "rewards/rejected": -7.3145904541015625, + "step": 195 + }, + { + "epoch": 0.3831417624521073, + "grad_norm": 15.417127684289229, + "learning_rate": 1.910828025477707e-07, + "logits/chosen": 0.6845916509628296, + "logits/rejected": 0.8717025518417358, + "logps/chosen": -0.7460684776306152, + "logps/rejected": -0.8934140205383301, + "loss": 5.3951, + "nll_loss": 0.7460684776306152, + "rewards/accuracies": 1.0, + "rewards/chosen": -7.460684776306152, + "rewards/margins": 1.4734549522399902, + "rewards/rejected": -8.9341402053833, + "step": 200 + }, + { + "epoch": 0.39272030651340994, + "grad_norm": 15.29361256055114, + "learning_rate": 1.9585987261146495e-07, + "logits/chosen": 0.8160010576248169, + "logits/rejected": 0.7316077351570129, + "logps/chosen": -0.8079813122749329, + "logps/rejected": -0.7992033958435059, + "loss": 5.1982, + "nll_loss": 0.8079813122749329, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.079813003540039, + "rewards/margins": -0.08777885138988495, + "rewards/rejected": -7.992033958435059, + "step": 205 + }, + { + "epoch": 0.40229885057471265, + "grad_norm": 18.735791443990614, + "learning_rate": 2.0063694267515922e-07, + "logits/chosen": 0.8831043243408203, + "logits/rejected": 0.9638816118240356, + "logps/chosen": -0.8348730802536011, + "logps/rejected": -0.7787247896194458, + 
"loss": 5.2804, + "nll_loss": 0.8348730802536011, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -8.348731994628906, + "rewards/margins": -0.5614833235740662, + "rewards/rejected": -7.787248134613037, + "step": 210 + }, + { + "epoch": 0.4118773946360153, + "grad_norm": 15.020231482587125, + "learning_rate": 2.0541401273885348e-07, + "logits/chosen": 0.8089305758476257, + "logits/rejected": 0.7801268696784973, + "logps/chosen": -0.9548230171203613, + "logps/rejected": -0.8521941304206848, + "loss": 5.4231, + "nll_loss": 0.9548231363296509, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -9.548230171203613, + "rewards/margins": -1.0262889862060547, + "rewards/rejected": -8.521940231323242, + "step": 215 + }, + { + "epoch": 0.421455938697318, + "grad_norm": 16.66122418369343, + "learning_rate": 2.1019108280254774e-07, + "logits/chosen": 0.8265215754508972, + "logits/rejected": 0.7605193257331848, + "logps/chosen": -0.6805425882339478, + "logps/rejected": -0.7011183500289917, + "loss": 5.2324, + "nll_loss": 0.6805425882339478, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -6.80542516708374, + "rewards/margins": 0.20575818419456482, + "rewards/rejected": -7.011183261871338, + "step": 220 + }, + { + "epoch": 0.43103448275862066, + "grad_norm": 15.84641449526937, + "learning_rate": 2.14968152866242e-07, + "logits/chosen": 0.9168558120727539, + "logits/rejected": 0.8388528823852539, + "logps/chosen": -0.8054409027099609, + "logps/rejected": -0.7549694776535034, + "loss": 5.2786, + "nll_loss": 0.8054410219192505, + "rewards/accuracies": 0.0, + "rewards/chosen": -8.054409980773926, + "rewards/margins": -0.5047143697738647, + "rewards/rejected": -7.549695014953613, + "step": 225 + }, + { + "epoch": 0.44061302681992337, + "grad_norm": 15.381291199652642, + "learning_rate": 2.1974522292993627e-07, + "logits/chosen": 0.8924915194511414, + "logits/rejected": 0.9192771911621094, + "logps/chosen": -0.7253198623657227, + "logps/rejected": -0.6863486766815186, + "loss": 5.3113, + "nll_loss": 0.7253197431564331, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.253198146820068, + "rewards/margins": -0.38971146941185, + "rewards/rejected": -6.863486289978027, + "step": 230 + }, + { + "epoch": 0.4501915708812261, + "grad_norm": 22.207135774874516, + "learning_rate": 2.2452229299363056e-07, + "logits/chosen": 0.6804489493370056, + "logits/rejected": 0.8135985136032104, + "logps/chosen": -0.7592498064041138, + "logps/rejected": -0.8050843477249146, + "loss": 5.306, + "nll_loss": 0.7592498064041138, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.592497825622559, + "rewards/margins": 0.4583454132080078, + "rewards/rejected": -8.050843238830566, + "step": 235 + }, + { + "epoch": 0.45977011494252873, + "grad_norm": 15.418776278146453, + "learning_rate": 2.2929936305732485e-07, + "logits/chosen": 0.740727961063385, + "logits/rejected": 0.7790459394454956, + "logps/chosen": -0.6311203837394714, + "logps/rejected": -0.7699800729751587, + "loss": 5.3202, + "nll_loss": 0.6311203837394714, + "rewards/accuracies": 1.0, + "rewards/chosen": -6.311203956604004, + "rewards/margins": 1.388596534729004, + "rewards/rejected": -7.69980001449585, + "step": 240 + }, + { + "epoch": 0.46934865900383144, + "grad_norm": 18.062610898602617, + "learning_rate": 2.340764331210191e-07, + "logits/chosen": 0.7905587553977966, + "logits/rejected": 0.70208740234375, + "logps/chosen": -0.8448599576950073, + "logps/rejected": -0.726888120174408, + "loss": 
5.2108, + "nll_loss": 0.8448599576950073, + "rewards/accuracies": 0.0, + "rewards/chosen": -8.448599815368652, + "rewards/margins": -1.179718255996704, + "rewards/rejected": -7.268881320953369, + "step": 245 + }, + { + "epoch": 0.4789272030651341, + "grad_norm": 20.317601991362707, + "learning_rate": 2.388535031847134e-07, + "logits/chosen": 0.8352301716804504, + "logits/rejected": 0.7928398847579956, + "logps/chosen": -0.7886701822280884, + "logps/rejected": -0.7603856325149536, + "loss": 5.3794, + "nll_loss": 0.7886701822280884, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.886702060699463, + "rewards/margins": -0.28284597396850586, + "rewards/rejected": -7.603856086730957, + "step": 250 + }, + { + "epoch": 0.4885057471264368, + "grad_norm": 17.92061038766115, + "learning_rate": 2.4363057324840764e-07, + "logits/chosen": 0.7246678471565247, + "logits/rejected": 0.8966231346130371, + "logps/chosen": -0.8691000938415527, + "logps/rejected": -0.7771162986755371, + "loss": 5.2854, + "nll_loss": 0.8691000938415527, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.691000938415527, + "rewards/margins": -0.9198387861251831, + "rewards/rejected": -7.771162509918213, + "step": 255 + }, + { + "epoch": 0.49808429118773945, + "grad_norm": 17.57639103239166, + "learning_rate": 2.484076433121019e-07, + "logits/chosen": 0.9574035406112671, + "logits/rejected": 0.6729179620742798, + "logps/chosen": -0.6743525266647339, + "logps/rejected": -0.7344107031822205, + "loss": 5.2276, + "nll_loss": 0.6743525266647339, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.743525505065918, + "rewards/margins": 0.6005817651748657, + "rewards/rejected": -7.344107151031494, + "step": 260 + }, + { + "epoch": 0.5076628352490421, + "grad_norm": 21.283801923925182, + "learning_rate": 2.5318471337579616e-07, + "logits/chosen": 0.8346107602119446, + "logits/rejected": 0.9647878408432007, + "logps/chosen": -0.6870938539505005, + "logps/rejected": -0.6139359474182129, + "loss": 5.3469, + "nll_loss": 0.6870938539505005, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -6.870938301086426, + "rewards/margins": -0.7315785884857178, + "rewards/rejected": -6.139359951019287, + "step": 265 + }, + { + "epoch": 0.5172413793103449, + "grad_norm": 14.399379764504936, + "learning_rate": 2.5796178343949043e-07, + "logits/chosen": 0.864532470703125, + "logits/rejected": 0.8542212247848511, + "logps/chosen": -0.7113697528839111, + "logps/rejected": -0.732496976852417, + "loss": 5.177, + "nll_loss": 0.7113697528839111, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.1136980056762695, + "rewards/margins": 0.2112717628479004, + "rewards/rejected": -7.324969291687012, + "step": 270 + }, + { + "epoch": 0.5268199233716475, + "grad_norm": 14.537680856211797, + "learning_rate": 2.627388535031847e-07, + "logits/chosen": 0.9982441067695618, + "logits/rejected": 0.8428419232368469, + "logps/chosen": -0.687545120716095, + "logps/rejected": -0.7962311506271362, + "loss": 5.1719, + "nll_loss": 0.687545120716095, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -6.87545108795166, + "rewards/margins": 1.0868605375289917, + "rewards/rejected": -7.9623122215271, + "step": 275 + }, + { + "epoch": 0.5363984674329502, + "grad_norm": 19.6142347909319, + "learning_rate": 2.6751592356687895e-07, + "logits/chosen": 0.7479467988014221, + "logits/rejected": 0.7518871426582336, + "logps/chosen": -0.7905808687210083, + "logps/rejected": -0.7879656553268433, + "loss": 
5.2129, + "nll_loss": 0.7905808687210083, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.905808448791504, + "rewards/margins": -0.02615184709429741, + "rewards/rejected": -7.8796563148498535, + "step": 280 + }, + { + "epoch": 0.5459770114942529, + "grad_norm": 15.380742906594616, + "learning_rate": 2.722929936305732e-07, + "logits/chosen": 0.6082831025123596, + "logits/rejected": 0.6754466891288757, + "logps/chosen": -0.866966724395752, + "logps/rejected": -0.822973370552063, + "loss": 5.2728, + "nll_loss": 0.8669666051864624, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.66966724395752, + "rewards/margins": -0.4399335980415344, + "rewards/rejected": -8.229734420776367, + "step": 285 + }, + { + "epoch": 0.5555555555555556, + "grad_norm": 16.49675270885239, + "learning_rate": 2.7707006369426753e-07, + "logits/chosen": 0.8076263666152954, + "logits/rejected": 0.7774611711502075, + "logps/chosen": -0.7558648586273193, + "logps/rejected": -0.8593353033065796, + "loss": 5.2718, + "nll_loss": 0.7558648586273193, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -7.558648586273193, + "rewards/margins": 1.0347039699554443, + "rewards/rejected": -8.593353271484375, + "step": 290 + }, + { + "epoch": 0.5651340996168582, + "grad_norm": 15.91951747554686, + "learning_rate": 2.818471337579618e-07, + "logits/chosen": 0.7341046333312988, + "logits/rejected": 0.6761490106582642, + "logps/chosen": -0.7117196321487427, + "logps/rejected": -0.8302199244499207, + "loss": 5.3494, + "nll_loss": 0.7117196321487427, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -7.117195129394531, + "rewards/margins": 1.1850030422210693, + "rewards/rejected": -8.302199363708496, + "step": 295 + }, + { + "epoch": 0.5747126436781609, + "grad_norm": 17.296732181243744, + "learning_rate": 2.8662420382165606e-07, + "logits/chosen": 0.8246825933456421, + "logits/rejected": 0.649739146232605, + "logps/chosen": -0.823817253112793, + "logps/rejected": -0.8983147740364075, + "loss": 5.2106, + "nll_loss": 0.8238171339035034, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.23817253112793, + "rewards/margins": 0.7449753880500793, + "rewards/rejected": -8.983147621154785, + "step": 300 + }, + { + "epoch": 0.5842911877394636, + "grad_norm": 14.832912322402217, + "learning_rate": 2.914012738853503e-07, + "logits/chosen": 0.6315704584121704, + "logits/rejected": 0.614464521408081, + "logps/chosen": -0.8551123738288879, + "logps/rejected": -0.7884758114814758, + "loss": 5.2191, + "nll_loss": 0.8551123738288879, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.55112361907959, + "rewards/margins": -0.6663654446601868, + "rewards/rejected": -7.884758949279785, + "step": 305 + }, + { + "epoch": 0.5938697318007663, + "grad_norm": 22.263623540675233, + "learning_rate": 2.961783439490446e-07, + "logits/chosen": 0.8310597538948059, + "logits/rejected": 0.7741198539733887, + "logps/chosen": -0.769278883934021, + "logps/rejected": -0.7818907499313354, + "loss": 5.3222, + "nll_loss": 0.7692790031433105, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.692789554595947, + "rewards/margins": 0.1261179894208908, + "rewards/rejected": -7.818907737731934, + "step": 310 + }, + { + "epoch": 0.603448275862069, + "grad_norm": 15.209695881976554, + "learning_rate": 2.999999067864633e-07, + "logits/chosen": 0.6580372452735901, + "logits/rejected": 0.5932300090789795, + "logps/chosen": -0.8376361727714539, + "logps/rejected": -0.9159227609634399, + 
"loss": 5.1548, + "nll_loss": 0.8376361131668091, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.376360893249512, + "rewards/margins": 0.782866358757019, + "rewards/rejected": -9.15922737121582, + "step": 315 + }, + { + "epoch": 0.6130268199233716, + "grad_norm": 16.786502978942124, + "learning_rate": 2.9999664432484305e-07, + "logits/chosen": 0.779415488243103, + "logits/rejected": 0.8173543810844421, + "logps/chosen": -0.8148608207702637, + "logps/rejected": -0.8662956357002258, + "loss": 5.0858, + "nll_loss": 0.8148608207702637, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.148608207702637, + "rewards/margins": 0.5143473744392395, + "rewards/rejected": -8.662956237792969, + "step": 320 + }, + { + "epoch": 0.6226053639846744, + "grad_norm": 14.809215737259183, + "learning_rate": 2.999887213022373e-07, + "logits/chosen": 0.9049234390258789, + "logits/rejected": 0.6404236555099487, + "logps/chosen": -0.9479631185531616, + "logps/rejected": -0.8360861539840698, + "loss": 5.282, + "nll_loss": 0.9479631185531616, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -9.479631423950195, + "rewards/margins": -1.1187700033187866, + "rewards/rejected": -8.360861778259277, + "step": 325 + }, + { + "epoch": 0.632183908045977, + "grad_norm": 16.116801233519272, + "learning_rate": 2.999761379648231e-07, + "logits/chosen": 0.5830576419830322, + "logits/rejected": 0.4962643086910248, + "logps/chosen": -0.8123816251754761, + "logps/rejected": -0.915209174156189, + "loss": 5.0586, + "nll_loss": 0.8123816251754761, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -8.12381649017334, + "rewards/margins": 1.0282756090164185, + "rewards/rejected": -9.152092933654785, + "step": 330 + }, + { + "epoch": 0.6417624521072797, + "grad_norm": 17.606725914130628, + "learning_rate": 2.999588947035786e-07, + "logits/chosen": 0.666958212852478, + "logits/rejected": 0.6473885774612427, + "logps/chosen": -0.8091378211975098, + "logps/rejected": -0.8131651878356934, + "loss": 5.1567, + "nll_loss": 0.8091378211975098, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.091379165649414, + "rewards/margins": 0.04027414321899414, + "rewards/rejected": -8.13165283203125, + "step": 335 + }, + { + "epoch": 0.6513409961685823, + "grad_norm": 15.2118101330663, + "learning_rate": 2.999369920542709e-07, + "logits/chosen": 0.7825851440429688, + "logits/rejected": 0.6557101011276245, + "logps/chosen": -0.8586159944534302, + "logps/rejected": -0.795002818107605, + "loss": 5.2093, + "nll_loss": 0.8586158752441406, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -8.586160659790039, + "rewards/margins": -0.6361311078071594, + "rewards/rejected": -7.950028419494629, + "step": 340 + }, + { + "epoch": 0.6609195402298851, + "grad_norm": 21.51236359546376, + "learning_rate": 2.9991043069743953e-07, + "logits/chosen": 0.8391911387443542, + "logits/rejected": 0.9106936454772949, + "logps/chosen": -0.9227014780044556, + "logps/rejected": -0.8664349317550659, + "loss": 5.1782, + "nll_loss": 0.9227014780044556, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -9.22701358795166, + "rewards/margins": -0.5626646280288696, + "rewards/rejected": -8.664348602294922, + "step": 345 + }, + { + "epoch": 0.6704980842911877, + "grad_norm": 18.995163332531835, + "learning_rate": 2.9987921145837506e-07, + "logits/chosen": 0.5608048439025879, + "logits/rejected": 0.504362940788269, + "logps/chosen": -0.9775077700614929, + "logps/rejected": 
-0.8174030184745789, + "loss": 5.2409, + "nll_loss": 0.9775077700614929, + "rewards/accuracies": 0.0, + "rewards/chosen": -9.775077819824219, + "rewards/margins": -1.6010481119155884, + "rewards/rejected": -8.174030303955078, + "step": 350 + }, + { + "epoch": 0.6800766283524904, + "grad_norm": 18.321608160126484, + "learning_rate": 2.998433353070936e-07, + "logits/chosen": 0.5228645205497742, + "logits/rejected": 0.5875726342201233, + "logps/chosen": -0.8083820343017578, + "logps/rejected": -0.9094891548156738, + "loss": 5.3646, + "nll_loss": 0.8083820343017578, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -8.083820343017578, + "rewards/margins": 1.011070966720581, + "rewards/rejected": -9.094891548156738, + "step": 355 + }, + { + "epoch": 0.6896551724137931, + "grad_norm": 39.97684130432745, + "learning_rate": 2.998028033583067e-07, + "logits/chosen": 0.635855495929718, + "logits/rejected": 0.6091259717941284, + "logps/chosen": -0.8663408160209656, + "logps/rejected": -0.8845375776290894, + "loss": 5.3618, + "nll_loss": 0.8663408160209656, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.663408279418945, + "rewards/margins": 0.18196812272071838, + "rewards/rejected": -8.845376014709473, + "step": 360 + }, + { + "epoch": 0.6992337164750958, + "grad_norm": 20.393894377350467, + "learning_rate": 2.9975761687138675e-07, + "logits/chosen": 0.8516343832015991, + "logits/rejected": 0.9076502919197083, + "logps/chosen": -0.7819638848304749, + "logps/rejected": -0.8371108174324036, + "loss": 5.1922, + "nll_loss": 0.7819639444351196, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -7.819638252258301, + "rewards/margins": 0.5514693260192871, + "rewards/rejected": -8.371108055114746, + "step": 365 + }, + { + "epoch": 0.7088122605363985, + "grad_norm": 15.92022688698728, + "learning_rate": 2.997077772503276e-07, + "logits/chosen": 0.6935936212539673, + "logits/rejected": 0.7612776160240173, + "logps/chosen": -0.8113873600959778, + "logps/rejected": -0.8486092686653137, + "loss": 5.2959, + "nll_loss": 0.8113872408866882, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.113873481750488, + "rewards/margins": 0.37221986055374146, + "rewards/rejected": -8.486093521118164, + "step": 370 + }, + { + "epoch": 0.7183908045977011, + "grad_norm": 19.844305829223966, + "learning_rate": 2.9965328604370115e-07, + "logits/chosen": 0.7288967370986938, + "logits/rejected": 0.8481170535087585, + "logps/chosen": -0.7829886674880981, + "logps/rejected": -0.7796565294265747, + "loss": 5.2523, + "nll_loss": 0.7829886674880981, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.829885959625244, + "rewards/margins": -0.033321283757686615, + "rewards/rejected": -7.796565055847168, + "step": 375 + }, + { + "epoch": 0.7279693486590039, + "grad_norm": 22.21588650736091, + "learning_rate": 2.9959414494460934e-07, + "logits/chosen": 0.7555449604988098, + "logits/rejected": 0.8247362971305847, + "logps/chosen": -0.8521485328674316, + "logps/rejected": -0.9153167605400085, + "loss": 5.1015, + "nll_loss": 0.8521484136581421, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.521485328674316, + "rewards/margins": 0.6316838264465332, + "rewards/rejected": -9.153168678283691, + "step": 380 + }, + { + "epoch": 0.7375478927203065, + "grad_norm": 16.32420836624451, + "learning_rate": 2.995303557906312e-07, + "logits/chosen": 0.8759990930557251, + "logits/rejected": 0.684215784072876, + "logps/chosen": -0.9286302328109741, + "logps/rejected": 
-0.9789536595344543, + "loss": 5.3124, + "nll_loss": 0.9286301732063293, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -9.28630256652832, + "rewards/margins": 0.5032343864440918, + "rewards/rejected": -9.789536476135254, + "step": 385 + }, + { + "epoch": 0.7471264367816092, + "grad_norm": 15.80345388242498, + "learning_rate": 2.99461920563766e-07, + "logits/chosen": 0.5319703817367554, + "logits/rejected": 0.6946344971656799, + "logps/chosen": -0.9111973643302917, + "logps/rejected": -0.8061173558235168, + "loss": 5.227, + "nll_loss": 0.911197304725647, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -9.111973762512207, + "rewards/margins": -1.0507996082305908, + "rewards/rejected": -8.061173439025879, + "step": 390 + }, + { + "epoch": 0.7567049808429118, + "grad_norm": 20.47124302367669, + "learning_rate": 2.993888413903716e-07, + "logits/chosen": 0.875220000743866, + "logits/rejected": 0.9313627481460571, + "logps/chosen": -0.6892002820968628, + "logps/rejected": -0.7782602310180664, + "loss": 5.1504, + "nll_loss": 0.689200222492218, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -6.892002105712891, + "rewards/margins": 0.8905998468399048, + "rewards/rejected": -7.782601833343506, + "step": 395 + }, + { + "epoch": 0.7662835249042146, + "grad_norm": 17.699773611139502, + "learning_rate": 2.9931112054109855e-07, + "logits/chosen": 0.5683273673057556, + "logits/rejected": 0.6470497846603394, + "logps/chosen": -0.8484388589859009, + "logps/rejected": -0.8746269345283508, + "loss": 5.1348, + "nll_loss": 0.8484388589859009, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.48438835144043, + "rewards/margins": 0.26188117265701294, + "rewards/rejected": -8.746268272399902, + "step": 400 + }, + { + "epoch": 0.7758620689655172, + "grad_norm": 19.019674807304686, + "learning_rate": 2.992287604308192e-07, + "logits/chosen": 0.6370224356651306, + "logits/rejected": 0.500920295715332, + "logps/chosen": -1.061689853668213, + "logps/rejected": -1.0553849935531616, + "loss": 5.2272, + "nll_loss": 1.0616897344589233, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -10.616897583007812, + "rewards/margins": -0.06304798275232315, + "rewards/rejected": -10.553851127624512, + "step": 405 + }, + { + "epoch": 0.7854406130268199, + "grad_norm": 22.32813440003362, + "learning_rate": 2.9914176361855286e-07, + "logits/chosen": 0.773957371711731, + "logits/rejected": 0.5596843957901001, + "logps/chosen": -0.8565713167190552, + "logps/rejected": -0.8435229063034058, + "loss": 5.1566, + "nll_loss": 0.8565713167190552, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.565712928771973, + "rewards/margins": -0.13048286736011505, + "rewards/rejected": -8.435229301452637, + "step": 410 + }, + { + "epoch": 0.7950191570881227, + "grad_norm": 18.60336417027728, + "learning_rate": 2.9905013280738643e-07, + "logits/chosen": 0.5330244898796082, + "logits/rejected": 0.52126145362854, + "logps/chosen": -0.9946764707565308, + "logps/rejected": -1.0159534215927124, + "loss": 5.3385, + "nll_loss": 0.9946764707565308, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -9.94676399230957, + "rewards/margins": 0.21276941895484924, + "rewards/rejected": -10.159533500671387, + "step": 415 + }, + { + "epoch": 0.8045977011494253, + "grad_norm": 17.890613875131685, + "learning_rate": 2.9895387084439007e-07, + "logits/chosen": 0.9610258340835571, + "logits/rejected": 0.7646621465682983, + "logps/chosen": -0.7954119443893433, + 
"logps/rejected": -0.8386955261230469, + "loss": 5.1982, + "nll_loss": 0.7954119443893433, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.954119682312012, + "rewards/margins": 0.43283557891845703, + "rewards/rejected": -8.386955261230469, + "step": 420 + }, + { + "epoch": 0.814176245210728, + "grad_norm": 16.55166479015083, + "learning_rate": 2.9885298072052896e-07, + "logits/chosen": 0.6969675421714783, + "logits/rejected": 0.8127206563949585, + "logps/chosen": -0.7167531251907349, + "logps/rejected": -0.8444196581840515, + "loss": 5.1488, + "nll_loss": 0.7167531847953796, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -7.167531490325928, + "rewards/margins": 1.2766650915145874, + "rewards/rejected": -8.444195747375488, + "step": 425 + }, + { + "epoch": 0.8237547892720306, + "grad_norm": 17.66990705043301, + "learning_rate": 2.987474655705706e-07, + "logits/chosen": 0.6976544260978699, + "logits/rejected": 0.45688027143478394, + "logps/chosen": -0.984510600566864, + "logps/rejected": -0.9671609997749329, + "loss": 5.1164, + "nll_loss": 0.9845105409622192, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -9.84510612487793, + "rewards/margins": -0.17349663376808167, + "rewards/rejected": -9.671609878540039, + "step": 430 + }, + { + "epoch": 0.8333333333333334, + "grad_norm": 17.864698049191677, + "learning_rate": 2.9863732867298676e-07, + "logits/chosen": 0.5837534070014954, + "logits/rejected": 0.4828642010688782, + "logps/chosen": -0.7947196960449219, + "logps/rejected": -0.8388618230819702, + "loss": 5.1483, + "nll_loss": 0.7947196960449219, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -7.947196960449219, + "rewards/margins": 0.44142085313796997, + "rewards/rejected": -8.388618469238281, + "step": 435 + }, + { + "epoch": 0.842911877394636, + "grad_norm": 19.80331125773151, + "learning_rate": 2.985225734498523e-07, + "logits/chosen": 0.6473149061203003, + "logits/rejected": 0.6882539987564087, + "logps/chosen": -0.8926659822463989, + "logps/rejected": -1.0417662858963013, + "loss": 5.0516, + "nll_loss": 0.8926659822463989, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -8.926658630371094, + "rewards/margins": 1.4910037517547607, + "rewards/rejected": -10.417662620544434, + "step": 440 + }, + { + "epoch": 0.8524904214559387, + "grad_norm": 23.020768804171617, + "learning_rate": 2.984032034667383e-07, + "logits/chosen": 0.723087728023529, + "logits/rejected": 0.7870732545852661, + "logps/chosen": -0.9583943486213684, + "logps/rejected": -0.9407602548599243, + "loss": 5.2786, + "nll_loss": 0.9583943486213684, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -9.583943367004395, + "rewards/margins": -0.17634105682373047, + "rewards/rejected": -9.40760326385498, + "step": 445 + }, + { + "epoch": 0.8620689655172413, + "grad_norm": 19.047172710756705, + "learning_rate": 2.982792224326018e-07, + "logits/chosen": 0.6330351829528809, + "logits/rejected": 0.6864033341407776, + "logps/chosen": -0.7574523687362671, + "logps/rejected": -0.8687712550163269, + "loss": 5.1046, + "nll_loss": 0.7574523091316223, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.574522972106934, + "rewards/margins": 1.113189458847046, + "rewards/rejected": -8.687711715698242, + "step": 450 + }, + { + "epoch": 0.8716475095785441, + "grad_norm": 16.669062938958124, + "learning_rate": 2.9815063419966994e-07, + "logits/chosen": 0.5700467824935913, + "logits/rejected": 0.4501461088657379, + "logps/chosen": 
-0.8433354496955872, + "logps/rejected": -1.0070374011993408, + "loss": 5.224, + "nll_loss": 0.8433355093002319, + "rewards/accuracies": 1.0, + "rewards/chosen": -8.433355331420898, + "rewards/margins": 1.637019157409668, + "rewards/rejected": -10.070374488830566, + "step": 455 + }, + { + "epoch": 0.8812260536398467, + "grad_norm": 18.696544325644073, + "learning_rate": 2.9801744276332095e-07, + "logits/chosen": 0.7160075306892395, + "logits/rejected": 0.7029515504837036, + "logps/chosen": -0.929043173789978, + "logps/rejected": -0.8915184140205383, + "loss": 5.2651, + "nll_loss": 0.9290431141853333, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -9.29043197631836, + "rewards/margins": -0.37524765729904175, + "rewards/rejected": -8.915184020996094, + "step": 460 + }, + { + "epoch": 0.8908045977011494, + "grad_norm": 17.828488483344373, + "learning_rate": 2.978796522619593e-07, + "logits/chosen": 0.48793935775756836, + "logits/rejected": 0.5863816142082214, + "logps/chosen": -0.8918827772140503, + "logps/rejected": -0.9165046811103821, + "loss": 5.1141, + "nll_loss": 0.8918827772140503, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": -8.918828010559082, + "rewards/margins": 0.24621906876564026, + "rewards/rejected": -9.165046691894531, + "step": 465 + }, + { + "epoch": 0.9003831417624522, + "grad_norm": 17.225972538621157, + "learning_rate": 2.9773726697688786e-07, + "logits/chosen": 0.8548671007156372, + "logits/rejected": 0.6914275884628296, + "logps/chosen": -0.7240065932273865, + "logps/rejected": -0.7344146370887756, + "loss": 5.2492, + "nll_loss": 0.7240065336227417, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -7.2400665283203125, + "rewards/margins": 0.10407984256744385, + "rewards/rejected": -7.34414529800415, + "step": 470 + }, + { + "epoch": 0.9099616858237548, + "grad_norm": 19.7970366847853, + "learning_rate": 2.975902913321742e-07, + "logits/chosen": 0.6560118794441223, + "logits/rejected": 0.9177835583686829, + "logps/chosen": -0.7790594100952148, + "logps/rejected": -0.7989363670349121, + "loss": 5.3694, + "nll_loss": 0.7790594100952148, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -7.79059362411499, + "rewards/margins": 0.19877009093761444, + "rewards/rejected": -7.989363670349121, + "step": 475 + }, + { + "epoch": 0.9195402298850575, + "grad_norm": 25.366911327657693, + "learning_rate": 2.974387298945135e-07, + "logits/chosen": 0.4641974866390228, + "logits/rejected": 0.5562915802001953, + "logps/chosen": -1.0258574485778809, + "logps/rejected": -0.9660851359367371, + "loss": 5.2603, + "nll_loss": 1.0258575677871704, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -10.258573532104492, + "rewards/margins": -0.5977233052253723, + "rewards/rejected": -9.66085147857666, + "step": 480 + }, + { + "epoch": 0.9291187739463601, + "grad_norm": 28.44742966645083, + "learning_rate": 2.9728258737308666e-07, + "logits/chosen": 0.7019563913345337, + "logits/rejected": 0.9221956133842468, + "logps/chosen": -0.7030410766601562, + "logps/rejected": -0.7920758724212646, + "loss": 5.1616, + "nll_loss": 0.7030410766601562, + "rewards/accuracies": 1.0, + "rewards/chosen": -7.0304107666015625, + "rewards/margins": 0.8903471231460571, + "rewards/rejected": -7.920758247375488, + "step": 485 + }, + { + "epoch": 0.9386973180076629, + "grad_norm": 20.06782347986855, + "learning_rate": 2.9712186861941376e-07, + "logits/chosen": 0.5050719976425171, + "logits/rejected": 0.5799335241317749, + "logps/chosen": 
-0.9137505292892456, + "logps/rejected": -0.8959131240844727, + "loss": 5.0958, + "nll_loss": 0.9137505292892456, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -9.137506484985352, + "rewards/margins": -0.17837515473365784, + "rewards/rejected": -8.959131240844727, + "step": 490 + }, + { + "epoch": 0.9482758620689655, + "grad_norm": 25.038776687357906, + "learning_rate": 2.9695657862720366e-07, + "logits/chosen": 0.6863113641738892, + "logits/rejected": 0.617378294467926, + "logps/chosen": -0.8844548463821411, + "logps/rejected": -0.8485180139541626, + "loss": 5.2545, + "nll_loss": 0.8844548463821411, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.844549179077148, + "rewards/margins": -0.3593680262565613, + "rewards/rejected": -8.485180854797363, + "step": 495 + }, + { + "epoch": 0.9578544061302682, + "grad_norm": 17.692781929545614, + "learning_rate": 2.967867225321984e-07, + "logits/chosen": 0.37053728103637695, + "logits/rejected": 0.6822131276130676, + "logps/chosen": -0.8528249859809875, + "logps/rejected": -0.8191383481025696, + "loss": 5.1402, + "nll_loss": 0.8528249859809875, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": -8.528249740600586, + "rewards/margins": -0.33686715364456177, + "rewards/rejected": -8.191383361816406, + "step": 500 + }, + { + "epoch": 0.9674329501915708, + "grad_norm": 21.29237339971544, + "learning_rate": 2.96612305612014e-07, + "logits/chosen": 0.45431119203567505, + "logits/rejected": 0.6691686511039734, + "logps/chosen": -0.8379117250442505, + "logps/rejected": -0.7931283116340637, + "loss": 5.1782, + "nll_loss": 0.8379117846488953, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -8.379117965698242, + "rewards/margins": -0.44783440232276917, + "rewards/rejected": -7.931282997131348, + "step": 505 + }, + { + "epoch": 0.9770114942528736, + "grad_norm": 18.325302400422004, + "learning_rate": 2.9643333328597636e-07, + "logits/chosen": 0.4990893006324768, + "logits/rejected": 0.44439974427223206, + "logps/chosen": -0.8343612551689148, + "logps/rejected": -0.861991286277771, + "loss": 5.3056, + "nll_loss": 0.8343612551689148, + "rewards/accuracies": 0.6000000238418579, + "rewards/chosen": -8.343611717224121, + "rewards/margins": 0.27630099654197693, + "rewards/rejected": -8.619913101196289, + "step": 510 + }, + { + "epoch": 0.9865900383141762, + "grad_norm": 19.49284906216681, + "learning_rate": 2.9624981111495277e-07, + "logits/chosen": 0.6197850704193115, + "logits/rejected": 0.7587565183639526, + "logps/chosen": -0.7680959105491638, + "logps/rejected": -0.9193935394287109, + "loss": 5.0512, + "nll_loss": 0.768095850944519, + "rewards/accuracies": 1.0, + "rewards/chosen": -7.6809587478637695, + "rewards/margins": 1.5129766464233398, + "rewards/rejected": -9.19393539428711, + "step": 515 + }, + { + "epoch": 0.9961685823754789, + "grad_norm": 17.910862409831864, + "learning_rate": 2.960617448011793e-07, + "logits/chosen": 0.4001706540584564, + "logits/rejected": 0.5382306575775146, + "logps/chosen": -0.9400454759597778, + "logps/rejected": -0.9204422235488892, + "loss": 5.1904, + "nll_loss": 0.9400454759597778, + "rewards/accuracies": 0.20000000298023224, + "rewards/chosen": -9.400455474853516, + "rewards/margins": -0.19603300094604492, + "rewards/rejected": -9.204421997070312, + "step": 520 + } + ], + "logging_steps": 5, + "max_steps": 3132, + "num_input_tokens_seen": 0, + "num_train_epochs": 6, + "save_steps": 1000000, + "stateful_callbacks": { + "TrainerControl": { + "args": { + 
"should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 0.0, + "train_batch_size": 1, + "trial_name": null, + "trial_params": null +} diff --git a/model/training_args.bin b/model/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..88d96a6aff62bb6908e38a6f17877e5b1493af74 --- /dev/null +++ b/model/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4c8912e91898daa7149f0b2e6bc91b92b5d58f37b67941236719deace681d5f +size 7224 diff --git a/model/zero_to_fp32.py b/model/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..e69ecd9acb5a235ffbf927091051106d902b3d39 --- /dev/null +++ b/model/zero_to_fp32.py @@ -0,0 +1,674 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import json +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. 
+from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = len(files) + 
state_dicts = [] + for f in files: + state_dict = torch.load(f, map_location=device) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + if zero_stage <= 2: + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + elif zero_stage == 3: + # if there is more than one param group, there will be multiple flattened tensors - one + # flattened tensor per group - for simplicity merge them into a single tensor + # + # XXX: could make the script more memory efficient for when there are multiple groups - it + # will require matching the sub-lists of param_shapes for each param group flattened tensor + + fp32_flat_groups = [ + torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) + ] + + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + 
frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = fp32_flat_groups[0].numel() * 
world_size + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # XXX: memory usage doubles here + state_dict[name] = torch.cat( + tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), + 0).narrow(0, 0, unpartitioned_numel).view(shape) + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + + Returns: + - pytorch ``state_dict`` + + Note: this approach may not work if your application doesn't have sufficient free CPU memory and + you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
+ + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters) + + # Shard the model if it is too big. 
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + state_dict_split = split_torch_state_dict_into_shards(state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors} + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard, output_path, metadata={"format": "pt"}) + else: + torch.save(shard, output_path) + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. 
path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size " + "lower than this value. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). " + "We default it to 5GB so that models can run easily on free-tier Google Colab instances " + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint, e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters)
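For context, a minimal usage sketch (not part of the diff) of how the script added above might be applied to this checkpoint after cloning the repository. It relies only on functions defined in model/zero_to_fp32.py; the paths ("model/", "fp32_output/") and the presence of a `latest` tag file alongside the rank-sharded *_optim_states.pt / *_model_states.pt files are assumptions for illustration, not paths recorded in this diff.

    # command-line conversion, as documented in the script header:
    #   python model/zero_to_fp32.py model/ model/fp32_output/ --safe_serialization
    #
    # or the same in-process (run from inside model/ so the script is importable):
    from zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

    # reads the `latest` file to locate the tag folder, then merges the per-rank fp32 partitions on cpu
    state_dict = get_fp32_state_dict_from_zero_checkpoint("model/")  # pass tag="global_stepN" if there is no `latest` file
    # the resulting dict can then be loaded into the matching architecture via model.load_state_dict(state_dict)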