param-bharat committed on
Commit ed053a0 · verified · 1 Parent(s): 60544ff

Training in progress, step 500

config.json CHANGED
@@ -13,6 +13,7 @@
   "initializer_range": 0.041666666666666664,
   "intermediate_size": 1536,
   "is_llama_config": true,
+  "max_length": null,
   "max_position_embeddings": 8192,
   "mlp_bias": false,
   "model_type": "llama",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cdca2476fdf69458c6ad0fc334a14db86907ab84020b2b6f31a21762f3e8b9fa
+oid sha256:9e68c400a7ade9b88f4cbe6ff514214b13cd3ee6a9584de0313df2f3c033bede
 size 269060552
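
model.safetensors is tracked with Git LFS, so the diff only updates the pointer file: the sha256 oid now refers to the new weight blob for step 500, while the size (about 269 MB) is unchanged. A sketch of checking a locally downloaded copy against the new oid, assuming the file has already been fetched (for example with git lfs pull):

import hashlib

# oid taken from the updated LFS pointer above.
expected = "9e68c400a7ade9b88f4cbe6ff514214b13cd3ee6a9584de0313df2f3c033bede"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    # Stream the file in 1 MiB chunks to avoid loading ~269 MB at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == expected)  # True if the local file matches this commit
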
tokenizer_config.json CHANGED
@@ -147,7 +147,7 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "max_length": 8192,
-  "model_max_length": 2048,
+  "model_max_length": 8192,
   "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
   "truncation": true,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:925bde0881cdd9e8d97493aeb52f31bf6688b39f2b1a275f6f9c64d10dc902a1
+oid sha256:d27ad4a6d36df5ed277cace91a024f72c4490dd6a21a7c1e9eb0099b6fe97815
 size 5496