luciaquirke committed · verified
Commit f6c47cb · 1 Parent(s): 9b73d26

EleutherAI/SmolLM2-1.7B-magpie-ultra-v0.1-attribution

README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/eleutherai/huggingface/runs/vofyo7hl)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/eleutherai/huggingface/runs/2e1kywoy)
 
 
 This model was trained with SFT.
@@ -35,7 +35,7 @@ This model was trained with SFT.
 ### Framework versions
 
 - TRL: 0.18.1
-- Transformers: 4.51.3
+- Transformers: 4.52.4
 - Pytorch: 2.5.1
 - Datasets: 3.6.0
 - Tokenizers: 0.21.1
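The first hunk's context line, `print(output["generated_text"])`, is the tail of the card's quick-start snippet. For orientation, a minimal sketch of the pipeline usage that TRL-generated cards typically wrap around that line; only the model id comes from this repo, while the prompt and `device` setting are assumptions:

```python
# A sketch of the TRL-style quick-start this README's snippet appears to
# follow; the question is a placeholder prompt, not taken from the card.
from transformers import pipeline

question = "If you had one wish, what would it be?"  # placeholder
generator = pipeline(
    "text-generation",
    model="EleutherAI/SmolLM2-1.7B-magpie-ultra-v0.1-attribution",
    device="cuda",  # assumption; drop if running on CPU
)
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```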
config.json CHANGED
@@ -24,7 +24,7 @@
 "rope_theta": 130000,
 "tie_word_embeddings": true,
 "torch_dtype": "bfloat16",
-"transformers_version": "4.51.3",
+"transformers_version": "4.52.4",
 "use_cache": true,
 "vocab_size": 49152
 }
generation_config.json CHANGED
@@ -3,5 +3,5 @@
 "bos_token_id": 1,
 "eos_token_id": 2,
 "pad_token_id": 2,
-"transformers_version": "4.51.3"
+"transformers_version": "4.52.4"
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf9ed62f32bc7a474274025af822815ff912a25aa872f03b39bfa1ec7b8d2843
+oid sha256:dc3265f1eccc7f0a63088133dde35097dc97defc4153c5515541f94198a5254d
 size 3422777952
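The weights live in Git LFS, so the diff only swaps the pointer's `oid`; the file size is unchanged. A minimal sketch, assuming `model.safetensors` has already been downloaded locally, of checking the file against the new pointer:

```python
# Verify a local model.safetensors against the new LFS pointer above.
# The oid and size come from this diff; the local path is an assumption.
import hashlib

EXPECTED_OID = "dc3265f1eccc7f0a63088133dde35097dc97defc4153c5515541f94198a5254d"
EXPECTED_SIZE = 3_422_777_952

digest = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size} != {EXPECTED_SIZE}"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("local file matches the LFS pointer")
```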
tokenizer_config.json CHANGED
@@ -143,10 +143,10 @@
 "<|im_end|>"
 ],
 "bos_token": "<|im_start|>",
-"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
 "clean_up_tokenization_spaces": false,
 "eos_token": "<|im_end|>",
 "extra_special_tokens": {},
+"max_length": 8192,
 "model_max_length": 8192,
 "pad_token": "<|im_end|>",
 "tokenizer_class": "GPT2Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1580a1d6e6e9330ed239f0141f3c6a9b2003ba9dbe5012527caf9d9ef19fce48
-size 5688
+oid sha256:bc7ca4fc7c17fe67dad7f930c033aab41d667039fb6b330646c2f2d703a964a8
+size 5624
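`training_args.bin` is a pickled `TrainingArguments` object written by the `Trainer`, which is why both its hash and size change between runs. A minimal sketch of inspecting it from a local download; note it is arbitrary pickle, so only load files from repositories you trust:

```python
# Inspect the serialized training arguments saved alongside the model.
# weights_only=False executes pickle code: only use on trusted repos.
import torch

args = torch.load("training_args.bin", weights_only=False)  # assumed local path
print(type(args).__name__)   # likely TRL's SFTConfig for this run (assumption)
print(args.learning_rate, args.num_train_epochs)
```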