blockblockblock committed
Commit f6efdcb · verified · 1 Parent(s): 5bf2699

Upload folder using huggingface_hub

Files changed (3):
  1. README.md +7 -2
  2. config.json +3 -3
  3. output.safetensors +2 -2
README.md CHANGED
@@ -10,10 +10,15 @@ license: llama3
 <a href="https://www.gradient.ai" target="_blank"><img src="https://cdn-uploads.huggingface.co/production/uploads/655bb613e8a8971e89944f3e/TSa3V8YpoVagnTYgxiLaO.png" width="200"/></a>
 
 # Llama-3 8B Gradient Instruct 1048k
+
+Join our custom agent and long context (262k-1M+) waitlist: https://forms.gle/L6TDY7dozx8TuoUv7
+
 Gradient incorporates your data to deploy autonomous assistants that power critical operations across your business. If you're looking to build custom AI models or agents, email us a message [email protected].
 
 For more info see our [End-to-end development service for custom LLMs and AI systems](https://gradient.ai/development-lab)
 
+[Join our Discord](https://discord.com/invite/2QVy2qt2mf)
+
 This model extends LLama-3 8B's context length from 8k to > 1040K, developed by Gradient, sponsored by compute from [Crusoe Energy](https://huggingface.co/crusoeai). It demonstrates that SOTA LLMs can learn to operate on long context with minimal training by appropriately adjusting RoPE theta. We trained on 830M tokens for this stage, and 1.4B tokens total for all stages, which is < 0.01% of Llama-3's original pre-training data.
 
 **Update (5/3): We further fine-tuned our model to strengthen its assistant-like chat ability as well. The NIAH result is updated.**
@@ -43,7 +48,7 @@ For training data, we generate long contexts by augmenting [SlimPajama](https://
 | Initialize From | LLaMA-3 8B| 65K | 262K | 524k |
 | Sequence Length 2^N | 16 | 18 | 19 | 20 |
 | RoPE theta | 15.3 M | 207.1 M | 1.06B | 2.80B |
-| Batch Size | 1 | 1 | 16 | 16 |
+| Batch Size | 1 | 1 | 16 | 8 |
 | Gradient Accumulation Steps | 32 | 16 | 1 | 1 |
 | Steps | 30 | 24 | 50 | 50 |
 | Total Tokens | 62914560 | 100663296 | 419430400 | 838860800 |
@@ -102,7 +107,7 @@ Drop an email to [[email protected]](mailto:[email protected])
 
 [3] https://github.com/jzhang38/EasyContext
 
-[3] Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan
+[4] Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan
 Liu, Maosong Sun, and Bowen Zhou. Enhancing chat language models by scaling
 high-quality instructional conversations. arXiv preprint arXiv:2305.14233, 2023.
 
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "gradientai/llama3-run1-stage524k-fm-v3",
+  "_name_or_path": "gradientai/llama3-8b-stage262k-chat",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -19,10 +19,10 @@
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
-  "rope_theta": 2804339835.0,
+  "rope_theta": 3580165449.0,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.39.1",
+  "transformers_version": "4.41.0.dev0",
   "use_cache": true,
   "vocab_size": 128256,
   "quantization_config": {
output.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dbcb8866a8160dd042d8d401b668842b1bf40189f39b635fa5f65d9bca5d9905
-size 4956183188
+oid sha256:b06ee3e83bd0ac96e3d0698519a8380b69562aac08b19f2ef6849d1d09a180ec
+size 4956884788
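The output.safetensors entry is only a Git LFS pointer, so the commit just swaps the sha256 digest and byte size. If you want to confirm a locally downloaded weight file against the updated pointer, a minimal check might look like the following (the local file path is an assumption):

```python
import hashlib
from pathlib import Path

path = Path("output.safetensors")  # assumed location of the downloaded file

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

# Values taken from the new LFS pointer in this commit.
assert path.stat().st_size == 4956884788, "size mismatch"
assert digest.hexdigest() == (
    "b06ee3e83bd0ac96e3d0698519a8380b69562aac08b19f2ef6849d1d09a180ec"
), "sha256 mismatch"
print("output.safetensors matches the LFS pointer")
```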