Abdulvajid committed (verified)
Commit d8d3b36 · 1 Parent(s): 2c5ee19

Upload checkpoints

ckpt_7800/config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "architectures": [
+    "GPT"
+  ],
+  "batch_size": 8,
+  "context_len": 1024,
+  "d_model": 768,
+  "device": "cuda",
+  "dtype": "float32",
+  "intermidiate_size": 3072,
+  "load_checkpoint": true,
+  "lr": 0.0006,
+  "model_type": "gpt_custom",
+  "n_epoch": 5,
+  "n_heads": 12,
+  "n_layers": 12,
+  "transformers_version": "4.56.1",
+  "vocab_size": 50304,
+  "weight_decay": 0.1
+}
ckpt_7800/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1f4cefe98d00469f81e2e192ac302f5b621ef104280d8c4c8a3cf844b76c1ab
+size 548379944
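
The two files above fully describe the ckpt_7800 checkpoint: a 12-layer, 12-head, d_model-768 GPT with a 1024-token context and a 50304-entry vocabulary. Because model_type is "gpt_custom", the transformers Auto classes will not resolve it; the weights have to be loaded directly and fed into the repo's own GPT class via load_state_dict(). A minimal sketch, assuming the checkpoint has been pulled locally so that git-lfs has replaced the pointer with the actual 548379944-byte tensor file:

import json
from safetensors.torch import load_file

# Config keys match the diff verbatim, including the repo's
# "intermidiate_size" spelling.
with open("ckpt_7800/config.json") as f:
    cfg = json.load(f)

# Load the raw tensors; the result is an ordinary name -> tensor dict
# suitable for load_state_dict() on the training code's GPT class.
state_dict = load_file("ckpt_7800/model.safetensors")

# Sanity check: the float32 tensors should account for almost all of the
# 548379944 bytes recorded in the LFS pointer (safetensors adds only a
# small header on top).
n_bytes = sum(t.numel() * t.element_size() for t in state_dict.values())
print(cfg["n_layers"], cfg["n_heads"], cfg["d_model"], n_bytes)
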
ckpt_7950/config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "architectures": [
+    "GPT"
+  ],
+  "batch_size": 8,
+  "context_len": 1024,
+  "d_model": 768,
+  "device": "cuda",
+  "dtype": "float32",
+  "intermidiate_size": 3072,
+  "load_checkpoint": true,
+  "lr": 0.0006,
+  "model_type": "gpt_custom",
+  "n_epoch": 5,
+  "n_heads": 12,
+  "n_layers": 12,
+  "transformers_version": "4.56.1",
+  "vocab_size": 50304,
+  "weight_decay": 0.1
+}
ckpt_7950/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bda829f0daf7d24ef6dbd7d437961a28d0ebb818c424dd29d97664710087859
+size 548379944
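
ckpt_7950 has a configuration identical to ckpt_7800; only the weights, and hence the sha256 oid of its safetensors file, differ, which is what you would expect from two snapshots of the same run taken 150 steps apart (going by the folder names). Since the diff records only Git LFS pointers, a quick way to confirm a downloaded file is intact is to recompute the oid, which for LFS is simply the sha256 of the file contents. A small sketch, assuming both checkpoints have been pulled locally:

import hashlib

def lfs_oid(path: str, chunk: int = 1 << 20) -> str:
    # Stream the file so multi-hundred-MB checkpoints don't need to
    # fit in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# oids copied from the pointers in this commit
expected = {
    "ckpt_7800/model.safetensors": "c1f4cefe98d00469f81e2e192ac302f5b621ef104280d8c4c8a3cf844b76c1ab",
    "ckpt_7950/model.safetensors": "7bda829f0daf7d24ef6dbd7d437961a28d0ebb818c424dd29d97664710087859",
}
for path, oid in expected.items():
    assert lfs_oid(path) == oid, f"hash mismatch for {path}"
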
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37ca93a16aa233a9d3389db46656bf374686b16b7653f351a99d8e3751c25041
+oid sha256:ce237fb6d76df053a3cd07e97979ea5594937e559c748c95898044e6e5154a1e
 size 996203019
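
Unlike the per-step ckpt_* folders, optimizer.pt lives at the repository root and is overwritten in place (same 996203019-byte size, new oid), so it only ever holds the optimizer state for the most recent checkpoint. A sketch of resuming training from it; the choice of AdamW is an assumption, since the commit records only lr 0.0006 and weight_decay 0.1 in config.json, not the optimizer class:

import torch

def resume_optimizer(model: torch.nn.Module, path: str = "optimizer.pt") -> torch.optim.AdamW:
    # Hyperparameters mirror config.json; AdamW itself is assumed,
    # not recorded anywhere in this commit.
    opt = torch.optim.AdamW(model.parameters(), lr=6e-4, weight_decay=0.1)
    opt.load_state_dict(torch.load(path, map_location="cpu"))
    return opt

Because optimizer.pt is replaced on every save rather than versioned per checkpoint folder, it can only resume the latest step (ckpt_7950 here); resuming from ckpt_7800 would require the optimizer state as of that earlier commit.
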