CocoLng committed
Commit 712d374 · 1 Parent(s): 2bf8df4

Add camrun24 1G mOSCAR

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. README.md +2 -0
  2. cam_run24/.DS_Store +0 -0
  3. cam_run24/checkpoints/.DS_Store +0 -0
  4. cam_run24/checkpoints/checkpoint-1000/config.json +26 -0
  5. cam_run24/checkpoints/checkpoint-1000/merges.txt +0 -0
  6. cam_run24/checkpoints/checkpoint-1000/metrics_report.txt +32 -0
  7. cam_run24/checkpoints/checkpoint-1000/model.safetensors +3 -0
  8. cam_run24/checkpoints/checkpoint-1000/special_tokens_map.json +15 -0
  9. cam_run24/checkpoints/checkpoint-1000/tokenizer.json +0 -0
  10. cam_run24/checkpoints/checkpoint-1000/tokenizer_config.json +57 -0
  11. cam_run24/checkpoints/checkpoint-1000/trainer_state.pt +3 -0
  12. cam_run24/checkpoints/checkpoint-1000/vocab.json +0 -0
  13. cam_run24/checkpoints/checkpoint-10000/config.json +26 -0
  14. cam_run24/checkpoints/checkpoint-10000/merges.txt +0 -0
  15. cam_run24/checkpoints/checkpoint-10000/metrics_report.txt +38 -0
  16. cam_run24/checkpoints/checkpoint-10000/model.safetensors +3 -0
  17. cam_run24/checkpoints/checkpoint-10000/special_tokens_map.json +15 -0
  18. cam_run24/checkpoints/checkpoint-10000/tokenizer.json +0 -0
  19. cam_run24/checkpoints/checkpoint-10000/tokenizer_config.json +57 -0
  20. cam_run24/checkpoints/checkpoint-10000/trainer_state.pt +3 -0
  21. cam_run24/checkpoints/checkpoint-10000/vocab.json +0 -0
  22. cam_run24/checkpoints/checkpoint-15000/config.json +26 -0
  23. cam_run24/checkpoints/checkpoint-15000/merges.txt +0 -0
  24. cam_run24/checkpoints/checkpoint-15000/metrics_report.txt +38 -0
  25. cam_run24/checkpoints/checkpoint-15000/model.safetensors +3 -0
  26. cam_run24/checkpoints/checkpoint-15000/special_tokens_map.json +15 -0
  27. cam_run24/checkpoints/checkpoint-15000/tokenizer.json +0 -0
  28. cam_run24/checkpoints/checkpoint-15000/tokenizer_config.json +57 -0
  29. cam_run24/checkpoints/checkpoint-15000/trainer_state.pt +3 -0
  30. cam_run24/checkpoints/checkpoint-15000/vocab.json +0 -0
  31. cam_run24/checkpoints/checkpoint-20000/config.json +26 -0
  32. cam_run24/checkpoints/checkpoint-20000/merges.txt +0 -0
  33. cam_run24/checkpoints/checkpoint-20000/metrics_report.txt +38 -0
  34. cam_run24/checkpoints/checkpoint-20000/model.safetensors +3 -0
  35. cam_run24/checkpoints/checkpoint-20000/special_tokens_map.json +15 -0
  36. cam_run24/checkpoints/checkpoint-20000/tokenizer.json +0 -0
  37. cam_run24/checkpoints/checkpoint-20000/tokenizer_config.json +57 -0
  38. cam_run24/checkpoints/checkpoint-20000/trainer_state.pt +3 -0
  39. cam_run24/checkpoints/checkpoint-20000/vocab.json +0 -0
  40. cam_run24/checkpoints/checkpoint-500/config.json +26 -0
  41. cam_run24/checkpoints/checkpoint-500/merges.txt +0 -0
  42. cam_run24/checkpoints/checkpoint-500/metrics_report.txt +20 -0
  43. cam_run24/checkpoints/checkpoint-500/model.safetensors +3 -0
  44. cam_run24/checkpoints/checkpoint-500/special_tokens_map.json +15 -0
  45. cam_run24/checkpoints/checkpoint-500/tokenizer.json +0 -0
  46. cam_run24/checkpoints/checkpoint-500/tokenizer_config.json +57 -0
  47. cam_run24/checkpoints/checkpoint-500/trainer_state.pt +3 -0
  48. cam_run24/checkpoints/checkpoint-500/vocab.json +0 -0
  49. cam_run24/checkpoints/checkpoint-5000/config.json +26 -0
  50. cam_run24/checkpoints/checkpoint-5000/merges.txt +0 -0
README.md ADDED
@@ -0,0 +1,2 @@
+# CamemBert-Gpt WEIGHTS ONLY
+Final project for Sorbonne Université, advanced machine learning
cam_run24/.DS_Store ADDED
Binary file (6.15 kB).
 
cam_run24/checkpoints/.DS_Store ADDED
Binary file (8.2 kB).
 
cam_run24/checkpoints/checkpoint-1000/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
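
Note: the config above is a standard RoBERTa-base masked-LM layout (12 layers, hidden size 768, 12 heads, 50,265-token vocabulary). As a minimal sketch, assuming the repository has been cloned and the Git LFS weights pulled, one of these checkpoint directories could be loaded with the usual transformers API; the loading code below is an illustration, not part of the commit:

# Sketch: load a checkpoint directory from this commit with transformers.
# Assumes `git lfs pull` has replaced the model.safetensors pointer with real weights.
from transformers import RobertaForMaskedLM, RobertaTokenizer

ckpt_dir = "cam_run24/checkpoints/checkpoint-1000"     # path added in this commit
tokenizer = RobertaTokenizer.from_pretrained(ckpt_dir)  # uses vocab.json / merges.txt
model = RobertaForMaskedLM.from_pretrained(ckpt_dir)    # reads config.json + model.safetensors
print(model.config.num_hidden_layers, model.config.hidden_size)  # 12, 768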
cam_run24/checkpoints/checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-1000/metrics_report.txt ADDED
@@ -0,0 +1,32 @@
+=== Training Metrics Report ===
+
+Current State:
+Step: 1000
+Tokens Processed: 163,840,000
+Training Progress: 61.04%
+
+Recent Metrics:
+Step 250:
+loss: 9.0778
+grad_norm: 7.743047714233398
+learning_rate: 1.7500000000000002e-05
+epoch: 0.0125
+
+Step 500:
+loss: 6.3809
+grad_norm: 3.5750033855438232
+learning_rate: 3.5000000000000004e-05
+epoch: 0.025
+
+Step 750:
+loss: 5.9896
+grad_norm: 4.1413140296936035
+learning_rate: 5.2499999999999995e-05
+epoch: 0.0375
+
+Step 1000:
+loss: 5.9116
+grad_norm: 6.747597694396973
+learning_rate: 7.000000000000001e-05
+epoch: 0.05
+
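
For orientation, the counter in this report implies 163,840,000 / 1,000 = 163,840 tokens per optimization step; with the 512-token model_max_length from the tokenizer config, that corresponds to an effective batch of 320 sequences per step. This is inferred from the logged numbers, not stated anywhere in the commit:

# Back-of-the-envelope check of the logged token counter
# (assumption: fixed-length 512-token sequences, per tokenizer_config.json).
tokens_processed = 163_840_000
steps = 1_000
seq_len = 512
tokens_per_step = tokens_processed // steps    # 163,840
effective_batch = tokens_per_step // seq_len   # 320 sequences per step
print(tokens_per_step, effective_batch)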
cam_run24/checkpoints/checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd2e4741f9fa11564176572868bebc6443c6173af3f63c0c66c804dd607ac6ee
+size 498813948
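
model.safetensors is stored as a Git LFS pointer (version/oid/size), so the three lines above stand in for roughly 499 MB of float32 weights. That size is consistent with config.json: a RoBERTa-base masked LM of this shape has about 125 M parameters, and 125 M × 4 bytes ≈ 499 MB. A rough estimate from the config values (a sketch, not read from the checkpoint) lands close to the 498,813,948 bytes in the pointer:

# Rough float32 size estimate for the RobertaForMaskedLM described by config.json.
V, P, H, I, L = 50_265, 514, 768, 3_072, 12        # vocab, positions, hidden, FFN, layers
embeddings = V * H + P * H + 1 * H                  # word + position + token-type embeddings
per_layer = 4 * (H * H + H) + (H * I + I) + (I * H + H) + 4 * H  # attention + FFN + 2 layer norms
lm_head = (H * H + H) + 2 * H + V                   # dense + layer norm + tied-decoder bias
params = embeddings + 2 * H + L * per_layer + lm_head  # 2*H = embedding layer norm
print(params, params * 4)  # ~124.7 M params, ~498.8 MB in float32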
cam_run24/checkpoints/checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
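
The tokenizer config above is the stock RoBERTa/GPT-2 byte-level BPE setup, with <mask> at id 50264 and model_max_length 512. Since the model is a RobertaForMaskedLM, a natural smoke test once the weights are available is fill-mask; the snippet below is a hedged usage sketch (the prompt and checkpoint choice are illustrative, not from the commit):

# Sketch: quick fill-mask check against one of the committed checkpoints
# (assumes the LFS weights have been pulled).
from transformers import pipeline

fill = pipeline("fill-mask", model="cam_run24/checkpoints/checkpoint-20000")
for pred in fill("Paris est la capitale de la <mask>.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))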
cam_run24/checkpoints/checkpoint-1000/trainer_state.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7cc7ab8ccdc62dab416f5565a07c0fb0917508c2a5efb53230b956db67ef044
+size 1272
cam_run24/checkpoints/checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-10000/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
cam_run24/checkpoints/checkpoint-10000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-10000/metrics_report.txt ADDED
@@ -0,0 +1,38 @@
+=== Training Metrics Report ===
+
+Current State:
+Step: 10000
+Tokens Processed: 1,638,400,000
+Training Progress: 610.35%
+
+Recent Metrics:
+Step 9000:
+loss: 1.0834
+grad_norm: 3.596336841583252
+learning_rate: 0.0006297900000000001
+epoch: 0.45
+
+Step 9250:
+loss: 1.2132
+grad_norm: 3.082534074783325
+learning_rate: 0.00064694
+epoch: 0.4625
+
+Step 9500:
+loss: 1.1674
+grad_norm: 46.47481918334961
+learning_rate: 0.0006642299999999999
+epoch: 0.475
+
+Step 9750:
+loss: 1.1172
+grad_norm: 3.1326756477355957
+learning_rate: 0.00068173
+epoch: 0.4875
+
+Step 10000:
+loss: 1.0835
+grad_norm: 3.4375391006469727
+learning_rate: 0.00069923
+epoch: 0.5
+
cam_run24/checkpoints/checkpoint-10000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a5396c0cfbb5dc161c5a3c48cda1d57daba5b4c5df8c07d069fcaf9efbc693b
+size 498813948
cam_run24/checkpoints/checkpoint-10000/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-10000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-10000/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-10000/trainer_state.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:247313d53800315171e4a59a9338d776ffea9bc85ab7d21c1e98938363d32daa
+size 3256
cam_run24/checkpoints/checkpoint-10000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-15000/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
cam_run24/checkpoints/checkpoint-15000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-15000/metrics_report.txt ADDED
@@ -0,0 +1,38 @@
+=== Training Metrics Report ===
+
+Current State:
+Step: 15000
+Tokens Processed: 2,457,600,000
+Training Progress: 915.53%
+
+Recent Metrics:
+Step 14000:
+loss: 0.9014
+grad_norm: 2.5185256004333496
+learning_rate: 0.00042080989
+epoch: 0.7
+
+Step 14250:
+loss: 0.8956
+grad_norm: 2.4787769317626953
+learning_rate: 0.0004033123900000001
+epoch: 0.7125
+
+Step 14500:
+loss: 0.8823
+grad_norm: 2.4841043949127197
+learning_rate: 0.0003858148899999999
+epoch: 0.725
+
+Step 14750:
+loss: 0.8753
+grad_norm: 2.531907081604004
+learning_rate: 0.00036831739
+epoch: 0.7375
+
+Step 15000:
+loss: 0.8753
+grad_norm: 2.6155550479888916
+learning_rate: 0.00035081989
+epoch: 0.75
+
cam_run24/checkpoints/checkpoint-15000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85b3b31bbff6f96cb0711a5c041040d2c6570a4c9751a3b9ad9aa6b40623280c
+size 498813948
cam_run24/checkpoints/checkpoint-15000/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-15000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-15000/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-15000/trainer_state.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a16d59d9332f6115f06577d99f194c43ee1a81cba96cd933d2bfc414d9938b55
+size 4344
cam_run24/checkpoints/checkpoint-15000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-20000/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
cam_run24/checkpoints/checkpoint-20000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-20000/metrics_report.txt ADDED
@@ -0,0 +1,38 @@
+=== Training Metrics Report ===
+
+Current State:
+Step: 20000
+Tokens Processed: 3,276,800,000
+Training Progress: 1220.70%
+
+Recent Metrics:
+Step 19000:
+loss: 0.7501
+grad_norm: 2.222653388977051
+learning_rate: 7.092987999999998e-05
+epoch: 0.95
+
+Step 19250:
+loss: 0.7459
+grad_norm: 2.1408591270446777
+learning_rate: 5.343238000000004e-05
+epoch: 0.9625
+
+Step 19500:
+loss: 0.7407
+grad_norm: 2.2472920417785645
+learning_rate: 3.593488000000002e-05
+epoch: 0.975
+
+Step 19750:
+loss: 0.7389
+grad_norm: 2.1884987354278564
+learning_rate: 1.843738e-05
+epoch: 0.9875
+
+Step 20000:
+loss: 0.7345
+grad_norm: 2.1019210815429688
+learning_rate: 9.398799999999853e-07
+epoch: 1.0
+
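
Read together, the learning-rate values across the checkpoint reports trace a triangular schedule: linear warmup to roughly 7e-4 around step 10,000, then linear decay to effectively zero at step 20,000 (9.4e-7 at the final logged step). A schedule of this shape could be reproduced with transformers' built-in helper; the warmup length and peak rate below are inferred from the logs, not stated in the commit:

# Sketch of a schedule consistent with the logged learning rates
# (inferred: linear warmup over ~10,000 steps to 7e-4, linear decay to 0 at 20,000).
import torch
from transformers import get_linear_schedule_with_warmup

opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=7e-4)
sched = get_linear_schedule_with_warmup(opt, num_warmup_steps=10_000, num_training_steps=20_000)
for step in range(20_000):
    sched.step()
print(sched.get_last_lr())  # ~0 at the end of training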
cam_run24/checkpoints/checkpoint-20000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a707e260d3dab5cf94b0b4170f7c05d7b9aa9447e9ee52d4286733196d68780c
+size 498813948
cam_run24/checkpoints/checkpoint-20000/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-20000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-20000/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-20000/trainer_state.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94805ea9524dd42e4bf4135009aae4c4c8d63f9ffe4d2fe19613c35be62374f4
+size 5432
cam_run24/checkpoints/checkpoint-20000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-500/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
cam_run24/checkpoints/checkpoint-500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-500/metrics_report.txt ADDED
@@ -0,0 +1,20 @@
+=== Training Metrics Report ===
+
+Current State:
+Step: 500
+Tokens Processed: 81,920,000
+Training Progress: 30.52%
+
+Recent Metrics:
+Step 250:
+loss: 9.0778
+grad_norm: 7.743047714233398
+learning_rate: 1.7500000000000002e-05
+epoch: 0.0125
+
+Step 500:
+loss: 6.3809
+grad_norm: 3.5750033855438232
+learning_rate: 3.5000000000000004e-05
+epoch: 0.025
+
cam_run24/checkpoints/checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57fe6694ca2ed9de3e9f1fa172d621a44ad9d07e3af6ee58cbc71c13151b6b15
+size 498813948
cam_run24/checkpoints/checkpoint-500/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
cam_run24/checkpoints/checkpoint-500/trainer_state.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09936da2393321341d0f870e8eac64babb2760bbd7d1690d9c2e520d65f0961a
+size 1208
cam_run24/checkpoints/checkpoint-500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
cam_run24/checkpoints/checkpoint-5000/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
cam_run24/checkpoints/checkpoint-5000/merges.txt ADDED
The diff for this file is too large to render. See raw diff