yemen2016 committed
Commit 72d2a4a · verified · 1 parent: a8ce628

Upload folder using huggingface_hub

Files changed (40)
  1. checkpoint-1000/config.json +31 -0
  2. checkpoint-1000/generation_config.json +7 -0
  3. checkpoint-1000/optimizer.pt +3 -0
  4. checkpoint-1000/pytorch_model.bin +3 -0
  5. checkpoint-1000/rng_state.pth +3 -0
  6. checkpoint-1000/scheduler.pt +3 -0
  7. checkpoint-1000/trainer_state.json +36 -0
  8. checkpoint-1000/training_args.bin +3 -0
  9. checkpoint-1500/config.json +31 -0
  10. checkpoint-1500/generation_config.json +7 -0
  11. checkpoint-1500/optimizer.pt +3 -0
  12. checkpoint-1500/pytorch_model.bin +3 -0
  13. checkpoint-1500/rng_state.pth +3 -0
  14. checkpoint-1500/scheduler.pt +3 -0
  15. checkpoint-1500/trainer_state.json +42 -0
  16. checkpoint-1500/training_args.bin +3 -0
  17. checkpoint-2000/config.json +31 -0
  18. checkpoint-2000/generation_config.json +7 -0
  19. checkpoint-2000/optimizer.pt +3 -0
  20. checkpoint-2000/pytorch_model.bin +3 -0
  21. checkpoint-2000/rng_state.pth +3 -0
  22. checkpoint-2000/scheduler.pt +3 -0
  23. checkpoint-2000/trainer_state.json +56 -0
  24. checkpoint-2000/training_args.bin +3 -0
  25. checkpoint-500/config.json +31 -0
  26. checkpoint-500/generation_config.json +7 -0
  27. checkpoint-500/optimizer.pt +3 -0
  28. checkpoint-500/pytorch_model.bin +3 -0
  29. checkpoint-500/rng_state.pth +3 -0
  30. checkpoint-500/scheduler.pt +3 -0
  31. checkpoint-500/trainer_state.json +22 -0
  32. checkpoint-500/training_args.bin +3 -0
  33. config.json +31 -0
  34. generation_config.json +7 -0
  35. pytorch_model.bin +3 -0
  36. runs/Jan07_11-05-40_hendrixgpu22fl.unicph.domain/events.out.tfevents.1736244342.hendrixgpu22fl.unicph.domain.961379.0 +3 -0
  37. runs/Jan07_11-05-40_hendrixgpu22fl.unicph.domain/events.out.tfevents.1736244894.hendrixgpu22fl.unicph.domain.961379.1 +3 -0
  38. special_tokens_map.json +5 -0
  39. spiece.model +3 -0
  40. tokenizer_config.json +11 -0
checkpoint-1000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "ELRs/Ewondo_mT5",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 1024,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 8,
+   "num_heads": 6,
+   "num_layers": 8,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
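The config above is an mT5-small-sized T5 (8 encoder/decoder layers, d_model 512, 250k-token vocabulary, fine-tuned from ELRs/Ewondo_mT5). A minimal sketch for loading this checkpoint with transformers, assuming the repo has been downloaded locally; paths are illustrative, and note the checkpoint directories contain no tokenizer files, so the tokenizer loads from the repo root:

```python
# Minimal sketch, assuming the repo is cloned locally; paths are illustrative.
from transformers import T5ForConditionalGeneration, T5Tokenizer

model = T5ForConditionalGeneration.from_pretrained("checkpoint-1000")
tokenizer = T5Tokenizer.from_pretrained(".")  # spiece.model sits at the repo root

inputs = tokenizer("example input", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```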
checkpoint-1000/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.30.2"
+ }
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:604e185fbf18327cd14ee4594bb3b46daededc1159820e8aa15f9401d03da3fd
+ size 2401526789
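The .pt/.bin entries in this commit are Git LFS pointer stubs (version, oid, size), not the tensors themselves. A small sketch for checking which files are still pointers, assuming the repo was cloned without `git lfs pull`:

```python
# Sketch: parse a Git LFS pointer file (three "key value" lines).
# Only meaningful while the file is still a stub; after `git lfs pull`
# the path holds the real binary payload instead.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("checkpoint-1000/optimizer.pt")
print(info["oid"], info["size"])  # sha256:604e... 2401526789
```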
checkpoint-1000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90b352c44d41ba5af1c25c5b1aba3560ff14c09a40bdccaf6c2f4c4fcb06316f
+ size 1200772613
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ddf581a839d698b5bd9813f416ae0aba9973f0f354f93fe48667222b13d0830
+ size 14575
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f58c36972747b7dbc5db9c5dbc65e2722d1ec04948e610fba6902d83f728b84
+ size 627
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.2531328320802004,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.5822890559732666e-05,
+       "loss": 3.1176,
+       "step": 500
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 2.115739107131958,
+       "eval_runtime": 3.995,
+       "eval_samples_per_second": 177.722,
+       "eval_steps_per_second": 11.264,
+       "step": 798
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 1.1645781119465331e-05,
+       "loss": 2.5326,
+       "step": 1000
+     }
+   ],
+   "max_steps": 2394,
+   "num_train_epochs": 3,
+   "total_flos": 1057367194337280.0,
+   "trial_name": null,
+   "trial_params": null
+ }
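trainer_state.json is plain JSON, so the loss trajectory in log_history is easy to pull out programmatically. A short sketch using only the values shown above:

```python
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(train)  # [(500, 3.1176), (1000, 2.5326)]
print(evals)  # [(798, 2.115739107131958)]
```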
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bef77904dd1a69ba56ad3d9a998ad96adf8e3c471e99d84569b150d774724eab
+ size 3963
checkpoint-1500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "ELRs/Ewondo_mT5",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 1024,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 8,
+   "num_heads": 6,
+   "num_layers": 8,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
checkpoint-1500/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.30.2"
+ }
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47a26032824e024ed1b8c9425038a8425779b2725ba702f6ad9b3ad584478277
+ size 2401526789
checkpoint-1500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78207c709a4c69a8ad1b4dc89f62de92a9a2224327d360172f5b7a675a3194d1
+ size 1200772613
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8817f543d0af69c923bca544a5c1dfb3e8b78e58240c4fd601f33708440e3b52
+ size 14575
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:187a79bc7a5425466db66dfe6cb9e3f0ae7bd0331f1e44425eb04324722507b2
+ size 627
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.8796992481203008,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.5822890559732666e-05,
+       "loss": 3.1176,
+       "step": 500
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 2.115739107131958,
+       "eval_runtime": 3.995,
+       "eval_samples_per_second": 177.722,
+       "eval_steps_per_second": 11.264,
+       "step": 798
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 1.1645781119465331e-05,
+       "loss": 2.5326,
+       "step": 1000
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 7.468671679197995e-06,
+       "loss": 2.3611,
+       "step": 1500
+     }
+   ],
+   "max_steps": 2394,
+   "num_train_epochs": 3,
+   "total_flos": 1586116885217280.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bef77904dd1a69ba56ad3d9a998ad96adf8e3c471e99d84569b150d774724eab
+ size 3963
checkpoint-2000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "ELRs/Ewondo_mT5",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 1024,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 8,
+   "num_heads": 6,
+   "num_layers": 8,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
checkpoint-2000/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.30.2"
+ }
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97840cb3907ac5d72cfe4dce1cdcd08b9ef2e02b1d32fc6e7b65ad3ef5531a36
+ size 2401526789
checkpoint-2000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd835031b8526dd518217a1a7ebb552b5b616c121685391a1789ff5cb603f44f
+ size 1200772613
checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82d3cbb9c4470e6612bdcbea5b33ae1136a1b15a2ae2cf083607d58cc4c173f1
+ size 14575
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f2b8854518a0638c5988f808ca9409e3b563ecaa9de97a8772905d25fed5746
+ size 627
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.506265664160401,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.5822890559732666e-05,
+       "loss": 3.1176,
+       "step": 500
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 2.115739107131958,
+       "eval_runtime": 3.995,
+       "eval_samples_per_second": 177.722,
+       "eval_steps_per_second": 11.264,
+       "step": 798
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 1.1645781119465331e-05,
+       "loss": 2.5326,
+       "step": 1000
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 7.468671679197995e-06,
+       "loss": 2.3611,
+       "step": 1500
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 1.9598926305770874,
+       "eval_runtime": 3.8777,
+       "eval_samples_per_second": 183.099,
+       "eval_steps_per_second": 11.605,
+       "step": 1596
+     },
+     {
+       "epoch": 2.51,
+       "learning_rate": 3.29156223893066e-06,
+       "loss": 2.3096,
+       "step": 2000
+     }
+   ],
+   "max_steps": 2394,
+   "num_train_epochs": 3,
+   "total_flos": 2114734388674560.0,
+   "trial_name": null,
+   "trial_params": null
+ }
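The logged learning rates are consistent with a linear decay from 2e-5 over max_steps = 2394 with no warmup; this is an inference from the numbers above, not something the files state. A quick check:

```python
# Inferred schedule (assumption): linear decay from base_lr = 2e-5, no warmup.
base_lr, max_steps = 2e-5, 2394
for step in (500, 1000, 1500, 2000):
    print(step, base_lr * (1 - step / max_steps))
# 500  1.582...e-05   matches the step-500 log entry
# 1000 1.164...e-05   matches step 1000
# 1500 7.468...e-06   matches step 1500
# 2000 3.291...e-06   matches step 2000
```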
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bef77904dd1a69ba56ad3d9a998ad96adf8e3c471e99d84569b150d774724eab
+ size 3963
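training_args.bin is a pickled TrainingArguments object, which is why it is byte-identical (same sha256) in every checkpoint directory. A hedged sketch for inspecting it; unpickling requires transformers to be importable, and newer torch needs weights_only=False:

```python
import torch

# Pickled transformers.TrainingArguments; on torch < 1.13 drop the
# weights_only kwarg, which does not exist there.
args = torch.load("checkpoint-2000/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```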
checkpoint-500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "ELRs/Ewondo_mT5",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 1024,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 8,
+   "num_heads": 6,
+   "num_layers": 8,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
checkpoint-500/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.30.2"
+ }
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47539c3bc8461f56a0560553d78756da4ade24c698cabcc523c6751370b19637
+ size 2401526789
checkpoint-500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06263ba5387c8958d565ec8e2c3fffa1f3993bc995d97274e3c5614fec5070ed
+ size 1200772613
checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acd9147173ef498be25967623b269bd1628d5cea087f2ff089e3f62f27e1908e
+ size 14575
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e88518db194a6b634e82cdd343b462a07f5919fdf067c955ebcb3acdac1e5d5f
+ size 627
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.6265664160401002,
+   "global_step": 500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.5822890559732666e-05,
+       "loss": 3.1176,
+       "step": 500
+     }
+   ],
+   "max_steps": 2394,
+   "num_train_epochs": 3,
+   "total_flos": 528749690880000.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bef77904dd1a69ba56ad3d9a998ad96adf8e3c471e99d84569b150d774724eab
+ size 3963
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "ELRs/Ewondo_mT5",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 1024,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 8,
+   "num_heads": 6,
+   "num_layers": 8,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.30.2"
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cd068172c24e2e4b0d5e05fb5e2d651d0254dd6d35d1484adcc809efa97e93a
+ size 1200772613
runs/Jan07_11-05-40_hendrixgpu22fl.unicph.domain/events.out.tfevents.1736244342.hendrixgpu22fl.unicph.domain.961379.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f85b680eec6382c7e5a5de6e3b5de4e645d6760c20918f20d5ecfe3ca0f5d5b
+ size 5935
runs/Jan07_11-05-40_hendrixgpu22fl.unicph.domain/events.out.tfevents.1736244894.hendrixgpu22fl.unicph.domain.961379.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fc8ffc953dc5b5737159fd44a0d4b36c69f6a487ddde1ca550ae3fc393a59e5
+ size 311
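The runs/ directory holds the TensorBoard event files for this training run. A sketch for reading the scalars back, assuming the tensorboard package is installed; the tag name train/loss is a typical Trainer default, not confirmed by these files:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan07_11-05-40_hendrixgpu22fl.unicph.domain")
acc.Reload()
print(acc.Tags()["scalars"])             # lists the scalar tags actually logged
for event in acc.Scalars("train/loss"):  # tag name is an assumption
    print(event.step, event.value)
```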
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+ size 4309802
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "additional_special_tokens": null,
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "extra_ids": 0,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
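The model_max_length above is the transformers sentinel for "unset" (roughly 1e30), so callers should cap sequence length themselves. A closing sketch loading the final model and tokenizer from the repo root; the path is illustrative:

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained(".")  # repo root; path is illustrative
model = T5ForConditionalGeneration.from_pretrained(".")

# model_max_length is effectively unset, so truncate explicitly.
ids = tokenizer("example text", truncation=True, max_length=512).input_ids
print(len(ids))
```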