Upload folder using huggingface_hub
- best_model/decoder/config.json +34 -0
- best_model/decoder/generation_config.json +5 -0
- best_model/decoder/pytorch_model.bin +3 -0
- best_model/decoder/special_tokens_map.json +7 -0
- best_model/decoder/tokenizer.json +0 -0
- best_model/decoder/tokenizer_config.json +15 -0
- best_model/decoder/vocab.txt +0 -0
- best_model/encoder/config.json +32 -0
- best_model/encoder/pytorch_model.bin +3 -0
- best_model/encoder/special_tokens_map.json +7 -0
- best_model/encoder/tokenizer.json +0 -0
- best_model/encoder/tokenizer_config.json +15 -0
- best_model/encoder/vocab.txt +0 -0
- best_model/eval_results.txt +1 -0
- best_model/model_args.json +1 -0
- best_model/optimizer.pt +3 -0
- best_model/scheduler.pt +3 -0
- best_model/training_args.bin +3 -0
- decoder/config.json +34 -0
- decoder/generation_config.json +5 -0
- decoder/pytorch_model.bin +3 -0
- decoder/special_tokens_map.json +7 -0
- decoder/tokenizer.json +0 -0
- decoder/tokenizer_config.json +15 -0
- decoder/vocab.txt +0 -0
- encoder/config.json +32 -0
- encoder/pytorch_model.bin +3 -0
- encoder/special_tokens_map.json +7 -0
- encoder/tokenizer.json +0 -0
- encoder/tokenizer_config.json +15 -0
- encoder/vocab.txt +0 -0
- eval_results.txt +1 -0
- model_args.json +1 -0
- predictions.tsv +0 -0
- training_args.bin +3 -0
- training_progress_scores.csv +36 -0
best_model/decoder/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "add_cross_attention": true,
+  "architectures": [
+    "BertLMHeadModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "is_decoder": true,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
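For orientation: this config is a BERT-large backbone (24 layers, hidden size 1024, the BERTimbau vocabulary of 29794 tokens) repurposed as a generator, with `is_decoder` and `add_cross_attention` enabled so it can attend to an encoder. A minimal sketch of loading it with Hugging Face `transformers`; the local path is an assumption based on the file layout in this commit, not something recorded in the repo:

```python
# Minimal sketch, assuming the files above were downloaded to ./best_model/decoder.
from transformers import BertLMHeadModel

decoder = BertLMHeadModel.from_pretrained("best_model/decoder")
# The saved config marks this half as a cross-attending decoder.
assert decoder.config.is_decoder and decoder.config.add_cross_attention
```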
best_model/decoder/generation_config.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "_from_model_config": true,
+  "pad_token_id": 0,
+  "transformers_version": "4.29.2"
+}
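The saved generation defaults are deliberately minimal: only `pad_token_id` is pinned, everything else falls back to library defaults. A sketch of reading them back (local path assumed as above):

```python
# Minimal sketch: generation defaults saved alongside the decoder.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("best_model/decoder")
print(gen_config.pad_token_id)  # 0, per the file above
```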
best_model/decoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3156d2adc458ff64f6ba576850cd713e5782b728e0fa553006ef0b049a9e5619
+size 1741179769
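Note that these three lines are a Git LFS pointer, not the weights themselves; the actual ~1.7 GB file is fetched on `git lfs pull` and should match the recorded digest and size. A verification sketch (the local path is an assumption):

```python
# Minimal sketch: verify a pulled LFS file against the pointer's oid/size.
import hashlib
import os

path = "best_model/decoder/pytorch_model.bin"  # assumed local path
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == "3156d2adc458ff64f6ba576850cd713e5782b728e0fa553006ef0b049a9e5619"
assert os.path.getsize(path) == 1741179769
```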
best_model/decoder/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
best_model/decoder/tokenizer.json
ADDED
The diff for this file is too large to render.
best_model/decoder/tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
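Two details worth flagging in this tokenizer config: `do_lower_case` is false (BERTimbau is cased), and `model_max_length` is the float-rounded int(1e30) sentinel that `transformers` writes when no maximum length was recorded, so callers should cap sequence length themselves. Loading sketch (path assumed as before):

```python
# Minimal sketch: the config above declares a cased BertTokenizer.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("best_model/decoder")
print(tokenizer.tokenize("Uma frase de exemplo."))
```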
best_model/decoder/vocab.txt
ADDED
The diff for this file is too large to render.
best_model/encoder/config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
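The encoder config is the same BERT-large backbone saved as a plain `BertModel`, without the decoder flags. A sketch (assumed usage, not recorded anywhere in this repo) of recombining the two halves into a single seq2seq model:

```python
# Minimal sketch: stitch the saved halves back into an encoder-decoder model.
from transformers import BertLMHeadModel, BertModel, EncoderDecoderModel

encoder = BertModel.from_pretrained("best_model/encoder")
decoder = BertLMHeadModel.from_pretrained("best_model/decoder")
model = EncoderDecoderModel(encoder=encoder, decoder=decoder)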
best_model/encoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f82d36c0369b38691e6f59b6eff7c897d5ebc337d334b511ebe5e4f6ace5aaa
+size 1337721965
best_model/encoder/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
best_model/encoder/tokenizer.json
ADDED
The diff for this file is too large to render.
best_model/encoder/tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
best_model/encoder/vocab.txt
ADDED
The diff for this file is too large to render.
best_model/eval_results.txt
ADDED
@@ -0,0 +1 @@
+eval_loss = 1.6019802306755165e-06
best_model/model_args.json
ADDED
@@ -0,0 +1 @@
+{"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/bertimabu/best_model", "cache_dir": "cache_dir/bertimabu", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 1e-05, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "neuralmind/bert-large-portuguese-cased-neuralmind/bert-large-portuguese-cased", "model_type": "bert-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/bertimabu", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "neuralmind/bert-large-portuguese-cased"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
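model_args.json records the Simple Transformers training setup: `model_class` `Seq2SeqModel` with `model_type` `bert-bert`, both halves initialized from `neuralmind/bert-large-portuguese-cased`, trained for 10 epochs at learning rate 1e-5, batch size 8, max sequence length 256, evaluating every 3200 steps. A hedged sketch of reloading the best model with that API; the paths come from `best_model_dir` above, but the exact keyword usage is an assumption about the Simple Transformers constructor:

```python
# Minimal sketch: reload the bert-bert seq2seq model with Simple Transformers.
from simpletransformers.seq2seq import Seq2SeqModel

model = Seq2SeqModel(
    encoder_type="bert",
    encoder_name="outputs/bertimabu/best_model/encoder",
    decoder_name="outputs/bertimabu/best_model/decoder",
    use_cuda=False,
)
print(model.predict(["Uma frase de exemplo."]))
```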
best_model/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:991d7af4dcca2c250f772be8fb5c725ea524183e12d7065e5b21f20337caf167
+size 6149550514
best_model/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c7a96ee255d0a57f373a4204253994b819a4d9133113db6ef1d43effc69eb2a
+size 627
best_model/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c98b4384060371a3bc8fbda28d773c8d5ba40ed795a6a7ccac9d0d0ac24a73c6
+size 3643
decoder/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "add_cross_attention": true,
+  "architectures": [
+    "BertLMHeadModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "is_decoder": true,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
decoder/generation_config.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "_from_model_config": true,
+  "pad_token_id": 0,
+  "transformers_version": "4.29.2"
+}
decoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3156d2adc458ff64f6ba576850cd713e5782b728e0fa553006ef0b049a9e5619
+size 1741179769
decoder/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
decoder/tokenizer.json
ADDED
The diff for this file is too large to render.
decoder/tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
decoder/vocab.txt
ADDED
The diff for this file is too large to render.
encoder/config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
encoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f82d36c0369b38691e6f59b6eff7c897d5ebc337d334b511ebe5e4f6ace5aaa
+size 1337721965
encoder/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
encoder/tokenizer.json
ADDED
The diff for this file is too large to render.
encoder/tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
encoder/vocab.txt
ADDED
The diff for this file is too large to render.
eval_results.txt
ADDED
@@ -0,0 +1 @@
+eval_loss = 1.6019802306755165e-06
model_args.json
ADDED
@@ -0,0 +1 @@
+{"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/bertimabu/best_model", "cache_dir": "cache_dir/bertimabu", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 1e-05, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "neuralmind/bert-large-portuguese-cased-neuralmind/bert-large-portuguese-cased", "model_type": "bert-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/bertimabu", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "neuralmind/bert-large-portuguese-cased"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
predictions.tsv
ADDED
The diff for this file is too large to render.
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c98b4384060371a3bc8fbda28d773c8d5ba40ed795a6a7ccac9d0d0ac24a73c6
+size 3643
training_progress_scores.csv
ADDED
@@ -0,0 +1,36 @@
+global_step,eval_loss,train_loss
+3200,0.001178404999460858,0.0006026856717653573
+6400,0.00010422055795854605,0.0066980295814573765
+8242,8.634919201496561e-05,0.00010150179150514305
+9600,0.0005798203661123472,0.0006748212617821991
+12800,3.665592998781394e-05,5.669304300681688e-05
+16000,1.9148139064609864e-05,5.934512955718674e-06
+16484,4.442635250713356e-05,8.367742339032702e-06
+19200,4.0860269045350253e-05,4.3056414142483845e-05
+22400,1.5532486212820124e-05,0.00011162611190229654
+24726,9.585336437204741e-06,1.0142700375581626e-05
+25600,5.6472556965585174e-05,4.169212479609996e-05
+28800,9.08329865568708e-05,0.0287998765707016
+32000,4.51531647717285e-05,0.00048229179810732603
+32968,7.492072329877129e-06,2.445363497827202e-05
+35200,4.627303449665739e-06,3.1986153317120625e-06
+38400,1.2005302877313595e-05,5.751788648922229e-06
+41210,1.282016639177353e-05,6.00425209995592e-06
+41600,2.8359676563058704e-05,6.304038834059611e-05
+44800,8.28712661098179e-06,8.453973350697197e-06
+48000,1.8534383959556227e-06,8.852833843775443e-07
+49452,4.934307926257002e-06,4.415692728798604e-06
+51200,1.166240545478673e-05,1.3250189113023225e-05
+54400,4.212427000433759e-06,1.5147059002629248e-06
+57600,4.707957195581101e-06,1.22307619676576e-06
+57694,4.702054648813395e-06,2.4315015707543353e-06
+60800,1.300890340969685e-05,1.34417518893315e-06
+64000,3.524587238496406e-06,3.1387448871100787e-06
+65936,4.114913685621035e-06,1.1050465218431782e-06
+67200,2.154203737306198e-06,4.43456883658655e-07
+70400,1.9140163940530486e-06,7.143480047488993e-07
+73600,1.602001228416908e-06,3.527809724346298e-07
+74178,2.5098013840603486e-06,1.864866362666362e-06
+76800,1.6910507659374766e-06,5.713800987905415e-07
+80000,1.7120607546294114e-06,5.381394885262125e-07
+82420,1.6019802306755165e-06,2.916027881383343e-07
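The log shows eval_loss falling roughly three orders of magnitude over 82,420 steps, and the final row matches eval_results.txt (1.6019802306755165e-06). A small sketch for inspecting the curve, assuming pandas and matplotlib are available and the CSV was downloaded locally:

```python
# Minimal sketch: plot eval/train loss on a log scale from the CSV above.
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("training_progress_scores.csv")
df.plot(x="global_step", y=["eval_loss", "train_loss"], logy=True)
plt.savefig("loss_curves.png")
```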