Pushing of the new best model checkpoint
- README.md +101 -0
- config.json +25 -0
- model.safetensors +3 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +59 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,101 @@
---
library_name: transformers
license: mit
base_model: dbmdz/bert-base-turkish-cased
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: turkish-medical-question-answering
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# turkish-medical-question-answering

This model is a fine-tuned version of [dbmdz/bert-base-turkish-cased](https://huggingface.co/dbmdz/bert-base-turkish-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2864
- Exact Match: 55.1899
- F1: 75.1246
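
As a usage reference, here is a minimal extractive-QA sketch. The repo id below is an assumption based on the model-index name (substitute the actual Hub path), and the question/context pair is illustrative only:

```python
# Minimal sketch, assuming the checkpoint is hosted under a repo id matching
# the model-index name; replace with the actual path before use.
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="turkish-medical-question-answering",  # hypothetical repo id
)

result = qa(
    question="Hastaya hangi ilaç verildi?",  # "Which drug was given to the patient?"
    context="Hastaya ağrı için parasetamol verildi.",  # "The patient was given paracetamol for pain."
)
print(result["answer"], result["score"])
```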
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a TrainingArguments sketch follows the list):
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 64
- seed: 42
- optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 1000
- num_epochs: 10
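
The list above maps onto Trainer arguments roughly as follows. This is a hedged reconstruction, not the contents of training_args.bin; anything not in the list (e.g. output_dir, evaluation cadence) is a placeholder:

```python
# Sketch of the hyperparameters above as TrainingArguments; values not in the
# list (e.g. output_dir) are assumptions.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="turkish-medical-question-answering",  # placeholder
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    seed=42,
    optim="adamw_torch",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="cosine",
    warmup_steps=1000,
    num_train_epochs=10,
)
```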
### Training results

| Training Loss | Epoch | Step | Validation Loss | Exact Match | F1 |
|:-------------:|:------:|:----:|:---------------:|:-----------:|:-------:|
| 5.9451 | 0.1166 | 50 | 5.9731 | 0.0 | 5.7871 |
| 5.8483 | 0.2331 | 100 | 5.8315 | 0.0 | 5.9124 |
| 5.6758 | 0.3497 | 150 | 5.6207 | 0.2519 | 6.2966 |
| 5.4079 | 0.4662 | 200 | 5.3396 | 0.2584 | 6.1159 |
| 5.1136 | 0.5828 | 250 | 5.0135 | 0.2755 | 7.7324 |
| 4.7974 | 0.6993 | 300 | 4.6818 | 1.2531 | 9.4001 |
| 4.4412 | 0.8159 | 350 | 4.3335 | 1.5656 | 10.9210 |
| 4.1566 | 0.9324 | 400 | 3.8584 | 7.0281 | 22.6904 |
| 3.599 | 1.0490 | 450 | 2.8534 | 23.4628 | 48.7349 |
| 3.0347 | 1.1655 | 500 | 2.4007 | 28.9362 | 53.4749 |
| 2.7626 | 1.2821 | 550 | 2.2064 | 33.4711 | 57.0427 |
| 2.5741 | 1.3986 | 600 | 2.0689 | 37.3297 | 60.0019 |
| 2.3917 | 1.5152 | 650 | 1.9761 | 38.6968 | 61.5842 |
| 2.3607 | 1.6317 | 700 | 1.9137 | 41.5344 | 63.5713 |
| 2.2972 | 1.7483 | 750 | 1.8294 | 44.0210 | 65.5393 |
| 2.1214 | 1.8648 | 800 | 1.7721 | 44.7028 | 65.9376 |
| 2.1775 | 1.9814 | 850 | 1.7058 | 46.1538 | 66.8453 |
| 1.9282 | 2.0979 | 900 | 1.6579 | 46.7784 | 67.7813 |
| 1.9428 | 2.2145 | 950 | 1.6366 | 46.8912 | 68.5467 |
| 1.8639 | 2.3310 | 1000 | 1.5751 | 49.4819 | 70.2320 |
| 1.8969 | 2.4476 | 1050 | 1.5519 | 48.9610 | 70.3424 |
| 1.7348 | 2.5641 | 1100 | 1.5173 | 50.4505 | 71.1044 |
| 1.7847 | 2.6807 | 1150 | 1.4999 | 50.5762 | 71.6186 |
| 1.7822 | 2.7972 | 1200 | 1.4566 | 53.0691 | 72.0005 |
| 1.7989 | 2.9138 | 1250 | 1.4300 | 51.6005 | 72.0121 |
| 1.7683 | 3.0303 | 1300 | 1.4319 | 52.0305 | 72.2366 |
| 1.5444 | 3.1469 | 1350 | 1.4277 | 51.7903 | 72.0603 |
| 1.5121 | 3.2634 | 1400 | 1.3861 | 53.6122 | 73.3486 |
| 1.6294 | 3.3800 | 1450 | 1.3830 | 52.6718 | 73.2456 |
| 1.514 | 3.4965 | 1500 | 1.3456 | 53.7389 | 73.4757 |
| 1.3778 | 3.6131 | 1550 | 1.3644 | 53.2319 | 73.5271 |
| 1.4502 | 3.7296 | 1600 | 1.3491 | 53.6030 | 73.8642 |
| 1.5388 | 3.8462 | 1650 | 1.3611 | 53.0380 | 73.1390 |
| 1.5244 | 3.9627 | 1700 | 1.3143 | 53.3587 | 74.0002 |
| 1.3127 | 4.0793 | 1750 | 1.3191 | 54.4767 | 74.6247 |
| 1.3819 | 4.1958 | 1800 | 1.2864 | 55.1899 | 75.1246 |
| 1.307 | 4.3124 | 1850 | 1.3762 | 54.1401 | 74.4158 |
| 1.2792 | 4.4289 | 1900 | 1.3156 | 53.4943 | 75.0122 |
| 1.289 | 4.5455 | 1950 | 1.2809 | 55.0063 | 74.6137 |

### Framework versions

- Transformers 4.48.0.dev0
- Pytorch 2.4.1+cu121
- Datasets 3.1.0
- Tokenizers 0.21.0
config.json
ADDED
@@ -0,0 +1,25 @@
{
  "_name_or_path": "dbmdz/bert-base-turkish-cased",
  "architectures": [
    "BertForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.2,
  "classifier_dropout": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.2,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.48.0.dev0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 32000
}
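
Both dropout probabilities here are 0.2, above the 0.1 the base model ships with, so they were presumably raised for fine-tuning. A sketch of producing such a config from the base checkpoint (an inferred recipe, not a step documented in this commit):

```python
# Sketch: override the base model's dropout to match the config above; the
# actual fine-tuning setup is inferred, not documented in this commit.
from transformers import AutoConfig, AutoModelForQuestionAnswering

config = AutoConfig.from_pretrained(
    "dbmdz/bert-base-turkish-cased",
    hidden_dropout_prob=0.2,
    attention_probs_dropout_prob=0.2,
)
model = AutoModelForQuestionAnswering.from_pretrained(
    "dbmdz/bert-base-turkish-cased", config=config
)
```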
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bcf1e97fa80acd7e34110f7b2f17b6e5d5379d27f978a4ab8bd983bc57efe492
size 440136504
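
This is a Git LFS pointer: the repository stores only the hash and size, and the ~440 MB weights file is fetched separately. A small sketch for checking a local download against the pointer (the local filename is assumed):

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer's
# sha256 oid; the local path is a placeholder.
import hashlib

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

expected = "bcf1e97fa80acd7e34110f7b2f17b6e5d5379d27f978a4ab8bd983bc57efe492"
print(h.hexdigest() == expected)
```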
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,59 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_len": 512,
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
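
These settings are standard for a cased BERT WordPiece tokenizer (no lowercasing, 512-token limit). A quick check that a loaded tokenizer reflects them; the repo id below is a placeholder:

```python
# Sketch: load the tokenizer and confirm the config above; replace the repo
# id with the actual Hub path (or a local directory).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("turkish-medical-question-answering")
print(tok.cls_token, tok.sep_token, tok.pad_token)  # expect [CLS] [SEP] [PAD]
print(tok.model_max_length)                         # expect 512
```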
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50dffc91e032cee9e5d4246dabf7dbb61430bf79c9d7fd4829fbf092223f251b
size 5432
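
training_args.bin is the TrainingArguments object the Trainer pickles alongside the model (here, another LFS pointer). To inspect the exact run settings, it can be loaded back; a hedged sketch, assuming a local copy of the file:

```python
# Sketch: inspect the pickled TrainingArguments; weights_only=False is needed
# on newer PyTorch because this file is not a plain tensor checkpoint.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```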
vocab.txt
ADDED
The diff for this file is too large to render.