End of training
- README.md +162 -0
- config.json +37 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +397 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer_config.json +30 -0
- training_args.bin +3 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,162 @@
+---
+license: other
+base_model: facebook/opt-1.3b
+tags:
+- generated_from_trainer
+metrics:
+- accuracy
+model-index:
+- name: NumTrainEpochs10_SaveStrategiesno_reward_modeling_anthropic_hh
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# NumTrainEpochs10_SaveStrategiesno_reward_modeling_anthropic_hh
+
+This model is a fine-tuned version of [facebook/opt-1.3b](https://huggingface.co/facebook/opt-1.3b) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 3.2160
+- Accuracy: 0.6289
+- Train Rewards/chosen: 13.3266
+- Train Rewards/rejected: -10.7412
+- Train Rewards/accuracies: 0.9925
+- Train Rewards/margins: 24.0678
+- Train Nll Loss: 1.9271
+- Train Logit Total Loss: 0.0395
+- Train Logit Loss: 0.0204
+- Rewards/chosen: 4.7138
+- Rewards/rejected: -1.7686
+- Rewards/accuracies: 0.6145
+- Rewards/margins: 6.4823
+- Nll Loss: 2.0087
+- Logit Total Loss: 3.2131
+- Logit Loss: 3.2252
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 1.41e-05
+- train_batch_size: 4
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 16
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- num_epochs: 10
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Accuracy | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Nll Loss | Logit Total Loss | Logit Loss |
+|:-------------:|:-----:|:----:|:---------------:|:--------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------:|:-----------------:|:-----------:|
+| 0.7879 | 0.11 | 100 | 0.7507 | 0.4845 | -0.1740 | -0.1876 | 0.4714 | 0.0135 | 6.2659 | 0.7498 | 0.6941 |
+| 0.7291 | 0.23 | 200 | 0.7331 | 0.5773 | -0.2697 | -0.3880 | 0.5645 | 0.1184 | 6.0096 | 0.7310 | 0.6777 |
+| 0.6843 | 0.34 | 300 | 0.7057 | 0.5876 | 0.2058 | -0.0389 | 0.5754 | 0.2448 | 3.9577 | 0.7039 | 0.6710 |
+| 0.6773 | 0.46 | 400 | 0.6950 | 0.5918 | -0.0097 | -0.2138 | 0.5774 | 0.2041 | 4.2789 | 0.6968 | 0.6607 |
+| 0.7071 | 0.57 | 500 | 0.7107 | 0.5918 | 0.7447 | 0.5198 | 0.5815 | 0.2249 | 4.4170 | 0.7087 | 0.6712 |
+| 0.6881 | 0.69 | 600 | 0.6687 | 0.6186 | -0.8010 | -1.0541 | 0.6028 | 0.2531 | 3.2753 | 0.6671 | 0.6408 |
+| 0.6871 | 0.8 | 700 | 0.6847 | 0.5753 | -1.7064 | -1.9330 | 0.5601 | 0.2266 | 3.7264 | 0.6839 | 0.6532 |
+| 0.7125 | 0.91 | 800 | 0.6885 | 0.5814 | -1.4574 | -1.6521 | 0.5734 | 0.1947 | 4.3386 | 0.6851 | 0.6482 |
+| 0.62 | 1.03 | 900 | 0.6955 | 0.6103 | -1.4133 | -1.8571 | 0.5964 | 0.4438 | 3.1332 | 0.6958 | 0.6712 |
+| 0.5929 | 1.14 | 1000 | 0.6537 | 0.6371 | -1.9413 | -2.5254 | 0.6226 | 0.5841 | 2.9107 | 0.6524 | 0.6296 |
+| 0.5825 | 1.26 | 1100 | 0.6749 | 0.6515 | 0.4669 | -0.0787 | 0.6367 | 0.5455 | 2.9364 | 0.6732 | 0.6503 |
+| 0.614 | 1.37 | 1200 | 0.6697 | 0.6351 | 0.1785 | -0.2933 | 0.6258 | 0.4718 | 2.9997 | 0.6659 | 0.6423 |
+| 0.5528 | 1.49 | 1300 | 0.6553 | 0.6268 | -1.0780 | -1.6306 | 0.6177 | 0.5526 | 2.9051 | 0.6504 | 0.6276 |
+| 0.6501 | 1.6 | 1400 | 0.6379 | 0.6412 | -1.6259 | -2.1203 | 0.6306 | 0.4944 | 2.9085 | 0.6351 | 0.6121 |
+| 0.545 | 1.71 | 1500 | 0.6640 | 0.6660 | -0.3375 | -1.1855 | 0.6560 | 0.8480 | 3.0934 | 0.6573 | 0.6327 |
+| 0.6653 | 1.83 | 1600 | 0.6379 | 0.6639 | -1.0663 | -1.6961 | 0.6528 | 0.6298 | 2.8475 | 0.6376 | 0.6153 |
+| 0.5792 | 1.94 | 1700 | 0.6447 | 0.6577 | -0.0283 | -0.6093 | 0.6480 | 0.5810 | 3.0457 | 0.6420 | 0.6178 |
+| 0.2858 | 2.06 | 1800 | 0.9327 | 0.6330 | 1.7576 | 0.2131 | 0.6226 | 1.5445 | 2.8731 | 0.9216 | 0.9019 |
+| 0.3404 | 2.17 | 1900 | 0.8438 | 0.6144 | 0.9326 | -0.2443 | 0.6024 | 1.1769 | 2.7925 | 0.8418 | 0.8221 |
+| 0.2734 | 2.29 | 2000 | 0.9082 | 0.6227 | 1.6779 | 0.4268 | 0.6125 | 1.2511 | 2.7991 | 0.8986 | 0.8794 |
+| 0.2562 | 2.4 | 2100 | 0.9566 | 0.6371 | 2.2122 | 0.5184 | 0.6266 | 1.6937 | 2.7729 | 0.9522 | 0.9338 |
+| 0.3796 | 2.51 | 2200 | 0.8839 | 0.6351 | 0.7900 | -0.5311 | 0.6218 | 1.3211 | 2.7689 | 0.8720 | 0.8528 |
+| 0.2316 | 2.63 | 2300 | 0.8741 | 0.6454 | 2.0133 | 0.5784 | 0.6359 | 1.4349 | 2.7465 | 0.8633 | 0.8443 |
+| 0.3679 | 2.74 | 2400 | 0.8584 | 0.6515 | -0.8628 | -2.1801 | 0.6379 | 1.3173 | 2.7134 | 0.8483 | 0.8294 |
+| 0.3384 | 2.86 | 2500 | 0.9165 | 0.6412 | -0.9835 | -2.3685 | 0.6294 | 1.3850 | 2.7084 | 0.9087 | 0.8905 |
+| 0.3595 | 2.97 | 2600 | 0.9173 | 0.6454 | 0.3307 | -1.0129 | 0.6347 | 1.3436 | 2.7089 | 0.9049 | 0.8867 |
+| 0.1331 | 3.09 | 2700 | 1.4595 | 0.6557 | 0.6119 | -2.1780 | 0.6468 | 2.7900 | 2.6967 | 1.4381 | 1.4254 |
+| 0.1464 | 3.2 | 2800 | 1.4234 | 0.6351 | 5.4974 | 2.9945 | 0.6258 | 2.5029 | 2.6392 | 1.3999 | 1.3874 |
+| 0.137 | 3.31 | 2900 | 1.4612 | 0.6474 | 3.1356 | 0.4400 | 0.6363 | 2.6956 | 2.6002 | 1.4435 | 1.4318 |
+| 0.1593 | 3.43 | 3000 | 1.7826 | 0.6433 | 3.8280 | 0.7687 | 0.6282 | 3.0593 | 2.6206 | 1.7676 | 1.7590 |
+| 0.0834 | 3.54 | 3100 | 1.5493 | 0.6474 | 2.4447 | -0.2971 | 0.6355 | 2.7418 | 2.6296 | 1.5386 | 1.5275 |
+| 0.136 | 3.66 | 3200 | 1.5847 | 0.6495 | 2.6691 | -0.1416 | 0.6375 | 2.8108 | 2.6007 | 1.5701 | 1.5597 |
+| 0.0859 | 3.77 | 3300 | 1.7114 | 0.6227 | 0.8690 | -1.9033 | 0.6093 | 2.7723 | 2.5630 | 1.6942 | 1.6854 |
+| 0.1705 | 3.89 | 3400 | 1.7792 | 0.6268 | -1.4030 | -4.0698 | 0.6121 | 2.6669 | 2.5917 | 1.7786 | 1.7704 |
+| 0.1675 | 4.0 | 3500 | 2.1762 | 0.6268 | -1.5886 | -5.0180 | 0.6133 | 3.4294 | 2.5716 | 2.1579 | 2.1537 |
+| 0.0589 | 4.11 | 3600 | 2.3409 | 0.6309 | 1.1330 | -2.8993 | 0.6173 | 4.0323 | 2.4949 | 2.3055 | 2.3036 |
+| 0.1014 | 4.23 | 3700 | 2.3221 | 0.6268 | 2.6255 | -1.3486 | 0.6125 | 3.9741 | 2.4617 | 2.2985 | 2.2969 |
+| 0.0697 | 4.34 | 3800 | 2.4256 | 0.6351 | 2.8885 | -1.2680 | 0.6194 | 4.1565 | 2.4613 | 2.4010 | 2.4004 |
+| 0.1687 | 4.46 | 3900 | 2.1905 | 0.6433 | 3.3404 | -1.0572 | 0.6347 | 4.3976 | 2.4074 | 2.1582 | 2.1556 |
+| 0.0315 | 4.57 | 4000 | 2.3170 | 0.6619 | 2.0050 | -2.4036 | 0.6480 | 4.4086 | 2.4112 | 2.2812 | 2.2799 |
+| 0.1071 | 4.69 | 4100 | 2.2205 | 0.6454 | 0.9399 | -3.4755 | 0.6379 | 4.4154 | 2.3561 | 2.1998 | 2.1983 |
+| 0.1342 | 4.8 | 4200 | 2.2640 | 0.6557 | 10.1640 | 5.7216 | 0.6419 | 4.4424 | 2.3536 | 2.2410 | 2.2399 |
+| 0.0793 | 4.91 | 4300 | 2.0629 | 0.6495 | -0.6830 | -4.8288 | 0.6327 | 4.1458 | 2.3658 | 2.0407 | 2.0374 |
+| 0.0587 | 5.03 | 4400 | 2.3862 | 0.6371 | 3.2076 | -1.4161 | 0.6258 | 4.6238 | 2.3529 | 2.3625 | 2.3626 |
+| 0.0433 | 5.14 | 4500 | 2.5409 | 0.6454 | 4.9940 | 0.1253 | 0.6286 | 4.8687 | 2.3166 | 2.5250 | 2.5271 |
+| 0.0506 | 5.26 | 4600 | 2.5949 | 0.6557 | 6.7660 | 1.6624 | 0.6395 | 5.1035 | 2.2864 | 2.5983 | 2.6014 |
+| 0.0506 | 5.37 | 4700 | 2.7389 | 0.6351 | 7.2608 | 2.0917 | 0.6226 | 5.1690 | 2.2691 | 2.7197 | 2.7243 |
+| 0.0644 | 5.49 | 4800 | 2.8523 | 0.6309 | 2.3756 | -2.9285 | 0.6173 | 5.3041 | 2.2594 | 2.8574 | 2.8634 |
+| 0.0714 | 5.6 | 4900 | 2.5013 | 0.6309 | 2.5445 | -2.3571 | 0.6206 | 4.9016 | 2.2422 | 2.5045 | 2.5072 |
+| 0.1087 | 5.71 | 5000 | 2.6378 | 0.6227 | -0.0320 | -5.0243 | 0.6113 | 4.9923 | 2.2318 | 2.6303 | 2.6344 |
+| 0.0874 | 5.83 | 5100 | 2.8088 | 0.6412 | 5.9816 | 0.6049 | 0.6278 | 5.3767 | 2.2257 | 2.7811 | 2.7867 |
+| 0.0871 | 5.94 | 5200 | 2.4819 | 0.6433 | 7.2347 | 2.1895 | 0.6306 | 5.0451 | 2.2034 | 2.4679 | 2.4706 |
+| 0.0331 | 6.06 | 5300 | 2.8775 | 0.6268 | 9.8380 | 4.4195 | 0.6145 | 5.4184 | 2.1978 | 2.8663 | 2.8730 |
+| 0.024 | 6.17 | 5400 | 2.8923 | 0.6433 | 5.1441 | -0.5990 | 0.6306 | 5.7431 | 2.1912 | 2.8713 | 2.8781 |
+| 0.0354 | 6.29 | 5500 | 2.7626 | 0.6433 | -1.4206 | -6.9376 | 0.6315 | 5.5170 | 2.1826 | 2.7519 | 2.7577 |
+| 0.0289 | 6.4 | 5600 | 2.8423 | 0.6371 | 7.1683 | 1.7904 | 0.6246 | 5.3779 | 2.1707 | 2.8182 | 2.8248 |
+| 0.0389 | 6.51 | 5700 | 2.9096 | 0.6412 | 2.0666 | -3.5386 | 0.6234 | 5.6052 | 2.1672 | 2.9140 | 2.9215 |
+| 0.0245 | 6.63 | 5800 | 2.8677 | 0.6495 | 4.5194 | -1.1798 | 0.6347 | 5.6992 | 2.1466 | 2.8461 | 2.8532 |
+| 0.0804 | 6.74 | 5900 | 2.9668 | 0.6371 | 5.6766 | -0.3308 | 0.6226 | 6.0074 | 2.1468 | 2.9437 | 2.9518 |
+| 0.029 | 6.86 | 6000 | 3.0269 | 0.6371 | 3.9285 | -2.2229 | 0.6226 | 6.1514 | 2.1305 | 2.9998 | 3.0086 |
+| 0.0438 | 6.97 | 6100 | 2.8192 | 0.6639 | 2.2607 | -4.3102 | 0.6476 | 6.5708 | 2.1277 | 2.8101 | 2.8170 |
+| 0.0451 | 7.09 | 6200 | 2.8547 | 0.6577 | 2.5219 | -3.4933 | 0.6395 | 6.0152 | 2.1111 | 2.8383 | 2.8456 |
+| 0.0761 | 7.2 | 6300 | 2.9610 | 0.6536 | 4.7705 | -1.5571 | 0.6435 | 6.3275 | 2.1023 | 2.9370 | 2.9454 |
+| 0.0477 | 7.31 | 6400 | 2.8708 | 0.6619 | 2.7809 | -3.7082 | 0.6488 | 6.4891 | 2.0958 | 2.8410 | 2.8485 |
+| 0.0449 | 7.43 | 6500 | 3.0901 | 0.6619 | 5.8808 | -0.8822 | 0.6496 | 6.7630 | 2.0873 | 3.0685 | 3.0784 |
+| 0.0418 | 7.54 | 6600 | 2.9687 | 0.6371 | 2.2079 | -4.1264 | 0.6206 | 6.3343 | 2.0853 | 2.9514 | 2.9602 |
+| 0.0473 | 7.66 | 6700 | 2.9895 | 0.6351 | 2.4455 | -3.8039 | 0.6206 | 6.2494 | 2.0790 | 2.9705 | 2.9795 |
+| 0.0459 | 7.77 | 6800 | 3.0660 | 0.6392 | 4.6892 | -1.6980 | 0.6254 | 6.3872 | 2.0757 | 3.0540 | 3.0638 |
+| 0.045 | 7.89 | 6900 | 3.0811 | 0.6474 | 2.9687 | -3.4595 | 0.6347 | 6.4282 | 2.0697 | 3.0561 | 3.0661 |
+| 0.0493 | 8.0 | 7000 | 2.9549 | 0.6330 | 3.3733 | -2.8947 | 0.6214 | 6.2680 | 2.0679 | 2.9435 | 2.9523 |
+| 0.031 | 8.11 | 7100 | 2.9964 | 0.6330 | 4.2065 | -2.1412 | 0.6206 | 6.3477 | 2.0650 | 2.9810 | 2.9903 |
+| 0.0196 | 8.23 | 7200 | 3.0962 | 0.6371 | 4.8289 | -1.6916 | 0.6246 | 6.5204 | 2.0550 | 3.0800 | 3.0904 |
+| 0.0223 | 8.34 | 7300 | 3.0038 | 0.6392 | 2.7990 | -3.5327 | 0.6246 | 6.3317 | 2.0497 | 2.9870 | 2.9965 |
+| 0.0629 | 8.46 | 7400 | 3.0349 | 0.6351 | 5.2916 | -0.8920 | 0.6206 | 6.1836 | 2.0453 | 3.0076 | 3.0173 |
+| 0.0922 | 8.57 | 7500 | 3.0735 | 0.6227 | 1.5229 | -4.6388 | 0.6105 | 6.1617 | 2.0409 | 3.0489 | 3.0591 |
+| 0.0302 | 8.69 | 7600 | 3.1279 | 0.6289 | 1.4324 | -4.7615 | 0.6185 | 6.1939 | 2.0355 | 3.1060 | 3.1168 |
+| 0.0589 | 8.8 | 7700 | 3.1274 | 0.6412 | 4.6809 | -1.6469 | 0.6306 | 6.3279 | 2.0298 | 3.1051 | 3.1159 |
+| 0.0389 | 8.91 | 7800 | 3.0308 | 0.6330 | 4.8002 | -1.3492 | 0.6206 | 6.1494 | 2.0277 | 3.0129 | 3.0229 |
+| 0.0252 | 9.03 | 7900 | 3.0680 | 0.6330 | 5.0212 | -1.1246 | 0.6165 | 6.1458 | 2.0236 | 3.0565 | 3.0670 |
+| 0.0652 | 9.14 | 8000 | 3.1190 | 0.6351 | 4.3150 | -1.9926 | 0.6165 | 6.3076 | 2.0196 | 3.1234 | 3.1345 |
+| 0.0201 | 9.26 | 8100 | 3.1413 | 0.6289 | 4.7573 | -1.5726 | 0.6165 | 6.3299 | 2.0164 | 3.1389 | 3.1503 |
+| 0.0443 | 9.37 | 8200 | 3.1135 | 0.6247 | 4.3945 | -1.9119 | 0.6125 | 6.3065 | 2.0140 | 3.1029 | 3.1139 |
+| 0.0186 | 9.49 | 8300 | 3.1597 | 0.6289 | 3.7131 | -2.6943 | 0.6165 | 6.4074 | 2.0114 | 3.1487 | 3.1602 |
+| 0.0352 | 9.6 | 8400 | 3.1513 | 0.6247 | 3.9594 | -2.4902 | 0.6085 | 6.4496 | 2.0100 | 3.1409 | 3.1523 |
+| 0.0225 | 9.71 | 8500 | 3.1966 | 0.6227 | 4.9750 | -1.5016 | 0.6125 | 6.4766 | 2.0095 | 3.1854 | 3.1973 |
+| 0.0385 | 9.83 | 8600 | 3.2165 | 0.6268 | 4.9076 | -1.6079 | 0.6125 | 6.5155 | 2.0094 | 3.2082 | 3.2203 |
+| 0.0266 | 9.94 | 8700 | 3.2160 | 0.6289 | 4.7138 | -1.7686 | 0.6145 | 6.4823 | 2.0087 | 3.2131 | 3.2252 |
+
+
+### Framework versions
+
+- Transformers 4.37.2
+- Pytorch 2.4.0+cu121
+- Datasets 2.21.0
+- Tokenizers 0.15.2
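
As context for the card above: the checkpoint stores an `OPTForSequenceClassification` head with a single label (see `config.json` below), i.e. a scalar reward head, and the Rewards/* columns compare that scalar score on chosen vs. rejected completions. A minimal usage sketch, assuming a standard `transformers` install; the checkpoint path is a hypothetical placeholder:

```python
# Minimal sketch. Assumptions: the checkpoint path below is a hypothetical placeholder,
# and only standard transformers/torch APIs are used.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

path = "./NumTrainEpochs10_SaveStrategiesno_reward_modeling_anthropic_hh"  # hypothetical local path
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path)  # num_labels=1 -> scalar head
model.eval()

def reward(text: str) -> float:
    # The single-label logit is the scalar reward score.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        return model(**inputs).logits[0, 0].item()

chosen = "Human: How do I boil an egg?\n\nAssistant: Simmer it for 7 to 9 minutes, then cool it in ice water."
rejected = "Human: How do I boil an egg?\n\nAssistant: Figure it out yourself."
print(reward(chosen) - reward(rejected))  # cf. the Rewards/margins column above
```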
config.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "facebook/opt-1.3b",
+  "_remove_final_layer_norm": false,
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "OPTForSequenceClassification"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 2,
+  "do_layer_norm_before": true,
+  "dropout": 0.1,
+  "enable_bias": true,
+  "eos_token_id": 2,
+  "ffn_dim": 8192,
+  "hidden_size": 2048,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "init_std": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_elementwise_affine": true,
+  "layerdrop": 0.0,
+  "max_position_embeddings": 2048,
+  "model_type": "opt",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "prefix": "</s>",
+  "torch_dtype": "float32",
+  "transformers_version": "4.37.2",
+  "use_cache": true,
+  "vocab_size": 50272,
+  "word_embed_proj_dim": 2048
+}
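
The config keeps the OPT-1.3b geometry (24 layers, hidden size 2048, vocab 50272) and swaps in a single-label classification head, which is what the scalar reward scoring above relies on. A quick sanity-check sketch, assuming a local copy of this `config.json` in the working directory:

```python
# Sketch: confirm the scalar-head setup from config.json (assumed to be in the current directory).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")
assert cfg.architectures == ["OPTForSequenceClassification"]
assert len(cfg.id2label) == 1   # single entry LABEL_0, so num_labels == 1
assert cfg.pad_token_id == 1    # <pad>; classification pools at the last non-pad token
print(cfg.hidden_size, cfg.num_hidden_layers)  # 2048 24
```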
merges.txt
ADDED
The diff for this file is too large to render.
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94dea9c2fad6aa9887b61a11d4536714b2a4b768e24e242581499f39e30e053f
+size 4994509120
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fa9e152078dcc6b560bc05674c7d8c08054ad76334f3b2eb014626479fe7717
+size 680405464
model.safetensors.index.json
ADDED
@@ -0,0 +1,397 @@
+{
+  "metadata": {
+    "total_size": 5674868736
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.embed_positions.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.embed_tokens.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.fc2.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.22.fc2.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.22.final_layer_norm.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.22.final_layer_norm.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.23.fc1.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.fc1.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.fc2.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.fc2.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.final_layer_norm.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.final_layer_norm.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00002.safetensors",
+    "model.decoder.layers.3.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.fc1.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.fc1.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.fc2.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.fc2.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.final_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.final_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00002.safetensors",
+    "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00002.safetensors",
+    "score.weight": "model-00002-of-00002.safetensors"
+  }
+}
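
This index is what lets `from_pretrained` route each tensor to the right shard: everything sits in the roughly 5 GB first shard except the tail of the network (the end of layer 22, all of layer 23, `lm_head.weight`, and the `score.weight` classification head). A manual lookup sketch, assuming a local copy of the index file:

```python
# Sketch: resolve tensors to shards via the index (assumes a local copy of the file).
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])      # 5674868736 bytes across both shards
print(index["weight_map"]["score.weight"])  # model-00002-of-00002.safetensors
print(index["weight_map"]["model.decoder.embed_tokens.weight"])  # model-00001-of-00002.safetensors
```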
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "add_bos_token": true,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "</s>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "errors": "replace",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "</s>"
+}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c88b3168f77ed23e4d33b4865e9a03450de941e8bbe9117268befc0db9614f93
+size 4856
vocab.json
ADDED
The diff for this file is too large to render.