Commit 0a185c9 · committed by root · 1 Parent(s): 526ae3e
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "_name_or_path": "result/my-unsup-simcse-roberta-base-d0.1-rs192-std0.1-t0.05",
+ "architectures": [
+ "RobertaForCL"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "transformers_version": "4.2.1",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
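
The config above records the SimCSE training wrapper (`RobertaForCL`) on top of a standard `roberta-base` encoder. As a minimal sketch only: `RobertaForCL` is defined in the SimCSE training code, not in `transformers` itself, so the snippet below loads the checkpoint with the stock `RobertaModel` class just to obtain sentence embeddings. The path is the local checkpoint directory taken from `_name_or_path`; a Hub repo id could be substituted.

```python
# Sketch only: load this checkpoint with stock transformers classes.
# "RobertaForCL" is the SimCSE-specific wrapper, so RobertaModel is used here;
# loading this way may warn about unused contrastive-head weights.
import torch
from transformers import AutoTokenizer, RobertaModel

ckpt = "result/my-unsup-simcse-roberta-base-d0.1-rs192-std0.1-t0.05"  # from "_name_or_path"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = RobertaModel.from_pretrained(ckpt)

# Unsupervised SimCSE typically takes the <s> ([CLS]) hidden state as the sentence embedding.
inputs = tokenizer(["A man is playing a guitar."], return_tensors="pt", padding=True)
with torch.no_grad():
    emb = model(**inputs).last_hidden_state[:, 0]
print(emb.shape)  # torch.Size([1, 768]), matching "hidden_size": 768
```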
eval_results.txt ADDED
@@ -0,0 +1,12 @@
+ epoch = 1.0
+ eval_CR = 86.81
+ eval_MPQA = 85.37
+ eval_MR = 80.38
+ eval_MRPC = 75.61
+ eval_SST2 = 86.47
+ eval_SUBJ = 93.2
+ eval_TREC = 74.98
+ eval_avg_sts = 0.778271737124629
+ eval_avg_transfer = 83.26
+ eval_sickr_spearman = 0.7131796271103109
+ eval_stsb_spearman = 0.8433638471389473
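
The two aggregate entries appear to be plain means of the per-task numbers listed above; a small check with the values copied from this file:

```python
# eval_avg_sts = mean of the two Spearman scores; eval_avg_transfer = mean of the
# seven transfer tasks (values copied verbatim from eval_results.txt).
sts = [0.7131796271103109, 0.8433638471389473]                # SICK-R, STS-B
transfer = [86.81, 85.37, 80.38, 75.61, 86.47, 93.2, 74.98]   # CR, MPQA, MR, MRPC, SST2, SUBJ, TREC

print(sum(sts) / len(sts))                      # -> 0.7782717371246291, matches eval_avg_sts
print(round(sum(transfer) / len(transfer), 2))  # -> 83.26, matches eval_avg_transfer
```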
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad80aa49ecd13a9e6b4105e334eef706462677fe8e1c10599f09c7e039b98aa8
+ size 997289965
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e3408163f721652e895c928b1f88fd9e20af3e4e70a5e8b84a6da272861a351
+ size 501035657
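
optimizer.pt, pytorch_model.bin, scheduler.pt and training_args.bin are stored via Git LFS, so what the diff shows are three-line pointer stubs (spec version, SHA-256 oid, byte size) rather than the binaries themselves. A small illustrative parser for such a stub (the filename is just an example; this applies only to a clone where the LFS objects have not been pulled yet):

```python
def read_lfs_pointer(path: str) -> dict:
    """Parse the version/oid/size fields of a Git LFS pointer stub."""
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

print(read_lfs_pointer("pytorch_model.bin"))
# {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:2e34...', 'size': '501035657'}
```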
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d13f17241fe3ee82151cd92a07c283b6a8de10bc88ad69e2817521f1b9e3237
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "name_or_path": "roberta-base"}
train_results.txt ADDED
@@ -0,0 +1,3 @@
+ epoch = 1.0
+ train_runtime = 3862.446
+ train_samples_per_second = 4.046
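
A quick consistency check, with the caveat that the interpretation is an assumption: dividing the 15626 optimizer steps recorded in trainer_state.json by the reported runtime reproduces the throughput figure, so `train_samples_per_second` here appears to count optimization steps rather than individual examples (a quirk of how this transformers version reported the metric).

```python
# global_step = 15626 (from trainer_state.json below) over the reported runtime.
print(15626 / 3862.446)  # ~4.0456, i.e. the reported train_samples_per_second = 4.046
```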
trainer_state.json ADDED
@@ -0,0 +1,1083 @@
1
+ {
2
+ "best_metric": 0.8433638471389473,
3
+ "best_model_checkpoint": "result/my-unsup-simcse-roberta-base-d0.1-rs192-std0.1-t0.05",
4
+ "epoch": 1.0,
5
+ "global_step": 15626,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.01,
12
+ "eval_avg_sts": 0.7572535655220898,
13
+ "eval_sickr_spearman": 0.7054352844388353,
14
+ "eval_stsb_spearman": 0.8090718466053444,
15
+ "step": 125
16
+ },
17
+ {
18
+ "epoch": 0.02,
19
+ "eval_avg_sts": 0.7614025514877952,
20
+ "eval_sickr_spearman": 0.7051708251943171,
21
+ "eval_stsb_spearman": 0.8176342777812734,
22
+ "step": 250
23
+ },
24
+ {
25
+ "epoch": 0.02,
26
+ "eval_avg_sts": 0.7636217659471919,
27
+ "eval_sickr_spearman": 0.7079373205749496,
28
+ "eval_stsb_spearman": 0.8193062113194342,
29
+ "step": 375
30
+ },
31
+ {
32
+ "epoch": 0.03,
33
+ "learning_rate": 9.680020478689364e-06,
34
+ "loss": 0.2125,
35
+ "step": 500
36
+ },
37
+ {
38
+ "epoch": 0.03,
39
+ "eval_avg_sts": 0.7657425201047614,
40
+ "eval_sickr_spearman": 0.7097071706007412,
41
+ "eval_stsb_spearman": 0.8217778696087817,
42
+ "step": 500
43
+ },
44
+ {
45
+ "epoch": 0.04,
46
+ "eval_avg_sts": 0.7674773834562932,
47
+ "eval_sickr_spearman": 0.7100151940542637,
48
+ "eval_stsb_spearman": 0.8249395728583228,
49
+ "step": 625
50
+ },
51
+ {
52
+ "epoch": 0.05,
53
+ "eval_avg_sts": 0.7670247636071179,
54
+ "eval_sickr_spearman": 0.7053980603352208,
55
+ "eval_stsb_spearman": 0.828651466879015,
56
+ "step": 750
57
+ },
58
+ {
59
+ "epoch": 0.06,
60
+ "eval_avg_sts": 0.7642544693075077,
61
+ "eval_sickr_spearman": 0.7042453619318076,
62
+ "eval_stsb_spearman": 0.8242635766832077,
63
+ "step": 875
64
+ },
65
+ {
66
+ "epoch": 0.06,
67
+ "learning_rate": 9.36004095737873e-06,
68
+ "loss": 0.0005,
69
+ "step": 1000
70
+ },
71
+ {
72
+ "epoch": 0.06,
73
+ "eval_avg_sts": 0.7706729186771359,
74
+ "eval_sickr_spearman": 0.7109765365495476,
75
+ "eval_stsb_spearman": 0.8303693008047242,
76
+ "step": 1000
77
+ },
78
+ {
79
+ "epoch": 0.07,
80
+ "eval_avg_sts": 0.7723601548256038,
81
+ "eval_sickr_spearman": 0.7154485202800445,
82
+ "eval_stsb_spearman": 0.8292717893711632,
83
+ "step": 1125
84
+ },
85
+ {
86
+ "epoch": 0.08,
87
+ "eval_avg_sts": 0.7715988622174201,
88
+ "eval_sickr_spearman": 0.7117784157880568,
89
+ "eval_stsb_spearman": 0.8314193086467835,
90
+ "step": 1250
91
+ },
92
+ {
93
+ "epoch": 0.09,
94
+ "eval_avg_sts": 0.7699957963614845,
95
+ "eval_sickr_spearman": 0.7098231176796127,
96
+ "eval_stsb_spearman": 0.8301684750433563,
97
+ "step": 1375
98
+ },
99
+ {
100
+ "epoch": 0.1,
101
+ "learning_rate": 9.040061436068092e-06,
102
+ "loss": 0.0003,
103
+ "step": 1500
104
+ },
105
+ {
106
+ "epoch": 0.1,
107
+ "eval_avg_sts": 0.7732021256956165,
108
+ "eval_sickr_spearman": 0.7176254338905241,
109
+ "eval_stsb_spearman": 0.8287788175007089,
110
+ "step": 1500
111
+ },
112
+ {
113
+ "epoch": 0.1,
114
+ "eval_avg_sts": 0.7706739860482326,
115
+ "eval_sickr_spearman": 0.7126164144148476,
116
+ "eval_stsb_spearman": 0.8287315576816174,
117
+ "step": 1625
118
+ },
119
+ {
120
+ "epoch": 0.11,
121
+ "eval_avg_sts": 0.7719707314787304,
122
+ "eval_sickr_spearman": 0.7146769966976441,
123
+ "eval_stsb_spearman": 0.8292644662598168,
124
+ "step": 1750
125
+ },
126
+ {
127
+ "epoch": 0.12,
128
+ "eval_avg_sts": 0.7692985063340654,
129
+ "eval_sickr_spearman": 0.7109207724407779,
130
+ "eval_stsb_spearman": 0.827676240227353,
131
+ "step": 1875
132
+ },
133
+ {
134
+ "epoch": 0.13,
135
+ "learning_rate": 8.720081914757458e-06,
136
+ "loss": 0.0002,
137
+ "step": 2000
138
+ },
139
+ {
140
+ "epoch": 0.13,
141
+ "eval_avg_sts": 0.771889783054714,
142
+ "eval_sickr_spearman": 0.7135132511409,
143
+ "eval_stsb_spearman": 0.8302663149685279,
144
+ "step": 2000
145
+ },
146
+ {
147
+ "epoch": 0.14,
148
+ "eval_avg_sts": 0.7615509683620076,
149
+ "eval_sickr_spearman": 0.6917990552038427,
150
+ "eval_stsb_spearman": 0.8313028815201725,
151
+ "step": 2125
152
+ },
153
+ {
154
+ "epoch": 0.14,
155
+ "eval_avg_sts": 0.7642814292065848,
156
+ "eval_sickr_spearman": 0.6928178503087533,
157
+ "eval_stsb_spearman": 0.8357450081044163,
158
+ "step": 2250
159
+ },
160
+ {
161
+ "epoch": 0.15,
162
+ "eval_avg_sts": 0.7664718523224301,
163
+ "eval_sickr_spearman": 0.6978614041463637,
164
+ "eval_stsb_spearman": 0.8350823004984966,
165
+ "step": 2375
166
+ },
167
+ {
168
+ "epoch": 0.16,
169
+ "learning_rate": 8.400102393446819e-06,
170
+ "loss": 0.0002,
171
+ "step": 2500
172
+ },
173
+ {
174
+ "epoch": 0.16,
175
+ "eval_avg_sts": 0.7669677380011899,
176
+ "eval_sickr_spearman": 0.6978531908280179,
177
+ "eval_stsb_spearman": 0.8360822851743619,
178
+ "step": 2500
179
+ },
180
+ {
181
+ "epoch": 0.17,
182
+ "eval_avg_sts": 0.767509277729593,
183
+ "eval_sickr_spearman": 0.697946947538025,
184
+ "eval_stsb_spearman": 0.837071607921161,
185
+ "step": 2625
186
+ },
187
+ {
188
+ "epoch": 0.18,
189
+ "eval_avg_sts": 0.7668651085967232,
190
+ "eval_sickr_spearman": 0.6985169326187911,
191
+ "eval_stsb_spearman": 0.8352132845746553,
192
+ "step": 2750
193
+ },
194
+ {
195
+ "epoch": 0.18,
196
+ "eval_avg_sts": 0.7668897285698411,
197
+ "eval_sickr_spearman": 0.6986797580526662,
198
+ "eval_stsb_spearman": 0.835099699087016,
199
+ "step": 2875
200
+ },
201
+ {
202
+ "epoch": 0.19,
203
+ "learning_rate": 8.080122872136184e-06,
204
+ "loss": 0.0002,
205
+ "step": 3000
206
+ },
207
+ {
208
+ "epoch": 0.19,
209
+ "eval_avg_sts": 0.7678085026409753,
210
+ "eval_sickr_spearman": 0.6984642425005135,
211
+ "eval_stsb_spearman": 0.8371527627814372,
212
+ "step": 3000
213
+ },
214
+ {
215
+ "epoch": 0.2,
216
+ "eval_avg_sts": 0.7692156947271456,
217
+ "eval_sickr_spearman": 0.7026153304423024,
218
+ "eval_stsb_spearman": 0.8358160590119886,
219
+ "step": 3125
220
+ },
221
+ {
222
+ "epoch": 0.21,
223
+ "eval_avg_sts": 0.768843154466003,
224
+ "eval_sickr_spearman": 0.705912809649333,
225
+ "eval_stsb_spearman": 0.8317734992826732,
226
+ "step": 3250
227
+ },
228
+ {
229
+ "epoch": 0.22,
230
+ "eval_avg_sts": 0.7672560021396605,
231
+ "eval_sickr_spearman": 0.7075799211491486,
232
+ "eval_stsb_spearman": 0.8269320831301724,
233
+ "step": 3375
234
+ },
235
+ {
236
+ "epoch": 0.22,
237
+ "learning_rate": 7.760143350825547e-06,
238
+ "loss": 0.0002,
239
+ "step": 3500
240
+ },
241
+ {
242
+ "epoch": 0.22,
243
+ "eval_avg_sts": 0.7665000851835462,
244
+ "eval_sickr_spearman": 0.7056194356817491,
245
+ "eval_stsb_spearman": 0.8273807346853432,
246
+ "step": 3500
247
+ },
248
+ {
249
+ "epoch": 0.23,
250
+ "eval_avg_sts": 0.7675835380050031,
251
+ "eval_sickr_spearman": 0.706044462898375,
252
+ "eval_stsb_spearman": 0.8291226131116314,
253
+ "step": 3625
254
+ },
255
+ {
256
+ "epoch": 0.24,
257
+ "eval_avg_sts": 0.766064422853222,
258
+ "eval_sickr_spearman": 0.7050694795702827,
259
+ "eval_stsb_spearman": 0.8270593661361613,
260
+ "step": 3750
261
+ },
262
+ {
263
+ "epoch": 0.25,
264
+ "eval_avg_sts": 0.7680428355810465,
265
+ "eval_sickr_spearman": 0.7066562350373923,
266
+ "eval_stsb_spearman": 0.8294294361247007,
267
+ "step": 3875
268
+ },
269
+ {
270
+ "epoch": 0.26,
271
+ "learning_rate": 7.440163829514912e-06,
272
+ "loss": 0.0003,
273
+ "step": 4000
274
+ },
275
+ {
276
+ "epoch": 0.26,
277
+ "eval_avg_sts": 0.7706781402051571,
278
+ "eval_sickr_spearman": 0.7060189583835114,
279
+ "eval_stsb_spearman": 0.8353373220268029,
280
+ "step": 4000
281
+ },
282
+ {
283
+ "epoch": 0.26,
284
+ "eval_avg_sts": 0.7704250512723954,
285
+ "eval_sickr_spearman": 0.7065240054151332,
286
+ "eval_stsb_spearman": 0.8343260971296576,
287
+ "step": 4125
288
+ },
289
+ {
290
+ "epoch": 0.27,
291
+ "eval_avg_sts": 0.7676150118719631,
292
+ "eval_sickr_spearman": 0.7018462564460752,
293
+ "eval_stsb_spearman": 0.833383767297851,
294
+ "step": 4250
295
+ },
296
+ {
297
+ "epoch": 0.28,
298
+ "eval_avg_sts": 0.7672794331395227,
299
+ "eval_sickr_spearman": 0.7023547617170005,
300
+ "eval_stsb_spearman": 0.832204104562045,
301
+ "step": 4375
302
+ },
303
+ {
304
+ "epoch": 0.29,
305
+ "learning_rate": 7.120184308204276e-06,
306
+ "loss": 0.0003,
307
+ "step": 4500
308
+ },
309
+ {
310
+ "epoch": 0.29,
311
+ "eval_avg_sts": 0.7603546899710306,
312
+ "eval_sickr_spearman": 0.6936834187877694,
313
+ "eval_stsb_spearman": 0.8270259611542918,
314
+ "step": 4500
315
+ },
316
+ {
317
+ "epoch": 0.3,
318
+ "eval_avg_sts": 0.7604700472645542,
319
+ "eval_sickr_spearman": 0.6935327932536596,
320
+ "eval_stsb_spearman": 0.8274073012754489,
321
+ "step": 4625
322
+ },
323
+ {
324
+ "epoch": 0.3,
325
+ "eval_avg_sts": 0.761349850549489,
326
+ "eval_sickr_spearman": 0.6934180469523239,
327
+ "eval_stsb_spearman": 0.8292816541466541,
328
+ "step": 4750
329
+ },
330
+ {
331
+ "epoch": 0.31,
332
+ "eval_avg_sts": 0.7624410643103536,
333
+ "eval_sickr_spearman": 0.6947914482668454,
334
+ "eval_stsb_spearman": 0.830090680353862,
335
+ "step": 4875
336
+ },
337
+ {
338
+ "epoch": 0.32,
339
+ "learning_rate": 6.800204786893639e-06,
340
+ "loss": 0.0001,
341
+ "step": 5000
342
+ },
343
+ {
344
+ "epoch": 0.32,
345
+ "eval_avg_sts": 0.7625184502556002,
346
+ "eval_sickr_spearman": 0.6949592209041686,
347
+ "eval_stsb_spearman": 0.8300776796070318,
348
+ "step": 5000
349
+ },
350
+ {
351
+ "epoch": 0.33,
352
+ "eval_avg_sts": 0.7620233259863773,
353
+ "eval_sickr_spearman": 0.6943664690813208,
354
+ "eval_stsb_spearman": 0.829680182891434,
355
+ "step": 5125
356
+ },
357
+ {
358
+ "epoch": 0.34,
359
+ "eval_avg_sts": 0.7688935367788103,
360
+ "eval_sickr_spearman": 0.7047914275240574,
361
+ "eval_stsb_spearman": 0.8329956460335631,
362
+ "step": 5250
363
+ },
364
+ {
365
+ "epoch": 0.34,
366
+ "eval_avg_sts": 0.7721471528975761,
367
+ "eval_sickr_spearman": 0.7084637894778126,
368
+ "eval_stsb_spearman": 0.8358305163173394,
369
+ "step": 5375
370
+ },
371
+ {
372
+ "epoch": 0.35,
373
+ "learning_rate": 6.480225265583003e-06,
374
+ "loss": 0.0002,
375
+ "step": 5500
376
+ },
377
+ {
378
+ "epoch": 0.35,
379
+ "eval_avg_sts": 0.7736568332643656,
380
+ "eval_sickr_spearman": 0.7087314187750259,
381
+ "eval_stsb_spearman": 0.8385822477537053,
382
+ "step": 5500
383
+ },
384
+ {
385
+ "epoch": 0.36,
386
+ "eval_avg_sts": 0.7734070993186029,
387
+ "eval_sickr_spearman": 0.7073194965171511,
388
+ "eval_stsb_spearman": 0.8394947021200546,
389
+ "step": 5625
390
+ },
391
+ {
392
+ "epoch": 0.37,
393
+ "eval_avg_sts": 0.7727328180179123,
394
+ "eval_sickr_spearman": 0.7090136014959747,
395
+ "eval_stsb_spearman": 0.83645203453985,
396
+ "step": 5750
397
+ },
398
+ {
399
+ "epoch": 0.38,
400
+ "eval_avg_sts": 0.7702894469668818,
401
+ "eval_sickr_spearman": 0.7065904804595234,
402
+ "eval_stsb_spearman": 0.8339884134742402,
403
+ "step": 5875
404
+ },
405
+ {
406
+ "epoch": 0.38,
407
+ "learning_rate": 6.1602457442723675e-06,
408
+ "loss": 0.0001,
409
+ "step": 6000
410
+ },
411
+ {
412
+ "epoch": 0.38,
413
+ "eval_avg_sts": 0.7673639531247151,
414
+ "eval_sickr_spearman": 0.7063840427855425,
415
+ "eval_stsb_spearman": 0.8283438634638877,
416
+ "step": 6000
417
+ },
418
+ {
419
+ "epoch": 0.39,
420
+ "eval_avg_sts": 0.7656242558893468,
421
+ "eval_sickr_spearman": 0.7046311957696599,
422
+ "eval_stsb_spearman": 0.8266173160090337,
423
+ "step": 6125
424
+ },
425
+ {
426
+ "epoch": 0.4,
427
+ "eval_avg_sts": 0.7678162328535889,
428
+ "eval_sickr_spearman": 0.7065671853753259,
429
+ "eval_stsb_spearman": 0.8290652803318519,
430
+ "step": 6250
431
+ },
432
+ {
433
+ "epoch": 0.41,
434
+ "eval_avg_sts": 0.7659380803169125,
435
+ "eval_sickr_spearman": 0.7034162970898843,
436
+ "eval_stsb_spearman": 0.8284598635439405,
437
+ "step": 6375
438
+ },
439
+ {
440
+ "epoch": 0.42,
441
+ "learning_rate": 5.840266222961732e-06,
442
+ "loss": 0.0002,
443
+ "step": 6500
444
+ },
445
+ {
446
+ "epoch": 0.42,
447
+ "eval_avg_sts": 0.7674929110456945,
448
+ "eval_sickr_spearman": 0.7043084267679958,
449
+ "eval_stsb_spearman": 0.8306773953233932,
450
+ "step": 6500
451
+ },
452
+ {
453
+ "epoch": 0.42,
454
+ "eval_avg_sts": 0.7674747228197092,
455
+ "eval_sickr_spearman": 0.7030638928986329,
456
+ "eval_stsb_spearman": 0.8318855527407855,
457
+ "step": 6625
458
+ },
459
+ {
460
+ "epoch": 0.43,
461
+ "eval_avg_sts": 0.7669269125478364,
462
+ "eval_sickr_spearman": 0.7022142707452941,
463
+ "eval_stsb_spearman": 0.8316395543503788,
464
+ "step": 6750
465
+ },
466
+ {
467
+ "epoch": 0.44,
468
+ "eval_avg_sts": 0.7663176164301795,
469
+ "eval_sickr_spearman": 0.7013282410170653,
470
+ "eval_stsb_spearman": 0.8313069918432938,
471
+ "step": 6875
472
+ },
473
+ {
474
+ "epoch": 0.45,
475
+ "learning_rate": 5.520286701651095e-06,
476
+ "loss": 0.0001,
477
+ "step": 7000
478
+ },
479
+ {
480
+ "epoch": 0.45,
481
+ "eval_avg_sts": 0.7679613274313469,
482
+ "eval_sickr_spearman": 0.705412661790058,
483
+ "eval_stsb_spearman": 0.8305099930726358,
484
+ "step": 7000
485
+ },
486
+ {
487
+ "epoch": 0.46,
488
+ "eval_avg_sts": 0.7677041476146869,
489
+ "eval_sickr_spearman": 0.7044645758787711,
490
+ "eval_stsb_spearman": 0.8309437193506026,
491
+ "step": 7125
492
+ },
493
+ {
494
+ "epoch": 0.46,
495
+ "eval_avg_sts": 0.771967747158485,
496
+ "eval_sickr_spearman": 0.7112394107677183,
497
+ "eval_stsb_spearman": 0.8326960835492517,
498
+ "step": 7250
499
+ },
500
+ {
501
+ "epoch": 0.47,
502
+ "eval_avg_sts": 0.7727980509536904,
503
+ "eval_sickr_spearman": 0.7122028666314655,
504
+ "eval_stsb_spearman": 0.8333932352759152,
505
+ "step": 7375
506
+ },
507
+ {
508
+ "epoch": 0.48,
509
+ "learning_rate": 5.200307180340458e-06,
510
+ "loss": 0.0002,
511
+ "step": 7500
512
+ },
513
+ {
514
+ "epoch": 0.48,
515
+ "eval_avg_sts": 0.7716808664260861,
516
+ "eval_sickr_spearman": 0.7073300153283661,
517
+ "eval_stsb_spearman": 0.8360317175238062,
518
+ "step": 7500
519
+ },
520
+ {
521
+ "epoch": 0.49,
522
+ "eval_avg_sts": 0.7749475591557311,
523
+ "eval_sickr_spearman": 0.7102010744168291,
524
+ "eval_stsb_spearman": 0.8396940438946332,
525
+ "step": 7625
526
+ },
527
+ {
528
+ "epoch": 0.5,
529
+ "eval_avg_sts": 0.7741400965390657,
530
+ "eval_sickr_spearman": 0.7103573195898074,
531
+ "eval_stsb_spearman": 0.8379228734883241,
532
+ "step": 7750
533
+ },
534
+ {
535
+ "epoch": 0.5,
536
+ "eval_avg_sts": 0.7742400629462907,
537
+ "eval_sickr_spearman": 0.7103860902195688,
538
+ "eval_stsb_spearman": 0.8380940356730125,
539
+ "step": 7875
540
+ },
541
+ {
542
+ "epoch": 0.51,
543
+ "learning_rate": 4.8803276590298225e-06,
544
+ "loss": 0.0001,
545
+ "step": 8000
546
+ },
547
+ {
548
+ "epoch": 0.51,
549
+ "eval_avg_sts": 0.7737984133316254,
550
+ "eval_sickr_spearman": 0.7100542913708344,
551
+ "eval_stsb_spearman": 0.8375425352924163,
552
+ "step": 8000
553
+ },
554
+ {
555
+ "epoch": 0.52,
556
+ "eval_avg_sts": 0.7736256294772575,
557
+ "eval_sickr_spearman": 0.7117209225596355,
558
+ "eval_stsb_spearman": 0.8355303363948796,
559
+ "step": 8125
560
+ },
561
+ {
562
+ "epoch": 0.53,
563
+ "eval_avg_sts": 0.7739892471083305,
564
+ "eval_sickr_spearman": 0.7121400419507844,
565
+ "eval_stsb_spearman": 0.8358384522658765,
566
+ "step": 8250
567
+ },
568
+ {
569
+ "epoch": 0.54,
570
+ "eval_avg_sts": 0.771587372188157,
571
+ "eval_sickr_spearman": 0.7086112929903291,
572
+ "eval_stsb_spearman": 0.834563451385985,
573
+ "step": 8375
574
+ },
575
+ {
576
+ "epoch": 0.54,
577
+ "learning_rate": 4.560348137719187e-06,
578
+ "loss": 0.0001,
579
+ "step": 8500
580
+ },
581
+ {
582
+ "epoch": 0.54,
583
+ "eval_avg_sts": 0.7717136828796769,
584
+ "eval_sickr_spearman": 0.7081623943162886,
585
+ "eval_stsb_spearman": 0.8352649714430652,
586
+ "step": 8500
587
+ },
588
+ {
589
+ "epoch": 0.55,
590
+ "eval_avg_sts": 0.7700399485422347,
591
+ "eval_sickr_spearman": 0.7062681917688737,
592
+ "eval_stsb_spearman": 0.8338117053155956,
593
+ "step": 8625
594
+ },
595
+ {
596
+ "epoch": 0.56,
597
+ "eval_avg_sts": 0.7655558973491845,
598
+ "eval_sickr_spearman": 0.7032426646581855,
599
+ "eval_stsb_spearman": 0.8278691300401836,
600
+ "step": 8750
601
+ },
602
+ {
603
+ "epoch": 0.57,
604
+ "eval_avg_sts": 0.7691329874077902,
605
+ "eval_sickr_spearman": 0.7023757513083291,
606
+ "eval_stsb_spearman": 0.8358902235072514,
607
+ "step": 8875
608
+ },
609
+ {
610
+ "epoch": 0.58,
611
+ "learning_rate": 4.24036861640855e-06,
612
+ "loss": 0.0001,
613
+ "step": 9000
614
+ },
615
+ {
616
+ "epoch": 0.58,
617
+ "eval_avg_sts": 0.7696286255223032,
618
+ "eval_sickr_spearman": 0.7031883895135604,
619
+ "eval_stsb_spearman": 0.8360688615310459,
620
+ "step": 9000
621
+ },
622
+ {
623
+ "epoch": 0.58,
624
+ "eval_avg_sts": 0.7696384100854612,
625
+ "eval_sickr_spearman": 0.7036895940570672,
626
+ "eval_stsb_spearman": 0.835587226113855,
627
+ "step": 9125
628
+ },
629
+ {
630
+ "epoch": 0.59,
631
+ "eval_avg_sts": 0.7688416464364851,
632
+ "eval_sickr_spearman": 0.7023802182007628,
633
+ "eval_stsb_spearman": 0.8353030746722075,
634
+ "step": 9250
635
+ },
636
+ {
637
+ "epoch": 0.6,
638
+ "eval_avg_sts": 0.7660994859121804,
639
+ "eval_sickr_spearman": 0.7051366750811947,
640
+ "eval_stsb_spearman": 0.8270622967431659,
641
+ "step": 9375
642
+ },
643
+ {
644
+ "epoch": 0.61,
645
+ "learning_rate": 3.920389095097914e-06,
646
+ "loss": 0.0002,
647
+ "step": 9500
648
+ },
649
+ {
650
+ "epoch": 0.61,
651
+ "eval_avg_sts": 0.7673638146997882,
652
+ "eval_sickr_spearman": 0.7064614138295504,
653
+ "eval_stsb_spearman": 0.828266215570026,
654
+ "step": 9500
655
+ },
656
+ {
657
+ "epoch": 0.62,
658
+ "eval_avg_sts": 0.7688152733727235,
659
+ "eval_sickr_spearman": 0.707200523478889,
660
+ "eval_stsb_spearman": 0.830430023266558,
661
+ "step": 9625
662
+ },
663
+ {
664
+ "epoch": 0.62,
665
+ "eval_avg_sts": 0.7697361419751771,
666
+ "eval_sickr_spearman": 0.7075413521746937,
667
+ "eval_stsb_spearman": 0.8319309317756607,
668
+ "step": 9750
669
+ },
670
+ {
671
+ "epoch": 0.63,
672
+ "eval_avg_sts": 0.7718885121529019,
673
+ "eval_sickr_spearman": 0.7071343846522086,
674
+ "eval_stsb_spearman": 0.8366426396535952,
675
+ "step": 9875
676
+ },
677
+ {
678
+ "epoch": 0.64,
679
+ "learning_rate": 3.600409573787278e-06,
680
+ "loss": 0.0001,
681
+ "step": 10000
682
+ },
683
+ {
684
+ "epoch": 0.64,
685
+ "eval_avg_sts": 0.7727693021998165,
686
+ "eval_sickr_spearman": 0.7095777267823654,
687
+ "eval_stsb_spearman": 0.8359608776172676,
688
+ "step": 10000
689
+ },
690
+ {
691
+ "epoch": 0.65,
692
+ "eval_avg_sts": 0.7717316927837671,
693
+ "eval_sickr_spearman": 0.7084112434528395,
694
+ "eval_stsb_spearman": 0.8350521421146947,
695
+ "step": 10125
696
+ },
697
+ {
698
+ "epoch": 0.66,
699
+ "eval_avg_sts": 0.7720632550487434,
700
+ "eval_sickr_spearman": 0.7069583986965393,
701
+ "eval_stsb_spearman": 0.8371681114009475,
702
+ "step": 10250
703
+ },
704
+ {
705
+ "epoch": 0.66,
706
+ "eval_avg_sts": 0.7716685440161564,
707
+ "eval_sickr_spearman": 0.7068186762224559,
708
+ "eval_stsb_spearman": 0.836518411809857,
709
+ "step": 10375
710
+ },
711
+ {
712
+ "epoch": 0.67,
713
+ "learning_rate": 3.280430052476642e-06,
714
+ "loss": 0.0001,
715
+ "step": 10500
716
+ },
717
+ {
718
+ "epoch": 0.67,
719
+ "eval_avg_sts": 0.7706994083764094,
720
+ "eval_sickr_spearman": 0.7044442106917614,
721
+ "eval_stsb_spearman": 0.8369546060610572,
722
+ "step": 10500
723
+ },
724
+ {
725
+ "epoch": 0.68,
726
+ "eval_avg_sts": 0.770376770388492,
727
+ "eval_sickr_spearman": 0.704766259226904,
728
+ "eval_stsb_spearman": 0.83598728155008,
729
+ "step": 10625
730
+ },
731
+ {
732
+ "epoch": 0.69,
733
+ "eval_avg_sts": 0.7700945307869602,
734
+ "eval_sickr_spearman": 0.7045319154829873,
735
+ "eval_stsb_spearman": 0.8356571460909332,
736
+ "step": 10750
737
+ },
738
+ {
739
+ "epoch": 0.7,
740
+ "eval_avg_sts": 0.7701867471043669,
741
+ "eval_sickr_spearman": 0.7049941187721263,
742
+ "eval_stsb_spearman": 0.8353793754366073,
743
+ "step": 10875
744
+ },
745
+ {
746
+ "epoch": 0.7,
747
+ "learning_rate": 2.960450531166006e-06,
748
+ "loss": 0.0001,
749
+ "step": 11000
750
+ },
751
+ {
752
+ "epoch": 0.7,
753
+ "eval_avg_sts": 0.7700153990116653,
754
+ "eval_sickr_spearman": 0.7048114084622558,
755
+ "eval_stsb_spearman": 0.8352193895610748,
756
+ "step": 11000
757
+ },
758
+ {
759
+ "epoch": 0.71,
760
+ "eval_avg_sts": 0.7693725150897858,
761
+ "eval_sickr_spearman": 0.7057958058862298,
762
+ "eval_stsb_spearman": 0.8329492242933417,
763
+ "step": 11125
764
+ },
765
+ {
766
+ "epoch": 0.72,
767
+ "eval_avg_sts": 0.7696360829638863,
768
+ "eval_sickr_spearman": 0.7062946088746646,
769
+ "eval_stsb_spearman": 0.832977557053108,
770
+ "step": 11250
771
+ },
772
+ {
773
+ "epoch": 0.73,
774
+ "eval_avg_sts": 0.7709027781052127,
775
+ "eval_sickr_spearman": 0.7053959469667576,
776
+ "eval_stsb_spearman": 0.8364096092436679,
777
+ "step": 11375
778
+ },
779
+ {
780
+ "epoch": 0.74,
781
+ "learning_rate": 2.640471009855369e-06,
782
+ "loss": 0.0002,
783
+ "step": 11500
784
+ },
785
+ {
786
+ "epoch": 0.74,
787
+ "eval_avg_sts": 0.7705889705773711,
788
+ "eval_sickr_spearman": 0.7048775472889359,
789
+ "eval_stsb_spearman": 0.8363003938658062,
790
+ "step": 11500
791
+ },
792
+ {
793
+ "epoch": 0.74,
794
+ "eval_avg_sts": 0.7756785758354756,
795
+ "eval_sickr_spearman": 0.7125353379156201,
796
+ "eval_stsb_spearman": 0.8388218137553312,
797
+ "step": 11625
798
+ },
799
+ {
800
+ "epoch": 0.75,
801
+ "eval_avg_sts": 0.775711044555684,
802
+ "eval_sickr_spearman": 0.7125440315449804,
803
+ "eval_stsb_spearman": 0.8388780575663876,
804
+ "step": 11750
805
+ },
806
+ {
807
+ "epoch": 0.76,
808
+ "eval_avg_sts": 0.7756594965556575,
809
+ "eval_sickr_spearman": 0.710795363234923,
810
+ "eval_stsb_spearman": 0.840523629876392,
811
+ "step": 11875
812
+ },
813
+ {
814
+ "epoch": 0.77,
815
+ "learning_rate": 2.3204914885447333e-06,
816
+ "loss": 0.0002,
817
+ "step": 12000
818
+ },
819
+ {
820
+ "epoch": 0.77,
821
+ "eval_avg_sts": 0.778271737124629,
822
+ "eval_sickr_spearman": 0.7131796271103109,
823
+ "eval_stsb_spearman": 0.8433638471389473,
824
+ "step": 12000
825
+ },
826
+ {
827
+ "epoch": 0.78,
828
+ "eval_avg_sts": 0.7778824941586701,
829
+ "eval_sickr_spearman": 0.7129357251772082,
830
+ "eval_stsb_spearman": 0.842829263140132,
831
+ "step": 12125
832
+ },
833
+ {
834
+ "epoch": 0.78,
835
+ "eval_avg_sts": 0.7791778033988318,
836
+ "eval_sickr_spearman": 0.7154212866455291,
837
+ "eval_stsb_spearman": 0.8429343201521345,
838
+ "step": 12250
839
+ },
840
+ {
841
+ "epoch": 0.79,
842
+ "eval_avg_sts": 0.7784491802919915,
843
+ "eval_sickr_spearman": 0.7169989642344666,
844
+ "eval_stsb_spearman": 0.8398993963495163,
845
+ "step": 12375
846
+ },
847
+ {
848
+ "epoch": 0.8,
849
+ "learning_rate": 2.000511967234097e-06,
850
+ "loss": 0.0003,
851
+ "step": 12500
852
+ },
853
+ {
854
+ "epoch": 0.8,
855
+ "eval_avg_sts": 0.7782960644263726,
856
+ "eval_sickr_spearman": 0.7171246616269303,
857
+ "eval_stsb_spearman": 0.8394674672258147,
858
+ "step": 12500
859
+ },
860
+ {
861
+ "epoch": 0.81,
862
+ "eval_avg_sts": 0.7777553880232895,
863
+ "eval_sickr_spearman": 0.7163744638535685,
864
+ "eval_stsb_spearman": 0.8391363121930105,
865
+ "step": 12625
866
+ },
867
+ {
868
+ "epoch": 0.82,
869
+ "eval_avg_sts": 0.7768079269313797,
870
+ "eval_sickr_spearman": 0.7156439108006947,
871
+ "eval_stsb_spearman": 0.8379719430620648,
872
+ "step": 12750
873
+ },
874
+ {
875
+ "epoch": 0.82,
876
+ "eval_avg_sts": 0.7769940996454496,
877
+ "eval_sickr_spearman": 0.7154824302376598,
878
+ "eval_stsb_spearman": 0.8385057690532396,
879
+ "step": 12875
880
+ },
881
+ {
882
+ "epoch": 0.83,
883
+ "learning_rate": 1.6805324459234608e-06,
884
+ "loss": 0.0002,
885
+ "step": 13000
886
+ },
887
+ {
888
+ "epoch": 0.83,
889
+ "eval_avg_sts": 0.7773811376862301,
890
+ "eval_sickr_spearman": 0.7153101426768013,
891
+ "eval_stsb_spearman": 0.839452132695659,
892
+ "step": 13000
893
+ },
894
+ {
895
+ "epoch": 0.84,
896
+ "eval_avg_sts": 0.7765844510293946,
897
+ "eval_sickr_spearman": 0.713703022022682,
898
+ "eval_stsb_spearman": 0.8394658800361071,
899
+ "step": 13125
900
+ },
901
+ {
902
+ "epoch": 0.85,
903
+ "eval_avg_sts": 0.7754935189329493,
904
+ "eval_sickr_spearman": 0.7128497975367355,
905
+ "eval_stsb_spearman": 0.838137240329163,
906
+ "step": 13250
907
+ },
908
+ {
909
+ "epoch": 0.86,
910
+ "eval_avg_sts": 0.7753794499527557,
911
+ "eval_sickr_spearman": 0.7128604124101533,
912
+ "eval_stsb_spearman": 0.837898487495358,
913
+ "step": 13375
914
+ },
915
+ {
916
+ "epoch": 0.86,
917
+ "learning_rate": 1.3605529246128248e-06,
918
+ "loss": 0.0001,
919
+ "step": 13500
920
+ },
921
+ {
922
+ "epoch": 0.86,
923
+ "eval_avg_sts": 0.7753019292848522,
924
+ "eval_sickr_spearman": 0.7128690099773107,
925
+ "eval_stsb_spearman": 0.8377348485923936,
926
+ "step": 13500
927
+ },
928
+ {
929
+ "epoch": 0.87,
930
+ "eval_avg_sts": 0.7751785379635656,
931
+ "eval_sickr_spearman": 0.7127431615818965,
932
+ "eval_stsb_spearman": 0.8376139143452346,
933
+ "step": 13625
934
+ },
935
+ {
936
+ "epoch": 0.88,
937
+ "eval_avg_sts": 0.7751092404866357,
938
+ "eval_sickr_spearman": 0.712538940248228,
939
+ "eval_stsb_spearman": 0.8376795407250434,
940
+ "step": 13750
941
+ },
942
+ {
943
+ "epoch": 0.89,
944
+ "eval_avg_sts": 0.7748652873780293,
945
+ "eval_sickr_spearman": 0.7121167948976883,
946
+ "eval_stsb_spearman": 0.8376137798583702,
947
+ "step": 13875
948
+ },
949
+ {
950
+ "epoch": 0.9,
951
+ "learning_rate": 1.0405734033021888e-06,
952
+ "loss": 0.0,
953
+ "step": 14000
954
+ },
955
+ {
956
+ "epoch": 0.9,
957
+ "eval_avg_sts": 0.7749284965089291,
958
+ "eval_sickr_spearman": 0.7121870643990923,
959
+ "eval_stsb_spearman": 0.8376699286187659,
960
+ "step": 14000
961
+ },
962
+ {
963
+ "epoch": 0.9,
964
+ "eval_avg_sts": 0.7736810149784585,
965
+ "eval_sickr_spearman": 0.7101105357906183,
966
+ "eval_stsb_spearman": 0.8372514941662987,
967
+ "step": 14125
968
+ },
969
+ {
970
+ "epoch": 0.91,
971
+ "eval_avg_sts": 0.7736952566877616,
972
+ "eval_sickr_spearman": 0.7101496811382905,
973
+ "eval_stsb_spearman": 0.8372408322372328,
974
+ "step": 14250
975
+ },
976
+ {
977
+ "epoch": 0.92,
978
+ "eval_avg_sts": 0.7737241698204669,
979
+ "eval_sickr_spearman": 0.7102500181091946,
980
+ "eval_stsb_spearman": 0.837198321531739,
981
+ "step": 14375
982
+ },
983
+ {
984
+ "epoch": 0.93,
985
+ "learning_rate": 7.205938819915525e-07,
986
+ "loss": 0.0001,
987
+ "step": 14500
988
+ },
989
+ {
990
+ "epoch": 0.93,
991
+ "eval_avg_sts": 0.7737795026917761,
992
+ "eval_sickr_spearman": 0.7103373866827106,
993
+ "eval_stsb_spearman": 0.8372216187008416,
994
+ "step": 14500
995
+ },
996
+ {
997
+ "epoch": 0.94,
998
+ "eval_avg_sts": 0.7736836444786954,
999
+ "eval_sickr_spearman": 0.7101872414596151,
1000
+ "eval_stsb_spearman": 0.8371800474977756,
1001
+ "step": 14625
1002
+ },
1003
+ {
1004
+ "epoch": 0.94,
1005
+ "eval_avg_sts": 0.7734449184616194,
1006
+ "eval_sickr_spearman": 0.7102963681220825,
1007
+ "eval_stsb_spearman": 0.8365934688011564,
1008
+ "step": 14750
1009
+ },
1010
+ {
1011
+ "epoch": 0.95,
1012
+ "eval_avg_sts": 0.7733655720089518,
1013
+ "eval_sickr_spearman": 0.7101887784548611,
1014
+ "eval_stsb_spearman": 0.8365423655630426,
1015
+ "step": 14875
1016
+ },
1017
+ {
1018
+ "epoch": 0.96,
1019
+ "learning_rate": 4.0061436068091647e-07,
1020
+ "loss": 0.0001,
1021
+ "step": 15000
1022
+ },
1023
+ {
1024
+ "epoch": 0.96,
1025
+ "eval_avg_sts": 0.7735006567182534,
1026
+ "eval_sickr_spearman": 0.7106506935573914,
1027
+ "eval_stsb_spearman": 0.8363506198791154,
1028
+ "step": 15000
1029
+ },
1030
+ {
1031
+ "epoch": 0.97,
1032
+ "eval_avg_sts": 0.7733751888661335,
1033
+ "eval_sickr_spearman": 0.710443343292483,
1034
+ "eval_stsb_spearman": 0.8363070344397839,
1035
+ "step": 15125
1036
+ },
1037
+ {
1038
+ "epoch": 0.98,
1039
+ "eval_avg_sts": 0.7736170866924498,
1040
+ "eval_sickr_spearman": 0.7105904625561881,
1041
+ "eval_stsb_spearman": 0.8366437108287115,
1042
+ "step": 15250
1043
+ },
1044
+ {
1045
+ "epoch": 0.98,
1046
+ "eval_avg_sts": 0.7738120865784781,
1047
+ "eval_sickr_spearman": 0.7107394070017475,
1048
+ "eval_stsb_spearman": 0.8368847661552088,
1049
+ "step": 15375
1050
+ },
1051
+ {
1052
+ "epoch": 0.99,
1053
+ "learning_rate": 8.06348393702803e-08,
1054
+ "loss": 0.0002,
1055
+ "step": 15500
1056
+ },
1057
+ {
1058
+ "epoch": 0.99,
1059
+ "eval_avg_sts": 0.7737627481564555,
1060
+ "eval_sickr_spearman": 0.710675861854545,
1061
+ "eval_stsb_spearman": 0.8368496344583661,
1062
+ "step": 15500
1063
+ },
1064
+ {
1065
+ "epoch": 1.0,
1066
+ "eval_avg_sts": 0.7737442273397684,
1067
+ "eval_sickr_spearman": 0.7106493006554496,
1068
+ "eval_stsb_spearman": 0.8368391540240873,
1069
+ "step": 15625
1070
+ },
1071
+ {
1072
+ "epoch": 1.0,
1073
+ "step": 15626,
1074
+ "train_runtime": 3862.446,
1075
+ "train_samples_per_second": 4.046
1076
+ }
1077
+ ],
1078
+ "max_steps": 15626,
1079
+ "num_train_epochs": 1,
1080
+ "total_flos": 48090729552850944,
1081
+ "trial_name": null,
1082
+ "trial_params": null
1083
+ }
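
trainer_state.json keeps the full evaluation trace (STS-B and SICK-R Spearman every 125 steps, plus periodic loss/learning-rate entries). As a sketch, a small helper for recovering the best dev checkpoint from `log_history`; with the values above it reports step 12000 with 0.8433638471389473, matching `best_metric`:

```python
import json

# Sketch: locate the best STS-B dev Spearman recorded in log_history.
with open("trainer_state.json") as fh:
    state = json.load(fh)

evals = [e for e in state["log_history"] if "eval_stsb_spearman" in e]
best = max(evals, key=lambda e: e["eval_stsb_spearman"])
print(best["step"], best["eval_stsb_spearman"])  # 12000 0.8433638471389473
```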
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10411d805de1d2657380d417e399fbf47a891c0681176da1ababf6ca68326c35
+ size 2095
vocab.json ADDED
The diff for this file is too large to render. See raw diff