dima806 committed on
Commit b97dac8 · verified · 1 Parent(s): cd9bd2f

Upload folder using huggingface_hub

checkpoint-4230/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Blazer",
+     "1": "Coat",
+     "2": "Denim Jacket",
+     "3": "Dresses",
+     "4": "Hoodie",
+     "5": "Jacket",
+     "6": "Jeans",
+     "7": "Long Pants",
+     "8": "Polo",
+     "9": "Shirt",
+     "10": "Shorts",
+     "11": "Skirt",
+     "12": "Sports Jacket",
+     "13": "Sweater",
+     "14": "T-shirt"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Blazer": 0,
+     "Coat": 1,
+     "Denim Jacket": 2,
+     "Dresses": 3,
+     "Hoodie": 4,
+     "Jacket": 5,
+     "Jeans": 6,
+     "Long Pants": 7,
+     "Polo": 8,
+     "Shirt": 9,
+     "Shorts": 10,
+     "Skirt": 11,
+     "Sports Jacket": 12,
+     "Sweater": 13,
+     "T-shirt": 14
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3"
+ }
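The added config pairs the google/vit-base-patch16-224-in21k backbone with 15 clothing labels via id2label/label2id. A minimal inference sketch, assuming the checkpoint folder is available locally and that transformers plus Pillow are installed; paths and the example image are assumptions, not part of the commit:

```python
# Hedged sketch: load this checkpoint and classify one image.
# Only the fields shown in config.json above come from the commit.
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

checkpoint_dir = "checkpoint-4230"  # assumed local copy of the folder in this commit
processor = ViTImageProcessor.from_pretrained(checkpoint_dir)
model = ViTForImageClassification.from_pretrained(checkpoint_dir)

image = Image.open("example.jpg").convert("RGB")  # placeholder image
inputs = processor(images=image, return_tensors="pt")
predicted_id = model(**inputs).logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # one of the 15 labels, e.g. "Hoodie"
```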
checkpoint-4230/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23a080e7e8acf4c1d4e7f178e05dbec278d9a1156545ebd583c53dcfcf2495d1
+ size 343263964
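The three lines above are a Git LFS pointer: the repository stores only the SHA-256 and size, not the weights themselves. A small verification sketch, assuming the real blob has been pulled to the path below (filename and chunk size are assumptions):

```python
# Hedged sketch: check a downloaded blob against the LFS pointer above.
import hashlib
import os

path = "checkpoint-4230/model.safetensors"  # assumed location after `git lfs pull`
expected_sha256 = "23a080e7e8acf4c1d4e7f178e05dbec278d9a1156545ebd583c53dcfcf2495d1"
expected_size = 343263964

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

print("sha256 ok:", digest.hexdigest() == expected_sha256)
print("size ok:", os.path.getsize(path) == expected_size)
```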
checkpoint-4230/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff59a8600ca155cb1bbfe7a4846a08ebcbb5a84840aaaab7915910ee37770085
+ size 686648762
checkpoint-4230/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
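This preprocessor resizes inputs to 224×224, rescales pixel values by 1/255, and normalizes each channel with mean and std 0.5. A tiny sketch of what those numbers do to a single pixel, independent of any library (the example value 200 is arbitrary):

```python
# Hedged sketch: arithmetic implied by preprocessor_config.json above.
# rescale_factor = 1/255, then (x - mean) / std with mean = std = 0.5 per channel.
raw_pixel = 200                              # an 8-bit channel value in [0, 255]
rescaled = raw_pixel * 0.00392156862745098   # -> ~0.784
normalized = (rescaled - 0.5) / 0.5          # -> ~0.569, roughly in [-1, 1]
print(rescaled, normalized)
```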
checkpoint-4230/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0473c83597605b4d7f3bd4a45b4b7be9945f3aa5f4d9b4adc9bd6cf9bdc3749f
+ size 14244
checkpoint-4230/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f179d87a714db15290527477f9cbdef82e1b1a97770ab6c3e413991c9939ad59
+ size 1064
checkpoint-4230/trainer_state.json ADDED
@@ -0,0 +1,389 @@
+ {
+   "best_metric": 1.43010413646698,
+   "best_model_checkpoint": "clothes_image_detection/checkpoint-4230",
+   "epoch": 30.0,
+   "eval_steps": 500,
+   "global_step": 4230,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.21566666666666667,
+       "eval_loss": 2.645193338394165,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 60.5917,
+       "eval_samples_per_second": 49.512,
+       "eval_steps_per_second": 6.189,
+       "step": 141
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.38333333333333336,
+       "eval_loss": 2.5685439109802246,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.8482,
+       "eval_samples_per_second": 50.127,
+       "eval_steps_per_second": 6.266,
+       "step": 282
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5196666666666667,
+       "eval_loss": 2.4840424060821533,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 56.8548,
+       "eval_samples_per_second": 52.766,
+       "eval_steps_per_second": 6.596,
+       "step": 423
+     },
+     {
+       "epoch": 3.546099290780142,
+       "grad_norm": 1.593246579170227,
+       "learning_rate": 2.6770334928229665e-06,
+       "loss": 2.5827,
+       "step": 500
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.594,
+       "eval_loss": 2.3924148082733154,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.5738,
+       "eval_samples_per_second": 50.358,
+       "eval_steps_per_second": 6.295,
+       "step": 564
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.637,
+       "eval_loss": 2.2984158992767334,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.8179,
+       "eval_samples_per_second": 50.152,
+       "eval_steps_per_second": 6.269,
+       "step": 705
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.6786666666666666,
+       "eval_loss": 2.204831123352051,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.5967,
+       "eval_samples_per_second": 50.338,
+       "eval_steps_per_second": 6.292,
+       "step": 846
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.7026666666666667,
+       "eval_loss": 2.114955186843872,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 58.0933,
+       "eval_samples_per_second": 51.641,
+       "eval_steps_per_second": 6.455,
+       "step": 987
+     },
+     {
+       "epoch": 7.092198581560283,
+       "grad_norm": 1.663735032081604,
+       "learning_rate": 2.318181818181818e-06,
+       "loss": 2.2373,
+       "step": 1000
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.7193333333333334,
+       "eval_loss": 2.0335097312927246,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 58.4613,
+       "eval_samples_per_second": 51.316,
+       "eval_steps_per_second": 6.414,
+       "step": 1128
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.732,
+       "eval_loss": 1.9591959714889526,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 60.8155,
+       "eval_samples_per_second": 49.33,
+       "eval_steps_per_second": 6.166,
+       "step": 1269
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.7413333333333333,
+       "eval_loss": 1.8956468105316162,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 58.6099,
+       "eval_samples_per_second": 51.186,
+       "eval_steps_per_second": 6.398,
+       "step": 1410
+     },
+     {
+       "epoch": 10.638297872340425,
+       "grad_norm": 2.2240982055664062,
+       "learning_rate": 1.95933014354067e-06,
+       "loss": 1.9201,
+       "step": 1500
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.7506666666666667,
+       "eval_loss": 1.8356391191482544,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 61.3536,
+       "eval_samples_per_second": 48.897,
+       "eval_steps_per_second": 6.112,
+       "step": 1551
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.7506666666666667,
+       "eval_loss": 1.7848457098007202,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.2943,
+       "eval_samples_per_second": 50.595,
+       "eval_steps_per_second": 6.324,
+       "step": 1692
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 0.764,
+       "eval_loss": 1.736997365951538,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 58.9962,
+       "eval_samples_per_second": 50.851,
+       "eval_steps_per_second": 6.356,
+       "step": 1833
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.7626666666666667,
+       "eval_loss": 1.6969176530838013,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.8754,
+       "eval_samples_per_second": 50.104,
+       "eval_steps_per_second": 6.263,
+       "step": 1974
+     },
+     {
+       "epoch": 14.184397163120567,
+       "grad_norm": 2.2442896366119385,
+       "learning_rate": 1.6004784688995215e-06,
+       "loss": 1.7006,
+       "step": 2000
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 0.768,
+       "eval_loss": 1.6571820974349976,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 58.7702,
+       "eval_samples_per_second": 51.046,
+       "eval_steps_per_second": 6.381,
+       "step": 2115
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 0.7653333333333333,
+       "eval_loss": 1.6255625486373901,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.8291,
+       "eval_samples_per_second": 50.143,
+       "eval_steps_per_second": 6.268,
+       "step": 2256
+     },
+     {
+       "epoch": 17.0,
+       "eval_accuracy": 0.771,
+       "eval_loss": 1.5948454141616821,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 56.7649,
+       "eval_samples_per_second": 52.85,
+       "eval_steps_per_second": 6.606,
+       "step": 2397
+     },
+     {
+       "epoch": 17.73049645390071,
+       "grad_norm": 2.0100739002227783,
+       "learning_rate": 1.2416267942583733e-06,
+       "loss": 1.5525,
+       "step": 2500
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 0.7736666666666666,
+       "eval_loss": 1.5684410333633423,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.7778,
+       "eval_samples_per_second": 50.186,
+       "eval_steps_per_second": 6.273,
+       "step": 2538
+     },
+     {
+       "epoch": 19.0,
+       "eval_accuracy": 0.7746666666666666,
+       "eval_loss": 1.5451995134353638,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 56.7165,
+       "eval_samples_per_second": 52.895,
+       "eval_steps_per_second": 6.612,
+       "step": 2679
+     },
+     {
+       "epoch": 20.0,
+       "eval_accuracy": 0.776,
+       "eval_loss": 1.5241280794143677,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.6632,
+       "eval_samples_per_second": 50.282,
+       "eval_steps_per_second": 6.285,
+       "step": 2820
+     },
+     {
+       "epoch": 21.0,
+       "eval_accuracy": 0.778,
+       "eval_loss": 1.5055561065673828,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 61.0991,
+       "eval_samples_per_second": 49.101,
+       "eval_steps_per_second": 6.138,
+       "step": 2961
+     },
+     {
+       "epoch": 21.27659574468085,
+       "grad_norm": 2.603900194168091,
+       "learning_rate": 8.827751196172249e-07,
+       "loss": 1.4511,
+       "step": 3000
+     },
+     {
+       "epoch": 22.0,
+       "eval_accuracy": 0.7773333333333333,
+       "eval_loss": 1.4899275302886963,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 60.2357,
+       "eval_samples_per_second": 49.804,
+       "eval_steps_per_second": 6.226,
+       "step": 3102
+     },
+     {
+       "epoch": 23.0,
+       "eval_accuracy": 0.779,
+       "eval_loss": 1.4755609035491943,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 58.2979,
+       "eval_samples_per_second": 51.46,
+       "eval_steps_per_second": 6.432,
+       "step": 3243
+     },
+     {
+       "epoch": 24.0,
+       "eval_accuracy": 0.7796666666666666,
+       "eval_loss": 1.4629756212234497,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.4982,
+       "eval_samples_per_second": 50.422,
+       "eval_steps_per_second": 6.303,
+       "step": 3384
+     },
+     {
+       "epoch": 24.822695035460992,
+       "grad_norm": 2.867500066757202,
+       "learning_rate": 5.239234449760765e-07,
+       "loss": 1.3856,
+       "step": 3500
+     },
+     {
+       "epoch": 25.0,
+       "eval_accuracy": 0.7803333333333333,
+       "eval_loss": 1.452785849571228,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 61.5325,
+       "eval_samples_per_second": 48.755,
+       "eval_steps_per_second": 6.094,
+       "step": 3525
+     },
+     {
+       "epoch": 26.0,
+       "eval_accuracy": 0.783,
+       "eval_loss": 1.4447195529937744,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 59.5703,
+       "eval_samples_per_second": 50.361,
+       "eval_steps_per_second": 6.295,
+       "step": 3666
+     },
+     {
+       "epoch": 27.0,
+       "eval_accuracy": 0.7823333333333333,
+       "eval_loss": 1.438262701034546,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 60.0356,
+       "eval_samples_per_second": 49.97,
+       "eval_steps_per_second": 6.246,
+       "step": 3807
+     },
+     {
+       "epoch": 28.0,
+       "eval_accuracy": 0.783,
+       "eval_loss": 1.4334560632705688,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 60.0215,
+       "eval_samples_per_second": 49.982,
+       "eval_steps_per_second": 6.248,
+       "step": 3948
+     },
+     {
+       "epoch": 28.368794326241133,
+       "grad_norm": 3.527007579803467,
+       "learning_rate": 1.6507177033492823e-07,
+       "loss": 1.3468,
+       "step": 4000
+     },
+     {
+       "epoch": 29.0,
+       "eval_accuracy": 0.784,
+       "eval_loss": 1.4309223890304565,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 57.775,
+       "eval_samples_per_second": 51.926,
+       "eval_steps_per_second": 6.491,
+       "step": 4089
+     },
+     {
+       "epoch": 30.0,
+       "eval_accuracy": 0.7846666666666666,
+       "eval_loss": 1.43010413646698,
+       "eval_model_preparation_time": 0.0034,
+       "eval_runtime": 60.9928,
+       "eval_samples_per_second": 49.186,
+       "eval_steps_per_second": 6.148,
+       "step": 4230
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 4230,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 30,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.046263750898688e+19,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
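The trainer state records the full evaluation history: eval accuracy rises from about 0.22 after epoch 1 to about 0.785 at epoch 30, where the best eval loss (1.4301) is reached. A small sketch for pulling those numbers back out, assuming the local path below:

```python
# Hedged sketch: summarize the log_history recorded in trainer_state.json above.
import json

with open("checkpoint-4230/trainer_state.json") as f:  # assumed local path
    state = json.load(f)

print(state["best_model_checkpoint"], state["best_metric"])
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for entry in evals[-3:]:  # last three evaluation epochs
    print(f'epoch {entry["epoch"]:>4}: accuracy={entry["eval_accuracy"]:.4f}, loss={entry["eval_loss"]:.4f}')
```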
checkpoint-4230/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:348a7626fa4c360c7b86e7c9aaa401da6136daa29e45038dc6ab9fa90da3c856
+ size 5304
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e9a715c47e19dfc1104b65608e115fd6731f299ac95acadbfd47876173afa7e1
+ oid sha256:23a080e7e8acf4c1d4e7f178e05dbec278d9a1156545ebd583c53dcfcf2495d1
  size 343263964
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49f1f0d35245f24ce45112eca68b5e8a46b3e609f94361d9be86699750887ef5
+ oid sha256:348a7626fa4c360c7b86e7c9aaa401da6136daa29e45038dc6ab9fa90da3c856
  size 5304