digo-prayudha committed
Commit 2cf4441 · verified · 1 Parent(s): fcb45ea

vit-emotion-classification
README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - image-classification
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ model-index:
+ - name: vit-emotion-classification
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: FastJobs/Visual_Emotional_Analysis
+       type: imagefolder
+       config: default
+       split: train
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.6125
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vit-emotion-classification
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the FastJobs/Visual_Emotional_Analysis dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.3802
+ - Accuracy: 0.6125
+
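+ As a minimal inference sketch (the repo id below is a hypothetical guess from the committer and model name; substitute the actual Hub id or a local checkpoint path):
+
+ ```python
+ from transformers import pipeline
+
+ # Hypothetical repo id; point this at wherever the checkpoint actually lives.
+ classifier = pipeline("image-classification", model="digo-prayudha/vit-emotion-classification")
+
+ # Scores over the 8 emotion labels: anger, contempt, disgust, fear, happy, neutral, sad, surprise.
+ predictions = classifier("path/to/face.jpg")
+ print(predictions[0])  # top prediction, e.g. {'label': 'happy', 'score': ...}
+ ```
+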
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: AdamW (adamw_torch) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+ - mixed_precision_training: Native AMP
+
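+ For reference, a sketch of `TrainingArguments` matching the list above (a reconstruction, not the authoritative record; `training_args.bin` in this commit is. The output directory and the eval/save/logging cadence are taken from `trainer_state.json`):
+
+ ```python
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="./vit-emotion-classification",  # per trainer_state.json
+     learning_rate=2e-4,
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=8,
+     seed=42,
+     optim="adamw_torch",
+     adam_beta1=0.9,
+     adam_beta2=0.999,
+     adam_epsilon=1e-8,
+     lr_scheduler_type="linear",
+     num_train_epochs=10,
+     fp16=True,              # Native AMP mixed precision
+     eval_strategy="steps",  # evaluated every 100 steps per trainer_state.json
+     eval_steps=100,
+     save_steps=100,
+     logging_steps=10,
+ )
+ ```
+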
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 0.8454        | 2.5   | 100  | 1.4373          | 0.4813   |
+ | 0.2022        | 5.0   | 200  | 1.4067          | 0.55     |
+ | 0.0474        | 7.5   | 300  | 1.3802          | 0.6125   |
+ | 0.0368        | 10.0  | 400  | 1.4388          | 0.5938   |
+
+ ### Framework versions
+
+ - Transformers 4.47.1
+ - Pytorch 2.5.1+cu121
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 10.0,
+     "eval_accuracy": 0.6125,
+     "eval_loss": 1.3801825046539307,
+     "eval_runtime": 0.9514,
+     "eval_samples_per_second": 168.179,
+     "eval_steps_per_second": 21.022,
+     "total_flos": 4.959754037231616e+17,
+     "train_loss": 0.4905405020713806,
+     "train_runtime": 117.1653,
+     "train_samples_per_second": 54.624,
+     "train_steps_per_second": 3.414
+ }
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "anger",
+     "1": "contempt",
+     "2": "disgust",
+     "3": "fear",
+     "4": "happy",
+     "5": "neutral",
+     "6": "sad",
+     "7": "surprise"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "anger": "0",
+     "contempt": "1",
+     "disgust": "2",
+     "fear": "3",
+     "happy": "4",
+     "neutral": "5",
+     "sad": "6",
+     "surprise": "7"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.1"
+ }
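The `id2label` map in this config is what turns an argmax over the 8 logits into an emotion name. A minimal sketch of a forward pass using it (the checkpoint path is hypothetical; point it at a directory holding this commit's files):

```python
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

# Hypothetical local path containing config.json, model.safetensors, preprocessor_config.json.
checkpoint = "./vit-emotion-classification"
model = ViTForImageClassification.from_pretrained(checkpoint)
processor = ViTImageProcessor.from_pretrained(checkpoint)

image = Image.open("face.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 8): one logit per emotion class

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # e.g. "happy"
```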
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 10.0,
+     "eval_accuracy": 0.6125,
+     "eval_loss": 1.3801825046539307,
+     "eval_runtime": 0.9514,
+     "eval_samples_per_second": 168.179,
+     "eval_steps_per_second": 21.022
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c96beb5c645ff28e350eeff2b83ba7add70bbd26c931fa952f14edf607627a74
+ size 343242432
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTFeatureExtractor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
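Spelled out, this config resizes to 224x224 with bilinear resampling (`resample: 2` in PIL's numbering), rescales pixel values by 1/255, and normalizes each channel with mean/std 0.5, mapping inputs to [-1, 1]. A rough NumPy equivalent, as a sketch rather than the processor's exact code path:

```python
import numpy as np
from PIL import Image

def preprocess(image: Image.Image) -> np.ndarray:
    """Rough equivalent of the ViT preprocessor config above."""
    image = image.convert("RGB").resize((224, 224), Image.BILINEAR)  # do_resize, resample=2
    pixels = np.asarray(image).astype(np.float32) / 255.0            # do_rescale, rescale_factor
    pixels = (pixels - 0.5) / 0.5                                    # do_normalize: image_mean/image_std = 0.5
    return pixels.transpose(2, 0, 1)[np.newaxis]                     # HWC -> NCHW batch of 1
```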
runs/Jan06_09-10-24_8008b6c70fb0/events.out.tfevents.1736154659.8008b6c70fb0.1523.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e431a970bf8a3c82fcb386cdd5728af4b2faa4aeb1ab7199b8d03623fe7f2917
+ size 5295
runs/Jan06_09-12-00_8008b6c70fb0/events.out.tfevents.1736154727.8008b6c70fb0.1523.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dd17d98455858d87626a91f9f1147eeb7982d839b8d79966d32b08ea1cbbc81
+ size 5295
runs/Jan06_09-13-05_8008b6c70fb0/events.out.tfevents.1736154791.8008b6c70fb0.1523.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55ca07998c95839663cf0ab14da4513f8eae3ed422bd3f62ce9f056c698332e1
+ size 9294
runs/Jan06_09-13-05_8008b6c70fb0/events.out.tfevents.1736154913.8008b6c70fb0.1523.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:591589e0dadd9a67b21b4070d9e7b5456b852db0368cd0b19187cb45882e398f
+ size 411
runs/Jan06_09-18-00_8008b6c70fb0/events.out.tfevents.1736155086.8008b6c70fb0.1523.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:758fb4832bf018795ecc0455cfd8f45976e2158d1bd66ef0c861620f2ae1a632
+ size 16384
runs/Jan06_09-18-00_8008b6c70fb0/events.out.tfevents.1736155227.8008b6c70fb0.1523.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acb72e847ed367c25aa646f33dad1b128dd0beb4c5d578b08cf8bd7898b9f215
+ size 411
runs/Jan06_09-32-39_8008b6c70fb0/events.out.tfevents.1736155985.8008b6c70fb0.1523.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1db3f0ed90d3c7e2f022d57396fb18dab94fe3367097ae970a380dfe740cad87
+ size 15329
runs/Jan06_09-32-39_8008b6c70fb0/events.out.tfevents.1736156140.8008b6c70fb0.1523.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:635945efba2d48f0e76c34aa440e563eca2a234a3b4fd956114c4fd49ab9ef0b
+ size 411
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 10.0,
+     "total_flos": 4.959754037231616e+17,
+     "train_loss": 0.4905405020713806,
+     "train_runtime": 117.1653,
+     "train_samples_per_second": 54.624,
+     "train_steps_per_second": 3.414
+ }
trainer_state.json ADDED
@@ -0,0 +1,358 @@
+ {
+   "best_metric": 1.3801825046539307,
+   "best_model_checkpoint": "./vit-emotion-classification/checkpoint-300",
+   "epoch": 10.0,
+   "eval_steps": 100,
+   "global_step": 400,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.25,
+       "grad_norm": 2.134533643722534,
+       "learning_rate": 0.000195,
+       "loss": 2.0279,
+       "step": 10
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 1.939251184463501,
+       "learning_rate": 0.00019,
+       "loss": 1.7986,
+       "step": 20
+     },
+     {
+       "epoch": 0.75,
+       "grad_norm": 1.8353347778320312,
+       "learning_rate": 0.00018500000000000002,
+       "loss": 1.6841,
+       "step": 30
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 2.401373863220215,
+       "learning_rate": 0.00018,
+       "loss": 1.5968,
+       "step": 40
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 2.080655813217163,
+       "learning_rate": 0.000175,
+       "loss": 1.3349,
+       "step": 50
+     },
+     {
+       "epoch": 1.5,
+       "grad_norm": 2.146280527114868,
+       "learning_rate": 0.00017,
+       "loss": 1.2608,
+       "step": 60
+     },
+     {
+       "epoch": 1.75,
+       "grad_norm": 2.722163438796997,
+       "learning_rate": 0.000165,
+       "loss": 1.2728,
+       "step": 70
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 1.9687062501907349,
+       "learning_rate": 0.00016,
+       "loss": 1.1254,
+       "step": 80
+     },
+     {
+       "epoch": 2.25,
+       "grad_norm": 2.55076265335083,
+       "learning_rate": 0.000155,
+       "loss": 0.8877,
+       "step": 90
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 2.256789445877075,
+       "learning_rate": 0.00015000000000000001,
+       "loss": 0.8454,
+       "step": 100
+     },
+     {
+       "epoch": 2.5,
+       "eval_accuracy": 0.48125,
+       "eval_loss": 1.4373115301132202,
+       "eval_runtime": 0.8376,
+       "eval_samples_per_second": 191.019,
+       "eval_steps_per_second": 23.877,
+       "step": 100
+     },
+     {
+       "epoch": 2.75,
+       "grad_norm": 2.4422082901000977,
+       "learning_rate": 0.000145,
+       "loss": 0.8337,
+       "step": 110
+     },
+     {
+       "epoch": 3.0,
+       "grad_norm": 3.179633140563965,
+       "learning_rate": 0.00014,
+       "loss": 0.882,
+       "step": 120
+     },
+     {
+       "epoch": 3.25,
+       "grad_norm": 1.4560832977294922,
+       "learning_rate": 0.00013500000000000003,
+       "loss": 0.5198,
+       "step": 130
+     },
+     {
+       "epoch": 3.5,
+       "grad_norm": 2.0190017223358154,
+       "learning_rate": 0.00013000000000000002,
+       "loss": 0.4344,
+       "step": 140
+     },
+     {
+       "epoch": 3.75,
+       "grad_norm": 3.2591023445129395,
+       "learning_rate": 0.000125,
+       "loss": 0.4698,
+       "step": 150
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 2.652550220489502,
+       "learning_rate": 0.00012,
+       "loss": 0.54,
+       "step": 160
+     },
+     {
+       "epoch": 4.25,
+       "grad_norm": 0.5937463641166687,
+       "learning_rate": 0.00011499999999999999,
+       "loss": 0.2737,
+       "step": 170
+     },
+     {
+       "epoch": 4.5,
+       "grad_norm": 1.0296827554702759,
+       "learning_rate": 0.00011000000000000002,
+       "loss": 0.2402,
+       "step": 180
+     },
+     {
+       "epoch": 4.75,
+       "grad_norm": 3.007828712463379,
+       "learning_rate": 0.000105,
+       "loss": 0.1952,
+       "step": 190
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 1.936133623123169,
+       "learning_rate": 0.0001,
+       "loss": 0.2022,
+       "step": 200
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.55,
+       "eval_loss": 1.406724214553833,
+       "eval_runtime": 0.8264,
+       "eval_samples_per_second": 193.601,
+       "eval_steps_per_second": 24.2,
+       "step": 200
+     },
+     {
+       "epoch": 5.25,
+       "grad_norm": 0.3376343250274658,
+       "learning_rate": 9.5e-05,
+       "loss": 0.1086,
+       "step": 210
+     },
+     {
+       "epoch": 5.5,
+       "grad_norm": 2.637477397918701,
+       "learning_rate": 9e-05,
+       "loss": 0.109,
+       "step": 220
+     },
+     {
+       "epoch": 5.75,
+       "grad_norm": 3.05291485786438,
+       "learning_rate": 8.5e-05,
+       "loss": 0.0957,
+       "step": 230
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 3.055624485015869,
+       "learning_rate": 8e-05,
+       "loss": 0.136,
+       "step": 240
+     },
+     {
+       "epoch": 6.25,
+       "grad_norm": 0.30109065771102905,
+       "learning_rate": 7.500000000000001e-05,
+       "loss": 0.0741,
+       "step": 250
+     },
+     {
+       "epoch": 6.5,
+       "grad_norm": 0.19268257915973663,
+       "learning_rate": 7e-05,
+       "loss": 0.0664,
+       "step": 260
+     },
+     {
+       "epoch": 6.75,
+       "grad_norm": 0.15611745417118073,
+       "learning_rate": 6.500000000000001e-05,
+       "loss": 0.0559,
+       "step": 270
+     },
+     {
+       "epoch": 7.0,
+       "grad_norm": 0.21113821864128113,
+       "learning_rate": 6e-05,
+       "loss": 0.0599,
+       "step": 280
+     },
+     {
+       "epoch": 7.25,
+       "grad_norm": 0.160753071308136,
+       "learning_rate": 5.500000000000001e-05,
+       "loss": 0.049,
+       "step": 290
+     },
+     {
+       "epoch": 7.5,
+       "grad_norm": 0.13460688292980194,
+       "learning_rate": 5e-05,
+       "loss": 0.0474,
+       "step": 300
+     },
+     {
+       "epoch": 7.5,
+       "eval_accuracy": 0.6125,
+       "eval_loss": 1.3801825046539307,
+       "eval_runtime": 0.8118,
+       "eval_samples_per_second": 197.084,
+       "eval_steps_per_second": 24.635,
+       "step": 300
+     },
+     {
+       "epoch": 7.75,
+       "grad_norm": 0.13902044296264648,
+       "learning_rate": 4.5e-05,
+       "loss": 0.0452,
+       "step": 310
+     },
+     {
+       "epoch": 8.0,
+       "grad_norm": 0.12478330731391907,
+       "learning_rate": 4e-05,
+       "loss": 0.0424,
+       "step": 320
+     },
+     {
+       "epoch": 8.25,
+       "grad_norm": 0.11532563716173172,
+       "learning_rate": 3.5e-05,
+       "loss": 0.0409,
+       "step": 330
+     },
+     {
+       "epoch": 8.5,
+       "grad_norm": 0.11974634230136871,
+       "learning_rate": 3e-05,
+       "loss": 0.0395,
+       "step": 340
+     },
+     {
+       "epoch": 8.75,
+       "grad_norm": 0.1203409880399704,
+       "learning_rate": 2.5e-05,
+       "loss": 0.0393,
+       "step": 350
+     },
+     {
+       "epoch": 9.0,
+       "grad_norm": 0.13047201931476593,
+       "learning_rate": 2e-05,
+       "loss": 0.0386,
+       "step": 360
+     },
+     {
+       "epoch": 9.25,
+       "grad_norm": 0.10817253589630127,
+       "learning_rate": 1.5e-05,
+       "loss": 0.0376,
+       "step": 370
+     },
+     {
+       "epoch": 9.5,
+       "grad_norm": 0.12842506170272827,
+       "learning_rate": 1e-05,
+       "loss": 0.037,
+       "step": 380
+     },
+     {
+       "epoch": 9.75,
+       "grad_norm": 0.12066592276096344,
+       "learning_rate": 5e-06,
+       "loss": 0.0371,
+       "step": 390
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 0.13028773665428162,
+       "learning_rate": 0.0,
+       "loss": 0.0368,
+       "step": 400
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.59375,
+       "eval_loss": 1.4388375282287598,
+       "eval_runtime": 0.8121,
+       "eval_samples_per_second": 197.013,
+       "eval_steps_per_second": 24.627,
+       "step": 400
+     },
+     {
+       "epoch": 10.0,
+       "step": 400,
+       "total_flos": 4.959754037231616e+17,
+       "train_loss": 0.4905405020713806,
+       "train_runtime": 117.1653,
+       "train_samples_per_second": 54.624,
+       "train_steps_per_second": 3.414
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 400,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 4.959754037231616e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4e554804ca69ba63c5dfc5d687264a589bf43c750133db4f89df8bdafce4473
+ size 5304