ChispiDEV committed on
Commit bdbc37e · verified · 1 Parent(s): f3630ba

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,31 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ base_model: google/mobilenet_v2_1.0_224
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.4714449942111969
+
+ f1: 0.6666666666666666
+
+ precision: 0.5
+
+ recall: 1.0
+
+ auc: 1.0
+
+ accuracy: 0.5
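
The model card above only lists validation metrics; a minimal usage sketch for the uploaded classifier follows, assuming the repository is published at `ChispiDEV/autotrain-1tqht-w0zz7` (the `username`/`project_name` pair from `training_params.json` below — adjust if the actual Hub path differs):

```python
# Minimal inference sketch via the transformers pipeline API.
# The repo id is an assumption (<username>/<project_name>); replace with the real Hub path.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="ChispiDEV/autotrain-1tqht-w0zz7",
)

# Any of the widget images from the model card works as a quick smoke test.
preds = classifier(
    "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
)
print(preds)  # e.g. [{"label": "maduras", "score": ...}, {"label": "normales", "score": ...}]
```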
checkpoint-12/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "google/mobilenet_v2_1.0_224",
+   "_num_labels": 2,
+   "architectures": [
+     "MobileNetV2ForImageClassification"
+   ],
+   "classifier_dropout_prob": 0.2,
+   "depth_divisible_by": 8,
+   "depth_multiplier": 1.0,
+   "expand_ratio": 6,
+   "finegrained_output": true,
+   "first_layer_is_expansion": true,
+   "hidden_act": "relu6",
+   "id2label": {
+     "0": "maduras",
+     "1": "normales"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "maduras": 0,
+     "normales": 1
+   },
+   "layer_norm_eps": 0.001,
+   "min_depth": 8,
+   "model_type": "mobilenet_v2",
+   "num_channels": 3,
+   "output_stride": 32,
+   "problem_type": "single_label_classification",
+   "semantic_loss_ignore_index": 255,
+   "tf_padding": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.43.1"
+ }
checkpoint-12/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10348f2f4056789bd6db4c4fdcdd4cd9243ff0820639a44331c9d3d6af34dc99
+ size 9080216
checkpoint-12/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c90b556db8988c0774b80f7f14dcb32013d1d3e1ba77a6e03f5e5effb74e2c06
+ size 17939322
checkpoint-12/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe823a6b739000a7c0f83ccd30a9bf694b855c757b633c2cb77488f30bd5815e
+ size 13990
checkpoint-12/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a72edfeb588224aa21da22b0df3f2ac920460bb0b48de631667731ab64a1a6d3
+ size 1064
checkpoint-12/trainer_state.json ADDED
@@ -0,0 +1,178 @@
+ {
+   "best_metric": 0.4714449942111969,
+   "best_model_checkpoint": "autotrain-1tqht-w0zz7/checkpoint-12",
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 12,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.3333333333333333,
+       "grad_norm": 18.477209091186523,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.78,
+       "step": 1
+     },
+     {
+       "epoch": 0.6666666666666666,
+       "grad_norm": 26.289060592651367,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.9268,
+       "step": 2
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 15.280725479125977,
+       "learning_rate": 5e-05,
+       "loss": 0.4171,
+       "step": 3
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5,
+       "eval_auc": 0.0,
+       "eval_f1": 0.6666666666666666,
+       "eval_loss": 0.6661330461502075,
+       "eval_precision": 0.5,
+       "eval_recall": 1.0,
+       "eval_runtime": 0.085,
+       "eval_samples_per_second": 23.519,
+       "eval_steps_per_second": 11.76,
+       "step": 3
+     },
+     {
+       "epoch": 1.3333333333333333,
+       "grad_norm": 14.359776496887207,
+       "learning_rate": 4.814814814814815e-05,
+       "loss": 0.5053,
+       "step": 4
+     },
+     {
+       "epoch": 1.6666666666666665,
+       "grad_norm": 20.931427001953125,
+       "learning_rate": 4.62962962962963e-05,
+       "loss": 1.1015,
+       "step": 5
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 23.964014053344727,
+       "learning_rate": 4.4444444444444447e-05,
+       "loss": 0.5705,
+       "step": 6
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 1.0,
+       "eval_auc": 1.0,
+       "eval_f1": 1.0,
+       "eval_loss": 0.5112756490707397,
+       "eval_precision": 1.0,
+       "eval_recall": 1.0,
+       "eval_runtime": 0.081,
+       "eval_samples_per_second": 24.693,
+       "eval_steps_per_second": 12.346,
+       "step": 6
+     },
+     {
+       "epoch": 2.3333333333333335,
+       "grad_norm": 16.643795013427734,
+       "learning_rate": 4.259259259259259e-05,
+       "loss": 0.5339,
+       "step": 7
+     },
+     {
+       "epoch": 2.6666666666666665,
+       "grad_norm": 25.878021240234375,
+       "learning_rate": 4.074074074074074e-05,
+       "loss": 0.7978,
+       "step": 8
+     },
+     {
+       "epoch": 3.0,
+       "grad_norm": 15.237863540649414,
+       "learning_rate": 3.888888888888889e-05,
+       "loss": 0.4334,
+       "step": 9
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5,
+       "eval_auc": 0.0,
+       "eval_f1": 0.6666666666666666,
+       "eval_loss": 0.7194620370864868,
+       "eval_precision": 0.5,
+       "eval_recall": 1.0,
+       "eval_runtime": 0.0865,
+       "eval_samples_per_second": 23.122,
+       "eval_steps_per_second": 11.561,
+       "step": 9
+     },
+     {
+       "epoch": 3.3333333333333335,
+       "grad_norm": 20.827402114868164,
+       "learning_rate": 3.7037037037037037e-05,
+       "loss": 1.0579,
+       "step": 10
+     },
+     {
+       "epoch": 3.6666666666666665,
+       "grad_norm": 11.112966537475586,
+       "learning_rate": 3.518518518518519e-05,
+       "loss": 0.3397,
+       "step": 11
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 19.818172454833984,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.6308,
+       "step": 12
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.5,
+       "eval_auc": 1.0,
+       "eval_f1": 0.6666666666666666,
+       "eval_loss": 0.4714449942111969,
+       "eval_precision": 0.5,
+       "eval_recall": 1.0,
+       "eval_runtime": 0.0984,
+       "eval_samples_per_second": 20.335,
+       "eval_steps_per_second": 10.168,
+       "step": 12
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 30,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 5,
+         "early_stopping_threshold": 0.01
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 84045069287424.0,
+   "train_batch_size": 3,
+   "trial_name": null,
+   "trial_params": null
+ }
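
The `learning_rate` values in `log_history` are consistent with the linear scheduler and `warmup_ratio` of 0.1 recorded in `training_params.json`: with `max_steps` of 30 that gives 3 warmup steps up to the peak of 5e-05, followed by linear decay. A small sketch that reproduces the logged values (assumption: warmup steps are computed as `warmup_ratio * max_steps`):

```python
# Sketch: reproduce the learning rates in log_history from a linear warmup + decay schedule.
# Assumes warmup_steps = warmup_ratio * max_steps = 0.1 * 30 = 3 and peak lr = 5e-05.
def linear_schedule_lr(step, peak_lr=5e-05, warmup_steps=3, max_steps=30):
    if step < warmup_steps:
        return peak_lr * step / warmup_steps  # warmup phase
    return peak_lr * (max_steps - step) / (max_steps - warmup_steps)  # decay phase

for step in (1, 2, 3, 4, 12):
    print(step, linear_schedule_lr(step))
# 1 -> 1.667e-05, 2 -> 3.333e-05, 3 -> 5e-05, 4 -> 4.815e-05, 12 -> 3.333e-05
```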
checkpoint-12/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa7441d0f9e505c8f4b02d6a7959ebc47fb61d61bdbe51c2ea4dca78d3490fd6
+ size 5240
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "google/mobilenet_v2_1.0_224",
+   "_num_labels": 2,
+   "architectures": [
+     "MobileNetV2ForImageClassification"
+   ],
+   "classifier_dropout_prob": 0.2,
+   "depth_divisible_by": 8,
+   "depth_multiplier": 1.0,
+   "expand_ratio": 6,
+   "finegrained_output": true,
+   "first_layer_is_expansion": true,
+   "hidden_act": "relu6",
+   "id2label": {
+     "0": "maduras",
+     "1": "normales"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "maduras": 0,
+     "normales": 1
+   },
+   "layer_norm_eps": 0.001,
+   "min_depth": 8,
+   "model_type": "mobilenet_v2",
+   "num_channels": 3,
+   "output_stride": 32,
+   "problem_type": "single_label_classification",
+   "semantic_loss_ignore_index": 255,
+   "tf_padding": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.43.1"
+ }
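
The config declares a two-class head with `id2label` mapping 0 to `maduras` and 1 to `normales`; a sketch of manual inference that uses this mapping directly (same assumed repo id as above, and a hypothetical local image file):

```python
# Sketch: manual forward pass, mapping the argmax logit back through id2label.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "ChispiDEV/autotrain-1tqht-w0zz7"  # assumed <username>/<project_name>
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

image = Image.open("example.jpg").convert("RGB")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2)

pred_id = logits.argmax(-1).item()
print(model.config.id2label[pred_id])  # "maduras" or "normales"
```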
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10348f2f4056789bd6db4c4fdcdd4cd9243ff0820639a44331c9d3d6af34dc99
+ size 9080216
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "MobileNetV2ImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 256
+   }
+ }
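
In effect, this processor resizes the shortest edge to 256, center-crops to 224×224, rescales by 1/255 and normalizes with mean = std = 0.5, which maps raw pixel values from [0, 255] to [-1, 1]. A tiny sketch of the per-pixel arithmetic this implies:

```python
# Sketch of the per-pixel normalization implied by the config above:
# value -> (value * rescale_factor - image_mean) / image_std, i.e. [0, 255] -> [-1, 1].
def normalize_pixel(value, rescale_factor=1 / 255, mean=0.5, std=0.5):
    return (value * rescale_factor - mean) / std

print(normalize_pixel(0), normalize_pixel(255))  # -1.0 1.0
```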
runs/Jul30_21-00-06_r-chispidev-prueba-autotrain-1-5o5rnnct-b64d4-32juv/events.out.tfevents.1722373208.r-chispidev-prueba-autotrain-1-5o5rnnct-b64d4-32juv.102.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e0e578fe10dda1a8a0aae91c7c6c10d249f0416dd49d0cad703249229b90f13c
- size 5059
+ oid sha256:42c87fc45f893f02001693017925aeec82130189393e4a1b398bd8d45290dd61
+ size 15577
runs/Jul30_21-00-06_r-chispidev-prueba-autotrain-1-5o5rnnct-b64d4-32juv/events.out.tfevents.1722373221.r-chispidev-prueba-autotrain-1-5o5rnnct-b64d4-32juv.102.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bab50226fb1b0d3c79ea9e623cbf200899f7fbe58f5b910438559f041662be10
+ size 597
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa7441d0f9e505c8f4b02d6a7959ebc47fb61d61bdbe51c2ea4dca78d3490fd6
+ size 5240
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "autotrain-1tqht-w0zz7/autotrain-data",
+   "model": "google/mobilenet_v2_1.0_224",
+   "username": "ChispiDEV",
+   "lr": 5e-05,
+   "epochs": 10,
+   "batch_size": 3,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "autotrain-1tqht-w0zz7",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "push_to_hub": true,
+   "eval_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "tensorboard",
+   "early_stopping_patience": 5,
+   "early_stopping_threshold": 0.01
+ }
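
These parameters also explain the `max_steps` of 30 recorded in `trainer_state.json`: 10 epochs at 3 optimizer steps per epoch, which with a batch size of 3 and no gradient accumulation implies a training split of only about 7-9 images. A rough back-of-the-envelope check (assumption: steps per epoch is `ceil(n_train / batch_size)`):

```python
# Rough sketch: infer the approximate training-set size from epochs, batch_size and max_steps.
# Assumes steps_per_epoch = ceil(n_train / (batch_size * gradient_accumulation)).
import math

def steps_per_epoch(n_train, batch_size=3, grad_accum=1):
    return math.ceil(n_train / (batch_size * grad_accum))

# max_steps = 30 over 10 epochs -> 3 steps per epoch -> n_train in {7, 8, 9}
print([n for n in range(1, 13) if steps_per_epoch(n) == 3])  # [7, 8, 9]
```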