gihakkk committed on
Commit
9732ec8
·
verified ·
1 Parent(s): 72212f7

Upload 4 files

Browse files
config.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "apple/mobilevit-small",
3
+ "architectures": [
4
+ "MobileViTForImageClassification"
5
+ ],
6
+ "aspp_dropout_prob": 0.1,
7
+ "aspp_out_channels": 256,
8
+ "atrous_rates": [
9
+ 6,
10
+ 12,
11
+ 18
12
+ ],
13
+ "attention_probs_dropout_prob": 0.0,
14
+ "classifier_dropout_prob": 0.1,
15
+ "conv_kernel_size": 3,
16
+ "expand_ratio": 4.0,
17
+ "hidden_act": "silu",
18
+ "hidden_dropout_prob": 0.1,
19
+ "hidden_sizes": [
20
+ 144,
21
+ 192,
22
+ 240
23
+ ],
24
+ "image_size": 256,
25
+ "initializer_range": 0.02,
26
+ "layer_norm_eps": 1e-05,
27
+ "mlp_ratio": 2.0,
28
+ "model_type": "mobilevit",
29
+ "neck_hidden_sizes": [
30
+ 16,
31
+ 32,
32
+ 64,
33
+ 96,
34
+ 128,
35
+ 160,
36
+ 640
37
+ ],
38
+ "num_attention_heads": 4,
39
+ "num_channels": 3,
40
+ "output_stride": 32,
41
+ "patch_size": 2,
42
+ "qkv_bias": true,
43
+ "semantic_loss_ignore_index": 255,
44
+ "torch_dtype": "float32",
45
+ "transformers_version": "4.41.2"
46
+ }
mobilevit_model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52a5849c27f12ea3dc9ec2c6d5500ed2dfacd41c9caabbc78f5673d8bbb31df1
3
+ size 20045692
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2370bc0add01ddd90109edaf175ad036f517ee5c22ccf437767b5cf689de881a
3
+ size 19851560
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/vit-base-patch16-224",
3
+ "architectures": [
4
+ "ViTForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 768,
11
+ "image_size": 224,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-12,
15
+ "model_type": "vit",
16
+ "num_attention_heads": 12,
17
+ "num_channels": 3,
18
+ "num_hidden_layers": 12,
19
+ "patch_size": 16,
20
+ "qkv_bias": true,
21
+ "torch_dtype": "float32",
22
+ "transformers_version": "4.41.2"
23
+ }