{
"_name_or_path": "hf-internal-testing/tiny-random-LevitModel",
"architectures": [
"LevitModel"
],
"attention_ratio": [
2,
2,
2
],
"depths": [
2,
3,
4
],
"down_ops": [
[
"Subsample",
16,
8,
4,
2,
2
],
[
"Subsample",
16,
16,
4,
2,
2
],
[
""
]
],
"drop_path_rate": 0,
"hidden_sizes": [
128,
256,
384
],
"image_size": 64,
"initializer_range": 0.02,
"kernel_size": 3,
"key_dim": [
16,
16,
16
],
"mlp_ratio": [
2,
2,
2
],
"model_type": "levit",
"neuron": {
"auto_cast": null,
"auto_cast_type": null,
"compiler_type": "neuronx-cc",
"compiler_version": "2.14.182.0+a56cbff7",
"disable_fallback": false,
"disable_fast_relayout": false,
"dynamic_batch_size": false,
"inline_weights_to_neff": true,
"input_names": [
"pixel_values"
],
"model_type": "levit",
"optlevel": "2",
"output_attentions": false,
"output_hidden_states": false,
"output_names": [
"logits"
],
"static_batch_size": 1,
"static_image_size": 64,
"static_num_channels": 3,
"static_patch_size": 16
},
"num_attention_heads": [
4,
6,
8
],
"num_channels": 3,
"padding": 1,
"patch_size": 16,
"stride": 2,
"task": "image-classification",
"torchscript": true,
"transformers_version": "4.41.1"
}