Add model

- README.md +43 -0
- config.json +33 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
README.md
ADDED
@@ -0,0 +1,43 @@
---
tags:
- timm
- transformers
- image-feature-extraction
- siglip
- siglip2
library_name: timm
license: apache-2.0
datasets:
- webli
---
# Model card for vit_base_patch32_siglip_gap_256.v2_webli

A SigLIP 2 ViT (image encoder only) for `timm`, equivalent to the image tower from https://huggingface.co/timm/ViT-B-32-SigLIP2-256. This `gap` variant uses global average pooling and has the attention pooling head removed.

## Model Details
- **Dataset:** webli
- **Papers:**
  - SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features: https://arxiv.org/abs/2502.14786
  - Sigmoid Loss for Language Image Pre-Training: https://arxiv.org/abs/2303.15343
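## Model Usage

A minimal feature-extraction sketch (not part of the original card; assumes `timm` and `torch` are installed, Hub access for the pretrained weights, and a placeholder image path):

```python
import torch
import timm
from PIL import Image

# Hypothetical input image; substitute any RGB image path.
img = Image.open('example.jpg').convert('RGB')

# num_classes=0 keeps the model as a pure feature extractor,
# matching num_classes in this repo's config.json.
model = timm.create_model(
    'vit_base_patch32_siglip_gap_256.v2_webli',
    pretrained=True,
    num_classes=0,
).eval()

# Build the eval-time transform from pretrained_cfg:
# 256x256 input, bicubic interpolation, mean/std of 0.5.
data_config = timm.data.resolve_model_data_config(model)
transform = timm.data.create_transform(**data_config, is_training=False)

with torch.no_grad():
    features = model(transform(img).unsqueeze(0))  # (1, 768) pooled image embedding
```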
## Citation
```bibtex
@article{tschannen2025siglip,
  title={SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features},
  author={Tschannen, Michael and Gritsenko, Alexey and Wang, Xiao and Naeem, Muhammad Ferjad and Alabdulmohsin, Ibrahim and Parthasarathy, Nikhil and Evans, Talfan and Beyer, Lucas and Xia, Ye and Mustafa, Basil and H{\'e}naff, Olivier and Harmsen, Jeremiah and Steiner, Andreas and Zhai, Xiaohua},
  year={2025},
  journal={arXiv preprint arXiv:2502.14786}
}
```
```bibtex
@inproceedings{zhai2023sigmoid,
  title={Sigmoid loss for language image pre-training},
  author={Zhai, Xiaohua and Mustafa, Basil and Kolesnikov, Alexander and Beyer, Lucas},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={11975--11986},
  year={2023}
}
```
config.json
ADDED
@@ -0,0 +1,33 @@
{
  "architecture": "vit_base_patch32_siglip_gap_256",
  "num_classes": 0,
  "num_features": 768,
  "global_pool": "avg",
  "pretrained_cfg": {
    "tag": "v2_webli",
    "custom_load": false,
    "input_size": [
      3,
      256,
      256
    ],
    "fixed_input_size": true,
    "interpolation": "bicubic",
    "crop_pct": 0.9,
    "crop_mode": "center",
    "mean": [
      0.5,
      0.5,
      0.5
    ],
    "std": [
      0.5,
      0.5,
      0.5
    ],
    "num_classes": 0,
    "pool_size": null,
    "first_conv": "patch_embed.proj",
    "classifier": "head"
  }
}
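Read as a sketch of how the config above shapes the outputs (shapes inferred from the config, not verified here): `input_size` fixes a 256x256 input, patch size 32 gives an 8x8 token grid, and `global_pool: "avg"` averages those tokens into the 768-dim `num_features` vector.

```python
import torch
import timm

# Weights are not needed to check shapes; pretrained=False avoids a download.
model = timm.create_model(
    'vit_base_patch32_siglip_gap_256.v2_webli',
    pretrained=False,
    num_classes=0,
).eval()

x = torch.randn(1, 3, 256, 256)  # input_size from pretrained_cfg

with torch.no_grad():
    tokens = model.forward_features(x)   # (1, 64, 768): (256/32)**2 patch tokens, no class token
    pooled = model.forward_head(tokens)  # (1, 768): "avg" pool over tokens -> num_features
```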
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ddb091b4dfd7dd5b1c2e821e7ae3e37bd2e14e5da08cde0e02230d53664b467
size 349874704
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16edb5761bef1dda430d52ed132f81db5fbd2ccdbfb7cc7809c956d0fe422e1e
size 349916894