rwightman HF staff committed on
Commit
897c2e2
·
verified ·
1 Parent(s): a5b5168
Files changed (4) hide show
  1. README.md +43 -0
  2. config.json +33 -0
  3. model.safetensors +3 -0
  4. pytorch_model.bin +3 -0
README.md ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - timm
4
+ - transformers
5
+ - image-feature-extraction
6
+ - siglip
7
+ - siglip2
8
+ library_name: timm
9
+ license: apache-2.0
10
+ datasets:
11
+ - webli
12
+ ---
13
+ # Model card for vit_so400m_patch14_siglip_224.v2_webli
14
+
15
+ A SigLIP 2 ViT (image encoder only) for `timm`. Equivalent to image tower from https://huggingface.co/timm/ViT-SO400M-14-SigLIP2.
16
+
17
+
18
+ ## Model Details
19
+ - **Dataset:** webli
20
+ - **Papers:**
21
+ - SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features: https://arxiv.org/abs/2502.14786
22
+ - Sigmoid Loss for Language Image Pre-Training: https://arxiv.org/abs/2303.15343
23
+
24
+ ## Citation
25
+ ```bibtex
26
+ @article{tschannen2025siglip,
27
+ title={SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features},
28
+ author={Tschannen, Michael and Gritsenko, Alexey and Wang, Xiao and Naeem, Muhammad Ferjad and Alabdulmohsin, Ibrahim and Parthasarathy, Nikhil and Evans, Talfan and Beyer, Lucas and Xia, Ye and Mustafa, Basil and Hénaff, Olivier and Harmsen, Jeremiah and Steiner, Andreas and Zhai, Xiaohua},
29
+ year={2025},
30
+ journal={arXiv preprint arXiv:2502.14786}
31
+ }
32
+
33
+ ```
34
+ ```bibtex
35
+ @inproceedings{zhai2023sigmoid,
36
+ title={Sigmoid loss for language image pre-training},
37
+ author={Zhai, Xiaohua and Mustafa, Basil and Kolesnikov, Alexander and Beyer, Lucas},
38
+ booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
39
+ pages={11975--11986},
40
+ year={2023}
41
+ }
42
+
43
+ ```
config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architecture": "vit_so400m_patch14_siglip_224",
3
+ "num_classes": 0,
4
+ "num_features": 1152,
5
+ "global_pool": "map",
6
+ "pretrained_cfg": {
7
+ "tag": "v2_webli",
8
+ "custom_load": false,
9
+ "input_size": [
10
+ 3,
11
+ 224,
12
+ 224
13
+ ],
14
+ "fixed_input_size": true,
15
+ "interpolation": "bicubic",
16
+ "crop_pct": 0.9,
17
+ "crop_mode": "center",
18
+ "mean": [
19
+ 0.5,
20
+ 0.5,
21
+ 0.5
22
+ ],
23
+ "std": [
24
+ 0.5,
25
+ 0.5,
26
+ 0.5
27
+ ],
28
+ "num_classes": 0,
29
+ "pool_size": null,
30
+ "first_conv": "patch_embed.proj",
31
+ "classifier": "head"
32
+ }
33
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c079ddc4d7fc5875c3e0dcc8f7f33bdd00453d7e1c6362a3787e5c24121b454
3
+ size 1710755536
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9487d905fa87a1ac9294994dd159becdb6d8def649a64cbdc4e0c51542f64484
3
+ size 1710851426