Upload OpenVLAForActionPrediction

Files changed:
- config.json +2 -1
- generation_config.json +7 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +989 -0
- modeling_prismatic.py +565 -0
 
    	
config.json CHANGED

@@ -4,7 +4,8 @@
     "OpenVLAForActionPrediction"
   ],
   "auto_map": {
-    "AutoConfig": "configuration_prismatic.OpenVLAConfig"
+    "AutoConfig": "configuration_prismatic.OpenVLAConfig",
+    "AutoModelForVision2Seq": "modeling_prismatic.OpenVLAForActionPrediction"
   },
   "hf_llm_id": "meta-llama/Llama-2-7b-hf",
   "image_resize_strategy": "resize-naive",
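The new "AutoModelForVision2Seq" entry registers the model class alongside the config in auto_map, so the Transformers Auto classes can resolve the custom code shipped in this repo. A minimal loading sketch; the repo id below is a placeholder for wherever this commit lives, and trust_remote_code=True is required because the classes come from the repo's own Python files:

    from transformers import AutoConfig, AutoModelForVision2Seq

    repo_id = "openvla/openvla-7b"  # placeholder repo id, not taken from this commit

    # AutoConfig resolves to configuration_prismatic.OpenVLAConfig via auto_map;
    # AutoModelForVision2Seq now resolves to modeling_prismatic.OpenVLAForActionPrediction.
    config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModelForVision2Seq.from_pretrained(repo_id, trust_remote_code=True)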
    	
generation_config.json ADDED

@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 32000,
+  "transformers_version": "4.40.1"
+}
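These are Llama-2's special-token ids (bos 1, eos 2), plus a pad_token_id of 32000, one slot past Llama-2's 32,000-token vocabulary, which suggests a dedicated padding token was appended to the tokenizer. A small sketch of reading these defaults back (placeholder repo id again):

    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained("openvla/openvla-7b")  # placeholder repo id
    # generate() picks these defaults up automatically; printed here only for inspection.
    print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 1 2 32000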
    	
model-00001-of-00003.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a2b419a502c19d5f23dc27c96642d3a0fba3e4176ff1305eb2b6720238fd67e
+size 6948961960
    	
model-00002-of-00003.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33e4e142e7fb3583d83b287224b991cb5552e314f00a253b4f7592c698384e43
+size 6971232040
    	
model-00003-of-00003.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25c23195e07d0702851e969871585746ed4f3ecd368c1e4f4768419b9232090e
+size 1162406824
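Each of the three entries above is a Git LFS pointer, not the weights themselves: what the commit stores is the spec version, the shard's SHA-256 oid, and its byte size, while the actual data lives in LFS storage. A sketch of verifying a downloaded shard against its pointer (the local path is an assumption; the oid and size come from the first pointer above):

    import hashlib

    path = "model-00001-of-00003.safetensors"  # assumed local download path
    expected_oid = "0a2b419a502c19d5f23dc27c96642d3a0fba3e4176ff1305eb2b6720238fd67e"
    expected_size = 6948961960

    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        # Stream in 1 MiB chunks so a ~7 GB shard never sits fully in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)

    assert size == expected_size
    assert h.hexdigest() == expected_oid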
    	
model.safetensors.index.json ADDED

@@ -0,0 +1,989 @@
+{
+  "metadata": {
+    "total_size": 15082474368
+  },
+  "weight_map": {
+    "language_model.lm_head.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
| 251 | 
         
            +
                "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 252 | 
         
            +
                "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 253 | 
         
            +
                "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 254 | 
         
            +
                "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 255 | 
         
            +
                "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 256 | 
         
            +
                "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 257 | 
         
            +
                "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 258 | 
         
            +
                "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 259 | 
         
            +
                "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 260 | 
         
            +
                "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 261 | 
         
            +
                "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 262 | 
         
            +
                "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 263 | 
         
            +
                "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 264 | 
         
            +
                "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 265 | 
         
            +
                "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 266 | 
         
            +
                "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 267 | 
         
            +
                "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 268 | 
         
            +
                "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 269 | 
         
            +
                "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 270 | 
         
            +
                "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 271 | 
         
            +
                "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 272 | 
         
            +
                "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 273 | 
         
            +
                "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 274 | 
         
            +
                "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 275 | 
         
            +
                "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 276 | 
         
            +
                "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 277 | 
         
            +
                "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 278 | 
         
            +
                "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 279 | 
         
            +
                "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 280 | 
         
            +
                "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 281 | 
         
            +
                "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 282 | 
         
            +
                "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 283 | 
         
            +
                "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 284 | 
         
            +
                "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 285 | 
         
            +
                "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 286 | 
         
            +
                "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 287 | 
         
            +
                "language_model.model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 288 | 
         
            +
                "language_model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 289 | 
         
            +
                "language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 290 | 
         
            +
                "language_model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 291 | 
         
            +
                "language_model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 292 | 
         
            +
                "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 293 | 
         
            +
                "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 294 | 
         
            +
                "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 295 | 
         
            +
                "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 296 | 
         
            +
                "language_model.model.norm.weight": "model-00003-of-00003.safetensors",
         
     | 
| 297 | 
         
            +
                "projector.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 298 | 
         
            +
                "projector.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 299 | 
         
            +
                "projector.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 300 | 
         
            +
                "projector.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 301 | 
         
            +
                "projector.fc3.bias": "model-00001-of-00003.safetensors",
         
     | 
| 302 | 
         
            +
                "projector.fc3.weight": "model-00001-of-00003.safetensors",
         
     | 
| 303 | 
         
            +
                "vision_backbone.featurizer.blocks.0.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 304 | 
         
            +
                "vision_backbone.featurizer.blocks.0.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 305 | 
         
            +
                "vision_backbone.featurizer.blocks.0.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 306 | 
         
            +
                "vision_backbone.featurizer.blocks.0.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 307 | 
         
            +
                "vision_backbone.featurizer.blocks.0.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 308 | 
         
            +
                "vision_backbone.featurizer.blocks.0.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 309 | 
         
            +
                "vision_backbone.featurizer.blocks.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 310 | 
         
            +
                "vision_backbone.featurizer.blocks.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 311 | 
         
            +
                "vision_backbone.featurizer.blocks.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 312 | 
         
            +
                "vision_backbone.featurizer.blocks.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 313 | 
         
            +
                "vision_backbone.featurizer.blocks.0.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 314 | 
         
            +
                "vision_backbone.featurizer.blocks.0.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 315 | 
         
            +
                "vision_backbone.featurizer.blocks.0.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 316 | 
         
            +
                "vision_backbone.featurizer.blocks.0.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 317 | 
         
            +
                "vision_backbone.featurizer.blocks.1.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 318 | 
         
            +
                "vision_backbone.featurizer.blocks.1.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 319 | 
         
            +
                "vision_backbone.featurizer.blocks.1.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 320 | 
         
            +
                "vision_backbone.featurizer.blocks.1.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 321 | 
         
            +
                "vision_backbone.featurizer.blocks.1.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 322 | 
         
            +
                "vision_backbone.featurizer.blocks.1.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 323 | 
         
            +
                "vision_backbone.featurizer.blocks.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 324 | 
         
            +
                "vision_backbone.featurizer.blocks.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 325 | 
         
            +
                "vision_backbone.featurizer.blocks.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 326 | 
         
            +
                "vision_backbone.featurizer.blocks.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 327 | 
         
            +
                "vision_backbone.featurizer.blocks.1.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 328 | 
         
            +
                "vision_backbone.featurizer.blocks.1.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 329 | 
         
            +
                "vision_backbone.featurizer.blocks.1.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 330 | 
         
            +
                "vision_backbone.featurizer.blocks.1.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 331 | 
         
            +
                "vision_backbone.featurizer.blocks.10.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 332 | 
         
            +
                "vision_backbone.featurizer.blocks.10.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 333 | 
         
            +
                "vision_backbone.featurizer.blocks.10.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 334 | 
         
            +
                "vision_backbone.featurizer.blocks.10.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 335 | 
         
            +
                "vision_backbone.featurizer.blocks.10.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 336 | 
         
            +
                "vision_backbone.featurizer.blocks.10.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 337 | 
         
            +
                "vision_backbone.featurizer.blocks.10.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 338 | 
         
            +
                "vision_backbone.featurizer.blocks.10.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 339 | 
         
            +
                "vision_backbone.featurizer.blocks.10.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 340 | 
         
            +
                "vision_backbone.featurizer.blocks.10.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 341 | 
         
            +
                "vision_backbone.featurizer.blocks.10.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 342 | 
         
            +
                "vision_backbone.featurizer.blocks.10.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 343 | 
         
            +
                "vision_backbone.featurizer.blocks.10.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 344 | 
         
            +
                "vision_backbone.featurizer.blocks.10.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 345 | 
         
            +
                "vision_backbone.featurizer.blocks.11.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 346 | 
         
            +
                "vision_backbone.featurizer.blocks.11.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 347 | 
         
            +
                "vision_backbone.featurizer.blocks.11.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 348 | 
         
            +
                "vision_backbone.featurizer.blocks.11.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 349 | 
         
            +
                "vision_backbone.featurizer.blocks.11.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 350 | 
         
            +
                "vision_backbone.featurizer.blocks.11.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 351 | 
         
            +
                "vision_backbone.featurizer.blocks.11.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 352 | 
         
            +
                "vision_backbone.featurizer.blocks.11.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 353 | 
         
            +
                "vision_backbone.featurizer.blocks.11.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 354 | 
         
            +
                "vision_backbone.featurizer.blocks.11.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 355 | 
         
            +
                "vision_backbone.featurizer.blocks.11.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 356 | 
         
            +
                "vision_backbone.featurizer.blocks.11.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 357 | 
         
            +
                "vision_backbone.featurizer.blocks.11.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 358 | 
         
            +
                "vision_backbone.featurizer.blocks.11.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 359 | 
         
            +
                "vision_backbone.featurizer.blocks.12.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 360 | 
         
            +
                "vision_backbone.featurizer.blocks.12.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 361 | 
         
            +
                "vision_backbone.featurizer.blocks.12.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 362 | 
         
            +
                "vision_backbone.featurizer.blocks.12.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 363 | 
         
            +
                "vision_backbone.featurizer.blocks.12.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 364 | 
         
            +
                "vision_backbone.featurizer.blocks.12.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 365 | 
         
            +
                "vision_backbone.featurizer.blocks.12.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 366 | 
         
            +
                "vision_backbone.featurizer.blocks.12.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 367 | 
         
            +
                "vision_backbone.featurizer.blocks.12.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 368 | 
         
            +
                "vision_backbone.featurizer.blocks.12.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 369 | 
         
            +
                "vision_backbone.featurizer.blocks.12.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 370 | 
         
            +
                "vision_backbone.featurizer.blocks.12.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 371 | 
         
            +
                "vision_backbone.featurizer.blocks.12.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 372 | 
         
            +
                "vision_backbone.featurizer.blocks.12.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 373 | 
         
            +
                "vision_backbone.featurizer.blocks.13.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 374 | 
         
            +
                "vision_backbone.featurizer.blocks.13.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 375 | 
         
            +
                "vision_backbone.featurizer.blocks.13.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 376 | 
         
            +
                "vision_backbone.featurizer.blocks.13.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 377 | 
         
            +
                "vision_backbone.featurizer.blocks.13.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 378 | 
         
            +
                "vision_backbone.featurizer.blocks.13.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 379 | 
         
            +
                "vision_backbone.featurizer.blocks.13.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 380 | 
         
            +
                "vision_backbone.featurizer.blocks.13.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 381 | 
         
            +
                "vision_backbone.featurizer.blocks.13.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 382 | 
         
            +
                "vision_backbone.featurizer.blocks.13.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 383 | 
         
            +
                "vision_backbone.featurizer.blocks.13.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 384 | 
         
            +
                "vision_backbone.featurizer.blocks.13.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 385 | 
         
            +
                "vision_backbone.featurizer.blocks.13.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 386 | 
         
            +
                "vision_backbone.featurizer.blocks.13.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 387 | 
         
            +
                "vision_backbone.featurizer.blocks.14.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 388 | 
         
            +
                "vision_backbone.featurizer.blocks.14.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 389 | 
         
            +
                "vision_backbone.featurizer.blocks.14.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 390 | 
         
            +
                "vision_backbone.featurizer.blocks.14.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 391 | 
         
            +
                "vision_backbone.featurizer.blocks.14.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 392 | 
         
            +
                "vision_backbone.featurizer.blocks.14.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 393 | 
         
            +
                "vision_backbone.featurizer.blocks.14.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 394 | 
         
            +
                "vision_backbone.featurizer.blocks.14.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 395 | 
         
            +
                "vision_backbone.featurizer.blocks.14.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 396 | 
         
            +
                "vision_backbone.featurizer.blocks.14.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 397 | 
         
            +
                "vision_backbone.featurizer.blocks.14.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 398 | 
         
            +
                "vision_backbone.featurizer.blocks.14.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 399 | 
         
            +
                "vision_backbone.featurizer.blocks.14.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 400 | 
         
            +
                "vision_backbone.featurizer.blocks.14.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 401 | 
         
            +
                "vision_backbone.featurizer.blocks.15.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 402 | 
         
            +
                "vision_backbone.featurizer.blocks.15.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 403 | 
         
            +
                "vision_backbone.featurizer.blocks.15.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 404 | 
         
            +
                "vision_backbone.featurizer.blocks.15.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 405 | 
         
            +
                "vision_backbone.featurizer.blocks.15.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 406 | 
         
            +
                "vision_backbone.featurizer.blocks.15.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 407 | 
         
            +
                "vision_backbone.featurizer.blocks.15.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 408 | 
         
            +
                "vision_backbone.featurizer.blocks.15.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 409 | 
         
            +
                "vision_backbone.featurizer.blocks.15.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 410 | 
         
            +
                "vision_backbone.featurizer.blocks.15.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 411 | 
         
            +
                "vision_backbone.featurizer.blocks.15.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 412 | 
         
            +
                "vision_backbone.featurizer.blocks.15.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 413 | 
         
            +
                "vision_backbone.featurizer.blocks.15.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 414 | 
         
            +
                "vision_backbone.featurizer.blocks.15.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 415 | 
         
            +
                "vision_backbone.featurizer.blocks.16.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 416 | 
         
            +
                "vision_backbone.featurizer.blocks.16.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 417 | 
         
            +
                "vision_backbone.featurizer.blocks.16.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 418 | 
         
            +
                "vision_backbone.featurizer.blocks.16.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 419 | 
         
            +
                "vision_backbone.featurizer.blocks.16.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 420 | 
         
            +
                "vision_backbone.featurizer.blocks.16.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 421 | 
         
            +
                "vision_backbone.featurizer.blocks.16.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 422 | 
         
            +
                "vision_backbone.featurizer.blocks.16.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 423 | 
         
            +
                "vision_backbone.featurizer.blocks.16.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 424 | 
         
            +
                "vision_backbone.featurizer.blocks.16.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 425 | 
         
            +
                "vision_backbone.featurizer.blocks.16.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 426 | 
         
            +
                "vision_backbone.featurizer.blocks.16.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 427 | 
         
            +
                "vision_backbone.featurizer.blocks.16.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 428 | 
         
            +
                "vision_backbone.featurizer.blocks.16.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 429 | 
         
            +
                "vision_backbone.featurizer.blocks.17.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 430 | 
         
            +
                "vision_backbone.featurizer.blocks.17.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 431 | 
         
            +
                "vision_backbone.featurizer.blocks.17.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 432 | 
         
            +
                "vision_backbone.featurizer.blocks.17.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 433 | 
         
            +
                "vision_backbone.featurizer.blocks.17.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 434 | 
         
            +
                "vision_backbone.featurizer.blocks.17.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 435 | 
         
            +
                "vision_backbone.featurizer.blocks.17.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 436 | 
         
            +
                "vision_backbone.featurizer.blocks.17.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 437 | 
         
            +
                "vision_backbone.featurizer.blocks.17.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 438 | 
         
            +
                "vision_backbone.featurizer.blocks.17.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 439 | 
         
            +
                "vision_backbone.featurizer.blocks.17.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 440 | 
         
            +
                "vision_backbone.featurizer.blocks.17.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 441 | 
         
            +
                "vision_backbone.featurizer.blocks.17.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 442 | 
         
            +
                "vision_backbone.featurizer.blocks.17.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 443 | 
         
            +
                "vision_backbone.featurizer.blocks.18.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 444 | 
         
            +
                "vision_backbone.featurizer.blocks.18.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 445 | 
         
            +
                "vision_backbone.featurizer.blocks.18.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 446 | 
         
            +
                "vision_backbone.featurizer.blocks.18.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 447 | 
         
            +
                "vision_backbone.featurizer.blocks.18.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 448 | 
         
            +
                "vision_backbone.featurizer.blocks.18.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 449 | 
         
            +
                "vision_backbone.featurizer.blocks.18.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 450 | 
         
            +
                "vision_backbone.featurizer.blocks.18.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 451 | 
         
            +
                "vision_backbone.featurizer.blocks.18.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 452 | 
         
            +
                "vision_backbone.featurizer.blocks.18.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 453 | 
         
            +
                "vision_backbone.featurizer.blocks.18.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 454 | 
         
            +
                "vision_backbone.featurizer.blocks.18.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 455 | 
         
            +
                "vision_backbone.featurizer.blocks.18.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 456 | 
         
            +
                "vision_backbone.featurizer.blocks.18.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 457 | 
         
            +
                "vision_backbone.featurizer.blocks.19.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 458 | 
         
            +
                "vision_backbone.featurizer.blocks.19.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 459 | 
         
            +
                "vision_backbone.featurizer.blocks.19.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 460 | 
         
            +
                "vision_backbone.featurizer.blocks.19.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 461 | 
         
            +
                "vision_backbone.featurizer.blocks.19.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 462 | 
         
            +
                "vision_backbone.featurizer.blocks.19.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 463 | 
         
            +
                "vision_backbone.featurizer.blocks.19.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 464 | 
         
            +
                "vision_backbone.featurizer.blocks.19.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 465 | 
         
            +
                "vision_backbone.featurizer.blocks.19.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 466 | 
         
            +
                "vision_backbone.featurizer.blocks.19.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 467 | 
         
            +
                "vision_backbone.featurizer.blocks.19.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 468 | 
         
            +
                "vision_backbone.featurizer.blocks.19.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 469 | 
         
            +
                "vision_backbone.featurizer.blocks.19.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 470 | 
         
            +
                "vision_backbone.featurizer.blocks.19.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 471 | 
         
            +
                "vision_backbone.featurizer.blocks.2.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 472 | 
         
            +
                "vision_backbone.featurizer.blocks.2.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 473 | 
         
            +
                "vision_backbone.featurizer.blocks.2.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 474 | 
         
            +
                "vision_backbone.featurizer.blocks.2.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 475 | 
         
            +
                "vision_backbone.featurizer.blocks.2.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 476 | 
         
            +
                "vision_backbone.featurizer.blocks.2.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 477 | 
         
            +
                "vision_backbone.featurizer.blocks.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 478 | 
         
            +
                "vision_backbone.featurizer.blocks.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 479 | 
         
            +
                "vision_backbone.featurizer.blocks.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 480 | 
         
            +
                "vision_backbone.featurizer.blocks.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 481 | 
         
            +
                "vision_backbone.featurizer.blocks.2.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 482 | 
         
            +
                "vision_backbone.featurizer.blocks.2.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 483 | 
         
            +
                "vision_backbone.featurizer.blocks.2.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 484 | 
         
            +
                "vision_backbone.featurizer.blocks.2.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 485 | 
         
            +
                "vision_backbone.featurizer.blocks.20.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 486 | 
         
            +
                "vision_backbone.featurizer.blocks.20.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 487 | 
         
            +
                "vision_backbone.featurizer.blocks.20.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 488 | 
         
            +
                "vision_backbone.featurizer.blocks.20.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 489 | 
         
            +
                "vision_backbone.featurizer.blocks.20.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 490 | 
         
            +
                "vision_backbone.featurizer.blocks.20.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 491 | 
         
            +
                "vision_backbone.featurizer.blocks.20.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 492 | 
         
            +
                "vision_backbone.featurizer.blocks.20.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 493 | 
         
            +
                "vision_backbone.featurizer.blocks.20.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 494 | 
         
            +
                "vision_backbone.featurizer.blocks.20.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 495 | 
         
            +
                "vision_backbone.featurizer.blocks.20.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 496 | 
         
            +
                "vision_backbone.featurizer.blocks.20.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 497 | 
         
            +
                "vision_backbone.featurizer.blocks.20.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 498 | 
         
            +
                "vision_backbone.featurizer.blocks.20.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 499 | 
         
            +
                "vision_backbone.featurizer.blocks.21.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 500 | 
         
            +
                "vision_backbone.featurizer.blocks.21.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 501 | 
         
            +
                "vision_backbone.featurizer.blocks.21.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 502 | 
         
            +
                "vision_backbone.featurizer.blocks.21.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 503 | 
         
            +
                "vision_backbone.featurizer.blocks.21.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 504 | 
         
            +
                "vision_backbone.featurizer.blocks.21.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 505 | 
         
            +
                "vision_backbone.featurizer.blocks.21.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 506 | 
         
            +
                "vision_backbone.featurizer.blocks.21.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 507 | 
         
            +
                "vision_backbone.featurizer.blocks.21.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 508 | 
         
            +
                "vision_backbone.featurizer.blocks.21.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 509 | 
         
            +
                "vision_backbone.featurizer.blocks.21.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 510 | 
         
            +
                "vision_backbone.featurizer.blocks.21.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 511 | 
         
            +
                "vision_backbone.featurizer.blocks.21.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 512 | 
         
            +
                "vision_backbone.featurizer.blocks.21.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 513 | 
         
            +
                "vision_backbone.featurizer.blocks.22.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 514 | 
         
            +
                "vision_backbone.featurizer.blocks.22.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 515 | 
         
            +
                "vision_backbone.featurizer.blocks.22.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 516 | 
         
            +
                "vision_backbone.featurizer.blocks.22.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 517 | 
         
            +
                "vision_backbone.featurizer.blocks.22.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 518 | 
         
            +
                "vision_backbone.featurizer.blocks.22.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 519 | 
         
            +
                "vision_backbone.featurizer.blocks.22.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 520 | 
         
            +
                "vision_backbone.featurizer.blocks.22.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 521 | 
         
            +
                "vision_backbone.featurizer.blocks.22.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 522 | 
         
            +
                "vision_backbone.featurizer.blocks.22.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 523 | 
         
            +
                "vision_backbone.featurizer.blocks.22.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 524 | 
         
            +
                "vision_backbone.featurizer.blocks.22.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 525 | 
         
            +
                "vision_backbone.featurizer.blocks.22.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 526 | 
         
            +
                "vision_backbone.featurizer.blocks.22.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 527 | 
         
            +
                "vision_backbone.featurizer.blocks.23.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 528 | 
         
            +
                "vision_backbone.featurizer.blocks.23.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 529 | 
         
            +
                "vision_backbone.featurizer.blocks.23.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 530 | 
         
            +
                "vision_backbone.featurizer.blocks.23.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 531 | 
         
            +
                "vision_backbone.featurizer.blocks.23.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 532 | 
         
            +
                "vision_backbone.featurizer.blocks.23.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 533 | 
         
            +
                "vision_backbone.featurizer.blocks.23.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 534 | 
         
            +
                "vision_backbone.featurizer.blocks.23.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 535 | 
         
            +
                "vision_backbone.featurizer.blocks.23.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 536 | 
         
            +
                "vision_backbone.featurizer.blocks.23.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 537 | 
         
            +
                "vision_backbone.featurizer.blocks.23.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 538 | 
         
            +
                "vision_backbone.featurizer.blocks.23.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 539 | 
         
            +
                "vision_backbone.featurizer.blocks.23.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 540 | 
         
            +
                "vision_backbone.featurizer.blocks.23.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 541 | 
         
            +
                "vision_backbone.featurizer.blocks.3.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 542 | 
         
            +
                "vision_backbone.featurizer.blocks.3.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 543 | 
         
            +
                "vision_backbone.featurizer.blocks.3.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 544 | 
         
            +
                "vision_backbone.featurizer.blocks.3.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 545 | 
         
            +
                "vision_backbone.featurizer.blocks.3.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 546 | 
         
            +
                "vision_backbone.featurizer.blocks.3.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 547 | 
         
            +
                "vision_backbone.featurizer.blocks.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 548 | 
         
            +
                "vision_backbone.featurizer.blocks.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 549 | 
         
            +
                "vision_backbone.featurizer.blocks.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 550 | 
         
            +
                "vision_backbone.featurizer.blocks.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 551 | 
         
            +
                "vision_backbone.featurizer.blocks.3.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 552 | 
         
            +
                "vision_backbone.featurizer.blocks.3.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 553 | 
         
            +
                "vision_backbone.featurizer.blocks.3.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 554 | 
         
            +
                "vision_backbone.featurizer.blocks.3.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 555 | 
         
            +
                "vision_backbone.featurizer.blocks.4.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 556 | 
         
            +
                "vision_backbone.featurizer.blocks.4.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 557 | 
         
            +
                "vision_backbone.featurizer.blocks.4.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 558 | 
         
            +
                "vision_backbone.featurizer.blocks.4.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 559 | 
         
            +
                "vision_backbone.featurizer.blocks.4.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 560 | 
         
            +
                "vision_backbone.featurizer.blocks.4.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 561 | 
         
            +
                "vision_backbone.featurizer.blocks.4.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 562 | 
         
            +
                "vision_backbone.featurizer.blocks.4.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 563 | 
         
            +
                "vision_backbone.featurizer.blocks.4.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 564 | 
         
            +
                "vision_backbone.featurizer.blocks.4.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 565 | 
         
            +
                "vision_backbone.featurizer.blocks.4.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 566 | 
         
            +
                "vision_backbone.featurizer.blocks.4.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 567 | 
         
            +
                "vision_backbone.featurizer.blocks.4.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 568 | 
         
            +
                "vision_backbone.featurizer.blocks.4.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 569 | 
         
            +
                "vision_backbone.featurizer.blocks.5.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 570 | 
         
            +
                "vision_backbone.featurizer.blocks.5.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 571 | 
         
            +
                "vision_backbone.featurizer.blocks.5.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 572 | 
         
            +
                "vision_backbone.featurizer.blocks.5.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 573 | 
         
            +
                "vision_backbone.featurizer.blocks.5.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 574 | 
         
            +
                "vision_backbone.featurizer.blocks.5.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 575 | 
         
            +
                "vision_backbone.featurizer.blocks.5.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 576 | 
         
            +
                "vision_backbone.featurizer.blocks.5.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 577 | 
         
            +
                "vision_backbone.featurizer.blocks.5.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 578 | 
         
            +
                "vision_backbone.featurizer.blocks.5.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 579 | 
         
            +
                "vision_backbone.featurizer.blocks.5.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 580 | 
         
            +
                "vision_backbone.featurizer.blocks.5.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 581 | 
         
            +
                "vision_backbone.featurizer.blocks.5.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 582 | 
         
            +
                "vision_backbone.featurizer.blocks.5.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 583 | 
         
            +
                "vision_backbone.featurizer.blocks.6.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 584 | 
         
            +
                "vision_backbone.featurizer.blocks.6.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 585 | 
         
            +
                "vision_backbone.featurizer.blocks.6.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 586 | 
         
            +
                "vision_backbone.featurizer.blocks.6.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 587 | 
         
            +
                "vision_backbone.featurizer.blocks.6.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 588 | 
         
            +
                "vision_backbone.featurizer.blocks.6.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 589 | 
         
            +
                "vision_backbone.featurizer.blocks.6.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 590 | 
         
            +
                "vision_backbone.featurizer.blocks.6.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 591 | 
         
            +
                "vision_backbone.featurizer.blocks.6.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 592 | 
         
            +
                "vision_backbone.featurizer.blocks.6.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 593 | 
         
            +
                "vision_backbone.featurizer.blocks.6.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 594 | 
         
            +
                "vision_backbone.featurizer.blocks.6.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 595 | 
         
            +
                "vision_backbone.featurizer.blocks.6.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 596 | 
         
            +
                "vision_backbone.featurizer.blocks.6.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 597 | 
         
            +
                "vision_backbone.featurizer.blocks.7.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 598 | 
         
            +
                "vision_backbone.featurizer.blocks.7.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 599 | 
         
            +
                "vision_backbone.featurizer.blocks.7.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 600 | 
         
            +
                "vision_backbone.featurizer.blocks.7.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 601 | 
         
            +
                "vision_backbone.featurizer.blocks.7.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 602 | 
         
            +
                "vision_backbone.featurizer.blocks.7.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 603 | 
         
            +
                "vision_backbone.featurizer.blocks.7.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 604 | 
         
            +
                "vision_backbone.featurizer.blocks.7.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 605 | 
         
            +
                "vision_backbone.featurizer.blocks.7.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 606 | 
         
            +
                "vision_backbone.featurizer.blocks.7.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 607 | 
         
            +
                "vision_backbone.featurizer.blocks.7.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 608 | 
         
            +
                "vision_backbone.featurizer.blocks.7.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 609 | 
         
            +
                "vision_backbone.featurizer.blocks.7.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 610 | 
         
            +
                "vision_backbone.featurizer.blocks.7.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 611 | 
         
            +
                "vision_backbone.featurizer.blocks.8.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 612 | 
         
            +
                "vision_backbone.featurizer.blocks.8.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 613 | 
         
            +
                "vision_backbone.featurizer.blocks.8.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 614 | 
         
            +
                "vision_backbone.featurizer.blocks.8.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 615 | 
         
            +
                "vision_backbone.featurizer.blocks.8.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 616 | 
         
            +
                "vision_backbone.featurizer.blocks.8.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 617 | 
         
            +
                "vision_backbone.featurizer.blocks.8.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 618 | 
         
            +
                "vision_backbone.featurizer.blocks.8.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 619 | 
         
            +
                "vision_backbone.featurizer.blocks.8.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 620 | 
         
            +
                "vision_backbone.featurizer.blocks.8.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 621 | 
         
            +
                "vision_backbone.featurizer.blocks.8.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 622 | 
         
            +
                "vision_backbone.featurizer.blocks.8.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 623 | 
         
            +
                "vision_backbone.featurizer.blocks.8.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 624 | 
         
            +
                "vision_backbone.featurizer.blocks.8.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 625 | 
         
            +
                "vision_backbone.featurizer.blocks.9.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 626 | 
         
            +
                "vision_backbone.featurizer.blocks.9.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 627 | 
         
            +
                "vision_backbone.featurizer.blocks.9.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 628 | 
         
            +
                "vision_backbone.featurizer.blocks.9.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 629 | 
         
            +
                "vision_backbone.featurizer.blocks.9.ls1.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 630 | 
         
            +
                "vision_backbone.featurizer.blocks.9.ls2.scale_factor": "model-00001-of-00003.safetensors",
         
     | 
| 631 | 
         
            +
                "vision_backbone.featurizer.blocks.9.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 632 | 
         
            +
                "vision_backbone.featurizer.blocks.9.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 633 | 
         
            +
                "vision_backbone.featurizer.blocks.9.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 634 | 
         
            +
                "vision_backbone.featurizer.blocks.9.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 635 | 
         
            +
                "vision_backbone.featurizer.blocks.9.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 636 | 
         
            +
                "vision_backbone.featurizer.blocks.9.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 637 | 
         
            +
                "vision_backbone.featurizer.blocks.9.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 638 | 
         
            +
                "vision_backbone.featurizer.blocks.9.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 639 | 
         
            +
                "vision_backbone.featurizer.cls_token": "model-00001-of-00003.safetensors",
         
     | 
| 640 | 
         
            +
                "vision_backbone.featurizer.norm.bias": "model-00001-of-00003.safetensors",
         
     | 
| 641 | 
         
            +
                "vision_backbone.featurizer.norm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 642 | 
         
            +
                "vision_backbone.featurizer.patch_embed.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 643 | 
         
            +
                "vision_backbone.featurizer.patch_embed.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 644 | 
         
            +
                "vision_backbone.featurizer.pos_embed": "model-00001-of-00003.safetensors",
         
     | 
| 645 | 
         
            +
                "vision_backbone.featurizer.reg_token": "model-00001-of-00003.safetensors",
         
     | 
| 646 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.kv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 647 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.kv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 648 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.latent": "model-00001-of-00003.safetensors",
         
     | 
| 649 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 650 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 651 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 652 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 653 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.norm.bias": "model-00001-of-00003.safetensors",
         
     | 
| 654 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.norm.weight": "model-00001-of-00003.safetensors",
         
     | 
| 655 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 656 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 657 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.q.bias": "model-00001-of-00003.safetensors",
         
     | 
| 658 | 
         
            +
                "vision_backbone.fused_featurizer.attn_pool.q.weight": "model-00001-of-00003.safetensors",
         
     | 
| 659 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 660 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 661 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 662 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 663 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 664 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 665 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 666 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 667 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 668 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 669 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 670 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.0.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 671 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 672 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 673 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 674 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 675 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 676 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 677 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 678 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 679 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 680 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 681 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 682 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.1.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 683 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 684 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 685 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 686 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 687 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 688 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 689 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 690 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 691 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 692 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 693 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 694 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.10.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 695 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 696 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 697 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 698 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 699 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 700 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 701 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 702 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 703 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 704 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 705 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 706 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.11.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 707 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 708 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 709 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 710 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 711 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 712 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 713 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 714 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 715 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 716 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 717 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 718 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.12.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 719 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 720 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 721 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 722 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 723 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 724 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 725 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 726 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 727 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 728 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 729 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 730 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.13.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 731 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 732 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 733 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 734 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 735 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 736 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 737 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 738 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 739 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 740 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 741 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 742 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.14.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 743 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 744 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 745 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 746 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 747 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 748 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 749 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 750 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 751 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 752 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 753 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 754 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.15.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 755 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 756 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 757 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 758 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 759 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 760 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 761 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 762 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 763 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 764 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 765 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 766 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.16.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 767 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 768 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 769 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 770 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 771 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 772 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 773 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 774 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 775 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 776 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 777 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 778 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.17.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 779 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 780 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 781 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 782 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 783 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 784 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 785 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 786 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 787 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 788 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 789 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 790 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.18.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 791 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 792 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 793 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 794 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 795 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 796 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 797 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 798 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 799 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 800 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 801 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 802 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.19.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 803 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 804 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 805 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 806 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 807 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 808 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 809 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 810 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 811 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 812 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 813 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 814 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.2.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 815 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 816 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 817 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 818 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 819 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 820 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 821 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 822 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 823 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 824 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 825 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 826 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.20.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 827 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 828 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 829 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 830 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 831 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 832 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 833 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 834 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 835 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 836 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 837 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 838 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.21.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 839 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 840 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 841 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 842 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 843 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 844 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 845 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 846 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 847 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 848 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 849 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 850 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.22.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 851 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 852 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 853 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 854 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 855 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 856 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 857 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 858 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 859 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 860 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 861 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 862 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.23.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 863 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 864 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 865 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 866 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 867 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 868 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 869 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 870 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 871 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 872 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 873 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 874 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.24.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 875 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 876 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 877 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 878 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 879 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 880 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 881 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 882 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 883 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 884 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 885 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 886 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.25.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 887 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 888 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 889 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 890 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 891 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 892 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 893 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 894 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 895 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 896 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 897 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 898 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.26.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 899 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 900 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 901 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 902 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 903 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 904 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 905 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 906 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 907 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 908 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 909 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 910 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.3.norm2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 911 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.attn.proj.bias": "model-00001-of-00003.safetensors",
         
     | 
| 912 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.attn.proj.weight": "model-00001-of-00003.safetensors",
         
     | 
| 913 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.attn.qkv.bias": "model-00001-of-00003.safetensors",
         
     | 
| 914 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.attn.qkv.weight": "model-00001-of-00003.safetensors",
         
     | 
| 915 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.mlp.fc1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 916 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.mlp.fc1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 917 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.mlp.fc2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 918 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.mlp.fc2.weight": "model-00001-of-00003.safetensors",
         
     | 
| 919 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.norm1.bias": "model-00001-of-00003.safetensors",
         
     | 
| 920 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.norm1.weight": "model-00001-of-00003.safetensors",
         
     | 
| 921 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.norm2.bias": "model-00001-of-00003.safetensors",
         
     | 
| 922 | 
         
            +
                "vision_backbone.fused_featurizer.blocks.4.norm2.weight": "model-00001-of-00003.safetensors",
         
    "vision_backbone.fused_featurizer.blocks.5.attn.proj.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.attn.proj.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.attn.qkv.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.attn.qkv.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.mlp.fc1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.mlp.fc1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.mlp.fc2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.mlp.fc2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.norm1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.norm1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.norm2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.5.norm2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.attn.proj.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.attn.proj.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.attn.qkv.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.attn.qkv.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.mlp.fc1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.mlp.fc1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.mlp.fc2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.mlp.fc2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.norm1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.norm1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.norm2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.6.norm2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.attn.proj.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.attn.proj.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.attn.qkv.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.attn.qkv.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.mlp.fc1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.mlp.fc1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.mlp.fc2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.mlp.fc2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.norm1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.norm1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.norm2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.7.norm2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.attn.proj.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.attn.proj.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.attn.qkv.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.attn.qkv.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.mlp.fc1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.mlp.fc1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.mlp.fc2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.mlp.fc2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.norm1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.norm1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.norm2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.8.norm2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.attn.proj.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.attn.proj.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.attn.qkv.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.attn.qkv.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.mlp.fc1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.mlp.fc1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.mlp.fc2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.mlp.fc2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.norm1.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.norm1.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.norm2.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.blocks.9.norm2.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.norm.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.norm.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.patch_embed.proj.bias": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.patch_embed.proj.weight": "model-00001-of-00003.safetensors",
    "vision_backbone.fused_featurizer.pos_embed": "model-00001-of-00003.safetensors"
  }
}
        modeling_prismatic.py
    ADDED
    
@@ -0,0 +1,565 @@
"""
modeling_prismatic.py

Core HuggingFace-style PrismaticPreTrainedModel and PrismaticForConditionalGeneration class definitions, inheriting
from the default `transformers.PreTrainedModel`. Meant to be standalone and self-contained, but exactly replicates the
logic in `prismatic.models.vlms.prismatic.py`.

Note =>> for the time being, not adding the custom HF "docstring" formatting.

References [LLaVa, IDEFICS-2]:
    => https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/modeling_llava.py
    => https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics2/modeling_idefics2.py
"""

import logging
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union

import numpy as np
import timm
import tokenizers
import torch
import torch.nn as nn
import transformers
from timm.models.vision_transformer import LayerScale
from transformers import AutoModelForCausalLM, PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import ModelOutput

from .configuration_prismatic import OpenVLAConfig, PrismaticConfig

# Get Logger
logger = logging.getLogger(__name__)


# === PyTorch/HuggingFace Default IGNORE_INDEX (for CrossEntropyLoss labels)
IGNORE_INDEX = -100


# === Utility Functions for Monkey-Patching ===
def unpack_tuple(fn: Callable[[Any], Tuple[Any]]) -> Callable[[Any], Any]:
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        result = fn(*args, **kwargs)
        return result[0] if isinstance(result, tuple) else result

    return wrapper
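
# Minimal usage sketch for `unpack_tuple` (illustrative only; the lambda below is a
# stand-in for `get_intermediate_layers`, which returns a tuple of per-layer outputs):
#
#   >>> wrapped = unpack_tuple(lambda: (torch.zeros(1, 256, 1024),))
#   >>> wrapped().shape
#   torch.Size([1, 256, 1024])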


# HF Transformers overwrites parameters with names containing `gamma`; we're going to patch VisionBackbone.LayerScale.
#   =>> TIMM :: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L109
#   =>> Transformers :: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L3960
def _ls_new_forward(self, x: torch.Tensor) -> torch.Tensor:
    return x.mul_(self.scale_factor) if self.inplace else x * self.scale_factor


def ls_apply_patch(ls_module: LayerScale):
    ls_module.scale_factor = nn.Parameter(ls_module.gamma.clone())
    ls_module.forward = _ls_new_forward.__get__(ls_module, LayerScale)
    del ls_module.gamma
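
# Effect of the patch above (hedged sketch): each TIMM `LayerScale.gamma` parameter is
# re-registered as `scale_factor`, so the HF `gamma`-renaming logic no longer touches it.
# After patching every LayerScale module in a backbone:
#
#   >>> any("gamma" in name for name, _ in backbone.named_parameters())
#   False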


# === Prismatic Vision Backbone (nn.Module) Definitions (w/ Fused Backbone Support) ===
class PrismaticVisionBackbone(nn.Module):
    def __init__(
        self,
        use_fused_vision_backbone: bool,
        image_sizes: List[int],
        timm_model_ids: List[str],
        timm_override_act_layers: List[Optional[str]],
    ) -> None:
        super().__init__()
        self.use_fused_vision_backbone = use_fused_vision_backbone

        # [Contract] Validate number of (fused) vision backbones, then create & instantiate the "alpha" featurizer
        #   =>> Note :: Monkey-Patch the `forward()` function of the backbone to ensure FSDP-compatibility
        #               Hardcodes `get_intermediate_layers` to return the **SECOND-TO-LAST** layer patches!
        assert len(timm_model_ids) <= 2, "Prismatic models only support up to 2 (fused) vision backbones!"
        self.featurizer = timm.create_model(
            timm_model_ids[0],
            pretrained=False,
            num_classes=0,
            img_size=image_sizes[0],
            act_layer=timm_override_act_layers[0],
        )
        self.featurizer.forward = unpack_tuple(
            partial(self.featurizer.get_intermediate_layers, n={len(self.featurizer.blocks) - 2})
        )
        self.embed_dim = self.featurizer.embed_dim

        # If `use_fused_vision_backbone` =>> create "beta" featurizer
        if self.use_fused_vision_backbone:
            self.fused_featurizer = timm.create_model(
                timm_model_ids[1],
                pretrained=False,
                num_classes=0,
                img_size=image_sizes[1],
                act_layer=timm_override_act_layers[1],
            )
            self.fused_featurizer.forward = unpack_tuple(
                partial(self.fused_featurizer.get_intermediate_layers, n={len(self.fused_featurizer.blocks) - 2})
            )
            self.embed_dim += self.fused_featurizer.embed_dim

        # Patch `vision_backbone.featurizer` and `vision_backbone.fused_featurizer` with HF-Compatible LayerScale
        for module in self.featurizer.modules():
            if isinstance(module, LayerScale):
                ls_apply_patch(module)

        if self.use_fused_vision_backbone:
            for module in self.fused_featurizer.modules():
                if isinstance(module, LayerScale):
                    ls_apply_patch(module)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """Run image (`pixel_values`) through featurizer; if channel-stacked, dispatch to both featurizers and concatenate patch features."""
        if not self.use_fused_vision_backbone:
            return self.featurizer(pixel_values)

        # Split `pixel_values :: [bsz, 2 * 3, resolution, resolution]` =>> featurize =>> channel stack
        img, img_fused = torch.split(pixel_values, [3, 3], dim=1)
        patches, patches_fused = self.featurizer(img), self.fused_featurizer(img_fused)

        return torch.cat([patches, patches_fused], dim=2)
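
# Shape sketch for the fused path (illustrative; the 256-patch sizes assume dual
# patch-14 backbones at 224x224 resolution, as in the released OpenVLA configuration):
#
#   pixel_values  :: [bsz, 6, 224, 224]  =>> two RGB images stacked on the channel dim
#   patches       :: [bsz, 256, embed_dim_alpha]
#   patches_fused :: [bsz, 256, embed_dim_beta]
#   output        :: [bsz, 256, embed_dim_alpha + embed_dim_beta]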


# === Prismatic Projector (nn.Module) Definitions ===
class PrismaticProjector(nn.Module):
    def __init__(self, use_fused_vision_backbone: bool, vision_dim: int, llm_dim: int) -> None:
        super().__init__()
        self.use_fused_vision_backbone = use_fused_vision_backbone
        self.vision_dim, self.llm_dim = vision_dim, llm_dim

        # Switch on `use_fused_vision_backbone` =>> use slightly different MLPs and projection factors!
        if not self.use_fused_vision_backbone:
            self.fc1 = nn.Linear(self.vision_dim, self.llm_dim, bias=True)
            self.fc2 = nn.Linear(self.llm_dim, self.llm_dim, bias=True)
            self.act_fn1 = nn.GELU()
        else:
            initial_projection_dim = 4 * vision_dim
            self.fc1 = nn.Linear(self.vision_dim, initial_projection_dim, bias=True)
            self.fc2 = nn.Linear(initial_projection_dim, self.llm_dim, bias=True)
            self.fc3 = nn.Linear(self.llm_dim, self.llm_dim, bias=True)
            self.act_fn1 = nn.GELU()
            self.act_fn2 = nn.GELU()

    def forward(self, img_patches: torch.Tensor) -> torch.Tensor:
        if not self.use_fused_vision_backbone:
            projected_features = self.fc1(img_patches)
            projected_features = self.act_fn1(projected_features)
            projected_features = self.fc2(projected_features)
        else:
            projected_features = self.fc1(img_patches)
            projected_features = self.act_fn1(projected_features)
            projected_features = self.fc2(projected_features)
            projected_features = self.act_fn2(projected_features)
            projected_features = self.fc3(projected_features)

        return projected_features
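
# Illustrative instantiation (hedged sketch; dims assume fused DINOv2 + SigLIP
# backbones, i.e. vision_dim = 1024 + 1152 = 2176, with a Llama-2 hidden size of 4096).
# The fused path is a 3-layer GELU MLP: vision_dim -> 4 * vision_dim -> llm_dim -> llm_dim.
#
#   >>> projector = PrismaticProjector(True, vision_dim=2176, llm_dim=4096)
#   >>> projector(torch.randn(1, 256, 2176)).shape
#   torch.Size([1, 256, 4096])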


# === Main HF Class Definitions ===
@dataclass
class PrismaticCausalLMOutputWithPast(ModelOutput):
    """Base class for Prismatic causal (visually-conditioned) language model outputs; also exposes visual features."""

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

    # Additions for VLMs
    projector_features: Optional[torch.FloatTensor] = None
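
# Usage sketch (illustrative; `model`, `ids`, and `px` are placeholder names): request
# projector features on a forward pass and read them off the output dataclass:
#
#   >>> out = model(input_ids=ids, pixel_values=px, output_projector_features=True)
#   >>> out.logits.shape, out.projector_features.shape
#   (torch.Size([1, seq_len, vocab_size]), torch.Size([1, num_patches, llm_dim]))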


class PrismaticPreTrainedModel(PreTrainedModel):
    config_class: PretrainedConfig = PrismaticConfig
    base_model_prefix: str = "model"
    supports_gradient_checkpointing: bool = True

    _no_split_modules: ClassVar[List[str]] = ["PrismaticProjector"]
    _skip_keys_device_placement: str = "past_key_values"
    _supports_flash_attn_2: bool = True

    def _init_weights(self, module: nn.Module) -> None:
        # Important :: this HF ported version is *not* meant for training from scratch; only inference and fine-tuning!
        #   => As such, this init_weights code is not correct; if training VLMs from scratch, use the main codebase at
        #      https://github.com/TRI-ML/prismatic-vlms
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @property
    def _supports_sdpa(self) -> bool:
        """Check whether the underlying LLM supports SDPA attention."""
        return self.language_model._supports_sdpa


class PrismaticForConditionalGeneration(PrismaticPreTrainedModel):
    def __init__(self, config: PrismaticConfig) -> None:
        super().__init__(config)

        # [Validation] Lightweight Validation of `config` Fields + Dependency Versions
        if config.use_fused_vision_backbone is None:
            raise ValueError("Missing config field `use_fused_vision_backbone`")

        if timm.__version__ not in {"0.9.10", "0.9.11", "0.9.12", "0.9.16"}:
            raise NotImplementedError(
                "TIMM Version must be >= 0.9.10 and < 1.0.0 (breaking); please raise a GitHub Issue "
                "if you urgently need support for latest TIMM versions."
            )

        if (transformers.__version__ != "4.40.1") or (tokenizers.__version__ != "0.19.1"):
            logger.warning(
                f"Expected `transformers==4.40.1` and `tokenizers==0.19.1` but got "
                f"`transformers=={transformers.__version__}` and `tokenizers=={tokenizers.__version__}`; "
                f"there might be inference-time regressions due to dependency changes. If in doubt, please "
                f"use the above versions."
            )

        # Instantiate PrismaticVisionBackbone (w/ Potential Fused Backbone)
        self.vision_backbone = PrismaticVisionBackbone(
            config.use_fused_vision_backbone, config.image_sizes, config.timm_model_ids, config.timm_override_act_layers
        )

        # Create Multimodal Projector
        self.projector = PrismaticProjector(
            config.use_fused_vision_backbone,
            vision_dim=self.vision_backbone.embed_dim,
            llm_dim=config.text_config.hidden_size,
        )

        # Instantiate LLM Backbone
        self.language_model = AutoModelForCausalLM.from_config(
            config.text_config, attn_implementation=config._attn_implementation
        )
        self.vocab_size = config.text_config.vocab_size
        self.pad_token_id = config.pad_token_id

        # HF Boilerplate =>> initializes weights via `_init_weights()` and sets gradient checkpointing
        self.post_init()

    # === `PreTrainedModel` Boilerplate ===
    def get_input_embeddings(self) -> nn.Module:
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value: nn.Module) -> None:
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Module:
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings: nn.Module) -> None:
        self.language_model.set_output_embeddings(new_embeddings)

    def get_decoder(self) -> nn.Module:
        return self.language_model.get_decoder()

    def set_decoder(self, decoder: nn.Module) -> None:
        self.language_model.set_decoder(decoder)

    def tie_weights(self) -> None:
        self.language_model.tie_weights()  # Note: `Llama-2` and `Mistral` don't tie weights (no-op)

    def resize_token_embeddings(
        self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None
    ) -> nn.Embedding:
        updated_embeddings = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)

        # Update config/instance variables
        self.config.text_config.vocab_size = updated_embeddings.num_embeddings
        self.vocab_size = updated_embeddings.num_embeddings

        return updated_embeddings
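
    # End-to-end loading sketch (illustrative; `<repo-id>` is a placeholder for wherever
    # this checkpoint is hosted, and the call relies on the repo's `auto_map` /
    # `trust_remote_code` wiring to resolve this class):
    #
    #   >>> from transformers import AutoModelForVision2Seq
    #   >>> vla = AutoModelForVision2Seq.from_pretrained(
    #   ...     "<repo-id>", torch_dtype=torch.bfloat16, trust_remote_code=True
    #   ... )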

    # === Core Prismatic VLM `forward()` Logic ===
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_projector_features: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, PrismaticCausalLMOutputWithPast]:
        """Run a forward pass through the VLM, returning a PrismaticCausalLMOutputWithPast instance."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_projector_features = output_projector_features if output_projector_features is not None else False
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Respect `use_cache` only if not training (even if `gradient_checkpointing` is off)
        use_cache = use_cache and not self.training

        # Instantiate Placeholder for Projector Features
        projected_patch_embeddings = None

        # Note :: We only support forward passes with the following cases:
        #   => Cached Generation :: (input_ids.shape[1] == 1) and (past_key_values is not None)
        #   => Unimodal Forward :: (pixel_values is None)
        #   => Multimodal Forward :: (pixel_values is not None) and (input_ids/embeds.shape[0] == pixel_values.shape[0])

        # === Handle Generation with Cache (`input_ids.shape[1] == 1`) =>> requires `past_key_values` ===
        if input_ids.shape[1] == 1:
            assert input_ids.shape[0] == 1, "Generation is only currently supported for batch size of 1!"
            assert past_key_values is not None, "You must provide `past_key_values` during cached generation!"
            assert labels is None, "Unexpected key `labels` provided during cached generation!"

            language_model_output = self.language_model(
                input_ids=input_ids,
                attention_mask=None,
                position_ids=None,
                past_key_values=past_key_values,
                inputs_embeds=None,
                labels=None,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

        # === Handle Unimodal Forward ===
        elif pixel_values is None:
            assert (input_ids is not None) and (inputs_embeds is None), "Missing `input_ids` in language-only forward!"
            assert past_key_values is None, "Unexpected key `past_key_values` provided during language-only forward!"

            language_model_output = self.language_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=None,
                past_key_values=None,
                inputs_embeds=None,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
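
        # Dispatch sketch (illustrative): the first multimodal step primes the KV cache;
        # every subsequent decoding step takes the single-token path above:
        #
        #   step 0 :: input_ids=[1, T], pixel_values=[1, C, H, W]  =>> multimodal branch (below)
        #   step k :: input_ids=[1, 1], past_key_values=<cache>    =>> cached-generation branch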
| 360 | 
         
            +
             
     | 
| 361 | 
         
            +
                    # === Handle Multimodal Forward ===
         
     | 
| 362 | 
         
            +
                    elif (input_ids.shape[0] == pixel_values.shape[0]) or (inputs_embeds.shape[0] == pixel_values.shape[0]):
         
     | 
| 363 | 
         
            +
                        assert past_key_values is None, "Unexpected key `past_key_values` provided during language-only forward!"
         
     | 
| 364 | 
         
            +
             
     | 
| 365 | 
         
            +
                        # Visual Feature Extraction
         
     | 
| 366 | 
         
            +
                        patch_features = self.vision_backbone(pixel_values)
         
     | 
| 367 | 
         
            +
             
     | 
| 368 | 
         
            +
                        # Projection Logic =>> Update Attention Mask
         
     | 
| 369 | 
         
            +
                        projected_patch_embeddings = self.projector(patch_features)
         
     | 
| 370 | 
         
            +
                        projected_patch_attention_mask = None
         
     | 
| 371 | 
         
            +
                        if attention_mask is not None:
         
     | 
| 372 | 
         
            +
                            projected_patch_attention_mask = torch.full(
         
     | 
| 373 | 
         
            +
                                (projected_patch_embeddings.shape[0], projected_patch_embeddings.shape[1]),
         
     | 
| 374 | 
         
            +
                                fill_value=True,
         
     | 
| 375 | 
         
            +
                                dtype=attention_mask.dtype,
         
     | 
| 376 | 
         
            +
                                device=attention_mask.device,
         
     | 
| 377 | 
         
            +
                            )
         
     | 
| 378 | 
         
            +
             
     | 
| 379 | 
         
            +
                        # Get Input Embeddings (from Language Model Embeddings)
         
     | 
| 380 | 
         
            +
                        input_embeddings = self.get_input_embeddings()(input_ids)
         
     | 
| 381 | 
         
            +
             
     | 
| 382 | 
         
            +
                        # Build Multimodal Embeddings & Attention Mask =>> Prismatic defaults to inserting after <BOS> token (1:)
         
     | 
| 383 | 
         
            +
                        multimodal_embeddings = torch.cat(
         
     | 
| 384 | 
         
            +
                            [input_embeddings[:, :1, :], projected_patch_embeddings, input_embeddings[:, 1:, :]], dim=1
         
     | 
| 385 | 
         
            +
                        )
         
     | 
| 386 | 
         
            +
                        multimodal_attention_mask = None
         
     | 
| 387 | 
         
            +
                        if attention_mask is not None:
         
     | 
| 388 | 
         
            +
                            multimodal_attention_mask = torch.cat(
         
     | 
| 389 | 
         
            +
                                [attention_mask[:, :1], projected_patch_attention_mask, attention_mask[:, 1:]], dim=1
         
     | 
| 390 | 
         
            +
                            )
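            # Note: the resulting sequence is laid out as
            #   [<BOS>] + [projected patch embeddings] + [remaining prompt tokens],
            # so the language model treats patch embeddings like ordinary (causally
            # attended) tokens. With a 224 x 224 input and 14 x 14 patches, for
            # example, that is 256 patch embeddings inserted right after <BOS>.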
         
            # Build Labels (if specified) =>> Ignore Labels for Patch Embeddings
            multimodal_labels = None
            if labels is not None:
                projected_patch_labels = torch.full(
                    (projected_patch_embeddings.shape[0], projected_patch_embeddings.shape[1]),
                    fill_value=IGNORE_INDEX,
                    dtype=labels.dtype,
                    device=labels.device,
                )
                multimodal_labels = torch.cat([labels[:, :1], projected_patch_labels, labels[:, 1:]], dim=1)
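            # Patch positions carry IGNORE_INDEX, so the cross-entropy loss is
            # computed only over text (and action) tokens, never over image patches.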
         
            # Dispatch to Language Model
            language_model_output = self.language_model(
                input_ids=None,
                attention_mask=multimodal_attention_mask,
                position_ids=None,
                past_key_values=None,
                inputs_embeds=multimodal_embeddings,
                labels=multimodal_labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

        # === Otherwise =>> Assume Invalid! ===
        elif (input_ids.shape[0] != pixel_values.shape[0]) or (inputs_embeds.shape[0] != pixel_values.shape[0]):
            raise ValueError("Non-homogeneous batch of (text, image) input -- forward() does not support mixed batches!")

        else:
            raise ValueError(
                "Invalid PrismaticForConditionalGeneration `forward()` call with provided arguments:\n"
                f"=> `input_ids` = {input_ids is not None}\n"
                f"=> `attention_mask` = {attention_mask is not None}\n"
                f"=> `pixel_values` = {pixel_values is not None}\n"
                f"=> `labels` = {labels is not None}\n"
                f"=> `inputs_embeds` = {inputs_embeds is not None}\n"
                f"=> `past_key_values` = {past_key_values is not None}\n"
                f"=> `use_cache` = {use_cache}"
            )

        # Unpack `language_model_output` and return PrismaticCausalLMOutputWithPast (or tuple if not `return_dict`)
        if not return_dict:
            if output_projector_features and (projected_patch_embeddings is not None):
                return *language_model_output, projected_patch_embeddings

            return language_model_output

        return PrismaticCausalLMOutputWithPast(
            loss=language_model_output.loss,
            logits=language_model_output.logits,
            past_key_values=language_model_output.past_key_values,
            hidden_states=language_model_output.hidden_states,
            attentions=language_model_output.attentions,
            projector_features=projected_patch_embeddings,
        )

    # === GenerationMixin Methods ===
    def prepare_inputs_for_generation(
        self,
        input_ids: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs: str,
    ) -> Dict[str, torch.Tensor]:
        """Borrowed from `LlamaForCausalLM` and simplified for batch size = 1; mirrors original PrismaticVLM logic."""
        if ((input_ids is not None) and (input_ids.shape[0] > 1)) or (
            (inputs_embeds is not None) and (inputs_embeds.shape[0] > 1)
        ):
            raise ValueError("Generation with batch size > 1 is not currently supported!")

        # Handle `past_key_values` (cache) =>> assume `input_ids` just has unprocessed tokens
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        # If `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        # Make sure `pixel_values` are preserved in `model_inputs`
        model_inputs.update(
            {
                "attention_mask": attention_mask,
                "pixel_values": pixel_values,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
            }
        )

        return model_inputs
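        # On the first decoding step, `model_inputs` carries the full prompt
        # `input_ids` plus `pixel_values` with no cache; on every later step,
        # `input_ids` is just the newest token and `past_key_values` holds the
        # already-processed prefix (including the inserted image patches).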
         
    # Defer to Language Model (all handle this differently, with different return types)
    def _reorder_cache(self, *args, **kwargs) -> Any:
        return self.language_model._reorder_cache(*args, **kwargs)


class OpenVLAForActionPrediction(PrismaticForConditionalGeneration):
    config_class: PretrainedConfig = OpenVLAConfig

    def __init__(self, config: OpenVLAConfig) -> None:
        super().__init__(config)
        self.norm_stats = config.norm_stats

        # Compute action bins
        self.bins = np.linspace(-1, 1, config.n_action_bins)
        self.bin_centers = (self.bins[:-1] + self.bins[1:]) / 2.0
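        # e.g., with n_action_bins = 256, `bins` holds 256 edges spanning [-1, 1]
        # and `bin_centers` holds 255 midpoints; each discrete action token decodes
        # to exactly one center.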
         
        # Compute vocab size for de-tokenization -- revert the "pad to multiple of" padding added to the vocabulary
        self.vocab_size = self.config.text_config.vocab_size - self.config.pad_to_multiple_of
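        # Worked example (values are assumptions from the reference Llama-2 setup):
        # a padded text vocabulary of 32064 with pad_to_multiple_of = 64 recovers
        # the base 32000-token vocabulary used for action de-tokenization.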
         
    def predict_action(
        self, input_ids: Optional[torch.LongTensor] = None, unnorm_key: Optional[str] = None, **kwargs: str
    ) -> np.ndarray:
        """Thin wrapper around super().generate() that decodes predicted actions and de-normalizes them."""

        # We need to add this special empty token ('') after the colon (':') token in "ASSISTANT:"
        # in order for the predictions to match the training configuration and be accurate.
        input_ids = torch.cat(
            (input_ids, torch.unsqueeze(torch.Tensor([29871]).long(), dim=0).to(input_ids.device)), dim=1
        )

        # Run VLA inference
        generated_ids = self.generate(input_ids, max_new_tokens=self.get_action_dim(unnorm_key), **kwargs)

        # Extract predicted action tokens and translate into (normalized) continuous actions
        predicted_action_token_ids = generated_ids[0, -self.get_action_dim(unnorm_key) :].cpu().numpy()
        discretized_actions = self.vocab_size - predicted_action_token_ids
        discretized_actions = np.clip(discretized_actions - 1, a_min=0, a_max=self.bin_centers.shape[0] - 1)
        normalized_actions = self.bin_centers[discretized_actions]
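        # Worked example (assuming the 32000-token / 256-bin setup above): a
        # generated token id of 31872 maps to 32000 - 31872 = 128, shifts/clips to
        # bin index 127, and bin_centers[127] = 0.0 -- the exact center of the
        # normalized [-1, 1] range.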
         
        # Unnormalize actions
        action_norm_stats = self.get_action_stats(unnorm_key)
        mask = action_norm_stats.get("mask", np.ones_like(action_norm_stats["q01"], dtype=bool))
        action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"])
        actions = np.where(
            mask,
            0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low,
            normalized_actions,
        )
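        # The affine map sends a normalized action in [-1, 1] onto the dataset's
        # [q01, q99] range; dimensions with mask == False (commonly a discrete
        # gripper channel) are returned in normalized form unchanged.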
         
        return actions

    @staticmethod
    def _check_unnorm_key(norm_stats: Dict[str, Dict[str, Any]], unnorm_key: Optional[str]) -> str:
        if unnorm_key is None and len(norm_stats) != 1:
            raise ValueError(
                "Your model was trained on more than one dataset. "
                "Please pass a `unnorm_key` from the following options to choose the statistics used for "
                f"de-normalizing actions: {norm_stats.keys()}"
            )

        # If None, grab the (singular) dataset in `norm_stats` to use as `unnorm_key`
        unnorm_key = unnorm_key if unnorm_key is not None else next(iter(norm_stats.keys()))
        if unnorm_key not in norm_stats:
            raise ValueError(
                f"The `unnorm_key` you chose ({unnorm_key = }) is not in the available statistics. "
                f"Please choose from: {norm_stats.keys()}"
            )

        return unnorm_key

    def get_action_dim(self, unnorm_key: Optional[str] = None) -> int:
        """Get the dimensionality of the policy's action space."""
        unnorm_key = self._check_unnorm_key(self.norm_stats, unnorm_key)
        return len(self.norm_stats[unnorm_key]["action"]["q01"])

    def get_action_stats(self, unnorm_key: Optional[str] = None) -> Dict[str, Any]:
        """Get all the logged statistics for the given dataset."""
        unnorm_key = self._check_unnorm_key(self.norm_stats, unnorm_key)
        return self.norm_stats[unnorm_key]["action"]
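

if __name__ == "__main__":
    # === Usage Sketch (illustrative smoke test, not part of the released API) ===
    # A minimal sketch of driving this class through the `auto_map` entries in
    # `config.json`. The repo id ("openvla/openvla-7b"), the prompt wording, the
    # example image path, and the `unnorm_key` ("bridge_orig") are assumptions --
    # substitute the values that match your checkpoint.
    from PIL import Image
    from transformers import AutoModelForVision2Seq, AutoProcessor

    processor = AutoProcessor.from_pretrained("openvla/openvla-7b", trust_remote_code=True)
    vla = AutoModelForVision2Seq.from_pretrained(
        "openvla/openvla-7b", torch_dtype=torch.bfloat16, trust_remote_code=True
    ).to("cuda:0")

    image = Image.open("example_frame.png")  # hypothetical RGB camera observation
    prompt = "In: What action should the robot take to pick up the cup?\nOut:"

    # `predict_action` appends the trailing whitespace token (29871) itself, runs
    # `generate`, and de-normalizes using the chosen dataset statistics.
    inputs = processor(prompt, image).to("cuda:0", dtype=torch.bfloat16)
    action = vla.predict_action(**inputs, unnorm_key="bridge_orig", do_sample=False)
    print(action)  # continuous action vector, one value per action dimension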
         