(Trained with Unsloth)
Files changed:
- config.json +5 -2
- pytorch_model-00001-of-00002.bin +1 -1
- pytorch_model-00002-of-00002.bin +1 -1
- tokenizer_config.json +1 -1
 
    	
config.json CHANGED

@@ -12,14 +12,17 @@
   "hidden_size": 2304,
   "initializer_range": 0.1,
   "intermediate_size": 5760,
-  "max_position_embeddings": 
+  "max_position_embeddings": 32768,
   "model_type": "llama",
   "num_attention_heads": 36,
   "num_hidden_layers": 40,
   "num_key_value_heads": 36,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": 
+  "rope_scaling": {
+    "factor": 4.0,
+    "type": "linear"
+  },
   "rope_theta": 10000.0,
   "scale_depth": 1.4,
   "scale_emb": 12,
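This commit raises max_position_embeddings to 32768 and adds linear RoPE scaling with factor 4.0: token positions are divided by 4 before the rotary angles are computed, so positions beyond the original training range are mapped back into it. A minimal sketch of that remapping, assuming the standard LLaMA-style rotary formulation and deriving the head dimension from the hidden_size and num_attention_heads above (the helper names are illustrative, not part of this repo):

import torch

# Values taken from the config above.
ROPE_THETA = 10000.0
HEAD_DIM = 2304 // 36        # hidden_size / num_attention_heads = 64
SCALING_FACTOR = 4.0         # "rope_scaling": {"factor": 4.0, "type": "linear"}

def rope_angles(positions: torch.Tensor, factor: float = 1.0) -> torch.Tensor:
    # Linear ("position interpolation") scaling simply divides positions by `factor`.
    inv_freq = 1.0 / (ROPE_THETA ** (torch.arange(0, HEAD_DIM, 2).float() / HEAD_DIM))
    return torch.outer(positions.float() / factor, inv_freq)  # (seq_len, HEAD_DIM // 2)

positions = torch.arange(32768)
scaled = rope_angles(positions, SCALING_FACTOR)  # position 32767 gets the angles of ~8192
print(scaled.shape)  # torch.Size([32768, 32])

When the config is loaded with a recent transformers release, LLaMA-type models apply the "rope_scaling" entry automatically; the sketch above only makes the arithmetic explicit.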
    	
pytorch_model-00001-of-00002.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256: 
+oid sha256:a51b0ca88bab3fca5379779ce51e5633dfdff89eb38f2e63b1be56f13129b7dd
 size 4993313262
    	
pytorch_model-00002-of-00002.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256: 
+oid sha256:1629609cd855d782e8c7a5fd96f28aadc7191d342ff9a64c4743fe3d3ef42ce1
 size 1022223648
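The two .bin files are Git LFS pointers, so this commit only updates the SHA-256 object IDs and byte sizes of the weight shards. A small verification sketch, assuming the shards have already been downloaded into the working directory; the expected digests and sizes are copied from the pointer files above:

import hashlib
from pathlib import Path

# oid/size pairs copied from the LFS pointer files in this commit.
EXPECTED = {
    "pytorch_model-00001-of-00002.bin": (
        "a51b0ca88bab3fca5379779ce51e5633dfdff89eb38f2e63b1be56f13129b7dd", 4993313262),
    "pytorch_model-00002-of-00002.bin": (
        "1629609cd855d782e8c7a5fd96f28aadc7191d342ff9a64c4743fe3d3ef42ce1", 1022223648),
}

def sha256_of(path: Path, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

for name, (oid, size) in EXPECTED.items():
    p = Path(name)
    ok = p.stat().st_size == size and sha256_of(p) == oid
    print(f"{name}: {'OK' if ok else 'MISMATCH'}")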
    	
tokenizer_config.json CHANGED

@@ -38,7 +38,7 @@
     "input_ids",
     "attention_mask"
   ],
-  "model_max_length": 
+  "model_max_length": 32768,
   "pad_token": "</s>",
   "padding_side": "right",
   "sep_token": null,
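model_max_length in tokenizer_config.json is what transformers uses as the default truncation limit, so it is raised to match the new 32768-token context. A quick check, assuming the repo id below is replaced with this model's actual path (it is a placeholder):

from transformers import AutoTokenizer

# "path/to/this-model" is a placeholder; point it at this repo or a local checkout.
tokenizer = AutoTokenizer.from_pretrained("path/to/this-model")
print(tokenizer.model_max_length)  # expected: 32768 after this commit

# Truncation now caps encoded inputs at the new context length.
ids = tokenizer("long input " * 20000, truncation=True)["input_ids"]
print(len(ids) <= tokenizer.model_max_length)  # True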