Commit bd90846

Squashing commit

Parent(s): 0

Co-authored-by: chrismarra <[email protected]>
Co-authored-by: ArthurZ <[email protected]>
- .gitattributes +35 -0
- LICENSE.txt +126 -0
- README.md +132 -0
- USE_POLICY.md +50 -0
- added_tokens.json +3 -0
- config.json +25 -0
- generation_config.json +9 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +330 -0
- pytorch_model-00001-of-00003.bin +3 -0
- pytorch_model-00002-of-00003.bin +3 -0
- pytorch_model-00003-of-00003.bin +3 -0
- pytorch_model.bin.index.json +330 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +34 -0
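The per-file diffs follow. As a side note, any of these files can be pulled individually with the `huggingface_hub` client; a minimal sketch, assuming the repository id is `meta-llama/Llama-2-7b-chat-hf` (this page does not name the repo, so the id is inferred from the README's model index) and that access to the gated repo has already been granted:

```python
# Minimal sketch: fetch one of the files added in this commit.
# Assumption: the repo id below is inferred, not stated on this page;
# gated repos also require an access token (e.g. via `huggingface-cli login`).
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="meta-llama/Llama-2-7b-chat-hf",  # assumed repo id
    filename="config.json",
)
print(path)  # local cache path of the downloaded file
```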
 
    	
.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
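These rules route every matching file through Git LFS, so the repository itself stores only small pointer files (see the `.safetensors` entries below). A rough sketch of how the glob patterns apply, using Python's `fnmatch` as an approximation of gitattributes matching (real gitattributes semantics, e.g. for `saved_model/**/*`, are richer):

```python
# Sketch: which of the LFS patterns above would match a given path?
# fnmatch only approximates gitattributes globbing for simple patterns.
from fnmatch import fnmatch

lfs_patterns = ["*.safetensors", "*.bin", "*.model", "*tfevents*"]  # subset from above

def tracked_by_lfs(path: str) -> bool:
    # A path is LFS-tracked if any pattern matches its basename.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, p) for p in lfs_patterns)

print(tracked_by_lfs("model-00001-of-00002.safetensors"))  # True
print(tracked_by_lfs("config.json"))                       # False
```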
    	
LICENSE.txt ADDED
@@ -0,0 +1,126 @@
+LLAMA 2 COMMUNITY LICENSE AGREEMENT
+Llama 2 Version Release Date: July 18, 2023
+
+"Agreement" means the terms and conditions for use, reproduction, distribution and
+modification of the Llama Materials set forth herein.
+
+"Documentation" means the specifications, manuals and documentation
+accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-
+libraries/llama-downloads/.
+
+"Licensee" or "you" means you, or your employer or any other person or entity (if
+you are entering into this Agreement on such person or entity's behalf), of the age
+required under applicable laws, rules or regulations to provide legal consent and that
+has legal authority to bind your employer or such other person or entity if you are
+entering in this Agreement on their behalf.
+
+"Llama 2" means the foundational large language models and software and
+algorithms, including machine-learning model code, trained model weights,
+inference-enabling code, training-enabling code, fine-tuning enabling code and other
+elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-
+libraries/llama-downloads/.
+
+"Llama Materials" means, collectively, Meta's proprietary Llama 2 and
+Documentation (and any portion thereof) made available under this Agreement.
+
+"Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you
+are an entity, your principal place of business is in the EEA or Switzerland) and Meta
+Platforms, Inc. (if you are located outside of the EEA or Switzerland).
+
+By clicking "I Accept" below or by using or distributing any portion or element of the
+Llama Materials, you agree to be bound by this Agreement.
+
+1. License Rights and Redistribution.
+
+      a. Grant of Rights. You are granted a non-exclusive, worldwide, non-
+transferable and royalty-free limited license under Meta's intellectual property or
+other rights owned by Meta embodied in the Llama Materials to use, reproduce,
+distribute, copy, create derivative works of, and make modifications to the Llama
+Materials.
+
+      b. Redistribution and Use.
+
+            i. If you distribute or make the Llama Materials, or any derivative works
+thereof, available to a third party, you shall provide a copy of this Agreement to such
+third party.
+            ii. If you receive Llama Materials, or any derivative works thereof, from
+a Licensee as part of an integrated end user product, then Section 2 of this
+Agreement will not apply to you.
+
+            iii. You must retain in all copies of the Llama Materials that you
+distribute the following attribution notice within a "Notice" text file distributed as a
+part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License,
+Copyright (c) Meta Platforms, Inc. All Rights Reserved."
+
+            iv. Your use of the Llama Materials must comply with applicable laws
+and regulations (including trade compliance laws and regulations) and adhere to the
+Acceptable Use Policy for the Llama Materials (available at
+https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into
+this Agreement.
+
+            v. You will not use the Llama Materials or any output or results of the
+Llama Materials to improve any other large language model (excluding Llama 2 or
+derivative works thereof).
+
+2. Additional Commercial Terms. If, on the Llama 2 version release date, the
+monthly active users of the products or services made available by or for Licensee,
+or Licensee's affiliates, is greater than 700 million monthly active users in the
+preceding calendar month, you must request a license from Meta, which Meta may
+grant to you in its sole discretion, and you are not authorized to exercise any of the
+rights under this Agreement unless or until Meta otherwise expressly grants you
+such rights.
+
+3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE
+LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE
+PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY
+WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR
+FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE
+FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING
+THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR
+USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
+
+4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE
+LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT,
+NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS
+AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL,
+CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN
+IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF
+ANY OF THE FOREGOING.
+
+5. Intellectual Property.
+
+      a. No trademark licenses are granted under this Agreement, and in
+connection with the Llama Materials, neither Meta nor Licensee may use any name
+or mark owned by or associated with the other or any of its affiliates, except as
+required for reasonable and customary use in describing and redistributing the
+Llama Materials.
+
+      b. Subject to Meta's ownership of Llama Materials and derivatives made by or
+for Meta, with respect to any derivative works and modifications of the Llama
+Materials that are made by you, as between you and Meta, you are and will be the
+owner of such derivative works and modifications.
+
+      c. If you institute litigation or other proceedings against Meta or any entity
+(including a cross-claim or counterclaim in a lawsuit) alleging that the Llama
+Materials or Llama 2 outputs or results, or any portion of any of the foregoing,
+constitutes infringement of intellectual property or other rights owned or licensable
+by you, then any licenses granted to you under this Agreement shall terminate as of
+the date such litigation or claim is filed or instituted. You will indemnify and hold
+harmless Meta from and against any claim by any third party arising out of or related
+to your use or distribution of the Llama Materials.
+
+6. Term and Termination. The term of this Agreement will commence upon your
+acceptance of this Agreement or access to the Llama Materials and will continue in
+full force and effect until terminated in accordance with the terms and conditions
+herein. Meta may terminate this Agreement if you are in breach of any term or
+condition of this Agreement. Upon termination of this Agreement, you shall delete
+and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the
+termination of this Agreement.
+
+7. Governing Law and Jurisdiction. This Agreement will be governed and
+construed under the laws of the State of California without regard to choice of law
+principles, and the UN Convention on Contracts for the International Sale of Goods
+does not apply to this Agreement. The courts of California shall have exclusive
+jurisdiction of any dispute arising out of this Agreement.
+
    	
README.md ADDED
@@ -0,0 +1,132 @@
+---
+extra_gated_heading: Access Llama 2 on Hugging Face
+extra_gated_description: >-
+  This is a form to enable access to Llama 2 on Hugging Face after you have been
+  granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept our
+  license terms and acceptable use policy before submitting this form. Requests
+  will be processed in 1-2 days.
+extra_gated_button_content: Submit
+extra_gated_fields:
+  I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website: checkbox
+language:
+- en
+pipeline_tag: text-generation
+inference: false
+tags:
+- facebook
+- meta
+- pytorch
+- llama
+- llama-2
+---
+# **Llama 2**
+Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
+
+## Model Details
+*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*
+
+Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.
+
+**Model Developers** Meta
+
+**Variations** Llama 2 comes in a range of parameter sizes (7B, 13B, and 70B) as well as pretrained and fine-tuned variations.
+
+**Input** Models input text only.
+
+**Output** Models generate text only.
+
+**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.
+
+
+||Training Data|Params|Content Length|GQA|Tokens|LR|
+|---|---|---|---|---|---|---|
+|Llama 2|*A new mix of publicly available online data*|7B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
+|Llama 2|*A new mix of publicly available online data*|13B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
+|Llama 2|*A new mix of publicly available online data*|70B|4k|✔|2.0T|1.5 x 10<sup>-4</sup>|
+
+*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch size of 4M tokens. The bigger model (70B) uses Grouped-Query Attention (GQA) for improved inference scalability.
+
+**Model Dates** Llama 2 was trained between January 2023 and July 2023.
+
+**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
+
+**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
+
+## Intended Use
+**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
+
+**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.
+
+## Hardware and Software
+**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
+
+**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.
+
+||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)|
+|---|---|---|---|
+|Llama 2 7B|184320|400|31.22|
+|Llama 2 13B|368640|400|62.44|
+|Llama 2 70B|1720320|400|291.42|
+|Total|3311616||539.00|
+
+**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used, adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
+
+## Training Data
+**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
+
+**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.
+
+## Evaluation Results
+
+In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.
+
+|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
+|---|---|---|---|---|---|---|---|---|---|
+|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
+|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
+|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
+|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
+|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
+|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
+|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|
+
+**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.
+
+|||TruthfulQA|Toxigen|
+|---|---|---|---|
+|Llama 1|7B|27.42|23.00|
+|Llama 1|13B|41.74|23.08|
+|Llama 1|33B|44.19|22.57|
+|Llama 1|65B|48.71|21.77|
+|Llama 2|7B|33.29|**21.25**|
+|Llama 2|13B|41.86|26.10|
+|Llama 2|70B|**50.18**|24.60|
+
+**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).
+
+
+|||TruthfulQA|Toxigen|
+|---|---|---|---|
+|Llama-2-Chat|7B|57.04|**0.00**|
+|Llama-2-Chat|13B|62.18|**0.00**|
+|Llama-2-Chat|70B|**64.14**|0.01|
+
+**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.
+
+## Ethical Considerations and Limitations
+Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and it has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or otherwise objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
+
+Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide/).
+
+## Reporting Issues
+Please report any software “bug” or other problems with the models through one of the following means:
+- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
+- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
+- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
+
+## Llama Model Index
+|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
+|---|---|---|---|---|
+|7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
+|13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat-hf)|
+|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat-hf)|
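To make the README's "converted for the Hugging Face Transformers format" concrete, here is a minimal loading sketch. The repo id is taken from the model index above and the dtype from `config.json` below; treating them this way is an assumption about how this particular repo is meant to be consumed, not an instruction from the card itself.

```python
# Minimal sketch: load the converted 7B chat weights with Transformers.
# Assumptions: repo id inferred from the model index above; access must
# first be granted by Meta; float16 matches torch_dtype in config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-2-7b-chat-hf"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)

inputs = tokenizer("Hello, Llama 2!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```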
    	
USE_POLICY.md ADDED
@@ -0,0 +1,50 @@
+# Llama 2 Acceptable Use Policy
+
+Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
+
+## Prohibited Uses
+We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
+
+1. Violate the law or others’ rights, including to:
+    1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
+        1. Violence or terrorism
+        2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
+        3. Human trafficking, exploitation, and sexual violence
+        4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
+        5. Sexual solicitation
+        6. Any other criminal activity
+    2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
+    3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
+    4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
+    5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
+    6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
+    7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
+
+
+
+2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
+    1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
+    2. Guns and illegal weapons (including weapon development)
+    3. Illegal drugs and regulated/controlled substances
+    4. Operation of critical infrastructure, transportation technologies, or heavy machinery
+    5. Self-harm or harm to others, including suicide, cutting, and eating disorders
+    6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
+
+
+
+3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
+    1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
+    2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
+    3. Generating, promoting, or further distributing spam
+    4. Impersonating another individual without consent, authorization, or legal right
+    5. Representing that the use of Llama 2 or outputs are human-generated
+    6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
+4. Fail to appropriately disclose to end users any known dangers of your AI system
+
+Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
+
+* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
+* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
+* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
+* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
+
    	
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<pad>": 32000
+}
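The single added token gives the tokenizer a `<pad>` entry at id 32000, one past the base vocabulary of 32000 (ids 0-31999). A sketch of what that implies for the embedding matrix, assuming the same repo id as in the earlier sketches:

```python
# Sketch: added_tokens.json maps "<pad>" to id 32000, one past the base
# 32000-token vocabulary. If the embedding matrix has only 32000 rows,
# it must be resized before the pad id can be used. Repo id assumed.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-2-7b-chat-hf"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

print(tokenizer.pad_token, tokenizer.pad_token_id)  # expect: <pad> 32000
if len(tokenizer) > model.get_input_embeddings().weight.shape[0]:
    model.resize_token_embeddings(len(tokenizer))  # grow to 32001 rows
```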
    	
config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.31.0.dev0",
+  "use_cache": true,
+  "vocab_size": 32000
+}
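The fields above correspond to `transformers`' `LlamaConfig`, so the architecture can be sanity-checked locally without downloading any weights. A sketch with the values copied from the file:

```python
# Sketch: instantiate the architecture described by config.json above,
# offline. Values are copied from the file as committed.
from transformers import LlamaConfig

config = LlamaConfig(
    hidden_size=4096,
    intermediate_size=11008,
    num_hidden_layers=32,
    num_attention_heads=32,
    num_key_value_heads=32,       # equal to num_attention_heads: no GQA at 7B
    max_position_embeddings=2048, # as written in this commit
    rms_norm_eps=1e-05,
    vocab_size=32000,
    tie_word_embeddings=False,
)
print(config.model_type)  # "llama"
```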
    	
generation_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 32000,
+  "temperature": 0.9,
+  "top_p": 0.6,
+  "transformers_version": "4.31.0.dev0"
+}
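These defaults translate directly into a `GenerationConfig`. One caveat worth flagging: the file sets `temperature` and `top_p` but not `do_sample`, and in `transformers` those sampling knobs only take effect when `do_sample=True`. A hedged sketch:

```python
# Sketch: the sampling defaults above, expressed as a GenerationConfig.
# do_sample is NOT in the committed file; it is added here because
# temperature/top_p are ignored by generate() without it.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    bos_token_id=1,
    eos_token_id=2,
    pad_token_id=32000,
    temperature=0.9,
    top_p=0.6,
    do_sample=True,  # assumption: required for the values above to matter
)
# usage: outputs = model.generate(**inputs, generation_config=gen_config)
```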
    	
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66dec18c9f1705b9387d62f8485f4e7d871ca388718786737ed3c72dbfaac9fb
+size 9976576152
    	
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fd6895090da1b2ccffdb93964847709a3b31e6b69fe7dc5a480dce37c811b1d
+size 3500296424
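Both `.safetensors` entries are Git LFS pointer files rather than the weights themselves: three `key value` lines recording the spec version, the SHA-256 of the real blob, and its size in bytes. A small parser sketch over the first pointer above:

```python
# Sketch: parse a Git LFS pointer file into its three fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:66dec18c9f1705b9387d62f8485f4e7d871ca388718786737ed3c72dbfaac9fb
size 9976576152
"""
info = parse_lfs_pointer(pointer)
print(info["size_bytes"] / 1e9)  # ~9.98 GB for the first shard
```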
    	
model.safetensors.index.json ADDED
@@ -0,0 +1,330 @@
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
| 
         | 
|
| 1 | 
         
            +
            {
         
     | 
| 2 | 
         
            +
              "metadata": {
         
     | 
| 3 | 
         
            +
                "total_size": 13476835328
         
     | 
| 4 | 
         
            +
              },
         
     | 
| 5 | 
         
            +
              "weight_map": {
         
     | 
| 6 | 
         
            +
                "lm_head.weight": "model-00002-of-00002.safetensors",
         
     | 
| 7 | 
         
            +
                "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
         
     | 
| 8 | 
         
            +
                "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 9 | 
         
            +
                "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 10 | 
         
            +
                "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 11 | 
         
            +
                "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 12 | 
         
            +
                "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 13 | 
         
            +
                "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 14 | 
         
            +
                "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 15 | 
         
            +
                "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 16 | 
         
            +
                "model.layers.0.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 17 | 
         
            +
                "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 18 | 
         
            +
                "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 19 | 
         
            +
                "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 20 | 
         
            +
                "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 21 | 
         
            +
                "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 22 | 
         
            +
                "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 23 | 
         
            +
                "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 24 | 
         
            +
                "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 25 | 
         
            +
                "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 26 | 
         
            +
                "model.layers.1.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 27 | 
         
            +
                "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 28 | 
         
            +
                "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 29 | 
         
            +
                "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 30 | 
         
            +
                "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 31 | 
         
            +
                "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 32 | 
         
            +
                "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 33 | 
         
            +
                "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 34 | 
         
            +
                "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 35 | 
         
            +
                "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 36 | 
         
            +
                "model.layers.10.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 37 | 
         
            +
                "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 38 | 
         
            +
                "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 39 | 
         
            +
                "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 40 | 
         
            +
                "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 41 | 
         
            +
                "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 42 | 
         
            +
                "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 43 | 
         
            +
                "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 44 | 
         
            +
                "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 45 | 
         
            +
                "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 46 | 
         
            +
                "model.layers.11.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 47 | 
         
            +
                "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 48 | 
         
            +
                "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 49 | 
         
            +
                "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 50 | 
         
            +
                "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 51 | 
         
            +
                "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 52 | 
         
            +
                "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 53 | 
         
            +
                "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 54 | 
         
            +
                "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 55 | 
         
            +
                "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 56 | 
         
            +
                "model.layers.12.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 57 | 
         
            +
                "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 58 | 
         
            +
                "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 59 | 
         
            +
                "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 60 | 
         
            +
                "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 61 | 
         
            +
                "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 62 | 
         
            +
                "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 63 | 
         
            +
                "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 64 | 
         
            +
                "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 65 | 
         
            +
                "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 66 | 
         
            +
                "model.layers.13.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 67 | 
         
            +
                "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 68 | 
         
            +
                "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 69 | 
         
            +
                "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 70 | 
         
            +
                "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 71 | 
         
            +
                "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 72 | 
         
            +
                "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 73 | 
         
            +
                "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 74 | 
         
            +
                "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 75 | 
         
            +
                "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 76 | 
         
            +
                "model.layers.14.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 77 | 
         
            +
                "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 78 | 
         
            +
                "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 79 | 
         
            +
                "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 80 | 
         
            +
                "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 81 | 
         
            +
                "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 82 | 
         
            +
                "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 83 | 
         
            +
                "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 84 | 
         
            +
                "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 85 | 
         
            +
                "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 86 | 
         
            +
                "model.layers.15.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 87 | 
         
            +
                "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 88 | 
         
            +
                "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 89 | 
         
            +
                "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 90 | 
         
            +
                "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 91 | 
         
            +
                "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 92 | 
         
            +
                "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 93 | 
         
            +
                "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 94 | 
         
            +
                "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 95 | 
         
            +
                "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 96 | 
         
            +
                "model.layers.16.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 97 | 
         
            +
                "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 98 | 
         
            +
                "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 99 | 
         
            +
                "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 100 | 
         
            +
                "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 101 | 
         
            +
                "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 102 | 
         
            +
                "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 103 | 
         
            +
                "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 104 | 
         
            +
                "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 105 | 
         
            +
                "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 106 | 
         
            +
                "model.layers.17.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 107 | 
         
            +
                "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 108 | 
         
            +
                "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 109 | 
         
            +
                "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 110 | 
         
            +
                "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 111 | 
         
            +
                "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 112 | 
         
            +
                "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 113 | 
         
            +
                "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 114 | 
         
            +
                "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 115 | 
         
            +
                "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 116 | 
         
            +
                "model.layers.18.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 117 | 
         
            +
                "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 118 | 
         
            +
                "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 119 | 
         
            +
                "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 120 | 
         
            +
                "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 121 | 
         
            +
                "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 122 | 
         
            +
                "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 123 | 
         
            +
                "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 124 | 
         
            +
                "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 125 | 
         
            +
                "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 126 | 
         
            +
                "model.layers.19.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 127 | 
         
            +
                "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 128 | 
         
            +
                "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 129 | 
         
            +
                "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 130 | 
         
            +
                "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 131 | 
         
            +
                "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 132 | 
         
            +
                "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 133 | 
         
            +
                "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 134 | 
         
            +
                "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 135 | 
         
            +
                "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 136 | 
         
            +
                "model.layers.2.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 137 | 
         
            +
                "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 138 | 
         
            +
                "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 139 | 
         
            +
                "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 140 | 
         
            +
                "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 141 | 
         
            +
                "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 142 | 
         
            +
                "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 143 | 
         
            +
                "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 144 | 
         
            +
                "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 145 | 
         
            +
                "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 146 | 
         
            +
                "model.layers.20.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 147 | 
         
            +
                "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 148 | 
         
            +
                "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 149 | 
         
            +
                "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 150 | 
         
            +
                "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 151 | 
         
            +
                "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 152 | 
         
            +
                "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 153 | 
         
            +
                "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 154 | 
         
            +
                "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 155 | 
         
            +
                "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 156 | 
         
            +
                "model.layers.21.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 157 | 
         
            +
                "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 158 | 
         
            +
                "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 159 | 
         
            +
                "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 160 | 
         
            +
                "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 161 | 
         
            +
                "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 162 | 
         
            +
                "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 163 | 
         
            +
                "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 164 | 
         
            +
                "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 165 | 
         
            +
                "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 166 | 
         
            +
                "model.layers.22.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 167 | 
         
            +
                "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 168 | 
         
            +
                "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 169 | 
         
            +
                "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 170 | 
         
            +
                "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 171 | 
         
            +
                "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 172 | 
         
            +
                "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 173 | 
         
            +
                "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 174 | 
         
            +
                "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 175 | 
         
            +
                "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 176 | 
         
            +
                "model.layers.23.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 177 | 
         
            +
                "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 178 | 
         
            +
                "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 179 | 
         
            +
                "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 180 | 
         
            +
                "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 181 | 
         
            +
                "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 182 | 
         
            +
                "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 183 | 
         
            +
                "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 184 | 
         
            +
                "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 185 | 
         
            +
                "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 186 | 
         
            +
                "model.layers.24.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 187 | 
         
            +
                "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 188 | 
         
            +
                "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 189 | 
         
            +
                "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 190 | 
         
            +
                "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 191 | 
         
            +
                "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 192 | 
         
            +
                "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 193 | 
         
            +
                "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 194 | 
         
            +
                "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 195 | 
         
            +
                "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 196 | 
         
            +
                "model.layers.25.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 197 | 
         
            +
                "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 198 | 
         
            +
                "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 199 | 
         
            +
                "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 200 | 
         
            +
                "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 201 | 
         
            +
                "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 202 | 
         
            +
                "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 203 | 
         
            +
                "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 204 | 
         
            +
                "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 205 | 
         
            +
                "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 206 | 
         
            +
                "model.layers.26.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 207 | 
         
            +
                "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 208 | 
         
            +
                "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 209 | 
         
            +
                "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 210 | 
         
            +
                "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 211 | 
         
            +
                "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 212 | 
         
            +
                "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 213 | 
         
            +
                "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 214 | 
         
            +
                "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 215 | 
         
            +
                "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 216 | 
         
            +
                "model.layers.27.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 217 | 
         
            +
                "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 218 | 
         
            +
                "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 219 | 
         
            +
                "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 220 | 
         
            +
                "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 221 | 
         
            +
                "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 222 | 
         
            +
                "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 223 | 
         
            +
                "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 224 | 
         
            +
                "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 225 | 
         
            +
                "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 226 | 
         
            +
                "model.layers.28.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 227 | 
         
            +
                "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 228 | 
         
            +
                "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 229 | 
         
            +
                "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 230 | 
         
            +
                "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 231 | 
         
            +
                "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 232 | 
         
            +
                "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 233 | 
         
            +
                "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 234 | 
         
            +
                "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 235 | 
         
            +
                "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 236 | 
         
            +
                "model.layers.29.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 237 | 
         
            +
                "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 238 | 
         
            +
                "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 239 | 
         
            +
                "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 240 | 
         
            +
                "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 241 | 
         
            +
                "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 242 | 
         
            +
                "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 243 | 
         
            +
                "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 244 | 
         
            +
                "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 245 | 
         
            +
                "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 246 | 
         
            +
                "model.layers.3.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 247 | 
         
            +
                "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 248 | 
         
            +
                "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 249 | 
         
            +
                "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 250 | 
         
            +
                "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 251 | 
         
            +
                "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 252 | 
         
            +
                "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 253 | 
         
            +
                "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 254 | 
         
            +
                "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 255 | 
         
            +
                "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 256 | 
         
            +
                "model.layers.30.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 257 | 
         
            +
                "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 258 | 
         
            +
                "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 259 | 
         
            +
                "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 260 | 
         
            +
                "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 261 | 
         
            +
                "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 262 | 
         
            +
                "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
         
     | 
| 263 | 
         
            +
                "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 264 | 
         
            +
                "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 265 | 
         
            +
                "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 266 | 
         
            +
                "model.layers.31.self_attn.rotary_emb.inv_freq": "model-00002-of-00002.safetensors",
         
     | 
| 267 | 
         
            +
                "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
         
     | 
| 268 | 
         
            +
                "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 269 | 
         
            +
                "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 270 | 
         
            +
                "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 271 | 
         
            +
                "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 272 | 
         
            +
                "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 273 | 
         
            +
                "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 274 | 
         
            +
                "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 275 | 
         
            +
                "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 276 | 
         
            +
                "model.layers.4.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 277 | 
         
            +
                "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 278 | 
         
            +
                "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 279 | 
         
            +
                "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 280 | 
         
            +
                "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 281 | 
         
            +
                "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 282 | 
         
            +
                "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 283 | 
         
            +
                "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 284 | 
         
            +
                "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 285 | 
         
            +
                "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 286 | 
         
            +
                "model.layers.5.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 287 | 
         
            +
                "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 288 | 
         
            +
                "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 289 | 
         
            +
                "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 290 | 
         
            +
                "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 291 | 
         
            +
                "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 292 | 
         
            +
                "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 293 | 
         
            +
                "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 294 | 
         
            +
                "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 295 | 
         
            +
                "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 296 | 
         
            +
                "model.layers.6.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 297 | 
         
            +
                "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 298 | 
         
            +
                "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 299 | 
         
            +
                "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 300 | 
         
            +
                "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 301 | 
         
            +
                "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 302 | 
         
            +
                "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 303 | 
         
            +
                "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 304 | 
         
            +
                "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 305 | 
         
            +
                "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 306 | 
         
            +
                "model.layers.7.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 307 | 
         
            +
                "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 308 | 
         
            +
                "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 309 | 
         
            +
                "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 310 | 
         
            +
                "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 311 | 
         
            +
                "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 312 | 
         
            +
                "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 313 | 
         
            +
                "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 314 | 
         
            +
                "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 315 | 
         
            +
                "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 316 | 
         
            +
                "model.layers.8.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 317 | 
         
            +
                "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 318 | 
         
            +
                "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 319 | 
         
            +
                "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 320 | 
         
            +
                "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 321 | 
         
            +
                "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 322 | 
         
            +
                "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
         
     | 
| 323 | 
         
            +
                "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 324 | 
         
            +
                "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 325 | 
         
            +
                "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 326 | 
         
            +
                "model.layers.9.self_attn.rotary_emb.inv_freq": "model-00001-of-00002.safetensors",
         
     | 
| 327 | 
         
            +
                "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
         
     | 
| 328 | 
         
            +
                "model.norm.weight": "model-00002-of-00002.safetensors"
         
     | 
| 329 | 
         
            +
              }
         
     | 
| 330 | 
         
            +
            }
         
     | 
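The index above is what lets loaders open only the shards they need: the weight_map sends embed_tokens and layers 0-23 to shard 1 and layers 24-31 plus lm_head.weight and model.norm.weight to shard 2. A minimal sketch of reading it, assuming the file sits in the working directory:

# Minimal sketch: group tensor names by shard, mirroring how sharded
# loaders resolve the weight_map before opening any shard file.
import json
from collections import defaultdict

with open("model.safetensors.index.json") as f:
    index = json.load(f)

by_shard = defaultdict(list)
for name, shard in index["weight_map"].items():
    by_shard[shard].append(name)

for shard, names in sorted(by_shard.items()):
    print(shard, len(names), "tensors")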
    	
pytorch_model-00001-of-00003.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7817162c438c935c31b8db599fa43a9fcf938bdccfdd8b3da0af5c185aa5c9ee
+size 9877989586
    	
pytorch_model-00002-of-00003.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09c0622450bbf9960e5bf154388eff109b2c223b1ee96b1a71751f46e1d79e49
+size 9894801014
    	
pytorch_model-00003-of-00003.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbc0ea1c0c288aa38ad082b4691fe477f850ecd07aa3ed6015b8c549eb5a080b
+size 7180990649
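The two index files make the storage precision easy to back out: pytorch_model.bin.index.json below records a total_size of 26953670656 bytes, exactly double the 13476835328 in model.safetensors.index.json above. At 4 bytes per parameter for the .bin shards versus 2 for the safetensors shards, both figures point to the same parameter count. A worked check (the fp32-vs-16-bit reading is an inference from the ratio, not stated anywhere in the repo):

# Minimal sketch: total_size counts tensor bytes, not on-disk file sizes
# (the .bin files above are slightly larger due to pickle framing).
SAFETENSORS_TOTAL = 13_476_835_328   # model.safetensors.index.json, ~2 B/param
PYTORCH_TOTAL = 26_953_670_656       # pytorch_model.bin.index.json, ~4 B/param

assert PYTORCH_TOTAL == 2 * SAFETENSORS_TOTAL
params = SAFETENSORS_TOTAL // 2
print(f"{params:,} parameters")      # 6,738,417,664 -> a 7B-class model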
    	
        pytorch_model.bin.index.json
    ADDED
    
    | 
         @@ -0,0 +1,330 @@ 
| 1 | + {
| 2 | +   "metadata": {
| 3 | +     "total_size": 26953670656
| 4 | +   },
| 5 | +   "weight_map": {
| 6 | +     "lm_head.weight": "pytorch_model-00003-of-00003.bin",
| 7 | +     "model.embed_tokens.weight": "pytorch_model-00001-of-00003.bin",
| 8 | +     "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 9 | +     "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 10 | +     "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 11 | +     "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 12 | +     "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 13 | +     "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 14 | +     "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 15 | +     "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 16 | +     "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 17 | +     "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 18 | +     "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 19 | +     "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 20 | +     "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 21 | +     "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 22 | +     "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 23 | +     "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 24 | +     "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 25 | +     "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 26 | +     "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 27 | +     "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 28 | +     "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 29 | +     "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 30 | +     "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 31 | +     "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 32 | +     "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 33 | +     "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 34 | +     "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 35 | +     "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 36 | +     "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 37 | +     "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 38 | +     "model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 39 | +     "model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 40 | +     "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 41 | +     "model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 42 | +     "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 43 | +     "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 44 | +     "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 45 | +     "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 46 | +     "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 47 | +     "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 48 | +     "model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 49 | +     "model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 50 | +     "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 51 | +     "model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 52 | +     "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 53 | +     "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 54 | +     "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 55 | +     "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 56 | +     "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 57 | +     "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 58 | +     "model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 59 | +     "model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 60 | +     "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 61 | +     "model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 62 | +     "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 63 | +     "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 64 | +     "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 65 | +     "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 66 | +     "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 67 | +     "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 68 | +     "model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 69 | +     "model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 70 | +     "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 71 | +     "model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 72 | +     "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 73 | +     "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 74 | +     "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 75 | +     "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 76 | +     "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 77 | +     "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 78 | +     "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 79 | +     "model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 80 | +     "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 81 | +     "model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 82 | +     "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 83 | +     "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 84 | +     "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 85 | +     "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 86 | +     "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 87 | +     "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 88 | +     "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 89 | +     "model.layers.16.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 90 | +     "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 91 | +     "model.layers.16.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 92 | +     "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 93 | +     "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 94 | +     "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 95 | +     "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 96 | +     "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 97 | +     "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 98 | +     "model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 99 | +     "model.layers.17.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 100 | +     "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 101 | +     "model.layers.17.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 102 | +     "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 103 | +     "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 104 | +     "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 105 | +     "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 106 | +     "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 107 | +     "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 108 | +     "model.layers.18.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 109 | +     "model.layers.18.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 110 | +     "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 111 | +     "model.layers.18.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 112 | +     "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 113 | +     "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 114 | +     "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 115 | +     "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 116 | +     "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 117 | +     "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 118 | +     "model.layers.19.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 119 | +     "model.layers.19.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 120 | +     "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 121 | +     "model.layers.19.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 122 | +     "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 123 | +     "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 124 | +     "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 125 | +     "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 126 | +     "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 127 | +     "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 128 | +     "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 129 | +     "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 130 | +     "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 131 | +     "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 132 | +     "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 133 | +     "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 134 | +     "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 135 | +     "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 136 | +     "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 137 | +     "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 138 | +     "model.layers.20.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 139 | +     "model.layers.20.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 140 | +     "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 141 | +     "model.layers.20.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 142 | +     "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 143 | +     "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 144 | +     "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 145 | +     "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 146 | +     "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 147 | +     "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 148 | +     "model.layers.21.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 149 | +     "model.layers.21.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 150 | +     "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 151 | +     "model.layers.21.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 152 | +     "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 153 | +     "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 154 | +     "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 155 | +     "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 156 | +     "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 157 | +     "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 158 | +     "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 159 | +     "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 160 | +     "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 161 | +     "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
| 162 | +     "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
| 163 | +     "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 164 | +     "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 165 | +     "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 166 | +     "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 167 | +     "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 168 | +     "model.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 169 | +     "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
| 170 | +     "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
| 171 | +     "model.layers.23.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 172 | +     "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 173 | +     "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
| 174 | +     "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
| 175 | +     "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
| 176 | +     "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
| 177 | +     "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
| 178 | +     "model.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 179 | +     "model.layers.24.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 180 | +     "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 181 | +     "model.layers.24.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 182 | +     "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 183 | +     "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 184 | +     "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 185 | +     "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 186 | +     "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 187 | +     "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 188 | +     "model.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 189 | +     "model.layers.25.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 190 | +     "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 191 | +     "model.layers.25.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 192 | +     "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 193 | +     "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 194 | +     "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 195 | +     "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 196 | +     "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 197 | +     "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 198 | +     "model.layers.26.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 199 | +     "model.layers.26.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 200 | +     "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 201 | +     "model.layers.26.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 202 | +     "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 203 | +     "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 204 | +     "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 205 | +     "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 206 | +     "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 207 | +     "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 208 | +     "model.layers.27.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 209 | +     "model.layers.27.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 210 | +     "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 211 | +     "model.layers.27.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 212 | +     "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 213 | +     "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 214 | +     "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 215 | +     "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 216 | +     "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 217 | +     "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 218 | +     "model.layers.28.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 219 | +     "model.layers.28.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 220 | +     "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 221 | +     "model.layers.28.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 222 | +     "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 223 | +     "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 224 | +     "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 225 | +     "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 226 | +     "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 227 | +     "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 228 | +     "model.layers.29.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 229 | +     "model.layers.29.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 230 | +     "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 231 | +     "model.layers.29.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 232 | +     "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 233 | +     "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 234 | +     "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 235 | +     "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 236 | +     "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 237 | +     "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 238 | +     "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 239 | +     "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 240 | +     "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 241 | +     "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 242 | +     "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 243 | +     "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 244 | +     "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 245 | +     "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 246 | +     "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 247 | +     "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 248 | +     "model.layers.30.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 249 | +     "model.layers.30.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 250 | +     "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 251 | +     "model.layers.30.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 252 | +     "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 253 | +     "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 254 | +     "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 255 | +     "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 256 | +     "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 257 | +     "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 258 | +     "model.layers.31.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 259 | +     "model.layers.31.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
| 260 | +     "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
| 261 | +     "model.layers.31.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
| 262 | +     "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
| 263 | +     "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
| 264 | +     "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
| 265 | +     "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
| 266 | +     "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
| 267 | +     "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
| 268 | +     "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 269 | +     "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 270 | +     "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 271 | +     "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 272 | +     "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 273 | +     "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 274 | +     "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 275 | +     "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 276 | +     "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 277 | +     "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 278 | +     "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 279 | +     "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 280 | +     "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 281 | +     "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 282 | +     "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 283 | +     "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 284 | +     "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 285 | +     "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 286 | +     "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 287 | +     "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 288 | +     "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 289 | +     "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 290 | +     "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 291 | +     "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 292 | +     "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 293 | +     "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 294 | +     "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 295 | +     "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 296 | +     "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 297 | +     "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 298 | +     "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 299 | +     "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 300 | +     "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 301 | +     "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 302 | +     "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 303 | +     "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 304 | +     "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 305 | +     "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 306 | +     "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 307 | +     "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 308 | +     "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 309 | +     "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 310 | +     "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 311 | +     "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 312 | +     "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 313 | +     "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 314 | +     "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 315 | +     "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 316 | +     "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 317 | +     "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 318 | +     "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 319 | +     "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
| 320 | +     "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
| 321 | +     "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
| 322 | +     "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
| 323 | +     "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
| 324 | +     "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
| 325 | +     "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
| 326 | +     "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
| 327 | +     "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
| 328 | +     "model.norm.weight": "pytorch_model-00003-of-00003.bin"
| 329 | +   }
| 330 | + }
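The weight map above is the sharded-checkpoint index that transformers consults at load time: each parameter name points at the .bin shard that stores it, and metadata.total_size records the summed byte size of all tensors. A minimal sketch for inspecting the index offline (the file path assumes a local checkout of this repository):

    import json
    from collections import defaultdict

    # Path assumes a local checkout of this repository.
    with open("pytorch_model.bin.index.json") as f:
        index = json.load(f)

    # Group parameter names by the shard file that stores them.
    shards = defaultdict(list)
    for name, shard_file in index["weight_map"].items():
        shards[shard_file].append(name)

    for shard_file, names in sorted(shards.items()):
        print(f"{shard_file}: {len(names)} tensors")

    # total_size is the summed tensor size in bytes (~25 GiB here).
    print(index["metadata"]["total_size"])  # 26953670656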
    	
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
| 1 | + {
| 2 | +   "bos_token": {
| 3 | +     "content": "<s>",
| 4 | +     "lstrip": false,
| 5 | +     "normalized": true,
| 6 | +     "rstrip": false,
| 7 | +     "single_word": false
| 8 | +   },
| 9 | +   "eos_token": {
| 10 | +     "content": "</s>",
| 11 | +     "lstrip": false,
| 12 | +     "normalized": true,
| 13 | +     "rstrip": false,
| 14 | +     "single_word": false
| 15 | +   },
| 16 | +   "pad_token": "<unk>",
| 17 | +   "unk_token": {
| 18 | +     "content": "<unk>",
| 19 | +     "lstrip": false,
| 20 | +     "normalized": true,
| 21 | +     "rstrip": false,
| 22 | +     "single_word": false
| 23 | +   }
| 24 | + }
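These entries define the tokenizer's special tokens; note that pad_token reuses "<unk>" rather than introducing a new token. A minimal sketch of how the values surface after loading (the path is a placeholder for a local clone of this repository):

    from transformers import AutoTokenizer

    # "path/to/checkout" is a placeholder for a local clone of this repo.
    tok = AutoTokenizer.from_pretrained("path/to/checkout")

    print(tok.bos_token)  # <s>
    print(tok.eos_token)  # </s>
    print(tok.pad_token)  # <unk>  (reused as the padding token)
    print(tok.unk_token)  # <unk>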
    	
tokenizer.json
ADDED
The diff for this file is too large to render; see the raw diff.
    	
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
| 1 | + version https://git-lfs.github.com/spec/v1
| 2 | + oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
| 3 | + size 499723
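Like the model shards, tokenizer.model (the SentencePiece vocabulary) is stored via Git LFS, so the repository itself only tracks this small pointer file: "oid" is the SHA-256 of the real blob and "size" is its byte length. A minimal sketch for verifying a downloaded blob against the pointer (filename assumes a local checkout):

    import hashlib

    # Verify the downloaded SentencePiece model against the LFS pointer above.
    with open("tokenizer.model", "rb") as f:
        data = f.read()

    assert len(data) == 499723  # the pointer's "size" line
    assert hashlib.sha256(data).hexdigest() == (
        "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"
    )  # the pointer's "oid sha256:" line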
    	
tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
| 1 | + {
| 2 | +   "add_bos_token": true,
| 3 | +   "add_eos_token": false,
| 4 | +   "bos_token": {
| 5 | +     "__type": "AddedToken",
| 6 | +     "content": "<s>",
| 7 | +     "lstrip": false,
| 8 | +     "normalized": true,
| 9 | +     "rstrip": false,
| 10 | +     "single_word": false
| 11 | +   },
| 12 | +   "clean_up_tokenization_spaces": false,
| 13 | +   "eos_token": {
| 14 | +     "__type": "AddedToken",
| 15 | +     "content": "</s>",
| 16 | +     "lstrip": false,
| 17 | +     "normalized": true,
| 18 | +     "rstrip": false,
| 19 | +     "single_word": false
| 20 | +   },
| 21 | +   "legacy": false,
| 22 | +   "model_max_length": 1000000000000000019884624838656,
| 23 | +   "pad_token": null,
| 24 | +   "sp_model_kwargs": {},
| 25 | +   "tokenizer_class": "LlamaTokenizer",
| 26 | +   "unk_token": {
| 27 | +     "__type": "AddedToken",
| 28 | +     "content": "<unk>",
| 29 | +     "lstrip": false,
| 30 | +     "normalized": true,
| 31 | +     "rstrip": false,
| 32 | +     "single_word": false
| 33 | +   }
| 34 | + }
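Per this config, encodings get a leading <s> but no trailing </s> ("add_bos_token": true, "add_eos_token": false), and the enormous "model_max_length" is the library's "no limit recorded" sentinel (1e30 stored as a float), not a real context length. A minimal sketch of the resulting behaviour, under the same placeholder-path assumption as above:

    from transformers import LlamaTokenizer

    # "path/to/checkout" is a placeholder; "tokenizer_class" above selects
    # LlamaTokenizer.
    tok = LlamaTokenizer.from_pretrained("path/to/checkout")

    ids = tok("hello world")["input_ids"]
    assert ids[0] == tok.bos_token_id    # add_bos_token: true
    assert ids[-1] != tok.eos_token_id   # add_eos_token: false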