Commit fccd458 · Parent(s): 2a80b46
committed by Clémentine

wip adding symbols to model types

Files changed:
- app.py (+11 / -0)
- src/assets/text_content.py (+3 / -2)
- src/auto_leaderboard/model_metadata_type.py (+25 / -8)
- src/utils_display.py (+5 / -4)
    	
app.py CHANGED

@@ -179,6 +179,7 @@ def add_new_eval(
     precision: str,
     private: bool,
     weight_type: str,
+    model_type: str,
 ):
     precision = precision.split(" ")[0]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
@@ -209,6 +210,7 @@ def add_new_eval(
         "weight_type": weight_type,
         "status": "PENDING",
         "submitted_time": current_time,
+        "model_type": model_type,
     }

     user_name = ""
@@ -396,6 +398,14 @@ with demo:
                     max_choices=1,
                     interactive=True,
                 )
+                model_type = gr.Dropdown(
+                    choices=["pretrained", "fine-tuned", "with RL"],
+                    label="Model type",
+                    multiselect=False,
+                    value="pretrained",
+                    max_choices=1,
+                    interactive=True,
+                )
                 weight_type = gr.Dropdown(
                     choices=["Original", "Delta", "Adapter"],
                     label="Weights type",
@@ -419,6 +429,7 @@ with demo:
                 precision,
                 private,
                 weight_type,
+                model_type
             ],
             submission_result,
         )
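The app.py hunks thread the new dropdown selection into add_new_eval and the queued request dict as a plain string. Below is a minimal, standalone sketch of that Gradio wiring, assuming only stock gr.Dropdown/Button/Textbox behaviour; queue_eval is a hypothetical stand-in for the real callback, not repo code:

import gradio as gr

def queue_eval(model_type: str) -> str:
    # The dropdown's selected choice arrives as a plain string.
    return f"Queued submission with model_type={model_type!r}"

with gr.Blocks() as demo:
    model_type = gr.Dropdown(
        choices=["pretrained", "fine-tuned", "with RL"],
        label="Model type",
        value="pretrained",
        multiselect=False,
        interactive=True,
    )
    submit = gr.Button("Submit")
    result = gr.Textbox(label="Result")
    # Component values are passed positionally to the callback's parameters.
    submit.click(queue_eval, inputs=[model_type], outputs=[result])

demo.launch()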
    	
src/assets/text_content.py CHANGED

@@ -75,6 +75,7 @@ With the plethora of large language models (LLMs) and chatbots being released we
 - <a href="https://arxiv.org/abs/2009.03300" target="_blank">  MMLU </a>  (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
 - <a href="https://arxiv.org/abs/2109.07958" target="_blank">  TruthfulQA </a> (0-shot) - a test to measure a model’s propensity to reproduce falsehoods commonly found online. Note: TruthfulQA in the Harness is actually a minima a 6-shots task, as it is prepended by 6 examples systematically, even when launched using 0 for the number of few-shot examples.

+For all these evaluations, a higher score is a better score.
 We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings.

 # Some good practices before submitting a model
@@ -140,13 +141,13 @@ These models will be automatically evaluated on the 🤗 cluster.
 """

 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""
+CITATION_BUTTON_TEXT = r"""
+@misc{open-llm-leaderboard,
   author = {Edward Beeching, Clémentine Fourrier, Nathan Habib, Sheon Han, Nathan Lambert, Nazneen Rajani, Omar Sanseviero, Lewis Tunstall, Thomas Wolf},
   title = {Open LLM Leaderboard},
   year = {2023},
   publisher = {Hugging Face},
   howpublished = "\url{https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard}"
-
 }
 @software{eval-harness,
   author       = {Gao, Leo and
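The citation block depends on the value being a raw triple-quoted literal so that the backslash in \url{...} survives; a quick standalone illustration (not repo code):

# Raw string keeps the backslash required by BibTeX's \url{...}.
snippet = r"""howpublished = "\url{https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard}"
"""
assert "\\url{" in snippet
# In a regular (non-raw) literal, "\u" must be followed by four hex digits,
# so the same text would not even parse.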
    	
src/auto_leaderboard/model_metadata_type.py CHANGED

@@ -1,10 +1,17 @@
+from dataclasses import dataclass
 from enum import Enum
 from typing import Dict, List

+@dataclass
+class ModelInfo:
+    name: str
+    symbol: str # emoji
+
+
 class ModelType(Enum):
-    PT = "pretrained"
-    SFT = "finetuned"
-    RL = "with RL"
+    PT = ModelInfo(name="pretrained", symbol="🟢")
+    SFT = ModelInfo(name="finetuned", symbol="🔶")
+    RL = ModelInfo(name="with RL", symbol="🟦")


 TYPE_METADATA: Dict[str, ModelType] = {
@@ -160,13 +167,23 @@ TYPE_METADATA: Dict[str, ModelType] = {

 def get_model_type(leaderboard_data: List[dict]):
     for model_data in leaderboard_data:
-
-
+        # Init
+        model_data["Type name"] = "N/A"
+        model_data["Type"] = ""
+
+        # Stored information
+        if model_data["model_name_for_query"] in TYPE_METADATA:
+            model_data["Type name"] = TYPE_METADATA[model_data["model_name_for_query"]].value.name
+            model_data["Type"] = TYPE_METADATA[model_data["model_name_for_query"]].value.symbol
+        else: # Supposed from the name
             if any([i in model_data["model_name_for_query"] for i in ["finetuned", "-ft-"]]):
-                model_data["Type"] = ModelType.SFT
+                model_data["Type name"] = ModelType.SFT.value.name
+                model_data["Type"] = ModelType.SFT.value.symbol
             elif any([i in model_data["model_name_for_query"] for i in ["pretrained"]]):
-                model_data["Type"] = ModelType.PT
+                model_data["Type name"] = ModelType.PT.value.name
+                model_data["Type"] = ModelType.PT.value.symbol
             elif any([i in model_data["model_name_for_query"] for i in ["-rl-", "-rlhf-"]]):
-                model_data["Type"] = ModelType.RL
+                model_data["Type name"] = ModelType.RL.value.name
+                model_data["Type"] = ModelType.RL.value.symbol


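The reworked ModelType wraps a ModelInfo per member, so get_model_type can pull both the display name and the emoji through .value. A minimal sketch of how that resolves, using the same shapes as the diff (trimmed to the three members):

from dataclasses import dataclass
from enum import Enum

@dataclass
class ModelInfo:
    name: str
    symbol: str  # emoji shown in the leaderboard "Type" column

class ModelType(Enum):
    PT = ModelInfo(name="pretrained", symbol="🟢")
    SFT = ModelInfo(name="finetuned", symbol="🔶")
    RL = ModelInfo(name="with RL", symbol="🟦")

# Each member's payload is reached through .value, exactly as in get_model_type.
assert ModelType.SFT.value.name == "finetuned"
assert ModelType.SFT.value.symbol == "🔶"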
    	
src/utils_display.py CHANGED

@@ -14,13 +14,14 @@ def fields(raw_class):

 @dataclass(frozen=True)
 class AutoEvalColumn: # Auto evals column
+    model_type_symbol = ColumnContent("Type", "str", True)
     model = ColumnContent("Model", "markdown", True)
     average = ColumnContent("Average ⬆️", "number", True)
-    arc = ColumnContent("ARC
-    hellaswag = ColumnContent("HellaSwag
-    mmlu = ColumnContent("MMLU
+    arc = ColumnContent("ARC", "number", True)
+    hellaswag = ColumnContent("HellaSwag", "number", True)
+    mmlu = ColumnContent("MMLU", "number", True)
     truthfulqa = ColumnContent("TruthfulQA (MC) ⬆️", "number", True)
-    model_type = ColumnContent("Type", "str", False)
+    model_type = ColumnContent("Type name", "str", False)
     precision = ColumnContent("Precision", "str", False, True)
     license = ColumnContent("Hub License", "str", False)
     params = ColumnContent("#Params (B)", "number", False)
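ColumnContent itself is not part of this diff; judging from the call sites it takes a header label, a type string, a displayed-by-default flag, and an optional fourth flag (only passed for precision). A hypothetical sketch of how the two "Type" columns read under that assumption (field names are guesses, not repo code):

from dataclasses import dataclass

@dataclass
class ColumnContent:
    # Assumed field names, inferred from the call sites above.
    name: str              # header label shown in the table
    type: str              # "str", "number", "markdown", ...
    displayed_by_default: bool
    hidden: bool = False   # the optional fourth positional argument

model_type_symbol = ColumnContent("Type", "str", True)   # emoji column, shown by default
model_type = ColumnContent("Type name", "str", False)    # full type name, hidden by default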
