LLM_MODEL_ARCHS = {
    "stablelm_epoch": "πŸ”΄ StableLM-Epoch",
    "stablelm_alpha": "πŸ”΄ StableLM-Alpha",
    "mixformer-sequential": "πŸ§‘β€πŸ’» Phi Ο†",
    "RefinedWebModel": "πŸ¦… Falcon",
    "gpt_bigcode": "⭐ StarCoder",
    "RefinedWeb": "πŸ¦… Falcon",
    "baichuan": "🌊 Baichuan 百川",  # river
    "internlm": "πŸ§‘β€πŸŽ“ InternLM δΉ¦η”Ÿ",  # scholar
    "mistral": "Ⓜ️ Mistral",
    "mixtral": "Ⓜ️ Mixtral",
    "codegen": "♾️ CodeGen",
    "chatglm": "πŸ’¬ ChatGLM",
    "falcon": "πŸ¦… Falcon",
    "bloom": "🌸 Bloom",
    "llama": "πŸ¦™ LLaMA",
    "rwkv": "πŸ¦β€β¬› RWKV",
    "deci": "πŸ”΅ deci",
    "Yi": "πŸ«‚ Yi δΊΊ", # people
    "mpt": "🧱 MPT",
    # TODO: suggest emojis for the architectures below
    "gpt_neox": "GPT-NeoX",
    "gpt_neo": "GPT-Neo",
    "gpt2": "GPT-2",
    "gptj": "GPT-J",
    "xglm": "XGLM",
    "bart": "BART",
    "opt": "OPT",
}


def model_hyperlink(link, model_name):
    """Wrap a model name in an HTML anchor with a dotted underline, opening in a new tab."""
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'


def process_arch(model_arch):
    """Return the emoji-decorated display name for a model architecture, or the raw name if unknown."""
    return LLM_MODEL_ARCHS.get(model_arch, model_arch)


def process_score(score, quantization):
    """Format a score to two decimals, marking quantized results with a trailing asterisk.

    The trailing space on unquantized scores keeps the string width consistent with the starred variant.
    """
    if quantization != "None":
        return f"{score:.2f}*"
    else:
        return f"{score:.2f} "


def process_quantization_scheme(x):
    """Map a benchmark row to a human-readable quantization label.

    `x` is expected to expose flattened `backend.*` config keys (e.g. a pandas row or dict).
    Note: missing configs may be NaN, so the explicit `== True` comparisons below avoid
    treating NaN (which is truthy) as an enabled flag.
    """
    if x["backend.quantization_scheme"] == "bnb" and x["backend.quantization_config.load_in_4bit"] == True:
        return "BnB.4bit"
    elif x["backend.quantization_scheme"] == "bnb" and x["backend.quantization_config.load_in_8bit"] == True:
        return "BnB.8bit"
    elif (x["backend.quantization_scheme"] == "gptq") and (
        x["backend.quantization_config.exllama_config.version"] == 1
    ):
        return "GPTQ.4bit+ExllamaV1"
    elif (x["backend.quantization_scheme"] == "gptq") and (
        x["backend.quantization_config.exllama_config.version"] == 2
    ):
        return "GPTQ.4bit+ExllamaV2"
    elif x["backend.quantization_scheme"] == "gptq" and x["backend.quantization_config.bits"] == 4:
        return "GPTQ.4bit"
    elif x["backend.quantization_scheme"] == "awq" and x["backend.quantization_config.version"] == "gemm":
        return "AWQ.4bit+GEMM"
    elif x["backend.quantization_scheme"] == "awq" and x["backend.quantization_config.version"] == "gemv":
        return "AWQ.4bit+GEMV"
    else:
        return "None"


# def change_tab(query_param):
#     query_param = query_param.replace("'", '"')
#     query_param = json.loads(query_param)

#     if isinstance(query_param, dict) and "tab" in query_param and query_param["tab"] == "plot":
#         return gr.Tabs.update(selected=1)
#     else:
#         return gr.Tabs.update(selected=0)