# Llama3.1-8B-Dyanka-Preview / mergekit_config.yml
# Uploaded by Xiaojian9992024 via huggingface_hub (commit 8cf116c, verified)
# mergekit TIES-merge configuration: merges ten Llama-3.1-8B fine-tunes into
# the Nemotron-Nano base. Each donor model contributes with density 0.1
# (fraction of task-vector parameters retained) and weight 0.1.
models:
  - model: nvidia/Llama-3.1-Nemotron-Nano-8B-v1
    # No parameters necessary for the base model.
  - model: SentientAGI/Dobby-Mini-Unhinged-Llama-3.1-8B
    parameters:
      density: 0.1
      weight: 0.1
  - model: arcee-ai/Llama-3.1-SuperNova-Lite
    parameters:
      density: 0.1
      weight: 0.1
  - model: passing2961/Thanos-8B
    parameters:
      density: 0.1
      weight: 0.1
  - model: prithivMLmods/Llama-3.1-8B-Open-SFT
    parameters:
      density: 0.1
      weight: 0.1
  - model: FreedomIntelligence/HuatuoGPT-o1-8B
    parameters:
      density: 0.1
      weight: 0.1
  - model: s-emanuilov/LLMBG-Llama-3.1-8B-BG-Reasoning-v0.1
    parameters:
      density: 0.1
      weight: 0.1
  - model: HiTZ/Latxa-Llama-3.1-8B-Instruct
    parameters:
      density: 0.1
      weight: 0.1
  - model: grimjim/Llama-3.1-8B-Instruct-abliterated_via_adapter
    parameters:
      density: 0.1
      weight: 0.1
  - model: DeepAuto-AI/Explore_Llama-3.1-8B-Inst
    parameters:
      density: 0.1
      weight: 0.1
  - model: OpenLLM-Ro/RoLlama3.1-8b-Instruct-2024-10-09
    parameters:
      density: 0.1
      weight: 0.1
merge_method: ties
base_model: nvidia/Llama-3.1-Nemotron-Nano-8B-v1
parameters:
  normalize: false
  int8_mask: true
dtype: bfloat16