name: UltimateMerge-14B-v1-slerp
merge_method: slerp
base_model: CultriX/Enhanced-TIES-Base-v1
tokenizer_source: base
dtype: float32
out_dtype: bfloat16
parameters:
  normalize: true
  rescale: false
  int8_mask: true
  t:
    - value: 0.3 # Low interpolation weight keeps the merge close to the base model.
slices:
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1 # Base model.
        layer_range: [0, 8]
      - model: arcee-ai/Virtuoso-Small-v2 # Strong IFEval scores; anchors the early layers (also used for the output slice below).
        layer_range: [0, 8]
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1
        layer_range: [8, 16]
      - model: sometimesanotion/Lamarck-14B-v0.7-rc4 # Strong average benchmark score.
        layer_range: [8, 16]
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1
        layer_range: [16, 24]
      - model: CultriX/Qwen2.5-14B-Hyperionv3 # Improves reasoning benchmarks.
        layer_range: [16, 24]
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1
        layer_range: [24, 32]
      - model: CultriX/Qwen2.5-14B-Hyperionv5 # General benchmarks.
        layer_range: [24, 32]
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1
        layer_range: [32, 40]
      - model: sthenno-com/miscii-14b-1225 # IFEval and BBH.
        layer_range: [32, 40]
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1
        layer_range: [40, 48]
      - model: arcee-ai/Virtuoso-Small-v2 # Output layers.
        layer_range: [40, 48]
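
# To run this merge (a minimal sketch, assuming mergekit is installed and this
# file is saved as config.yaml; the output directory name is illustrative):
#
#   mergekit-yaml config.yaml ./UltimateMerge-14B-v1-slerp --cuda
#
# --cuda runs the tensor math on the GPU; omit it for a CPU-only merge.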