merge_method: model_stock
models:
  - model: Nexesenex/Llama_3.x_70b_L3.3_Dolphin_128K_v1.02
    parameters:
      weight: 1.0
  - model: nbeerbower/Llama-3.1-Nemotron-lorablated-70B
    parameters:
      weight: 1.0
  - model: mlabonne/Hermes-3-Llama-3.1-70B-lorablated
    parameters:
      weight: 1.0
  - model: huihui-ai/Llama-3.1-Tulu-3-70B-abliterated
    parameters:
      weight: 1.0
  - model: huihui-ai/Tess-R1-Limerick-Llama-3.1-70B-abliterated
    parameters:
      weight: 1.0
  - model: migtissera/Tess-3-Llama-3.1-70B
    parameters:
      weight: 1.0
base_model: Nexesenex/Llama_3.x_70b_L3.3_Dolphin_128K_v1.02
dtype: bfloat16
out_dtype: bfloat16
parameters:
  int8_mask: true
  normalize: true
  rescale: false
  filter_wise: false
  smooth: false
  allow_negative_weights: false
chat_template: auto
tokenizer:
  source: union
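A minimal sketch of running this recipe with mergekit's Python entry points, assuming the config above is saved to a file. The file path and output directory names are hypothetical, and the API names (MergeConfiguration, MergeOptions, run_merge) follow the mergekit README and may differ between mergekit versions.

import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

CONFIG_YML = "merge_config.yaml"   # hypothetical path to the YAML recipe above
OUTPUT_PATH = "./merged-model"     # hypothetical output directory

# Parse the YAML recipe into mergekit's configuration object
with open(CONFIG_YML, "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Execute the model_stock merge and write the result to OUTPUT_PATH
run_merge(
    merge_config,
    out_path=OUTPUT_PATH,
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # run the merge on GPU if one is available
        copy_tokenizer=True,             # write tokenizer files into the output directory
        lazy_unpickle=False,
        low_cpu_memory=False,
    ),
)

The same merge can be run from the command line (assuming a standard mergekit install) with: mergekit-yaml merge_config.yaml ./merged-model --cuda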