models:
  - model: flammenai/Mahou-1.3-llama3-8B
    parameters:
      weight: 1.0
  - model: Danielbrdz/Barcenas-Llama3-8b-ORPO
    parameters:
      weight: 1.0
  - model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      weight: 1.0
merge_method: linear
tokenizer_source: union
dtype: float16
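
A minimal sketch of how this linear merge could be run with mergekit's Python API, assuming the configuration above is saved as `config.yaml` (the file name and the `./merged-model` output path are placeholders, not part of the original config; the equivalent CLI call would be `mergekit-yaml config.yaml ./merged-model`):

```python
# Sketch only: apply the merge config above with mergekit.
# Assumes mergekit, PyYAML, and torch are installed; paths are placeholders.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Load and validate the YAML merge configuration.
with open("config.yaml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Run the merge and write the result to the output directory.
run_merge(
    merge_config,
    out_path="./merged-model",           # placeholder output directory
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # use a GPU if one is available
        copy_tokenizer=True,             # emit the (union) tokenizer with the weights
    ),
)
```

With `merge_method: linear` and equal weights of 1.0, the three models' parameters are averaged uniformly, and `tokenizer_source: union` builds a tokenizer covering the combined vocabulary of all three source models.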