---
# mergekit configuration: TIES merge of two 7B models onto a base.
# Gradient values (lists) are interpolated across layer groups by mergekit.
models:
  - model: starsnatched/MemGPT
    parameters:
      density: [1, 0.7, 0.1]  # density gradient
      weight: 1.0
  - model: 222gate/Ingot-7b-slerp-7-forged-mirror
    parameters:
      density: 0.5
      weight: [0, 0.3, 0.7, 1]  # weight gradient
  - model: starsnatched/MemGPT
    parameters:
      density: 0.33
      weight:
        # per-tensor weighting: MLP tensors get 0.5, everything else 0
        - filter: mlp
          value: 0.5
        - value: 0
merge_method: ties
base_model: liminerity/Mem-Beagle-7b-slerp-v2
parameters:
  normalize: true
  int8_mask: true
dtype: bfloat16