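# mergekit config: a layer-wise merge that stacks contiguous decoder-layer
# slices from five Mistral-7B fine-tunes (plus one weight-0 dummy model) into
# a single 32-layer model. The first and last slices exist mainly to route
# embed_tokens and lm_head through mergekit's tokenizer-aware merge path.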
merge_method: linear
parameters:
  weight: 1.0
slices:
  - sources:
      - model: CultriX/NeuralTrix-7B-dpo # embed_tokens comes along for the ride with whatever model supplies the first layer
        layer_range: [0, 1]
      - model: paulml/DPOB-INMTOB-7B # dummy second model with weight 0, so the tokenizer-based merge routine is invoked for embed_tokens
        layer_range: [0, 1]
        parameters:
          weight: 0
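      # net effect of this slice: 1.0 * NeuralTrix + 0 * DPOB-INMTOB, so the
      # NeuralTrix tensors pass through unchanged; the weight-0 second model is
      # listed only so the tokenizer-based merge routine handles embed_tokens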
  - sources:
      - model: cognitivecomputations/dolphin-2.1-mistral-7b
        layer_range: [1, 8] # start at 1: layer 0 is already supplied by the embed_tokens slice above
  - sources:
      - model: bardsai/jaskier-7b-dpo-v5.6
        layer_range: [8, 16]
  - sources:
      - model: paulml/OGNO-7B
        layer_range: [16, 24]
  - sources:
      - model: argilla/distilabeled-OpenHermes-2.5-Mistral-7B
        layer_range: [24, 31] # stop at 31: the final slice below supplies layer 31 along with lm_head
  - sources: # same trick as the first slice, but for lm_head, which rides along with the last layer
      - model: CultriX/NeuralTrix-7B-dpo
        layer_range: [31, 32]
      - model: paulml/DPOB-INMTOB-7B
        layer_range: [31, 32]
        parameters:
          weight: 0
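# layer budget check: [0,1) + [1,8) + [8,16) + [16,24) + [24,31) + [31,32)
#   = 1 + 7 + 8 + 8 + 7 + 1 = 32 decoder layers, matching Mistral-7B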
dtype: float16
tokenizer_source: model:cognitivecomputations/dolphin-2.1-mistral-7b
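
# Example invocation (a sketch, assuming mergekit is installed and this file
# is saved as config.yml; flags vary by mergekit version):
#   mergekit-yaml config.yml ./merged-model --cuda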