automerger committed
Commit 0b4fe85
1 Parent(s): 4e1f553

Upload folder using huggingface_hub

README.md CHANGED
@@ -18,14 +18,14 @@ Experiment26Neuralsirkrishna-7B is an automated merge created by [Maxime Labonne
 
 ```yaml
 models:
-  - model: rwitz/experiment26-truthy-iter-2
+  - model: yam-peleg/Experiment26-7B
     # No parameters necessary for base model
   - model: Kukedlc/NeuralSirKrishna-7b
     parameters:
       density: 0.53
       weight: 0.6
 merge_method: dare_ties
-base_model: rwitz/experiment26-truthy-iter-2
+base_model: yam-peleg/Experiment26-7B
 parameters:
   int8_mask: true
 dtype: bfloat16
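
This commit retargets the DARE-TIES recipe from rwitz/experiment26-truthy-iter-2 to yam-peleg/Experiment26-7B as the base model. For orientation, a minimal usage sketch follows; it assumes the merged weights are published as automerger/Experiment26Neuralsirkrishna-7B (inferred from the model name in the README, not stated in this diff):

```python
# Hedged sketch: load the merged model with transformers.
# The repo id is an assumption inferred from the README's model name.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "automerger/Experiment26Neuralsirkrishna-7B"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches the merge config's dtype: bfloat16
    device_map="auto",
)

prompt = "Model merging works by"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```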
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "rwitz/experiment26-truthy-iter-2",
+  "_name_or_path": "yam-peleg/Experiment26-7B",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.38.2",
+  "transformers_version": "4.39.1",
   "use_cache": true,
   "vocab_size": 32000
 }
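
The config change only swaps `_name_or_path` to the new base model and bumps `transformers_version` to 4.39.1; the architecture itself is unchanged. A quick way to sanity-check the published config (same assumed repo id as above):

```python
# Hedged sketch: inspect the published config.json via transformers.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("automerger/Experiment26Neuralsirkrishna-7B")  # assumed repo id
print(config.architectures)   # expected: ['MistralForCausalLM']
print(config.sliding_window)  # expected: 4096
print(config.vocab_size)      # expected: 32000
```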
mergekit_config.yml CHANGED
@@ -1,15 +1,14 @@
 
 models:
-  - model: rwitz/experiment26-truthy-iter-2
+  - model: yam-peleg/Experiment26-7B
     # No parameters necessary for base model
   - model: Kukedlc/NeuralSirKrishna-7b
     parameters:
       density: 0.53
       weight: 0.6
 merge_method: dare_ties
-base_model: rwitz/experiment26-truthy-iter-2
+base_model: yam-peleg/Experiment26-7B
 parameters:
   int8_mask: true
 dtype: bfloat16
 random_seed: 0
-
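
mergekit_config.yml is the exact recipe mergekit consumed. To reproduce the merge locally, a sketch using mergekit's Python entry point follows; the `run_merge`/`MergeOptions` names follow mergekit's documented API, but check them against your installed version:

```python
# Hedged sketch: re-run this DARE-TIES merge from mergekit_config.yml.
# run_merge/MergeOptions follow mergekit's documented Python API; verify
# against your installed mergekit version.
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("mergekit_config.yml", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    "./Experiment26Neuralsirkrishna-7B",  # output directory (assumed name)
    options=MergeOptions(
        cuda=False,           # flip to True to merge on GPU
        copy_tokenizer=True,  # copy the base model's tokenizer into the output
        lazy_unpickle=True,   # reduce peak memory while reading shards
    ),
)
```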
 
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9db25f634d4c2070769b39274bbdb3f884ea61118ff20f46679628137049717
+oid sha256:3e26d79688ba47242c536f90031031dccd2d9b69eca249aa55f706f4378a1e16
 size 9942981696
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e8535596ddf38322110a8424e07c9a2c0b7f65202c1d8fe96ab7dddd4e3e622
+oid sha256:69932b9221c1216dfe47273ce8a21ea108119d1aec585ba923ca0d034997ff91
 size 4540516344
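
Both shard updates change only the LFS oid (the sha256 of the file contents); the sizes are identical to before, consistent with swapping the base model while keeping the same architecture and dtype. A standard-library sketch for verifying downloaded shards against these oids:

```python
# Verify downloaded shards against the sha256 oids from the LFS pointers above.
import hashlib

expected = {
    "model-00001-of-00002.safetensors":
        "3e26d79688ba47242c536f90031031dccd2d9b69eca249aa55f706f4378a1e16",
    "model-00002-of-00002.safetensors":
        "69932b9221c1216dfe47273ce8a21ea108119d1aec585ba923ca0d034997ff91",
}

for name, want in expected.items():
    h = hashlib.sha256()
    with open(name, "rb") as fp:
        for chunk in iter(lambda: fp.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    print(name, "OK" if h.hexdigest() == want else "MISMATCH")
```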