automerger committed
Commit 6b42d95 · verified · 1 Parent(s): e84af1b

Upload folder using huggingface_hub

README.md CHANGED
@@ -18,14 +18,14 @@ Experiment26M7-7B is an automated merge created by [Maxime Labonne](https://hugg
 
 ```yaml
 models:
-  - model: rwitz/experiment26-truthy-iter-0
+  - model: yam-peleg/Experiment26-7B
     # No parameters necessary for base model
   - model: liminerity/M7-7b
     parameters:
       density: 0.53
       weight: 0.6
 merge_method: dare_ties
-base_model: rwitz/experiment26-truthy-iter-0
+base_model: yam-peleg/Experiment26-7B
 parameters:
   int8_mask: true
 dtype: bfloat16
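The YAML above is a complete mergekit recipe: with dare_ties, density 0.53 keeps roughly half of the fine-tune's delta parameters and weight 0.6 scales their contribution before sign conflicts are resolved against the base model. For anyone wanting to re-run the merge locally, below is a minimal sketch following mergekit's documented Python entry point; the output path and option values are illustrative assumptions, not part of this commit.

```python
# Minimal sketch of re-running this merge with mergekit's Python API.
# Assumes `pip install mergekit`; the output path is an illustrative choice.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("mergekit_config.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path="./Experiment26M7-7B",      # assumed output directory
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # merge on GPU when one is present
        copy_tokenizer=True,             # carry the base model tokenizer over
        lazy_unpickle=False,
        low_cpu_memory=False,
    ),
)
```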
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "rwitz/experiment26-truthy-iter-0",
+  "_name_or_path": "yam-peleg/Experiment26-7B",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.38.2",
+  "transformers_version": "4.39.0",
   "use_cache": true,
   "vocab_size": 32000
 }
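The config.json change is metadata only: the recorded base model path and the transformers version stamp are updated, while the Mistral architecture fields are untouched. A quick sketch for sanity-checking the shipped config; the repo id automerger/Experiment26M7-7B is assumed from the README, not stated in this commit.

```python
# Sketch: sanity-check the shipped config with transformers' AutoConfig.
# "automerger/Experiment26M7-7B" is an assumed repo id.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("automerger/Experiment26M7-7B")
print(config.architectures)  # ["MistralForCausalLM"]
print(config.torch_dtype)    # torch.bfloat16
print(config.vocab_size)     # 32000
```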
mergekit_config.yml CHANGED
@@ -1,13 +1,13 @@
 
 models:
-  - model: rwitz/experiment26-truthy-iter-0
+  - model: yam-peleg/Experiment26-7B
     # No parameters necessary for base model
   - model: liminerity/M7-7b
     parameters:
       density: 0.53
       weight: 0.6
 merge_method: dare_ties
-base_model: rwitz/experiment26-truthy-iter-0
+base_model: yam-peleg/Experiment26-7B
 parameters:
   int8_mask: true
 dtype: bfloat16
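mergekit_config.yml mirrors the recipe embedded in the README, and the merged checkpoint behaves as a standard Mistral-architecture model. A minimal usage sketch, assuming the repo id automerger/Experiment26M7-7B and the bfloat16 dtype recorded in config.json:

```python
# Sketch: load and prompt the merged model. The repo id is an assumption
# based on the README; a local merge output directory works the same way.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "automerger/Experiment26M7-7B"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    device_map="auto",           # requires accelerate
)

inputs = tokenizer("Model merging works by", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```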
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:94bfdf09ecf2bc004d9a98eaa9acfea00c49cb8e30b09019dce6489e05882490
+oid sha256:691d778f7768244662ea78c8526be629088179f9efef397e95908d11e937eddc
 size 9825524456
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:57d5c00c37898fb038461d909b19e6eebb8e2be9fc8ba33ceec4f9b89e875822
+oid sha256:3194c9a0e24bf4d64646126eb5fa49f7df0b703db4c2609bfbfaabb48764b9be
 size 4657973592
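The two .safetensors entries are git-lfs pointer files rather than the weights themselves: oid is the SHA-256 digest of the real shard and size its byte count (unchanged here, consistent with a re-merge of identically shaped tensors). A small sketch for verifying a downloaded shard against its pointer, using the first shard's oid from this commit:

```python
# Sketch: verify a downloaded shard against the sha256 oid in its LFS pointer.
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256; the shards are ~10 GB, so avoid a full read."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid from this commit's pointer for the first shard
expected = "691d778f7768244662ea78c8526be629088179f9efef397e95908d11e937eddc"
actual = file_sha256("model-00001-of-00002.safetensors")
assert actual == expected, f"checksum mismatch: {actual}"
```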