BounharAbdelaziz committed on
Commit 854b918 · verified · 1 Parent(s): 41c71e1

Upload folder using huggingface_hub

README.md CHANGED
@@ -3,17 +3,17 @@ tags:
 - merge
 - mergekit
 - lazymergekit
-- openchat/openchat-3.5-1210
+- beowolx/CodeNinja-1.0-OpenChat-7B
 - WizardLM/WizardMath-7B-V1.1
 base_model:
-- openchat/openchat-3.5-1210
+- beowolx/CodeNinja-1.0-OpenChat-7B
 - WizardLM/WizardMath-7B-V1.1
 ---
 
 # Telecomien
 
 Telecomien is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
-* [openchat/openchat-3.5-1210](https://huggingface.co/openchat/openchat-3.5-1210)
+* [beowolx/CodeNinja-1.0-OpenChat-7B](https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B)
 * [WizardLM/WizardMath-7B-V1.1](https://huggingface.co/WizardLM/WizardMath-7B-V1.1)
 
 ## 🧩 Configuration
@@ -21,12 +21,12 @@ Telecomien is a merge of the following models using [LazyMergekit](https://colab
 ```yaml
 slices:
   - sources:
-      - model: openchat/openchat-3.5-1210
+      - model: beowolx/CodeNinja-1.0-OpenChat-7B
         layer_range: [0, 32]
       - model: WizardLM/WizardMath-7B-V1.1
         layer_range: [0, 32]
 merge_method: slerp
-base_model: openchat/openchat-3.5-1210
+base_model: beowolx/CodeNinja-1.0-OpenChat-7B
 parameters:
   t:
     - filter: self_attn
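The README hunk ends inside the configuration block, so the card's usage snippet is not visible in this diff. As orientation only, here is a minimal loading sketch with `transformers`; the repo id `BounharAbdelaziz/Telecomien` is an assumption inferred from the commit author and the model name, not something stated in the diff.

```python
# Minimal usage sketch, not part of the diff. The repo id below is a guess
# inferred from the commit author and the "Telecomien" model name.
import torch
from transformers import AutoTokenizer, pipeline

repo_id = "BounharAbdelaziz/Telecomien"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
messages = [{"role": "user", "content": "What is a large language model?"}]
# Render the chat template stored in tokenizer_config.json into a prompt string.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

generator = pipeline(
    "text-generation",
    model=repo_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
outputs = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
print(outputs[0]["generated_text"])
```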
config.json CHANGED
@@ -1,11 +1,11 @@
 {
-  "_name_or_path": "openchat/openchat-3.5-1210",
+  "_name_or_path": "beowolx/CodeNinja-1.0-OpenChat-7B",
   "architectures": [
     "MistralForCausalLM"
   ],
   "attention_dropout": 0.0,
   "bos_token_id": 1,
-  "eos_token_id": 32000,
+  "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
@@ -21,6 +21,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.38.1",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 32002
 }
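Two of the fields changed above directly affect inference: `eos_token_id` moves from 32000 (`<|end_of_turn|>`) to 2 (`</s>`), the id generation stops on by default, and `use_cache` is turned off in the stored config (it can still be re-enabled at load time). A small inspection sketch, using the same hypothetical repo id as above:

```python
# Minimal sketch for checking the fields this commit touches in config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("BounharAbdelaziz/Telecomien")  # hypothetical repo id
print(config.eos_token_id)  # 2 ("</s>") after this commit; previously 32000 ("<|end_of_turn|>")
print(config.use_cache)     # False in the stored config; pass use_cache=True at load time if desired
print(config.vocab_size)    # 32002, unchanged
```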
mergekit_config.yml CHANGED
@@ -1,12 +1,12 @@
 
 slices:
   - sources:
-      - model: openchat/openchat-3.5-1210
+      - model: beowolx/CodeNinja-1.0-OpenChat-7B
         layer_range: [0, 32]
       - model: WizardLM/WizardMath-7B-V1.1
         layer_range: [0, 32]
 merge_method: slerp
-base_model: openchat/openchat-3.5-1210
+base_model: beowolx/CodeNinja-1.0-OpenChat-7B
 parameters:
   t:
     - filter: self_attn
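The config selects `merge_method: slerp`: for each tensor pair, mergekit interpolates along the arc between the two models' weights rather than along a straight line, with `t` controlling how far to move from the base model toward the other. A self-contained sketch of the idea (not mergekit's actual implementation):

```python
# Minimal sketch of what "merge_method: slerp" does to a pair of weight tensors.
import torch

def slerp(t: float, a: torch.Tensor, b: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Spherical linear interpolation between two weight tensors of the same shape."""
    a_flat, b_flat = a.flatten().float(), b.flatten().float()
    a_unit = a_flat / (a_flat.norm() + eps)
    b_unit = b_flat / (b_flat.norm() + eps)
    # Angle between the two weight directions.
    omega = torch.arccos(torch.clamp(a_unit @ b_unit, -1.0, 1.0))
    if omega.abs() < eps:
        # Nearly parallel: fall back to plain linear interpolation.
        return ((1 - t) * a_flat + t * b_flat).reshape(a.shape).to(a.dtype)
    so = torch.sin(omega)
    out = (torch.sin((1 - t) * omega) / so) * a_flat + (torch.sin(t * omega) / so) * b_flat
    return out.reshape(a.shape).to(a.dtype)

# Toy usage: blend two random "layers" halfway (t=0 keeps the base, t=1 keeps the other model).
w_base, w_other = torch.randn(4096, 4096), torch.randn(4096, 4096)
w_merged = slerp(0.5, w_base, w_other)
```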
model-00001-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acc116665e515da5681f89e060534b25b294c1bd1f1094036bf64c528ee252fa
+oid sha256:667d0ad12f2f991eede1f1cdcc21d29d3671809d56bf5111a46f1b943f679822
 size 1979773128
model-00002-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb8c8dc4408136ce586fd18c55d770240dc4446a5030f0af819f8c7c52771322
+oid sha256:af836e381f6898b75fb89e2d3894a23a23bcd0b4548456ffd6dc7c1014b1f95c
 size 1946235640
model-00003-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9fe9c9b606001955371522b77a19d8bcdb26ab4a76eb1b748d4c946217201994
+oid sha256:25e642451de7a7ecd070a466d64def74b8e64d59c33cafe974009d06a90898c4
 size 1973490216
model-00004-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e13829b9c0e1843966a7a0a49b7fe2297e3e13f5577e7f34fd41a59862cc3616
+oid sha256:1d25c13d5bb01fa85054be503e971507c8b2441cde33d88e15927a2d2ac18164
 size 1979781464
model-00005-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cbe6b9c75e98fd5b4512ca3bebfd3e379bd124411688083ccb6725a885dd083
+oid sha256:556c74c27af05a98a14e9b77348f61c301f06e57f8f394a6f2eaa32845d68187
 size 1946243984
model-00006-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23e51a4749dd0d3bff6629193a9a856c01285c89a2e52d4b823ad9bc4cc73ac2
+oid sha256:f0470349147273ceba591e1fb3ae4d1425517fd2300d8fd4edb041b89826c27b
 size 1923166040
model-00007-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:65e4e9bde0d7c02ca87c73e1444c0bcdfae14c75950a3e00909736f5746b38fa
+oid sha256:5d974ff8696fc51b2b940d2034af39c8780190b4c1bafe9faa69d7a8d3bf6635
 size 1946243984
model-00008-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb385216c5edd4dbe48f9fd0a626f2093d1eefe851739a2005c08a22ce2bd6ca
+oid sha256:473041c5be6b99fc3efa3bf7e0f6fa0a38cff001d4cb6209ca23678f8f91178e
 size 788563544
special_tokens_map.json CHANGED
@@ -11,7 +11,14 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "<|end_of_turn|>",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
+  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -50,13 +51,15 @@
   "bos_token": "<s>",
   "chat_template": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|end_of_turn|>",
+  "eos_token": "</s>",
   "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": null,
+  "pad_token": "</s>",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
+  "trust_remote_code": false,
   "unk_token": "<unk>",
-  "use_default_system_prompt": true
+  "use_default_system_prompt": true,
+  "use_fast": true
 }
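Note that the chat template kept here still closes each turn with `<|end_of_turn|>` while `eos_token` is now `</s>`, so decoding may run past a turn boundary unless both ids are treated as stop tokens. A hedged sketch of one way to handle that (same hypothetical repo id as before):

```python
# Minimal sketch, not from the repo: stop generation on either "</s>" or "<|end_of_turn|>".
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "BounharAbdelaziz/Telecomien"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map="auto")

# Both the configured EOS (id 2) and the turn delimiter used by the chat template.
stop_ids = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|end_of_turn|>")]

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello!"}], tokenize=False, add_generation_prompt=True
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64, eos_token_id=stop_ids)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```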