danielhanchen committed · Commit f041447 · verified · 1 Parent(s): 7ca7e74

Add files using upload-large-folder tool
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+     {%- if messages[0]['content'] is string -%}
+         {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+     {%- else -%}
+         {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+     {%- endif -%}
+     {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+     {%- set first_user_prefix = "" -%}
+     {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+     {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+         {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+     {%- endif -%}
+     {%- if (message['role'] == 'assistant') -%}
+         {%- set role = "model" -%}
+     {%- else -%}
+         {%- set role = message['role'] -%}
+     {%- endif -%}
+     {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+     {%- if message['content'] is string -%}
+         {{ message['content'] | trim }}
+     {%- elif message['content'] is iterable -%}
+         {%- for item in message['content'] -%}
+             {%- if item['type'] == 'image' -%}
+                 {{ '<start_of_image>' }}
+             {%- elif item['type'] == 'text' -%}
+                 {{ item['text'] | trim }}
+             {%- endif -%}
+         {%- endfor -%}
+     {%- else -%}
+         {{ raise_exception("Invalid content type") }}
+     {%- endif -%}
+     {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+     {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
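
Since the template file is new in this commit, here is a small sanity check of how it renders, as a minimal sketch using jinja2 directly, assuming the file above is saved locally as chat_template.jinja (transformers' apply_chat_template performs the equivalent rendering):

from jinja2 import Environment

def raise_exception(message):
    # The template calls this helper when roles don't alternate
    # or when a content item has an unknown type.
    raise ValueError(message)

env = Environment()
env.globals["raise_exception"] = raise_exception
template = env.from_string(open("chat_template.jinja").read())

rendered = template.render(
    bos_token="<bos>",
    add_generation_prompt=True,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ]},
    ],
)
print(rendered)
# <bos><start_of_turn>user
# You are a helpful assistant.
#
# <start_of_image>Describe this image.<end_of_turn>
# <start_of_turn>model

Note how the system message is folded into the first user turn as a prefix, and how each image content item becomes a single <start_of_image> token.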
config.json CHANGED
@@ -25,67 +25,74 @@
  "multi_modal_projector",
  "merger",
  "modality_projection",
- "language_model.model.layers.12.mlp",
+ "language_model.model.layers.17.self_attn",
+ "language_model.model.layers.16.mlp",
+ "language_model.model.layers.24.mlp",
+ "language_model.model.layers.27.mlp",
+ "language_model.model.layers.20.mlp",
+ "language_model.model.layers.22.mlp",
+ "language_model.model.layers.15.mlp",
+ "language_model.model.layers.25.self_attn",
+ "language_model.model.layers.12.self_attn",
+ "language_model.model.layers.19.mlp",
  "language_model.model.layers.9.mlp",
- "language_model.model.layers.10.mlp",
- "language_model.model.layers.22.self_attn",
- "language_model.model.layers.11.mlp",
- "language_model.model.layers.18.mlp",
- "language_model.model.layers.17.mlp",
- "vision_tower.vision_model.encoder.layers.22.self_attn",
- "vision_tower.vision_model.encoder.layers.26.self_attn",
- "vision_tower.vision_model.encoder.layers.23.self_attn",
  "vision_tower.vision_model.encoder.layers.25.self_attn",
- "vision_tower.vision_model.encoder.layers.24.self_attn",
- "vision_tower.vision_model.encoder.layers.19.self_attn",
- "vision_tower.vision_model.encoder.layers.16.self_attn",
- "vision_tower.vision_model.encoder.layers.19.mlp",
- "vision_tower.vision_model.encoder.layers.25.mlp",
+ "vision_tower.vision_model.encoder.layers.22.self_attn",
+ "language_model.model.layers.12.mlp",
+ "language_model.model.layers.27.self_attn",
  "vision_tower.vision_model.encoder.layers.17.self_attn",
- "vision_tower.vision_model.encoder.layers.24.mlp",
- "vision_tower.vision_model.encoder.layers.21.self_attn",
- "vision_tower.vision_model.encoder.layers.17.mlp",
- "vision_tower.vision_model.encoder.layers.20.self_attn",
- "vision_tower.vision_model.encoder.layers.15.mlp",
- "vision_tower.vision_model.encoder.layers.15.self_attn",
+ "vision_tower.vision_model.encoder.layers.23.self_attn",
  "vision_tower.vision_model.encoder.layers.16.mlp",
+ "vision_tower.vision_model.encoder.layers.20.self_attn",
+ "vision_tower.vision_model.encoder.layers.26.self_attn",
+ "vision_tower.vision_model.encoder.layers.24.mlp",
  "vision_tower.vision_model.encoder.layers.20.mlp",
  "vision_tower.vision_model.encoder.layers.23.mlp",
- "vision_tower.vision_model.encoder.layers.14.self_attn",
- "vision_tower.vision_model.encoder.layers.18.self_attn",
- "vision_tower.vision_model.encoder.layers.21.mlp",
+ "vision_tower.vision_model.encoder.layers.16.self_attn",
  "vision_tower.vision_model.encoder.layers.22.mlp",
+ "vision_tower.vision_model.encoder.layers.21.mlp",
+ "vision_tower.vision_model.encoder.layers.18.self_attn",
+ "vision_tower.vision_model.encoder.layers.17.mlp",
  "vision_tower.vision_model.encoder.layers.13.mlp",
- "vision_tower.vision_model.encoder.layers.18.mlp",
- "vision_tower.vision_model.encoder.layers.14.mlp",
- "vision_tower.vision_model.encoder.layers.10.mlp",
- "vision_tower.vision_model.encoder.layers.12.mlp",
- "vision_tower.vision_model.encoder.layers.11.self_attn",
- "vision_tower.vision_model.encoder.layers.8.mlp",
- "vision_tower.vision_model.encoder.layers.13.self_attn",
- "vision_tower.vision_model.encoder.layers.7.mlp",
- "vision_tower.vision_model.encoder.layers.12.self_attn",
- "vision_tower.vision_model.encoder.layers.9.mlp",
+ "vision_tower.vision_model.encoder.layers.24.self_attn",
+ "vision_tower.vision_model.encoder.layers.15.mlp",
  "vision_tower.vision_model.encoder.layers.10.self_attn",
- "vision_tower.vision_model.encoder.layers.11.mlp",
+ "vision_tower.vision_model.encoder.layers.19.self_attn",
+ "vision_tower.vision_model.encoder.layers.15.self_attn",
+ "vision_tower.vision_model.encoder.layers.21.self_attn",
  "vision_tower.vision_model.encoder.layers.4.mlp",
  "vision_tower.vision_model.encoder.layers.5.mlp",
- "vision_tower.vision_model.encoder.layers.9.self_attn",
+ "vision_tower.vision_model.encoder.layers.18.mlp",
+ "vision_tower.vision_model.encoder.layers.13.self_attn",
+ "vision_tower.vision_model.encoder.layers.9.mlp",
+ "vision_tower.vision_model.encoder.layers.1.self_attn",
+ "vision_tower.vision_model.encoder.layers.25.mlp",
+ "vision_tower.vision_model.encoder.layers.7.self_attn",
+ "vision_tower.vision_model.encoder.layers.8.mlp",
+ "vision_tower.vision_model.encoder.layers.19.mlp",
+ "vision_tower.vision_model.encoder.layers.1.mlp",
+ "vision_tower.vision_model.encoder.layers.12.self_attn",
  "vision_tower.vision_model.encoder.layers.6.self_attn",
+ "vision_tower.vision_model.encoder.layers.7.mlp",
+ "vision_tower.vision_model.encoder.layers.14.self_attn",
+ "vision_tower.vision_model.encoder.layers.11.self_attn",
+ "vision_tower.vision_model.encoder.layers.26.mlp",
+ "vision_tower.vision_model.encoder.layers.14.mlp",
+ "vision_tower.vision_model.encoder.layers.9.self_attn",
  "vision_tower.vision_model.encoder.layers.8.self_attn",
+ "vision_tower.vision_model.encoder.layers.10.mlp",
  "vision_tower.vision_model.encoder.layers.3.mlp",
- "vision_tower.vision_model.encoder.layers.7.self_attn",
- "vision_tower.vision_model.encoder.layers.4.self_attn",
- "vision_tower.vision_model.encoder.layers.6.mlp",
- "vision_tower.vision_model.encoder.layers.5.self_attn",
- "vision_tower.vision_model.encoder.layers.3.self_attn",
- "vision_tower.vision_model.encoder.layers.1.mlp",
+ "vision_tower.vision_model.encoder.layers.0.mlp",
+ "language_model.model.layers.23.self_attn",
+ "vision_tower.vision_model.encoder.layers.11.mlp",
+ "vision_tower.vision_model.encoder.layers.12.mlp",
  "vision_tower.vision_model.encoder.layers.2.mlp",
- "vision_tower.vision_model.encoder.layers.1.self_attn",
- "vision_tower.vision_model.encoder.layers.26.mlp",
- "vision_tower.vision_model.encoder.layers.2.self_attn",
+ "vision_tower.vision_model.encoder.layers.5.self_attn",
+ "vision_tower.vision_model.encoder.layers.6.mlp",
  "vision_tower.vision_model.encoder.layers.0.self_attn",
- "vision_tower.vision_model.encoder.layers.0.mlp"
+ "vision_tower.vision_model.encoder.layers.3.self_attn",
+ "vision_tower.vision_model.encoder.layers.4.self_attn",
+ "vision_tower.vision_model.encoder.layers.2.self_attn"
  ],
  "llm_int8_threshold": 6.0,
  "load_in_4bit": true,
@@ -123,7 +130,7 @@
  "vocab_size": 262208
  },
  "torch_dtype": "bfloat16",
- "transformers_version": "4.51.0",
+ "transformers_version": "4.52.0.dev0",
  "unsloth_fixed": true,
  "vision_config": {
  "attention_dropout": 0.0,
generation_config.json CHANGED
@@ -9,5 +9,5 @@
  "pad_token_id": 0,
  "top_k": 64,
  "top_p": 0.95,
- "transformers_version": "4.51.0"
+ "transformers_version": "4.52.0.dev0"
  }
model-00002-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d572c5c1d096a6827adaf5de00d561ed38c5626a7701ca5d9d33a7aec6e1765e
- size 4864824358
+ oid sha256:40f8ba85678d8a1edcf5381a26f5f2bdaa6d61f275576b7b57cc4c95673baf58
+ size 4990481558
model-00003-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ba55a5216d5a160834c97eebc06043d32f8142bd8f84c1a2203589568a103e0d
- size 4985666158
+ oid sha256:790ddcec09e1facf09c0457e451dc267ec15c9879ab81e8b51f4eeaedd52d557
+ size 4998171391
model-00004-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ff3d94bfc64db912d1eb580efdce9b9cad05d5aa5ca0b0ab3a1f56ed3117f001
- size 4959770330
+ oid sha256:310e8f1b0b19c4c0b70bbd247834ef85869b99102a56c0e8836ad6eccc4534bd
+ size 4993857860
model-00005-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:27f9084745e9d4e62d5a8360aa2b99d874fd0f25ccb2d7dc1d60fd53556377db
- size 272742837
+ oid sha256:27f0d4cb691dbcd9912a380f36a500be85fcfbf0a47feac0422eb14ac45b937c
+ size 2036775347
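
Each shard diff above only changes the Git LFS pointer file (the sha256 oid and byte size) as the shards were re-uploaded. To verify a downloaded shard against the new oid, hash it in streaming fashion; a sketch, assuming the file sits in the working directory:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so multi-GB shards don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

# oid from the model-00005-of-00005.safetensors pointer above
expected = "27f0d4cb691dbcd9912a380f36a500be85fcfbf0a47feac0422eb14ac45b937c"
assert sha256_of("model-00005-of-00005.safetensors") == expected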
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -51325,7 +51325,6 @@
  },
  "boi_token": "<start_of_image>",
  "bos_token": "<bos>",
- "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n    {%- if messages[0]['content'] is string -%}\n        {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n    {%- else -%}\n        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n    {%- endif -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = \"\" -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n        {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n    {%- endif -%}\n    {%- if (message['role'] == 'assistant') -%}\n        {%- set role = \"model\" -%}\n    {%- else -%}\n        {%- set role = message['role'] -%}\n    {%- endif -%}\n    {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception(\"Invalid content type\") }}\n    {%- endif -%}\n    {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n    {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
  "clean_up_tokenization_spaces": false,
  "eoi_token": "<end_of_image>",
  "eos_token": "<end_of_turn>",