leon-se committed · verified
Commit 35919b4 · 1 Parent(s): 664fb32

Upload folder using huggingface_hub
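Per the commit message, the folder was pushed with `huggingface_hub`. A minimal sketch of such an upload; the local path and repo id below are placeholders, not taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes `huggingface-cli login` or HF_TOKEN is set

# Uploads every file in the folder as one commit; files matched by the
# LFS rules in .gitattributes are stored via Git LFS automatically.
api.upload_folder(
    folder_path="./gemma-3-fp8-dynamic",      # placeholder local checkpoint dir
    repo_id="your-username/your-model-repo",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```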
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
+ }
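The template above is the Gemma 3 chat format: it prepends `bos_token`, folds a system message into the first user turn, renders turns as `<start_of_turn>role ... <end_of_turn>`, maps the `assistant` role to `model`, and emits `<start_of_image>` for image content items. A sketch of rendering it through the processor; the repo id is a placeholder:

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("your-username/your-model-repo")  # placeholder

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]},
]

# add_generation_prompt=True appends the '<start_of_turn>model' header
# so generation continues as the model role.
prompt = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
print(prompt)
```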
config.json ADDED
@@ -0,0 +1,264 @@
+ {
+   "architectures": [
+     "Gemma3ForConditionalGeneration"
+   ],
+   "boi_token_index": 255999,
+   "eoi_token_index": 256000,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "image_token_index": 262144,
+   "initializer_range": 0.02,
+   "mm_tokens_per_image": 256,
+   "model_type": "gemma3",
+   "quantization_config": {
+     "config_groups": {
+       "group_0": {
+         "input_activations": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": true,
+           "group_size": null,
+           "num_bits": 8,
+           "observer": null,
+           "observer_kwargs": {},
+           "strategy": "token",
+           "symmetric": true,
+           "type": "float"
+         },
+         "output_activations": null,
+         "targets": [
+           "Linear"
+         ],
+         "weights": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": false,
+           "group_size": null,
+           "num_bits": 8,
+           "observer": "minmax",
+           "observer_kwargs": {},
+           "strategy": "channel",
+           "symmetric": true,
+           "type": "float"
+         }
+       }
+     },
+     "format": "float-quantized",
+     "global_compression_ratio": 1.2400234132867625,
+     "ignore": [
+       "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.0.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.0.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.1.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.1.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.2.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.2.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.3.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.3.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.4.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.4.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.5.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.5.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.6.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.6.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.7.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.7.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.8.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.8.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.9.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.9.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.10.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.10.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.11.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.11.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.12.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.12.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.13.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.13.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.14.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.14.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.15.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.15.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.16.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.16.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.17.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.17.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.18.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.18.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.19.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.19.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.20.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.20.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.21.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.21.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.22.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.22.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.23.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.23.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.24.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.24.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.25.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.25.mlp.fc2",
+       "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj",
+       "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj",
+       "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj",
+       "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj",
+       "vision_tower.vision_model.encoder.layers.26.mlp.fc1",
+       "vision_tower.vision_model.encoder.layers.26.mlp.fc2",
+       "language_model.lm_head"
+     ],
+     "kv_cache_scheme": null,
+     "quant_method": "compressed-tensors",
+     "quantization_status": "compressed"
+   },
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "attn_logit_softcapping": null,
+     "cache_implementation": "hybrid",
+     "final_logit_softcapping": null,
+     "head_dim": 128,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 5376,
+     "initializer_range": 0.02,
+     "intermediate_size": 21504,
+     "max_position_embeddings": 131072,
+     "model_type": "gemma3_text",
+     "num_attention_heads": 32,
+     "num_hidden_layers": 62,
+     "num_key_value_heads": 16,
+     "query_pre_attn_scalar": 168,
+     "rms_norm_eps": 1e-06,
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": {
+       "factor": 8.0,
+       "rope_type": "linear"
+     },
+     "rope_theta": 1000000.0,
+     "sliding_window": 1024,
+     "sliding_window_pattern": 6,
+     "use_cache": true,
+     "vocab_size": 262208
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.50.0.dev0",
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "image_size": 896,
+     "intermediate_size": 4304,
+     "layer_norm_eps": 1e-06,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "patch_size": 14,
+     "vision_use_head": false
+   }
+ }
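`quant_method` is `compressed-tensors`, with static per-channel FP8 weights and dynamic per-token FP8 activations; the vision tower, multimodal projector, and LM head stay unquantized per the `ignore` list. A loading sketch, assuming a transformers build with Gemma 3 support (>= 4.50) and the `compressed-tensors` package installed; the repo id is a placeholder, and such checkpoints can also be served directly by vLLM:

```python
import torch
from transformers import AutoProcessor, Gemma3ForConditionalGeneration

model_id = "your-username/your-model-repo"  # placeholder

# The quantization_config is picked up automatically through the
# compressed-tensors integration; unquantized modules stay in bfloat16,
# matching "torch_dtype" in the config.
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)
```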
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "pad_token_id": 0,
+   "transformers_version": "4.50.0.dev0"
+ }
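These generation defaults travel with the checkpoint and can be inspected without downloading the weights; a quick sketch with a placeholder repo id:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("your-username/your-model-repo")  # placeholder

# Two stop ids per this file: 1 and 106 (<eos> and <end_of_turn> in the Gemma vocab).
print(gen_cfg.eos_token_id)          # [1, 106]
print(gen_cfg.cache_implementation)  # "hybrid" (sliding-window + global layers)
```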
model-00001-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d84ad648b5bcee21ff5fbe79e80d72f014ccdb1f993b678a7099e9b1b2093de1
+ size 4970671784
model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12e1334b87685ebc7e80fb3ab449968e31a455ff5f9e0677f94796081463ebb6
+ size 4956561616
model-00003-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30faf3cd86a376cd73fedad0127e0946b0e7c4805af3ddb2d4746c47571ff424
+ size 4956561760
model-00004-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b985dbd33f075ec375783150c5a71c123faa1a6090a03a14233feb31f3e1931
+ size 4956561760
model-00005-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:626752cc99b99f68cadc6a09ed85aebb94c835f4e3461b04195923a79cd3f120
+ size 4956561760
model-00006-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a8d18d02f99a05719152c1caf2ef8d07c26fae011e583829354be91df0163e4
+ size 4477436544
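Each `.safetensors` entry above is a Git LFS pointer: `oid` is the SHA-256 of the actual blob and `size` its byte count (about 29.3 GB across the six shards). A sketch of fetching one shard and verifying it against its pointer; the repo id is a placeholder:

```python
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="your-username/your-model-repo",  # placeholder
    filename="model-00001-of-00006.safetensors",
)

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        sha.update(chunk)

# Should match the pointer oid for shard 1:
# d84ad648b5bcee21ff5fbe79e80d72f014ccdb1f993b678a7099e9b1b2093de1
print(sha.hexdigest())
```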
model.safetensors.index.json ADDED
The diff for this file is too large to render.
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_pan_and_scan": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Gemma3ImageProcessor",
+   "image_seq_length": 256,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "pan_and_scan_max_num_crops": null,
+   "pan_and_scan_min_crop_size": null,
+   "pan_and_scan_min_ratio_to_activate": null,
+   "processor_class": "Gemma3Processor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 896,
+     "width": 896
+   }
+ }
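These values describe a simple pipeline: bilinear resize (`resample: 2`) to 896x896, rescale by 1/255 (`rescale_factor`), then normalize with mean = std = 0.5 per channel, mapping pixels into [-1, 1]; each image is then represented by `image_seq_length` = 256 soft tokens. A sketch of running just the image processor; the repo id and image path are placeholders:

```python
from PIL import Image
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("your-username/your-model-repo")  # placeholder

image = Image.open("example.jpg")  # placeholder input image
out = image_processor(images=image, return_tensors="pt")

print(out["pixel_values"].shape)  # torch.Size([1, 3, 896, 896])
print(out["pixel_values"].min(), out["pixel_values"].max())  # within [-1, 1]
```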
processor_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "image_seq_length": 256,
+   "processor_class": "Gemma3Processor"
+ }
recipe.yaml ADDED
@@ -0,0 +1,6 @@
+ DEFAULT_stage:
+   DEFAULT_modifiers:
+     QuantizationModifier:
+       ignore: ['re:.*lm_head', 're:vision_tower.*', 're:multi_modal_projector.*']
+       targets: [Linear]
+       scheme: FP8_DYNAMIC
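This is an `llm-compressor` recipe: the `FP8_DYNAMIC` scheme applied to every `Linear` layer while skipping the LM head, vision tower, and multimodal projector, consistent with the `ignore` list in `config.json`. A sketch of how such a checkpoint is typically produced; the base model id is assumed from the 27B shapes in `config.json`, and import paths vary slightly across `llmcompressor` releases:

```python
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers import oneshot
from transformers import Gemma3ForConditionalGeneration

model = Gemma3ForConditionalGeneration.from_pretrained(
    "google/gemma-3-27b-it",  # assumed base checkpoint
    torch_dtype="bfloat16",
)

recipe = QuantizationModifier(
    targets="Linear",
    scheme="FP8_DYNAMIC",
    ignore=["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
)

# FP8_DYNAMIC needs no calibration data: weights are quantized once
# (minmax observer, per channel); activations are quantized per token at runtime.
oneshot(model=model, recipe=recipe)
model.save_pretrained("gemma-3-fp8-dynamic", save_compressed=True)
```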
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render.