BAGEL-7B-MoT-DF11 / config.json
{
"dfloat11_config": {
"bytes_per_thread": 8,
"pattern_dict": {
"language_model\\.model\\.embed_tokens": [],
"language_model\\.model\\.layers.\\d+": [
"self_attn.q_proj",
"self_attn.k_proj",
"self_attn.v_proj",
"self_attn.o_proj",
"self_attn.q_proj_moe_gen",
"self_attn.k_proj_moe_gen",
"self_attn.v_proj_moe_gen",
"self_attn.o_proj_moe_gen",
"mlp.gate_proj",
"mlp.up_proj",
"mlp.down_proj",
"mlp_moe_gen.gate_proj",
"mlp_moe_gen.up_proj",
"mlp_moe_gen.down_proj"
],
"language_model\\.lm_head": [],
"vit_model": [
"vision_model.encoder.layers.0.self_attn.k_proj",
"vision_model.encoder.layers.0.self_attn.v_proj",
"vision_model.encoder.layers.0.self_attn.q_proj",
"vision_model.encoder.layers.0.self_attn.out_proj",
"vision_model.encoder.layers.0.mlp.fc1",
"vision_model.encoder.layers.0.mlp.fc2",
"vision_model.encoder.layers.1.self_attn.k_proj",
"vision_model.encoder.layers.1.self_attn.v_proj",
"vision_model.encoder.layers.1.self_attn.q_proj",
"vision_model.encoder.layers.1.self_attn.out_proj",
"vision_model.encoder.layers.1.mlp.fc1",
"vision_model.encoder.layers.1.mlp.fc2",
"vision_model.encoder.layers.2.self_attn.k_proj",
"vision_model.encoder.layers.2.self_attn.v_proj",
"vision_model.encoder.layers.2.self_attn.q_proj",
"vision_model.encoder.layers.2.self_attn.out_proj",
"vision_model.encoder.layers.2.mlp.fc1",
"vision_model.encoder.layers.2.mlp.fc2",
"vision_model.encoder.layers.3.self_attn.k_proj",
"vision_model.encoder.layers.3.self_attn.v_proj",
"vision_model.encoder.layers.3.self_attn.q_proj",
"vision_model.encoder.layers.3.self_attn.out_proj",
"vision_model.encoder.layers.3.mlp.fc1",
"vision_model.encoder.layers.3.mlp.fc2",
"vision_model.encoder.layers.4.self_attn.k_proj",
"vision_model.encoder.layers.4.self_attn.v_proj",
"vision_model.encoder.layers.4.self_attn.q_proj",
"vision_model.encoder.layers.4.self_attn.out_proj",
"vision_model.encoder.layers.4.mlp.fc1",
"vision_model.encoder.layers.4.mlp.fc2",
"vision_model.encoder.layers.5.self_attn.k_proj",
"vision_model.encoder.layers.5.self_attn.v_proj",
"vision_model.encoder.layers.5.self_attn.q_proj",
"vision_model.encoder.layers.5.self_attn.out_proj",
"vision_model.encoder.layers.5.mlp.fc1",
"vision_model.encoder.layers.5.mlp.fc2",
"vision_model.encoder.layers.6.self_attn.k_proj",
"vision_model.encoder.layers.6.self_attn.v_proj",
"vision_model.encoder.layers.6.self_attn.q_proj",
"vision_model.encoder.layers.6.self_attn.out_proj",
"vision_model.encoder.layers.6.mlp.fc1",
"vision_model.encoder.layers.6.mlp.fc2",
"vision_model.encoder.layers.7.self_attn.k_proj",
"vision_model.encoder.layers.7.self_attn.v_proj",
"vision_model.encoder.layers.7.self_attn.q_proj",
"vision_model.encoder.layers.7.self_attn.out_proj",
"vision_model.encoder.layers.7.mlp.fc1",
"vision_model.encoder.layers.7.mlp.fc2",
"vision_model.encoder.layers.8.self_attn.k_proj",
"vision_model.encoder.layers.8.self_attn.v_proj",
"vision_model.encoder.layers.8.self_attn.q_proj",
"vision_model.encoder.layers.8.self_attn.out_proj",
"vision_model.encoder.layers.8.mlp.fc1",
"vision_model.encoder.layers.8.mlp.fc2",
"vision_model.encoder.layers.9.self_attn.k_proj",
"vision_model.encoder.layers.9.self_attn.v_proj",
"vision_model.encoder.layers.9.self_attn.q_proj",
"vision_model.encoder.layers.9.self_attn.out_proj",
"vision_model.encoder.layers.9.mlp.fc1",
"vision_model.encoder.layers.9.mlp.fc2",
"vision_model.encoder.layers.10.self_attn.k_proj",
"vision_model.encoder.layers.10.self_attn.v_proj",
"vision_model.encoder.layers.10.self_attn.q_proj",
"vision_model.encoder.layers.10.self_attn.out_proj",
"vision_model.encoder.layers.10.mlp.fc1",
"vision_model.encoder.layers.10.mlp.fc2",
"vision_model.encoder.layers.11.self_attn.k_proj",
"vision_model.encoder.layers.11.self_attn.v_proj",
"vision_model.encoder.layers.11.self_attn.q_proj",
"vision_model.encoder.layers.11.self_attn.out_proj",
"vision_model.encoder.layers.11.mlp.fc1",
"vision_model.encoder.layers.11.mlp.fc2",
"vision_model.encoder.layers.12.self_attn.k_proj",
"vision_model.encoder.layers.12.self_attn.v_proj",
"vision_model.encoder.layers.12.self_attn.q_proj",
"vision_model.encoder.layers.12.self_attn.out_proj",
"vision_model.encoder.layers.12.mlp.fc1",
"vision_model.encoder.layers.12.mlp.fc2",
"vision_model.encoder.layers.13.self_attn.k_proj",
"vision_model.encoder.layers.13.self_attn.v_proj",
"vision_model.encoder.layers.13.self_attn.q_proj",
"vision_model.encoder.layers.13.self_attn.out_proj",
"vision_model.encoder.layers.13.mlp.fc1",
"vision_model.encoder.layers.13.mlp.fc2",
"vision_model.encoder.layers.14.self_attn.k_proj",
"vision_model.encoder.layers.14.self_attn.v_proj",
"vision_model.encoder.layers.14.self_attn.q_proj",
"vision_model.encoder.layers.14.self_attn.out_proj",
"vision_model.encoder.layers.14.mlp.fc1",
"vision_model.encoder.layers.14.mlp.fc2",
"vision_model.encoder.layers.15.self_attn.k_proj",
"vision_model.encoder.layers.15.self_attn.v_proj",
"vision_model.encoder.layers.15.self_attn.q_proj",
"vision_model.encoder.layers.15.self_attn.out_proj",
"vision_model.encoder.layers.15.mlp.fc1",
"vision_model.encoder.layers.15.mlp.fc2",
"vision_model.encoder.layers.16.self_attn.k_proj",
"vision_model.encoder.layers.16.self_attn.v_proj",
"vision_model.encoder.layers.16.self_attn.q_proj",
"vision_model.encoder.layers.16.self_attn.out_proj",
"vision_model.encoder.layers.16.mlp.fc1",
"vision_model.encoder.layers.16.mlp.fc2",
"vision_model.encoder.layers.17.self_attn.k_proj",
"vision_model.encoder.layers.17.self_attn.v_proj",
"vision_model.encoder.layers.17.self_attn.q_proj",
"vision_model.encoder.layers.17.self_attn.out_proj",
"vision_model.encoder.layers.17.mlp.fc1",
"vision_model.encoder.layers.17.mlp.fc2",
"vision_model.encoder.layers.18.self_attn.k_proj",
"vision_model.encoder.layers.18.self_attn.v_proj",
"vision_model.encoder.layers.18.self_attn.q_proj",
"vision_model.encoder.layers.18.self_attn.out_proj",
"vision_model.encoder.layers.18.mlp.fc1",
"vision_model.encoder.layers.18.mlp.fc2",
"vision_model.encoder.layers.19.self_attn.k_proj",
"vision_model.encoder.layers.19.self_attn.v_proj",
"vision_model.encoder.layers.19.self_attn.q_proj",
"vision_model.encoder.layers.19.self_attn.out_proj",
"vision_model.encoder.layers.19.mlp.fc1",
"vision_model.encoder.layers.19.mlp.fc2",
"vision_model.encoder.layers.20.self_attn.k_proj",
"vision_model.encoder.layers.20.self_attn.v_proj",
"vision_model.encoder.layers.20.self_attn.q_proj",
"vision_model.encoder.layers.20.self_attn.out_proj",
"vision_model.encoder.layers.20.mlp.fc1",
"vision_model.encoder.layers.20.mlp.fc2",
"vision_model.encoder.layers.21.self_attn.k_proj",
"vision_model.encoder.layers.21.self_attn.v_proj",
"vision_model.encoder.layers.21.self_attn.q_proj",
"vision_model.encoder.layers.21.self_attn.out_proj",
"vision_model.encoder.layers.21.mlp.fc1",
"vision_model.encoder.layers.21.mlp.fc2",
"vision_model.encoder.layers.22.self_attn.k_proj",
"vision_model.encoder.layers.22.self_attn.v_proj",
"vision_model.encoder.layers.22.self_attn.q_proj",
"vision_model.encoder.layers.22.self_attn.out_proj",
"vision_model.encoder.layers.22.mlp.fc1",
"vision_model.encoder.layers.22.mlp.fc2",
"vision_model.encoder.layers.23.self_attn.k_proj",
"vision_model.encoder.layers.23.self_attn.v_proj",
"vision_model.encoder.layers.23.self_attn.q_proj",
"vision_model.encoder.layers.23.self_attn.out_proj",
"vision_model.encoder.layers.23.mlp.fc1",
"vision_model.encoder.layers.23.mlp.fc2",
"vision_model.encoder.layers.24.self_attn.k_proj",
"vision_model.encoder.layers.24.self_attn.v_proj",
"vision_model.encoder.layers.24.self_attn.q_proj",
"vision_model.encoder.layers.24.self_attn.out_proj",
"vision_model.encoder.layers.24.mlp.fc1",
"vision_model.encoder.layers.24.mlp.fc2",
"vision_model.encoder.layers.25.self_attn.k_proj",
"vision_model.encoder.layers.25.self_attn.v_proj",
"vision_model.encoder.layers.25.self_attn.q_proj",
"vision_model.encoder.layers.25.self_attn.out_proj",
"vision_model.encoder.layers.25.mlp.fc1",
"vision_model.encoder.layers.25.mlp.fc2",
"vision_model.embeddings.patch_embedding"
]
},
"threads_per_block": [
512
],
"version": "0.2.0"
},
"model_type": "qwen2"
}
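
The `pattern_dict` above appears to map regular expressions over module names to the child projections whose weights are stored in DFloat11 compressed form (an empty list presumably meaning the matched module itself is compressed). The following is a minimal sketch, not the official DFloat11 loader, of how those patterns could be resolved against dotted module names; the function name `compressed_targets`, the use of `re.fullmatch`, and the local `config.json` path are all assumptions made for illustration.

```python
import json
import re

# Load the dfloat11_config section from a local copy of this file
# (assumed path; adjust as needed).
with open("config.json") as f:
    cfg = json.load(f)["dfloat11_config"]

def compressed_targets(module_name: str) -> list[str]:
    """Return dotted names of submodules this config would treat as DF11-compressed.

    Assumption: each pattern_dict key is a regex matched against the full module
    name, and each listed entry names a child submodule whose weight tensor is
    compressed; an empty list is taken to mean the matched module itself.
    """
    targets = []
    for pattern, children in cfg["pattern_dict"].items():
        if re.fullmatch(pattern, module_name):
            if children:
                targets.extend(f"{module_name}.{child}" for child in children)
            else:
                targets.append(module_name)
    return targets

# Example: the decoder-layer pattern selects both the base projections and the
# *_moe_gen (generation-expert) projections of layer 0, while lm_head matches
# an empty list and so is reported as compressed itself.
print(compressed_targets("language_model.model.layers.0"))
print(compressed_targets("language_model.lm_head"))
```

Note that the `bytes_per_thread` and `threads_per_block` fields are decompression-kernel launch parameters rather than per-module settings, so they are not part of the pattern matching sketched above.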