saml_test / config.json
{
"architectures": [
"SamModel"
],
"initializer_range": 0.02,
"mask_decoder_config": {
"attention_downsample_rate": 2,
"hidden_act": "relu",
"hidden_size": 256,
"iou_head_depth": 3,
"iou_head_hidden_dim": 256,
"layer_norm_eps": 1e-06,
"mlp_dim": 2048,
"model_type": "",
"num_attention_heads": 8,
"num_hidden_layers": 2,
"num_multimask_outputs": 3,
"torch_dtype": "float32"
},
"model_type": "sam",
"prompt_encoder_config": {
"hidden_act": "gelu",
"hidden_size": 256,
"image_embedding_size": 64,
"image_size": 1024,
"layer_norm_eps": 1e-06,
"mask_input_channels": 16,
"model_type": "",
"num_point_embeddings": 4,
"patch_size": 16,
"torch_dtype": "float32"
},
"torch_dtype": "float32",
"transformers_version": "4.52.0.dev0",
"vision_config": {
"attention_dropout": 0.0,
"dropout": 0.0,
"global_attn_indexes": [
5,
11,
17,
23
],
"hidden_act": "gelu",
"hidden_size": 1024,
"image_size": 1024,
"initializer_factor": 1.0,
"initializer_range": 1e-10,
"intermediate_size": 6144,
"layer_norm_eps": 1e-06,
"mlp_dim": 4096,
"mlp_ratio": 4.0,
"model_type": "sam_vision_model",
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 24,
"num_pos_feats": 128,
"output_channels": 256,
"patch_size": 16,
"projection_dim": 512,
"qkv_bias": true,
"torch_dtype": "float32",
"use_abs_pos": true,
"use_rel_pos": true,
"window_size": 14
}
}
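
This is the configuration for a transformers SamModel. The vision settings above (hidden_size 1024, 24 hidden layers, 16 attention heads, global attention at layers 5, 11, 17 and 23) match the ViT-Large variant of SAM's image encoder, and image_size 1024 with patch_size 16 yields the 64x64 grid that image_embedding_size declares for the prompt encoder.

A minimal sketch of consuming this file with Hugging Face transformers, assuming the library is installed and the file is saved locally as config.json (the local path is an assumption, not part of this repo's layout):

from transformers import SamConfig, SamModel

# Build the config object directly from this JSON file (assumed local path).
config = SamConfig.from_json_file("config.json")

# Sanity checks derived from the values above: a 1024x1024 image cut into
# 16x16 patches gives the 64x64 grid the prompt encoder expects.
assert config.vision_config.image_size // config.vision_config.patch_size == 64
assert config.prompt_encoder_config.image_embedding_size == 64

# Instantiate a SamModel with this architecture (randomly initialized weights).
model = SamModel(config)
print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")

Note that SamModel(config) builds randomly initialized weights; to load trained weights, use SamModel.from_pretrained with the repo id instead.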