''' Copyright @DeepGlint 2025 '''
import torch
import torch.nn as nn
from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig


def build_vision_tower(model_cfg, **kwargs):
    vision_tower = getattr(model_cfg, "vision_tower_config", getattr(model_cfg, "vision_tower", None))
    return CLIPVisionTower(vision_tower, args=model_cfg, **kwargs)


class CLIPVisionTower(nn.Module):
    """Frozen CLIP vision encoder that exposes intermediate hidden states
    as image features for a multimodal LLM."""

    def __init__(self, vision_tower, args, delay_load=False):
        super().__init__()

        self.is_loaded = False
        self.vision_tower_cfg = vision_tower
        self.vision_tower_processor = args.vision_tower_processor
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, "mm_vision_select_feature", "patch")
        # Keep a config object around so the `config` property works even
        # before `init_model()` has built the full model.
        self.cfg_only = CLIPVisionConfig.from_dict(self.vision_tower_cfg)

        if not delay_load:
            self.init_model()
        elif getattr(args, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            self.init_model()
        elif hasattr(args, "mm_tunable_parts") and "mm_vision_tower" in args.mm_tunable_parts:
            self.init_model()
        else:
            raise RuntimeError("Unsupported configuration; please check config.json or contact us")

    def init_model(self, device_map=None):
        if self.is_loaded:
            return
        vision_tower_config = CLIPVisionConfig.from_dict(self.vision_tower_cfg)
        self.image_processor = CLIPImageProcessor(**self.vision_tower_processor)
        self.vision_tower = CLIPVisionModel(config=vision_tower_config)
        self.vision_tower.requires_grad_(False)  # the tower is frozen by default
        self.is_loaded = True

    def feature_select(self, image_forward_outs):
        select_feature_type = self.select_feature

        if self.select_feature in ["slicefour_patch", "slicefour_cls_patch"]:
            # Concatenate four evenly spaced hidden layers along the channel dim.
            select_every_k_layer = len(image_forward_outs.hidden_states) // 4
            image_features = torch.cat(
                [
                    image_forward_outs.hidden_states[i]
                    for i in range(select_every_k_layer + self.select_layer, len(image_forward_outs.hidden_states), select_every_k_layer)
                ],
                dim=-1,
            )
            select_feature_type = select_feature_type.replace("slicefour_", "")
        elif self.select_feature in ["slice_m25811_f6_patch", "slice_m25811_f6_cls_patch"]:
            # Concatenate layers -2, -5, -8, -11 and 6 along the channel dim.
            select_layers = [-2, -5, -8, -11, 6]
            image_features = torch.cat([image_forward_outs.hidden_states[i] for i in select_layers], dim=-1)
            select_feature_type = select_feature_type.replace("slice_m25811_f6_", "")
        else:
            image_features = image_forward_outs.hidden_states[self.select_layer]

        if select_feature_type == "patch":
            # Drop the CLS token, keep only patch tokens.
            image_features = image_features[:, 1:]
        elif select_feature_type == "cls_patch":
            pass  # keep the CLS token together with the patch tokens
        else:
            raise ValueError(f"Unexpected select feature: {select_feature_type}")
        return image_features

    def forward(self, images):
        if isinstance(images, list):
            # Variable-sized inputs: encode one image at a time.
            image_features = []
            for image in images:
                image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return self.vision_tower.dtype

    @property
    def device(self):
        return self.vision_tower.device

    @property
    def config(self):
        if self.is_loaded:
            return self.vision_tower.config
        else:
            return self.cfg_only

    @property
    def hidden_size(self):
        _hidden_size = self.config.hidden_size
        if "slicefour" in self.select_feature:
            _hidden_size *= 4  # four layers concatenated channel-wise
        if "slice_m25811_f6" in self.select_feature:
            _hidden_size *= 5  # five layers concatenated channel-wise
        return _hidden_size

    @property
    def num_patches_per_side(self):
        return self.config.image_size // self.config.patch_size

    @property
    def num_patches(self):
        _num_patches = (self.config.image_size // self.config.patch_size) ** 2
        if "cls_patch" in self.select_feature:
            _num_patches += 1
        return _num_patches

    @property
    def image_size(self):
        return self.config.image_size
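

# --- Usage sketch (illustrative, not part of the shipped pipeline) ---------
# A minimal example of how build_vision_tower wires together. The config and
# processor dicts below are hypothetical CLIP ViT-L/14-style values, not
# values taken from this repo's config.json; swap in your own checkpoint's
# settings. Run as a script to sanity-check shapes with a randomly
# initialized tower.
if __name__ == "__main__":
    from types import SimpleNamespace

    model_cfg = SimpleNamespace(
        # Assumed encoder hyperparameters (ViT-L/14 at 336px).
        vision_tower_config={
            "hidden_size": 1024,
            "intermediate_size": 4096,
            "num_hidden_layers": 24,
            "num_attention_heads": 16,
            "image_size": 336,
            "patch_size": 14,
        },
        # Assumed preprocessing settings for CLIPImageProcessor.
        vision_tower_processor={"size": 336, "crop_size": 336},
        mm_vision_select_layer=-2,         # take the second-to-last layer
        mm_vision_select_feature="patch",  # drop the CLS token
    )

    tower = build_vision_tower(model_cfg)
    images = torch.randn(2, 3, tower.image_size, tower.image_size)
    features = tower(images)
    # Expect (2, 576, 1024): a 24x24 patch grid, one 1024-d vector per patch.
    print(features.shape, tower.num_patches, tower.hidden_size)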