from datasets import load_from_disk
from transformers import (
    AutoModelForObjectDetection,
    AutoImageProcessor,
    Trainer,
    TrainingArguments,
    EarlyStoppingCallback,
)
import torch
import numpy as np
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from functools import partial
from augmentations import augment_data_point, validation_transform
from pprint import pprint
from custom_parser import parse_args
from torch.optim import AdamW
import os
import torchvision.transforms as transforms
from copy import deepcopy
import time

from PIL import Image, ImageDraw

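# Evaluation metric: with eval_do_concat_batches=False the Trainer hands compute_metrics
# per-batch lists of raw model outputs and label dicts. For every image the 100
# highest-confidence query predictions are kept and mAP is computed with torchmetrics
# (boxes in cxcywh format).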
@torch.no_grad()
def compute_metrics(results):
    assert len(results.predictions) == len(results.label_ids)

    prediction_list = []
    for batch in results.predictions:
        logits = batch[1]
        scores = torch.softmax(torch.tensor(logits), dim=-1)
        confidences, labels = scores.max(dim=-1)
        boxes = batch[2]

        for boxes_per_image, confidences_per_image, labels_per_image in zip(boxes, confidences, labels):
            boxes_tensor = torch.tensor(boxes_per_image)
            sorted_indices = torch.argsort(confidences_per_image, descending=True)
            keep = sorted_indices[:100]
            boxes_top = boxes_tensor[keep]
            scores_top = confidences_per_image[keep]
            labels_top = labels_per_image[keep]
            prediction_list.append({
                'boxes': boxes_top,
                'scores': scores_top,
                'labels': labels_top,
            })

    target_list = []
    for batch in results.label_ids:
        for target in batch:
            target_list.append(
                {
                    'labels': torch.tensor(target['class_labels']),
                    'boxes': torch.tensor(target['boxes']),
                    'area': torch.tensor(target['area']),
                    'iscrowd': torch.tensor(target['iscrowd']),
                }
            )

    assert len(prediction_list) == len(target_list)
    for target in target_list:
        assert len(target['boxes']) == len(target['labels']) == len(target['area']) == len(target['iscrowd']) > 2

    metric = MeanAveragePrecision(box_format='cxcywh')
    metric.update(prediction_list, target_list)
    metrics = metric.compute()

    return {
        "map": metrics["map"].item(),
        "map_50": metrics["map_50"].item(),
        "map_75": metrics["map_75"].item(),
    }

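# Collator: the image processor turns PIL images plus their COCO-style annotations into
# model-ready tensors (pixel values and per-image "labels" dicts) in a single call.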
def collate_fn(batch, image_processor):
    images = [sample["image"] for sample in batch]
    image_ids = [sample["image_id"] for sample in batch]
    formatted_annotations = [{"image_id": sample["image_id"], "annotations": sample["annotations"]} for sample in batch]

    inputs = image_processor(images=images, annotations=formatted_annotations, return_tensors="pt")
    return inputs

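# Maps the keys of args.learning_rates (top-level module names of the model) to optimizer
# parameter groups; for RT-DETRv2 the "model" entry is narrowed to the backbone parameters.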
def get_param_groups(model, args):
    model_modules = dict(model.named_children())
    invalid_keys = [key for key in args.learning_rates if key not in model_modules]
    if invalid_keys:
        raise ValueError(f"Invalid keys in learning_rates: {invalid_keys}. "
                         f"These are not valid model components. Valid options: {list(model_modules.keys())}")
    param_groups = []
    for name, lr in args.learning_rates.items():
        if args.model_name == "PekingU/rtdetr_v2_r50vd" and name == "model":
            param_groups.append(
                {
                    "params": model_modules[name].backbone.parameters(),
                    "lr": float(lr),
                }
            )
        else:
            param_groups.append(
                {
                    "params": model_modules[name].parameters(),
                    "lr": float(lr),
                }
            )
    return param_groups

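# Entry point: load the dataset, build the model, processor and optimizer, then train
# and report validation and test metrics.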
def main():
    args = parse_args()
    print(args)

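    # The train split gets the stochastic augmentation pipeline; val/test use the
    # deterministic validation transform.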
    dataset = load_from_disk(args.dataset_path)
    train_subset = dataset['train'].with_transform(lambda batch: augment_data_point(batch, args))
    val_subset = dataset['val'].with_transform(validation_transform)
    test_subset = dataset['test'].with_transform(validation_transform)
    dataset = {
        "train": train_subset,
        "validation": val_subset,
        "test": test_subset,
    }

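    # The detection head is re-initialised for the task's label count;
    # ignore_mismatched_sizes lets from_pretrained replace the pretrained classification head.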
    if args.model_name in ["SenseTime/deformable-detr", "PekingU/rtdetr_v2_r50vd", "microsoft/conditional-detr-resnet-50"]:
        num_labels = 2
    else:
        num_labels = 1
    model = AutoModelForObjectDetection.from_pretrained(
        args.model_name,
        num_labels=num_labels,
        ignore_mismatched_sizes=True,
    )

    processor = AutoImageProcessor.from_pretrained(
        args.model_name,
        do_normalize=False,
    )

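    # Move the model to GPU if available and build AdamW with per-module learning rates.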
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    param_groups = get_param_groups(model, args)
    optimizer = AdamW(param_groups, weight_decay=args.weight_decay)

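    # eval_do_concat_batches=False keeps eval outputs as per-batch lists (as compute_metrics
    # expects); the best checkpoint is selected by eval_map and reloaded at the end.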
    training_args = TrainingArguments(
        output_dir=f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}",
        num_train_epochs=args.num_train_epochs,
        max_grad_norm=args.max_grad_norm,
        per_device_train_batch_size=args.train_batch_size,
        per_device_eval_batch_size=args.eval_batch_size,
        dataloader_num_workers=args.dataloader_num_workers,
        eval_strategy=args.eval_strategy,
        save_strategy=args.save_strategy,
        save_total_limit=2,
        metric_for_best_model="eval_map",
        greater_is_better=True,
        load_best_model_at_end=True,
        eval_do_concat_batches=False,
        remove_unused_columns=False,
        dataloader_drop_last=True,
        lr_scheduler_type=args.lr_scheduler_type,
        warmup_steps=args.warmup_steps,
        eval_accumulation_steps=10,
    )

    print(training_args)

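    # The collator is bound to the image processor; the custom optimizer is passed in and
    # the scheduler (None) is created by the Trainer from lr_scheduler_type/warmup_steps.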
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"],
        eval_dataset=dataset["validation"],
        data_collator=partial(collate_fn, image_processor=processor),
        compute_metrics=compute_metrics,
        callbacks=[
            EarlyStoppingCallback(
                early_stopping_patience=args.early_stopping_patience
            ),
        ],
        optimizers=(optimizer, None),
    )

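    # Train, save the best model and processor, then report validation and test metrics.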
    trainer.train()
    model.save_pretrained(f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}/best_model")
    processor.save_pretrained(f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}/best_model")
    metrics = trainer.evaluate()
    pprint(metrics)
    print("test results")
    print(f"test dataset length: {len(dataset['test'])}")
    metrics = trainer.evaluate(dataset["test"])
    pprint(metrics)


if __name__ == "__main__":
    main()