from datasets import load_from_disk
from transformers import (
AutoModelForObjectDetection,
AutoImageProcessor,
Trainer,
TrainingArguments,
EarlyStoppingCallback,
)
import torch
import numpy as np
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from functools import partial
from augmentations import augment_data_point, validation_transform
from pprint import pprint
from custom_parser import parse_args
from torch.optim import AdamW
import os
import torchvision.transforms as transforms
from copy import deepcopy
import time
# from ultralytics.utils.plotting import Annotator
from PIL import Image, ImageDraw
# NOTE: this may still be buggy; next step is to parallelise it and check for further issues
# (it currently looks fine). The last incomplete batch is simply dropped.
# TODO: check how many bounding boxes each image has; if they are all 8, add assertions for that.
@torch.no_grad()
def compute_metrics(results):
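    """Convert the Trainer's per-batch evaluation outputs into torchmetrics format and compute COCO-style mAP.

    `results` is an EvalPrediction whose `predictions` and `label_ids` are lists of per-batch
    outputs (eval_do_concat_batches=False), so both are unpacked batch by batch below.
    """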
assert len(results.predictions) == len(results.label_ids)
# pack up predictions
prediction_list = []
for batch in results.predictions:
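        # each batch is assumed to be the tuple of model outputs (minus the loss):
        # index 1 holds the class logits and index 2 the predicted boxes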
logits = batch[1] # shape: (number of samples, number of predictions for each sample, number of classes)
scores = torch.softmax(torch.tensor(logits), dim=-1)
confidences, labels = scores.max(dim=-1)
boxes = batch[2]
for boxes_per_image, confidences_per_image, labels_per_image in zip(boxes, confidences, labels):
boxes_tensor = torch.tensor(boxes_per_image)
sorted_indices = torch.argsort(confidences_per_image, descending=True)
keep = sorted_indices[:100]
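            # keep only the top-100 highest-confidence predictions per image (COCO maxDets convention)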
boxes_top = boxes_tensor[keep]
scores_top = confidences_per_image[keep]
labels_top = labels_per_image[keep]
prediction_list.append({
'boxes': boxes_top,
'scores': scores_top,
'labels': labels_top,
})
# pack up targets
target_list = []
for batch in results.label_ids:
for target in batch:
target_list.append(
{
'labels' : torch.tensor(target['class_labels']),
'boxes' : torch.tensor(target['boxes']),
'area' : torch.tensor(target['area']),
'iscrowd' : torch.tensor(target['iscrowd']),
}
)
assert len(prediction_list) == len(target_list)
for target in target_list:
assert len(target['boxes']) == len(target['labels']) == len(target['area']) == len(target['iscrowd']) > 2
# compute metrics
metric = MeanAveragePrecision(box_format='cxcywh')
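    # both predictions and targets use (cx, cy, w, h) boxes, matching the format packed above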
metric.update(prediction_list, target_list)
metrics = metric.compute()
# for key in metrics.keys():
# print(key, type(metrics[key]))
# print("map_small", type(metric["map_small"]), print(metric["map_small"]))
return {
"map" : metrics["map"].item(),
"map_50" : metrics["map_50"].item(),
"map_75" : metrics["map_75"].item(),
}
# Keep the plotting code below: bounding-box visualisations still need to be produced for every detector.
def collate_fn(batch, image_processor):
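    """Batch raw images and COCO-style annotations through the image processor.

    The processor handles resizing, padding and converting the annotations into the
    label dictionaries (normalized cxcywh boxes, class labels, area, iscrowd) the model expects.
    """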
images = [sample["image"] for sample in batch]
# for image in images:
# image.save(f"original_{time.time()}.jpg")
image_ids = [sample["image_id"] for sample in batch]
formatted_annotations = [{"image_id": sample["image_id"], "annotations": sample["annotations"]} for sample in batch]
inputs = image_processor(images=images, annotations=formatted_annotations, return_tensors="pt")
# mean = [0.485, 0.456, 0.406] # Example: ImageNet Mean
# std = [0.229, 0.224, 0.225] # Example: ImageNet Std
# transform = transforms.Compose([
# # transforms.Normalize(mean=[-m/s for m, s in zip(mean, std)], std=[1/s for s in std]), # Undo standardization
# transforms.ToPILImage()
# ])
# for pixel_values, labels in zip(inputs.pixel_values, inputs.labels):
# img_width, img_height = labels['size'].tolist()
# print(pixel_values.size(), labels['size'])
# bboxes = labels['boxes']
# image_cpy = deepcopy(pixel_values)
# image_cpy = transform(image_cpy)
# draw = ImageDraw.Draw(image_cpy)
# for box in bboxes:
# x_center, y_center, width, height = box.tolist()
# x0 = (x_center - width/2) * img_width
# y0 = (y_center - height/2) * img_height
# x1 = (x_center + width/2) * img_width
# y1 = (y_center + height/2) * img_height
# draw.rectangle([x0, y0, x1, y1], outline="green", width=1)
# image_cpy.save(f"{time.time()}.jpg")
return inputs
def get_param_groups(model, args):
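    """Build per-module parameter groups so each top-level component named in
    args.learning_rates (e.g. backbone vs. detection head) gets its own learning rate."""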
model_modules = dict(model.named_children())
invalid_keys = [key for key in args.learning_rates if key not in model_modules]
if invalid_keys:
raise ValueError(f"Invalid keys in learning_rates: {invalid_keys}. "
f"These are not valid model components. Valid options: {list(model_modules.keys())}")
param_groups = []
for name, lr in args.learning_rates.items():
if args.model_name == "PekingU/rtdetr_v2_r50vd" and name == "model":
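            # for RT-DETRv2 the "model" entry is applied to the backbone only,
            # so the given learning rate targets model.backbone rather than the whole module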
param_groups.append(
{
"params": model_modules[name].backbone.parameters(),
"lr": float(lr)
}
)
else:
param_groups.append(
{
"params": model_modules[name].parameters(),
"lr": float(lr)
}
)
return param_groups
# Parse the hyperparameters and set up a save directory for this run.
def main():
args = parse_args()
print(args)
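    # args is expected to provide (via custom_parser): dataset_path, model_name, learning_rates,
    # weight_decay, num_train_epochs, max_grad_norm, train/eval batch sizes, dataloader_num_workers,
    # eval/save strategies, lr_scheduler_type, warmup_steps, early_stopping_patience and save_directory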
dataset = load_from_disk(args.dataset_path)
train_subset = dataset['train'].with_transform(lambda batch: augment_data_point(batch, args))
val_subset = dataset['val'].with_transform(validation_transform)
test_subset = dataset['test'].with_transform(validation_transform)
dataset = {
"train": train_subset,
"validation": val_subset,
"test" : test_subset
}
if args.model_name in ["SenseTime/deformable-detr", "PekingU/rtdetr_v2_r50vd", "microsoft/conditional-detr-resnet-50"]:
num_labels = 2
else:
num_labels = 1
model = AutoModelForObjectDetection.from_pretrained(
args.model_name,
num_labels=num_labels,
ignore_mismatched_sizes=True
)
processor = AutoImageProcessor.from_pretrained(
args.model_name,
do_normalize=False
)
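    # do_normalize=False turns off the processor's normalization; the augmentation pipeline is
    # assumed to handle any pixel scaling the model expects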
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
param_groups = get_param_groups(model, args)
optimizer = AdamW(param_groups, weight_decay=args.weight_decay)
    # Training arguments
    # TODO: eventually set these to match the Ultralytics settings.
    # Note: evaluation always runs over the entire validation dataset.
training_args = TrainingArguments(
output_dir=f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}",
num_train_epochs=args.num_train_epochs,
max_grad_norm=args.max_grad_norm,
per_device_train_batch_size=args.train_batch_size,
per_device_eval_batch_size=args.eval_batch_size,
dataloader_num_workers=args.dataloader_num_workers,
eval_strategy=args.eval_strategy,
save_strategy=args.save_strategy,
save_total_limit=2,
metric_for_best_model="eval_map",
greater_is_better=True,
load_best_model_at_end=True,
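        # keep evaluation outputs as per-batch lists so compute_metrics can unpack them batch by batch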
eval_do_concat_batches=False,
remove_unused_columns=False,
dataloader_drop_last=True,
lr_scheduler_type=args.lr_scheduler_type,
warmup_steps=args.warmup_steps,
eval_accumulation_steps=10
)
print(training_args)
# Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset["train"],
eval_dataset=dataset["validation"],
data_collator=partial(collate_fn, image_processor=processor),
compute_metrics=compute_metrics,
callbacks=[
EarlyStoppingCallback(
early_stopping_patience=args.early_stopping_patience
),
],
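        # pass the custom per-group optimizer; leaving the scheduler as None lets the Trainer
        # build it from lr_scheduler_type and warmup_steps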
optimizers=(optimizer, None)
)
# Train
trainer.train()
model.save_pretrained(f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}/best_model")
processor.save_pretrained(f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}/best_model")
metrics = trainer.evaluate()
pprint(metrics)
print("test results")
print(f"test dataset length: {len(dataset['test'])}")
metrics = trainer.evaluate(dataset["test"])
pprint(metrics)
if __name__ == "__main__":
main()