import os
from collections import defaultdict
from functools import partial

import albumentations as A
import numpy as np
from datasets import load_from_disk
from PIL import Image, ImageDraw

from custom_parser import parse_args
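
# The argument parser lives in custom_parser (not shown here). A minimal sketch of the
# interface this script relies on, with attribute names inferred from their use below
# (the real parser may define different flags, defaults, or extra options):
#
#     import argparse
#
#     def parse_args():
#         parser = argparse.ArgumentParser()
#         parser.add_argument("--dataset_path", type=str, required=True)
#         parser.add_argument("--rotate90", type=float, default=0.5)
#         parser.add_argument("--horizontal_flip", type=float, default=0.5)
#         parser.add_argument("--brightness_contrast", type=float, default=0.2)
#         parser.add_argument("--elastic_alpha", type=float, default=1.0)
#         parser.add_argument("--elastic_sigma", type=float, default=50.0)
#         parser.add_argument("--elastic_transform", type=float, default=0.2)
#         return parser.parse_args()
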
# Augmentation probabilities and parameters are exposed as hyperparameters and read
# from the command-line arguments parsed by custom_parser.
def augment_data_point(batch, args):
    assert len(batch["image_id"]) == len(batch["image"]) == len(batch["annotations"])
    transform = A.Compose([
        A.RandomRotate90(p=args.rotate90),
        A.HorizontalFlip(p=args.horizontal_flip),
        A.RandomBrightnessContrast(p=args.brightness_contrast),
        A.ElasticTransform(
            alpha=args.elastic_alpha,
            sigma=args.elastic_sigma,
            p=args.elastic_transform,
        ),
        A.Resize(640, 640),
    ], bbox_params=A.BboxParams(
        format="coco",
        label_fields=["category_ids"],
    ))
    new_batch = defaultdict(list)
    for image_id, image, annotations in zip(batch["image_id"], batch["image"], batch["annotations"]):
        image_np = np.array(image)
        bboxes = [ann["bbox"] for ann in annotations]
        category_ids = [ann["category_id"] for ann in annotations]
        transformed = transform(image=image_np, bboxes=bboxes, category_ids=category_ids)
        transformed_image = Image.fromarray(transformed["image"])
        # Copy the original annotations and swap in the transformed boxes.
        # NOTE: this pairing assumes the transform keeps every box and preserves
        # their order, which holds for the default BboxParams filtering used above.
        transformed_annotations = []
        for ann, new_bbox in zip(annotations, transformed["bboxes"]):
            new_ann = ann.copy()
            new_ann["bbox"] = new_bbox
            transformed_annotations.append(new_ann)
        new_batch["image_id"].append(image_id)
        new_batch["image"].append(transformed_image)
        new_batch["annotations"].append(transformed_annotations)
    return new_batch
def validation_transform(batch):
    # Validation uses a deterministic resize only; no random augmentation.
    assert len(batch["image_id"]) == len(batch["image"]) == len(batch["annotations"])
    transform = A.Compose([
        A.Resize(640, 640),
    ], bbox_params=A.BboxParams(
        format="coco",
        label_fields=["category_ids"],
    ))
    new_batch = defaultdict(list)
    for image_id, image, annotations in zip(batch["image_id"], batch["image"], batch["annotations"]):
        image_np = np.array(image)
        bboxes = [ann["bbox"] for ann in annotations]
        category_ids = [ann["category_id"] for ann in annotations]
        transformed = transform(image=image_np, bboxes=bboxes, category_ids=category_ids)
        transformed_image = Image.fromarray(transformed["image"])
        transformed_annotations = []
        for ann, new_bbox in zip(annotations, transformed["bboxes"]):
            new_ann = ann.copy()
            new_ann["bbox"] = new_bbox
            transformed_annotations.append(new_ann)
        new_batch["image_id"].append(image_id)
        new_batch["image"].append(transformed_image)
        new_batch["annotations"].append(transformed_annotations)
    return new_batch
def save_sample(dataset, idx, filename, save_dir="augmented_samples"):
    os.makedirs(save_dir, exist_ok=True)
    sample = dataset[idx]  # Fetch sample (augmented or original)
    image = sample["image"]
    bboxes = [ann["bbox"] for ann in sample["annotations"]]
    # Convert image to RGB (if not already)
    image = image.convert("RGB")
    # Draw bounding boxes
    draw = ImageDraw.Draw(image)
    for bbox in bboxes:
        x, y, w, h = bbox
        draw.rectangle([x, y, x + w, y + h], outline="red", width=3)
    # Save the image
    image_path = os.path.join(save_dir, filename)
    image.save(image_path)
def main():
    args = parse_args()
    dataset = load_from_disk(args.dataset_path)
    train_dataset = dataset["train"]
    # with_transform applies the function lazily, each time a batch of examples is accessed.
    augmented_train_dataset = train_dataset.with_transform(partial(augment_data_point, args=args))
    valid_dataset = dataset["val"].with_transform(validation_transform)
    # print(train_dataset[0])
    for i in range(5):  # Adjust the number of saved samples as needed
        save_sample(augmented_train_dataset, i, f"augmented_sample_{i}.png")
        save_sample(train_dataset, i, f"original_sample_{i}.png")
        save_sample(valid_dataset, i, f"validation_image_{i}.png")
if __name__ == "__main__":
    main()
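
# Example invocation, assuming the script is saved as augment_dataset.py and the CLI
# flags mirror the attribute names used above (both are assumptions; check custom_parser
# for the actual interface):
#
#     python augment_dataset.py --dataset_path ./my_coco_dataset \
#         --rotate90 0.5 --horizontal_flip 0.5 --brightness_contrast 0.3 \
#         --elastic_alpha 1.0 --elastic_sigma 50 --elastic_transform 0.2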