Commit d67ed13
Tony Fang committed
Parent(s): 437e9f0

added transformer_benchmark
Browse files

- .gitignore +4 -0
- README.md +9 -1
- object_detector_benchmark/data_wrangling.py +70 -0
- object_detector_benchmark/transformer_benchmark/Configs/conditional_detr.yaml +49 -0
- object_detector_benchmark/transformer_benchmark/Configs/facebook_detr.yaml +49 -0
- object_detector_benchmark/transformer_benchmark/Configs/yolos.yaml +49 -0
- object_detector_benchmark/transformer_benchmark/augmentations.py +117 -0
- object_detector_benchmark/transformer_benchmark/custom_parser.py +37 -0
- object_detector_benchmark/transformer_benchmark/train.py +229 -0
- object_detector_benchmark/yolo_to_coco.py +144 -0
.gitignore CHANGED
@@ -7,3 +7,7 @@
 object_detector_benchmark/yolo_benchmark/*
 !object_detector_benchmark/yolo_benchmark/
 !object_detector_benchmark/yolo_benchmark/*.yaml
+object_detector_benchmark/8_calves_arrow/
+object_detector_benchmark/8_calves_coco/
+object_detector_benchmark/transformer_benchmark/runs
+object_detector_benchmark/transformer_benchmark/__pycache__
README.md CHANGED
@@ -110,7 +110,15 @@ T = [
 ```
 
 Step 4:
-run the yolo detectors following
+run the yolo detectors following these steps:
+
+```
+cd yolo_benchmark
+
+Model_Name=yolov9t
+
+yolo cfg=experiment.yaml model=$Model_Name.yaml name=$Model_Name
+```
 
 
 ### Identity Classification
object_detector_benchmark/data_wrangling.py ADDED
@@ -0,0 +1,70 @@
+import json
+import os
+from datasets import Dataset, DatasetDict, Image, Features, Value, Sequence
+
+def create_split_dataset(coco_dir, split):
+    """Create a dataset for a single split with exact annotation format"""
+    with open(os.path.join(coco_dir, f"{split}.json")) as f:
+        coco_data = json.load(f)
+
+    # Create annotation map with full COCO fields
+    ann_map = {img['id']: [] for img in coco_data['images']}
+    for ann in coco_data['annotations']:
+        ann_map[ann['image_id']].append({
+            'id': ann['id'],
+            'category_id': 0,
+            'bbox': [float(x) for x in ann['bbox']],  # Ensure float values
+            'area': float(ann['area']),
+            'iscrowd': int(ann.get('iscrowd', 0))
+        })
+
+    # Build dataset entries with exact format
+    dataset = []
+    for img in coco_data['images']:
+        dataset.append({
+            'image_id': int(img['id']),
+            'image': {'path': os.path.join(coco_dir, img['file_name'])},
+            'annotations': ann_map[img['id']]  # List of annotation dicts
+        })
+
+    # Define features schema
+    features = Features({
+        'image_id': Value('int64'),
+        'image': Image(),
+        'annotations': [{
+            'id': Value('int64'),
+            'category_id': Value('int64'),
+            'bbox': [Value('float32')],  # List of floats for bbox
+            'area': Value('float32'),
+            'iscrowd': Value('int64')
+        }]
+    })
+
+    return Dataset.from_list(dataset, features=features)
+
+# Configuration
+coco_dir = "8_calves_coco"
+debug_limits = {"train": 50, "val": 20, "test": 10}
+seed = 42
+
+# Initialize containers
+full_dataset = DatasetDict()
+debug_dataset = DatasetDict()
+
+# Process splits
+for split in ["train", "val", "test"]:
+    # Create full split dataset
+    full_split = create_split_dataset(coco_dir, split)
+    full_dataset[split] = full_split
+
+    # Create debug version with random samples
+    debug_split = full_split.shuffle(seed=seed).select(range(debug_limits[split]))
+    debug_dataset[split] = debug_split
+
+# Save debug first
+# debug_dataset.save_to_disk("cache_debug")
+# print(f"✅ Debug cache saved with {sum(len(d) for d in debug_dataset.values())} samples")
+
+# Save full dataset
+full_dataset.save_to_disk("8_calves_arrow")
+print(f"✅ Full cache saved with {sum(len(d) for d in full_dataset.values())} samples")
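After the script runs, the Arrow cache can be reloaded to verify its contents. A minimal sketch, assuming `8_calves_arrow/` was written by `data_wrangling.py` above:

```
# Minimal sketch: reload the cached DatasetDict written by data_wrangling.py
# and inspect one sample.
from datasets import load_from_disk

dataset = load_from_disk("8_calves_arrow")
print({split: len(ds) for split, ds in dataset.items()})  # split sizes
print(dataset["train"][0]["annotations"][0])              # one COCO-style box
```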
object_detector_benchmark/transformer_benchmark/Configs/conditional_detr.yaml ADDED
@@ -0,0 +1,49 @@
+dataset_path: "../8_calves_arrow"
+save_directory: "runs"
+
+model_name: "microsoft/conditional-detr-resnet-50"
+
+# Training Hyperparameters
+num_train_epochs: 100
+
+# Learning Rates
+learning_rates:
+  model: 1e-5
+  class_labels_classifier: 1e-4
+  bbox_predictor: 1e-4
+
+# Scheduler Type
+
+lr_scheduler_type: "constant_with_warmup"
+
+# Weight Decay
+weight_decay: 1e-4  # or just set 0
+
+# Batch Sizes
+train_batch_size: 5
+eval_batch_size: 25
+dataloader_num_workers: 8
+
+# Gradient Clipping
+max_grad_norm: 0.1
+
+# Warm up
+warmup_steps: 0
+
+# Augmentation Probabilities
+rotate90: 1.0
+horizontal_flip: 0.5
+brightness_contrast: 0.4
+
+# Elastic Transform Parameters
+elastic_transform: 0.5
+elastic_alpha: 100.0
+elastic_sigma: 5.0
+
+# Evaluation & Saving
+eval_strategy: "epoch"  # Options: no, steps, epoch
+save_strategy: "epoch"  # Options: no, steps, epoch
+
+# Early Stopping
+early_stopping_patience: 10
+
object_detector_benchmark/transformer_benchmark/Configs/facebook_detr.yaml ADDED
@@ -0,0 +1,49 @@
+dataset_path: "../8_calves_arrow"
+save_directory: "runs"
+
+model_name: "facebook/detr-resnet-50"
+
+# Training Hyperparameters
+num_train_epochs: 100
+
+# Learning Rates
+learning_rates:
+  model: 1e-5
+  class_labels_classifier: 1e-4
+  bbox_predictor: 1e-4
+
+# Scheduler Type
+
+lr_scheduler_type: "constant_with_warmup"
+
+# Weight Decay
+weight_decay: 1e-4  # or just set 0
+
+# Batch Sizes
+train_batch_size: 5
+eval_batch_size: 25
+dataloader_num_workers: 8
+
+# Gradient Clipping
+max_grad_norm: 0.1
+
+# Warm up
+warmup_steps: 0
+
+# Augmentation Probabilities
+rotate90: 1.0
+horizontal_flip: 0.5
+brightness_contrast: 0.4
+
+# Elastic Transform Parameters
+elastic_transform: 0.5
+elastic_alpha: 100.0
+elastic_sigma: 5.0
+
+# Evaluation & Saving
+eval_strategy: "epoch"  # Options: no, steps, epoch
+save_strategy: "epoch"  # Options: no, steps, epoch
+
+# Early Stopping
+early_stopping_patience: 10
+
object_detector_benchmark/transformer_benchmark/Configs/yolos.yaml ADDED
@@ -0,0 +1,49 @@
+dataset_path: "../8_calves_arrow"
+save_directory: "runs"
+
+model_name: "hustvl/yolos-small"
+
+# Training Hyperparameters
+num_train_epochs: 100
+
+# Learning Rates
+learning_rates:
+  vit: 2.5e-5
+  class_labels_classifier: 2.5e-5
+  bbox_predictor: 2.5e-5
+
+# Scheduler Type
+
+lr_scheduler_type: "cosine"
+
+# Weight Decay
+weight_decay: 1e-4  # or just set 0
+
+# Batch Sizes
+train_batch_size: 5
+eval_batch_size: 25
+dataloader_num_workers: 8
+
+# Gradient Clipping
+max_grad_norm: 0.0
+
+# Warm up
+warmup_steps: 0
+
+# Augmentation Probabilities
+rotate90: 1.0
+horizontal_flip: 0.5
+brightness_contrast: 0.4
+
+# Elastic Transform Parameters
+elastic_transform: 0.5
+elastic_alpha: 100.0
+elastic_sigma: 5.0
+
+# Evaluation & Saving
+eval_strategy: "epoch"  # Options: no, steps, epoch
+save_strategy: "epoch"  # Options: no, steps, epoch
+
+# Early Stopping
+early_stopping_patience: 10
+
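All three configs share one schema, so any of them can be sanity-checked the same way `train.py` reads them. A minimal sketch, assuming it is run from `transformer_benchmark/` so that `custom_parser.py` (added below) is importable:

```
# Minimal sketch: load a config into the namespace train.py consumes.
# Assumes the working directory is transformer_benchmark/.
from custom_parser import load_config

cfg = load_config("Configs/yolos.yaml")
print(cfg.model_name)       # "hustvl/yolos-small"
print(cfg.learning_rates)   # per-module learning rates
```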
object_detector_benchmark/transformer_benchmark/augmentations.py ADDED
@@ -0,0 +1,117 @@
+from datasets import load_from_disk
+import albumentations as A
+import numpy as np
+from collections import defaultdict
+import os
+from PIL import Image, ImageDraw
+from functools import partial
+from custom_parser import parse_args
+
+
+# augmentation probabilities need to be hyperparameters
+def augment_data_point(batch, args):
+    assert len(batch["image_id"]) == len(batch["image"]) == len(batch["annotations"])
+    transform = A.Compose([
+        A.RandomRotate90(p=args.rotate90),
+        A.HorizontalFlip(p=args.horizontal_flip),
+        A.RandomBrightnessContrast(p=args.brightness_contrast),
+        A.ElasticTransform(
+            alpha=args.elastic_alpha,
+            sigma=args.elastic_sigma,
+            p=args.elastic_transform
+        ),
+        A.Resize(640, 640)
+    ], bbox_params=A.BboxParams(
+        format="coco",
+        label_fields=["category_ids"],
+    ))
+
+    new_batch = defaultdict(list)
+    for id, image, annotations in zip(batch["image_id"], batch["image"], batch["annotations"]):
+        image_np = np.array(image)
+        bboxes = [ann["bbox"] for ann in annotations]
+        category_ids = [ann["category_id"] for ann in annotations]
+
+        transformed = transform(image=image_np, bboxes=bboxes, category_ids=category_ids)
+        transformed_image = Image.fromarray(transformed["image"])
+
+        transformed_annotations = []
+        for ann, new_bbox in zip(annotations, transformed["bboxes"]):
+            new_ann = ann.copy()
+            new_ann["bbox"] = new_bbox
+            transformed_annotations.append(new_ann)
+        new_batch["image_id"].append(id)
+        new_batch["image"].append(transformed_image)
+        new_batch["annotations"].append(transformed_annotations)
+
+    return new_batch
+
+
+
+def validation_transform(batch):
+    assert len(batch["image_id"]) == len(batch["image"]) == len(batch["annotations"])
+    transform = A.Compose([
+        A.Resize(640, 640)
+    ], bbox_params=A.BboxParams(
+        format="coco",
+        label_fields=["category_ids"],
+    ))
+
+    new_batch = defaultdict(list)
+    for id, image, annotations in zip(batch["image_id"], batch["image"], batch["annotations"]):
+        image_np = np.array(image)
+        bboxes = [ann["bbox"] for ann in annotations]
+        category_ids = [ann["category_id"] for ann in annotations]
+
+        transformed = transform(image=image_np, bboxes=bboxes, category_ids=category_ids)
+        transformed_image = Image.fromarray(transformed["image"])
+
+        transformed_annotations = []
+        for ann, new_bbox in zip(annotations, transformed["bboxes"]):
+            new_ann = ann.copy()
+            new_ann["bbox"] = new_bbox
+            transformed_annotations.append(new_ann)
+        new_batch["image_id"].append(id)
+        new_batch["image"].append(transformed_image)
+        new_batch["annotations"].append(transformed_annotations)
+
+    return new_batch
+
+
+def save_sample(dataset, idx, filename, save_dir="augmented_samples"):
+    os.makedirs(save_dir, exist_ok=True)
+    sample = dataset[idx]  # Fetch sample (augmented or original)
+    image = sample["image"]
+    bboxes = [ann["bbox"] for ann in sample["annotations"]]
+
+    # Convert image to RGB (if not already)
+    image = image.convert("RGB")
+
+    # Draw bounding boxes
+    draw = ImageDraw.Draw(image)
+    for bbox in bboxes:
+        x, y, w, h = bbox
+        draw.rectangle([x, y, x + w, y + h], outline="red", width=3)
+
+    # Save the image
+    image_path = os.path.join(save_dir, filename)
+    image.save(image_path)
+
+
+
+def main():
+    args = parse_args()
+    dataset = load_from_disk(args.dataset_path)
+
+    train_dataset = dataset["train"]
+    augmented_train_dataset = train_dataset.with_transform(lambda batch: augment_data_point(batch, args))
+    valid_dataset = dataset["val"].with_transform(validation_transform)
+    # print(train_dataset[0])
+    for i in range(5):  # Adjust index as needed
+        save_sample(augmented_train_dataset, i, f"augmented_sample_{i}.png")
+        save_sample(train_dataset, i, f"original_sample_{i}.png")
+        save_sample(valid_dataset, i, f"validation_image_{i}.png")
+
+
+if __name__ == "__main__":
+    main()
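Both transforms above rely on `bbox_params` to keep the boxes aligned with the pixels. A minimal standalone sketch of the same `A.BboxParams` setup on a single resize (the frame size and box are made-up values, not taken from the dataset):

```
# Minimal sketch: how the bbox_params used above keep COCO-format boxes
# ([x_min, y_min, w, h]) in sync with an A.Resize.
import albumentations as A
import numpy as np

t = A.Compose(
    [A.Resize(640, 640)],
    bbox_params=A.BboxParams(format="coco", label_fields=["category_ids"]),
)
img = np.zeros((480, 854, 3), dtype=np.uint8)  # fake 854x480 frame
out = t(image=img, bboxes=[[100, 50, 200, 120]], category_ids=[0])
print(out["bboxes"])  # box rescaled into 640x640 coordinates
```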
object_detector_benchmark/transformer_benchmark/custom_parser.py ADDED
@@ -0,0 +1,37 @@
+import yaml
+import argparse
+from argparse import Namespace
+from ultralytics.utils import IterableSimpleNamespace
+
+
+# Function to Load Configurations from YAML
+def load_config(config_path):
+    with open(config_path, "r") as file:
+        config_dict = yaml.safe_load(file)
+
+    # Convert specific keys to float/int to prevent type errors
+    config_dict["weight_decay"] = float(config_dict["weight_decay"])
+    config_dict["warmup_steps"] = int(config_dict["warmup_steps"])
+
+    config_dict["rotate90"] = float(config_dict["rotate90"])
+    config_dict["horizontal_flip"] = float(config_dict["horizontal_flip"])
+    config_dict["brightness_contrast"] = float(config_dict["brightness_contrast"])
+    config_dict["elastic_transform"] = float(config_dict["elastic_transform"])
+    config_dict["elastic_alpha"] = float(config_dict["elastic_alpha"])
+    config_dict["elastic_sigma"] = float(config_dict["elastic_sigma"])
+    return IterableSimpleNamespace(**config_dict)
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Custom Hugging Face Training with config file")
+
+    parser.add_argument("--config", type=str, required=True, help="Path to YAML config file")
+
+    args = parser.parse_args()
+
+    # Load settings from the YAML file
+    config = load_config(args.config)
+    return config
+
+
+if __name__ == "__main__":
+    print(parse_args())  # no main() exists in this module; print the parsed config instead
object_detector_benchmark/transformer_benchmark/train.py ADDED
@@ -0,0 +1,229 @@
+from datasets import load_from_disk
+from transformers import (
+    AutoModelForObjectDetection,
+    AutoImageProcessor,
+    Trainer,
+    TrainingArguments,
+    EarlyStoppingCallback,
+)
+import torch
+import numpy as np
+from torchmetrics.detection.mean_ap import MeanAveragePrecision
+from functools import partial
+from augmentations import augment_data_point, validation_transform
+from pprint import pprint
+from custom_parser import parse_args
+from torch.optim import AdamW
+import os
+import torchvision.transforms as transforms
+from copy import deepcopy
+import time
+# from ultralytics.utils.plotting import Annotator
+from PIL import Image, ImageDraw
+
+# this is still bugged!!!
+# I guess time to parallelise this and see if there are more bugs, currently seems fine
+# just dropping the last incomplete batch
+# check how many bounding boxes there are for each image, if they all equal to 8,
+# can just add assertions
+@torch.no_grad()
+def compute_metrics(results):
+    assert len(results.predictions) == len(results.label_ids)
+    # pack up predictions
+    prediction_list = []
+    for batch in results.predictions:
+        logits = batch[1]  # shape: (number of samples, number of predictions for each sample, number of classes)
+        scores = torch.softmax(torch.tensor(logits), dim=-1)
+        confidences, labels = scores.max(dim=-1)
+        boxes = batch[2]
+
+        for boxes_per_image, confidences_per_image, labels_per_image in zip(boxes, confidences, labels):
+            boxes_tensor = torch.tensor(boxes_per_image)
+            sorted_indices = torch.argsort(confidences_per_image, descending=True)
+            keep = sorted_indices[:100]
+            boxes_top = boxes_tensor[keep]
+            scores_top = confidences_per_image[keep]
+            labels_top = labels_per_image[keep]
+            prediction_list.append({
+                'boxes': boxes_top,
+                'scores': scores_top,
+                'labels': labels_top,
+            })
+    # pack up targets
+    target_list = []
+    for batch in results.label_ids:
+        for target in batch:
+            target_list.append(
+                {
+                    'labels': torch.tensor(target['class_labels']),
+                    'boxes': torch.tensor(target['boxes']),
+                    'area': torch.tensor(target['area']),
+                    'iscrowd': torch.tensor(target['iscrowd']),
+                }
+            )
+
+    assert len(prediction_list) == len(target_list)
+    for target in target_list:
+        assert len(target['boxes']) == len(target['labels']) == len(target['area']) == len(target['iscrowd']) > 2
+
+
+    # compute metrics
+    metric = MeanAveragePrecision(box_format='cxcywh')
+    metric.update(prediction_list, target_list)
+    metrics = metric.compute()
+    # for key in metrics.keys():
+    #     print(key, type(metrics[key]))
+    # print("map_small", type(metric["map_small"]), print(metric["map_small"]))
+    return {
+        "map": metrics["map"].item(),
+        "map_50": metrics["map_50"].item(),
+        "map_75": metrics["map_75"].item(),
+    }
+
+
+# Do not delete the plots!!! Need to do it for every detector!!!
+def collate_fn(batch, image_processor):
+    images = [sample["image"] for sample in batch]
+    # for image in images:
+    #     image.save(f"original_{time.time()}.jpg")
+    image_ids = [sample["image_id"] for sample in batch]
+    formatted_annotations = [{"image_id": sample["image_id"], "annotations": sample["annotations"]} for sample in batch]
+
+    inputs = image_processor(images=images, annotations=formatted_annotations, return_tensors="pt")
+    # mean = [0.485, 0.456, 0.406]  # Example: ImageNet Mean
+    # std = [0.229, 0.224, 0.225]  # Example: ImageNet Std
+    # transform = transforms.Compose([
+    #     # transforms.Normalize(mean=[-m/s for m, s in zip(mean, std)], std=[1/s for s in std]),  # Undo standardization
+    #     transforms.ToPILImage()
+    # ])
+    # for pixel_values, labels in zip(inputs.pixel_values, inputs.labels):
+    #     img_width, img_height = labels['size'].tolist()
+    #     print(pixel_values.size(), labels['size'])
+    #     bboxes = labels['boxes']
+    #     image_cpy = deepcopy(pixel_values)
+    #     image_cpy = transform(image_cpy)
+    #     draw = ImageDraw.Draw(image_cpy)
+
+    #     for box in bboxes:
+    #         x_center, y_center, width, height = box.tolist()
+    #         x0 = (x_center - width/2) * img_width
+    #         y0 = (y_center - height/2) * img_height
+    #         x1 = (x_center + width/2) * img_width
+    #         y1 = (y_center + height/2) * img_height
+    #         draw.rectangle([x0, y0, x1, y1], outline="green", width=1)
+    #     image_cpy.save(f"{time.time()}.jpg")
+    return inputs
+
+def get_param_groups(model, args):
+    model_modules = dict(model.named_children())
+    invalid_keys = [key for key in args.learning_rates if key not in model_modules]
+    if invalid_keys:
+        raise ValueError(f"Invalid keys in learning_rates: {invalid_keys}. "
+                         f"These are not valid model components. Valid options: {list(model_modules.keys())}")
+    param_groups = []
+    for name, lr in args.learning_rates.items():
+        if args.model_name == "PekingU/rtdetr_v2_r50vd" and name == "model":
+            param_groups.append(
+                {
+                    "params": model_modules[name].backbone.parameters(),
+                    "lr": float(lr)
+                }
+            )
+        else:
+            param_groups.append(
+                {
+                    "params": model_modules[name].parameters(),
+                    "lr": float(lr)
+                }
+            )
+    return param_groups
+
+# pick out the hyperparameters and have a save directory
+def main():
+    args = parse_args()
+    print(args)
+    dataset = load_from_disk(args.dataset_path)
+    train_subset = dataset['train'].with_transform(lambda batch: augment_data_point(batch, args))
+    val_subset = dataset['val'].with_transform(validation_transform)
+    test_subset = dataset['test'].with_transform(validation_transform)
+    dataset = {
+        "train": train_subset,
+        "validation": val_subset,
+        "test": test_subset
+    }
+
+    if args.model_name in ["SenseTime/deformable-detr", "PekingU/rtdetr_v2_r50vd", "microsoft/conditional-detr-resnet-50"]:
+        num_labels = 2
+    else:
+        num_labels = 1
+    model = AutoModelForObjectDetection.from_pretrained(
+        args.model_name,
+        num_labels=num_labels,
+        ignore_mismatched_sizes=True
+    )
+
+    processor = AutoImageProcessor.from_pretrained(
+        args.model_name,
+        do_normalize=False
+    )
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    model.to(device)
+
+    param_groups = get_param_groups(model, args)
+    optimizer = AdamW(param_groups, weight_decay=args.weight_decay)
+
+    # Training arguments
+    # eventually set these to the same as ultralytics !!!!
+    # evaluation always goes through the entire validation dataset
+    training_args = TrainingArguments(
+        output_dir=f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}",
+        num_train_epochs=args.num_train_epochs,
+        max_grad_norm=args.max_grad_norm,
+        per_device_train_batch_size=args.train_batch_size,
+        per_device_eval_batch_size=args.eval_batch_size,
+        dataloader_num_workers=args.dataloader_num_workers,
+        eval_strategy=args.eval_strategy,
+        save_strategy=args.save_strategy,
+        save_total_limit=2,
+        metric_for_best_model="eval_map",
+        greater_is_better=True,
+        load_best_model_at_end=True,
+        eval_do_concat_batches=False,
+        remove_unused_columns=False,
+        dataloader_drop_last=True,
+        lr_scheduler_type=args.lr_scheduler_type,
+        warmup_steps=args.warmup_steps,
+        eval_accumulation_steps=10
+    )
+
+    print(training_args)
+    # Trainer
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=dataset["train"],
+        eval_dataset=dataset["validation"],
+        data_collator=partial(collate_fn, image_processor=processor),
+        compute_metrics=compute_metrics,
+        callbacks=[
+            EarlyStoppingCallback(
+                early_stopping_patience=args.early_stopping_patience
+            ),
+        ],
+        optimizers=(optimizer, None)
+    )
+
+    # Train
+    trainer.train()
+    model.save_pretrained(f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}/best_model")
+    processor.save_pretrained(f"./{args.save_directory}/{os.environ.get('SLURM_JOB_ID')}/best_model")
+    metrics = trainer.evaluate()
+    pprint(metrics)
+    print("test results")
+    print(f"test dataset length: {len(dataset['test'])}")
+    metrics = trainer.evaluate(dataset["test"])
+    pprint(metrics)
+
+if __name__ == "__main__":
+    main()
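The metric call pattern that `compute_metrics()` relies on can be exercised in isolation. A minimal sketch, assuming torchmetrics is installed; the boxes, scores, and labels are fabricated values in the same cxcywh format:

```
# Minimal sketch: exercise MeanAveragePrecision the way compute_metrics() does,
# with one fabricated prediction/target pair in cxcywh box format.
import torch
from torchmetrics.detection.mean_ap import MeanAveragePrecision

metric = MeanAveragePrecision(box_format="cxcywh")
preds = [{
    "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.2]]),
    "scores": torch.tensor([0.9]),
    "labels": torch.tensor([0]),
}]
targets = [{
    "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.2]]),
    "labels": torch.tensor([0]),
}]
metric.update(preds, targets)
print(metric.compute()["map"])  # 1.0 for this perfect match
```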
object_detector_benchmark/yolo_to_coco.py ADDED
@@ -0,0 +1,144 @@
+import os
+import json
+from PIL import Image
+import multiprocessing
+from tqdm import tqdm
+
+# Configuration
+YOLO_DIR = "8_calves_yolo"
+COCO_DIR = "8_calves_coco"
+CATEGORIES = [{"id": 1, "name": "cow"}]
+NUM_WORKERS = multiprocessing.cpu_count()  # Use all available cores
+
+def process_image(args):
+    image_path, label_path, image_id = args
+    try:
+        with Image.open(image_path) as img:
+            width, height = img.size
+    except Exception as e:
+        print(f"Error opening {image_path}: {e}")
+        return None, []
+
+    image_info = {
+        "id": image_id,
+        "file_name": os.path.relpath(image_path, COCO_DIR),
+        "width": width,
+        "height": height,
+    }
+
+    annotations = []
+    if os.path.exists(label_path):
+        try:
+            with open(label_path, "r") as f:
+                lines = f.readlines()
+        except Exception as e:
+            print(f"Error reading {label_path}: {e}")
+            return image_info, []
+
+        for line in lines:
+            parts = line.strip().split()
+            if len(parts) != 5:
+                continue
+
+            try:
+                class_id = int(parts[0])
+                x_center, y_center = float(parts[1]), float(parts[2])
+                w, h = float(parts[3]), float(parts[4])
+            except ValueError:
+                print(f"Error parsing line in {label_path}: {line}")
+                continue
+
+            if class_id != 0:
+                continue
+
+            # Convert YOLO to COCO bbox with boundary checks
+            w_abs = w * width
+            h_abs = h * height
+            x_min = max(0, (x_center * width) - w_abs/2)
+            y_min = max(0, (y_center * height) - h_abs/2)
+            w_abs = min(width - x_min, w_abs)
+            h_abs = min(height - y_min, h_abs)
+
+            annotations.append({
+                "image_id": image_id,
+                "category_id": 1,
+                "bbox": [x_min, y_min, w_abs, h_abs],
+                "area": w_abs * h_abs,
+                "iscrowd": 0,
+            })
+
+    return image_info, annotations
+
+def process_split(split):
+    split_dir = os.path.join(YOLO_DIR, split)
+    image_dir = os.path.join(split_dir, "images")
+    label_dir = os.path.join(split_dir, "labels")
+
+    if not os.path.exists(image_dir):
+        print(f"Skipping {split} - no image directory")
+        return
+
+    # Get sorted list of image files
+    image_files = sorted([
+        f for f in os.listdir(image_dir)
+        if f.lower().endswith(".png")
+    ])
+
+    # Prepare arguments for parallel processing
+    tasks = []
+    for idx, image_file in enumerate(image_files, 1):
+        image_path = os.path.join(image_dir, image_file)
+        label_path = os.path.join(label_dir, os.path.splitext(image_file)[0] + ".txt")
+        tasks.append((image_path, label_path, idx))
+
+    # Process images in parallel
+    results = []
+    with multiprocessing.Pool(processes=NUM_WORKERS) as pool:
+        for result in tqdm(pool.imap(process_image, tasks),
+                           total=len(tasks),
+                           desc=f"Processing {split}"):
+            results.append(result)
+
+    # Collect results
+    images = []
+    annotations = []
+    annotation_id = 1
+
+    for image_info, image_anns in results:
+        if image_info is None:
+            continue
+
+        images.append(image_info)
+        for ann in image_anns:
+            ann["id"] = annotation_id
+            annotations.append(ann)
+            annotation_id += 1
+
+    # Create COCO format
+    coco_data = {
+        "info": {
+            "description": "COCO Dataset converted from YOLO format",
+            "version": "1.0",
+            "year": 2023,
+            "contributor": "",
+        },
+        "licenses": [],
+        "categories": CATEGORIES,
+        "images": images,
+        "annotations": annotations,
+    }
+
+    # Save to JSON
+    output_path = os.path.join(COCO_DIR, f"{split}.json")
+    with open(output_path, "w") as f:
+        json.dump(coco_data, f, indent=2)
+
+    print(f"Saved {split} with {len(images)} images and {len(annotations)} annotations")
+
+def main():
+    os.makedirs(COCO_DIR, exist_ok=True)
+    for split in ["train", "val", "test"]:
+        process_split(split)
+
+if __name__ == "__main__":
+    main()
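The box conversion in `process_image()` is easy to check by hand. A minimal sketch with made-up numbers (the 854x480 frame size is hypothetical, not taken from the dataset):

```
# Minimal sketch: the YOLO -> COCO box conversion from process_image(),
# applied to one normalized YOLO box on a hypothetical 854x480 image.
width, height = 854, 480
x_center, y_center, w, h = 0.5, 0.5, 0.25, 0.5

w_abs, h_abs = w * width, h * height          # absolute box size
x_min = max(0, x_center * width - w_abs / 2)  # top-left corner
y_min = max(0, y_center * height - h_abs / 2)
w_abs = min(width - x_min, w_abs)             # clamp to image bounds
h_abs = min(height - y_min, h_abs)
print([x_min, y_min, w_abs, h_abs])           # [320.25, 120.0, 213.5, 240.0]
```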