|
import json |
|
import os |
|
from datasets import Dataset, DatasetDict, Image, Features, Value, Sequence |
|
|
|
def create_split_dataset(coco_dir, split, category_id=0):
    """Build a HuggingFace ``datasets.Dataset`` for one COCO split.

    Reads ``{coco_dir}/{split}.json`` (COCO object-detection format) and
    produces one record per image with that image's annotations grouped
    under it. Image paths are resolved relative to ``coco_dir``.

    Args:
        coco_dir: Directory containing the split JSON files and the image
            files referenced by each ``file_name`` entry.
        split: Split name, e.g. ``"train"``, ``"val"``, or ``"test"``.
        category_id: Category id written to every annotation. Defaults to
            ``0`` — the original behavior collapsed all categories into a
            single class.

    Returns:
        A ``datasets.Dataset`` with features ``image_id`` (int64),
        ``image`` (Image), and ``annotations`` (list of dicts with
        ``id``/``category_id``/``bbox``/``area``/``iscrowd``).
    """
    with open(os.path.join(coco_dir, f"{split}.json"), encoding="utf-8") as f:
        coco_data = json.load(f)

    # Group annotations by image id; images with no annotations keep an
    # empty list so every image still yields a record.
    ann_map = {img['id']: [] for img in coco_data['images']}
    for ann in coco_data['annotations']:
        # Skip annotations that reference an image absent from `images`
        # (previously this raised an opaque KeyError on malformed files).
        bucket = ann_map.get(ann['image_id'])
        if bucket is None:
            continue
        bucket.append({
            'id': ann['id'],
            'category_id': int(category_id),
            'bbox': [float(x) for x in ann['bbox']],
            'area': float(ann['area']),
            'iscrowd': int(ann.get('iscrowd', 0)),
        })

    # One record per image, annotations attached.
    dataset = [
        {
            'image_id': int(img['id']),
            'image': {'path': os.path.join(coco_dir, img['file_name'])},
            'annotations': ann_map[img['id']],
        }
        for img in coco_data['images']
    ]

    # Explicit schema so Arrow types are stable across splits even when a
    # split happens to contain no annotations.
    features = Features({
        'image_id': Value('int64'),
        'image': Image(),
        'annotations': [{
            'id': Value('int64'),
            'category_id': Value('int64'),
            'bbox': [Value('float32')],
            'area': Value('float32'),
            'iscrowd': Value('int64'),
        }],
    })

    return Dataset.from_list(dataset, features=features)
|
|
|
|
|
# --- Configuration ----------------------------------------------------------
coco_dir = "8_calves_coco"
debug_limits = {"train": 50, "val": 20, "test": 10}  # per-split sizes for the small debug cache
seed = 42  # fixed shuffle seed so the debug subset is reproducible


full_dataset = DatasetDict()
debug_dataset = DatasetDict()


for split in ["train", "val", "test"]:

    full_split = create_split_dataset(coco_dir, split)
    full_dataset[split] = full_split

    # Clamp to the split size so a split smaller than its debug limit
    # doesn't raise IndexError from `select`.
    n_debug = min(len(full_split), debug_limits[split])
    debug_dataset[split] = full_split.shuffle(seed=seed).select(range(n_debug))


full_dataset.save_to_disk("8_calves_arrow")
print(f"✅ Full cache saved with {sum(len(d) for d in full_dataset.values())} samples")

# Bug fix: the debug subset was computed but never persisted — save it too.
debug_dataset.save_to_disk("8_calves_arrow_debug")
print(f"✅ Debug cache saved with {sum(len(d) for d in debug_dataset.values())} samples")
|
|