import json
import os

from datasets import Dataset, DatasetDict, Image, Features, Value

def create_split_dataset(coco_dir, split):
    """Create a dataset for a single split, preserving the COCO annotation format."""
    with open(os.path.join(coco_dir, f"{split}.json")) as f:
        coco_data = json.load(f)

    # Map each image id to its annotations, keeping the full COCO fields
    ann_map = {img['id']: [] for img in coco_data['images']}
    for ann in coco_data['annotations']:
        ann_map[ann['image_id']].append({
            'id': ann['id'],
            'category_id': 0,  # collapse all annotations to a single category
            'bbox': [float(x) for x in ann['bbox']],  # ensure float values
            'area': float(ann['area']),
            'iscrowd': int(ann.get('iscrowd', 0))
        })

    # Build dataset entries matching the features schema below
    dataset = []
    for img in coco_data['images']:
        dataset.append({
            'image_id': int(img['id']),
            'image': {'path': os.path.join(coco_dir, img['file_name'])},
            'annotations': ann_map[img['id']]  # list of annotation dicts
        })

    # Define the features schema; a plain Python list inside Features
    # declares a variable-length sequence of the enclosed type
    features = Features({
        'image_id': Value('int64'),
        'image': Image(),
        'annotations': [{
            'id': Value('int64'),
            'category_id': Value('int64'),
            'bbox': [Value('float32')],  # list of floats per box
            'area': Value('float32'),
            'iscrowd': Value('int64')
        }]
    })
    return Dataset.from_list(dataset, features=features)
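
# Example usage (a quick sketch, left commented since the loop below already
# calls the function): build a single split in isolation to check the schema.
#   val_ds = create_split_dataset("8_calves_coco", "val")
#   print(val_ds.features)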

# Configuration
coco_dir = "8_calves_coco"
debug_limits = {"train": 50, "val": 20, "test": 10}
seed = 42

# Initialize containers
full_dataset = DatasetDict()
debug_dataset = DatasetDict()

# Process splits
for split in ["train", "val", "test"]:
    # Create the full split dataset
    full_split = create_split_dataset(coco_dir, split)
    full_dataset[split] = full_split

    # Create a debug version with random samples (capped at the split size
    # so select() cannot request more rows than exist)
    n_debug = min(debug_limits[split], len(full_split))
    debug_split = full_split.shuffle(seed=seed).select(range(n_debug))
    debug_dataset[split] = debug_split

# Optionally save the debug cache first (left disabled here)
# debug_dataset.save_to_disk("cache_debug")
# print(f"✅ Debug cache saved with {sum(len(d) for d in debug_dataset.values())} samples")

# Save full dataset
full_dataset.save_to_disk("8_calves_arrow")
print(f"✅ Full cache saved with {sum(len(d) for d in full_dataset.values())} samples")