# Convert a YOLO-format detection dataset to COCO JSON annotations.
import os
import json
from PIL import Image
import multiprocessing
from tqdm import tqdm
# Configuration
YOLO_DIR = "8_calves_yolo"  # input root: expects <split>/images and <split>/labels under it
COCO_DIR = "8_calves_coco"  # output root: one <split>.json is written per split
CATEGORIES = [{"id": 1, "name": "cow"}]  # single-class dataset; YOLO class 0 maps to COCO id 1
NUM_WORKERS = multiprocessing.cpu_count() # Use all available cores
def process_image(args):
    """Convert one image and its YOLO label file into COCO dicts.

    Parameters
    ----------
    args : tuple(str, str, int)
        (image_path, label_path, image_id), packed as a single tuple so the
        function works with multiprocessing.Pool.imap.

    Returns
    -------
    tuple(dict | None, list[dict])
        (image_info, annotations). image_info is None when the image cannot
        be opened. Annotation dicts deliberately lack the "id" key; the
        caller assigns globally unique ids afterwards.
    """
    image_path, label_path, image_id = args
    try:
        with Image.open(image_path) as img:
            width, height = img.size
    except Exception as e:
        # Best-effort boundary: a corrupt/unreadable image is reported and
        # skipped rather than aborting the whole split.
        print(f"Error opening {image_path}: {e}")
        return None, []
    image_info = {
        "id": image_id,
        # NOTE(review): path is made relative to COCO_DIR even though the
        # image lives under YOLO_DIR, producing "../<YOLO_DIR>/..." entries —
        # confirm downstream consumers expect this layout.
        "file_name": os.path.relpath(image_path, COCO_DIR),
        "width": width,
        "height": height,
    }
    annotations = []
    if os.path.exists(label_path):
        try:
            with open(label_path, "r") as f:
                lines = f.readlines()
        except (OSError, UnicodeDecodeError) as e:
            # Was `except Exception`: narrowed to the errors file reads raise.
            print(f"Error reading {label_path}: {e}")
            return image_info, []
        for line in lines:
            parts = line.strip().split()
            if len(parts) != 5:
                continue  # malformed row; YOLO rows are "cls cx cy w h"
            try:
                class_id = int(parts[0])
                x_center, y_center = float(parts[1]), float(parts[2])
                w, h = float(parts[3]), float(parts[4])
            except ValueError:
                # Was a bare `except:` which also swallowed KeyboardInterrupt/
                # SystemExit; only numeric-parse failures belong here.
                print(f"Error parsing line in {label_path}: {line}")
                continue
            if class_id != 0:
                continue  # only class 0 (mapped to COCO category 1, "cow") is kept
            # Convert normalized YOLO (cx, cy, w, h) to absolute COCO
            # (x_min, y_min, w, h), clamping the box to the image bounds.
            w_abs = w * width
            h_abs = h * height
            x_min = max(0, (x_center * width) - w_abs/2)
            y_min = max(0, (y_center * height) - h_abs/2)
            w_abs = min(width - x_min, w_abs)
            h_abs = min(height - y_min, h_abs)
            annotations.append({
                "image_id": image_id,
                "category_id": 1,
                "bbox": [x_min, y_min, w_abs, h_abs],
                "area": w_abs * h_abs,
                "iscrowd": 0,
            })
    return image_info, annotations
def process_split(split):
    """Convert one dataset split from YOLO to COCO and write <split>.json.

    Reads PNGs from YOLO_DIR/<split>/images and matching .txt labels from
    YOLO_DIR/<split>/labels, converts them in parallel, and writes a COCO
    annotation file to COCO_DIR/<split>.json.
    """
    images_root = os.path.join(os.path.join(YOLO_DIR, split), "images")
    labels_root = os.path.join(os.path.join(YOLO_DIR, split), "labels")
    if not os.path.exists(images_root):
        print(f"Skipping {split} - no image directory")
        return
    # Deterministic ordering so image ids are stable across runs.
    png_names = sorted(
        name for name in os.listdir(images_root)
        if name.lower().endswith(".png")
    )
    # One task per image: (image path, label path, 1-based image id).
    tasks = [
        (
            os.path.join(images_root, name),
            os.path.join(labels_root, os.path.splitext(name)[0] + ".txt"),
            idx,
        )
        for idx, name in enumerate(png_names, 1)
    ]
    # Fan the conversion out across worker processes; imap preserves order.
    with multiprocessing.Pool(processes=NUM_WORKERS) as pool:
        results = list(tqdm(pool.imap(process_image, tasks),
                            total=len(tasks),
                            desc=f"Processing {split}"))
    # Merge per-image results and hand out globally unique annotation ids.
    images = []
    annotations = []
    next_ann_id = 1
    for image_info, image_anns in results:
        if image_info is None:
            continue  # unreadable image was skipped by the worker
        images.append(image_info)
        for ann in image_anns:
            ann["id"] = next_ann_id
            next_ann_id += 1
            annotations.append(ann)
    # Assemble the top-level COCO document.
    coco_data = {
        "info": {
            "description": "COCO Dataset converted from YOLO format",
            "version": "1.0",
            "year": 2023,
            "contributor": "",
        },
        "licenses": [],
        "categories": CATEGORIES,
        "images": images,
        "annotations": annotations,
    }
    output_path = os.path.join(COCO_DIR, f"{split}.json")
    with open(output_path, "w") as f:
        json.dump(coco_data, f, indent=2)
    print(f"Saved {split} with {len(images)} images and {len(annotations)} annotations")
def main():
    """Create the output directory and convert every dataset split."""
    os.makedirs(COCO_DIR, exist_ok=True)
    for split_name in ("train", "val", "test"):
        process_split(split_name)
# Entry point: run the full conversion only when executed as a script.
if __name__ == "__main__":
    main()