jfk-files-2025 / generate_dataset_files.py
#!/usr/bin/env python3
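"""Generate dataset metadata files for the jfk-files-2025 PDF collection.

Scans the data/ directory for PDF files, records each file's name, path, size,
and content type, then writes default/train.json and dataset_dict.json.
Run from the root of the dataset repository.
"""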
import os
import json
import glob
from tqdm import tqdm
# Directory containing the PDF files
data_dir = "data"
# Get all PDF files
pdf_files = glob.glob(os.path.join(data_dir, "*.pdf"))
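
# glob returns files in an arbitrary, platform-dependent order; sorting keeps
# the generated metadata files reproducible across runs (optional addition,
# not part of the original script).
pdf_files.sort()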
# Create dataset structure
dataset = {
    "file_name": [],
    "file_path": [],
    "file_size": [],
    "content_type": []
}
# Add information for each PDF file
for pdf_file in tqdm(pdf_files, desc="Processing PDF files"):
    file_name = os.path.basename(pdf_file)
    file_path = pdf_file
    file_size = os.path.getsize(pdf_file)
    dataset["file_name"].append(file_name)
    dataset["file_path"].append(file_path)
    dataset["file_size"].append(file_size)
    dataset["content_type"].append("application/pdf")
# Create the default directory if it doesn't exist
os.makedirs("default", exist_ok=True)
# Write the dataset to a JSON file
with open("default/train.json", "w") as f:
json.dump(dataset, f, indent=2)
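
# Optional sanity check (an addition, not part of the original script): reload
# the JSON that was just written and confirm every column has the same number
# of rows.
with open("default/train.json") as f:
    loaded = json.load(f)
assert all(len(col) == len(loaded["file_name"]) for col in loaded.values())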

# Total size of all PDFs, computed once and reused below
total_size = sum(dataset["file_size"])

# Update dataset_dict.json with correct information
dataset_dict = {
    "default": {
        "splits": {
            "train": {
                "name": "train",
                "num_bytes": total_size,
                "num_examples": len(dataset["file_name"]),
                "dataset_name": "jfk-files-2025"
            }
        },
        "download_checksums": {},
        "download_size": total_size,
        "post_processing_size": None,
        "dataset_size": total_size,
        "size_in_bytes": total_size
    }
}
# Write the dataset_dict.json file
with open("dataset_dict.json", "w") as f:
json.dump(dataset_dict, f, indent=2)
print(f"Successfully processed {len(dataset['file_name'])} PDF files")
print(f"Total dataset size: {sum(dataset['file_size']) / (1024*1024):.2f} MB")