#!/usr/bin/env python3
import os
import json
import glob
from tqdm import tqdm
# Directory containing the PDF files
data_dir = "data"
# Get all PDF files
pdf_files = glob.glob(os.path.join(data_dir, "*.pdf"))
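# Note: "*.pdf" only matches files directly inside data_dir. If the PDFs can
# sit in subfolders, a recursive pattern such as
#   glob.glob(os.path.join(data_dir, "**", "*.pdf"), recursive=True)
# would pick them up as well.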
# Create dataset structure
dataset = {
    "file_name": [],
    "file_path": [],
    "file_size": [],
    "content_type": []
}
# Add information for each PDF file
for pdf_file in tqdm(pdf_files, desc="Processing PDF files"):
    file_name = os.path.basename(pdf_file)
    file_path = pdf_file
    file_size = os.path.getsize(pdf_file)
    dataset["file_name"].append(file_name)
    dataset["file_path"].append(file_path)
    dataset["file_size"].append(file_size)
    dataset["content_type"].append("application/pdf")
# Create the default directory if it doesn't exist
os.makedirs("default", exist_ok=True)
# Write the dataset to a JSON file
with open("default/train.json", "w") as f:
json.dump(dataset, f, indent=2)
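# Sketch: train.json is column-oriented (one list per field), so it can also
# be turned into a Hugging Face Dataset directly, assuming the `datasets`
# library is available:
#   from datasets import Dataset
#   ds = Dataset.from_dict(dataset)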
# Update dataset_dict.json with correct information; compute the total size
# once and reuse it below
total_size = sum(dataset["file_size"])
dataset_dict = {
    "default": {
        "splits": {
            "train": {
                "name": "train",
                "num_bytes": total_size,
                "num_examples": len(dataset["file_name"]),
                "dataset_name": "jfk-files-2025"
            }
        },
        "download_checksums": {},
        "download_size": total_size,
        "post_processing_size": None,
        "dataset_size": total_size,
        "size_in_bytes": total_size
    }
}
# Write the dataset_dict.json file
with open("dataset_dict.json", "w") as f:
json.dump(dataset_dict, f, indent=2)
print(f"Successfully processed {len(dataset['file_name'])} PDF files")
print(f"Total dataset size: {sum(dataset['file_size']) / (1024*1024):.2f} MB")