Varun Hemachandran committed
Commit a16b08b · 1 Parent(s): 0cd38af

Update dataset files with actual PDF metadata

dataset_dict.json CHANGED
@@ -3,15 +3,15 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes": 0,
-        "num_examples": 0,
+        "num_bytes": 6468037461,
+        "num_examples": 2181,
         "dataset_name": "jfk-files-2025"
       }
     },
     "download_checksums": {},
-    "download_size": 0,
+    "download_size": 6468037461,
     "post_processing_size": null,
-    "dataset_size": 0,
-    "size_in_bytes": 0
+    "dataset_size": 6468037461,
+    "size_in_bytes": 6468037461
   }
-}
+}
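
For scale, the updated metadata records 2,181 PDF files totalling 6,468,037,461 bytes, i.e. roughly 6.0 GiB (6468037461 / 1024**3 ≈ 6.02).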
default/train.json CHANGED
The diff for this file is too large to render. See raw diff
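
From generate_dataset_files.py below, train.json should be a single JSON object holding four parallel columns, one entry per PDF. A small sketch of the expected layout (the file names and sizes here are illustrative placeholders, not actual entries):

{
  "file_name": ["doc-0001.pdf", "doc-0002.pdf"],
  "file_path": ["data/doc-0001.pdf", "data/doc-0002.pdf"],
  "file_size": [1048576, 2097152],
  "content_type": ["application/pdf", "application/pdf"]
}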
 
generate_dataset_files.py ADDED
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+import os
+import json
+import glob
+from tqdm import tqdm
+
+# Directory containing the PDF files
+data_dir = "data"
+
+# Get all PDF files
+pdf_files = glob.glob(os.path.join(data_dir, "*.pdf"))
+
+# Create dataset structure
+dataset = {
+    "file_name": [],
+    "file_path": [],
+    "file_size": [],
+    "content_type": []
+}
+
+# Add information for each PDF file
+for pdf_file in tqdm(pdf_files, desc="Processing PDF files"):
+    file_name = os.path.basename(pdf_file)
+    file_path = pdf_file
+    file_size = os.path.getsize(pdf_file)
+
+    dataset["file_name"].append(file_name)
+    dataset["file_path"].append(file_path)
+    dataset["file_size"].append(file_size)
+    dataset["content_type"].append("application/pdf")
+
+# Create the default directory if it doesn't exist
+os.makedirs("default", exist_ok=True)
+
+# Write the dataset to a JSON file
+with open("default/train.json", "w") as f:
+    json.dump(dataset, f, indent=2)
+
+# Update dataset_dict.json with correct information
+dataset_dict = {
+    "default": {
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": sum(dataset["file_size"]),
+                "num_examples": len(dataset["file_name"]),
+                "dataset_name": "jfk-files-2025"
+            }
+        },
+        "download_checksums": {},
+        "download_size": sum(dataset["file_size"]),
+        "post_processing_size": None,
+        "dataset_size": sum(dataset["file_size"]),
+        "size_in_bytes": sum(dataset["file_size"])
+    }
+}
+
+# Write the dataset_dict.json file
+with open("dataset_dict.json", "w") as f:
+    json.dump(dataset_dict, f, indent=2)
+
+print(f"Successfully processed {len(dataset['file_name'])} PDF files")
+print(f"Total dataset size: {sum(dataset['file_size']) / (1024*1024):.2f} MB")