# convert.py: convert a Wikidata JSON dump into a Parquet file
from qwikidata.entity import WikidataItem
from qwikidata.json_dump import WikidataJsonDump
import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd
# create an instance of WikidataJsonDump
wjd_dump_path = "wikidata-20240304-all.json.bz2"
wjd = WikidataJsonDump(wjd_dump_path)
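# Note: WikidataJsonDump is iterable and yields one parsed entity dict at a
# time, so the dump can be scanned incrementally rather than decompressed and
# loaded all at once.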
# Create an empty list to store the dictionaries
# data = []
# # Iterate over the entities in wjd and add them to the list
# for ii, entity_dict in enumerate(wjd):
#     if ii > 1:
#         break
#     if entity_dict["type"] == "item":
#         data.append(entity_dict)
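# The WikidataItem import above wraps a raw entity dict for typed access.
# A usage sketch based on qwikidata's documented API (not needed for the
# conversion below):
# item = WikidataItem(entity_dict)
# print(item.entity_id, item.get_label())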
# TODO: Schema for Data Set
# Create a schema for the table
# {
#     "id": "Q60",
#     "type": "item",
#     "labels": {},
#     "descriptions": {},
#     "aliases": {},
#     "claims": {},
#     "sitelinks": {},
#     "lastrevid": 195301613,
#     "modified": "2020-02-10T12:42:02Z"
# }
# schema = pa.schema([
#     ("id", pa.string()),
#     ("type", pa.string()),
#     # {
#     #     "labels": {
#     #         "en": {
#     #             "language": "en",
#     #             "value": "New York City"
#     #         },
#     #         "ar": {
#     #             "language": "ar",
#     #             "value": "\u0645\u062f\u064a\u0646\u0629 \u0646\u064a\u0648 \u064a\u0648\u0631\u0643"
#     #         }
#     #     }
#     ("labels", pa.map_(pa.string(), pa.struct([
#         ("language", pa.string()),
#         ("value", pa.string())
#     ]))),
# # "descriptions": {
# # "en": {
# # "language": "en",
# # "value": "largest city in New York and the United States of America"
# # },
# # "it": {
# # "language": "it",
# # "value": "citt\u00e0 degli Stati Uniti d'America"
# # }
# # }
# ("descriptions", pa.map_(pa.string(), pa.struct([
# ("language", pa.string()),
# ("value", pa.string())
# ]))),
# # "aliases": {
# # "en": [
# # {
# # "language": "en",pa.string
# # "value": "New York"
# # }
# # ],
# # "fr": [
# # {
# # "language": "fr",
# # "value": "New York City"
# # },
# # {
# # "language": "fr",
# # "value": "NYC"
# # },
# # {
# # "language": "fr",
# # "value": "The City"
# # },
# # {
# # "language": "fr",
# # "value": "La grosse pomme"
# # }
# # ]
# # }
# # }
# ("aliases", pa.map_(pa.string(), pa.struct([
# ("language", pa.string()),
# ("value", pa.string())
# ]))),
#     # {
#     #     "claims": {
#     #         "P17": [
#     #             {
#     #                 "id": "q60$5083E43C-228B-4E3E-B82A-4CB20A22A3FB",
#     #                 "mainsnak": {},
#     #                 "type": "statement",
#     #                 "rank": "normal",
#     #                 "qualifiers": {
#     #                     "P580": [],
#     #                     "P5436": []
#     #                 },
#     #                 "references": [
#     #                     {
#     #                         "hash": "d103e3541cc531fa54adcaffebde6bef28d87d32",
#     #                         "snaks": []
#     #                     }
#     #                 ]
#     #             }
#     #         ]
#     #     }
#     # }
#     # Each property key maps to a list of statements. pa.list_ (not
#     # pa.array, which builds data rather than a type) describes the list
#     # values; the empty structs are placeholders for the snak fields (TODO).
#     ("claims", pa.map_(pa.string(), pa.list_(pa.struct([
#         ("id", pa.string()),
#         ("mainsnak", pa.struct([])),
#         ("type", pa.string()),
#         ("rank", pa.string()),
#         ("qualifiers", pa.map_(pa.string(), pa.list_(pa.struct([])))),
#         ("references", pa.list_(pa.struct([
#             ("hash", pa.string()),
#             ("snaks", pa.list_(pa.struct([])))
#         ])))
#     ])))),
# ("sitelinks", pa.struct([
# ("site", pa.string()),
# ("title", pa.string())
# ])),
# ("lastrevid", pa.int64()),
# ("modified", pa.string())
# ])
# Create a table from the list of dictionaries and the schema
# table = pa.Table.from_pandas(pd.DataFrame(data), schema=schema)
# For now, build the table directly from the dump iterator with an inferred
# schema; note that pd.DataFrame(wjd) materializes every entity in memory.
table = pa.Table.from_pandas(pd.DataFrame(wjd))
# Write the table to disk as parquet
parquet_path = "wikidata-20240304-all.parquet"
pq.write_table(table, parquet_path)
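# A memory-friendlier alternative (sketch only, not used above): stream the
# dump in fixed-size batches and append each batch to a Parquet file with
# pyarrow.parquet.ParquetWriter. The batch size and output path below are
# illustrative, and the schema is inferred from the first batch; in practice
# an explicit schema (see the TODO above) avoids inference mismatches
# between batches.
#
# batch_size = 10_000
# batch, writer = [], None
# for entity_dict in WikidataJsonDump(wjd_dump_path):
#     batch.append(entity_dict)
#     if len(batch) >= batch_size:
#         chunk = pa.Table.from_pylist(batch)
#         if writer is None:
#             writer = pq.ParquetWriter("wikidata-20240304-all-streamed.parquet", chunk.schema)
#         writer.write_table(chunk)
#         batch = []
# if batch:
#     chunk = pa.Table.from_pylist(batch)
#     if writer is None:
#         writer = pq.ParquetWriter("wikidata-20240304-all-streamed.parquet", chunk.schema)
#     writer.write_table(chunk)
# if writer is not None:
#     writer.close()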