import csv
import random
from itertools import combinations
from pathlib import Path
from typing import Any, Dict, List, Union

import datasets
import numpy as np
import pandas as pd

LANGS = [
    "ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab",
    "aka_Latn", "als_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "arb_Latn", "ars_Arab",
    "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab",
    "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng",
    "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl",
    "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn",
    "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn",
    "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "fij_Latn", "fin_Latn",
    "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gaz_Latn", "gla_Latn", "gle_Latn",
    "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva",
    "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn",
    "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn",
    "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "kaz_Cyrl", "kbp_Latn", "kea_Latn",
    "khk_Cyrl", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kmr_Latn",
    "knc_Arab", "knc_Latn", "kon_Latn", "kor_Hang", "lao_Laoo", "lij_Latn", "lim_Latn",
    "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn",
    "luo_Latn", "lus_Latn", "lvs_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva",
    "min_Arab", "min_Latn", "mkd_Cyrl", "mlt_Latn", "mni_Beng", "mos_Latn", "mri_Latn",
    "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nqo_Nkoo", "nso_Latn",
    "nus_Latn", "nya_Latn", "oci_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn",
    "pbt_Arab", "pes_Arab", "plt_Latn", "pol_Latn", "por_Latn", "prs_Arab", "quy_Latn",
    "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Olck", "scn_Latn",
    "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab",
    "som_Latn", "sot_Latn", "spa_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn",
    "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "taq_Latn", "taq_Tfng", "tat_Cyrl",
    "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "tpi_Latn", "tsn_Latn",
    "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab",
    "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn",
    "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant",
    "zsm_Latn", "zul_Latn",
]

# Resolve the script's directory; fall back to the working directory when
# __file__ is undefined (e.g., in an interactive session).
try:
    cwd = Path(__file__).parent
except NameError:
    cwd = Path.cwd()

SEED: int = 42
N: int = 1004
UPSAMPLING_FACTOR: int = 3
NUM_NEGATIVES: int = 3
NUM_REFERENCES: int = 5
NUM_EXAMPLES_PER_OPTION: int = 1

CATEGORIES: List[str] = [
    "entertainment",
    "geography",
    "health",
    "politics",
    "science",
    "sports",
    "travel",
]

_SIB_URL: str = "https://huggingface.co/datasets/wuenlp/mvl-sib200/resolve/main/data/sib200/{lang}/{split}.tsv"
_IMG_URL: str = "https://huggingface.co/datasets/wuenlp/mvl-sib200/resolve/main/data/images/sib200/{category}_{no}.jpg"
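
# Illustration (not executed): the templates above expand like
#   _SIB_URL.format(lang="eng_Latn", split="test")
#     -> ".../data/sib200/eng_Latn/test.tsv"
#   _IMG_URL.format(category="science", no=3)
#     -> ".../data/images/sib200/science_3.jpg"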

_DESCRIPTION: str = (
    "MVLSIB is a multilingual dataset that provides sentence-image pairs "
    "spanning many languages and topical categories. It supports tasks such "
    "as multimodal topic classification and cross-lingual, cross-modal "
    "retrieval. Each row contains a textual entry (sentence) with its "
    "category, and the dataset includes image references for the same set "
    "of categories."
)

def read_tsv_to_dict_list(file_path: Union[str, Path]) -> List[Dict[str, Any]]:
    """
    Read a TSV file with columns 'index_id', 'category', and 'text' into a
    list of dictionaries.

    The TSV is expected to have the following columns (in order):
        1. index_id
        2. category
        3. text

    Parameters
    ----------
    file_path : Union[str, Path]
        The path to the TSV file.

    Returns
    -------
    List[Dict[str, Any]]
        A list of dictionaries, where each element has keys:
        - 'index_id': int
        - 'category': str
        - 'text': str

    Raises
    ------
    ValueError
        If the TSV headers do not match the expected format.
    """
    data: List[Dict[str, Any]] = []
    expected_headers = ["index_id", "category", "text"]

    with open(file_path, mode="r", encoding="utf-8") as tsvfile:
        reader = csv.DictReader(tsvfile, delimiter="\t")

        if reader.fieldnames != expected_headers:
            raise ValueError(
                f"Expected headers {expected_headers}, but got {reader.fieldnames}"
            )

        for row in reader:
            # Skip blank rows and stray repeats of the header row.
            if all(
                (row[key].strip() == key) or (row[key].strip() == "")
                for key in expected_headers
            ):
                continue

            index_id = int(row["index_id"])
            category = row["category"].strip()
            text = row["text"].strip()

            data.append({"index_id": index_id, "category": category, "text": text})

    return data
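
# A minimal usage sketch (hypothetical path; not executed on import):
#
#   rows = read_tsv_to_dict_list("sib200/eng_Latn/test.tsv")
#   rows[0]
#   # -> {"index_id": 0, "category": "geography", "text": "..."}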

def read_lang_tsv(filepaths: List[str]) -> List[Dict[str, Any]]:
    """
    Read a list of TSV file paths containing SIB data in the same language
    and merge them into a single, sorted list of dictionaries.

    Specifically:
        1. Calls `read_tsv_to_dict_list` for each file path.
        2. Merges all resulting dictionaries.
        3. Sorts by 'index_id'.

    Also normalizes the category "science/technology" to "science" for
    internal consistency.

    Parameters
    ----------
    filepaths : List[str]
        A list of TSV file paths for a specific language.

    Returns
    -------
    List[Dict[str, Any]]
        A list of dictionaries sorted by 'index_id' with normalized categories.
    """
    dicos = [read_tsv_to_dict_list(path) for path in filepaths]

    out: List[Dict[str, Any]] = sorted(
        [line for dico in dicos for line in dico], key=lambda row: row["index_id"]
    )

    for line in out:
        if line["category"] == "science/technology":
            line["category"] = "science"
    return out
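
# Usage sketch (hypothetical file names): the train/dev/test splits of one
# language are merged into a single list sorted by 'index_id'.
#
#   records = read_lang_tsv(["train.tsv", "dev.tsv", "test.tsv"])
#   records[0]["index_id"]  # -> smallest index_id across all three files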

def replicate_and_negatives(
    df: pd.DataFrame,
    num_replicates: int = 3,
    num_negatives: int = 4,
    num_positives: int = 4,
    seed: int = 42,
) -> pd.DataFrame:
    """
    Create multiple replicated rows from the input DataFrame `df` and
    sample negative and positive examples for each row.

    *Negative* samples are drawn from rows whose category is different
    from the row's category. **Additionally, each negative example for
    a given row is drawn from a distinct category among the negatives,
    if there are enough categories to do so without replacement.**

    *Positive* samples are drawn from rows of the same category (excluding
    the row's own 'index_id').

    Parameters
    ----------
    df : pd.DataFrame
        The original input DataFrame with columns ['index_id', 'category', 'text'].
    num_replicates : int, optional
        Number of times to replicate each row, by default 3.
    num_negatives : int, optional
        Number of negative samples to pick for each row, by default 4.
    num_positives : int, optional
        Number of positive samples to pick for each row, by default 4.
    seed : int, optional
        Seed for random operations, by default 42.

    Returns
    -------
    pd.DataFrame
        A new DataFrame containing replicated rows plus columns:
        - neg_id_i, neg_cat_i, neg_text_i for i in [0 .. num_negatives-1]
        - pos_id_i, pos_cat_i, pos_text_i for i in [0 .. num_positives-1]

    Notes
    -----
    - Negative examples for a row are taken from distinct categories
      (other than the row's category) if enough categories exist. If
      fewer categories exist than `num_negatives`, we sample categories
      with replacement, so some duplicates may appear.
    - Positive sampling excludes the row's own 'index_id'.
      If there are fewer available positives than `num_positives`,
      we sample with replacement.
    """
    rng = np.random.default_rng(seed=seed)

    # Replicate every row `num_replicates` times.
    df_new = pd.concat([df] * num_replicates, ignore_index=True)

    # Pre-create the output columns.
    for i in range(num_negatives):
        df_new[f"neg_id_{i}"] = None
        df_new[f"neg_cat_{i}"] = None
        df_new[f"neg_text_{i}"] = None

    for i in range(num_positives):
        df_new[f"pos_id_{i}"] = None
        df_new[f"pos_cat_{i}"] = None
        df_new[f"pos_text_{i}"] = None

    # Index the replicated rows by category for negative sampling.
    unique_cats = df_new["category"].unique()
    cat_to_df: Dict[str, pd.DataFrame] = {}
    for c in unique_cats:
        cat_to_df[c] = df_new[df_new["category"] == c].reset_index(drop=True)

    # Positive candidates come from the *original* (non-replicated) rows.
    pos_pool_by_cat = {}
    for c in unique_cats:
        pos_pool_by_cat[c] = df.loc[
            df["category"] == c, ["index_id", "category", "text"]
        ].reset_index(drop=True)

    grouped = df_new.groupby("category", group_keys=False)
    output_chunks: List[pd.DataFrame] = []

    for cat, group_df in grouped:
        g_size = len(group_df)

        # Per-group buffers for the sampled negatives and positives.
        neg_id_cols = [np.empty(g_size, dtype=object) for _ in range(num_negatives)]
        neg_cat_cols = [np.empty(g_size, dtype=object) for _ in range(num_negatives)]
        neg_text_cols = [np.empty(g_size, dtype=object) for _ in range(num_negatives)]

        pos_id_cols = [np.empty(g_size, dtype=object) for _ in range(num_positives)]
        pos_cat_cols = [np.empty(g_size, dtype=object) for _ in range(num_positives)]
        pos_text_cols = [np.empty(g_size, dtype=object) for _ in range(num_positives)]

        # All categories other than the row's own are negative candidates.
        negative_candidate_cats = [c for c in unique_cats if c != cat]

        row_ids_for_group = group_df["index_id"].to_numpy()
        for i_row in range(g_size):
            row_id = row_ids_for_group[i_row]

            # Draw distinct negative categories when possible; fall back to
            # sampling with replacement when there are too few categories.
            replace_for_cats = len(negative_candidate_cats) < num_negatives
            chosen_neg_cats = rng.choice(
                negative_candidate_cats, size=num_negatives, replace=replace_for_cats
            )

            # Pick one random row from each chosen negative category.
            for j, neg_cat in enumerate(chosen_neg_cats):
                neg_pool = cat_to_df[neg_cat]
                pick_idx = rng.integers(len(neg_pool))
                neg_id_cols[j][i_row] = neg_pool["index_id"].iloc[pick_idx]
                neg_cat_cols[j][i_row] = neg_pool["category"].iloc[pick_idx]
                neg_text_cols[j][i_row] = neg_pool["text"].iloc[pick_idx]

            # Positive candidates: same category, excluding the row itself.
            pos_pool_cat = pos_pool_by_cat[cat]
            valid_mask = pos_pool_cat["index_id"] != row_id
            valid_pos_pool = pos_pool_cat[valid_mask]

            replace_pos_for_row = len(valid_pos_pool) < num_positives

            if len(valid_pos_pool) == 0:
                # Degenerate case: the row is the only member of its category,
                # so fall back to the full pool and sample with replacement.
                valid_pos_pool = pos_pool_cat
                replace_pos_for_row = True

            valid_idx_array = valid_pos_pool.index.to_numpy()
            chosen_indices = rng.choice(
                valid_idx_array, size=num_positives, replace=replace_pos_for_row
            )
            for j in range(num_positives):
                pick_idx = chosen_indices[j]
                pos_id_cols[j][i_row] = valid_pos_pool["index_id"].loc[pick_idx]
                pos_cat_cols[j][i_row] = valid_pos_pool["category"].loc[pick_idx]
                pos_text_cols[j][i_row] = valid_pos_pool["text"].loc[pick_idx]

        # Attach the sampled columns to this group's rows.
        for j in range(num_negatives):
            group_df[f"neg_id_{j}"] = neg_id_cols[j]
            group_df[f"neg_cat_{j}"] = neg_cat_cols[j]
            group_df[f"neg_text_{j}"] = neg_text_cols[j]

        for j in range(num_positives):
            group_df[f"pos_id_{j}"] = pos_id_cols[j]
            group_df[f"pos_cat_{j}"] = pos_cat_cols[j]
            group_df[f"pos_text_{j}"] = pos_text_cols[j]

        output_chunks.append(group_df)

    # Restore the original row order of the replicated frame.
    df_out = pd.concat(output_chunks, axis=0)
    df_out.sort_index(inplace=True)
    return df_out
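
# Sketch of the resulting schema (with the values the builder passes below):
#
#   ext = replicate_and_negatives(df, num_replicates=3, num_negatives=3,
#                                 num_positives=4, seed=42)
#   # len(ext) == 3 * len(df); columns include 'neg_text_0'..'neg_text_2'
#   # and 'pos_text_0'..'pos_text_3' alongside the original columns.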

def get_reference_image_ids(
    N: int, num_images: int, k: int, seed: int
) -> List[List[int]]:
    """
    Generate reference image ID combinations for each row in a dataset of size N.

    We pick k-combinations from the range [0 .. num_images-1]. Then we sample
    from these combinations (with replacement) for each of N rows, and shuffle
    them in a reproducible manner.

    Parameters
    ----------
    N : int
        Number of rows in the dataset.
    num_images : int
        Total number of images available per category.
    k : int
        Number of images to select in each combination.
    seed : int
        Global seed for random operations.

    Returns
    -------
    List[List[int]]
        A list of length N, where each element is a list of k unique image IDs.

    Notes
    -----
    - We use Python's `random.choices` to draw from all possible k-combinations.
    - Each combination is then locally shuffled to remove ordering biases.
    """
    all_combinations = list(combinations(range(0, num_images), k))
    random.seed(seed)
    sampled_combinations = [list(x) for x in random.choices(all_combinations, k=N)]

    # Re-seed per row so each local shuffle is reproducible.
    for i, combination in enumerate(sampled_combinations):
        random.seed(seed + i)
        random.shuffle(combination)
    return sampled_combinations
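
# Usage sketch: k distinct IDs per row, drawn from `num_images` images.
#
#   get_reference_image_ids(N=2, num_images=10, k=5, seed=42)
#   # -> e.g. [[7, 0, 3, 9, 2], [1, 8, 4, 6, 5]]  (values depend on the seed)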

class MVLSIBConfig(datasets.BuilderConfig):
    """
    Configuration class for the MVLSIB (Multilingual Visual Language SIB) dataset.

    Parameters
    ----------
    name : str
        The configuration name, in the format "task.lang"
        (e.g., "img2sent.eng_Latn").
    upsampling_factor : int, optional
        How many times to replicate each row for additional sampling variety,
        default: 3.
    num_references : int, optional
        Number of positive references to sample for each row, default: 5.
    num_negatives : int, optional
        Number of negative samples to pair with each row, default: 3.
    seed : int, optional
        Seed for random operations, default: 42.
    """

    def __init__(
        self,
        name: str,
        upsampling_factor: int = UPSAMPLING_FACTOR,
        num_references: int = NUM_REFERENCES,
        num_negatives: int = NUM_NEGATIVES,
        seed: int = SEED,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.name: str = name
        self.task, self.lang = name.split(".")
        self.upsampling_factor: int = upsampling_factor
        self.num_references: int = num_references
        self.num_negatives: int = num_negatives
        self.seed: int = seed

def _builder_configs() -> List[MVLSIBConfig]:
    """
    Internal helper to build the list of MVLSIBConfig objects
    for all tasks ('img2sent', 'sent2img') and all available languages in LANGS.

    Returns
    -------
    List[MVLSIBConfig]
        A list of dataset configuration objects, each specifying a
        (task, language) pair.
    """
    configs: List[MVLSIBConfig] = []
    for task in ("img2sent", "sent2img"):
        for lang in LANGS:
            cfg = MVLSIBConfig(
                name=f"{task}.{lang}",
                version=datasets.Version("1.0.0"),
                description=f"MVLSIB: {task}.{lang}",
            )
            configs.append(cfg)
    return configs
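
# Illustration: with the 205 entries in LANGS and two tasks, this yields 410
# configurations, named like "img2sent.eng_Latn" and "sent2img.eng_Latn".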

class MVLSIB(datasets.GeneratorBasedBuilder):
    """
    MVLSIB is a multilingual dataset that provides matched
    (sentence -> image) or (image -> sentence) examples for
    classification or retrieval tasks.

    Each configuration is specified by a task (img2sent or sent2img)
    and a language code, e.g. 'img2sent.eng_Latn'.

    The dataset is structured such that each row includes:
    - A set of reference items (images or sentences, depending on the task).
    - A set of 4 possible answers (1 positive, 3 negative).
    - A label indicating which of the 4 answers is correct.
    """

    BUILDER_CONFIGS = _builder_configs()
    BUILDER_CONFIG_CLASS = MVLSIBConfig

    def _info(self) -> datasets.DatasetInfo:
        """
        Return the dataset metadata, including features.

        The dataset has two major tasks:
        - 'img2sent': Given reference images, choose the best matching sentence.
        - 'sent2img': Given reference sentences, choose the best matching image.

        Each example row in 'img2sent' includes:
        - images (list of str URLs to images)
        - sentences (list of str, one positive, three negatives)
        - categories (list of str categories matching each sentence)
        - label (int specifying which of the sentences is correct)
        - id (an integer ID)
        - index_id (the original row ID from the SIB .tsv)

        Each example row in 'sent2img' includes:
        - sentences (list of str, the positive reference sentences)
        - images (list of str URLs to images, one positive, three negatives)
        - categories (list of str categories matching each image)
        - label (int specifying which of the images is correct)
        - id (an integer ID)
        - index_id (the original row ID from the SIB .tsv)

        Returns
        -------
        datasets.DatasetInfo
            The Hugging Face DatasetInfo object describing the dataset features,
            licensing, homepage, citation, etc.
        """
        from datasets import DatasetInfo, Features, Sequence, Value

        img2sents = Features(
            {
                "images": Sequence(Value("string")),
                "sentences": Sequence(Value("string")),
                "categories": Sequence(Value("string")),
                "label": Value("int8"),
                "id": Value("int64"),
                "index_id": Value("int64"),
            }
        )
        sent2imgs = Features(
            {
                "sentences": Sequence(Value("string")),
                "images": Sequence(Value("string")),
                "categories": Sequence(Value("string")),
                "label": Value("int8"),
                "id": Value("int64"),
                "index_id": Value("int64"),
            }
        )

        features = {
            "img2sent": img2sents,
            "sent2img": sent2imgs,
        }

        return DatasetInfo(
            description=_DESCRIPTION,
            features=features[self.config.task],
            supervised_keys=None,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager, *args: Any, **kwargs: Any
    ) -> List[datasets.SplitGenerator]:
        """
        Define the splits of the dataset. Only a single 'test' split is
        produced here, but in principle train/dev/test or others could be
        defined.

        Parameters
        ----------
        dl_manager : datasets.DownloadManager
            The Hugging Face DownloadManager used to download files.

        Returns
        -------
        List[datasets.SplitGenerator]
            A list of SplitGenerator objects. Each defines a split name
            and a gen_kwargs dict for the `_generate_examples` method.
        """
        # All three SIB splits of this language are merged into one split.
        files = dl_manager.download(
            [
                _SIB_URL.format(lang=self.config.lang, split=split)
                for split in ("train", "dev", "test")
            ]
        )

        # Download the 10 images available for each category.
        images: Dict[str, List[str]] = {}
        for cat in CATEGORIES:
            images[cat] = []
            for i in range(10):
                images[cat].append(
                    dl_manager.download(_IMG_URL.format(category=cat, no=i))
                )

        return [
            datasets.SplitGenerator(
                name="test",
                gen_kwargs={"sib_filepaths": files, "images_filepaths": images},
            ),
        ]

    def _generate_examples(
        self,
        sib_filepaths: List[str],
        images_filepaths: Dict[str, List[str]],
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """
        Generator function that yields dataset examples in the format needed
        by Hugging Face Datasets.

        Depending on the task (img2sent or sent2img), the function constructs
        examples where:
        - img2sent: reference images, 4 candidate sentences (1 positive, 3 negative)
        - sent2img: reference sentences, 4 candidate images (1 positive, 3 negative)

        Parameters
        ----------
        sib_filepaths : List[str]
            The downloaded .tsv file paths (train/dev/test) for the specified
            language.
        images_filepaths : Dict[str, List[str]]
            A dictionary from category -> list of 10 image URLs, as downloaded
            in `_split_generators`.

        Yields
        ------
        Tuple[int, Dict[str, Any]]
            A tuple where the first element is an integer index,
            and the second is a dictionary matching the features specification
            of the dataset.
        """
        records = read_lang_tsv(sib_filepaths)
        df = pd.DataFrame.from_records(records)

        # Replicate rows and attach negatives plus (num_references - 1)
        # additional positives per row.
        ext_df = replicate_and_negatives(
            df,
            num_replicates=self.config.upsampling_factor,
            num_negatives=self.config.num_negatives,
            num_positives=self.config.num_references - 1,
            seed=self.config.seed,
        )

        sent_ids = list(range(self.config.num_negatives + 1))
        N = len(ext_df)
        num_images = len(next(iter(images_filepaths.values())))

        if self.config.task == "img2sent":
            # Reference images: num_references distinct image IDs per row.
            image_ids = get_reference_image_ids(
                N=N,
                num_images=num_images,
                k=self.config.num_references,
                seed=self.config.seed,
            )
            for i, row in ext_df.iterrows():
                # Candidate sentences: the positive first, then the negatives.
                text = [row["text"]]
                categories = [row["category"]]
                for j in range(self.config.num_negatives):
                    text.append(row[f"neg_text_{j}"])
                    categories.append(row[f"neg_cat_{j}"])

                # Shuffle the candidate order reproducibly; the positive
                # (originally at position 0) lands at index sent_ids[0].
                random.seed(i)
                random.shuffle(sent_ids)
                label = sent_ids[0]

                _, categories_shuffled = zip(*sorted(zip(sent_ids, categories)))
                _, sentences_shuffled = zip(*sorted(zip(sent_ids, text)))

                # Reference images all come from the row's own category.
                row_image_ids = image_ids[i]
                cat = row["category"]
                cat_images = images_filepaths[cat]
                row_images = [
                    cat_images[row_image_ids[j]]
                    for j in range(self.config.num_references)
                ]

                yield (
                    i,
                    {
                        "id": i,
                        "index_id": row["index_id"],
                        "images": row_images,
                        "categories": categories_shuffled,
                        "sentences": sentences_shuffled,
                        "label": label,
                    },
                )
        else:
            # sent2img: draw one random image ID per candidate category.
            rng = np.random.default_rng(seed=self.config.seed)
            choice_image_ids = rng.integers(
                0, num_images, (N, 1 + self.config.num_negatives)
            ).tolist()

            for i, row in ext_df.iterrows():
                # Reference sentences: the row's text plus its sampled positives.
                pos_text = [row["text"]]

                cats = [row["category"]]
                for j in range(self.config.num_negatives):
                    cats.append(row[f"neg_cat_{j}"])
                for j in range(self.config.num_references - 1):
                    pos_text.append(row[f"pos_text_{j}"])

                random.seed(i)
                random.shuffle(sent_ids)
                label = sent_ids[0]

                # Shuffle candidate categories; the positive category lands
                # at index `label`.
                _, categories_shuffled = zip(*sorted(zip(sent_ids, cats)))

                # One image per candidate category.
                row_image_ids = choice_image_ids[i]
                row_images = [
                    images_filepaths[cat][idx]
                    for idx, cat in zip(row_image_ids, categories_shuffled)
                ]

                yield (
                    i,
                    {
                        "id": i,
                        "index_id": row["index_id"],
                        "images": row_images,
                        "categories": categories_shuffled,
                        "sentences": pos_text,
                        "label": label,
                    },
                )
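
# Hedged usage sketch (assumes this script serves as the loader for the
# wuenlp/mvl-sib200 repository on the Hugging Face Hub):
#
#   import datasets
#   ds = datasets.load_dataset("wuenlp/mvl-sib200", "img2sent.eng_Latn",
#                              split="test")
#   ds[0]["label"]  # index of the correct candidate among the 4 options


if __name__ == "__main__":
    # Minimal smoke test of the sampling helpers (a sketch; runs offline and
    # does not exercise the builder itself). The toy frame below is ours,
    # not part of the dataset.
    _toy = pd.DataFrame(
        {
            "index_id": list(range(8)),
            "category": ["science", "sports"] * 4,
            "text": [f"sentence {i}" for i in range(8)],
        }
    )
    _ext = replicate_and_negatives(
        _toy, num_replicates=2, num_negatives=1, num_positives=2, seed=0
    )
    print(_ext.filter(regex="^(index_id|category|neg_cat_0|pos_id_)").head())
    print(get_reference_image_ids(N=2, num_images=10, k=5, seed=0))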
|