import csv
import datasets


_URLs = {
    'train': "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/train.csv",
    'validation': "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/dev.csv",
    'test': "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/test.csv",
}
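
# Each CSV file is expected to carry the same header: four Indonesian context
# sentences (columns Kalimat-1 .. Kalimat-4) plus the "Correct Ending" and
# "Incorrect Ending" columns, as consumed in _generate_examples below.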

_CITATION = """\
@inproceedings{koto-etal-2022-cloze,
    title = "Cloze Evaluation for Deeper Understanding of Commonsense Stories in {I}ndonesian",
    author = "Koto, Fajri  and
      Baldwin, Timothy  and
      Lau, Jey Han",
    booktitle = "Proceedings of the First Workshop on Commonsense Representation and Reasoning (CSRR 2022)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.csrr-1.2",
    doi = "10.18653/v1/2022.csrr-1.2",
    pages = "8--16",
}"""

class IndoStoryClozeConfig(datasets.BuilderConfig):
    """BuilderConfig for IndoStoryCloze."""

    def __init__(self, **kwargs):
        """BuilderConfig for IndoStoryCloze.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.0: Release version
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = ['sentence-1', 'sentence-2', 'sentence-3', 'sentence-4', 'correct_ending', 'incorrect_ending']

class IndoStoryCloze(datasets.GeneratorBasedBuilder):
    """The IndoStoryCloze dataset."""

    BUILDER_CONFIGS = [IndoStoryClozeConfig()]

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}

        return datasets.DatasetInfo(
            description="IndoStoryCloze: a story cloze dataset for evaluating commonsense understanding of Indonesian stories.",
            features=datasets.Features(features),
            homepage='https://github.com/fajri91/IndoCloze',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLs)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": downloaded_file['train']}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_file": downloaded_file['validation']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_file": downloaded_file['test']}),
        ]

    def _generate_examples(self, data_file):
        # Map the Indonesian CSV column names onto the English feature names
        # declared in the config; open the file in a context manager so the
        # handle is closed once the split has been generated.
        with open(data_file, newline='', encoding='utf-8') as f:
            data = csv.DictReader(f)
            for i, row in enumerate(data):
                yield i, {
                    "sentence-1": row['Kalimat-1'],
                    "sentence-2": row['Kalimat-2'],
                    "sentence-3": row['Kalimat-3'],
                    "sentence-4": row['Kalimat-4'],
                    "correct_ending": row['Correct Ending'],
                    "incorrect_ending": row['Incorrect Ending'],
                }
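
# Example usage (a sketch, not part of the loading script itself): assuming the
# script is published on the Hub under the repository id
# "indolem/indo_story_cloze" (taken from the _URLs above), the three splits can
# be loaded and inspected with:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("indolem/indo_story_cloze")
#     example = dataset["test"][0]
#     print(example["sentence-1"], example["correct_ending"])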