khalidalt committed
Commit 3181bf5 · verified · 1 Parent(s): 59d9aa4

Delete loading script

Files changed (1)
  1. subscene.py +0 -220
subscene.py DELETED
@@ -1,220 +0,0 @@
- #Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
-
- import csv
- import json
- import os
-
- import datasets
- import gzip
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @misc{refine-ai,
- author = {Refine AI},
- title = {Subscene: A Large-Scale Multilingual Subtitle Dataset},
- year = {2024},
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- Subscene is a vast collection of multilingual subtitles, encompassing 65 different languages and consisting of more than 30 billion tokens with a total size of 410.70 GB. This dataset includes subtitles for movies, series, and animations gathered from the Subscene dump. It provides a rich resource for studying language variations and building multilingual NLP models. We have carefully applied a fastText classifier to remove any non-language content from incorrect subsets. Additionally, we performed basic cleaning and filtration. However, there is still room for further cleaning and refinement.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://archive.org/details/subscene-final-dump"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in _split_generators method)
- _URLS = "https://huggingface.co/datasets/refine-ai/subscene/resolve/main/{Lang}/{Lang}_subscene_{split}{index}.json.gz"
-
-
- _N_FILES_PER_SPLIT = {
-     # 'albanian': {'train': 1},
-     'arabic': {'train': 33},
-     'armenian': {'train': 1},
-     'azerbaijani': {'train': 1},
-     'basque': {'train': 1},
-     'belarusian': {'train': 1},
-     'bengali': {'train': 2},
-     'traditional_chinese': {'train': 1},
-     'bosnian': {'train': 1},
-     'brazillian_portuguese': {'train': 6},
-     'bulgarian': {'train': 1},
-     'bulgarian_english': {'train': 1},
-     'burmese': {'train': 1},
-     'cambodian_khmer': {'train': 1},
-     'catalan': {'train': 1},
-     'simplified_chinese': {'train': 1},
-     'croatian': {'train': 1},
-     'czech': {'train': 2},
-     'danish': {'train': 8},
-     'dutch': {'train': 4},
-     'dutch_english': {'train': 1},
-     'english': {'train': 82},
-     'english_german': {'train': 1},
-     'esperanto': {'train': 1},
-     'estonian': {'train': 1},
-     'farsi_persian': {'train': 23},
-     'finnish': {'train': 3},
-     'french': {'train': 12},
-     'georgian': {'train': 1},
-     'german': {'train': 2},
-     'greek': {'train': 3},
-     'greenlandic': {'train': 1},
-     'hebrew': {'train': 3},
-     'hindi': {'train': 1},
-     'hungarian': {'train': 1},
-     'hungarian_english': {'train': 1},
-     'icelandic': {'train': 1},
-     'indonesian': {'train': 25},
-     'italian': {'train': 7},
-     'japanese': {'train': 2},
-     'kannada': {'train': 1},
-     'korean': {'train': 3},
-     'kurdish': {'train': 1},
-     'latvian': {'train': 1},
-     'lithuanian': {'train': 1},
-     'macedonian': {'train': 1},
-     'malay': {'train': 4},
-     'malayalam': {'train': 1},
-     'manipuri': {'train': 1},
-     'mongolian': {'train': 1},
-     'nepali': {'train': 1},
-     'norwegian': {'train': 5},
-     'pashto': {'train': 1},
-     'polish': {'train': 2},
-     'portuguese': {'train': 3},
-     'punjabi': {'train': 1},
-     'romanian': {'train': 3},
-     'russian': {'train': 1},
-     'serbian': {'train': 1},
-     'sinhala': {'train': 1},
-     'slovak': {'train': 1},
-     'slovenian': {'train': 1},
-     'somali': {'train': 1},
-     'spanish': {'train': 7},
-     'sundanese': {'train': 1},
-     'swahili': {'train': 1},
-     'swedish': {'train': 5},
-     'tagalog': {'train': 1},
-     'tamil': {'train': 1},
-     'telugu': {'train': 1},
-     'thai': {'train': 4},
-     'turkish': {'train': 5},
-     'ukranian': {'train': 1},
-     'urdu': {'train': 1},
-     'vietnamese': {'train': 10},
-     'yoruba': {'train': 1}
- }
-
- _LangID = ['arabic', 'armenian', 'azerbaijani', 'basque', 'belarusian', 'bengali', 'traditional_chinese', 'bosnian', 'brazillian_portuguese', 'bulgarian', 'bulgarian_english', 'burmese', 'cambodian_khmer', 'catalan', 'simplified_chinese', 'croatian', 'czech', 'danish', 'dutch', 'dutch_english', 'english', 'english_german', 'esperanto', 'estonian', 'farsi_persian', 'finnish', 'french', 'georgian', 'german', 'greek', 'greenlandic', 'hebrew', 'hindi', 'hungarian', 'hungarian_english', 'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'kurdish', 'latvian', 'lithuanian', 'macedonian', 'malay', 'malayalam', 'manipuri', 'mongolian', 'nepali', 'norwegian', 'pashto', 'polish', 'portuguese', 'punjabi', 'romanian', 'russian', 'serbian', 'sinhala', 'slovak', 'slovenian', 'somali', 'spanish', 'sundanese', 'swahili', 'swedish', 'tagalog', 'tamil', 'telugu', 'thai', 'turkish', 'ukranian', 'urdu', 'vietnamese', 'yoruba']
-
- class SubsceneConfig(datasets.BuilderConfig):
-     """ Builder config for Subscene Dataset. """
-
-     def __init__(self, subset, **kwargs):
-         super(SubsceneConfig, self).__init__(**kwargs)
-
-         if subset !="all":
-
-             self.subset = [subset]
-         else:
-             self.subset = _LangID
-
- class Subscene(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-     VERSION = datasets.Version("1.1.0")
-     BUILDER_CONFIGS_CLASS = SubsceneConfig
-     BUILDER_CONFIGS = [
-         SubsceneConfig(name=subset,
-                        subset=subset,
-                        version=datasets.Version("1.1.0", ""),
-                        description='')
-         for subset in _LangID
-     ]
-
-
-     def _info(self):
-         # information about the datasets and feature type of the datasets items.
-
-         features = datasets.Features(
-             {
-                 "subtitle_name": datasets.Value("string"),
-                 "file_name": datasets.Value("string"),
-                 "transcript": datasets.Value("string"),
-             }
-         )
-
-
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         #split = 'train'
-         #print("Split")
-         data_urls = {}
-         for split in ['train']: #'validation']:
-             #if self.config.subset = "all":
-
-             data_urls[split] = [
-                 _URLS.format(
-                     Lang = subset,
-                     split='validation' if split=='_val' else '',
-                     index = i,
-                 )
-                 for subset in self.config.subset
-                 for i in range(_N_FILES_PER_SPLIT[subset][split])
-             ]
-
-         train_downloaded_files = dl_manager.download(data_urls["train"])
-         #validation_downloaded_files = dl_manager.download(data_urls["validation"])
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
-             #datasets.SplitGenerator(
-             #    name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
-             #),
-         ]
-
-
-     # method parameters are unpacked from gen_kwargs as given in _split_generators
-     def _generate_examples(self, filepaths):
-
-         id_ = 0
-         for filepath in filepaths:
-             with gzip.open(open(filepath,"rb"), "rt", encoding = "utf-8") as f:
-                 for row in f:
-                     if row:
-
-                         data = json.loads(row)
-                         yield id_, data
-                         id_ +=1
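
With the loading script deleted, the gzipped JSON-lines shards it pointed at can still be read directly. The sketch below mirrors the logic of the removed _split_generators and _generate_examples: it downloads one shard and prints the subtitle_name, file_name, and transcript fields declared in _info. This is a minimal sketch, not the repo's official loader: it assumes the shards still follow the {Lang}/{Lang}_subscene_{index}.json.gz pattern that _URLS produced for the train split, and the arabic subset and index 0 are illustrative placeholders only.

# Minimal sketch, assuming the shard layout implied by the deleted script's
# _URLS template ({Lang}/{Lang}_subscene_{index}.json.gz); adjust the filename
# if the repository layout has changed after this commit.
import gzip
import json

from huggingface_hub import hf_hub_download

shard = hf_hub_download(
    repo_id="refine-ai/subscene",
    repo_type="dataset",
    filename="arabic/arabic_subscene_0.json.gz",  # hypothetical shard name
)

# Same parsing loop as the deleted _generate_examples: one JSON object per line.
with gzip.open(shard, "rt", encoding="utf-8") as f:
    for id_, row in enumerate(line for line in f if line.strip()):
        data = json.loads(row)
        print(id_, data["subtitle_name"], data["file_name"], len(data["transcript"]))
        if id_ >= 2:  # preview a few records only
            break

Depending on how the repository is configured after this change, datasets.load_dataset("refine-ai/subscene", data_files="arabic/*.json.gz", split="train") may reach the same records through the built-in JSON builder, with no custom script at all.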