# coding=utf-8
"""VietnamCeleb dataset."""
import os
from typing import List

import datasets
DATA_DIR_STRUCTURE = """
root/
└── data
    ├── id00000
    │   ├── 00001.wav
    │   └── ...
    └── ...
"""
MANUAL_DOWNLOAD_INSTRUCTION = f"""
To use VietnamCeleb, you have to download and unzip the data manually.
The directory tree of the downloaded data should look like:
{DATA_DIR_STRUCTURE}
"""
SAMPLING_RATE = 16_000
class VietnamCelebConfig(datasets.BuilderConfig):
    """BuilderConfig for VietnamCeleb."""

    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features
class VietnamCeleb(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        VietnamCelebConfig(
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=SAMPLING_RATE),
                    "speaker": datasets.Value("string"),
                    # "duration": datasets.Value("int32"),
                }
            ),
            name="verification",
            description="",
        ),
    ]
    DEFAULT_CONFIG_NAME = "verification"
    def _info(self):
        return datasets.DatasetInfo(
            description="VietnamCeleb for verification",
            features=self.config.features,
        )

    @property
    def manual_download_instructions(self):
        return MANUAL_DOWNLOAD_INSTRUCTION
    def _split_generators(self, dl_manager):
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. {self.manual_download_instructions}"
            )
        if not os.path.isdir(os.path.join(data_dir, "data")):
            raise FileNotFoundError(
                f"{os.path.join(data_dir, 'data')} does not exist. Make sure you have unzipped the dataset."
            )
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}),
        ]
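    # Split derivation: speaker IDs listed in vietnam-celeb-t.txt form the train
    # split; every remaining speaker directory under data/ is used as the test split.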
    def _generate_examples(self, split, data_dir):
        """Generate examples from VietnamCeleb."""
        train_txt = os.path.join(data_dir, "vietnam-celeb-t.txt")
        archive_path = os.path.join(data_dir, "data")
        with open(train_txt, "r") as f:
            train_speakers = [line.strip().split()[0] for line in f]
        train_speakers = list(set(train_speakers))
        test_speakers = list(set(os.listdir(archive_path)) - set(train_speakers))
        # Iterate over the contents of the data directory to extract the relevant information
        extensions = [".wav"]
        if split == "train":
            speakers = train_speakers
        else:
            speakers = test_speakers
        guid = 0
        for speaker in speakers:
            _, wav_paths = fast_scandir(os.path.join(archive_path, speaker), extensions)
            for wav_path in wav_paths:
                yield guid, {
                    "audio": wav_path,
                    "speaker": speaker,
                }
                guid += 1
def fast_scandir(path: str, extensions: List[str], recursive: bool = False):
    # Scan files faster than glob, optionally recursing into subfolders
    # From github.com/drscotthawley/aeiou/blob/main/aeiou/core.py
    subfolders, files = [], []
    try:  # hope to avoid 'permission denied' by this try
        for f in os.scandir(path):
            try:  # hope to avoid 'too many levels of symbolic links' error
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file():
                    if os.path.splitext(f.name)[1].lower() in extensions:
                        files.append(f.path)
            except Exception:
                pass
    except Exception:
        pass
    if recursive:
        for subfolder in list(subfolders):
            sf, f = fast_scandir(subfolder, extensions, recursive=recursive)
            subfolders.extend(sf)
            files.extend(f)  # type: ignore
    return subfolders, files
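

# Minimal smoke test sketch, assuming the dataset has already been unzipped
# locally; the paths below are placeholders for the root described in
# DATA_DIR_STRUCTURE, not part of the dataset script itself.
if __name__ == "__main__":
    data_root = "/path/to/root"  # placeholder: adjust to your local copy
    example_speaker_dir = os.path.join(data_root, "data", "id00000")
    _, wav_files = fast_scandir(example_speaker_dir, [".wav"])
    print(f"Found {len(wav_files)} wav files under {example_speaker_dir}")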