#!/usr/bin/env python3
"""Train a word-level tokenizer on the WikiText-103 corpus and save it as tokenizer.json."""

import argparse
from collections.abc import Iterator

from datasets import load_dataset
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.normalizers import Sequence, NFC, Strip, Lowercase
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordLevelTrainer
from tqdm.auto import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocabulary', type=int, default=75000, help='Vocabulary size')
    parser.add_argument('--batch', type=int, default=1024, help='Batch size')
    args = parser.parse_args()

    # Train on the concatenation of the train, validation, and test splits of WikiText-103 (raw).
    dataset = load_dataset('wikitext', 'wikitext-103-raw-v1', split='train+validation+test')

    # Word-level model with a shared <unk> token; normalize with NFC, strip surrounding
    # whitespace, and lowercase, then pre-tokenize on whitespace and punctuation.
    tokenizer = Tokenizer(WordLevel(unk_token='<unk>'))
    tokenizer.normalizer = Sequence([NFC(), Strip(), Lowercase()])
    tokenizer.pre_tokenizer = Whitespace()

    def batches(batch_size: int) -> Iterator[list[str]]:
        # dataset.iter() yields column-wise dicts, so each item is a list of raw text lines.
        for batch in tqdm(dataset.iter(batch_size=batch_size), desc='Tokenization'):
            yield batch['text']

    # Reserve sentence-boundary and unknown special tokens in the vocabulary.
    trainer = WordLevelTrainer(vocab_size=args.vocabulary,
                               special_tokens=['<s>', '</s>', '<unk>'])

    # `length` is the total number of examples and is used only for progress reporting.
    tokenizer.train_from_iterator(batches(args.batch), trainer=trainer, length=len(dataset))

    # Serialize the trained tokenizer to a human-readable JSON file.
    tokenizer.save('tokenizer.json', pretty=True)


if __name__ == '__main__':
    main()
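
# Usage sketch (an assumption about downstream use, not part of the training run itself):
# the saved tokenizer.json can be reloaded with Tokenizer.from_file and applied to new text.
#
#     from tokenizers import Tokenizer
#     tok = Tokenizer.from_file('tokenizer.json')
#     encoding = tok.encode('The quick brown fox.')
#     print(encoding.tokens)  # lowercased, whitespace-split word tokens
#     print(encoding.ids)     # integer ids; out-of-vocabulary words map to <unk>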