# Copyright 2024 The Qwen Team and The HuggingFace Inc. team.
# SPDX-License-Identifier: Apache-2.0

"""Tokenization classes for Qwen2."""

from typing import Optional, Tuple

from transformers.tokenization_utils import AddedToken
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import logging
from .tokenization_qwen2 import Qwen2Tokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_file": "tokenizer.json",
}


MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}


class Qwen2TokenizerFast(PreTrainedTokenizerFast):
    """

    Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level

    Byte-Pair-Encoding.



    Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will

    be encoded differently whether it is at the beginning of the sentence (without space) or not:



    ```python

    >>> from transformers import Qwen2TokenizerFast



    >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")

    >>> tokenizer("Hello world")["input_ids"]

    [9707, 1879]



    >>> tokenizer(" Hello world")["input_ids"]

    [21927, 1879]

    ```

    This is expected.



    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should

    refer to this superclass for more information regarding those methods.



    Args:

        vocab_file (`str`, *optional*):

            Path to the vocabulary file.

        merges_file (`str`, *optional*):

            Path to the merges file.

        tokenizer_file (`str`, *optional*):

            Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that

            contains everything needed to load the tokenizer.

        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):

            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this

            token instead. Not applicable to this tokenizer.

        bos_token (`str`, *optional*):

            The beginning of sequence token. Not applicable for this tokenizer.

        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):

            The end of sequence token.

        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):

            The token used for padding, for example when batching sequences of different lengths.

    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = Qwen2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        **kwargs,
    ):
        # We need to pass at least vocab_file and merges_file to the base class
        # in case a slow tokenizer needs to be initialized; everything else can be
        # configured through files.
        # Following GPT2TokenizerFast, we also wrap unk_token, bos_token, eos_token,
        # and pad_token as AddedToken instances so they are handled as special tokens.

        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(pad_token, str)
            else pad_token
        )

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

    # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, ...]:
        """Save the tokenizer's vocabulary and merges files to `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
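

# A minimal usage sketch (illustrative only, not part of the library API). It assumes
# network access to the "Qwen/Qwen-tokenizer" checkpoint named in the class docstring
# and a writable temporary directory; the printed token IDs are the docstring's examples.
if __name__ == "__main__":
    import tempfile

    # Load the fast tokenizer from the hub checkpoint used in the docstring example.
    tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")

    # Leading whitespace changes the byte-level BPE encoding, as documented above.
    print(tokenizer("Hello world")["input_ids"])   # e.g. [9707, 1879]
    print(tokenizer(" Hello world")["input_ids"])  # e.g. [21927, 1879]

    # Round-trip the vocabulary and merges files through save_vocabulary.
    with tempfile.TemporaryDirectory() as tmp_dir:
        saved_files = tokenizer.save_vocabulary(tmp_dir)
        print(saved_files)  # paths to the saved vocab.json / merges.txt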