|
| 1 | +"""Tokenization classes for ChatGLM.""" |
| 2 | +import sys |
| 3 | +import unicodedata |
| 4 | +from typing import List, Optional, Union |
| 5 | +from functools import lru_cache |
| 6 | +import os |
| 7 | +import collections |
| 8 | +import re |
| 9 | + |
| 10 | +from transformers.tokenization_utils import PreTrainedTokenizer |
| 11 | +from icetk.text_tokenizer import TextTokenizer |
| 12 | +from icetk.utils import auto_create |
| 13 | +import icetk.sentencepiece_model_pb2 as sp_model |
| 14 | +from transformers.utils import logging |
| 15 | + |
| 16 | +logger = logging.get_logger(__name__) |
| 17 | + |
| 18 | +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { |
| 19 | + "THUDM/chatglm-6b": 2048, |
| 20 | +} |
| 21 | + |
| 22 | + |
| 23 | +class SPTokenizer: |
| 24 | + def __init__( |
| 25 | + self, |
| 26 | + vocab_file, |
| 27 | + max_blank_length=80, |
| 28 | + byte_fallback=True, |
| 29 | + ): |
| 30 | + assert vocab_file is not None |
| 31 | + self.vocab_file = vocab_file |
| 32 | + self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"] |
| 33 | + self.max_blank_length = max_blank_length |
| 34 | + self.byte_fallback = byte_fallback |
| 35 | + self.text_tokenizer = self._build_text_tokenizer(encode_special_tokens=False) |
| 36 | + self.special_text_tokenizer = self._build_text_tokenizer(encode_special_tokens=True) |
| 37 | + |
| 38 | + @staticmethod |
| 39 | + def _configure_tokenizer( |
| 40 | + text_tokenizer: TextTokenizer, |
| 41 | + special_tokens: List[str], |
| 42 | + max_blank_length: int, |
| 43 | + byte_fallback: bool, |
| 44 | + encode_special_tokens=False, |
| 45 | + ): |
| 46 | + # special token |
| 47 | + special_token_type = 4 if encode_special_tokens else 3 # 3 - CONTROL, 4 - USER_DEFINE |
| 48 | + for token in special_tokens: |
| 49 | + text_tokenizer.proto.pieces.append( |
| 50 | + sp_model.ModelProto.SentencePiece(piece=token, score=0.0, type=special_token_type) |
| 51 | + ) |
| 52 | + # whitespaces |
| 53 | + for token in [SPTokenizer.get_tab_token()] + [ |
| 54 | + SPTokenizer.get_blank_token(i) for i in range(2, max_blank_length + 1) |
| 55 | + ]: |
| 56 | + text_tokenizer.proto.pieces.append(sp_model.ModelProto.SentencePiece(piece=token, score=0.0, type=4)) |
| 57 | + # byte fallback |
| 58 | + if byte_fallback: |
| 59 | + text_tokenizer.proto.trainer_spec.byte_fallback = True |
| 60 | + for i in range(256): |
| 61 | + text_tokenizer.proto.pieces.append( |
| 62 | + sp_model.ModelProto.SentencePiece(piece="<0x{:02X}>".format(i), score=0.0, type=6) |
| 63 | + ) |
| 64 | + text_tokenizer.refresh() |
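    # Note (inferred from the SentencePiece proto enums; stated as an assumption,
    # not part of the original file): piece type 3 = CONTROL, 4 = USER_DEFINED,
    # 6 = BYTE. With encode_special_tokens=True the special tokens are added as
    # USER_DEFINED pieces, so their literal strings (e.g. "[MASK]") appearing in
    # input text are mapped to these pieces; as CONTROL pieces they are not
    # produced from plain text. The 256 "<0xNN>" pieces implement byte fallback.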
| 65 | + |
| 66 | + def _build_text_tokenizer(self, encode_special_tokens=False): |
| 67 | + tokenizer = TextTokenizer(self.vocab_file) |
| 68 | + self._configure_tokenizer( |
| 69 | + tokenizer, self.special_tokens, self.max_blank_length, self.byte_fallback, encode_special_tokens |
| 70 | + ) |
| 71 | + return tokenizer |
| 72 | + |
| 73 | + def _get_text_tokenizer(self, encode_special_tokens=False): |
| 74 | + if encode_special_tokens: |
| 75 | + return self.special_text_tokenizer |
| 76 | + else: |
| 77 | + return self.text_tokenizer |
| 78 | + |
| 79 | + @staticmethod |
| 80 | + def get_blank_token(length: int): |
| 81 | + assert length >= 2 |
| 82 | + return f"<|blank_{length}|>" |
| 83 | + |
| 84 | + @staticmethod |
| 85 | + def get_tab_token(): |
| 86 | +        return "<|tab|>" |
| 87 | + |
| 88 | + @property |
| 89 | + def num_image_tokens(self): |
| 90 | + return 20000 |
| 91 | + |
| 92 | + @property |
| 93 | + def num_text_tokens(self): |
| 94 | + return self.text_tokenizer.num_tokens |
| 95 | + |
| 96 | + @property |
| 97 | + def num_tokens(self): |
| 98 | + return self.num_image_tokens + self.num_text_tokens |
| 99 | + |
| 100 | + @staticmethod |
| 101 | + def _encode_whitespaces(text: str, max_len: int = 80): |
| 102 | + text = text.replace("\t", SPTokenizer.get_tab_token()) |
| 103 | + for i in range(max_len, 1, -1): |
| 104 | + text = text.replace(" " * i, SPTokenizer.get_blank_token(i)) |
| 105 | + return text |
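    # Illustrative sketch of the whitespace encoding above (the example string is
    # an assumption, not from the original file): tabs and runs of two or more
    # spaces are replaced by single placeholder pieces, e.g.
    #   SPTokenizer._encode_whitespaces("a\tb  c")  ->  "a<|tab|>b<|blank_2|>c"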
| 106 | + |
| 107 | + def _preprocess(self, text: str, linebreak=True, whitespaces=True): |
| 108 | + if linebreak: |
| 109 | + text = text.replace("\n", "<n>") |
| 110 | + if whitespaces: |
| 111 | + text = self._encode_whitespaces(text, max_len=self.max_blank_length) |
| 112 | + return text |
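    # Hypothetical example of the combined preprocessing (for illustration only):
    # with linebreak=True and whitespaces=True,
    #   "def f():\n    return 1"  ->  "def f():<n><|blank_4|>return 1"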
| 113 | + |
| 114 | + def encode( |
| 115 | + self, text: str, linebreak=True, whitespaces=True, special_tokens=False, add_dummy_prefix=True |
| 116 | + ) -> List[int]: |
| 117 | + """ |
| 118 | + @param text: Text to encode. |
| 119 | + @param linebreak: Whether to encode newline (\n) in text. |
| 120 | + @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding. |
| 121 | + @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text. |
| 122 | + @param add_dummy_prefix: Whether to add dummy blank space in the beginning. |
| 123 | + """ |
| 124 | + text = self._preprocess(text, linebreak, whitespaces) |
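        # Descriptive note (added; the rationale is inferred from the code): the
        # underlying SentencePiece model always inserts a dummy leading space, so
        # when add_dummy_prefix is False a throwaway "<n>" is prepended here and
        # the first two resulting pieces are dropped below via tokens[2:].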
| 125 | + if not add_dummy_prefix: |
| 126 | + text = "<n>" + text |
| 127 | + tmp = self._get_text_tokenizer(encode_special_tokens=special_tokens).encode(text) |
| 128 | + tokens = [x + self.num_image_tokens for x in tmp] |
| 129 | + return tokens if add_dummy_prefix else tokens[2:] |
| 130 | + |
| 131 | + def decode(self, text_ids: List[int], special_tokens=False) -> str: |
| 132 | + ids = [int(_id) - self.num_image_tokens for _id in text_ids] |
| 133 | + text = self._get_text_tokenizer(encode_special_tokens=special_tokens).decode(ids) |
| 134 | + text = text.replace("<n>", "\n") |
| 135 | + text = text.replace(SPTokenizer.get_tab_token(), "\t") |
| 136 | + for i in range(2, self.max_blank_length + 1): |
| 137 | + text = text.replace(self.get_blank_token(i), " " * i) |
| 138 | + return text |
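    # Note (added for clarity): decode() undoes the _preprocess substitutions in
    # the opposite direction, i.e. "<n>" -> newline, "<|tab|>" -> tab and
    # "<|blank_n|>" -> n spaces, after shifting ids back down by num_image_tokens.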
| 139 | + |
| 140 | + def tokenize( |
| 141 | + self, text: str, linebreak=True, whitespaces=True, special_tokens=False, add_dummy_prefix=True |
| 142 | + ) -> List[str]: |
| 143 | + """ |
| 144 | + @param text: Text to encode. |
| 145 | + @param linebreak: Whether to encode newline (\n) in text. |
| 146 | + @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding. |
| 147 | + @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text. |
| 148 | + @param add_dummy_prefix: Whether to add dummy blank space in the beginning. |
| 149 | + """ |
| 150 | + text = self._preprocess(text, linebreak, whitespaces) |
| 151 | + if not add_dummy_prefix: |
| 152 | + text = "<n>" + text |
| 153 | + tokens = self._get_text_tokenizer(encode_special_tokens=special_tokens).tokenize(text) |
| 154 | + return tokens if add_dummy_prefix else tokens[2:] |
| 155 | + |
| 156 | + def __getitem__(self, x: Union[int, str]): |
| 157 | + if isinstance(x, int): |
| 158 | + if x < self.num_image_tokens: |
| 159 | + return "<image_{}>".format(x) |
| 160 | + else: |
| 161 | + return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens) |
| 162 | + elif isinstance(x, str): |
| 163 | + if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit(): |
| 164 | + return int(x[7:-1]) |
| 165 | + else: |
| 166 | + return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens |
| 167 | + else: |
| 168 | + raise ValueError("The key should be str or int.") |
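    # Indexing sketch (values are hypothetical): ids below 20000 are image
    # tokens, e.g. sp[5] == "<image_5>" and sp["<image_5>"] == 5; text pieces
    # carry the image-token offset, e.g.
    #   sp["[MASK]"] == sp.text_tokenizer.convert_token_to_id("[MASK]") + sp.num_image_tokens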
| 169 | + |
| 170 | + |
| 171 | +class ChatGLMTokenizer(PreTrainedTokenizer): |
| 172 | + """ |
| 173 | +    Construct a ChatGLM tokenizer, based on an icetk SentencePiece model with byte-level fallback. |
| 174 | + |
| 175 | + Args: |
| 176 | + vocab_file (`str`): |
| 177 | + Path to the vocabulary file. |
| 178 | + """ |
| 179 | + |
| 180 | + vocab_files_names = {"vocab_file": "ice_text.model"} |
| 181 | + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES |
| 182 | + model_input_names = ["input_ids"] |
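    # Minimal usage sketch (the hub id comes from the mapping above; loading via
    # AutoTokenizer with trust_remote_code is an assumption about how this file
    # is typically consumed, not something defined here):
    #   from transformers import AutoTokenizer
    #   tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
    #   ids = tokenizer("Hello ChatGLM")["input_ids"]
    #   text = tokenizer.decode(ids)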
| 183 | + |
| 184 | + def __init__( |
| 185 | + self, |
| 186 | + vocab_file, |
| 187 | + do_lower_case=False, |
| 188 | + remove_space=False, |
| 189 | + bos_token='sop', |
| 190 | + eos_token='eos', |
| 191 | + eop_token='eop', |
| 192 | + mask_token='[MASK]', |
| 193 | + gmask_token='[gMASK]', |
| 194 | + padding_side="left", |
| 195 | + **kwargs |
| 196 | + ) -> None: |
| 197 | + super().__init__( |
| 198 | + do_lower_case=do_lower_case, |
| 199 | + remove_space=remove_space, |
| 200 | + padding_side=padding_side, |
| 201 | + **kwargs |
| 202 | + ) |
| 203 | + |
| 204 | + self.do_lower_case = do_lower_case |
| 205 | + self.remove_space = remove_space |
| 206 | + self.vocab_file = vocab_file |
| 207 | + |
| 208 | + self.bos_token = bos_token |
| 209 | + self.eos_token = eos_token |
| 210 | + self.eop_token = eop_token |
| 211 | + self.mask_token = mask_token |
| 212 | + self.gMASK_token = gmask_token |
| 213 | + |
| 214 | + self.sp_tokenizer = SPTokenizer(vocab_file) |
| 215 | + |
| 216 | + """ Initialisation """ |
| 217 | + |
| 218 | + @property |
| 219 | + def eop_token_id(self) -> Optional[int]: |
| 220 | + """ |
| 221 | + `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been |
| 222 | + set. |
| 223 | + """ |
| 224 | + if self.eop_token is None: |
| 225 | + return None |
| 226 | + return self.convert_tokens_to_ids(self.eop_token) |
| 227 | + |
| 228 | + @property |
| 229 | + def vocab_size(self): |
| 230 | + """ Returns vocab size """ |
| 231 | + return self.sp_tokenizer.num_tokens |
| 232 | + |
| 233 | + def get_vocab(self): |
| 234 | + """ Returns vocab as a dict """ |
| 235 | + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} |
| 236 | + vocab.update(self.added_tokens_encoder) |
| 237 | + return vocab |
| 238 | + |
| 239 | + def preprocess_text(self, inputs): |
| 240 | + if self.remove_space: |
| 241 | + outputs = " ".join(inputs.strip().split()) |
| 242 | + else: |
| 243 | + outputs = inputs |
| 244 | + |
| 245 | + if self.do_lower_case: |
| 246 | + outputs = outputs.lower() |
| 247 | + |
| 248 | + return outputs |
| 249 | + |
| 250 | + def _tokenize(self, text, **kwargs): |
| 251 | + """ Returns a tokenized string. """ |
| 252 | + text = self.preprocess_text(text) |
| 253 | + |
| 254 | + seq = self.sp_tokenizer.tokenize(text) |
| 255 | + |
| 256 | + return seq |
| 257 | + |
| 258 | + def decode( |
| 259 | + self, |
| 260 | + token_ids: Union[List[int], List[List[int]]], |
| 261 | + skip_special_tokens: bool = False, |
| 262 | + clean_up_tokenization_spaces: bool = True, |
| 263 | + spaces_between_special_tokens: bool = True, |
| 264 | + **kwargs |
| 265 | +    ) -> Union[str, List[str]]: |
| 266 | + if isinstance(token_ids[0], list): |
| 267 | + tokens = [] |
| 268 | + for single_token_ids in token_ids: |
| 269 | + if self.pad_token_id in single_token_ids: # remove pad |
| 270 | + single_token_ids = list(filter((self.pad_token_id).__ne__, single_token_ids)) |
| 271 | + tokens.append(self.sp_tokenizer.decode(single_token_ids)) |
| 272 | +            return tokens |
| 273 | + else: |
| 274 | + if self.pad_token_id in token_ids: # remove pad |
| 275 | + token_ids = list(filter((self.pad_token_id).__ne__, token_ids)) |
| 276 | + return self.sp_tokenizer.decode(token_ids) |
| 277 | + |
| 278 | + def _convert_token_to_id(self, token): |
| 279 | + """ Converts a token (str) in an id using the vocab. """ |
| 280 | + return self.sp_tokenizer[token] |
| 281 | + |
| 282 | + def _convert_id_to_token(self, index): |
| 283 | + """Converts an index (integer) in a token (str) using the vocab.""" |
| 284 | + return self.sp_tokenizer[index] |
| 285 | + |
| 286 | + def save_vocabulary(self, save_directory, filename_prefix=None): |
| 287 | + """ |
| 288 | + Save the vocabulary and special tokens file to a directory. |
| 289 | + |
| 290 | + Args: |
| 291 | + save_directory (`str`): |
| 292 | + The directory in which to save the vocabulary. |
| 293 | + filename_prefix (`str`, *optional*): |
| 294 | +            An optional prefix to add to the names of the saved files. |
| 295 | + |
| 296 | + Returns: |
| 297 | + `Tuple(str)`: Paths to the files saved. |
| 298 | + """ |
| 299 | + if os.path.isdir(save_directory): |
| 300 | + vocab_file = os.path.join( |
| 301 | + save_directory, self.vocab_files_names["vocab_file"] |
| 302 | + ) |
| 303 | + else: |
| 304 | + vocab_file = save_directory |
| 305 | + |
| 306 | + with open(self.vocab_file, 'rb') as fin: |
| 307 | + proto_str = fin.read() |
| 308 | + |
| 309 | + with open(vocab_file, "wb") as writer: |
| 310 | + writer.write(proto_str) |
| 311 | + |
| 312 | + return (vocab_file,) |
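    # Usage note (path is a placeholder): tokenizer.save_vocabulary("./out") copies
    # the bytes of the original ice_text.model into ./out/ice_text.model; the
    # filename_prefix argument is currently accepted but not used.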
| 313 | + |
| 314 | + def build_inputs_with_special_tokens( |
| 315 | + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None |
| 316 | + ) -> List[int]: |
| 317 | + """ |
| 318 | +        Build model inputs from a sequence or a pair of sequences by concatenating them and adding the special |
| 319 | +        tokens used by ChatGLM. When the input contains no mask token, the output has the following format: |
| 320 | + |
| 321 | +        - single sequence: `X [gMASK] <sop>` |
| 322 | +        - pair of sequences: `A B [gMASK] <sop>` |
| 323 | + |
| 324 | + Args: |
| 325 | + token_ids_0 (`List[int]`): |
| 326 | + List of IDs to which the special tokens will be added. |
| 327 | + token_ids_1 (`List[int]`, *optional*): |
| 328 | + Optional second list of IDs for sequence pairs. |
| 329 | + |
| 330 | + Returns: |
| 331 | + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. |
| 332 | + """ |
| 333 | + if token_ids_1 is not None: |
| 334 | + token_ids_0 += token_ids_1 |
| 335 | + mask_ids = self.sp_tokenizer[self.mask_token] |
| 336 | + gmask_ids = self.sp_tokenizer[self.gMASK_token] |
| 337 | + if mask_ids not in token_ids_0 and gmask_ids not in token_ids_0: |
| 338 | + token_ids_0 += [gmask_ids] |
| 339 | + |
| 340 | + if token_ids_0[-1] != mask_ids and token_ids_0[-1] != gmask_ids: |
| 341 | + token_ids_0 += [self.sp_tokenizer[self.eos_token]] |
| 342 | + |
| 343 | + token_ids_0 += [self.sp_tokenizer[self.bos_token]] |
| 344 | + |
| 345 | + return token_ids_0 |
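    # Illustrative sketch of the method above (t1, t2 are placeholder text ids):
    #   build_inputs_with_special_tokens([t1, t2])       -> [t1, t2, gMASK, sop]
    #   build_inputs_with_special_tokens([t1, gMASK])    -> [t1, gMASK, sop]
    #   build_inputs_with_special_tokens([t1, MASK, t2]) -> [t1, MASK, t2, eos, sop]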
| 346 | + |