# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. """
  15. The module text.transforms is inherited from _c_dataengine
  16. and is implemented based on ICU4C and cppjieba in C++.
  17. It's a high performance module to process NLP text.
  18. Users can use Vocab to build their own dictionary,
  19. use appropriate tokenizers to split sentences into different tokens,
  20. and use Lookup to find the index of tokens in Vocab.
  21. .. Note::
  22. A constructor's arguments for every class in this module must be saved into the
  23. class attributes (self.xxx) to support save() and load().
  24. Examples:
  25. >>> import mindspore.dataset as ds
  26. >>> import mindspore.dataset.text as text
  27. >>>
  28. >>> dataset_file = "path/to/text_file_path"
  29. >>> # Create a dataset for text sentences saved as line data in a file
  30. >>> data1 = ds.TextFileDataset(dataset_file, shuffle=False)
  31. >>> # Tokenize sentences to unicode characters
  32. >>> tokenizer = text.UnicodeCharTokenizer()
  33. >>> # Load vocabulary from list
  34. >>> vocab = text.Vocab.from_list(['深', '圳', '欢', '迎', '您'])
  35. >>> # Use Lookup operator to map tokens to ids
  36. >>> lookup = text.Lookup(vocab)
  37. >>> data1 = data1.map(operations=[tokenizer, lookup])
  38. >>> for i in data1.create_dict_iterator():
  39. >>> print(i)
  40. >>> # if text line in dataset_file is:
  41. >>> # 深圳欢迎您
  42. >>> # then the output will be:
  43. >>> # {'text': array([0, 1, 2, 3, 4], dtype=int32)}
  44. """
import os
import re
import platform

import numpy as np

import mindspore._c_dataengine as cde
import mindspore.common.dtype as mstype

from .utils import JiebaMode, NormalizeForm, to_str, SPieceTokenizerOutType, SPieceTokenizerLoadType
from .validators import check_lookup, check_jieba_add_dict, \
    check_jieba_add_word, check_jieba_init, check_with_offsets, check_unicode_script_tokenizer, \
    check_wordpiece_tokenizer, check_regex_tokenizer, check_basic_tokenizer, check_ngram, check_pair_truncate, \
    check_to_number, check_bert_tokenizer, check_python_tokenizer, check_slidingwindow
from ..core.datatypes import mstype_to_detype


class Lookup(cde.LookupOp):
    """
    Lookup operator that looks up a word to an id.

    Args:
        vocab (Vocab): A vocabulary object.
        unknown_token (str, optional): Word used for lookup if the word being looked up is out-of-vocabulary (OOV).
            If unknown_token is OOV, a runtime error will be thrown (default=None).
        data_type (mindspore.dtype, optional): mindspore.dtype that lookup maps string to (default=mstype.int32).

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> # Load vocabulary from list
        >>> vocab = text.Vocab.from_list(['深', '圳', '欢', '迎', '您'])
        >>> # Use Lookup operator to map tokens to ids
        >>> lookup = text.Lookup(vocab, "<unk>")
        >>> data1 = data1.map(operations=[lookup])
    """

    @check_lookup
    def __init__(self, vocab, unknown_token=None, data_type=mstype.int32):
        super().__init__(vocab, unknown_token, mstype_to_detype(data_type))


class SlidingWindow(cde.SlidingWindowOp):
    """
    TensorOp to construct a tensor from data (only 1-D for now), where each element in the dimension axis
    is a slice of data starting at the corresponding position, with a specified width.

    Args:
        width (int): The width of the window. It must be an integer and greater than zero.
        axis (int, optional): The axis along which the sliding window is computed (default=0).

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> # Data before
        >>> # |     col1     |
        >>> # +--------------+
        >>> # | [1,2,3,4,5]  |
        >>> # +--------------+
        >>> data1 = data1.map(operations=text.SlidingWindow(3, 0))
        >>> # Data after
        >>> # |     col1     |
        >>> # +--------------+
        >>> # |  [[1,2,3],   |
        >>> # |   [2,3,4],   |
        >>> # |   [3,4,5]]   |
        >>> # +--------------+
    """

    @check_slidingwindow
    def __init__(self, width, axis=0):
        super().__init__(width, axis)


class Ngram(cde.NgramOp):
    """
    TensorOp to generate n-gram from a 1-D string Tensor.

    Refer to https://en.wikipedia.org/wiki/N-gram#Examples for an overview of what n-gram is and how it works.

    Args:
        n (list[int]): n in n-gram, n >= 1. n is a list of positive integers. For example, if n=[4, 3], then the
            result would be a 4-gram followed by a 3-gram in the same tensor. If the number of words is not enough
            to make up an n-gram, an empty string will be returned. For example, 3 grams on ["mindspore", "best"]
            will result in an empty string being produced.
        left_pad (tuple, optional): ("pad_token", pad_width). Padding performed on the left side of the sequence.
            pad_width will be capped at n-1. left_pad=("_", 2) would pad the left side of the sequence with "__"
            (default=None).
        right_pad (tuple, optional): ("pad_token", pad_width). Padding performed on the right side of the sequence.
            pad_width will be capped at n-1. right_pad=("-", 2) would pad the right side of the sequence with "--"
            (default=None).
        separator (str, optional): Symbol used to join strings together. For example, if 2-gram is
            ["mindspore", "amazing"] with separator="-", the result would be ["mindspore-amazing"]
            (default=None, which means whitespace is used).

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> data1 = data1.map(operations=text.Ngram(3, separator=" "))
    """

    @check_ngram
    def __init__(self, n, left_pad=("", 0), right_pad=("", 0), separator=" "):
        super().__init__(n, left_pad[1], right_pad[1], left_pad[0], right_pad[0], separator)
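
# A commented sketch of how padding and n interact (illustrative only, not executed here):
# assuming an input row ["mindspore", "best"], an op such as
#     text.Ngram([3], left_pad=("_", 2), right_pad=("-", 2), separator=" ")
# would first pad the row to ["_", "_", "mindspore", "best", "-", "-"] (pad widths are
# capped at n-1) and then emit 3-grams such as "_ _ mindspore", "_ mindspore best",
# "mindspore best -" and "best - -".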

DE_C_INTER_JIEBA_MODE = {
    JiebaMode.MIX: cde.JiebaMode.DE_JIEBA_MIX,
    JiebaMode.MP: cde.JiebaMode.DE_JIEBA_MP,
    JiebaMode.HMM: cde.JiebaMode.DE_JIEBA_HMM
}


class JiebaTokenizer(cde.JiebaTokenizerOp):
    """
    Tokenize a Chinese string into words based on a dictionary.

    Args:
        hmm_path (str): Dictionary file used by the HMMSegment algorithm.
            The dictionary can be obtained from the official website of cppjieba.
        mp_path (str): Dictionary file used by the MPSegment algorithm.
            The dictionary can be obtained from the official website of cppjieba.
        mode (JiebaMode, optional): Valid values can be any of [JiebaMode.MP, JiebaMode.HMM,
            JiebaMode.MIX] (default=JiebaMode.MIX).

            - JiebaMode.MP, tokenize with MPSegment algorithm.
            - JiebaMode.HMM, tokenize with Hidden Markov Model Segment algorithm.
            - JiebaMode.MIX, tokenize with a mix of MPSegment and HMMSegment algorithms.
        with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
        >>> tokenizer_op = text.JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP, with_offsets=False)
        >>> data1 = data1.map(operations=tokenizer_op)
        >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
        >>> # ["offsets_start", dtype=uint32], ["offsets_limit", dtype=uint32]}
        >>> tokenizer_op = text.JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP, with_offsets=True)
        >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
        >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
        >>>                   column_order=["token", "offsets_start", "offsets_limit"])
    """

    @check_jieba_init
    def __init__(self, hmm_path, mp_path, mode=JiebaMode.MIX, with_offsets=False):
        if not isinstance(mode, JiebaMode):
            raise TypeError("Wrong input type for mode, should be JiebaMode.")
        self.mode = mode
        self.__check_path__(hmm_path)
        self.__check_path__(mp_path)
        self.with_offsets = with_offsets
        super().__init__(hmm_path, mp_path,
                         DE_C_INTER_JIEBA_MODE[mode],
                         self.with_offsets)

    @check_jieba_add_word
    def add_word(self, word, freq=None):
        """
        Add a user defined word to the JiebaTokenizer's dictionary.

        Args:
            word (str): The word to be added to the JiebaTokenizer instance.
                The added word will not be written into the built-in dictionary on disk.
            freq (int, optional): The frequency of the word to be added. The higher the frequency,
                the better chance the word will be tokenized (default=None, use default frequency).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> jieba_op = text.JiebaTokenizer(HMM_FILE, MP_FILE, mode=text.JiebaMode.MP)
            >>> with open(VOCAB_FILE, 'r') as f:
            >>>     for line in f:
            >>>         word = line.split(',')[0]
            >>>         jieba_op.add_word(word)
            >>> data1 = data1.map(operations=jieba_op, input_columns=["text"])
        """
        if freq is None:
            super().add_word(word, 0)
        else:
            super().add_word(word, freq)

    @check_jieba_add_dict
    def add_dict(self, user_dict):
        """
        Add user defined words to the JiebaTokenizer's dictionary.

        Args:
            user_dict (Union[str, dict]): The dictionary to be added, either a file path or a Python dictionary.
                Python dict format: {word1: freq1, word2: freq2, ...}.
                Jieba dictionary file format: word (required), freq (optional), such as:

                .. code-block::

                    word1 freq1
                    word2
                    word3 freq3

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> user_dict = {"男默女泪": 10}
            >>> jieba_op = text.JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
            >>> jieba_op.add_dict(user_dict)
            >>> data1 = data1.map(operations=jieba_op, input_columns=["text"])
        """
        if isinstance(user_dict, str):
            self.__add_dict_py_file(user_dict)
        elif isinstance(user_dict, dict):
            for k, v in user_dict.items():
                self.add_word(k, v)
        else:
            raise ValueError("the type of user_dict must be str or dict")

    def __add_dict_py_file(self, file_path):
        """Add user defined words from a file."""
        words_list = self.__parser_file(file_path)
        for data in words_list:
            if data[1] is None:
                freq = 0
            else:
                freq = int(data[1])
            self.add_word(data[0], freq)

    def __parser_file(self, file_path):
        """Parse user defined words from a file."""
        if not os.path.exists(file_path):
            raise ValueError(
                "user dict file {} does not exist".format(file_path))
        real_file_path = os.path.realpath(file_path)
        file_dict = open(real_file_path)
        data_re = re.compile('^(.+?)( [0-9]+)?$', re.U)
        words_list = []
        for item in file_dict:
            data = item.strip()
            if not isinstance(data, str):
                data = self.__decode(data)
            words = data_re.match(data).groups()
            if len(words) != 2:
                raise ValueError(
                    "user dict file {} format error".format(real_file_path))
            words_list.append(words)
        file_dict.close()
        return words_list

    def __decode(self, data):
        """Decode the dict file to utf-8."""
        try:
            data = data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError("user dict file must be utf-8")
        return data.lstrip('\ufeff')

    def __check_path__(self, model_path):
        """Check the model path."""
        if not os.path.exists(model_path):
            raise ValueError(
                "jieba mode file {} does not exist".format(model_path))


class UnicodeCharTokenizer(cde.UnicodeCharTokenizerOp):
    """
    Tokenize a scalar tensor of UTF-8 string to Unicode characters.

    Args:
        with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
        >>> tokenizer_op = text.UnicodeCharTokenizer()
        >>> data1 = data1.map(operations=tokenizer_op)
        >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
        >>> # ["offsets_start", dtype=uint32], ["offsets_limit", dtype=uint32]}
        >>> tokenizer_op = text.UnicodeCharTokenizer(True)
        >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
        >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
        >>>                   column_order=["token", "offsets_start", "offsets_limit"])
    """

    @check_with_offsets
    def __init__(self, with_offsets=False):
        self.with_offsets = with_offsets
        super().__init__(self.with_offsets)


class WordpieceTokenizer(cde.WordpieceTokenizerOp):
    """
    Tokenize a scalar token or 1-D tokens to 1-D subword tokens.

    Args:
        vocab (Vocab): A vocabulary object.
        suffix_indicator (str, optional): Used to show that the subword is the last part of a word (default='##').
        max_bytes_per_token (int, optional): Tokens exceeding this length will not be further split (default=100).
        unknown_token (str, optional): When a token cannot be found: if 'unknown_token' is an empty string,
            return the token directly, else return 'unknown_token' (default='[UNK]').
        with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
        >>> tokenizer_op = text.WordpieceTokenizer(vocab=vocab, unknown_token='[UNK]',
        >>>                                        max_bytes_per_token=100, with_offsets=False)
        >>> data1 = data1.map(operations=tokenizer_op)
        >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
        >>> # ["offsets_start", dtype=uint32], ["offsets_limit", dtype=uint32]}
        >>> tokenizer_op = text.WordpieceTokenizer(vocab=vocab, unknown_token='[UNK]',
        >>>                                        max_bytes_per_token=100, with_offsets=True)
        >>> data2 = data2.map(operations=tokenizer_op,
        >>>                   input_columns=["text"], output_columns=["token", "offsets_start", "offsets_limit"],
        >>>                   column_order=["token", "offsets_start", "offsets_limit"])
    """

    @check_wordpiece_tokenizer
    def __init__(self, vocab, suffix_indicator='##', max_bytes_per_token=100,
                 unknown_token='[UNK]', with_offsets=False):
        self.vocab = vocab
        self.suffix_indicator = suffix_indicator
        self.max_bytes_per_token = max_bytes_per_token
        self.unknown_token = unknown_token
        self.with_offsets = with_offsets
        super().__init__(self.vocab, self.suffix_indicator, self.max_bytes_per_token,
                         self.unknown_token, self.with_offsets)


DE_C_INTER_SENTENCEPIECE_LOADTYPE = {
    SPieceTokenizerLoadType.FILE: cde.SPieceTokenizerLoadType.DE_SPIECE_TOKENIZER_LOAD_KFILE,
    SPieceTokenizerLoadType.MODEL: cde.SPieceTokenizerLoadType.DE_SPIECE_TOKENIZER_LOAD_KMODEL
}

DE_C_INTER_SENTENCEPIECE_OUTTYPE = {
    SPieceTokenizerOutType.STRING: cde.SPieceTokenizerOutType.DE_SPIECE_TOKENIZER_OUTTYPE_KString,
    SPieceTokenizerOutType.INT: cde.SPieceTokenizerOutType.DE_SPIECE_TOKENIZER_OUTTYPE_KINT
}


class SentencePieceTokenizer(cde.SentencePieceTokenizerOp):
    """
    Tokenize a scalar token or 1-D tokens to tokens by sentencepiece.

    Args:
        mode (Union[str, SentencePieceVocab]): If the input parameter is a model file, pass its path as a string.
            If the input parameter is a SentencePieceVocab object, pass the object itself.
        out_type (Union[str, int]): The type of the output.

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> vocab = text.SentencePieceVocab.from_file([VOCAB_FILE], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
        >>> tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)
        >>> data1 = data1.map(operations=tokenizer)
    """

    def __init__(self, mode, out_type):
        self.out_type = out_type
        if isinstance(mode, str):
            model_path, model_filename = os.path.split(mode)
            super().__init__(model_path, model_filename,
                             DE_C_INTER_SENTENCEPIECE_LOADTYPE[SPieceTokenizerLoadType.FILE],
                             DE_C_INTER_SENTENCEPIECE_OUTTYPE[out_type])
        elif isinstance(mode, cde.SentencePieceVocab):
            super().__init__(mode, DE_C_INTER_SENTENCEPIECE_LOADTYPE[SPieceTokenizerLoadType.MODEL],
                             DE_C_INTER_SENTENCEPIECE_OUTTYPE[out_type])
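
# A commented sketch of the two ways the constructor above can be used (illustrative only;
# "path/to/m.model" is a placeholder path to a trained sentencepiece model file):
#
#     tokenizer_from_file = text.SentencePieceTokenizer("path/to/m.model",
#                                                       out_type=SPieceTokenizerOutType.STRING)
#     tokenizer_from_vocab = text.SentencePieceTokenizer(vocab,
#                                                        out_type=SPieceTokenizerOutType.INT)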


if platform.system().lower() != 'windows':
    class WhitespaceTokenizer(cde.WhitespaceTokenizerOp):
        """
        Tokenize a scalar tensor of UTF-8 string on ICU4C defined whitespaces, such as: ' ', '\\\\t', '\\\\r', '\\\\n'.

        Args:
            with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
            >>> tokenizer_op = text.WhitespaceTokenizer()
            >>> data1 = data1.map(operations=tokenizer_op)
            >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
            >>> # ["offsets_start", dtype=uint32],
            >>> # ["offsets_limit", dtype=uint32]}
            >>> tokenizer_op = text.WhitespaceTokenizer(True)
            >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
            >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
            >>>                   column_order=["token", "offsets_start", "offsets_limit"])
        """

        @check_with_offsets
        def __init__(self, with_offsets=False):
            self.with_offsets = with_offsets
            super().__init__(self.with_offsets)

    class UnicodeScriptTokenizer(cde.UnicodeScriptTokenizerOp):
        """
        Tokenize a scalar tensor of UTF-8 string on Unicode script boundaries.

        Args:
            keep_whitespace (bool, optional): Whether or not to emit whitespace tokens (default=False).
            with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
            >>> tokenizer_op = text.UnicodeScriptTokenizer(keep_whitespace=True, with_offsets=False)
            >>> data1 = data1.map(operations=tokenizer_op)
            >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
            >>> # ["offsets_start", dtype=uint32],
            >>> # ["offsets_limit", dtype=uint32]}
            >>> tokenizer_op = text.UnicodeScriptTokenizer(keep_whitespace=True, with_offsets=True)
            >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
            >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
            >>>                   column_order=["token", "offsets_start", "offsets_limit"])
        """

        @check_unicode_script_tokenizer
        def __init__(self, keep_whitespace=False, with_offsets=False):
            self.keep_whitespace = keep_whitespace
            self.with_offsets = with_offsets
            super().__init__(self.keep_whitespace, self.with_offsets)

    class CaseFold(cde.CaseFoldOp):
        """
        Apply case fold operation on utf-8 string tensor.

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> case_op = text.CaseFold()
            >>> data1 = data1.map(operations=case_op)
        """

    DE_C_INTER_NORMALIZE_FORM = {
        NormalizeForm.NONE: cde.NormalizeForm.DE_NORMALIZE_NONE,
        NormalizeForm.NFC: cde.NormalizeForm.DE_NORMALIZE_NFC,
        NormalizeForm.NFKC: cde.NormalizeForm.DE_NORMALIZE_NFKC,
        NormalizeForm.NFD: cde.NormalizeForm.DE_NORMALIZE_NFD,
        NormalizeForm.NFKD: cde.NormalizeForm.DE_NORMALIZE_NFKD
    }

    class NormalizeUTF8(cde.NormalizeUTF8Op):
        """
        Apply normalize operation on utf-8 string tensor.

        Args:
            normalize_form (NormalizeForm, optional): Valid values can be any of [NormalizeForm.NONE,
                NormalizeForm.NFC, NormalizeForm.NFKC, NormalizeForm.NFD,
                NormalizeForm.NFKD] (default=NormalizeForm.NFKC).
                See http://unicode.org/reports/tr15/ for details.

                - NormalizeForm.NONE, do nothing for input string tensor.
                - NormalizeForm.NFC, normalize with Normalization Form C.
                - NormalizeForm.NFKC, normalize with Normalization Form KC.
                - NormalizeForm.NFD, normalize with Normalization Form D.
                - NormalizeForm.NFKD, normalize with Normalization Form KD.

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> normalize_op = text.NormalizeUTF8(normalize_form=NormalizeForm.NFC)
            >>> data1 = data1.map(operations=normalize_op)
        """

        def __init__(self, normalize_form=NormalizeForm.NFKC):
            if not isinstance(normalize_form, NormalizeForm):
                raise TypeError("Wrong input type for normalization_form, should be NormalizeForm.")
            self.normalize_form = DE_C_INTER_NORMALIZE_FORM[normalize_form]
            super().__init__(self.normalize_form)
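
    # An illustrative note on the normalization forms (not executed here): a character such
    # as "é" can be stored either as one composed code point (NFC) or as "e" followed by a
    # combining accent (NFD); NormalizeUTF8(NormalizeForm.NFC) maps both spellings to the
    # composed form, while the compatibility forms NFKC/NFKD additionally fold characters
    # such as the ligature "ﬁ" into "fi".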

    class RegexReplace(cde.RegexReplaceOp):
        """
        Replace the parts of a UTF-8 string tensor that match the regular expression 'pattern' with 'replace'.

        See http://userguide.icu-project.org/strings/regexp for supported regex patterns.

        Args:
            pattern (str): The regex expression pattern.
            replace (str): The string used to replace the matched elements.
            replace_all (bool, optional): If False, only replace the first matched element;
                if True, replace all matched elements (default=True).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> pattern = 'Canada'
            >>> replace = 'China'
            >>> replace_op = text.RegexReplace(pattern, replace)
            >>> data1 = data1.map(operations=replace_op)
        """

        def __init__(self, pattern, replace, replace_all=True):
            self.pattern = pattern
            self.replace = replace
            self.replace_all = replace_all
            super().__init__(self.pattern, self.replace, self.replace_all)

    class RegexTokenizer(cde.RegexTokenizerOp):
        """
        Tokenize a scalar tensor of UTF-8 string by a regex expression pattern.

        See http://userguide.icu-project.org/strings/regexp for supported regex patterns.

        Args:
            delim_pattern (str): The pattern of regex delimiters.
                The original string will be split by matched elements.
            keep_delim_pattern (str, optional): The string matched by 'delim_pattern' can be kept as a token
                if it can be matched by 'keep_delim_pattern'. The default value is an empty string ('')
                which means that delimiters will not be kept as output tokens (default='').
            with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
            >>> tokenizer_op = text.RegexTokenizer(delim_pattern, keep_delim_pattern, with_offsets=False)
            >>> data1 = data1.map(operations=tokenizer_op)
            >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
            >>> # ["offsets_start", dtype=uint32],
            >>> # ["offsets_limit", dtype=uint32]}
            >>> tokenizer_op = text.RegexTokenizer(delim_pattern, keep_delim_pattern, with_offsets=True)
            >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
            >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
            >>>                   column_order=["token", "offsets_start", "offsets_limit"])
        """

        @check_regex_tokenizer
        def __init__(self, delim_pattern, keep_delim_pattern='', with_offsets=False):
            self.delim_pattern = delim_pattern
            self.keep_delim_pattern = keep_delim_pattern
            self.with_offsets = with_offsets
            super().__init__(self.delim_pattern, self.keep_delim_pattern, self.with_offsets)

    class BasicTokenizer(cde.BasicTokenizerOp):
        """
        Tokenize a scalar tensor of UTF-8 string by specific rules.

        Args:
            lower_case (bool, optional): If True, apply CaseFold, NormalizeUTF8 (NFD mode) and RegexReplace operations
                on the input text to fold the text to lower case and strip accent characters. If False, only apply
                the NormalizeUTF8 ('normalization_form' mode) operation on the input text (default=False).
            keep_whitespace (bool, optional): If True, the whitespace will be kept in the output tokens
                (default=False).
            normalization_form (NormalizeForm, optional): Used to specify a specific normalize mode. This is
                only effective when 'lower_case' is False. See NormalizeUTF8 for details (default=NormalizeForm.NONE).
            preserve_unused_token (bool, optional): If True, do not split special tokens like
                '[CLS]', '[SEP]', '[UNK]', '[PAD]', '[MASK]' (default=True).
            with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
            >>> tokenizer_op = text.BasicTokenizer(lower_case=False,
            >>>                                    keep_whitespace=False,
            >>>                                    normalization_form=NormalizeForm.NONE,
            >>>                                    preserve_unused_token=True,
            >>>                                    with_offsets=False)
            >>> data1 = data1.map(operations=tokenizer_op)
            >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
            >>> # ["offsets_start", dtype=uint32],
            >>> # ["offsets_limit", dtype=uint32]}
            >>> tokenizer_op = text.BasicTokenizer(lower_case=False,
            >>>                                    keep_whitespace=False,
            >>>                                    normalization_form=NormalizeForm.NONE,
            >>>                                    preserve_unused_token=True,
            >>>                                    with_offsets=True)
            >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
            >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
            >>>                   column_order=["token", "offsets_start", "offsets_limit"])
        """

        @check_basic_tokenizer
        def __init__(self, lower_case=False, keep_whitespace=False, normalization_form=NormalizeForm.NONE,
                     preserve_unused_token=True, with_offsets=False):
            if not isinstance(normalization_form, NormalizeForm):
                raise TypeError("Wrong input type for normalization_form, should be NormalizeForm.")
            self.lower_case = lower_case
            self.keep_whitespace = keep_whitespace
            self.normalization_form = DE_C_INTER_NORMALIZE_FORM[normalization_form]
            self.preserve_unused_token = preserve_unused_token
            self.with_offsets = with_offsets
            super().__init__(self.lower_case, self.keep_whitespace, self.normalization_form,
                             self.preserve_unused_token, self.with_offsets)

    class BertTokenizer(cde.BertTokenizerOp):
        """
        Tokenizer used for Bert text processing.

        Args:
            vocab (Vocab): A vocabulary object.
            suffix_indicator (str, optional): Used to show that the subword is the last part of a word
                (default='##').
            max_bytes_per_token (int, optional): Tokens exceeding this length will not be further split
                (default=100).
            unknown_token (str, optional): When a token cannot be found: if 'unknown_token' is an empty string,
                return the token directly, else return 'unknown_token' (default='[UNK]').
            lower_case (bool, optional): If True, apply CaseFold, NormalizeUTF8 (NFD mode) and RegexReplace operations
                on the input text to fold the text to lower case and strip accented characters. If False, only apply
                the NormalizeUTF8 ('normalization_form' mode) operation on the input text (default=False).
            keep_whitespace (bool, optional): If True, the whitespace will be kept in the output tokens
                (default=False).
            normalization_form (NormalizeForm, optional): Used to specify a specific normalize mode,
                only effective when 'lower_case' is False. See NormalizeUTF8 for details (default=NormalizeForm.NONE).
            preserve_unused_token (bool, optional): If True, do not split special tokens like
                '[CLS]', '[SEP]', '[UNK]', '[PAD]', '[MASK]' (default=True).
            with_offsets (bool, optional): Whether or not to output the offsets of tokens (default=False).

        Examples:
            >>> import mindspore.dataset.text as text
            >>>
            >>> # If with_offsets=False, the default output is one column {["text", dtype=str]}
            >>> tokenizer_op = text.BertTokenizer(vocab=vocab, suffix_indicator='##', max_bytes_per_token=100,
            >>>                                   unknown_token='[UNK]', lower_case=False, keep_whitespace=False,
            >>>                                   normalization_form=NormalizeForm.NONE, preserve_unused_token=True,
            >>>                                   with_offsets=False)
            >>> data1 = data1.map(operations=tokenizer_op)
            >>> # If with_offsets=True, then the output is three columns {["token", dtype=str],
            >>> # ["offsets_start", dtype=uint32],
            >>> # ["offsets_limit", dtype=uint32]}
            >>> tokenizer_op = text.BertTokenizer(vocab=vocab, suffix_indicator='##', max_bytes_per_token=100,
            >>>                                   unknown_token='[UNK]', lower_case=False, keep_whitespace=False,
            >>>                                   normalization_form=NormalizeForm.NONE, preserve_unused_token=True,
            >>>                                   with_offsets=True)
            >>> data2 = data2.map(operations=tokenizer_op, input_columns=["text"],
            >>>                   output_columns=["token", "offsets_start", "offsets_limit"],
            >>>                   column_order=["token", "offsets_start", "offsets_limit"])
        """

        @check_bert_tokenizer
        def __init__(self, vocab, suffix_indicator='##', max_bytes_per_token=100, unknown_token='[UNK]',
                     lower_case=False, keep_whitespace=False, normalization_form=NormalizeForm.NONE,
                     preserve_unused_token=True, with_offsets=False):
            if not isinstance(normalization_form, NormalizeForm):
                raise TypeError("Wrong input type for normalization_form, should be NormalizeForm.")
            self.vocab = vocab
            self.suffix_indicator = suffix_indicator
            self.max_bytes_per_token = max_bytes_per_token
            self.unknown_token = unknown_token
            self.lower_case = lower_case
            self.keep_whitespace = keep_whitespace
            self.normalization_form = DE_C_INTER_NORMALIZE_FORM[normalization_form]
            self.preserve_unused_token = preserve_unused_token
            self.with_offsets = with_offsets
            super().__init__(self.vocab, self.suffix_indicator, self.max_bytes_per_token, self.unknown_token,
                             self.lower_case, self.keep_whitespace, self.normalization_form,
                             self.preserve_unused_token, self.with_offsets)


class TruncateSequencePair(cde.TruncateSequencePairOp):
    """
    Truncate a pair of rank-1 tensors such that the total length is less than max_length.

    This operation takes two input tensors and returns two output Tensors.

    Args:
        max_length (int): Maximum length required.

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> # Data before
        >>> # |  col1   |  col2   |
        >>> # +---------+---------+
        >>> # | [1,2,3] |  [4,5]  |
        >>> # +---------+---------+
        >>> data1 = data1.map(operations=text.TruncateSequencePair(4))
        >>> # Data after
        >>> # |  col1   |  col2   |
        >>> # +---------+---------+
        >>> # |  [1,2]  |  [4,5]  |
        >>> # +---------+---------+
    """

    @check_pair_truncate
    def __init__(self, max_length):
        super().__init__(max_length)


class ToNumber(cde.ToNumberOp):
    """
    Tensor operation to convert every element of a string tensor to a number.

    Strings are cast according to the rules specified in the following links:
    https://en.cppreference.com/w/cpp/string/basic_string/stof,
    https://en.cppreference.com/w/cpp/string/basic_string/stoul,
    except that any strings which represent negative numbers cannot be cast to an
    unsigned integer type.

    Args:
        data_type (mindspore.dtype): The mindspore.dtype to be cast to. Must be
            a numeric type.

    Raises:
        RuntimeError: If strings cannot be cast, or are out of range after being cast.

    Examples:
        >>> import mindspore.dataset.text as text
        >>> import mindspore.common.dtype as mstype
        >>>
        >>> to_number_op = text.ToNumber(mstype.int8)
        >>> data1 = data1.map(operations=to_number_op)
    """

    @check_to_number
    def __init__(self, data_type):
        data_type = mstype_to_detype(data_type)
        self.data_type = str(data_type)
        super().__init__(data_type)


class PythonTokenizer:
    """
    Callable class to be used for user-defined string tokenizer.

    Args:
        tokenizer (Callable): Python function that takes a `str` and returns a list of `str` as tokens.

    Examples:
        >>> import mindspore.dataset.text as text
        >>>
        >>> def my_tokenizer(line):
        >>>     return line.split()
        >>>
        >>> data1 = data1.map(operations=text.PythonTokenizer(my_tokenizer))
    """

    @check_python_tokenizer
    def __init__(self, tokenizer):
        self.tokenizer = np.vectorize(lambda x: np.array(tokenizer(x), dtype='U'), signature='()->(n)')

    def __call__(self, in_array):
        in_array = to_str(in_array)
        tokens = self.tokenizer(in_array)
        return tokens
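
# A commented end-to-end sketch tying PythonTokenizer and Lookup together (illustrative only;
# it assumes the `ds` / `text` imports and the `dataset_file` placeholder from the module
# docstring, and a vocabulary that covers every word in the file):
#
#     vocab = text.Vocab.from_list(["welcome", "to", "shenzhen"])
#     data = ds.TextFileDataset(dataset_file, shuffle=False)
#     data = data.map(operations=[text.PythonTokenizer(lambda line: line.split()),
#                                 text.Lookup(vocab)])
#     # each text line is split into words and each word is mapped to its id in vocab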