utils.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. """
  15. The module text.utils provides some general methods for nlp text processing.
  16. For example, you can use Vocab to build a dictionary,
  17. use to_bytes and to_str to encode and decode strings into a specified format.
  18. """
from enum import IntEnum
import copy
import numpy as np
import mindspore._c_dataengine as cde
from .validators import check_from_file, check_from_list, check_from_dict, check_from_dataset


class Vocab(cde.Vocab):
    """
    Vocab object that is used to look up a word.

    It contains a map that maps each word (str) to an id (int).
    """
    @classmethod
    @check_from_dataset
    def from_dataset(cls, dataset, columns=None, freq_range=None, top_k=None, special_tokens=None,
                     special_first=None):
  33. """
  34. Build a vocab from a dataset.
  35. This would collect all unique words in a dataset and return a vocab within
  36. the frequency range specified by user in freq_range. User would be warned if no words fall into the frequency.
  37. Words in vocab are ordered from highest frequency to lowest frequency. Words with the same frequency would be
  38. ordered lexicographically.
  39. Args:
  40. dataset(Dataset): dataset to build vocab from.
  41. columns(list of str, optional): column names to get words from. It can be a list of column names.
  42. (default=None, where all columns will be used. If any column isn't string type, will return error).
  43. freq_range(tuple, optional): A tuple of integers (min_frequency, max_frequency). Words within the frequency
  44. range would be kept. 0 <= min_frequency <= max_frequency <= total_words. min_frequency=0 is the same as
  45. min_frequency=1. max_frequency > total_words is the same as max_frequency = total_words.
  46. min_frequency/max_frequency can be None, which corresponds to 0/total_words separately
  47. (default=None, all words are included).
  48. top_k(int, optional): top_k > 0. Number of words to be built into vocab. top_k most frequent words are
  49. taken. top_k is taken after freq_range. If not enough top_k, all words will be taken (default=None,
  50. all words are included).
  51. special_tokens(list, optional): a list of strings, each one is a special token. for example
  52. special_tokens=["<pad>","<unk>"] (default=None, no special tokens will be added).
  53. special_first(bool, optional): whether special_tokens will be prepended/appended to vocab. If special_tokens
  54. is specified and special_first is set to None, special_tokens will be prepended (default=None).
  55. Returns:
  56. Vocab, Vocab object built from dataset.
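
        Examples:
            >>> # A hedged usage sketch; `text_dataset` is an assumed, pre-built
            >>> # dataset with a string column named "text", not defined in this file.
            >>> vocab = Vocab.from_dataset(text_dataset, columns=["text"],
            ...                            freq_range=(2, None), top_k=5000,
            ...                            special_tokens=["<pad>", "<unk>"],
            ...                            special_first=True)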
  57. """
        vocab = Vocab()
        root = copy.deepcopy(dataset).build_vocab(vocab, columns, freq_range, top_k, special_tokens, special_first)
        # Iterating the pipeline drives the build_vocab sink so that `vocab` is
        # populated; the iterator is not expected to yield any data.
        for d in root.create_dict_iterator():
            if d is not None:
                raise ValueError("from_dataset should receive data other than None.")
        return vocab
    @classmethod
    @check_from_list
    def from_list(cls, word_list, special_tokens=None, special_first=None):
        """
        Build a vocab object from a list of words.

        Args:
            word_list (list): a list of strings, where each element is a word of type string.
            special_tokens (list, optional): a list of strings, each one a special token, for example
                special_tokens=["<pad>", "<unk>"] (default=None, no special tokens will be added).
            special_first (bool, optional): whether special_tokens will be prepended or appended to the vocab.
                If special_tokens is specified and special_first is set to None, special_tokens will be
                prepended (default=None).
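
        Examples:
            >>> # A minimal sketch: build a vocab from an in-memory word list.
            >>> vocab = Vocab.from_list(["home", "behind", "the", "world"],
            ...                         special_tokens=["<pad>", "<unk>"],
            ...                         special_first=True)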
  75. """
        return super().from_list(word_list, special_tokens, special_first)
    @classmethod
    @check_from_file
    def from_file(cls, file_path, delimiter=None, vocab_size=None, special_tokens=None, special_first=None):
        """
        Build a vocab object from a file.

        Args:
            file_path (str): path to the file which contains the vocab list.
            delimiter (str, optional): a delimiter to break up each line in the file; the first element is
                taken to be the word (default=None).
            vocab_size (int, optional): number of words to read from file_path (default=None, all words are taken).
            special_tokens (list, optional): a list of strings, each one a special token, for example
                special_tokens=["<pad>", "<unk>"] (default=None, no special tokens will be added).
            special_first (bool, optional): whether special_tokens will be prepended or appended to the vocab.
                If special_tokens is specified and special_first is set to None, special_tokens will be
                prepended (default=None).
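
        Examples:
            >>> # A hedged sketch; "vocab.txt" is an assumed file with one
            >>> # comma-delimited entry per line, the first field being the word.
            >>> vocab = Vocab.from_file("vocab.txt", delimiter=",",
            ...                         special_tokens=["<unk>"], special_first=True)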
  92. """
        return super().from_file(file_path, delimiter, vocab_size, special_tokens, special_first)
    @classmethod
    @check_from_dict
    def from_dict(cls, word_dict):
        """
        Build a vocab object from a dict.

        Args:
            word_dict (dict): dict of word, id pairs, where the word should be a str and the id an int. The id
                is recommended to start from 0 and be continuous. A ValueError will be raised if the id is
                negative.
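
        Examples:
            >>> # A minimal sketch: ids start from 0 and are continuous.
            >>> vocab = Vocab.from_dict({"home": 0, "behind": 1, "the": 2, "world": 3})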
  102. """
        return super().from_dict(word_dict)


def to_str(array, encoding='utf8'):
    """
    Convert a numpy array of `bytes` to an array of `str` by decoding each element based on charset `encoding`.

    Args:
        array (numpy.ndarray): Array of type `bytes` representing strings.
        encoding (str): Indicates the charset for decoding.

    Returns:
        numpy.ndarray, numpy array of `str`.
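
    Examples:
        >>> # A minimal sketch: decode a bytes array back to str.
        >>> import numpy as np
        >>> data = np.array([b"hello", b"world"])
        >>> to_str(data)
        array(['hello', 'world'], dtype='<U5')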
  112. """
    if not isinstance(array, np.ndarray):
        raise ValueError('input should be a numpy array.')
    return np.char.decode(array, encoding)


def to_bytes(array, encoding='utf8'):
    """
    Convert a numpy array of `str` to an array of `bytes` by encoding each element based on charset `encoding`.

    Args:
        array (numpy.ndarray): Array of type `str` representing strings.
        encoding (str): Indicates the charset for encoding.

    Returns:
        numpy.ndarray, numpy array of `bytes`.
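
    Examples:
        >>> # A minimal sketch: encode a str array into bytes.
        >>> import numpy as np
        >>> data = np.array(["hello", "world"])
        >>> to_bytes(data)
        array([b'hello', b'world'], dtype='|S5')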
  124. """
    if not isinstance(array, np.ndarray):
        raise ValueError('input should be a numpy array.')
    return np.char.encode(array, encoding)


class JiebaMode(IntEnum):
    """An enumeration for JiebaTokenizer, effective enumeration types are MIX, MP, HMM."""
    MIX = 0
    MP = 1
    HMM = 2
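

# Illustrative sketch: JiebaMode selects the segmentation algorithm used by
# JiebaTokenizer (assumed to be available in this package), e.g.
#   tokenizer = JiebaTokenizer(hmm_path, mp_path, mode=JiebaMode.MP)
# where hmm_path and mp_path point to the jieba HMM and MP dictionary files.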


class NormalizeForm(IntEnum):
    """An enumeration for NormalizeUTF8, effective enumeration types are NONE, NFC, NFKC, NFD, NFKD."""
    NONE = 0
    NFC = 1
    NFKC = 2
    NFD = 3
    NFKD = 4
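

# Illustrative sketch: NormalizeForm selects the Unicode normalization applied
# by NormalizeUTF8 (assumed to be available in this package), e.g.
#   normalize_op = NormalizeUTF8(normalize_form=NormalizeForm.NFKC)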