text.cc

/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/include/dataset/text.h"

#include <unistd.h>

#include <fstream>
#include <regex>

#include "minddata/dataset/core/type_id.h"
#include "minddata/dataset/text/ir/kernels/text_ir.h"
#include "mindspore/core/ir/dtype/type_id.h"
#include "utils/file_utils.h"

namespace mindspore {
namespace dataset {
// Transform operations for text.
namespace text {
constexpr size_t size_two = 2;
constexpr size_t size_three = 3;
constexpr int64_t value_one = 1;
constexpr int64_t value_two = 2;
constexpr size_t kMaxLoggedRows = 10;

// FUNCTIONS TO CREATE TEXT OPERATIONS
// (In alphabetical order)
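// Every transform below follows the same PIMPL-style pattern: the public class
// stores its constructor arguments in a nested Data struct, and Parse()
// materializes the corresponding *Operation IR node from that stored state.
// A minimal usage sketch (pipeline wiring lives elsewhere):
//   text::CaseFold case_fold;
//   std::shared_ptr<TensorOperation> op = case_fold.Parse();
// Note: the ops inside the _WIN32 guards are compiled out on Windows; they
// depend on tokenizer kernels (presumably the ICU-based ones) that are not
// built on that platform.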
#ifndef _WIN32
// BasicTokenizer
struct BasicTokenizer::Data {
  Data(bool lower_case, bool keep_whitespace, const NormalizeForm normalize_form, bool preserve_unused_token,
       bool with_offsets)
      : lower_case_(lower_case),
        keep_whitespace_(keep_whitespace),
        normalize_form_(normalize_form),
        preserve_unused_token_(preserve_unused_token),
        with_offsets_(with_offsets) {}
  bool lower_case_;
  bool keep_whitespace_;
  NormalizeForm normalize_form_;
  bool preserve_unused_token_;
  bool with_offsets_;
};

BasicTokenizer::BasicTokenizer(bool lower_case, bool keep_whitespace, const NormalizeForm normalize_form,
                               bool preserve_unused_token, bool with_offsets)
    : data_(std::make_shared<Data>(lower_case, keep_whitespace, normalize_form, preserve_unused_token, with_offsets)) {}

std::shared_ptr<TensorOperation> BasicTokenizer::Parse() {
  return std::make_shared<BasicTokenizerOperation>(data_->lower_case_, data_->keep_whitespace_, data_->normalize_form_,
                                                   data_->preserve_unused_token_, data_->with_offsets_);
}

// BertTokenizer
struct BertTokenizer::Data {
  Data(const std::shared_ptr<Vocab> &vocab, const std::vector<char> &suffix_indicator, int32_t max_bytes_per_token,
       const std::vector<char> &unknown_token, bool lower_case, bool keep_whitespace,
       const NormalizeForm normalize_form, bool preserve_unused_token, bool with_offsets)
      : vocab_(vocab),
        suffix_indicator_(CharToString(suffix_indicator)),
        max_bytes_per_token_(max_bytes_per_token),
        unknown_token_(CharToString(unknown_token)),
        lower_case_(lower_case),
        keep_whitespace_(keep_whitespace),
        normalize_form_(normalize_form),
        preserve_unused_token_(preserve_unused_token),
        with_offsets_(with_offsets) {}
  std::shared_ptr<Vocab> vocab_;
  std::string suffix_indicator_;
  int32_t max_bytes_per_token_;
  std::string unknown_token_;
  bool lower_case_;
  bool keep_whitespace_;
  NormalizeForm normalize_form_;
  bool preserve_unused_token_;
  bool with_offsets_;
};

BertTokenizer::BertTokenizer(const std::shared_ptr<Vocab> &vocab, const std::vector<char> &suffix_indicator,
                             int32_t max_bytes_per_token, const std::vector<char> &unknown_token, bool lower_case,
                             bool keep_whitespace, const NormalizeForm normalize_form, bool preserve_unused_token,
                             bool with_offsets)
    : data_(std::make_shared<Data>(vocab, suffix_indicator, max_bytes_per_token, unknown_token, lower_case,
                                   keep_whitespace, normalize_form, preserve_unused_token, with_offsets)) {}

std::shared_ptr<TensorOperation> BertTokenizer::Parse() {
  return std::make_shared<BertTokenizerOperation>(
    data_->vocab_, data_->suffix_indicator_, data_->max_bytes_per_token_, data_->unknown_token_, data_->lower_case_,
    data_->keep_whitespace_, data_->normalize_form_, data_->preserve_unused_token_, data_->with_offsets_);
}

// CaseFold
CaseFold::CaseFold() = default;

std::shared_ptr<TensorOperation> CaseFold::Parse() { return std::make_shared<CaseFoldOperation>(); }
#endif
// JiebaTokenizer
struct JiebaTokenizer::Data {
  Data(const std::vector<char> &hmm_path, const std::vector<char> &mp_path, const JiebaMode &mode, bool with_offsets)
      : hmm_path_(CharToString(hmm_path)),
        mp_path_(CharToString(mp_path)),
        mode_(mode),
        with_offsets_(with_offsets),
        words_list_({}) {}
  std::string hmm_path_;
  std::string mp_path_;
  JiebaMode mode_;
  bool with_offsets_;
  std::vector<std::pair<std::string, int64_t>> words_list_;
};

JiebaTokenizer::JiebaTokenizer(const std::vector<char> &hmm_path, const std::vector<char> &mp_path,
                               const JiebaMode &mode, bool with_offsets)
    : data_(std::make_shared<Data>(hmm_path, mp_path, mode, with_offsets)) {}

std::shared_ptr<TensorOperation> JiebaTokenizer::Parse() {
  std::shared_ptr<JiebaTokenizerOperation> jieba_tokenizer =
    std::make_shared<JiebaTokenizerOperation>(data_->hmm_path_, data_->mp_path_, data_->mode_, data_->with_offsets_);
  for (auto &word : data_->words_list_) {
    Status rc = jieba_tokenizer->AddWord(word.first, word.second);
    if (rc.IsError()) {
      MS_LOG(ERROR) << rc;
      return {};
    }
  }
  return jieba_tokenizer;
}
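
// Words registered through AddWordChar/AddDictChar are buffered in
// Data::words_list_ and replayed onto a fresh IR node on every Parse() call,
// so the tokenizer object can be parsed more than once without losing them.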
Status JiebaTokenizer::AddWordChar(const std::vector<char> &word, int64_t freq) {
  if (word.empty()) {
    std::string err_msg = "JiebaTokenizer : The parameter word is empty or not provided.";
    LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  if (freq < 0) {
    std::string err_msg = "JiebaTokenizer : The parameter freq must be greater than or equal to 0.";
    LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  (void)data_->words_list_.emplace_back(CharToString(word), freq);
  return Status::OK();
}

Status JiebaTokenizer::AddDictChar(const std::vector<std::pair<std::vector<char>, int64_t>> &user_dict) {
  for (auto &word_freq_pair : user_dict) {
    RETURN_IF_NOT_OK(AddWordChar(word_freq_pair.first, word_freq_pair.second));
  }
  return Status::OK();
}

Status JiebaTokenizer::AddDictChar(const std::vector<char> &file_path) {
  std::vector<std::pair<std::string, int64_t>> user_dict;
  RETURN_IF_NOT_OK(ParserFile(CharToString(file_path), &user_dict));
  RETURN_IF_NOT_OK(AddDictChar(PairStringInt64ToPairCharInt64(user_dict)));
  return Status::OK();
}
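
// A dictionary file accepted by the file-path overload above holds one entry
// per line; for example (hypothetical contents):
//   深度学习 3
//   模型
// The optional trailing integer is the word frequency (0 when omitted), as
// enforced by the regex in ParserFile below.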

Status JiebaTokenizer::ParserFile(const std::string &file_path,
                                  std::vector<std::pair<std::string, int64_t>> *const user_dict) {
  RETURN_UNEXPECTED_IF_NULL(user_dict);
  auto realpath = FileUtils::GetRealPath(file_path.data());
  if (!realpath.has_value()) {
    std::string err_msg = "Get real path failed, path: " + file_path;
    LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  std::ifstream ifs(realpath.value());
  if (!ifs) {
    std::string err_msg = "JiebaTokenizer : Failed to load dictionary from the input file, check the file path.";
    LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  std::string line;
  while (std::getline(ifs, line)) {
    if (line.empty()) {
      continue;
    }
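    // Each non-empty line must look like "<word>" or "<word> <freq>": the
    // first capture group is the word, the optional second group is a decimal
    // frequency. Lines that do not match are silently skipped, and a missing
    // frequency defaults to 0.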
    std::regex regex("^\\s*([^\\s*]+?)\\s*([0-9]+)?\\s*$");
    std::smatch tokens;
    if (std::regex_match(line, tokens, regex)) {
      if (tokens.size() == size_two) {
        (void)user_dict->emplace_back(tokens.str(value_one), 0);
      } else if (tokens.size() == size_three) {
        (void)user_dict->emplace_back(tokens.str(value_one), strtoll(tokens.str(value_two).c_str(), nullptr, 0));
      } else {
        continue;
      }
    } else {
      continue;
    }
  }
  ifs.close();
  MS_LOG(INFO) << "JiebaTokenizer::AddDict: The size of the user input dictionary is: " << user_dict->size();
  MS_LOG(INFO) << "Valid rows in the input dictionary (at most the first " << kMaxLoggedRows << " rows are shown):";
  for (std::size_t i = 0; i != user_dict->size(); ++i) {
    if (i >= kMaxLoggedRows) break;
    MS_LOG(INFO) << user_dict->at(i).first << " " << user_dict->at(i).second;
  }
  return Status::OK();
}

// Lookup
struct Lookup::Data {
  Data(const std::shared_ptr<Vocab> &vocab, const std::optional<std::vector<char>> &unknown_token,
       mindspore::DataType data_type)
      : vocab_(vocab), data_type_(dataset::MSTypeToDEType(static_cast<TypeId>(data_type))) {
    if (unknown_token == std::nullopt) {
      unknown_token_ = std::nullopt;
    } else {
      unknown_token_ = std::string(unknown_token->begin(), unknown_token->end());
    }
  }
  std::shared_ptr<Vocab> vocab_;
  std::optional<std::string> unknown_token_;
  dataset::DataType data_type_;
};

Lookup::Lookup(const std::shared_ptr<Vocab> &vocab, const std::optional<std::vector<char>> &unknown_token,
               mindspore::DataType data_type)
    : data_(std::make_shared<Data>(vocab, unknown_token, data_type)) {
  data_->data_type_ = dataset::MSTypeToDEType(static_cast<TypeId>(data_type));
}

std::shared_ptr<TensorOperation> Lookup::Parse() {
  return std::make_shared<LookupOperation>(data_->vocab_, data_->unknown_token_, data_->data_type_);
}

// Ngram
struct Ngram::Data {
  Data(const std::vector<int32_t> &ngrams, const std::pair<std::vector<char>, int32_t> &left_pad,
       const std::pair<std::vector<char>, int32_t> &right_pad, const std::vector<char> &separator)
      : ngrams_(ngrams),
        left_pad_(PairCharToString(left_pad)),
        right_pad_(PairCharToString(right_pad)),
        separator_(CharToString(separator)) {}
  std::vector<int32_t> ngrams_;
  std::pair<std::string, int32_t> left_pad_;
  std::pair<std::string, int32_t> right_pad_;
  std::string separator_;
};

Ngram::Ngram(const std::vector<int32_t> &ngrams, const std::pair<std::vector<char>, int32_t> &left_pad,
             const std::pair<std::vector<char>, int32_t> &right_pad, const std::vector<char> &separator)
    : data_(std::make_shared<Data>(ngrams, left_pad, right_pad, separator)) {}

std::shared_ptr<TensorOperation> Ngram::Parse() {
  return std::make_shared<NgramOperation>(data_->ngrams_, data_->left_pad_, data_->right_pad_, data_->separator_);
}

#ifndef _WIN32
// NormalizeUTF8
struct NormalizeUTF8::Data {
  explicit Data(NormalizeForm normalize_form) : normalize_form_(normalize_form) {}
  NormalizeForm normalize_form_;
};

NormalizeUTF8::NormalizeUTF8(NormalizeForm normalize_form) : data_(std::make_shared<Data>(normalize_form)) {}

std::shared_ptr<TensorOperation> NormalizeUTF8::Parse() {
  return std::make_shared<NormalizeUTF8Operation>(data_->normalize_form_);
}

// RegexReplace
struct RegexReplace::Data {
  Data(const std::vector<char> &pattern, const std::vector<char> &replace, bool replace_all)
      : pattern_(CharToString(pattern)), replace_(CharToString(replace)), replace_all_(replace_all) {}
  std::string pattern_;
  std::string replace_;
  bool replace_all_;
};

RegexReplace::RegexReplace(const std::vector<char> &pattern, const std::vector<char> &replace, bool replace_all)
    : data_(std::make_shared<Data>(pattern, replace, replace_all)) {}

std::shared_ptr<TensorOperation> RegexReplace::Parse() {
  return std::make_shared<RegexReplaceOperation>(data_->pattern_, data_->replace_, data_->replace_all_);
}

// RegexTokenizer
struct RegexTokenizer::Data {
  Data(const std::vector<char> &delim_pattern, const std::vector<char> &keep_delim_pattern, bool with_offsets)
      : delim_pattern_(CharToString(delim_pattern)),
        keep_delim_pattern_(CharToString(keep_delim_pattern)),
        with_offsets_(with_offsets) {}
  std::string delim_pattern_;
  std::string keep_delim_pattern_;
  bool with_offsets_;
};

RegexTokenizer::RegexTokenizer(const std::vector<char> &delim_pattern, const std::vector<char> &keep_delim_pattern,
                               bool with_offsets)
    : data_(std::make_shared<Data>(delim_pattern, keep_delim_pattern, with_offsets)) {}

std::shared_ptr<TensorOperation> RegexTokenizer::Parse() {
  return std::make_shared<RegexTokenizerOperation>(data_->delim_pattern_, data_->keep_delim_pattern_,
                                                   data_->with_offsets_);
}
#endif

// SentencePieceTokenizer
struct SentencePieceTokenizer::Data {
  Data(const std::shared_ptr<SentencePieceVocab> &vocab, SPieceTokenizerOutType out_type)
      : vocab_(vocab), vocab_path_(""), out_type_(out_type) {}
  Data(const std::vector<char> &vocab_path, SPieceTokenizerOutType out_type)
      : vocab_(nullptr), vocab_path_(CharToString(vocab_path)), out_type_(out_type) {}
  std::shared_ptr<SentencePieceVocab> vocab_;
  std::string vocab_path_;
  SPieceTokenizerOutType out_type_;
};
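
// SentencePieceTokenizer supports two construction modes: from an in-memory
// SentencePieceVocab, or from a vocab file on disk. Parse() dispatches on
// which one was supplied (a non-null vocab_ takes precedence).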
SentencePieceTokenizer::SentencePieceTokenizer(const std::shared_ptr<SentencePieceVocab> &vocab,
                                               SPieceTokenizerOutType out_type)
    : data_(std::make_shared<Data>(vocab, out_type)) {}

SentencePieceTokenizer::SentencePieceTokenizer(const std::vector<char> &vocab_path, SPieceTokenizerOutType out_type)
    : data_(std::make_shared<Data>(vocab_path, out_type)) {}

std::shared_ptr<TensorOperation> SentencePieceTokenizer::Parse() {
  if (data_->vocab_ != nullptr) {
    return std::make_shared<SentencePieceTokenizerOperation>(data_->vocab_, data_->out_type_);
  } else {
    return std::make_shared<SentencePieceTokenizerOperation>(data_->vocab_path_, data_->out_type_);
  }
}

// SlidingWindow
struct SlidingWindow::Data {
  Data(const int32_t width, const int32_t axis) : width_(width), axis_(axis) {}
  int32_t width_;
  int32_t axis_;
};

SlidingWindow::SlidingWindow(const int32_t width, const int32_t axis) : data_(std::make_shared<Data>(width, axis)) {}

std::shared_ptr<TensorOperation> SlidingWindow::Parse() {
  return std::make_shared<SlidingWindowOperation>(data_->width_, data_->axis_);
}

// ToNumber
struct ToNumber::Data {
  dataset::DataType data_type_;
};

ToNumber::ToNumber(mindspore::DataType data_type) : data_(std::make_shared<Data>()) {
  data_->data_type_ = dataset::MSTypeToDEType(static_cast<TypeId>(data_type));
}

std::shared_ptr<TensorOperation> ToNumber::Parse() { return std::make_shared<ToNumberOperation>(data_->data_type_); }

// ToVectors
struct ToVectors::Data {
  Data(const std::shared_ptr<Vectors> &vectors, const std::vector<float> &unk_init, bool lower_case_backup)
      : vectors_(vectors), unk_init_(unk_init), lower_case_backup_(lower_case_backup) {}
  std::shared_ptr<Vectors> vectors_;
  std::vector<float> unk_init_;
  bool lower_case_backup_;
};

ToVectors::ToVectors(const std::shared_ptr<Vectors> &vectors, const std::vector<float> &unk_init,
                     bool lower_case_backup)
    : data_(std::make_shared<Data>(vectors, unk_init, lower_case_backup)) {}

std::shared_ptr<TensorOperation> ToVectors::Parse() {
  return std::make_shared<ToVectorsOperation>(data_->vectors_, data_->unk_init_, data_->lower_case_backup_);
}

// TruncateSequencePair
struct TruncateSequencePair::Data {
  explicit Data(int32_t max_length) : max_length_(max_length) {}
  int32_t max_length_;
};

TruncateSequencePair::TruncateSequencePair(int32_t max_length) : data_(std::make_shared<Data>(max_length)) {}

std::shared_ptr<TensorOperation> TruncateSequencePair::Parse() {
  return std::make_shared<TruncateSequencePairOperation>(data_->max_length_);
}

// UnicodeCharTokenizer
struct UnicodeCharTokenizer::Data {
  explicit Data(bool with_offsets) : with_offsets_(with_offsets) {}
  bool with_offsets_;
};

UnicodeCharTokenizer::UnicodeCharTokenizer(bool with_offsets) : data_(std::make_shared<Data>(with_offsets)) {}

std::shared_ptr<TensorOperation> UnicodeCharTokenizer::Parse() {
  return std::make_shared<UnicodeCharTokenizerOperation>(data_->with_offsets_);
}

// WordpieceTokenizer
struct WordpieceTokenizer::Data {
  Data(const std::shared_ptr<Vocab> &vocab, const std::vector<char> &suffix_indicator, int32_t max_bytes_per_token,
       const std::vector<char> &unknown_token, bool with_offsets)
      : vocab_(vocab),
        suffix_indicator_(CharToString(suffix_indicator)),
        max_bytes_per_token_(max_bytes_per_token),
        unknown_token_(CharToString(unknown_token)),
        with_offsets_(with_offsets) {}
  std::shared_ptr<Vocab> vocab_;
  std::string suffix_indicator_;
  int32_t max_bytes_per_token_;
  std::string unknown_token_;
  bool with_offsets_;
};

WordpieceTokenizer::WordpieceTokenizer(const std::shared_ptr<Vocab> &vocab, const std::vector<char> &suffix_indicator,
                                       int32_t max_bytes_per_token, const std::vector<char> &unknown_token,
                                       bool with_offsets)
    : data_(std::make_shared<Data>(vocab, suffix_indicator, max_bytes_per_token, unknown_token, with_offsets)) {}

std::shared_ptr<TensorOperation> WordpieceTokenizer::Parse() {
  return std::make_shared<WordpieceTokenizerOperation>(
    data_->vocab_, data_->suffix_indicator_, data_->max_bytes_per_token_, data_->unknown_token_, data_->with_offsets_);
}

#ifndef _WIN32
// UnicodeScriptTokenizer
struct UnicodeScriptTokenizer::Data {
  Data(bool keep_whitespace, bool with_offsets) : keep_whitespace_(keep_whitespace), with_offsets_(with_offsets) {}
  bool keep_whitespace_;
  bool with_offsets_;
};

UnicodeScriptTokenizer::UnicodeScriptTokenizer(bool keep_whitespace, bool with_offsets)
    : data_(std::make_shared<Data>(keep_whitespace, with_offsets)) {}

std::shared_ptr<TensorOperation> UnicodeScriptTokenizer::Parse() {
  return std::make_shared<UnicodeScriptTokenizerOperation>(data_->keep_whitespace_, data_->with_offsets_);
}

// WhitespaceTokenizer
struct WhitespaceTokenizer::Data {
  explicit Data(bool with_offsets) : with_offsets_(with_offsets) {}
  bool with_offsets_;
};

WhitespaceTokenizer::WhitespaceTokenizer(bool with_offsets) : data_(std::make_shared<Data>(with_offsets)) {}

std::shared_ptr<TensorOperation> WhitespaceTokenizer::Parse() {
  return std::make_shared<WhitespaceTokenizerOperation>(data_->with_offsets_);
}
#endif
}  // namespace text
}  // namespace dataset
}  // namespace mindspore