diff --git a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc
index 9d3a17b559..eaf8378639 100644
--- a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc
+++ b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc
@@ -74,6 +74,10 @@ Status BuildSentenceVocabNode::ValidateParams() {
     RETURN_STATUS_SYNTAX_ERROR(err_msg);
   }
 
+  if (!col_names_.empty()) {
+    RETURN_IF_NOT_OK(ValidateDatasetColumnParam("BuildVocabNode", "columns", col_names_));
+  }
+
   return Status::OK();
 }
 
diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/lite_mat.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/lite_mat.cc
index 45c31406e2..64734274dc 100644
--- a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/lite_mat.cc
+++ b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/lite_mat.cc
@@ -198,8 +198,7 @@ void LiteMat::Init(int width, int height, void *p_data, LDataType data_type) {
   c_step_ = height_ * width_;
   size_ = c_step_ * channel_ * elem_size_;
   data_ptr_ = p_data;
-  ref_count_ = new int[1];
-  *ref_count_ = 0;
+  ref_count_ = nullptr;
 }
 
 void LiteMat::Init(int width, int height, int channel, LDataType data_type) {
@@ -227,8 +226,7 @@ void LiteMat::Init(int width, int height, int channel, void *p_data, LDataType d
   c_step_ = height_ * width_;
   size_ = c_step_ * channel_ * elem_size_;
   data_ptr_ = p_data;
-  ref_count_ = new int[1];
-  *ref_count_ = 0;
+  ref_count_ = nullptr;
 }
 
 bool LiteMat::IsEmpty() const { return data_ptr_ == 0 || data_ptr_ == nullptr || c_step_ * channel_ == 0; }
diff --git a/mindspore/dataset/text/transforms.py b/mindspore/dataset/text/transforms.py
index 5d3e160a6c..a8ec4ff560 100644
--- a/mindspore/dataset/text/transforms.py
+++ b/mindspore/dataset/text/transforms.py
@@ -76,7 +76,7 @@ class Lookup(cde.LookupOp):
         >>> # Load vocabulary from list
         >>> vocab = text.Vocab.from_list(['深', '圳', '欢', '迎', '您'])
         >>> # Use Lookup operator to map tokens to ids
-        >>> lookup = text.Lookup(vocab, "")
+        >>> lookup = text.Lookup(vocab)
         >>> data1 = data1.map(operations=[lookup])
     """
 