Merge pull request !7623 from xulei/lite_test0905
@@ -306,6 +306,7 @@ checkopts()
     T)
       check_on_off $OPTARG T
       SUPPORT_TRAIN=$OPTARG
+      COMPILE_MINDDATA_LITE="full"
       echo "support train on device "
       ;;
     A)
@@ -706,6 +706,8 @@ Status TensorVectorToBatchTensor(const std::vector<std::shared_ptr<Tensor>> &inp
   }
   return Status::OK();
 }
+#ifndef ENABLE_ANDROID
 template <typename T>
 struct UniqueOpHashMap {
   using map_type = std::unordered_map<T, int32_t>;
@@ -778,6 +780,7 @@ Status Unique(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
   }
   return Status::OK();
 }
+#endif
 }  // namespace dataset
 }  // namespace mindspore
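For reference, the Unique helper that these guards compile out on Android builds returns three tensors: the unique values, the index of each input element within that unique list, and the per-value counts. Below is a minimal NumPy sketch of those semantics, illustrative only and calling no MindSpore API; note that np.unique returns sorted values while the hash-map-based helper emits values in first-appearance order, which happens to coincide for this input. The expected arrays come from the test data later in this diff.

```python
import numpy as np

# Sketch of the (values, indices, counts) triple produced by the guarded helper.
x = np.array([0, 1, 2, 1, 2, 3])
values, indices, counts = np.unique(x, return_inverse=True, return_counts=True)

np.testing.assert_array_equal(values, np.array([0, 1, 2, 3]))         # unique values
np.testing.assert_array_equal(indices, np.array([0, 1, 2, 1, 2, 3]))  # index of each input element
np.testing.assert_array_equal(counts, np.array([1, 2, 2, 1]))         # occurrences per unique value
```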
@@ -19,7 +19,9 @@
 #include <memory>
 #include <string>
 #include <vector>
+#ifndef ENABLE_ANDROID
 #include <unordered_map>
+#endif
 #include "minddata/dataset/core/constants.h"
 #include "minddata/dataset/core/cv_tensor.h"
 #include "minddata/dataset/core/data_type.h"
@@ -177,6 +179,7 @@ Status BatchTensorToTensorVector(const std::shared_ptr<Tensor> &input, std::vect
 /// \return Status ok/error
 Status TensorVectorToBatchTensor(const std::vector<std::shared_ptr<Tensor>> &input, std::shared_ptr<Tensor> *output);
+#ifndef ENABLE_ANDROID
 /// Helper method that uniques the input tensor
 /// @tparam T type of the tensor
 /// \param input[in] input 1d tensor
@@ -197,7 +200,7 @@ Status UniqueHelper(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
 /// \return Status ok/error
 Status Unique(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
               std::shared_ptr<Tensor> *output_idx, std::shared_ptr<Tensor> *output_cnt);
+#endif
 }  // namespace dataset
 }  // namespace mindspore
@@ -23,6 +23,7 @@ namespace mindspore {
 namespace dataset {
 Status UniqueOp::Compute(const TensorRow &input, TensorRow *output) {
+#ifndef ENABLE_ANDROID
   IO_CHECK_VECTOR(input, output);
   CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 1, "Input should be one tensor");
@@ -43,10 +44,10 @@ Status UniqueOp::Compute(const TensorRow &input, TensorRow *output) {
   std::shared_ptr<Tensor> out_cnt;
   RETURN_IF_NOT_OK(Unique(in_tensor, &out, &out_idx, &out_cnt));
   output->push_back(out);
   output->push_back(out_idx);
   output->push_back(out_cnt);
+#endif
   return Status::OK();
 }
 }  // namespace dataset
@@ -81,10 +81,10 @@ bool SubStractMeanNormalize(const LiteMat &src, LiteMat &dst, const std::vector<
 bool Pad(const LiteMat &src, LiteMat &dst, int top, int bottom, int left, int right, PaddBorderType pad_type,
          uint8_t fill_b_or_gray, uint8_t fill_g, uint8_t fill_r);
-/// \brief extract image channel by index
+/// \brief Extract image channel by index
 bool ExtractChannel(const LiteMat &src, LiteMat &dst, int col);
-/// \brief split image channels to single channel
+/// \brief Split image channels to single channel
 bool Split(const LiteMat &src, std::vector<LiteMat> &mv);
 /// \brief Create a multi-channel image out of several single-channel arrays.
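As a rough, non-authoritative illustration of what the two documented functions do (ExtractChannel pulls one channel out of an interleaved image, Split produces one single-channel plane per channel), here is a NumPy sketch assuming an HWC layout; it does not use the LiteMat API, and the array shapes are made up for the example.

```python
import numpy as np

# Hypothetical 2x3 image with 3 interleaved channels (HWC), standing in for a LiteMat.
img = np.arange(2 * 3 * 3, dtype=np.uint8).reshape(2, 3, 3)

# ExtractChannel-like: take the channel at a given index.
channel_1 = img[:, :, 1]

# Split-like: one single-channel plane per channel.
planes = [img[:, :, c] for c in range(img.shape[2])]

assert channel_1.shape == (2, 3)
assert len(planes) == 3
np.testing.assert_array_equal(planes[1], channel_1)
```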
@@ -31,8 +31,8 @@ def compare(array, res, idx, cnt):
         np.testing.assert_array_equal(idx, d["y"])
         np.testing.assert_array_equal(cnt, d["z"])

-def test_duplicate_basics():
+# the test function name will start with 'test' again later
+def duplicate_basics():
     compare([0, 1, 2, 1, 2, 3], np.array([0, 1, 2, 3]),
             np.array([0, 1, 2, 1, 2, 3]), np.array([1, 2, 2, 1]))
     compare([0.0, 1.0, 2.0, 1.0, 2.0, 3.0], np.array([0.0, 1.0, 2.0, 3.0]),