From ca8a914736c67d1ff44095af7192531b6a511376 Mon Sep 17 00:00:00 2001
From: lihongkang <lihongkang1@huawei.com>
Date: Wed, 13 May 2020 14:27:35 +0800
Subject: [PATCH] Add operators resizebilinear and resizebilineargrad
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 mindspore/ccsrc/kernel/tbe/tbe_adapter.cc     |  2 +
 mindspore/ops/_op_impl/tbe/__init__.py        |  2 +
 mindspore/ops/_op_impl/tbe/resize_bilinear.py | 39 +++++++++++++++++++
 .../ops/_op_impl/tbe/resize_bilinear_grad.py  | 38 ++++++++++++++++++
 tests/ut/python/ops/test_ops.py               |  9 +++++
 5 files changed, 90 insertions(+)
 create mode 100644 mindspore/ops/_op_impl/tbe/resize_bilinear.py
 create mode 100644 mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py

diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
index e11b3f535d..61effb5388 100644
--- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
+++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
@@ -78,6 +78,8 @@ static std::map tbe_func_adapter_map = {
   {"pad", "pad_d"},
   {"space_to_batch", "space_to_batch_d"},
   {"batch_to_space", "batch_to_space_d"},
+  {"resize_bilinear", "resize_bilinear_v2_d"},
+  {"resize_bilinear_grad", "resize_bilinear_v2_grad"},
   {"adam", "apply_adam_d"}};

 void TbeAdapter::NormalizeFuncName(std::string *func_name) {
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index a70d172570..9e7c8745ac 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -162,3 +162,5 @@ from .batch_to_space import _batch_to_space_tbe
 from .space_to_batch import _space_to_batch_tbe
 from .floor import _floor_tbe
 from .log1p import _log1p_tbe
+from .resize_bilinear import _resize_bilinear_tbe
+from .resize_bilinear_grad import _resize_bilinear_grad_tbe
diff --git a/mindspore/ops/_op_impl/tbe/resize_bilinear.py b/mindspore/ops/_op_impl/tbe/resize_bilinear.py
new file mode 100644
index 0000000000..2d8091488c
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/resize_bilinear.py
@@ -0,0 +1,39 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""ResizeBilinear op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+resize_bilinear_op_info = TBERegOp("ResizeBilinear") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("resize_bilinear_v2_d.so") \
+    .compute_cost(10) \
+    .kernel_name("resize_bilinear_v2_d") \
+    .partial_flag(True) \
+    .attr("size", "required", "listInt", "all") \
+    .attr("align_corners", "optional", "bool", "all") \
+    .attr("half_pixel_centers", "optional", "bool", "all") \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_5HD, DataType.F32_5HD) \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
+    .get_op_info()
+
+
+@op_info_register(resize_bilinear_op_info)
+def _resize_bilinear_tbe():
+    """ResizeBilinear TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py b/mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py
new file mode 100644
index 0000000000..bbc4419458
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py
@@ -0,0 +1,38 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""ResizeBilinearGrad op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+resize_bilinear_grad_op_info = TBERegOp("ResizeBilinearGrad") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("resize_bilinear_v2_grad.so") \
+    .compute_cost(10) \
+    .kernel_name("resize_bilinear_v2_grad") \
+    .partial_flag(True) \
+    .attr("align_corners", "optional", "bool", "all") \
+    .attr("half_pixel_centers", "optional", "bool", "all") \
+    .input(0, "grads", False, "required", "all") \
+    .input(1, "original_image", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
+    .get_op_info()
+
+
+@op_info_register(resize_bilinear_grad_op_info)
+def _resize_bilinear_grad_tbe():
+    """ResizeBilinearGrad TBE register"""
+    return
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index c7d6cd12f3..1c1d9d5335 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -924,6 +924,15 @@ test_case_nn_ops = [
         'block': P.L2Loss(),
         'desc_inputs': [Tensor(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]), mstype.float16)],
         'desc_bprop': []}),
+    ('ResizeBilinear', {
+        'block': P.ResizeBilinear((5, 5)),
+        'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)],
+        'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)]}),
+    ('ResizeBilinearGrad', {
+        'block': G.ResizeBilinearGrad(),
+        'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32), Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
+        'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
+        'skip': ['backward']}),
 ]

 test_case_array_ops = [
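
Note (not part of the patch): the sketch below shows how the newly registered TBE kernels are reached from the Python frontend. It mirrors the 'ResizeBilinear' unit-test entry above and assumes the frontend already exposes P.ResizeBilinear and that an Ascend device is available; the ResizeNet wrapper name is illustrative only. The backward kernel (resize_bilinear_v2_grad) is exercised automatically when gradients of the resize are requested.

    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops.operations as P
    from mindspore import Tensor, context
    from mindspore.common import dtype as mstype


    class ResizeNet(nn.Cell):
        """Wraps ResizeBilinear so it can be called like a normal network."""
        def __init__(self, size):
            super(ResizeNet, self).__init__()
            # 'size' maps to the required "size" attribute registered above.
            self.resize = P.ResizeBilinear(size)

        def construct(self, x):
            return self.resize(x)


    if __name__ == "__main__":
        # device_target="Ascend" (assumed available) selects the TBE path registered above.
        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
        x = Tensor(np.arange(1, 11).reshape(1, 1, 2, 5), mstype.float16)  # NCHW input
        net = ResizeNet((5, 5))
        print(net(x).asnumpy().shape)  # expected spatial upsampling to (1, 1, 5, 5)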