Browse Source

!1121 Complete vm ops for ResizeBilinear and ResizeBilinearGrad

Merge pull request !1121 from lihongkang/master
tags/v0.3.0-alpha
mindspore-ci-bot Gitee 5 years ago
parent
commit
21bcdcd8ad
5 changed files with 90 additions and 0 deletions
  1. +2
    -0
      mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
  2. +2
    -0
      mindspore/ops/_op_impl/tbe/__init__.py
  3. +39
    -0
      mindspore/ops/_op_impl/tbe/resize_bilinear.py
  4. +38
    -0
      mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py
  5. +9
    -0
      tests/ut/python/ops/test_ops.py

+ 2
- 0
mindspore/ccsrc/kernel/tbe/tbe_adapter.cc View File

@@ -78,6 +78,8 @@ static std::map<string, string> tbe_func_adapter_map = {
{"pad", "pad_d"},
{"space_to_batch", "space_to_batch_d"},
{"batch_to_space", "batch_to_space_d"},
{"resize_bilinear", "resize_bilinear_v2_d"},
{"resize_bilinear_grad", "resize_bilinear_v2_grad"},
{"adam", "apply_adam_d"}};

void TbeAdapter::NormalizeFuncName(std::string *func_name) {


+ 2
- 0
mindspore/ops/_op_impl/tbe/__init__.py View File

@@ -162,3 +162,5 @@ from .batch_to_space import _batch_to_space_tbe
from .space_to_batch import _space_to_batch_tbe
from .floor import _floor_tbe
from .log1p import _log1p_tbe
from .resize_bilinear import _resize_bilinear_tbe
from .resize_bilinear_grad import _resize_bilinear_grad_tbe

+ 39
- 0
mindspore/ops/_op_impl/tbe/resize_bilinear.py View File

@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""ResizeBilinear op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

# Kernel-registration record for ResizeBilinear: binds the front-end op name
# to the AICore TBE binary `resize_bilinear_v2_d` and declares its attrs,
# I/O layout, and supported dtype/format pairs (fp16/fp32 NC1HWC0 in,
# fp32 NC1HWC0 out).
resize_bilinear_op_info = (
    TBERegOp("ResizeBilinear")
    .fusion_type("OPAQUE")
    .async_flag(False)
    .binfile_name("resize_bilinear_v2_d.so")
    .compute_cost(10)
    .kernel_name("resize_bilinear_v2_d")
    .partial_flag(True)
    # Target spatial size is mandatory; corner/pixel-center handling is optional.
    .attr("size", "required", "listInt", "all")
    .attr("align_corners", "optional", "bool", "all")
    .attr("half_pixel_centers", "optional", "bool", "all")
    .input(0, "x", False, "required", "all")
    .output(0, "y", False, "required", "all")
    .dtype_format(DataType.F16_5HD, DataType.F32_5HD)
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD)
    .get_op_info()
)


@op_info_register(resize_bilinear_op_info)
def _resize_bilinear_tbe():
    """Register the ResizeBilinear op info with the TBE kernel compiler."""
    return

+ 38
- 0
mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py View File

@@ -0,0 +1,38 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""ResizeBilinearGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

# Kernel-registration record for ResizeBilinearGrad: binds the front-end op
# name to the AICore TBE binary `resize_bilinear_v2_grad`. Takes the incoming
# gradient plus the original forward input (to recover the source spatial
# size) and produces an fp32 NC1HWC0 gradient.
resize_bilinear_grad_op_info = (
    TBERegOp("ResizeBilinearGrad")
    .fusion_type("OPAQUE")
    .async_flag(False)
    .binfile_name("resize_bilinear_v2_grad.so")
    .compute_cost(10)
    .kernel_name("resize_bilinear_v2_grad")
    .partial_flag(True)
    .attr("align_corners", "optional", "bool", "all")
    .attr("half_pixel_centers", "optional", "bool", "all")
    .input(0, "grads", False, "required", "all")
    .input(1, "original_image", False, "required", "all")
    .output(0, "y", False, "required", "all")
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD)
    .get_op_info()
)


@op_info_register(resize_bilinear_grad_op_info)
def _resize_bilinear_grad_tbe():
    """Register the ResizeBilinearGrad op info with the TBE kernel compiler."""
    return

+ 9
- 0
tests/ut/python/ops/test_ops.py View File

@@ -924,6 +924,15 @@ test_case_nn_ops = [
'block': P.L2Loss(),
'desc_inputs': [Tensor(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]), mstype.float16)],
'desc_bprop': []}),
('ResizeBilinear', {
'block': P.ResizeBilinear((5, 5)),
'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)],
'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)]}),
('ResizeBilinearGrad', {
'block': G.ResizeBilinearGrad(),
'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32), Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
'skip': ['backward']}),
]

test_case_array_ops = [


Loading…
Cancel
Save