Browse Source

Fix Conv3D on CPU when pad_mode is "pad"

tags/v1.4.0
zuochuanyong 4 years ago
parent
commit
4a2a254658
5 changed files with 125 additions and 19 deletions
  1. +2
    -8
      mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv_cpu_kernel.h
  2. +4
    -4
      mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
  3. +1
    -0
      mindspore/ops/_op_impl/cpu/__init__.py
  4. +30
    -0
      mindspore/ops/_op_impl/cpu/conv3d.py
  5. +88
    -7
      tests/st/ops/cpu/test_conv_op.py

+ 2
- 8
mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv_cpu_kernel.h View File

@@ -34,14 +34,8 @@ class ConvCPUKernel : public MKLCPUKernel {
};

MS_REG_CPU_KERNEL(Conv2D, KernelAttr(), ConvCPUKernel);
MS_REG_CPU_KERNEL(
Conv3D,
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
ConvCPUKernel);
MS_REG_CPU_KERNEL(
Conv3D,
KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
ConvCPUKernel);
MS_REG_CPU_KERNEL(Conv3D, KernelAttr(), ConvCPUKernel);

} // namespace kernel
} // namespace mindspore



+ 4
- 4
mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc View File

@@ -58,10 +58,10 @@ void MKLCPUKernel::GetPadding(const CNodePtr &kernel_node, const std::string &pa
std::vector<int64_t> pad_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, PAD_LIST);
(void)std::transform(pad_me.begin(), pad_me.end(), std::back_inserter(pad),
[](const int64_t &value) { return static_cast<int>(value); });
padding_l->emplace_back(pad[0]);
padding_l->emplace_back(pad[2]);
padding_r->emplace_back(pad[1]);
padding_r->emplace_back(pad[3]);
for (size_t i = 0; i < dim; i += 2) {
padding_l->emplace_back(pad[i]);
padding_r->emplace_back(pad[i + 1]);
}
}
}



+ 1
- 0
mindspore/ops/_op_impl/cpu/__init__.py View File

@@ -35,6 +35,7 @@ from .gather_nd import _gather_nd_cpu
from .maximum import _maximum_cpu
from .maximum_grad import _maximum_grad_cpu
from .conv2d import _conv2d_cpu
from .conv3d import _conv3d_cpu
from .hsigmoid import _hsigmoid_cpu
from .hsigmoid_grad import _hsigmoid_grad_cpu
from .hswish import _hswish_cpu


+ 30
- 0
mindspore/ops/_op_impl/cpu/conv3d.py View File

@@ -0,0 +1,30 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Conv3D op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType

# Registration metadata for the CPU Conv3D kernel: two required float32
# inputs (the feature map "x" and the convolution "filter") and a single
# required float32 output "y".
conv3d_op_info = (
    CpuRegOp("Conv3D")
    .input(0, "x", "required")
    .input(1, "filter", "required")
    .output(0, "y", "required")
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default)
    .get_op_info()
)


@op_info_register(conv3d_op_info)
def _conv3d_cpu():
    """Register the Conv3D operator info for the CPU backend.

    The function body is intentionally empty: registration happens as a
    side effect of the ``op_info_register`` decorator at import time.
    """

+ 88
- 7
tests/st/ops/cpu/test_conv_op.py View File

@@ -173,15 +173,15 @@ def test_conv():


class NetConv3d(nn.Cell):
def __init__(self):
def __init__(self, mode, pad_mode, pad):
super(NetConv3d, self).__init__()
out_channel = 4
kernel_size = 2
self.conv = P.Conv3D(out_channel,
kernel_size,
mode=1,
pad_mode="valid",
pad=0,
mode=mode,
pad_mode=pad_mode,
pad=pad,
stride=1,
dilation=1,
group=1)
@@ -212,13 +212,94 @@ def test_conv3d():
[76860., 78864.]],
[[88884., 90888.],
[94896., 96900.]]]]]).astype(np.float32)

mode = 1
pad_mode = "valid"
pad = 0
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
net = NetConv3d()
net = NetConv3d(mode, pad_mode, pad)
output = net(x, w)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = NetConv3d(mode, pad_mode, pad)
output = net(x, w)
assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_conv3d_2():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = NetConv3d()
x = Tensor(np.arange(1 * 3 * 3 * 3 * 3).reshape(1, 3, 3, 3, 3).astype(np.float32))
w = Tensor(np.arange(4 * 3 * 2 * 2 * 2).reshape(4, 3, 2, 2, 2).astype(np.float32))
expect = np.array([[[[[1647, 3258, 3345, 1650],
[3267, 6447, 6609, 3252],
[3519, 6933, 7095, 3486],
[1719, 3378, 3453, 1692]],
[[3375, 6639, 6789, 3330],
[6606, 12960, 13236, 6474],
[7038, 13788, 14064, 6870],
[3393, 6627, 6753, 3288]],
[[4077, 7989, 8139, 3978],
[7902, 15444, 15720, 7662],
[8334, 16272, 16548, 8058],
[3987, 7761, 7887, 3828]],
[[1917, 3732, 3795, 1842],
[3663, 7107, 7221, 3492],
[3843, 7449, 7563, 3654],
[1809, 3492, 3543, 1704]]],
[[[3591, 7218, 7449, 3738],
[7371, 14799, 15249, 7644],
[8055, 16149, 16599, 8310],
[4095, 8202, 8421, 4212]],
[[7911, 15855, 16293, 8154],
[16110, 32256, 33108, 16554],
[17406, 34812, 35664, 17814],
[8793, 17571, 17985, 8976]],
[[9909, 19797, 20235, 10098],
[19998, 39924, 40776, 20334],
[21294, 42480, 43332, 21594],
[10683, 21297, 21711, 10812]],
[[5157, 10284, 10491, 5226],
[10359, 20643, 21045, 10476],
[10971, 21849, 22251, 11070],
[5481, 10908, 11103, 5520]]],
[[[5535, 11178, 11553, 5826],
[11475, 23151, 23889, 12036],
[12591, 25365, 26103, 13134],
[6471, 13026, 13389, 6732]],
[[12447, 25071, 25797, 12978],
[25614, 51552, 52980, 26634],
[27774, 55836, 57264, 28758],
[14193, 28515, 29217, 14664]],
[[15741, 31605, 32331, 16218],
[32094, 64404, 65832, 33006],
[34254, 68688, 70116, 35130],
[17379, 34833, 35535, 17796]],
[[8397, 16836, 17187, 8610],
[17055, 34179, 34869, 17460],
[18099, 36249, 36939, 18486],
[9153, 18324, 18663, 9336]]],
[[[7479, 15138, 15657, 7914],
[15579, 31503, 32529, 16428],
[17127, 34581, 35607, 17958],
[8847, 17850, 18357, 9252]],
[[16983, 34287, 35301, 17802],
[35118, 70848, 72852, 36714],
[38142, 76860, 78864, 39702],
[19593, 39459, 40449, 20352]],
[[21573, 43413, 44427, 22338],
[44190, 88884, 90888, 45678],
[47214, 94896, 96900, 48666],
[24075, 48369, 49359, 24780]],
[[11637, 23388, 23883, 11994],
[23751, 47715, 48693, 24444],
[25227, 50649, 51627, 25902],
[12825, 25740, 26223, 13152]]]]]).astype(np.float32)
mode = 1
pad_mode = "pad"
pad = (1, 1, 1, 1, 1, 1)
net = NetConv3d(mode, pad_mode, pad)
output = net(x, w)
assert (output.asnumpy() == expect).all()



Loading…
Cancel
Save