
!15006 Upgrade oneDNN to v2.1.2 and fix a coredump in the MaxPool op

From: @zuochuanyong
Reviewed-by: @guoqi1024, @zhaizhiqiang
Signed-off-by: @zhaizhiqiang
pull/15006/MERGE
mindspore-ci-bot committed 4 years ago
commit 9ef83fe7f4
3 changed files with 19 additions and 6 deletions
  1. cmake/external_libs/mkl_dnn.cmake  (+5 -5)
  2. mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/pooling_cpu_kernel.cc  (+8 -1)
  3. mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/pooling_cpu_kernel.h  (+6 -0)

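Why the workspace matters: for max pooling created with a training propagation kind, the oneDNN forward primitive reports a workspace (it records which input element was the maximum, for use in the backward pass), and executing the primitive without binding a buffer for DNNL_ARG_WORKSPACE can crash. The patch queries the size from the primitive descriptor, registers it as a kernel workspace, and binds it at launch. Below is a minimal standalone sketch of that pattern against plain oneDNN 2.x; the shapes and the main() harness are illustrative only, not MindSpore code.

// Illustrative sketch only (not MindSpore source); assumes oneDNN 2.x headers (link with -ldnnl).
#include <dnnl.hpp>

int main() {
  using namespace dnnl;
  engine eng(engine::kind::cpu, 0);
  stream strm(eng);

  // Hypothetical shapes: 1x1x4x4 NCHW input, 2x2 max pooling with stride 2.
  memory::desc src_md({1, 1, 4, 4}, memory::data_type::f32, memory::format_tag::nchw);
  memory::desc dst_md({1, 1, 2, 2}, memory::data_type::f32, memory::format_tag::nchw);

  pooling_forward::desc desc(prop_kind::forward_training, algorithm::pooling_max,
                             src_md, dst_md,
                             /*strides=*/{2, 2}, /*kernel=*/{2, 2},
                             /*padding_l=*/{0, 0}, /*padding_r=*/{0, 0});
  pooling_forward::primitive_desc pd(desc, eng);

  // The workspace size is only known from the primitive descriptor; this is the
  // value the patch caches in workspace_size_ and pushes into workspace_size_list_.
  memory::desc ws_md = pd.workspace_desc();

  memory src_mem(src_md, eng), dst_mem(dst_md, eng), ws_mem(ws_md, eng);

  // Binding DNNL_ARG_WORKSPACE here mirrors the new
  // SetArgumentHandle(DNNL_ARG_WORKSPACE, workspace[0]->addr) call in Launch().
  pooling_forward(pd).execute(strm, {{DNNL_ARG_SRC, src_mem},
                                     {DNNL_ARG_DST, dst_mem},
                                     {DNNL_ARG_WORKSPACE, ws_mem}});
  strm.wait();
  return 0;
}

The patch registers the workspace unconditionally, so Launch() can always bind workspace[0] without branching on the pooling mode.
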
cmake/external_libs/mkl_dnn.cmake  (+5 -5)

@@ -10,14 +10,14 @@ if(CMAKE_SYSTEM_NAME MATCHES "Windows")
         MD5 fe660e34e9f73ab13a65987819a0712e)
 else()
     if(ENABLE_GITEE)
-        set(REQ_URL "https://gitee.com/mirrors/MKL-DNN/repository/archive/v1.6.tar.gz")
-        set(MD5 "44da423a3b6848990a907f99a65b26e7")
+        set(REQ_URL "https://gitee.com/mirrors/MKL-DNN/repository/archive/v2.1.2.tar.gz")
+        set(MD5 "d98f171d7e66e252c79e2e167ba4a8e8")
     else()
-        set(REQ_URL "https://github.com/oneapi-src/oneDNN/archive/v1.6.tar.gz")
-        set(MD5 "7cf251209f774ae6d61489ad7c2c3bea")
+        set(REQ_URL "https://github.com/oneapi-src/oneDNN/archive/v2.1.2.tar.gz")
+        set(MD5 "1df4f16f650b7ea08610a10af013faa3")
     endif()
     mindspore_add_pkg(onednn
-        VER 1.6
+        VER 2.1.2
         LIBS dnnl mkldnn
         URL ${REQ_URL}
         MD5 ${MD5}


mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/pooling_cpu_kernel.cc  (+8 -1)

@@ -22,6 +22,11 @@
 
 namespace mindspore {
 namespace kernel {
+void PoolingCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
+  CPUKernel::InitInputOutputSize(kernel_node);
+  workspace_size_list_.emplace_back(workspace_size_);
+}
+
 void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   MS_EXCEPTION_IF_NULL(kernel_node);
   std::vector<size_t> src_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
@@ -62,6 +67,7 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
                                              dst_desc, strides_dims, kernels_dims, padding_l, padding_r);
   }
   auto prim_desc = dnnl::pooling_forward::primitive_desc(desc, MKLKernelEngine::Get().engine());
+  workspace_size_ = prim_desc.workspace_desc().get_size();
   primitive_ = std::make_shared<dnnl::pooling_forward>(prim_desc);
   AddArgument(DNNL_ARG_SRC, src_desc);
   AddArgument(DNNL_ARG_DST, dst_desc);
@@ -69,13 +75,14 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
 }
 
 bool PoolingCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
-                              const std::vector<kernel::AddressPtr> & /*workspace*/,
+                              const std::vector<kernel::AddressPtr> &workspace,
                               const std::vector<kernel::AddressPtr> &outputs) {
   if (inputs.empty() || outputs.empty()) {
     MS_LOG(EXCEPTION) << "error input output size!";
   }
   SetArgumentHandle(DNNL_ARG_SRC, inputs[0]->addr);
   SetArgumentHandle(DNNL_ARG_DST, outputs[0]->addr);
+  SetArgumentHandle(DNNL_ARG_WORKSPACE, workspace[0]->addr);
   ExecutePrimitive();
   return true;
 }


mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/pooling_cpu_kernel.h  (+6 -0)

@@ -31,6 +31,12 @@ class PoolingCPUKernel : public MKLCPUKernel {
 
   bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override;
+
+ protected:
+  void InitInputOutputSize(const CNodePtr &kernel_node) override;
+
+ private:
+  size_t workspace_size_{0};
 };
 
 MS_REG_CPU_KERNEL(MaxPool, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),

