Browse Source

!31568 develop nllloss and nlllossgrad for CPU and lite

Merge pull request !31568 from zhangbuxue/develop_NLLLoss_fo_CPU_and_lite_master
r1.7
i-robot Gitee 4 years ago
parent
commit
65bc68867d
No known key found for this signature in database GPG Key ID: 173E9B9CA92EEF8F
43 changed files with 1862 additions and 66 deletions
  1. +1
    -1
      mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/lstm_cpu_kernel.h
  2. +10
    -10
      mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.cc
  3. +2
    -2
      mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_cpu_kernel.cc
  4. +1
    -1
      mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_cpu_kernel.h
  5. +3
    -3
      mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_grad_cpu_kernel.cc
  6. +1
    -1
      mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_grad_cpu_kernel.h
  7. +76
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_cpu_kernel.cc
  8. +54
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_cpu_kernel.h
  9. +77
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_grad_cpu_kernel.cc
  10. +55
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_grad_cpu_kernel.h
  11. +49
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/nllloss_fp32.c
  12. +31
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/nllloss_fp32.h
  13. +42
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32_grad/nllloss_grad_fp32.c
  14. +31
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32_grad/nllloss_grad_fp32.h
  15. +6
    -2
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/infer_register.c
  16. +54
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_grad_infer.c
  17. +33
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_grad_infer.h
  18. +52
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_infer.c
  19. +33
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_infer.h
  20. +32
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/nllloss_parameter.h
  21. +4
    -1
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/op_base.h
  22. +0
    -11
      mindspore/ccsrc/plugin/device/cpu/kernel/stridedslice_cpu_kernel.cc
  23. +37
    -0
      mindspore/core/ops/grad/nllloss_grad.cc
  24. +55
    -0
      mindspore/core/ops/grad/nllloss_grad.h
  25. +37
    -0
      mindspore/core/ops/nllloss.cc
  26. +53
    -0
      mindspore/core/ops/nllloss.h
  27. +1
    -0
      mindspore/core/utils/check_convert_utils.cc
  28. +10
    -0
      mindspore/lite/schema/ops.fbs
  29. +10
    -0
      mindspore/lite/src/ops/ops_def.cc
  30. +4
    -0
      mindspore/lite/src/ops/ops_func_declare.h
  31. +43
    -32
      mindspore/lite/src/ops/ops_utils.cc
  32. +62
    -0
      mindspore/lite/src/ops/populate/nllloss_populate.cc
  33. +82
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/nllloss_fp32.cc
  34. +44
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/nllloss_fp32.h
  35. +84
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/nllloss_grad.cc
  36. +44
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/nllloss_grad.h
  37. +110
    -0
      mindspore/lite/test/ut/nnacl/infer/nllloss_grad_infer_test.cc
  38. +98
    -0
      mindspore/lite/test/ut/nnacl/infer/nllloss_infer_test.cc
  39. +146
    -0
      mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/nllloss_fp32_test.cc
  40. +153
    -0
      mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/nllloss_grad_fp32_test.cc
  41. +1
    -1
      mindspore/python/mindspore/ops/operations/_grad_ops.py
  42. +1
    -1
      mindspore/python/mindspore/ops/operations/nn_ops.py
  43. +140
    -0
      tests/st/ops/cpu/test_nllloss_op.py

+ 1
- 1
mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/lstm_cpu_kernel.h View File

@@ -77,4 +77,4 @@ class LstmCpuKernelMod : public MKLCpuKernelMod {
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_LSTM_CPU_KERNEL_H
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_LSTM_CPU_KERNEL_H_

+ 10
- 10
mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.cc View File

@@ -37,7 +37,7 @@ void GeneratePaddingForPadMode(const PaddingInfo &padding_info, std::vector<int6
for (size_t i = 0; i < dim; ++i) {
size_t l_index = multiple * i;
size_t r_index = multiple * i + 1;
(void)padding_info.padding_l->push_back(pad[l_index]);
padding_info.padding_l->push_back(pad[l_index]);

if (padding_info.ceil_mode) {
int64_t len = shape_exclude_nc[i] + pad[l_index] + pad[r_index] - padding_info.kernel_size[i];
@@ -45,13 +45,13 @@ void GeneratePaddingForPadMode(const PaddingInfo &padding_info, std::vector<int6
FloatToLong(std::ceil(LongToDouble(len) / LongToDouble(padding_info.stride[i]))) * padding_info.stride[i] - len;
int64_t padding_r = pad[r_index] + padding_iv;
if (padding_r > pad[r_index] && padding_r < padding_info.kernel_size[i]) {
(void)padding_info.padding_r->push_back(padding_r);
(void)padding_info.padding_invalid->push_back(LongToFloat(padding_iv));
padding_info.padding_r->push_back(padding_r);
padding_info.padding_invalid->push_back(LongToFloat(padding_iv));
continue;
}
(void)padding_info.padding_invalid->push_back(LongToFloat(0.0));
padding_info.padding_invalid->push_back(LongToFloat(0.0));
}
(void)padding_info.padding_r->push_back(pad[r_index]);
padding_info.padding_r->push_back(pad[r_index]);
}
}
} // namespace
@@ -68,7 +68,7 @@ void MKLCpuKernelMod::GetPadding(const CNodePtr &kernel_node, const std::vector<
const size_t dim_exclude_nc = src_dim - NC_LEN;
std::vector<int64_t> shape_exclude_nc;
for (size_t i = NC_LEN; i < src_dim; ++i) {
(void)shape_exclude_nc.emplace_back(SizeToLong(src_shape[i]));
shape_exclude_nc.push_back(SizeToLong(src_shape[i]));
}

if (padding_info.pad_mode == PAD_MODE_LOWER_SAME || padding_info.pad_mode == PAD_MODE_UPPER_SAME) {
@@ -78,13 +78,13 @@ void MKLCpuKernelMod::GetPadding(const CNodePtr &kernel_node, const std::vector<
int64_t effective_k = (SizeToLong(padding_info.kernel_size[i]) - 1) * padding_info.dilation[i] + 1;
int64_t pad_along = std::max(int64_t(0), (out - 1) * padding_info.stride[i] + effective_k - wh);
int64_t pad = pad_along / 2;
(void)padding_info.padding_l->push_back(pad);
(void)padding_info.padding_r->push_back(pad_along - pad);
padding_info.padding_l->push_back(pad);
padding_info.padding_r->push_back(pad_along - pad);
}
} else if (padding_info.pad_mode == PAD_MODE_LOWER_VALID || padding_info.pad_mode == PAD_MODE_UPPER_VALID) {
for (size_t i = 0; i < dim_exclude_nc; ++i) {
(void)padding_info.padding_l->push_back(0);
(void)padding_info.padding_r->push_back(0);
padding_info.padding_l->push_back(0);
padding_info.padding_r->push_back(0);
}
} else {
std::vector<int64_t> pad = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, PAD_LIST);


+ 2
- 2
mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_cpu_kernel.cc View File

@@ -28,7 +28,7 @@ constexpr size_t kPoolingInputsNum = 1;
constexpr size_t kPoolingOutputsNum = 1;
} // namespace

void PoolingCpuKernelMod::InitFields(const CNodePtr &kernel_node) {
void PoolingCpuKernelMod::InitPoolingFields(const CNodePtr &kernel_node) {
kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
PrimitivePtr prim = common::AnfAlgo::GetCNodePrimitive(kernel_node);
MS_EXCEPTION_IF_NULL(prim);
@@ -51,7 +51,7 @@ void PoolingCpuKernelMod::InitFields(const CNodePtr &kernel_node) {

void PoolingCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
InitFields(kernel_node);
InitPoolingFields(kernel_node);
std::vector<size_t> src_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
const size_t src_dim = src_shape.size();
if (src_dim != SHAPE_4D && src_dim != SHAPE_5D) {


+ 1
- 1
mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_cpu_kernel.h View File

@@ -54,7 +54,7 @@ class PoolingCpuKernelMod : public MKLCpuKernelMod {
std::vector<KernelAttr> GetOpSupport() override;

private:
void InitFields(const CNodePtr &kernel_node);
void InitPoolingFields(const CNodePtr &kernel_node);
std::string kernel_type_{kUnkown};
};
} // namespace kernel


+ 3
- 3
mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_grad_cpu_kernel.cc View File

@@ -38,7 +38,7 @@ void PoolingGradCpuKernelMod::InitInputOutputSize(const CNodePtr &kernel_node) {
}
}

void PoolingGradCpuKernelMod::InitFields(const CNodePtr &kernel_node) {
void PoolingGradCpuKernelMod::InitPoolingGradFields(const CNodePtr &kernel_node) {
kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
PrimitivePtr prim = common::AnfAlgo::GetCNodePrimitive(kernel_node);
MS_EXCEPTION_IF_NULL(prim);
@@ -62,7 +62,7 @@ void PoolingGradCpuKernelMod::InitFields(const CNodePtr &kernel_node) {

void PoolingGradCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
InitFields(kernel_node);
InitPoolingGradFields(kernel_node);
std::vector<size_t> src_shape = AnfAlgo::GetOutputDeviceShape(kernel_node, 0);
const size_t src_dim = src_shape.size();
if (src_dim != SHAPE_4D && src_dim != SHAPE_5D) {
@@ -77,7 +77,7 @@ void PoolingGradCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
if (src_dim == SHAPE_5D && format != NCDHW) {
MS_LOG(EXCEPTION) << kernel_name_ << " only supports 5D input with NCDHW format, but got format" << format;
}
const std::string pad_mode = common::AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
const auto pad_mode = common::AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
const auto kernel_include_nc = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
const auto strides_include_nc = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
if (kernel_include_nc.size() != src_dim) {


+ 1
- 1
mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/pooling_grad_cpu_kernel.h View File

@@ -74,7 +74,7 @@ class PoolingGradCpuKernelMod : public PoolingCpuKernelMod {
}

private:
void InitFields(const CNodePtr &kernel_node);
void InitPoolingGradFields(const CNodePtr &kernel_node);
void InitInputOutputSize(const CNodePtr &kernel_node) override;
void ComputeMaxValueIndex(void *src, void *dst, void *work_array) const;



+ 76
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_cpu_kernel.cc View File

@@ -0,0 +1,76 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "plugin/device/cpu/kernel/nllloss_cpu_kernel.h"

#include <string>
#include <unordered_map>

#include "nnacl/errorcode.h"

namespace mindspore {
namespace kernel {
namespace {
// Fixed operator arity: inputs are logits, labels, weight; outputs are loss and total_weight.
constexpr size_t kNLLLossInputsNum = 3;
constexpr size_t kNLLLossOutputsNum = 2;
// Maps the string value of the 'reduction' attribute to the nnacl enum used by NLLLossParameter.
const std::unordered_map<std::string, ReductionType> kReductionMap = {
{MEAN, Reduction_Mean}, {SUM, Reduction_Sum}, {NONE, Reduction_None}};
} // namespace

// Initializes the kernel from the graph node: validates the data-type signature,
// maps the 'reduction' attribute to the nnacl enum, and caches the batch and
// class dimensions of logits for Launch.
// Raises (MS_LOG(EXCEPTION)) on unsupported dtypes, bad rank or unknown reduction.
void NLLLossCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
  KernelAttr kernel_attr = GetKernelAttrFromNode(kernel_node);
  bool is_match = MatchKernelAttr(kernel_attr, GetOpSupport()).first;
  if (!is_match) {
    MS_LOG(EXCEPTION) << kernel_name_ << " does not support this kernel data type: " << kernel_attr;
  }

  std::vector<size_t> logits_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  // Logits are laid out as (batch, class_num); guard the rank before indexing
  // [0] and [1] so a malformed node cannot trigger out-of-bounds access.
  constexpr size_t kLogitsRankMin = 2;
  if (logits_shape.size() < kLogitsRankMin) {
    MS_LOG(EXCEPTION) << "For " << kernel_name_ << ", the rank of 'logits' must be at least 2, but got "
                      << logits_shape.size();
  }
  auto reduction = common::AnfAlgo::GetNodeAttr<std::string>(kernel_node, REDUCTION);
  auto pair = kReductionMap.find(reduction);
  if (pair == kReductionMap.end()) {
    MS_LOG(EXCEPTION) << "For " << kernel_name_
                      << ", the attr 'reduction' only support 'mean', 'sum' and 'none', but got " << reduction;
  }

  nllloss_param_.batch_ = SizeToInt(logits_shape[0]);
  nllloss_param_.class_num_ = SizeToInt(logits_shape[1]);
  nllloss_param_.reduction_type_ = pair->second;
}

// Executes the forward NLLLoss computation by delegating to the nnacl
// NLLLoss routine with the parameters cached in InitKernel.
// Throws via MS_LOG(EXCEPTION) if the nnacl call reports an error.
bool NLLLossCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                 const std::vector<kernel::AddressPtr> &workspace,
                                 const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(kNLLLossInputsNum, inputs.size(), kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(kNLLLossOutputsNum, outputs.size(), kernel_name_);

  // Raw buffers: logits (f32), labels (i32), weight (f32) in; loss, total_weight out.
  const auto *logits_data = reinterpret_cast<float *>(inputs[0]->addr);
  const auto *labels_data = reinterpret_cast<int *>(inputs[1]->addr);
  const auto *weight_data = reinterpret_cast<float *>(inputs[2]->addr);
  auto *loss_out = reinterpret_cast<float *>(outputs[0]->addr);
  auto *total_weight_out = reinterpret_cast<float *>(outputs[1]->addr);

  const int status = NLLLoss(logits_data, labels_data, weight_data, loss_out, total_weight_out, &nllloss_param_);
  if (status != static_cast<int>(NNACL_OK)) {
    MS_LOG(EXCEPTION) << "Launch " << kernel_name_ << " failed, the nnacl error code " << status;
  }
  return true;
}

MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, NLLLoss, NLLLossCpuKernelMod);
} // namespace kernel
} // namespace mindspore

+ 54
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_cpu_kernel.h View File

@@ -0,0 +1,54 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NLLLOSS_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NLLLOSS_CPU_KERNEL_H_

#include <vector>

#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
#include "nnacl/fp32/nllloss_fp32.h"

namespace mindspore {
namespace kernel {
// CPU kernel for the forward NLLLoss operator.
// Registered type signature (see GetOpSupport): inputs logits (float32),
// labels (int32), weight (float32); outputs loss (float32) and
// total_weight (float32). The math is delegated to the nnacl NLLLoss routine.
class NLLLossCpuKernelMod : public NativeCpuKernelMod {
public:
NLLLossCpuKernelMod() = default;
~NLLLossCpuKernelMod() override = default;

// Reads the 'reduction' attribute and the logits shape from the graph node.
void InitKernel(const CNodePtr &kernel_node) override;

// Computes loss and total_weight from the bound device addresses.
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;

protected:
// Single supported dtype combination: (f32, i32, f32) -> (f32, f32).
std::vector<KernelAttr> GetOpSupport() override {
static std::vector<KernelAttr> support_list = {KernelAttr()
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeFloat32)
.AddOutputAttr(kNumberTypeFloat32)
.AddOutputAttr(kNumberTypeFloat32)};
return support_list;
}

private:
// batch_, class_num_ and reduction_type_ are filled in by InitKernel.
NLLLossParameter nllloss_param_{};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NLLLOSS_CPU_KERNEL_H_

+ 77
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_grad_cpu_kernel.cc View File

@@ -0,0 +1,77 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "plugin/device/cpu/kernel/nllloss_grad_cpu_kernel.h"

#include <string>
#include <unordered_map>

#include "nnacl/errorcode.h"

namespace mindspore {
namespace kernel {
namespace {
// Fixed operator arity: inputs are logits, loss_grad, labels, weight, total_weight;
// the single output is logits_grad.
constexpr size_t kNLLLossGradInputsNum = 5;
constexpr size_t kNLLLossGradOutputsNum = 1;
// Maps the string value of the 'reduction' attribute to the nnacl enum used by NLLLossParameter.
const std::unordered_map<std::string, ReductionType> kReductionMap = {
{MEAN, Reduction_Mean}, {SUM, Reduction_Sum}, {NONE, Reduction_None}};
} // namespace

// Initializes the grad kernel from the graph node: validates the data-type
// signature, maps the 'reduction' attribute to the nnacl enum, and caches the
// batch and class dimensions of logits for Launch.
// Raises (MS_LOG(EXCEPTION)) on unsupported dtypes, bad rank or unknown reduction.
void NLLLossGradCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
  KernelAttr kernel_attr = GetKernelAttrFromNode(kernel_node);
  bool is_match = MatchKernelAttr(kernel_attr, GetOpSupport()).first;
  if (!is_match) {
    MS_LOG(EXCEPTION) << kernel_name_ << " does not support this kernel data type: " << kernel_attr;
  }

  std::vector<size_t> logits_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  // Logits are laid out as (batch, class_num); guard the rank before indexing
  // [0] and [1] so a malformed node cannot trigger out-of-bounds access.
  constexpr size_t kLogitsRankMin = 2;
  if (logits_shape.size() < kLogitsRankMin) {
    MS_LOG(EXCEPTION) << "For " << kernel_name_ << ", the rank of 'logits' must be at least 2, but got "
                      << logits_shape.size();
  }
  auto reduction = common::AnfAlgo::GetNodeAttr<std::string>(kernel_node, REDUCTION);
  auto pair = kReductionMap.find(reduction);
  if (pair == kReductionMap.end()) {
    MS_LOG(EXCEPTION) << "For " << kernel_name_
                      << ", the attr 'reduction' only support 'mean', 'sum' and 'none', but got " << reduction;
  }

  nllloss_param_.batch_ = SizeToInt(logits_shape[0]);
  nllloss_param_.class_num_ = SizeToInt(logits_shape[1]);
  nllloss_param_.reduction_type_ = pair->second;
}

// Executes the NLLLoss backward pass by delegating to the nnacl NLLLossGrad
// routine with the parameters cached in InitKernel.
// Throws via MS_LOG(EXCEPTION) if the nnacl call reports an error.
bool NLLLossGradCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                     const std::vector<kernel::AddressPtr> &workspace,
                                     const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(kNLLLossGradInputsNum, inputs.size(), kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(kNLLLossGradOutputsNum, outputs.size(), kernel_name_);

  // Raw buffers in registration order: logits, loss_grad, labels, weight, total_weight.
  const auto *logits_data = reinterpret_cast<float *>(inputs[0]->addr);
  const auto *loss_grad_data = reinterpret_cast<float *>(inputs[1]->addr);
  const auto *labels_data = reinterpret_cast<int *>(inputs[2]->addr);
  const auto *weight_data = reinterpret_cast<float *>(inputs[3]->addr);
  const auto *total_weight_data = reinterpret_cast<float *>(inputs[4]->addr);
  auto *logits_grad_out = reinterpret_cast<float *>(outputs[0]->addr);

  const int status = NLLLossGrad(logits_data, loss_grad_data, labels_data, weight_data, total_weight_data,
                                 logits_grad_out, &nllloss_param_);
  if (status != static_cast<int>(NNACL_OK)) {
    MS_LOG(EXCEPTION) << "Launch " << kernel_name_ << " failed, the nnacl error code " << status;
  }
  return true;
}

MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, NLLLossGrad, NLLLossGradCpuKernelMod);
} // namespace kernel
} // namespace mindspore

+ 55
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nllloss_grad_cpu_kernel.h View File

@@ -0,0 +1,55 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NLLLOSS_GRAD_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NLLLOSS_GRAD_CPU_KERNEL_H_

#include <vector>

#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
#include "nnacl/fp32_grad/nllloss_grad_fp32.h"

namespace mindspore {
namespace kernel {
// CPU kernel for the NLLLossGrad (backward) operator.
// Registered type signature (see GetOpSupport): inputs logits (float32),
// loss_grad (float32), labels (int32), weight (float32), total_weight (float32);
// single output logits_grad (float32). The math is delegated to the nnacl
// NLLLossGrad routine.
class NLLLossGradCpuKernelMod : public NativeCpuKernelMod {
public:
NLLLossGradCpuKernelMod() = default;
~NLLLossGradCpuKernelMod() override = default;

// Reads the 'reduction' attribute and the logits shape from the graph node.
void InitKernel(const CNodePtr &kernel_node) override;

// Computes logits_grad from the bound device addresses.
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;

protected:
// Single supported dtype combination: (f32, f32, i32, f32, f32) -> (f32).
std::vector<KernelAttr> GetOpSupport() override {
static std::vector<KernelAttr> support_list = {KernelAttr()
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kNumberTypeFloat32)
.AddOutputAttr(kNumberTypeFloat32)};
return support_list;
}

private:
// batch_, class_num_ and reduction_type_ are filled in by InitKernel.
NLLLossParameter nllloss_param_{};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NLLLOSS_GRAD_CPU_KERNEL_H_

+ 49
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/nllloss_fp32.c View File

@@ -0,0 +1,49 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/fp32/nllloss_fp32.h"

#include "nnacl/op_base.h"
#include "nnacl/errorcode.h"

int NLLLoss(const float *logits, const int *labels, const float *weight, float *loss, float *total_weight,
const NLLLossParameter *parameter) {
if (logits == NULL || labels == NULL || weight == NULL || loss == NULL || total_weight == NULL) {
return NNACL_NULL_PTR;
}

float total_loss = 0.0;
float tmp_total_weight = 0.0;
ReductionType reduction_type = parameter->reduction_type_;
for (int i = 0; i < parameter->batch_; i++) {
int index = i * parameter->class_num_ + labels[i];
float n_weight = weight[labels[i]];
float n_loss = -logits[index] * n_weight;
tmp_total_weight += n_weight;
total_loss += n_loss;
if (reduction_type == Reduction_None) {
loss[i] = n_loss;
}
}

*total_weight = tmp_total_weight;
if (reduction_type == Reduction_Sum) {
*loss = total_loss;
} else if (reduction_type == Reduction_Mean) {
*loss = total_loss / tmp_total_weight;
}
return NNACL_OK;
}

+ 31
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/nllloss_fp32.h View File

@@ -0,0 +1,31 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_NNACL_FP32_NLLLOSS_H_
#define MINDSPORE_LITE_NNACL_FP32_NLLLOSS_H_

#include "nnacl/nllloss_parameter.h"

#ifdef __cplusplus
extern "C" {
#endif
int NLLLoss(const float *logits, const int *labels, const float *weight, float *loss, float *total_weight,
const NLLLossParameter *parameter);
#ifdef __cplusplus
}
#endif

#endif // MINDSPORE_LITE_NNACL_FP32_NLLLOSS_H_

+ 42
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32_grad/nllloss_grad_fp32.c View File

@@ -0,0 +1,42 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/fp32_grad/nllloss_grad_fp32.h"

#include "nnacl/op_base.h"
#include "nnacl/errorcode.h"

int NLLLossGrad(const float *logits, const float *loss_grad, const int *labels, const float *weight,
const float *total_weight, float *logits_grad, const NLLLossParameter *parameter) {
if (logits == NULL || loss_grad == NULL || labels == NULL || weight == NULL || total_weight == NULL ||
logits_grad == NULL) {
return NNACL_NULL_PTR;
}

memset(logits_grad, 0, parameter->batch_ * parameter->class_num_ * sizeof(float));
for (int i = 0; i < parameter->batch_; i++) {
int index = i * parameter->class_num_ + labels[i];
float n_weight = weight[labels[i]];
if (parameter->reduction_type_ == Reduction_Sum) {
logits_grad[index] = -loss_grad[0] * n_weight;
} else if (parameter->reduction_type_ == Reduction_Mean) {
logits_grad[index] = -loss_grad[0] * n_weight / *total_weight;
} else {
logits_grad[index] = -loss_grad[i] * n_weight;
}
}
return NNACL_OK;
}

+ 31
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32_grad/nllloss_grad_fp32.h View File

@@ -0,0 +1,31 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_NNACL_FP32_NLLLOSS_GRAD_H_
#define MINDSPORE_LITE_NNACL_FP32_NLLLOSS_GRAD_H_

#include "nnacl/nllloss_parameter.h"

#ifdef __cplusplus
extern "C" {
#endif
int NLLLossGrad(const float *logits, const float *loss_grad, const int *labels, const float *weight,
const float *total_weight, float *logits_grad, const NLLLossParameter *parameter);
#ifdef __cplusplus
}
#endif

#endif // MINDSPORE_LITE_NNACL_FP32_NLLLOSS_GRAD_H_

+ 6
- 2
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/infer_register.c View File

@@ -139,9 +139,11 @@
#include "nnacl/infer/affine_infer.h"
#include "nnacl/infer/attention_infer.h"
#include "nnacl/infer/scatter_nd_update_infer.h"
#include "nnacl/infer/nllloss.h"
#include "nnacl/infer/nllloss_grad.h"

InferShape g_infer_func[PrimType_MAX * sizeof(InferShape)] = {0};
InferShape g_inner_op_infer_func[(PrimType_InnerOpMax - PrimType_InnerOpMin) * sizeof(InferShape)] = {0};
InferShape g_infer_func[PrimType_MAX] = {0};
InferShape g_inner_op_infer_func[PrimType_InnerOpMax - PrimType_InnerOpMin] = {0};
void RegAllInferFunc1() {
g_infer_func[PrimType_NONE] = NULL;
g_infer_func[PrimType_Abs] = CommonInferShape;
@@ -350,6 +352,8 @@ void RegAllInferFunc3() {
g_infer_func[PrimType_Attention] = AttentionInferShape;
g_infer_func[PrimType_LSTMGrad] = NULL;
g_infer_func[PrimType_ScatterNdUpdate] = ScatterNdUpdateInferShape;
g_infer_func[PrimType_NLLLoss] = NLLLossInferShape;
g_infer_func[PrimType_NLLLossGrad] = NLLLossGradInferShape;

// fused operators.
g_inner_op_infer_func[PrimType_Inner_ShapeFusion - PrimType_InnerOpMin] = ShapeFusionInferShape;


+ 54
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_grad_infer.c View File

@@ -0,0 +1,54 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/infer/nllloss_grad_infer.h"

#include "nnacl/infer/infer_register.h"

/* Shape inference for NLLLossGrad.
 * Expects at least 5 inputs: logits (2-D: batch x class_num), loss_grad,
 * labels (1-D), weight (1-D), total_weight (scalar), and at least 1 output.
 * The output logits_grad copies the shape, dtype and format of logits.
 * Returns NNACL_OK or an nnacl error code on malformed inputs. */
int NLLLossGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter) {
/* Validate tensor counts and null-ness via the common helper (min 5 in, 1 out). */
int ret = CheckAugmentWithMinSize(inputs, inputs_size, outputs, outputs_size, parameter, C5NUM, C1NUM);
if (ret != NNACL_OK) {
return ret;
}

const TensorC *logits = inputs[0];
const TensorC *loss_grad = inputs[1];
const TensorC *labels = inputs[2];
const TensorC *weight = inputs[3];
const TensorC *total_weight = inputs[4];
/* logits must be 2-D, labels/weight 1-D, total_weight a scalar. */
if (logits->shape_size_ != C2NUM || labels->shape_size_ != C1NUM || weight->shape_size_ != C1NUM ||
total_weight->shape_size_ != 0) {
return NNACL_INPUT_TENSOR_ERROR;
}
/* labels runs over the batch dimension; weight runs over the class dimension. */
if (labels->shape_[0] != logits->shape_[0] || weight->shape_[0] != logits->shape_[1]) {
return NNACL_INPUT_TENSOR_ERROR;
}

NLLLossParameter *param = (NLLLossParameter *)parameter;
/* With reduction 'none' the incoming loss gradient is per-sample (1-D);
 * with 'sum'/'mean' it must be a scalar. */
if (param->reduction_type_ == Reduction_None && loss_grad->shape_size_ != C1NUM) {
return NNACL_INPUT_TENSOR_ERROR;
}
if (param->reduction_type_ != Reduction_None && loss_grad->shape_size_ != 0) {
return NNACL_INPUT_TENSOR_ERROR;
}
TensorC *logits_grad = outputs[0];
SetDataTypeFormat(logits_grad, logits);
SetShapeTensor(logits_grad, logits);
return NNACL_OK;
}

REG_INFER(NLLLossGrad, PrimType_NLLLossGrad, NLLLossGradInferShape)

+ 33
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_grad_infer.h View File

@@ -0,0 +1,33 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_NNACL_NLLLOSS_GRAD_INFER_H
#define MINDSPORE_NNACL_NLLLOSS_GRAD_INFER_H

#include "nnacl/infer/common_infer.h"
#include "nnacl/nllloss_parameter.h"

#ifdef __cplusplus
extern "C" {
#endif

int NLLLossGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter);

#ifdef __cplusplus
}
#endif
#endif // MINDSPORE_NNACL_NLLLOSS_GRAD_INFER_H

+ 52
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_infer.c View File

@@ -0,0 +1,52 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/infer/nllloss_infer.h"

#include "nnacl/infer/infer_register.h"

/* Shape inference for NLLLoss.
 * Expects at least 3 inputs: logits (2-D: batch x class_num), labels (1-D),
 * weight (1-D), and at least 2 outputs: loss and total_weight.
 * loss is (batch,) for reduction 'none', otherwise a scalar; total_weight is
 * always a scalar. Both outputs inherit dtype/format from logits.
 * Returns NNACL_OK or an nnacl error code on malformed inputs. */
int NLLLossInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter) {
/* Validate tensor counts and null-ness via the common helper (min 3 in, 2 out). */
int ret = CheckAugmentWithMinSize(inputs, inputs_size, outputs, outputs_size, parameter, C3NUM, C2NUM);
if (ret != NNACL_OK) {
return ret;
}

const TensorC *logits = inputs[0];
const TensorC *labels = inputs[1];
const TensorC *weight = inputs[2];
/* logits must be 2-D; labels and weight must be 1-D. */
if (logits->shape_size_ != C2NUM || labels->shape_size_ != C1NUM || weight->shape_size_ != C1NUM) {
return NNACL_INPUT_TENSOR_ERROR;
}
/* labels runs over the batch dimension; weight runs over the class dimension. */
if (labels->shape_[0] != logits->shape_[0] || weight->shape_[0] != logits->shape_[1]) {
return NNACL_INPUT_TENSOR_ERROR;
}
TensorC *loss = outputs[0];
TensorC *total_weight = outputs[1];

NLLLossParameter *param = (NLLLossParameter *)parameter;
if (param->reduction_type_ == Reduction_None) {
/* Unreduced loss: one value per sample, same shape as labels. */
SetShapeTensor(loss, labels);
} else {
/* sum/mean reduce to a scalar (rank 0). */
loss->shape_size_ = 0;
}
total_weight->shape_size_ = 0;
SetDataTypeFormat(loss, logits);
SetDataTypeFormat(total_weight, logits);
return NNACL_OK;
}

REG_INFER(NLLLoss, PrimType_NLLLoss, NLLLossInferShape)

+ 33
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/nllloss_infer.h View File

@@ -0,0 +1,33 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_NNACL_NLLLOSS_INFER_H
#define MINDSPORE_NNACL_NLLLOSS_INFER_H

#include "nnacl/infer/common_infer.h"
#include "nnacl/nllloss_parameter.h"

#ifdef __cplusplus
extern "C" {
#endif

int NLLLossInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter);

#ifdef __cplusplus
}
#endif
#endif // MINDSPORE_NNACL_NLLLOSS_INFER_H

+ 32
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/nllloss_parameter.h View File

@@ -0,0 +1,32 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_NNACL_NLLLOSS_PARAMETER_H_
#define MINDSPORE_NNACL_NLLLOSS_PARAMETER_H_

#include "nnacl/op_base.h"

typedef struct NLLLossParameter {
  // primitive parameter
  OpParameter op_parameter_;      // common op header; kept first so the struct is cast to/from OpParameter (see populate code)
  ReductionType reduction_type_;  // Reduction_Sum / Reduction_Mean / Reduction_None

  // input parameter (filled in by the kernel's Prepare from the logits shape)
  int batch_;      // number of samples, logits dim 0
  int class_num_;  // number of classes, logits dim 1
} NLLLossParameter;

#endif // MINDSPORE_NNACL_NLLLOSS_PARAMETER_H_

+ 4
- 1
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/op_base.h View File

@@ -483,8 +483,10 @@ enum PrimType {
PrimType_LSTMGradData = 204,
PrimType_LSTMGradWeight = 205,
PrimType_RandomNormal = 206,
PrimType_NLLLoss = 207,
PrimType_NLLLossGrad = 208,
PrimType_MIN = PrimType_NONE,
PrimType_MAX = PrimType_RandomNormal + 1,
PrimType_MAX = PrimType_NLLLossGrad + 1,

// inner operators.
PrimType_Inner_ToFormat = 10000,
@@ -531,6 +533,7 @@ typedef struct QuantMulArg {
int right_shift_;
} QuantMulArg;

typedef enum ReductionType { Reduction_Sum, Reduction_Mean, Reduction_None } ReductionType;
typedef enum ActType { ActType_No, ActType_Relu, ActType_Sigmod, ActType_Relu6, ActType_Prelu } ActType;
typedef enum PadMode { Pad_pad, Pad_same, Pad_valid } PadMode;
typedef enum RoundingMode { Rounding_No, Rounding_Away_from_zero, Rounding_Up } RoundingMode;


+ 0
- 11
mindspore/ccsrc/plugin/device/cpu/kernel/stridedslice_cpu_kernel.cc View File

@@ -31,17 +31,6 @@ constexpr size_t kStridedSliceDynamicInputsNum = 4;
constexpr size_t kStridedSliceOutputsNum = 1;
} // namespace

enum PosType { kBegin, kEnd };

int NormalizePos(int pos, int dim_len, PosType pos_type) {
if (pos >= 0) {
int max_pos = pos_type == kBegin ? dim_len - 1 : dim_len;
return std::min(pos, max_pos);
}
int min_pos = pos_type == kBegin ? 0 : -1;
return std::max(pos + dim_len, min_pos);
}

void StridedSliceCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);


+ 37
- 0
mindspore/core/ops/grad/nllloss_grad.cc View File

@@ -0,0 +1,37 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "ops/grad/nllloss_grad.h"

#include "ops/op_utils.h"

namespace mindspore {
namespace ops {
void NLLLossGrad::Init(const Reduction &reduction) { set_reduction(reduction); }

void NLLLossGrad::set_reduction(const Reduction &reduction) {
int64_t reduce = reduction;
(void)AddAttr(kReduction, MakeValue(reduce));
}

Reduction NLLLossGrad::get_reduction() const {
auto value_ptr = GetAttr(kReduction);
return Reduction(GetValue<int64_t>(value_ptr));
}

REGISTER_PRIMITIVE_C(kNameNLLLossGrad, NLLLossGrad)
} // namespace ops
} // namespace mindspore

+ 55
- 0
mindspore/core/ops/grad/nllloss_grad.h View File

@@ -0,0 +1,55 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CORE_OPS_NLLLOSS_GRAD_H_
#define MINDSPORE_CORE_OPS_NLLLOSS_GRAD_H_

#include <string>

#include "ops/primitive_c.h"
#include "mindapi/base/types.h"

namespace mindspore {
namespace ops {
constexpr auto kNameNLLLossGrad = "NLLLossGrad";
/// \brief NLLLossGrad operation. Refer to Python API @ref mindspore.ops.NLLLossGrad for more details.
class MS_CORE_API NLLLossGrad : public PrimitiveC {
 public:
  /// \brief Constructor.
  /// Input order matches the kernel implementations: logits, loss_grad, labels, weight, total_weight.
  NLLLossGrad() : PrimitiveC(kNameNLLLossGrad) {
    InitIOName({"logits", "loss_grad", "labels", "weight", "total_weight"}, {"logits_grad"});
  }

  /// \brief Destructor.
  ~NLLLossGrad() = default;

  MS_DECLARE_PARENT(NLLLossGrad, PrimitiveC);

  /// \brief Init. Refer to the parameters of Python API @ref mindspore.ops.NLLLossGrad for the inputs.
  /// \param[in] reduction Reduction mode; defaults to NONE.
  void Init(const Reduction &reduction = NONE);

  /// \brief Set reduction. Stored as an int64 attribute under kReduction.
  void set_reduction(const Reduction &reduction);

  /// \brief Get reduction.
  ///
  /// \return reduction.
  Reduction get_reduction() const;
};
} // namespace ops
} // namespace mindspore

#endif // MINDSPORE_CORE_OPS_NLLLOSS_GRAD_H_

+ 37
- 0
mindspore/core/ops/nllloss.cc View File

@@ -0,0 +1,37 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "ops/nllloss.h"

#include "ops/op_utils.h"

namespace mindspore {
namespace ops {
void NLLLoss::Init(const Reduction &reduction) { set_reduction(reduction); }

void NLLLoss::set_reduction(const Reduction &reduction) {
int64_t reduce = reduction;
(void)AddAttr(kReduction, MakeValue(reduce));
}

Reduction NLLLoss::get_reduction() const {
auto value_ptr = GetAttr(kReduction);
return Reduction(GetValue<int64_t>(value_ptr));
}

REGISTER_PRIMITIVE_C(kNameNLLLoss, NLLLoss)
} // namespace ops
} // namespace mindspore

+ 53
- 0
mindspore/core/ops/nllloss.h View File

@@ -0,0 +1,53 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CORE_OPS_NLLLOSS_H_
#define MINDSPORE_CORE_OPS_NLLLOSS_H_

#include <string>

#include "ops/primitive_c.h"
#include "mindapi/base/types.h"

namespace mindspore {
namespace ops {
constexpr auto kNameNLLLoss = "NLLLoss";
/// \brief NLLLoss operation. Refer to Python API @ref mindspore.ops.NLLLoss for more details.
class MS_CORE_API NLLLoss : public PrimitiveC {
 public:
  /// \brief Constructor.
  /// Inputs: logits [batch, class], labels [batch], weight [class]; outputs: loss, total_weight.
  NLLLoss() : PrimitiveC(kNameNLLLoss) { InitIOName({"logits", "labels", "weight"}, {"loss", "total_weight"}); }

  /// \brief Destructor.
  ~NLLLoss() = default;

  MS_DECLARE_PARENT(NLLLoss, PrimitiveC);

  /// \brief Init. Refer to the parameters of Python API @ref mindspore.ops.NLLLoss for the inputs.
  /// \param[in] reduction Reduction mode; defaults to NONE.
  void Init(const Reduction &reduction = NONE);

  /// \brief Set reduction. Stored as an int64 attribute under kReduction.
  void set_reduction(const Reduction &reduction);

  /// \brief Get reduction.
  ///
  /// \return reduction.
  Reduction get_reduction() const;
};
} // namespace ops
} // namespace mindspore

#endif // MINDSPORE_CORE_OPS_NLLLOSS_H_

+ 1
- 0
mindspore/core/utils/check_convert_utils.cc View File

@@ -137,6 +137,7 @@ static std::map<std::string, std::map<std::string, AttrConverterPair>> PrimAttrC
{"BinaryCrossEntropy", ReductionMap},
{"BinaryCrossEntropyGrad", ReductionMap},
{"NLLLoss", ReductionMap},
{"NLLLossGrad", ReductionMap},
{"DepthToSpace", DataFormatMap},
{"Pooling", DataFormatMap},
{"Deconvolution", DataFormatMap},


+ 10
- 0
mindspore/lite/schema/ops.fbs View File

@@ -224,6 +224,8 @@ union PrimitiveType {
LSTMGradData,
LSTMGradWeight,
RandomNormal,
NLLLoss,
NLLLossGrad,
}

table Abs {
@@ -1256,3 +1258,11 @@ table RandomNormal {
mean: float;
scale: float;
}

table NLLLoss {
reduction: Reduction;
}

table NLLLossGrad {
reduction: Reduction;
}

+ 10
- 0
mindspore/lite/src/ops/ops_def.cc View File

@@ -224,6 +224,8 @@ OP_TYPE(DynamicQuant)
OP_TYPE(LSTMGradData)
OP_TYPE(LSTMGradWeight)
OP_TYPE(RandomNormal)
OP_TYPE(NLLLoss)
OP_TYPE(NLLLossGrad)
OP_TYPE_DEF_END(PrimitiveType)

OP_SCHEMA_DEF(Abs)
@@ -1256,3 +1258,11 @@ OP_ATTR(seed, float)
OP_ATTR(mean, float)
OP_ATTR(scale, float)
OP_SCHEMA_DEF_END(RandomNormal)

OP_SCHEMA_DEF(NLLLoss)
OP_ATTR_ENUM(reduction, Reduction)
OP_SCHEMA_DEF_END(NLLLoss)

OP_SCHEMA_DEF(NLLLossGrad)
OP_ATTR_ENUM(reduction, Reduction)
OP_SCHEMA_DEF_END(NLLLossGrad)

+ 4
- 0
mindspore/lite/src/ops/ops_func_declare.h View File

@@ -254,6 +254,8 @@
#include "ops/reduce_scatter.h"
#include "ops/dynamic_quant.h"
#include "ops/random_normal.h"
#include "ops/nllloss.h"
#include "ops/grad/nllloss_grad.h"

namespace mindspore::lite::ops {
#define FUNC_MSOP2SCHEMAOP_DECLARE(OP) std::unique_ptr<schema::PrimitiveT> MSOp2SchemaOp(const mindspore::ops::OP *op);
@@ -475,6 +477,8 @@ FUNC_MSOP2SCHEMAOP_DECLARE(AllGather)
FUNC_MSOP2SCHEMAOP_DECLARE(ReduceScatter)
FUNC_MSOP2SCHEMAOP_DECLARE(DynamicQuant)
FUNC_MSOP2SCHEMAOP_DECLARE(RandomNormal)
FUNC_MSOP2SCHEMAOP_DECLARE(NLLLoss)
FUNC_MSOP2SCHEMAOP_DECLARE(NLLLossGrad)
#endif
} // namespace mindspore::lite::ops
#else


+ 43
- 32
mindspore/lite/src/ops/ops_utils.cc View File

@@ -838,6 +838,47 @@ std::unique_ptr<schema::PrimitiveT> RandomNormalPrimitiveCreator(const AnfNodePt
return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
}

// Convert an AnfNode holding an ops::NLLLoss primitive into its flatbuffer schema form.
std::unique_ptr<schema::PrimitiveT> NLLLossPrimitiveCreator(const AnfNodePtr &node) {
  auto prim_c = GetValueNode<std::shared_ptr<mindspore::ops::NLLLoss>>(node);
  if (prim_c == nullptr) {
    return nullptr;
  }
  return ops::MSOp2SchemaOp(prim_c.get());
}

// Convert an AnfNode holding an ops::NLLLossGrad primitive into its flatbuffer schema form.
std::unique_ptr<schema::PrimitiveT> NLLLossGradPrimitiveCreator(const AnfNodePtr &node) {
  auto prim_c = GetValueNode<std::shared_ptr<mindspore::ops::NLLLossGrad>>(node);
  if (prim_c == nullptr) {
    return nullptr;
  }
  return ops::MSOp2SchemaOp(prim_c.get());
}

// Convert an AnfNode holding an ops::Custom primitive into its flatbuffer schema
// form, copying the custom type string and the free-form attribute map.
std::unique_ptr<schema::PrimitiveT> CustomPrimitiveCreator(const AnfNodePtr &node) {
  auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Custom>>(node);
  if (ms_primc == nullptr) {
    // Fix: ms_primc was previously dereferenced without a null check; every
    // other *PrimitiveCreator in this file returns nullptr in this case.
    return nullptr;
  }
  auto schema_op = std::make_unique<schema::CustomT>();
  if (schema_op == nullptr) {
    return nullptr;
  }
  if (ms_primc->GetAttr("type") != nullptr) {
    schema_op->type = ms_primc->get_type();
  }
  if (ms_primc->GetAttr("attr") != nullptr) {
    auto attr_map = ms_primc->get_attr();
    for (const auto &attr_item : attr_map) {
      auto attr = std::make_unique<schema::AttributeT>();
      if (attr == nullptr) {
        return nullptr;
      }
      attr->name = attr_item.first;
      attr->data = attr_item.second;
      schema_op->attr.emplace_back(std::move(attr));
    }
  }

  auto prim = std::make_unique<schema::PrimitiveT>();
  if (prim == nullptr) {
    return nullptr;
  }
  // schema_op ownership transfers into the raw-pointer union slot of PrimitiveT.
  prim->value.value = schema_op.release();
  prim->value.type = schema::PrimitiveType_Custom;
  return prim;
}

RegistryMSOps g_absPrimitiveCreatorRegistry("Abs", AbsPrimitiveCreator);
RegistryMSOps g_absGradPrimitiveCreatorRegistry("AbsGrad", AbsGradPrimitiveCreator);
RegistryMSOps g_activationPrimitiveCreatorRegistry("Activation", ActivationPrimitiveCreator);
@@ -1071,38 +1112,8 @@ RegistryMSOps g_AllGatherCreatorRegistry("AllGather", AllGatherPrimitiveCreator)
RegistryMSOps g_ReduceScatterCreatorRegistry("ReduceScatter", ReduceScatterPrimitiveCreator);
RegistryMSOps g_DynamicQuantCreatorRegistry("DynamicQuant", DynamicQuantPrimitiveCreator);
RegistryMSOps g_RandomNormalCreatorRegistry("RandomNormal", RandomNormalPrimitiveCreator);

std::unique_ptr<schema::PrimitiveT> CustomPrimitiveCreator(const AnfNodePtr &node) {
auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Custom>>(node);
auto schema_op = std::make_unique<schema::CustomT>();
if (schema_op == nullptr) {
return nullptr;
}
if (ms_primc->GetAttr("type") != nullptr) {
schema_op->type = ms_primc->get_type();
}
if (ms_primc->GetAttr("attr") != nullptr) {
auto attr_map = ms_primc->get_attr();
for (const auto &attr_item : attr_map) {
auto attr = std::make_unique<schema::AttributeT>();
if (attr == nullptr) {
return nullptr;
}
attr->name = attr_item.first;
attr->data = attr_item.second;
schema_op->attr.emplace_back(std::move(attr));
}
}

auto prim = std::make_unique<schema::PrimitiveT>();
if (prim == nullptr) {
return nullptr;
}
prim->value.value = schema_op.release();
prim->value.type = schema::PrimitiveType_Custom;
return prim;
}

RegistryMSOps g_NLLLossCreatorRegistry("NLLLoss", NLLLossPrimitiveCreator);
RegistryMSOps g_NLLLossGradCreatorRegistry("NLLLossGrad", NLLLossGradPrimitiveCreator);
RegistryMSOps g_CustomPrimitiveCreatorRegistry("Custom", CustomPrimitiveCreator);
} // namespace lite
} // namespace mindspore


+ 62
- 0
mindspore/lite/src/ops/populate/nllloss_populate.cc View File

@@ -0,0 +1,62 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/ops/populate/populate_register.h"

#include "nnacl/nllloss_parameter.h"

using mindspore::schema::PrimitiveType_NLLLoss;
using mindspore::schema::PrimitiveType_NLLLossGrad;
using mindspore::schema::Reduction;

namespace mindspore {
namespace lite {
// Build an NLLLossParameter from a flatbuffer primitive. Shared by NLLLoss and
// NLLLossGrad: both schema tables carry only the `reduction` attribute.
// Returns nullptr on malformed input or allocation failure; caller owns the result.
OpParameter *PopulateNLLLossParameter(const void *prim) {
  auto primitive = static_cast<const schema::Primitive *>(prim);
  MS_ASSERT(primitive != nullptr);
  Reduction reduction;
  if (primitive->value_type() == PrimitiveType_NLLLoss) {
    auto value = primitive->value_as_NLLLoss();
    if (value == nullptr) {
      MS_LOG(ERROR) << "value is nullptr";
      return nullptr;
    }
    reduction = value->reduction();
  } else {
    // Not NLLLoss, so it must be NLLLossGrad: this function is registered only
    // for those two primitive types (see REG_POPULATE below).
    auto value = primitive->value_as_NLLLossGrad();
    if (value == nullptr) {
      MS_LOG(ERROR) << "value is nullptr";
      return nullptr;
    }
    reduction = value->reduction();
  }

  auto *param = reinterpret_cast<NLLLossParameter *>(malloc(sizeof(NLLLossParameter)));
  if (param == nullptr) {
    MS_LOG(ERROR) << "malloc NLLLossParameter failed.";
    return nullptr;
  }
  // Zero batch_/class_num_ too; the kernel fills them in during Prepare.
  memset(param, 0, sizeof(NLLLossParameter));

  param->op_parameter_.type_ = primitive->value_type();
  param->reduction_type_ = static_cast<ReductionType>(reduction);
  return reinterpret_cast<OpParameter *>(param);
}

REG_POPULATE(PrimitiveType_NLLLoss, PopulateNLLLossParameter, SCHEMA_CUR)
REG_POPULATE(PrimitiveType_NLLLossGrad, PopulateNLLLossParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

+ 82
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/nllloss_fp32.cc View File

@@ -0,0 +1,82 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/runtime/kernel/arm/fp32/nllloss_fp32.h"

#include <vector>

#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "nnacl/fp32/nllloss_fp32.h"

using mindspore::kernel::KERNEL_ARCH;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_NLLLoss;

namespace mindspore::kernel {
namespace {
constexpr size_t kLogitsIndex = 0;
constexpr size_t kLabelsIndex = 1;
constexpr size_t kWeightsIndex = 2;
constexpr size_t kLossIndex = 0;
constexpr size_t kTotalWeightIndex = 1;
} // namespace

int NLLLossCPUKernel::Prepare() {
CHECK_LESS_RETURN(in_tensors_.size(), C3NUM);
CHECK_LESS_RETURN(out_tensors_.size(), C2NUM);
for (size_t i = 0; i < C3NUM; i++) {
CHECK_NULL_RETURN(in_tensors_[i]);
}
for (size_t i = 0; i < C2NUM; i++) {
CHECK_NULL_RETURN(out_tensors_[i]);
}
const auto logits_shape = in_tensors_[kLogitsIndex]->shape();
nllloss_param_->batch_ = logits_shape[0];
nllloss_param_->class_num_ = logits_shape[1];
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}

int NLLLossCPUKernel::ReSize() { return RET_OK; }

int NLLLossCPUKernel::Run() {
const auto *logits = reinterpret_cast<float *>(in_tensors_[kLogitsIndex]->data());
const auto *labels = reinterpret_cast<int *>(in_tensors_[kLabelsIndex]->data());
const auto *weight = reinterpret_cast<float *>(in_tensors_[kWeightsIndex]->data());
auto *loss = reinterpret_cast<float *>(out_tensors_[kLossIndex]->data());
auto *total_weight = reinterpret_cast<float *>(out_tensors_[kTotalWeightIndex]->data());
CHECK_NULL_RETURN(logits);
CHECK_NULL_RETURN(labels);
CHECK_NULL_RETURN(weight);
CHECK_NULL_RETURN(loss);
CHECK_NULL_RETURN(total_weight);

int ret = NLLLoss(logits, labels, weight, loss, total_weight, nllloss_param_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "NLLLoss Run error: error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_NLLLoss, LiteKernelCreator<NLLLossCPUKernel>)
} // namespace mindspore::kernel

+ 44
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/nllloss_fp32.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_NLLLOSS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_NLLLOSS_H_

#include <vector>

#include "src/inner_kernel.h"
#include "nnacl/nllloss_parameter.h"

namespace mindspore::kernel {
// Lite CPU kernel for the NLLLoss op: wraps the nnacl fp32 implementation.
// Inputs: logits, labels, weight; outputs: loss, total_weight.
class NLLLossCPUKernel : public InnerKernel {
 public:
  NLLLossCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
      : InnerKernel(param, inputs, outputs, ctx) {
    // View into op_parameter_ (same allocation), not separately owned.
    nllloss_param_ = reinterpret_cast<NLLLossParameter *>(op_parameter_);
  }
  ~NLLLossCPUKernel() = default;

  int Prepare() override;
  int ReSize() override;
  int Run() override;

 private:
  NLLLossParameter *nllloss_param_;
};

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_NLLLOSS_H_

+ 84
- 0
mindspore/lite/src/runtime/kernel/arm/fp32_grad/nllloss_grad.cc View File

@@ -0,0 +1,84 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/runtime/kernel/arm/fp32_grad/nllloss_grad.h"

#include <vector>

#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "nnacl/fp32_grad/nllloss_grad_fp32.h"

using mindspore::kernel::KERNEL_ARCH;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_NLLLossGrad;

namespace mindspore::kernel {
namespace {
constexpr size_t kLogitsIndex = 0;
constexpr size_t kLossGradIndex = 1;
constexpr size_t kLabelsIndex = 2;
constexpr size_t kWeightsIndex = 3;
constexpr size_t kTotalWeightIndex = 4;
constexpr size_t kDxIndex = 0;
} // namespace

int NLLLossGradCPUKernel::Prepare() {
CHECK_LESS_RETURN(in_tensors_.size(), C5NUM);
CHECK_LESS_RETURN(out_tensors_.size(), C1NUM);
for (size_t i = 0; i < C5NUM; i++) {
CHECK_NULL_RETURN(in_tensors_[i]);
}
CHECK_NULL_RETURN(out_tensors_[kDxIndex]);

const auto logits_shape = in_tensors_[kLogitsIndex]->shape();
nllloss_param_->batch_ = logits_shape[0];
nllloss_param_->class_num_ = logits_shape[1];
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}

int NLLLossGradCPUKernel::ReSize() { return RET_OK; }

int NLLLossGradCPUKernel::Run() {
const auto *logits = reinterpret_cast<float *>(in_tensors_[kLogitsIndex]->data());
const auto *loss_grad = reinterpret_cast<float *>(in_tensors_[kLossGradIndex]->data());
const auto *labels = reinterpret_cast<int *>(in_tensors_[kLabelsIndex]->data());
const auto *weight = reinterpret_cast<float *>(in_tensors_[kWeightsIndex]->data());
const auto *total_weight = reinterpret_cast<float *>(in_tensors_[kTotalWeightIndex]->data());
auto *logits_grad = reinterpret_cast<float *>(out_tensors_[kDxIndex]->data());
CHECK_NULL_RETURN(logits);
CHECK_NULL_RETURN(loss_grad);
CHECK_NULL_RETURN(labels);
CHECK_NULL_RETURN(weight);
CHECK_NULL_RETURN(total_weight);
CHECK_NULL_RETURN(logits_grad);

int ret = NLLLossGrad(logits, loss_grad, labels, weight, total_weight, logits_grad, nllloss_param_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "NLLLossGrad Run error: error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_NLLLossGrad, LiteKernelCreator<NLLLossGradCPUKernel>)
} // namespace mindspore::kernel

+ 44
- 0
mindspore/lite/src/runtime/kernel/arm/fp32_grad/nllloss_grad.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_NLLLOSS_GRAD_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_NLLLOSS_GRAD_H_

#include <vector>

#include "src/inner_kernel.h"
#include "nnacl/nllloss_parameter.h"

namespace mindspore::kernel {
// Lite CPU kernel for the NLLLossGrad op: wraps the nnacl fp32_grad implementation.
// Inputs: logits, loss_grad, labels, weight, total_weight; output: logits_grad.
class NLLLossGradCPUKernel : public InnerKernel {
 public:
  NLLLossGradCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
                       const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
      : InnerKernel(param, inputs, outputs, ctx) {
    // View into op_parameter_ (same allocation), not separately owned.
    nllloss_param_ = reinterpret_cast<NLLLossParameter *>(op_parameter_);
  }
  ~NLLLossGradCPUKernel() = default;

  int Prepare() override;
  int ReSize() override;
  int Run() override;

 private:
  NLLLossParameter *nllloss_param_;
};

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_NLLLOSS_GRAD_H_

+ 110
- 0
mindspore/lite/test/ut/nnacl/infer/nllloss_grad_infer_test.cc View File

@@ -0,0 +1,110 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "common/common_test.h"
#include "nnacl/infer/nllloss_grad_infer.h"

namespace mindspore {
class TestNLLLossGradInfer : public mindspore::CommonTest {
public:
TestNLLLossGradInfer() {}
};

// Build the five inputs and one output for an NLLLossGrad infer call:
// logits [3, 5], loss_grad ([3] for Reduction_None, scalar otherwise),
// labels [3], weight [5], total_weight scalar. Caller frees via
// NLLLossGradInferReleaseResources.
void NLLLossGradInferInitArgs(std::vector<TensorC *> *inputs, std::vector<TensorC *> *outputs,
                              ReductionType reduction_type) {
  // Fix: value-initialize each tensor (`new TensorC()`) so all POD members are
  // zeroed; plain `new TensorC` left data_type_/format_ uninitialized, and the
  // infer function copies them to the outputs via SetDataTypeFormat.
  auto *logits = new TensorC();
  logits->shape_size_ = 2;
  logits->shape_[0] = 3;
  logits->shape_[1] = 5;
  inputs->push_back(logits);

  auto *loss_grad = new TensorC();
  if (reduction_type == Reduction_None) {
    loss_grad->shape_size_ = 1;
    loss_grad->shape_[0] = 3;
  } else {
    loss_grad->shape_size_ = 0;
  }
  inputs->push_back(loss_grad);

  auto *labels = new TensorC();
  labels->shape_size_ = 1;
  labels->shape_[0] = 3;
  inputs->push_back(labels);

  auto *weight = new TensorC();
  weight->shape_size_ = 1;
  weight->shape_[0] = 5;
  inputs->push_back(weight);

  auto *total_weight = new TensorC();
  total_weight->shape_size_ = 0;
  inputs->push_back(total_weight);

  auto *logits_grad = new TensorC();
  outputs->push_back(logits_grad);
}

// Common assertions for all reduction modes: NLLLossGrad's logits_grad output
// always has the logits shape [batch, class] = [3, 5] regardless of reduction.
// NOTE(review): `param` is currently unused; kept for call-site symmetry.
void CheckResults(int ret, const NLLLossParameter *param, const std::vector<TensorC *> &outputs) {
  ASSERT_EQ(ret, NNACL_OK);
  ASSERT_EQ(outputs[0]->shape_size_, 2);
  ASSERT_EQ(outputs[0]->shape_[0], 3);
  ASSERT_EQ(outputs[0]->shape_[1], 5);
}

void NLLLossGradInferReleaseResources(NLLLossParameter *param, std::vector<TensorC *> inputs,
std::vector<TensorC *> outputs) {
delete param;
for (auto t : inputs) delete t;
for (auto t : outputs) delete t;
}

// reduction=None: loss_grad input is per-sample [3]; logits_grad is still [3, 5].
TEST_F(TestNLLLossGradInfer, ReductionNone) {
  std::vector<TensorC *> inputs;
  std::vector<TensorC *> outputs;
  NLLLossGradInferInitArgs(&inputs, &outputs, Reduction_None);
  auto *param = new NLLLossParameter;
  param->reduction_type_ = Reduction_None;
  int ret = NLLLossGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
                                  reinterpret_cast<OpParameter *>(param));
  CheckResults(ret, param, outputs);
  NLLLossGradInferReleaseResources(param, inputs, outputs);
}

// reduction=Sum: loss_grad input is a scalar; logits_grad is still [3, 5].
TEST_F(TestNLLLossGradInfer, ReductionSum) {
  std::vector<TensorC *> inputs;
  std::vector<TensorC *> outputs;
  NLLLossGradInferInitArgs(&inputs, &outputs, Reduction_Sum);
  auto *param = new NLLLossParameter;
  param->reduction_type_ = Reduction_Sum;
  int ret = NLLLossGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
                                  reinterpret_cast<OpParameter *>(param));
  CheckResults(ret, param, outputs);
  NLLLossGradInferReleaseResources(param, inputs, outputs);
}

// reduction=Mean: loss_grad input is a scalar; logits_grad is still [3, 5].
TEST_F(TestNLLLossGradInfer, ReductionMean) {
  std::vector<TensorC *> inputs;
  std::vector<TensorC *> outputs;
  NLLLossGradInferInitArgs(&inputs, &outputs, Reduction_Mean);
  auto *param = new NLLLossParameter;
  param->reduction_type_ = Reduction_Mean;
  int ret = NLLLossGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
                                  reinterpret_cast<OpParameter *>(param));
  CheckResults(ret, param, outputs);
  NLLLossGradInferReleaseResources(param, inputs, outputs);
}
} // namespace mindspore

+ 98
- 0
mindspore/lite/test/ut/nnacl/infer/nllloss_infer_test.cc View File

@@ -0,0 +1,98 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "common/common_test.h"
#include "nnacl/infer/nllloss_infer.h"

namespace mindspore {
class TestNLLLossInfer : public mindspore::CommonTest {
public:
TestNLLLossInfer() {}
};

// Build the three inputs (logits [3, 5], labels [3], weight [5]) and two
// outputs (loss, total_weight) for an NLLLoss infer call. Caller frees via
// NLLLossInferReleaseResources.
void NLLLossInferInitArgs(std::vector<TensorC *> *inputs, std::vector<TensorC *> *outputs) {
  // Fix: value-initialize each tensor (`new TensorC()`) so all POD members are
  // zeroed; plain `new TensorC` left data_type_/format_ uninitialized, and
  // NLLLossInferShape copies them to the outputs via SetDataTypeFormat.
  auto *logits = new TensorC();
  logits->shape_size_ = 2;
  logits->shape_[0] = 3;
  logits->shape_[1] = 5;
  inputs->push_back(logits);

  auto *labels = new TensorC();
  labels->shape_size_ = 1;
  labels->shape_[0] = 3;
  inputs->push_back(labels);

  auto *weight = new TensorC();
  weight->shape_size_ = 1;
  weight->shape_[0] = 5;
  inputs->push_back(weight);

  auto *loss = new TensorC();
  outputs->push_back(loss);
  auto *total_weight = new TensorC();
  outputs->push_back(total_weight);
}

void NLLLossInferReleaseResources(NLLLossParameter *param, std::vector<TensorC *> inputs,
std::vector<TensorC *> outputs) {
delete param;
for (auto t : inputs) delete t;
for (auto t : outputs) delete t;
}

TEST_F(TestNLLLossInfer, ReductionNone) {
std::vector<TensorC *> inputs;
std::vector<TensorC *> outputs;
NLLLossInferInitArgs(&inputs, &outputs);
auto *param = new NLLLossParameter;
param->reduction_type_ = Reduction_None;
int ret = NLLLossInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
reinterpret_cast<OpParameter *>(param));
ASSERT_EQ(ret, NNACL_OK);
ASSERT_EQ(outputs[0]->shape_size_, 1);
ASSERT_EQ(outputs[0]->shape_[0], 3);
ASSERT_EQ(outputs[1]->shape_size_, 0);
NLLLossInferReleaseResources(param, inputs, outputs);
}

// reduction=sum must collapse both outputs to scalars.
TEST_F(TestNLLLossInfer, ReductionSum) {
  std::vector<TensorC *> inputs;
  std::vector<TensorC *> outputs;
  NLLLossInferInitArgs(&inputs, &outputs);
  // Value-initialize so members other than reduction_type_ are zeroed
  // instead of holding indeterminate values.
  auto *param = new NLLLossParameter();
  param->reduction_type_ = Reduction_Sum;
  int ret = NLLLossInferShape(reinterpret_cast<const TensorC **>(inputs.data()), inputs.size(), outputs.data(),
                              outputs.size(), reinterpret_cast<OpParameter *>(param));
  ASSERT_EQ(ret, NNACL_OK);
  ASSERT_EQ(outputs[0]->shape_size_, 0);
  ASSERT_EQ(outputs[1]->shape_size_, 0);
  NLLLossInferReleaseResources(param, inputs, outputs);
}

// reduction=mean must collapse both outputs to scalars.
TEST_F(TestNLLLossInfer, ReductionMean) {
  std::vector<TensorC *> inputs;
  std::vector<TensorC *> outputs;
  NLLLossInferInitArgs(&inputs, &outputs);
  // Value-initialize so members other than reduction_type_ are zeroed
  // instead of holding indeterminate values.
  auto *param = new NLLLossParameter();
  param->reduction_type_ = Reduction_Mean;
  int ret = NLLLossInferShape(reinterpret_cast<const TensorC **>(inputs.data()), inputs.size(), outputs.data(),
                              outputs.size(), reinterpret_cast<OpParameter *>(param));
  ASSERT_EQ(ret, NNACL_OK);
  ASSERT_EQ(outputs[0]->shape_size_, 0);
  ASSERT_EQ(outputs[1]->shape_size_, 0);
  NLLLossInferReleaseResources(param, inputs, outputs);
}
} // namespace mindspore

+ 146
- 0
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/nllloss_fp32_test.cc View File

@@ -0,0 +1,146 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32/nllloss_fp32.h"
#include "src/kernel_registry.h"
#include "src/lite_kernel.h"
#include "src/tensor_category.h"

namespace mindspore {
// Test fixture for the forward NLLLoss fp32 CPU-kernel tests.
class TestNLLLossFp32 : public mindspore::CommonTest {
 public:
  // Nothing to set up; prefer =default over an empty user-provided ctor.
  TestNLLLossFp32() = default;
};

// Build the forward-kernel test tensors: logits (3x5 log-probabilities),
// labels (3 int32 class ids) and weight (5 per-class weights) as inputs,
// plus a loss tensor of the requested shape and a scalar total_weight as
// outputs. Everything is heap-allocated; callers free the tensors via
// NLLLossReleaseResources.
void NLLLossInitArgs(std::vector<lite::Tensor *> *inputs, std::vector<lite::Tensor *> *outputs,
                     const std::vector<int> &loss_shape) {
  const float logits_array[15] = {-1.3739, -2.2700, -3.2333, -2.4589, -0.6566, -1.2156, -2.6026, -1.2200,
                                  -1.8731, -1.7119, -0.7130, -3.3672, -1.5368, -1.8289, -2.3058};
  const int labels_array[3] = {1, 0, 4};
  const float weight_array[5] = {0.2, 0.3, 0.1, 0.15, 0.25};
  const std::vector<int> logits_shape = {3, 5};
  const std::vector<int> labels_shape = {3};
  const std::vector<int> weight_shape = {5};
  const std::vector<int> total_weight_shape = {};

  auto *logits_t = new lite::Tensor(kNumberTypeFloat32, logits_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  logits_t->MallocData();
  memcpy(logits_t->MutableData(), logits_array, sizeof(float) * logits_t->ElementsNum());
  inputs->push_back(logits_t);

  auto *labels_t = new lite::Tensor(kNumberTypeInt32, labels_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  labels_t->MallocData();
  memcpy(labels_t->MutableData(), labels_array, sizeof(int) * labels_t->ElementsNum());
  inputs->push_back(labels_t);

  auto *weight_t = new lite::Tensor(kNumberTypeFloat32, weight_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  weight_t->MallocData();
  memcpy(weight_t->MutableData(), weight_array, sizeof(float) * weight_t->ElementsNum());
  inputs->push_back(weight_t);

  // The loss is a scalar for sum/mean reduction and [batch] for none.
  const auto loss_category = loss_shape.empty() ? lite::Category::CONST_SCALAR : lite::Category::CONST_TENSOR;
  auto *loss_t = new lite::Tensor(kNumberTypeFloat32, loss_shape, mindspore::NC, loss_category);
  loss_t->MallocData();
  outputs->push_back(loss_t);

  auto *total_weight_t =
    new lite::Tensor(kNumberTypeFloat32, total_weight_shape, mindspore::NC, lite::Category::CONST_SCALAR);
  total_weight_t->MallocData();
  outputs->push_back(total_weight_t);
}

// Release the kernel, context and every tensor created for one forward test.
void NLLLossReleaseResources(lite::InnerContext *ctx, kernel::NLLLossCPUKernel *kernel,
                             std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) {
  delete kernel;
  delete ctx;
  for (auto *tensor : inputs) {
    delete tensor;
  }
  for (auto *tensor : outputs) {
    delete tensor;
  }
}

// Forward NLLLoss with reduction=none: per-sample losses plus scalar
// total_weight.
TEST_F(TestNLLLossFp32, ReductionNone) {
  std::vector<lite::Tensor *> inputs;
  std::vector<lite::Tensor *> outputs;
  std::vector<int> loss_shape = {3};
  NLLLossInitArgs(&inputs, &outputs, loss_shape);

  auto *ctx = new lite::InnerContext;
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  // Value-initialize so fields the test does not set are zeroed rather than
  // indeterminate.
  auto *param = new NLLLossParameter();
  param->batch_ = 3;
  param->class_num_ = 5;
  param->reduction_type_ = Reduction_None;
  auto *kernel = new kernel::NLLLossCPUKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, ctx);
  // Fail fast on setup/execution errors instead of comparing garbage output.
  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
  ASSERT_EQ(lite::RET_OK, kernel->Run());

  float expect_loss[3] = {0.681, 0.24312, 0.57645};
  float expect_total_weight[1] = {0.75};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0]->MutableData()), expect_loss, 3, 0.0001));
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[1]->MutableData()), expect_total_weight, 1, 0.0001));
  NLLLossReleaseResources(ctx, kernel, inputs, outputs);
}

// Forward NLLLoss with reduction=sum: scalar loss plus scalar total_weight.
TEST_F(TestNLLLossFp32, ReductionSum) {
  std::vector<lite::Tensor *> inputs;
  std::vector<lite::Tensor *> outputs;
  std::vector<int> loss_shape = {};
  NLLLossInitArgs(&inputs, &outputs, loss_shape);

  auto *ctx = new lite::InnerContext;
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  // Value-initialize so fields the test does not set are zeroed rather than
  // indeterminate.
  auto *param = new NLLLossParameter();
  param->batch_ = 3;
  param->class_num_ = 5;
  param->reduction_type_ = Reduction_Sum;
  auto *kernel = new kernel::NLLLossCPUKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, ctx);
  // Fail fast on setup/execution errors instead of comparing garbage output.
  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
  ASSERT_EQ(lite::RET_OK, kernel->Run());

  float expect_loss[1] = {1.50057};
  float expect_total_weight[1] = {0.75};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0]->MutableData()), expect_loss, 1, 0.0001));
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[1]->MutableData()), expect_total_weight, 1, 0.0001));
  NLLLossReleaseResources(ctx, kernel, inputs, outputs);
}

// Forward NLLLoss with reduction=mean: scalar loss plus scalar total_weight.
TEST_F(TestNLLLossFp32, ReductionMean) {
  std::vector<lite::Tensor *> inputs;
  std::vector<lite::Tensor *> outputs;
  std::vector<int> loss_shape = {};
  NLLLossInitArgs(&inputs, &outputs, loss_shape);

  auto *ctx = new lite::InnerContext;
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  // Value-initialize so fields the test does not set are zeroed rather than
  // indeterminate.
  auto *param = new NLLLossParameter();
  param->batch_ = 3;
  param->class_num_ = 5;
  param->reduction_type_ = Reduction_Mean;
  auto *kernel = new kernel::NLLLossCPUKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, ctx);
  // Fail fast on setup/execution errors instead of comparing garbage output.
  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
  ASSERT_EQ(lite::RET_OK, kernel->Run());

  float expect_loss[1] = {2.00076};
  float expect_total_weight[1] = {0.75};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0]->MutableData()), expect_loss, 1, 0.0001));
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[1]->MutableData()), expect_total_weight, 1, 0.0001));
  NLLLossReleaseResources(ctx, kernel, inputs, outputs);
}
} // namespace mindspore

+ 153
- 0
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/nllloss_grad_fp32_test.cc View File

@@ -0,0 +1,153 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/nllloss_grad.h"
#include "src/kernel_registry.h"
#include "src/lite_kernel.h"
#include "src/tensor_category.h"

namespace mindspore {
// Test fixture for the NLLLossGrad fp32 CPU-kernel tests.
class TestNLLLossGradFp32 : public mindspore::CommonTest {
 public:
  // Nothing to set up; prefer =default over an empty user-provided ctor.
  TestNLLLossGradFp32() = default;
};

// Build the grad-kernel test tensors in kernel input order: logits (3x5),
// loss_grad (incoming gradient, shape supplied by the caller), labels,
// weight and scalar total_weight; the single output is logits_grad (3x5).
// Everything is heap-allocated; callers free the tensors via
// NLLLossGradReleaseResources.
void NLLLossGradInitArgs(std::vector<lite::Tensor *> *inputs, std::vector<lite::Tensor *> *outputs,
                         const float *loss_grad_ptr, const std::vector<int> &loss_grad_shape) {
  float logits_array[15] = {-1.3739, -2.2700, -3.2333, -2.4589, -0.6566, -1.2156, -2.6026, -1.2200,
                            -1.8731, -1.7119, -0.7130, -3.3672, -1.5368, -1.8289, -2.3058};
  int labels_array[3] = {1, 0, 4};
  float weight_array[5] = {0.2, 0.3, 0.1, 0.15, 0.25};
  float total_weight_array[1] = {0.75};
  std::vector<int> logits_shape = {3, 5};
  std::vector<int> labels_shape = {3};
  std::vector<int> weight_shape = {5};
  std::vector<int> total_weight_shape = {};

  auto *logits_t = new lite::Tensor(kNumberTypeFloat32, logits_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  logits_t->MallocData();
  memcpy(logits_t->MutableData(), logits_array, sizeof(float) * logits_t->ElementsNum());
  inputs->push_back(logits_t);

  // loss_grad is a scalar for sum/mean reduction and [batch] for none.
  auto type = loss_grad_shape.empty() ? lite::Category::CONST_SCALAR : lite::Category::CONST_TENSOR;
  auto *loss_grad_t = new lite::Tensor(kNumberTypeFloat32, loss_grad_shape, mindspore::NC, type);
  loss_grad_t->MallocData();
  memcpy(loss_grad_t->MutableData(), loss_grad_ptr, sizeof(float) * loss_grad_t->ElementsNum());
  inputs->push_back(loss_grad_t);

  auto *labels_t = new lite::Tensor(kNumberTypeInt32, labels_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  labels_t->MallocData();
  memcpy(labels_t->MutableData(), labels_array, sizeof(int) * labels_t->ElementsNum());
  inputs->push_back(labels_t);

  auto *weight_t = new lite::Tensor(kNumberTypeFloat32, weight_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  weight_t->MallocData();
  memcpy(weight_t->MutableData(), weight_array, sizeof(float) * weight_t->ElementsNum());
  inputs->push_back(weight_t);

  auto *total_weight_t =
    new lite::Tensor(kNumberTypeFloat32, total_weight_shape, mindspore::NC, lite::Category::CONST_SCALAR);
  total_weight_t->MallocData();
  // BUGFIX: copy exactly the one float the source array holds. The previous
  // size, sizeof(float) * weight_t->ElementsNum(), copied 5 floats — reading
  // past the end of total_weight_array and writing past the scalar tensor's
  // buffer.
  memcpy(total_weight_t->MutableData(), total_weight_array, sizeof(total_weight_array));
  inputs->push_back(total_weight_t);

  auto *logits_grad_t = new lite::Tensor(kNumberTypeFloat32, logits_shape, mindspore::NC, lite::Category::CONST_TENSOR);
  logits_grad_t->MallocData();
  outputs->push_back(logits_grad_t);
}

// Release the kernel, context and every tensor created for one grad test.
// NOTE(review): `param` is accepted but never deleted here — presumably the
// kernel releases its OpParameter in its own destructor; confirm against the
// kernel implementation. If it does not, this leaks (the infer-shape test's
// release helper does delete its parameter); if it does, deleting here would
// double-free.
void NLLLossGradReleaseResources(lite::InnerContext *ctx, kernel::NLLLossGradCPUKernel *kernel, NLLLossParameter *param,
                                 std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) {
  delete kernel;
  delete ctx;
  for (auto t : inputs) delete t;
  for (auto t : outputs) delete t;
}

// NLLLossGrad with reduction=none: incoming gradient has shape [batch].
TEST_F(TestNLLLossGradFp32, ReductionNone) {
  std::vector<lite::Tensor *> inputs;
  std::vector<lite::Tensor *> outputs;
  float loss_grad[3] = {1.181, 0.74312, 1.07645};
  std::vector<int> loss_grad_shape = {3};
  NLLLossGradInitArgs(&inputs, &outputs, loss_grad, loss_grad_shape);

  auto *ctx = new lite::InnerContext;
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  // Value-initialize so fields the test does not set are zeroed rather than
  // indeterminate.
  auto *param = new NLLLossParameter();
  param->batch_ = 3;
  param->class_num_ = 5;
  param->reduction_type_ = Reduction_None;
  auto *kernel = new kernel::NLLLossGradCPUKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, ctx);
  // Fail fast on setup/execution errors instead of comparing garbage output.
  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
  ASSERT_EQ(lite::RET_OK, kernel->Run());

  float expect_loss[15] = {0.0000, -0.35430002, 0.0000, 0.0000, 0.0000, -0.148624, 0.0000, 0.0000,
                           0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.2691125};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0]->MutableData()), expect_loss, 15, 0.0001));
  NLLLossGradReleaseResources(ctx, kernel, param, inputs, outputs);
}

// NLLLossGrad with reduction=sum: incoming gradient is a scalar.
TEST_F(TestNLLLossGradFp32, ReductionSum) {
  std::vector<lite::Tensor *> inputs;
  std::vector<lite::Tensor *> outputs;
  float loss_grad[1] = {2.00057};
  std::vector<int> loss_grad_shape = {};
  NLLLossGradInitArgs(&inputs, &outputs, loss_grad, loss_grad_shape);

  auto *ctx = new lite::InnerContext;
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  // Value-initialize so fields the test does not set are zeroed rather than
  // indeterminate.
  auto *param = new NLLLossParameter();
  param->batch_ = 3;
  param->class_num_ = 5;
  param->reduction_type_ = Reduction_Sum;
  auto *kernel = new kernel::NLLLossGradCPUKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, ctx);
  // Fail fast on setup/execution errors instead of comparing garbage output.
  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
  ASSERT_EQ(lite::RET_OK, kernel->Run());

  float expect_loss[15] = {0.0000, -0.600171, 0.0000, 0.0000, 0.0000, -0.40011403, 0.0000, 0.0000,
                           0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.5001425};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0]->MutableData()), expect_loss, 15, 0.0001));
  NLLLossGradReleaseResources(ctx, kernel, param, inputs, outputs);
}

// NLLLossGrad with reduction=mean: incoming gradient is a scalar.
TEST_F(TestNLLLossGradFp32, ReductionMean) {
  std::vector<lite::Tensor *> inputs;
  std::vector<lite::Tensor *> outputs;
  float loss_grad[1] = {2.50076};
  std::vector<int> loss_grad_shape = {};
  NLLLossGradInitArgs(&inputs, &outputs, loss_grad, loss_grad_shape);

  auto *ctx = new lite::InnerContext;
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  // Value-initialize so fields the test does not set are zeroed rather than
  // indeterminate.
  auto *param = new NLLLossParameter();
  param->batch_ = 3;
  param->class_num_ = 5;
  param->reduction_type_ = Reduction_Mean;
  auto *kernel = new kernel::NLLLossGradCPUKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, ctx);
  // Fail fast on setup/execution errors instead of comparing garbage output.
  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
  ASSERT_EQ(lite::RET_OK, kernel->Run());

  float expect_loss[15] = {0.0000, -1.0003041, 0.0000, 0.0000, 0.0000, -0.6668694, 0.0000, 0.0000,
                           0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.8335867};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0]->MutableData()), expect_loss, 15, 0.0001));
  NLLLossGradReleaseResources(ctx, kernel, param, inputs, outputs);
}
} // namespace mindspore

+ 1
- 1
mindspore/python/mindspore/ops/operations/_grad_ops.py View File

@@ -1883,7 +1883,7 @@ class NLLLossGrad(PrimitiveWithInfer):
@prim_attr_register
def __init__(self, reduction="mean"):
"""Initialize NLLLoss"""
self.init_prim_io_names(inputs=['x', 'target', "weight"], outputs=['loss'])
self.init_prim_io_names(inputs=['x', 'loss_grad', 'target', 'weight', 'total_weight'], outputs=['x_grad'])
self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
self.add_prim_attr('reduction', self.reduction)



+ 1
- 1
mindspore/python/mindspore/ops/operations/nn_ops.py View File

@@ -2235,7 +2235,7 @@ class NLLLoss(PrimitiveWithInfer):
and `weight` should be equal to each other.

Supported Platforms:
``Ascend`` ``GPU``
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> logits = Tensor(np.array([[0.5488135, 0.71518934],


+ 140
- 0
tests/st/ops/cpu/test_nllloss_op.py View File

@@ -0,0 +1,140 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""test NLLLoss forward and backward"""

import pytest
import numpy as np

from mindspore import context
from mindspore import Tensor
from mindspore import nn
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore import export, load

context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True)


class NLLLoss(nn.Cell):
    """Cell wrapper around the NLLLoss primitive."""

    def __init__(self, reduction="none"):
        super().__init__()
        self.nllloss = P.NLLLoss(reduction=reduction)

    def construct(self, x, t, w):
        """Apply NLLLoss to logits x, targets t and class weights w."""
        return self.nllloss(x, t, w)


class NLLLossGrad(nn.Cell):
    """Cell that differentiates `forward` w.r.t. all inputs with fixed sens."""

    def __init__(self, forward, sens):
        super().__init__()
        self.forward = forward
        self.grad = C.GradOperation(get_all=True, sens_param=True)
        self.sens = sens

    def construct(self, x, t, w):
        """Return the gradients of `forward` w.r.t. (x, t, w)."""
        return self.grad(self.forward)(x, t, w, self.sens)


# Shared fixtures used by all three tests below.
np_type = np.float32
# 3x5 log-probabilities (batch of 3 samples, 5 classes).
logits = Tensor(np.array([[-1.3739, -2.2700, -3.2333, -2.4589, -0.6566],
                          [-1.2156, -2.6026, -1.2200, -1.8731, -1.7119],
                          [-0.7130, -3.3672, -1.5368, -1.8289, -2.3058]]).astype(np_type))
# Target class index for each sample.
target = Tensor(np.array([1, 0, 4]).astype(np.int32))
# Per-class loss weights.
weight = Tensor(np.array([0.2, 0.3, 0.1, 0.15, 0.25]).astype(np_type))


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_NLLLoss_none():
    """
    Feature: test nllloss op with reduction none.
    Description: including forward and backward.
    Expectation: expect correct forward and backward result.
    """
    nllloss = NLLLoss(reduction="none")
    actual_output = nllloss(logits, target, weight)
    expect_loss = np.array([0.681, 0.24312, 0.57645]).astype(np_type)
    expect_total_weight = np.array(0.75).astype(np_type)
    assert np.allclose(actual_output[0].asnumpy(), expect_loss)
    assert np.allclose(actual_output[1].asnumpy(), expect_total_weight)

    nllloss_grad = NLLLossGrad(nllloss, sens=(actual_output[0] + 0.5, actual_output[1] + 0.5))
    # Renamed from `expect_grad`: this is the computed gradient, not an
    # expected value.
    actual_grad = nllloss_grad(logits, target, weight)
    expect_dx = np.array([[0.0000, -0.35430002, 0.0000, 0.0000, 0.0000],
                          [-0.148624, 0.0000, 0.0000, 0.0000, 0.0000],
                          [0.0000, 0.0000, 0.0000, 0.0000, -0.2691125]]).astype(np_type)
    assert np.allclose(actual_grad[0].asnumpy(), expect_dx)

    # Round-trip through MINDIR export/load to check serialized-graph parity.
    export(nllloss_grad, logits, target, weight, file_name="nllloss_none", file_format='MINDIR')
    net = nn.GraphCell(load("nllloss_none.mindir"))
    assert np.allclose(net(logits, target, weight)[0].asnumpy(), expect_dx)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_NLLLoss_sum():
    """
    Feature: test nllloss op with reduction sum.
    Description: including forward and backward.
    Expectation: expect correct forward and backward result.
    """
    nllloss = NLLLoss(reduction="sum")
    actual_output = nllloss(logits, target, weight)
    expect_loss = np.array(1.50057).astype(np_type)
    expect_total_weight = np.array(0.75).astype(np_type)
    assert np.allclose(actual_output[0].asnumpy(), expect_loss)
    assert np.allclose(actual_output[1].asnumpy(), expect_total_weight)

    nllloss_grad = NLLLossGrad(nllloss, sens=(actual_output[0] + 0.5, actual_output[1] + 0.5))
    # Renamed from `expect_grad`: this is the computed gradient, not an
    # expected value.
    actual_grad = nllloss_grad(logits, target, weight)
    expect_dx = np.array([[0.0000, -0.600171, 0.0000, 0.0000, 0.0000],
                          [-0.40011403, 0.0000, 0.0000, 0.0000, 0.0000],
                          [0.0000, 0.0000, 0.0000, 0.0000, -0.5001425]]).astype(np_type)
    assert np.allclose(actual_grad[0].asnumpy(), expect_dx)

    # Round-trip through MINDIR export/load to check serialized-graph parity.
    export(nllloss_grad, logits, target, weight, file_name="nllloss_sum", file_format='MINDIR')
    net = nn.GraphCell(load("nllloss_sum.mindir"))
    assert np.allclose(net(logits, target, weight)[0].asnumpy(), expect_dx)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_NLLLoss_mean():
    """
    Feature: test nllloss op with reduction mean.
    Description: including forward and backward.
    Expectation: expect correct forward and backward result.
    """
    # Keyword arg for consistency with the other two tests.
    nllloss = NLLLoss(reduction="mean")
    actual_output = nllloss(logits, target, weight)
    expect_loss = np.array(2.00076).astype(np_type)
    expect_total_weight = np.array(0.75).astype(np_type)
    assert np.allclose(actual_output[0].asnumpy(), expect_loss)
    assert np.allclose(actual_output[1].asnumpy(), expect_total_weight)

    nllloss_grad = NLLLossGrad(nllloss, sens=(actual_output[0] + 0.5, actual_output[1] + 0.5))
    # Renamed from `expect_grad`: this is the computed gradient, not an
    # expected value.
    actual_grad = nllloss_grad(logits, target, weight)
    expect_dx = np.array([[0.0000, -1.0003041, 0.0000, 0.0000, 0.0000],
                          [-0.6668694, 0.0000, 0.0000, 0.0000, 0.0000],
                          [0.0000, 0.0000, 0.0000, 0.0000, -0.8335867]]).astype(np_type)
    assert np.allclose(actual_grad[0].asnumpy(), expect_dx)

    # Round-trip through MINDIR export/load to check serialized-graph parity.
    export(nllloss_grad, logits, target, weight, file_name="nllloss_mean", file_format='MINDIR')
    net = nn.GraphCell(load("nllloss_mean.mindir"))
    assert np.allclose(net(logits, target, weight)[0].asnumpy(), expect_dx)

Loading…
Cancel
Save