Browse Source

[feat] [assistant] [I40GJE] Add new AICPU operator CholeskyInverse

feature/build-system-rewrite
zy 4 years ago
parent
commit
7543172bd9
11 changed files with 345 additions and 1 deletion
  1. +69
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/cholesky_inverse_cpu_kernel.cc
  2. +47
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/cholesky_inverse_cpu_kernel.h
  3. +1
    -0
      mindspore/core/base/core_ops.h
  4. +64
    -0
      mindspore/core/ops/cholesky_inverse_.cc
  5. +42
    -0
      mindspore/core/ops/cholesky_inverse_.h
  6. +33
    -0
      mindspore/python/mindspore/ops/_grad_experimental/grad_math_ops.py
  7. +1
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py
  8. +31
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/cholesky_inverse.py
  9. +3
    -1
      mindspore/python/mindspore/ops/operations/__init__.py
  10. +50
    -0
      mindspore/python/mindspore/ops/operations/math_ops.py
  11. +4
    -0
      tests/ut/python/ops/test_ops.py

+ 69
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/cholesky_inverse_cpu_kernel.cc View File

@@ -0,0 +1,69 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/cholesky_inverse_cpu_kernel.h"
#include "runtime/device/cpu/cpu_device_address.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kDimNum = 2;
}
// Validates the node's I/O arity and checks that the input is a 2-D square
// matrix; caches the node for later attribute/shape lookups in Launch.
template <typename T>
void CholeskyInverseCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
  node_wpt_ = kernel_node;
  kernel_name_ = AnfAlgo::GetCNodeName(kernel_node);
  const size_t actual_input_num = AnfAlgo::GetInputTensorNum(kernel_node);
  CHECK_KERNEL_INPUTS_NUM(actual_input_num, kInputNum, kernel_name_);
  const size_t actual_output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
  CHECK_KERNEL_OUTPUTS_NUM(actual_output_num, kOutputNum, kernel_name_);
  const auto shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  if (shape.size() != kDimNum) {
    MS_EXCEPTION(ValueError) << "The dimension of x must be equal to 2, while got x_dim: " << shape.size() << ".";
  }
  const auto row = shape[shape.size() - kDimNum];
  const auto col = shape[shape.size() - 1];
  // The Cholesky factor must be square for the inverse to be defined.
  if (row != col) {
    MS_EXCEPTION(ValueError) << "For CholeskyInverse"
                             << " input cholesky_inverse should be square matrix "
                             << "while row is " << row << ", col is " << col;
  }
}
// Computes the CholeskyInverse result: given the Cholesky factor u in the
// input buffer, writes (u^T u)^-1 (upper=true) or (u u^T)^-1 (upper=false)
// into the output buffer. Returns true on success.
template <typename T>
bool CholeskyInverseCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                         const std::vector<kernel::AddressPtr> &,
                                         const std::vector<kernel::AddressPtr> &outputs) {
  auto input_x0 = reinterpret_cast<T *>(inputs[0]->addr);
  auto output_y = reinterpret_cast<T *>(outputs[0]->addr);
  auto input_shape = AnfAlgo::GetInputDeviceShape(node_wpt_, 0);
  // InitKernel has already verified the input is a 2-D square matrix.
  const int64_t n = SizeToLong(input_shape[0]);
  using MatrixXd = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
  Eigen::Map<MatrixXd> A(input_x0, n, n);
  // Map the output buffer directly so Eigen evaluates the inverse in place,
  // instead of building a temporary matrix and copying it element-by-element.
  Eigen::Map<MatrixXd> inv(output_y, n, n);
  const auto upper = AnfAlgo::GetNodeAttr<bool>(node_wpt_, "upper");
  if (upper) {
    inv = (A.transpose() * A).inverse();
  } else {
    inv = (A * A.transpose()).inverse();
  }
  return true;
}
} // namespace kernel
} // namespace mindspore

+ 47
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/cholesky_inverse_cpu_kernel.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CHOLESKYINVERSE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CHOLESKYINVERSE_CPU_KERNEL_H_
#include <Eigen/Dense>
#include <vector>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
constexpr size_t kInputNum = 1;
constexpr size_t kOutputNum = 1;
namespace kernel {
// CPU kernel for the CholeskyInverse operator: inverts a symmetric
// positive-definite matrix given its (upper or lower) Cholesky factor.
template <typename T>
class CholeskyInverseCPUKernel : public CPUKernel {
 public:
  CholeskyInverseCPUKernel() = default;
  ~CholeskyInverseCPUKernel() override = default;
  // Checks I/O counts and validates that the input is a 2-D square matrix.
  void InitKernel(const CNodePtr &kernel_node) override;
  // Writes (u^T u)^-1 or (u u^T)^-1 into outputs[0], depending on "upper".
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 private:
  // NOTE(review): named like a weak pointer but declared as a strong CNodePtr,
  // which keeps the node alive — confirm whether a CNodeWeakPtr was intended.
  CNodePtr node_wpt_;
};
// Register the kernel for the two supported dtypes (float32 / float64).
MS_REG_CPU_KERNEL_T(CholeskyInverse, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                    CholeskyInverseCPUKernel, float)
MS_REG_CPU_KERNEL_T(CholeskyInverse, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
                    CholeskyInverseCPUKernel, double)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CHOLESKYINVERSE_CPU_KERNEL_H_

+ 1
- 0
mindspore/core/base/core_ops.h View File

@@ -562,6 +562,7 @@ inline const PrimitivePtr kPrimBesselI1 = std::make_shared<Primitive>("BesselI1"
inline const PrimitivePtr kPrimGer = std::make_shared<Primitive>("Ger");
inline const PrimitivePtr kPrimCeil = std::make_shared<Primitive>("Ceil");
inline const PrimitivePtr kPrimLuSolve = std::make_shared<Primitive>("LuSolve");
inline const PrimitivePtr kPrimCholeskyInverse = std::make_shared<Primitive>("CholeskyInverse");
inline const PrimitivePtr kPrimTensorAdd = std::make_shared<Primitive>("TensorAdd");
inline const PrimitivePtr kPrimAdd = std::make_shared<Primitive>(kAdd);
inline const PrimitivePtr kPrimAddcdiv = std::make_shared<Primitive>(kAddcdiv);


+ 64
- 0
mindspore/core/ops/cholesky_inverse_.cc View File

@@ -0,0 +1,64 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/cholesky_inverse_.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"
namespace mindspore {
namespace ops {
namespace {
// Infers the output shape of CholeskyInverse: identical to the input shape.
// Raises ValueError if the input is not a 2-D square matrix.
abstract::ShapePtr CholeskyInverseInferShape(const PrimitivePtr &primitive,
                                             const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  MS_EXCEPTION_IF_NULL(input_args[0]);
  const int64_t kDimNum = 2;
  auto op_name = primitive->name();
  auto x_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
  auto x_shape = x_shape_map[kShape];
  // Cast once to avoid a signed/unsigned comparison between size_t and int64_t.
  if (x_shape.size() != static_cast<size_t>(kDimNum)) {
    MS_EXCEPTION(ValueError) << "The dimension of x must be equal to 2, while got x_dim: " << x_shape.size() << ".";
  }
  if (x_shape[x_shape.size() - 1] != x_shape[x_shape.size() - kDimNum]) {
    MS_EXCEPTION(ValueError) << "For " << op_name << " input should be square matrix "
                             << "while row is " << x_shape[x_shape.size() - kDimNum] << ", col is "
                             << x_shape[x_shape.size() - 1];
  }
  return std::make_shared<abstract::Shape>(x_shape);
}
// Infers the output dtype of CholeskyInverse: the input dtype, which must be
// one of float32/float64.
TypePtr CholeskyInverseInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  for (const auto &arg : input_args) {
    MS_EXCEPTION_IF_NULL(arg);
  }
  const std::set<TypePtr> valid_types = {kFloat32, kFloat64};
  return CheckAndConvertUtils::CheckTensorTypeValid("x", input_args[0]->BuildType(), valid_types, prim->name());
}
} // namespace
// Full abstract inference for CholeskyInverse: validates arity, then combines
// dtype and shape inference into one abstract value.
AbstractBasePtr CholeskyInverseInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                     const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  // CholeskyInverse takes exactly one input tensor.
  const int64_t input_num = 1;
  CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
  // Type is checked before shape, so a dtype error is reported first.
  auto infer_type = CholeskyInverseInferType(primitive, input_args);
  auto infer_shape = CholeskyInverseInferShape(primitive, input_args);
  return abstract::MakeAbstract(infer_shape, infer_type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(CholeskyInverse, prim::kPrimCholeskyInverse, CholeskyInverseInfer, nullptr, true);
} // namespace ops
} // namespace mindspore

+ 42
- 0
mindspore/core/ops/cholesky_inverse_.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_CHOLESKYINVERSE_H_
#define MINDSPORE_CORE_OPS_CHOLESKYINVERSE_H_
#include <map>
#include <set>
#include <memory>
#include <vector>
#include "ops/primitive_c.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"
namespace mindspore {
namespace ops {
// Operator name under which the primitive is registered.
constexpr auto kNameCholeskyInverse = "CholeskyInverse";
// Front-end primitive for CholeskyInverse: one input "x" (a Cholesky factor),
// one output "y" (the inverse of the original positive-definite matrix).
class CholeskyInverse : public PrimitiveC {
 public:
  CholeskyInverse() : PrimitiveC(kNameCholeskyInverse) { InitIOName({"x"}, {"y"}); }
  ~CholeskyInverse() = default;
  MS_DECLARE_PARENT(CholeskyInverse, PrimitiveC);
};
// Declared here, defined in cholesky_inverse_.cc and used by the eval registry.
AbstractBasePtr CholeskyInverseInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                     const std::vector<AbstractBasePtr> &input_args);
using PrimCholeskyInversePtr = std::shared_ptr<CholeskyInverse>;
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_CHOLESKYINVERSE_H_

+ 33
- 0
mindspore/python/mindspore/ops/_grad_experimental/grad_math_ops.py View File

@@ -208,6 +208,39 @@ def get_bprop_log_matrix_determinant(self):

return bprop

@bprop_getters.register(P.CholeskyInverse)
def get_bprop_cholesky_inverse(self):
    """Grad definition for `CholeskyInverse` operation."""
    matmul = P.MatMul()
    upper = self.upper
    neg = P.Neg()

    def bprop(input_x, out, dout):
        """Compute dx = -x @ (out @ (dout + dout^T) @ out) on the triangular side given by `upper`."""
        input_perm = (1, 0)
        # NOTE(review): float64 gradients are computed in float32 and cast back,
        # presumably because MatMul lacks a float64 kernel here — confirm before
        # relying on full double precision in this gradient.
        is_float64 = dout.dtype == mstype.float64
        if is_float64:
            input_x = F.cast(input_x, mstype.float32)
            out = F.cast(out, mstype.float32)
            dout = F.cast(dout, mstype.float32)
        # Symmetrize dout, then sandwich it between the forward result `out`.
        common_term = dout + transpose(dout, input_perm)
        common_term = matmul(out, matmul(common_term, out))
        if upper:
            dx = neg(matmul(input_x, common_term))
        else:
            dx = neg(matmul(common_term, input_x))
        if is_float64:
            dx = F.cast(dx, mstype.float64)
        return (dx,)

    return bprop


@bprop_getters.register(P.Erfinv)
def get_bprop_erfinv(self):


+ 1
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py View File

@@ -15,6 +15,7 @@
"""aicpu ops"""
from .unique import _unique_aicpu
from .lu_solve import _lu_solve_aicpu
from .cholesky_inverse import _cholesky_inverse_aicpu
from .no_repeat_ngram import _no_repeat_ngram_aicpu
from .init_data_set_queue import _init_data_set_queue_aicpu
from .embedding_lookup import _embedding_lookup_aicpu


+ 31
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/cholesky_inverse.py View File

@@ -0,0 +1,31 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CholeskyInverse op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
# AICPU backend registration info for CholeskyInverse: one required input "x",
# one required output "y", a boolean "upper" attribute, and matching
# float32/float64 default-format dtype pairs.
cholesky_inverse_op_info = AiCPURegOp("CholeskyInverse") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .attr("upper", "bool") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F64_Default, DataType.F64_Default) \
    .get_op_info()


@op_info_register(cholesky_inverse_op_info)
def _cholesky_inverse_aicpu():
    """CholeskyInverse aicpu register"""
    return

+ 3
- 1
mindspore/python/mindspore/ops/operations/__init__.py View File

@@ -62,7 +62,8 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
Reciprocal, CumSum, HistogramFixedWidth, SquaredDifference, Xdivy, Xlogy,
Sin, Sqrt, Rsqrt, BesselI0, BesselI1, BesselI0e, BesselI1e, TruncateDiv, TruncateMod, Addcdiv,
Addcmul, Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps,
Tan, MatrixInverse, IndexAdd, Erfinv, Conj, Real, Imag, Complex, Trunc, IsClose, LuSolve)
Tan, MatrixInverse, IndexAdd, Erfinv, Conj, Real, Imag, Complex, Trunc, IsClose, LuSolve,
CholeskyInverse)

from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, UniformInt, UniformReal,
RandomCategorical, StandardLaplace, Multinomial, UniformCandidateSampler,
@@ -511,6 +512,7 @@ __all__ = [
"AlltoAll",
"Custom",
"LuSolve",
"CholeskyInverse",
]

__sponge__ = [


+ 50
- 0
mindspore/python/mindspore/ops/operations/math_ops.py View File

@@ -5927,3 +5927,53 @@ class LuSolve(Primitive):
@prim_attr_register
def __init__(self):
pass


class CholeskyInverse(Primitive):
    """
    Returns the inverse of the positive definite matrix using cholesky matrix factorization.

    If upper is False, u is a lower triangular such that the output tensor is

    .. math::
        inv = (uu^{T})^{{-1}}

    If upper is True, u is an upper triangular such that the output tensor is

    .. math::
        inv = (u^{T}u)^{{-1}}

    Note:
        The input must be either an upper triangular matrix or a lower triangular matrix.

    Args:
        upper(bool): Whether to return a lower or upper triangular matrix. Default: False.

    Inputs:
        - **x** (Tensor) - The input tensor. types: float32, float64.

    Outputs:
        Tensor, has the same shape and dtype as x.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` is not one of: float32, float64.
        ValueError: If the dimension of `x` is not equal to 2.
        ValueError: If `x` is not a square matrix.

    Supported Platforms:
        ``CPU``

    Examples:
        >>> x = Tensor(np.array([[2,0,0], [4,1,0], [-1,1,2]]), mindspore.float32)
        >>> net = ops.CholeskyInverse()
        >>> y = net(x)
        >>> print(y)
        [[ 5.8125 -2.625   0.625 ]
         [-2.625   1.25   -0.25  ]
         [ 0.625  -0.25    0.25  ]]
    """

    @prim_attr_register
    def __init__(self, upper=False):
        """Initialize CholeskyInverse"""
        # `upper` selects between (u u^T)^-1 (False) and (u^T u)^-1 (True).
        validator.check_value_type("upper", upper, [bool], self.name)
        self.upper = upper

+ 4
- 0
tests/ut/python/ops/test_ops.py View File

@@ -1435,6 +1435,10 @@ test_case_math_ops = [
'desc_inputs': [Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32)),
Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))],
'desc_bprop': [Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))]}),
('CholeskyInverse', {
'block': P.CholeskyInverse(upper=False),
'desc_inputs': [Tensor(np.array([[2.0, 0.0, 0.0], [4.0, 1.0, 0.0], [-8.0, 1.0, 2.0]]).astype(np.float32))],
'desc_bprop': [Tensor(np.array([[2.0, 0.0, 0.0], [4.0, 1.0, 0.0], [-8.0, 1.0, 2.0]]).astype(np.float32))]}),
('Embedding_1', {
'block': Embedding(vocab_size=10, embedding_size=3),
'desc_inputs': [Tensor(np.array([0, 2, 2, 7]).astype(np.int32))],


Loading…
Cancel
Save