Browse Source

[feat] [assistant] [I48O93, I48O5Q] Add Sin and Cos operators.

feature/build-system-rewrite
xialingtian 桂宁馨 4 years ago
parent
commit
da641630cb
10 changed files with 122 additions and 24 deletions
  1. +23
    -4
      mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.cc
  2. +8
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.h
  3. +7
    -5
      mindspore/core/ops/cos.cc
  4. +3
    -0
      mindspore/core/ops/cos.h
  5. +6
    -11
      mindspore/core/ops/sin.cc
  6. +3
    -2
      mindspore/core/ops/sin.h
  7. +2
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py
  8. +34
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/cos.py
  9. +34
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/sin.py
  10. +2
    -2
      mindspore/python/mindspore/ops/operations/math_ops.py

+ 23
- 4
mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.cc View File

@@ -203,6 +203,26 @@ void Cos(ArithmeticSelfCpuKernelMod *content, const T *in, T *out, size_t size)
ParallelLaunchAutoSearch(task, size, content, &content->parallel_search_info_);
}

template <typename T>
void ComplexSin(ArithmeticSelfCpuKernelMod *content, const T *in, T *out, size_t size) {
  // Element-wise sine over a complex-typed buffer; `sin` resolves to the
  // std::complex overload via ADL. Work is split into [begin, end) chunks
  // by the auto-search parallel launcher.
  auto sin_task = [in, out](size_t begin, size_t end) {
    for (size_t idx = begin; idx < end; ++idx) {
      out[idx] = static_cast<T>(sin(in[idx]));
    }
  };
  ParallelLaunchAutoSearch(sin_task, size, content, &content->parallel_search_info_);
}

template <typename T>
void ComplexCos(ArithmeticSelfCpuKernelMod *content, const T *in, T *out, size_t size) {
  // Element-wise cosine over a complex-typed buffer; `cos` resolves to the
  // std::complex overload via ADL. Work is split into [begin, end) chunks
  // by the auto-search parallel launcher.
  auto cos_task = [in, out](size_t begin, size_t end) {
    for (size_t idx = begin; idx < end; ++idx) {
      out[idx] = static_cast<T>(cos(in[idx]));
    }
  };
  ParallelLaunchAutoSearch(cos_task, size, content, &content->parallel_search_info_);
}

template <typename T>
void Tan(ArithmeticSelfCpuKernelMod *content, const T *in, T *out, size_t size) {
auto task = [&in, &out](size_t start, size_t end) {
@@ -399,10 +419,9 @@ void ArithmeticSelfCpuKernelMod::LaunchKernelComplex(const std::vector<AddressPt
const size_t lens = outputs[0]->size / sizeof(T);
static const std::unordered_map<std::string,
std::function<void(ArithmeticSelfCpuKernelMod *, const T *, T *, size_t)>>
arithmeticSelfFuncMap{{prim::kPrimSquare->name(), Square<T>},
{prim::kPrimAcosh->name(), ComplexAcosh<T>},
{prim::kPrimAsinh->name(), ComplexAsinh<T>},
{prim::kPrimNeg->name(), Neg<T>}};
arithmeticSelfFuncMap{{prim::kPrimSquare->name(), Square<T>}, {prim::kPrimAcosh->name(), ComplexAcosh<T>},
{prim::kPrimAsinh->name(), ComplexAsinh<T>}, {prim::kPrimNeg->name(), Neg<T>},
{prim::kPrimSin->name(), ComplexSin<T>}, {prim::kPrimCos->name(), ComplexCos<T>}};
const auto func_pair = arithmeticSelfFuncMap.find(kernel_name_);
if (arithmeticSelfFuncMap.find(kernel_name_) == arithmeticSelfFuncMap.end()) {
MS_LOG(EXCEPTION) << "ArithmeticSelfCpuKernelMod does not support " << kernel_name_;


+ 8
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.h View File

@@ -143,6 +143,14 @@ MS_REG_CPU_KERNEL(Cos, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAt
ArithmeticSelfCpuKernelMod);
// Cos on float64 tensors.
MS_REG_CPU_KERNEL(Cos, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
ArithmeticSelfCpuKernelMod);
// Complex64 registrations for Sin/Cos; dispatched to ComplexSin/ComplexCos
// through LaunchKernelComplex in arithmetic_self_cpu_kernel.cc.
MS_REG_CPU_KERNEL(Sin, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64),
ArithmeticSelfCpuKernelMod);
MS_REG_CPU_KERNEL(Cos, KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64),
ArithmeticSelfCpuKernelMod);
// Complex128 registrations for Sin/Cos.
MS_REG_CPU_KERNEL(Sin, KernelAttr().AddInputAttr(kNumberTypeComplex128).AddOutputAttr(kNumberTypeComplex128),
ArithmeticSelfCpuKernelMod);
MS_REG_CPU_KERNEL(Cos, KernelAttr().AddInputAttr(kNumberTypeComplex128).AddOutputAttr(kNumberTypeComplex128),
ArithmeticSelfCpuKernelMod);
// Tan on float32 tensors.
MS_REG_CPU_KERNEL(Tan, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
ArithmeticSelfCpuKernelMod);
MS_REG_CPU_KERNEL(Tan, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),


+ 7
- 5
mindspore/core/ops/cos.cc View File

@@ -34,20 +34,22 @@ abstract::ShapePtr CosInferShape(const PrimitivePtr &primitive, const std::vecto
}

TypePtr CosInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64, kComplex64, kComplex128};
auto x_dtype = input_args[0]->BuildType();
(void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_dtype, common_valid_types, prim->name());
(void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_dtype, valid_types, prim->name());
return x_dtype;
}
} // namespace

AbstractBasePtr CosInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t input_num = 1;
CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, input_num, primitive->name());

return abstract::MakeAbstract(CosInferShape(primitive, input_args), CosInferType(primitive, input_args));
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
auto infer_type = CosInferType(primitive, input_args);
auto infer_shape = CosInferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);
}
} // namespace
REGISTER_PRIMITIVE_EVAL_IMPL(Cos, prim::kPrimCos, CosInfer, nullptr, true);
} // namespace ops
} // namespace mindspore

+ 3
- 0
mindspore/core/ops/cos.h View File

@@ -36,6 +36,9 @@ class MS_CORE_API Cos : public PrimitiveC {
/// \brief Init. Refer to the parameters of Python API @ref mindspore.ops.Cos for the inputs.
void Init(float alpha = 0.0);
};

AbstractBasePtr CosInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_COS_H_

+ 6
- 11
mindspore/core/ops/sin.cc View File

@@ -13,16 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "ops/sin.h"
#include <string>
#include <algorithm>
#include <memory>

#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"
#include <map>
#include <string>

namespace mindspore {
namespace ops {
@@ -30,7 +25,6 @@ namespace {
abstract::ShapePtr SinInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name);
(void)CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, 0);
auto x = input_args[0]->BuildShape();
MS_EXCEPTION_IF_NULL(x);
@@ -40,8 +34,9 @@ abstract::ShapePtr SinInferShape(const PrimitivePtr &primitive, const std::vecto
}

TypePtr SinInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64, kComplex64, kComplex128};
auto x_dtype = input_args[0]->BuildType();
(void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_dtype, common_valid_types_with_complex, prim->name());
(void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_dtype, valid_types, prim->name());
return x_dtype;
}
} // namespace
@@ -49,7 +44,7 @@ AbstractBasePtr SinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t input_num = 1;
CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, input_num, primitive->name());
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
auto infer_type = SinInferType(primitive, input_args);
auto infer_shape = SinInferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);


+ 3
- 2
mindspore/core/ops/sin.h View File

@@ -18,9 +18,10 @@
#define MINDSPORE_CORE_OPS_SIN_H_
#include <vector>
#include <memory>

#include "ops/primitive_c.h"
#include "ops/op_utils.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
@@ -36,10 +37,10 @@ class MS_CORE_API Sin : public PrimitiveC {
/// \brief Init.
void Init() const {}
};

AbstractBasePtr SinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args);
using kPrimSinPtr = std::shared_ptr<Sin>;
} // namespace ops
} // namespace mindspore

#endif // MINDSPORE_CORE_OPS_SIN_H_

+ 2
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py View File

@@ -46,6 +46,8 @@ from .is_inf import _is_inf_aicpu
from .is_nan import _is_nan_aicpu
from .reshape import _reshape_aicpu
from .flatten import _flatten_aicpu
from .sin import _sin_aicpu
from .cos import _cos_aicpu
from .squeeze import _squeeze_aicpu
from .acos import _acos_aicpu
from .acos_grad import _acos_grad_aicpu


+ 34
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/cos.py View File

@@ -0,0 +1,34 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cos op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

# Operator info for the AiCPU Cos kernel: one required input "x", one
# required output "y", with matching input/output dtypes for float16/32/64
# and complex64/128.
cos_op_info = (
    AiCPURegOp("Cos")
    .fusion_type("ELEMWISE")
    .input(0, "x", "required")
    .output(0, "y", "required")
    .dtype_format(DataType.F16_Default, DataType.F16_Default)
    .dtype_format(DataType.F32_Default, DataType.F32_Default)
    .dtype_format(DataType.F64_Default, DataType.F64_Default)
    .dtype_format(DataType.C64_Default, DataType.C64_Default)
    .dtype_format(DataType.C128_Default, DataType.C128_Default)
    .get_op_info()
)


@op_info_register(cos_op_info)
def _cos_aicpu():
    """Register the Cos operator implementation for the AiCPU backend."""
    return

+ 34
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/sin.py View File

@@ -0,0 +1,34 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sin op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

# Operator info for the AiCPU Sin kernel: one required input "x", one
# required output "y", with matching input/output dtypes for float16/32/64
# and complex64/128.
sin_op_info = (
    AiCPURegOp("Sin")
    .fusion_type("ELEMWISE")
    .input(0, "x", "required")
    .output(0, "y", "required")
    .dtype_format(DataType.F16_Default, DataType.F16_Default)
    .dtype_format(DataType.F32_Default, DataType.F32_Default)
    .dtype_format(DataType.F64_Default, DataType.F64_Default)
    .dtype_format(DataType.C64_Default, DataType.C64_Default)
    .dtype_format(DataType.C128_Default, DataType.C128_Default)
    .get_op_info()
)


@op_info_register(sin_op_info)
def _sin_aicpu():
    """Register the Sin operator implementation for the AiCPU backend."""
    return

+ 2
- 2
mindspore/python/mindspore/ops/operations/math_ops.py View File

@@ -4537,7 +4537,7 @@ class Cos(Primitive):
>>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = cos(x)
>>> print(output)
[0.971338 0.67487574 0.95233357 0.9959527 ]
[0.971338 0.6748758 0.95233357 0.9959527]
"""

@prim_attr_register
@@ -4608,7 +4608,7 @@ class Sin(Primitive):
>>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = sin(x)
>>> print(output)
[0.5810352 0.27635565 0.41687083 0.5810352 ]
[0.5810352 0.27635565 0.41687083 0.5810352]
"""

@prim_attr_register


Loading…
Cancel
Save