Browse Source

!23919 [assistant][ops] Add HSVToRGB

Merge pull request !23919 from 苏家毅/HSVToRGB
feature/build-system-rewrite
i-robot Gitee 4 years ago
parent
commit
fe2a557901
No known key found for this signature in database GPG Key ID: 173E9B9CA92EEF8F
10 changed files with 399 additions and 1 deletion
  1. +173
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/hsv_to_rgb_cpu_kernel.cc
  2. +54
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/hsv_to_rgb_cpu_kernel.h
  3. +1
    -0
      mindspore/core/base/core_ops.h
  4. +60
    -0
      mindspore/core/ops/hsv_to_rgb.cc
  5. +41
    -0
      mindspore/core/ops/hsv_to_rgb.h
  6. +1
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py
  7. +32
    -0
      mindspore/python/mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py
  8. +2
    -1
      mindspore/python/mindspore/ops/operations/__init__.py
  9. +31
    -0
      mindspore/python/mindspore/ops/operations/image_ops.py
  10. +4
    -0
      tests/ut/python/ops/test_ops.py

+ 173
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/hsv_to_rgb_cpu_kernel.cc View File

@@ -0,0 +1,173 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/hsv_to_rgb_cpu_kernel.h"
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {
template <typename T>
void HSVToRGBCpuKernel<T>::InitKernel(const CNodePtr &kernel_node) {
  // Validates the node's I/O arity and caches the input shape/dtype for Launch().
  MS_EXCEPTION_IF_NULL(kernel_node);
  const size_t kNumDims = 3;  // required size of the channel (last) dimension: H, S, V
  const size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
  const size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
  if (input_num != kInputNum) {
    MS_LOG(EXCEPTION) << "Needs " << kInputNum << " input, but got " << input_num << ".";
  }
  if (output_num != kOutputNum) {
    MS_LOG(EXCEPTION) << "Needs " << kOutputNum << " output, but got " << output_num << ".";
  }
  shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  input_dtype = AnfAlgo::GetInputDeviceDataType(kernel_node, 0);
  // Guard against an empty shape before reading the last dimension: the
  // original `shape.cend()[-1]` dereferences past-the-end storage (undefined
  // behavior) when the shape vector is empty.
  if (shape.empty() || shape.back() != kNumDims) {
    MS_LOG(EXCEPTION) << "The last dimension of the input tensor must be size 3.";
  }
}

// Converts a single pixel from HSV to RGB using the standard hue-sector
// formula: chroma c = s*v, a secondary channel x that ramps within each
// sector, and an offset m = v-c added to all three channels. Results are
// written through r/g/b.
// NOTE(review): hue appears to be expected in [0, 1] -- it is scaled by 6 to
// select one of six sectors; confirm against the op's documented contract.
template <typename T>
template <typename T1>
void HSVToRGBCpuKernel<T>::ConvertOnePixel(T1 h, T1 s, T1 v, T1 *r, T1 *g, T1 *b) {
  // Chroma: the spread between the largest and smallest RGB channel.
  T1 c = s * v;
  // Offset added to every channel to restore the value (brightness).
  T1 m = v - c;
  // Scale hue into [0, 6): the integer part picks the sector, the
  // fractional part interpolates inside it.
  T1 dh = h * 6;
  T1 rr, gg, bb;
  const int32_t h_category = static_cast<int32_t>(std::floor(dh));
  // Reduce dh modulo 2 into the [0, 2] band without calling std::fmod; the
  // secondary channel x rises and falls over a period of two sectors.
  T1 fmodu = dh;
  const int32_t kLimitMin = 0;
  const int32_t kLimitMax = 2;
  if (fmodu <= kLimitMin || fmodu >= kLimitMax) {
    const int32_t tmp = static_cast<int32_t>(fmodu);
    fmodu -= static_cast<T1>((tmp / kLimitMax) * kLimitMax);
    if (fmodu <= kLimitMin) {
      fmodu += kLimitMax;
    } else if (fmodu >= kLimitMax) {
      fmodu -= kLimitMax;
    }
  }
  const int32_t h_category_value_0 = 0;
  const int32_t h_category_value_1 = 1;
  const int32_t h_category_value_2 = 2;
  const int32_t h_category_value_3 = 3;
  const int32_t h_category_value_4 = 4;
  const int32_t h_category_value_5 = 5;

  // Secondary channel: peaks (== c) at the middle of every two-sector band.
  T1 x = c * (1 - std::abs(fmodu - 1));
  // Assign (c, x, 0) permuted according to the hue sector; sectors outside
  // [0, 5] (hue out of the expected range) fall through to the default.
  switch (h_category) {
    case h_category_value_0:
      rr = c;
      gg = x;
      bb = 0;
      break;
    case h_category_value_1:
      rr = x;
      gg = c;
      bb = 0;
      break;
    case h_category_value_2:
      rr = 0;
      gg = c;
      bb = x;
      break;
    case h_category_value_3:
      rr = 0;
      gg = x;
      bb = c;
      break;
    case h_category_value_4:
      rr = x;
      gg = 0;
      bb = c;
      break;
    case h_category_value_5:
      rr = c;
      gg = 0;
      bb = x;
      break;
    default:
      rr = c;
      gg = 0;
      bb = 0;
  }
  // Re-add the brightness offset to every channel.
  *r = rr + m;
  *g = gg + m;
  *b = bb + m;
}

template <typename T>
template <typename T1>
void HSVToRGBCpuKernel<T>::ComputeFloat(void *input, void *output, int64_t pixel_num) {
  // Converts pixel_num interleaved HSV triples to RGB triples, splitting the
  // pixel range across worker threads.
  T1 *hsv = reinterpret_cast<T1 *>(input);
  T1 *rgb = reinterpret_cast<T1 *>(output);
  auto task = [&](size_t begin, size_t end) {
    for (size_t idx = begin; idx < end; ++idx) {
      const size_t base = 3 * idx;  // offset of this pixel's first channel
      ConvertOnePixel<T1>(hsv[base], hsv[base + 1], hsv[base + 2], rgb + base, rgb + base + 1, rgb + base + 2);
    }
  };
  CPUKernelUtils::ParallelFor(task, pixel_num);
}

template <typename T>
void HSVToRGBCpuKernel<T>::ComputeHalf(void *input, void *output, int64_t pixel_num) {
  // float16 path: widen each channel to float, convert, then narrow the
  // result back to float16.
  float16 *src = reinterpret_cast<float16 *>(input);
  float16 *dst = reinterpret_cast<float16 *>(output);
  auto task = [&](size_t begin, size_t end) {
    float rgb[3];  // per-thread scratch for one converted pixel
    for (size_t idx = begin; idx < end; ++idx) {
      const size_t base = 3 * idx;  // offset of this pixel's first channel
      ConvertOnePixel<float>(static_cast<float>(src[base]), static_cast<float>(src[base + 1]),
                             static_cast<float>(src[base + 2]), rgb, rgb + 1, rgb + 2);
      dst[base] = float16(rgb[0]);
      dst[base + 1] = float16(rgb[1]);
      dst[base + 2] = float16(rgb[2]);
    }
  };
  CPUKernelUtils::ParallelFor(task, pixel_num);
}

template <typename T>
bool HSVToRGBCpuKernel<T>::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
                                  const std::vector<AddressPtr> &outputs) {
  // Total element count divided by the 3 channels gives the pixel count.
  const int64_t kChannelNum = 3;
  const int64_t pixel_num =
    std::accumulate(shape.begin(), shape.end(), static_cast<int64_t>(1), std::multiplies<int64_t>()) / kChannelNum;
  void *input = inputs[0]->addr;
  void *output = outputs[0]->addr;
  // Dispatch on the dtype cached by InitKernel(); fp16 uses the widening path.
  switch (input_dtype) {
    case kNumberTypeFloat16:
      ComputeHalf(input, output, pixel_num);
      break;
    case kNumberTypeFloat32:
      ComputeFloat<float>(input, output, pixel_num);
      break;
    case kNumberTypeFloat64:
      ComputeFloat<double>(input, output, pixel_num);
      break;
    default:
      // Fixed typo in the user-facing message: "surpported" -> "supported".
      MS_LOG(EXCEPTION) << "Input Tensor data type is not supported.";
      break;
  }
  return true;
}
} // namespace kernel
} // namespace mindspore

+ 54
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/hsv_to_rgb_cpu_kernel.h View File

@@ -0,0 +1,54 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_HSV_TO_RGB_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_HSV_TO_RGB_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
// CPU kernel converting HSV images to RGB. T is the element type the kernel
// is registered for (float16 / float / double).
template <typename T>
class HSVToRGBCpuKernel : public CPUKernel {
 public:
  HSVToRGBCpuKernel() = default;
  ~HSVToRGBCpuKernel() override = default;

  // Validates I/O arity and caches the input shape/dtype.
  void InitKernel(const CNodePtr &kernel_node) override;
  // Converts the HSV input tensor to RGB; returns true on success.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 private:
  // Brace-initialized so a Launch() before InitKernel() reads a defined value
  // (the original left this member uninitialized -- indeterminate read).
  TypeId input_dtype{kTypeUnknown};
  // Converts one pixel; h/s/v in, r/g/b out.
  template <typename T1>
  void ConvertOnePixel(T1 h, T1 s, T1 v, T1 *r, T1 *g, T1 *b);
  // Bulk conversion for float/double element types.
  template <typename T1>
  void ComputeFloat(void *input, void *output, int64_t pixel_num);
  // Bulk conversion for float16 (widens to float internally).
  void ComputeHalf(void *input, void *output, int64_t pixel_num);
  std::vector<size_t> shape;  // cached input device shape
  // static constexpr: compile-time constants, no per-instance storage.
  static constexpr size_t kInputNum = 1;
  static constexpr size_t kOutputNum = 1;
};
// Kernel registrations, one per supported dtype; input and output dtypes
// always match because the conversion preserves the element type.
MS_REG_CPU_KERNEL_T(HSVToRGB, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
                    HSVToRGBCpuKernel, float16);
MS_REG_CPU_KERNEL_T(HSVToRGB, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                    HSVToRGBCpuKernel, float);
MS_REG_CPU_KERNEL_T(HSVToRGB, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
                    HSVToRGBCpuKernel, double);
} // namespace kernel
} // namespace mindspore
#endif

+ 1
- 0
mindspore/core/base/core_ops.h View File

@@ -476,6 +476,7 @@ inline const PrimitivePtr kPrimSoftShrink = std::make_shared<Primitive>("SoftShr
inline const PrimitivePtr kPrimSoftShrinkGrad = std::make_shared<Primitive>("SoftShrinkGrad");
inline const PrimitivePtr kPrimHShrink = std::make_shared<Primitive>("HShrink");
inline const PrimitivePtr kPrimHShrinkGrad = std::make_shared<Primitive>("HShrinkGrad");
inline const PrimitivePtr kPrimHSVToRGB = std::make_shared<Primitive>("HSVToRGB");
inline const PrimitivePtr kPrimApplyAdagradDA = std::make_shared<Primitive>("ApplyAdagradDA");
inline const PrimitivePtr kPrimApplyAdagradV2 = std::make_shared<Primitive>("ApplyAdagradV2");
inline const PrimitivePtr kPrimApplyProximalGradientDescent =


+ 60
- 0
mindspore/core/ops/hsv_to_rgb.cc View File

@@ -0,0 +1,60 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/hsv_to_rgb.h"
#include <set>
#include <memory>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"

namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  // Output shape equals input shape; the input must be NHWC with 3 channels.
  auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  const int64_t kNumDims = 4;
  const int64_t kLastDim = 3;
  const int64_t input_dims = SizeToLong(input_shape.size());
  // Validate the rank BEFORE touching the last dimension: the original read
  // `input_shape.cend()[-1]` unconditionally, which is undefined behavior
  // when the shape is empty (scalar input).
  CheckAndConvertUtils::CheckInteger("the dimension of [x]", input_dims, kEqual, kNumDims, kNameHSVToRGB);
  const int64_t input_last_dims = SizeToLong(input_shape.back());
  CheckAndConvertUtils::CheckInteger("the last dimension of the shape of [x]", input_last_dims, kEqual, kLastDim,
                                     kNameHSVToRGB);

  return std::make_shared<abstract::Shape>(input_shape);
}

TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  // HSVToRGB preserves dtype; only floating-point element types are valid.
  const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64};
  auto x_type = input_args[0]->BuildType();
  CheckAndConvertUtils::CheckTensorTypeValid("x", x_type, valid_types, kNameHSVToRGB);
  return x_type;
}
} // namespace

AbstractBasePtr HSVToRGBInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                              const std::vector<AbstractBasePtr> &input_args) {
  // Frontend infer entry point: validate arity, then combine the separately
  // inferred type and shape into one abstract value.
  MS_EXCEPTION_IF_NULL(primitive);
  constexpr int64_t kExpectedInputNum = 1;
  CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kExpectedInputNum, primitive->name());
  const auto inferred_type = InferType(primitive, input_args);
  const auto inferred_shape = InferShape(primitive, input_args);
  return abstract::MakeAbstract(inferred_shape, inferred_type);
}

REGISTER_PRIMITIVE_EVAL_IMPL(HSVToRGB, prim::kPrimHSVToRGB, HSVToRGBInfer, nullptr, true);
} // namespace ops
} // namespace mindspore

+ 41
- 0
mindspore/core/ops/hsv_to_rgb.h View File

@@ -0,0 +1,41 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CORE_OPS_HSV_TO_RGB_H_
#define MINDSPORE_CORE_OPS_HSV_TO_RGB_H_
#include <vector>
#include <string>
#include <memory>
#include "ops/primitive_c.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
constexpr auto kNameHSVToRGB = "HSVToRGB";
// Frontend primitive for the HSVToRGB op: declares one input "x" (HSV image)
// and one output "y" (RGB image). Shape/type inference lives in
// HSVToRGBInfer (hsv_to_rgb.cc).
class HSVToRGB : public PrimitiveC {
 public:
  HSVToRGB() : PrimitiveC(kNameHSVToRGB) { InitIOName({"x"}, {"y"}); }
  ~HSVToRGB() = default;
  MS_DECLARE_PARENT(HSVToRGB, PrimitiveC);
};
// Infer function registered for this primitive (see REGISTER_PRIMITIVE_EVAL_IMPL).
AbstractBasePtr HSVToRGBInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                              const std::vector<AbstractBasePtr> &input_args);
using HSVToRGBPtr = std::shared_ptr<HSVToRGB>;
} // namespace ops
} // namespace mindspore

#endif

+ 1
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py View File

@@ -13,6 +13,7 @@
# limitations under the License.

"""aicpu ops"""
from .hsv_to_rgb import _hsv_to_rgb_aicpu
from .unique import _unique_aicpu
from .lu_solve import _lu_solve_aicpu
from .cholesky_inverse import _cholesky_inverse_aicpu


+ 32
- 0
mindspore/python/mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py View File

@@ -0,0 +1,32 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""HSVToRGB op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

# AiCPU registration for HSVToRGB: a single required input "x" (HSV image)
# and a single required output "y" (RGB image). Input and output dtypes must
# match; fp16/fp32/fp64 in the default format are supported.
hsv_to_rgb_op_info = AiCPURegOp("HSVToRGB") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F64_Default, DataType.F64_Default) \
    .get_op_info()


@op_info_register(hsv_to_rgb_op_info)
def _hsv_to_rgb_aicpu():
    """HSVToRGB aicpu register"""
    return

+ 2
- 1
mindspore/python/mindspore/ops/operations/__init__.py View File

@@ -19,7 +19,7 @@ Primitive operator classes.
A collection of operators to build neural networks or to compute functions.
"""

from .image_ops import (CropAndResize, NonMaxSuppressionV3)
from .image_ops import (CropAndResize, NonMaxSuppressionV3, HSVToRGB)
from .array_ops import (Argmax, Argmin, Cast, Concat, Pack, Stack, Unpack, Unstack,
Diag, DiagPart, DType, ExpandDims, Eye,
Fill, Ones, Zeros, GatherNd, GatherV2, Gather, SparseGatherV2, InvertPermutation,
@@ -128,6 +128,7 @@ from ._inner_ops import (MatmulDDS, DSDMatmul, NonZero)
from .custom_ops import (Custom)

__all__ = [
'HSVToRGB',
'CeLU',
'Ger',
'GridSampler3D',


+ 31
- 0
mindspore/python/mindspore/ops/operations/image_ops.py View File

@@ -206,3 +206,34 @@ class NonMaxSuppressionV3(Primitive):
@prim_attr_register
def __init__(self):
"""Initialize NonMaxSuppressionV3"""

class HSVToRGB(Primitive):
    """
    Convert one or more images from HSV to RGB. The format of the image(s) should be NHWC.

    Inputs:
        - **x** (Tensor) - The input image must be a 4-D tensor of shape [batch, image_height, image_width, channel].
          Number of channel must be 3.
          Types allowed: float16, float32, float64.

    Outputs:
        A 4-D tensor of shape [batch, image_height, image_width, channel] with the same type as the input.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If the dtype of `x` is not float16, float32 or float64.
        ValueError: If rank of the `x` is not equal to 4.
        ValueError: If the last dimension of `x` is not equal to 3.

    Supported Platforms:
        ``CPU``

    Examples:
        >>> image = np.array([0.5, 0.5, 0.5]).astype(np.float32).reshape([1, 1, 1, 3])
        >>> hsv_to_rgb = P.HSVToRGB()
        >>> output = hsv_to_rgb(Tensor(image))
        >>> print(output)
        [[[[0.25 0.5 0.5 ]]]]
    """

    @prim_attr_register
    def __init__(self):
        # Init docstring added for consistency with the sibling image ops
        # (e.g. NonMaxSuppressionV3), replacing a bare `pass`.
        """Initialize HSVToRGB"""

+ 4
- 0
tests/ut/python/ops/test_ops.py View File

@@ -3085,6 +3085,10 @@ test_case_other_ops = [
Tensor(np.random.rand(2, 8, 16).astype(np.float16)),
Tensor(np.random.rand(2, 8, 16).astype(np.float16)),
Tensor(np.random.rand(2, 8, 16).astype(np.float16))]}),
('HSVToRGB', {
'block': P.HSVToRGB(),
'desc_inputs': [Tensor(np.array([[[[0.5, 0.5, 0.5]]]], np.float32))],
'skip': ['backward']}),
]

test_case_quant_ops = [


Loading…
Cancel
Save