Browse Source

!4494 add c_ops

Merge pull request !4494 from yeyunpeng2020/master_cops
tags/v0.7.0-beta
mindspore-ci-bot Gitee 5 years ago
parent
commit
a7553636d5
100 changed files with 5722 additions and 0 deletions
  1. +3
    -0
      mindspore/lite/c_ops/CMakeLists.txt
  2. +42
    -0
      mindspore/lite/c_ops/abs.h
  3. +35
    -0
      mindspore/lite/c_ops/activation.cc
  4. +46
    -0
      mindspore/lite/c_ops/activation.h
  5. +33
    -0
      mindspore/lite/c_ops/activation_grad.cc
  6. +44
    -0
      mindspore/lite/c_ops/activation_grad.h
  7. +33
    -0
      mindspore/lite/c_ops/add.cc
  8. +43
    -0
      mindspore/lite/c_ops/add.h
  9. +59
    -0
      mindspore/lite/c_ops/addn.cc
  10. +45
    -0
      mindspore/lite/c_ops/addn.h
  11. +75
    -0
      mindspore/lite/c_ops/argmax.cc
  12. +53
    -0
      mindspore/lite/c_ops/argmax.h
  13. +74
    -0
      mindspore/lite/c_ops/argmin.cc
  14. +53
    -0
      mindspore/lite/c_ops/argmin.h
  15. +99
    -0
      mindspore/lite/c_ops/arithmetic.cc
  16. +55
    -0
      mindspore/lite/c_ops/arithmetic.h
  17. +34
    -0
      mindspore/lite/c_ops/arithmetic_self.cc
  18. +42
    -0
      mindspore/lite/c_ops/arithmetic_self.h
  19. +31
    -0
      mindspore/lite/c_ops/batch_norm.cc
  20. +44
    -0
      mindspore/lite/c_ops/batch_norm.h
  21. +114
    -0
      mindspore/lite/c_ops/batch_to_space.cc
  22. +47
    -0
      mindspore/lite/c_ops/batch_to_space.h
  23. +34
    -0
      mindspore/lite/c_ops/bias_add.cc
  24. +44
    -0
      mindspore/lite/c_ops/bias_add.h
  25. +34
    -0
      mindspore/lite/c_ops/bias_grad.cc
  26. +44
    -0
      mindspore/lite/c_ops/bias_grad.h
  27. +35
    -0
      mindspore/lite/c_ops/bn_grad_input.cc
  28. +46
    -0
      mindspore/lite/c_ops/bn_grad_input.h
  29. +79
    -0
      mindspore/lite/c_ops/broadcast_to.cc
  30. +45
    -0
      mindspore/lite/c_ops/broadcast_to.h
  31. +33
    -0
      mindspore/lite/c_ops/caffe_p_relu.cc
  32. +45
    -0
      mindspore/lite/c_ops/caffe_p_relu.h
  33. +64
    -0
      mindspore/lite/c_ops/cast.cc
  34. +47
    -0
      mindspore/lite/c_ops/cast.h
  35. +35
    -0
      mindspore/lite/c_ops/clip.cc
  36. +46
    -0
      mindspore/lite/c_ops/clip.h
  37. +93
    -0
      mindspore/lite/c_ops/concat.cc
  38. +47
    -0
      mindspore/lite/c_ops/concat.h
  39. +163
    -0
      mindspore/lite/c_ops/conv2d.cc
  40. +91
    -0
      mindspore/lite/c_ops/conv2d.h
  41. +107
    -0
      mindspore/lite/c_ops/conv2d_grad_filter.cc
  42. +76
    -0
      mindspore/lite/c_ops/conv2d_grad_filter.h
  43. +105
    -0
      mindspore/lite/c_ops/conv2d_grad_input.cc
  44. +76
    -0
      mindspore/lite/c_ops/conv2d_grad_input.h
  45. +42
    -0
      mindspore/lite/c_ops/cos.h
  46. +55
    -0
      mindspore/lite/c_ops/crop.cc
  47. +47
    -0
      mindspore/lite/c_ops/crop.h
  48. +145
    -0
      mindspore/lite/c_ops/deconv2d.cc
  49. +88
    -0
      mindspore/lite/c_ops/deconv2d.h
  50. +161
    -0
      mindspore/lite/c_ops/dedepthwise_conv2d.cc
  51. +86
    -0
      mindspore/lite/c_ops/dedepthwise_conv2d.h
  52. +75
    -0
      mindspore/lite/c_ops/depth_to_space.cc
  53. +47
    -0
      mindspore/lite/c_ops/depth_to_space.h
  54. +164
    -0
      mindspore/lite/c_ops/depthwise_conv2d.cc
  55. +86
    -0
      mindspore/lite/c_ops/depthwise_conv2d.h
  56. +131
    -0
      mindspore/lite/c_ops/detection_post_process.cc
  57. +68
    -0
      mindspore/lite/c_ops/detection_post_process.h
  58. +33
    -0
      mindspore/lite/c_ops/div.cc
  59. +46
    -0
      mindspore/lite/c_ops/div.h
  60. +31
    -0
      mindspore/lite/c_ops/dropout.cc
  61. +44
    -0
      mindspore/lite/c_ops/dropout.h
  62. +31
    -0
      mindspore/lite/c_ops/eltwise.cc
  63. +44
    -0
      mindspore/lite/c_ops/eltwise.h
  64. +31
    -0
      mindspore/lite/c_ops/elu.cc
  65. +44
    -0
      mindspore/lite/c_ops/elu.h
  66. +72
    -0
      mindspore/lite/c_ops/embedding_lookup.cc
  67. +45
    -0
      mindspore/lite/c_ops/embedding_lookup.h
  68. +57
    -0
      mindspore/lite/c_ops/embedding_lookup_sparse.cc
  69. +48
    -0
      mindspore/lite/c_ops/embedding_lookup_sparse.h
  70. +38
    -0
      mindspore/lite/c_ops/equal.h
  71. +42
    -0
      mindspore/lite/c_ops/exp.h
  72. +60
    -0
      mindspore/lite/c_ops/expand_dims.cc
  73. +45
    -0
      mindspore/lite/c_ops/expand_dims.h
  74. +45
    -0
      mindspore/lite/c_ops/fake_quant_with_min_max_vars.cc
  75. +46
    -0
      mindspore/lite/c_ops/fake_quant_with_min_max_vars.h
  76. +56
    -0
      mindspore/lite/c_ops/fill.cc
  77. +45
    -0
      mindspore/lite/c_ops/fill.h
  78. +47
    -0
      mindspore/lite/c_ops/flatten.cc
  79. +43
    -0
      mindspore/lite/c_ops/flatten.h
  80. +42
    -0
      mindspore/lite/c_ops/floor.h
  81. +42
    -0
      mindspore/lite/c_ops/floor_div.h
  82. +42
    -0
      mindspore/lite/c_ops/floor_mod.h
  83. +80
    -0
      mindspore/lite/c_ops/full_connection.cc
  84. +49
    -0
      mindspore/lite/c_ops/full_connection.h
  85. +39
    -0
      mindspore/lite/c_ops/fused_batchnorm.cc
  86. +48
    -0
      mindspore/lite/c_ops/fused_batchnorm.h
  87. +87
    -0
      mindspore/lite/c_ops/gather.cc
  88. +47
    -0
      mindspore/lite/c_ops/gather.h
  89. +74
    -0
      mindspore/lite/c_ops/gather_nd.cc
  90. +45
    -0
      mindspore/lite/c_ops/gather_nd.h
  91. +42
    -0
      mindspore/lite/c_ops/greater.h
  92. +42
    -0
      mindspore/lite/c_ops/greater_equal.h
  93. +38
    -0
      mindspore/lite/c_ops/l2_norm.cc
  94. +46
    -0
      mindspore/lite/c_ops/l2_norm.h
  95. +33
    -0
      mindspore/lite/c_ops/leaky_relu.cc
  96. +44
    -0
      mindspore/lite/c_ops/leaky_relu.h
  97. +42
    -0
      mindspore/lite/c_ops/less.h
  98. +42
    -0
      mindspore/lite/c_ops/less_equal.h
  99. +67
    -0
      mindspore/lite/c_ops/local_response_normalization.cc
  100. +50
    -0
      mindspore/lite/c_ops/local_response_normalization.h

+ 3
- 0
mindspore/lite/c_ops/CMakeLists.txt View File

@@ -0,0 +1,3 @@
# Collect every c_ops implementation file (recursively) into C_OPS_SRC.
file(GLOB_RECURSE C_OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cc)

# Build the ops as an OBJECT library: the objects are linked directly into
# the consuming lite target without producing a standalone archive.
add_library(c_ops_mid OBJECT ${C_OPS_SRC})

+ 42
- 0
mindspore/lite/c_ops/abs.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic_self.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ABS_H_
#define LITE_MINDSPORE_LITE_C_OPS_ABS_H_

namespace mindspore {
// Abs: element-wise absolute-value operator.
// All behavior (including shape inference) is inherited from ArithmeticSelf;
// this class only binds the op to its flatbuffer primitive representation.
class Abs : public ArithmeticSelf {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
explicit Abs(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit Abs(schema::Primitive *primitive) : ArithmeticSelf(primitive) {}
#endif
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ABS_H_

+ 35
- 0
mindspore/lite/c_ops/activation.cc View File

@@ -0,0 +1,35 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/activation.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read and written directly on the mutable
// flatbuffer table (PrimitiveT).
int Activation::GetType() const { return this->primitive->value.AsActivation()->type; }
float Activation::GetAlpha() const { return this->primitive->value.AsActivation()->alpha; }

void Activation::SetType(int type) { this->primitive->value.AsActivation()->type = (schema::ActivationType)type; }
void Activation::SetAlpha(float alpha) { this->primitive->value.AsActivation()->alpha = alpha; }

#else

// Read-only build: attributes come from the immutable flatbuffer accessors.
int Activation::GetType() const { return this->primitive->value_as_Activation()->type(); }
float Activation::GetAlpha() const { return this->primitive->value_as_Activation()->alpha(); }

// Setters are intentionally no-ops: the flatbuffer is immutable in this build.
void Activation::SetType(int type) {}
void Activation::SetAlpha(float alpha) {}
#endif
} // namespace mindspore

+ 46
- 0
mindspore/lite/c_ops/activation.h View File

@@ -0,0 +1,46 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_
#define LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_

namespace mindspore {
// Activation: generic activation-function op. The concrete function is
// selected by the `type` attribute; `alpha` is an extra scalar parameter
// used by some activation types (exact semantics live in the schema).
class Activation : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer table (schema::PrimitiveT).
explicit Activation(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit Activation(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
// Attribute accessors; setters are no-ops in the read-only build.
int GetType() const;
float GetAlpha() const;
void SetType(int type);
void SetAlpha(float alpha);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_

+ 33
- 0
mindspore/lite/c_ops/activation_grad.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/activation_grad.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attribute is read/written on the mutable flatbuffer table.
int ActivationGrad::GetType() const { return this->primitive->value.AsActivationGrad()->type; }

void ActivationGrad::SetType(int type) {
this->primitive->value.AsActivationGrad()->type = (schema::ActivationGradType)type;
}

#else

// Read-only build: attribute comes from the immutable flatbuffer accessor.
int ActivationGrad::GetType() const { return this->primitive->value_as_ActivationGrad()->type(); }

// Setter is intentionally a no-op: the flatbuffer is immutable in this build.
void ActivationGrad::SetType(int type) {}
#endif
} // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/activation_grad.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_GRAD_H_
#define LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_GRAD_H_

namespace mindspore {
// ActivationGrad: gradient op for Activation; `type` selects which
// activation's gradient is computed (see the schema for the value set).
class ActivationGrad : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer table (schema::PrimitiveT).
explicit ActivationGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit ActivationGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
// Attribute accessors; the setter is a no-op in the read-only build.
int GetType() const;
void SetType(int type);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_GRAD_H_

+ 33
- 0
mindspore/lite/c_ops/add.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/add.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attribute is read/written on the mutable flatbuffer table.
int Add::GetActivationType() const { return this->primitive->value.AsAdd()->activationType; }

void Add::SetActivationType(int activation_type) {
this->primitive->value.AsAdd()->activationType = (schema::ActivationType)activation_type;
}

#else

// Read-only build: attribute comes from the immutable flatbuffer accessor.
int Add::GetActivationType() const { return this->primitive->value_as_Add()->activationType(); }

// Setter is intentionally a no-op: the flatbuffer is immutable in this build.
void Add::SetActivationType(int activation_type) {}
#endif
} // namespace mindspore

+ 43
- 0
mindspore/lite/c_ops/add.h View File

@@ -0,0 +1,43 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif
#ifndef LITE_MINDSPORE_LITE_C_OPS_ADD_H_
#define LITE_MINDSPORE_LITE_C_OPS_ADD_H_

namespace mindspore {
// Add: element-wise addition with broadcasting (shape inference inherited
// from Arithmetic). `activationType` optionally fuses an activation into
// the op (see schema::ActivationType).
class Add : public Arithmetic {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer table (schema::PrimitiveT).
explicit Add(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit Add(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
// Attribute accessors; the setter is a no-op in the read-only build.
int GetActivationType() const;
void SetActivationType(int activation_type);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ADD_H_

+ 59
- 0
mindspore/lite/c_ops/addn.cc View File

@@ -0,0 +1,59 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/addn.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attribute is read/written on the mutable flatbuffer table.
int AddN::GetN() const { return this->primitive->value.AsAddN()->N; }

void AddN::SetN(int n) { this->primitive->value.AsAddN()->N = n; }

#else

// Read-only build: attribute comes from the immutable flatbuffer accessor.
int AddN::GetN() const { return this->primitive->value_as_AddN()->N(); }

// Setter is intentionally a no-op: the flatbuffer is immutable in this build.
void AddN::SetN(int n) {}
#endif
namespace {
constexpr size_t kLeastInputNum = 2;  // AddN needs at least two addends
}
// Infers the output shape of AddN: every input must share the first input's
// shape and data type; the output copies that shape, dtype and format.
// Returns 0 on success, 1 on invalid inputs.
int AddN::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
  MS_ASSERT(this->primitive != nullptr);
  // Validate tensor counts BEFORE touching front(): the original code called
  // inputs.front() first, which is undefined behavior on an empty vector.
  if (inputs.size() < kLeastInputNum) {
    MS_LOG(ERROR) << "input size" << inputs.size() << " is error!";
    return 1;
  }
  if (outputs.empty()) {
    MS_LOG(ERROR) << "output size" << outputs.size() << " is error!";
    return 1;
  }
  auto input = inputs.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs.front();
  MS_ASSERT(output != nullptr);
  // All addends must agree with the first input in shape and dtype.
  for (size_t i = 1; i < inputs.size(); ++i) {
    if (inputs.at(i)->shape() != inputs.at(0)->shape()) {
      MS_LOG(ERROR) << "AddN inputs shape is not equal!";
      return 1;
    }
    if (inputs.at(i)->data_type() != inputs.at(0)->data_type()) {
      MS_LOG(ERROR) << "AddN all input data type should be the same!";
      return 1;
    }
  }
  output->SetFormat(input->GetFormat());
  output->set_shape(input->shape());
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/addn.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ADD_N_H_
#define LITE_MINDSPORE_LITE_C_OPS_ADD_N_H_

namespace mindspore {
// AddN: element-wise sum of N equally-shaped input tensors.
// `N` is the declared addend count stored in the primitive's attributes.
class AddN : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer table (schema::PrimitiveT).
explicit AddN(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit AddN(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
// Output copies the first input's shape/dtype/format; see the .cc for rules.
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetN() const;
void SetN(int n);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ADD_N_H_

+ 75
- 0
mindspore/lite/c_ops/argmax.cc View File

@@ -0,0 +1,75 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/argmax.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read/written on the mutable flatbuffer table.
int ArgMax::GetAxis() const { return this->primitive->value.AsArgMax()->axis; }
bool ArgMax::GetOutMaxValue() const { return this->primitive->value.AsArgMax()->outMaxValue; }
int ArgMax::GetTopK() const { return this->primitive->value.AsArgMax()->topK; }
bool ArgMax::GetKeepDims() const { return this->primitive->value.AsArgMax()->keepDims; }
int ArgMax::GetAxisType() const { return this->primitive->value.AsArgMax()->axisType; }

void ArgMax::SetAxis(int axis) { this->primitive->value.AsArgMax()->axis = axis; }
void ArgMax::SetOutMaxValue(bool out_max_value) { this->primitive->value.AsArgMax()->outMaxValue = out_max_value; }
void ArgMax::SetTopK(int top_k) { this->primitive->value.AsArgMax()->topK = top_k; }
void ArgMax::SetKeepDims(bool keep_dims) { this->primitive->value.AsArgMax()->keepDims = keep_dims; }
void ArgMax::SetAxisType(int axis_type) { this->primitive->value.AsArgMax()->axisType = axis_type; }

#else

// Read-only build: attributes come from the immutable flatbuffer accessors;
// setters are intentionally no-ops.
int ArgMax::GetAxis() const { return this->primitive->value_as_ArgMax()->axis(); }
bool ArgMax::GetOutMaxValue() const { return this->primitive->value_as_ArgMax()->outMaxValue(); }
int ArgMax::GetTopK() const { return this->primitive->value_as_ArgMax()->topK(); }
bool ArgMax::GetKeepDims() const { return this->primitive->value_as_ArgMax()->keepDims(); }
int ArgMax::GetAxisType() const { return this->primitive->value_as_ArgMax()->axisType(); }

void ArgMax::SetAxis(int axis) {}
void ArgMax::SetOutMaxValue(bool out_max_value) {}
void ArgMax::SetTopK(int top_k) {}
void ArgMax::SetKeepDims(bool keep_dims) {}
void ArgMax::SetAxisType(int axis_type) {}
#endif
// Infers the ArgMax output shape: the reduced axis is either removed
// (topK == 1 and keepDims == false) or replaced by topK.
// Returns 0 on success, 1 on invalid arguments.
int ArgMax::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "tensor number is error.";
    // Bug fix: the original logged the error but fell through and kept
    // running with possibly-missing tensors.
    return 1;
  }
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  std::vector<int> output_shape(input->shape());
  // Cast once so the axis range check is a same-signedness comparison.
  auto input_shape_size = static_cast<int>(input->shape().size());
  // Negative axes count from the end, numpy-style.
  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
  if (axis >= input_shape_size || axis < 0) {
    MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
    return 1;
  }
  if (GetTopK() == 1 && !GetKeepDims()) {
    output_shape.erase(output_shape.begin() + axis);
  } else {
    output_shape[axis] = GetTopK();
  }

  output->SetFormat(input->GetFormat());
  output->set_shape(output_shape);
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 53
- 0
mindspore/lite/c_ops/argmax.h View File

@@ -0,0 +1,53 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_
#define LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_

namespace mindspore {
// ArgMax: index (and optionally value) of the maximum along one axis,
// with topK / keepDims variants. Attributes are stored in the flatbuffer
// primitive; see the .cc for shape-inference rules.
class ArgMax : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer table (schema::PrimitiveT).
explicit ArgMax(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit ArgMax(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
// Attribute accessors; setters are no-ops in the read-only build.
int GetAxis() const;
bool GetOutMaxValue() const;
int GetTopK() const;
bool GetKeepDims() const;
int GetAxisType() const;
void SetAxis(int axis);
void SetOutMaxValue(bool out_max_value);
void SetTopK(int top_k);
void SetKeepDims(bool keep_dims);
void SetAxisType(int axis_type);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_

+ 74
- 0
mindspore/lite/c_ops/argmin.cc View File

@@ -0,0 +1,74 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/argmin.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read/written on the mutable flatbuffer table.
int ArgMin::GetAxis() const { return this->primitive->value.AsArgMin()->axis; }
bool ArgMin::GetOutMaxValue() const { return this->primitive->value.AsArgMin()->outMaxValue; }
int ArgMin::GetTopK() const { return this->primitive->value.AsArgMin()->topK; }
bool ArgMin::GetKeepDims() const { return this->primitive->value.AsArgMin()->keepDims; }
int ArgMin::GetAxisType() const { return this->primitive->value.AsArgMin()->axisType; }

void ArgMin::SetAxis(int axis) { this->primitive->value.AsArgMin()->axis = axis; }
void ArgMin::SetOutMaxValue(bool out_max_value) { this->primitive->value.AsArgMin()->outMaxValue = out_max_value; }
void ArgMin::SetTopK(int top_k) { this->primitive->value.AsArgMin()->topK = top_k; }
void ArgMin::SetKeepDims(bool keep_dims) { this->primitive->value.AsArgMin()->keepDims = keep_dims; }
void ArgMin::SetAxisType(int axis_type) { this->primitive->value.AsArgMin()->axisType = axis_type; }

#else

// Read-only build: attributes come from the immutable flatbuffer accessors;
// setters are intentionally no-ops.
int ArgMin::GetAxis() const { return this->primitive->value_as_ArgMin()->axis(); }
bool ArgMin::GetOutMaxValue() const { return this->primitive->value_as_ArgMin()->outMaxValue(); }
int ArgMin::GetTopK() const { return this->primitive->value_as_ArgMin()->topK(); }
bool ArgMin::GetKeepDims() const { return this->primitive->value_as_ArgMin()->keepDims(); }
int ArgMin::GetAxisType() const { return this->primitive->value_as_ArgMin()->axisType(); }

void ArgMin::SetAxis(int axis) {}
void ArgMin::SetOutMaxValue(bool out_max_value) {}
void ArgMin::SetTopK(int top_k) {}
void ArgMin::SetKeepDims(bool keep_dims) {}
void ArgMin::SetAxisType(int axis_type) {}
#endif
// Infers the ArgMin output shape: the reduced axis is either removed
// (topK == 1 and keepDims == false) or replaced by topK.
// Returns 0 on success, 1 on invalid arguments.
int ArgMin::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "tensor number is error.";
    // Bug fix: the original logged the error but fell through and kept
    // running with possibly-missing tensors.
    return 1;
  }
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);
  // Cast once so the axis range check is a same-signedness comparison.
  auto input_shape_size = static_cast<int>(input->shape().size());
  // Negative axes count from the end, numpy-style.
  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
  if (axis >= input_shape_size || axis < 0) {
    MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
    return 1;
  }
  std::vector<int> output_shape(input->shape());
  if (GetTopK() == 1 && !GetKeepDims()) {
    output_shape.erase(output_shape.begin() + axis);
  } else {
    output_shape[axis] = GetTopK();
  }

  output->SetFormat(input->GetFormat());
  output->set_shape(output_shape);
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 53
- 0
mindspore/lite/c_ops/argmin.h View File

@@ -0,0 +1,53 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_
#define LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_

namespace mindspore {
// ArgMin: index (and optionally value) of the minimum along one axis,
// with topK / keepDims variants. Mirrors ArgMax; attributes are stored in
// the flatbuffer primitive — see the .cc for shape-inference rules.
class ArgMin : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: wraps the mutable flatbuffer table (schema::PrimitiveT).
explicit ArgMin(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
// Read-only build: wraps the immutable flatbuffer (schema::Primitive).
explicit ArgMin(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
// Attribute accessors; setters are no-ops in the read-only build.
int GetAxis() const;
bool GetOutMaxValue() const;
int GetTopK() const;
bool GetKeepDims() const;
int GetAxisType() const;
void SetAxis(int axis);
void SetOutMaxValue(bool out_max_value);
void SetTopK(int top_k);
void SetKeepDims(bool keep_dims);
void SetAxisType(int axis_type);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_

+ 99
- 0
mindspore/lite/c_ops/arithmetic.cc View File

@@ -0,0 +1,99 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/arithmetic.h"

namespace mindspore {
// Infers the broadcast output shape for a binary element-wise op.
// The shorter input's shape is left-padded with 1s to the longer rank, then
// numpy-style broadcasting is applied per dimension (dims equal, or one is 1).
// Side effects: fills in_shape0_/in_shape1_/out_shape_ (padded shapes),
// ndim_ (output rank) and broadcasting_ (true if any dim was broadcast).
// Returns 0 on success, 1 on bad tensor counts, -1 on incompatible shapes.
int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs_.size() != kDoubleNum) {
    MS_LOG(ERROR) << "The number of input must be " << kDoubleNum;
    return 1;
  }
  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "The number of output must be " << kSingleNum;
    return 1;
  }
  auto input0 = inputs_[0];
  MS_ASSERT(input0 != nullptr);
  auto input1 = inputs_[1];
  MS_ASSERT(input1 != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  auto input_shape0 = input0->shape();
  auto input_shape1 = input1->shape();
  auto format = input0->GetFormat();
  // Output rank is the larger of the two input ranks.
  ndim_ = static_cast<int>(input_shape0.size() > input_shape1.size() ? input_shape0.size() : input_shape1.size());
  // Bug fix: the buffers were always resized to a fixed 5, so any input with
  // rank > 5 wrote out of bounds. Keep the legacy minimum size of 5 (callers
  // may rely on 5-element buffers) but grow for higher-rank tensors.
  size_t buffer_size = ndim_ > 5 ? static_cast<size_t>(ndim_) : 5;
  in_shape0_.resize(buffer_size);
  in_shape1_.resize(buffer_size);
  out_shape_.resize(buffer_size);

  if (input_shape0.size() < input_shape1.size()) {
    // Left-pad input0's shape with 1s to match input1's rank.
    auto fill_dim_num = input_shape1.size() - input_shape0.size();
    size_t j = 0;
    for (size_t i = 0; i < input_shape1.size(); i++) {
      if (i < fill_dim_num) {
        in_shape0_[i] = 1;
      } else {
        in_shape0_[i] = input_shape0[j++];
      }
      in_shape1_[i] = input_shape1[i];
    }
  } else if (input_shape0.size() > input_shape1.size()) {
    // Left-pad input1's shape with 1s to match input0's rank.
    auto fill_dim_num = input_shape0.size() - input_shape1.size();
    size_t j = 0;
    for (size_t i = 0; i < input_shape0.size(); i++) {
      if (i < fill_dim_num) {
        in_shape1_[i] = 1;
      } else {
        in_shape1_[i] = input_shape1[j++];
      }
      in_shape0_[i] = input_shape0[i];
    }
  } else {
    // Equal ranks: copy both shapes as-is.
    for (size_t i = 0; i < input_shape0.size(); i++) {
      in_shape1_[i] = input_shape1[i];
      in_shape0_[i] = input_shape0[i];
    }
  }

  // Per-dimension broadcast: dims must match, or one of them must be 1.
  std::vector<int> output_shape;
  for (size_t i = 0; i < static_cast<size_t>(ndim_); i++) {
    if (in_shape0_[i] != in_shape1_[i]) {
      if (in_shape0_[i] == 1) {
        out_shape_[i] = in_shape1_[i];
      } else if (in_shape1_[i] == 1) {
        out_shape_[i] = in_shape0_[i];
      } else {
        MS_LOG(ERROR) << "shapes of input tensors can not be broadCasted";
        return -1;
      }
      broadcasting_ = true;
    } else {
      out_shape_[i] = in_shape0_[i];
    }
    output_shape.push_back(out_shape_[i]);
  }
  // Output inherits input0's format and data type.
  output->SetFormat(format);
  output->set_shape(output_shape);
  output->set_data_type(input0->data_type());
  return 0;
}
} // namespace mindspore

+ 55
- 0
mindspore/lite/c_ops/arithmetic.h View File

@@ -0,0 +1,55 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_
#define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_

namespace mindspore {
// Base class for binary element-wise ops. InferShape computes the broadcast
// output shape and caches rank-padded copies of both input shapes (padded to a
// fixed rank of 5 with leading 1s) for the kernels to consume.
class Arithmetic : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit Arithmetic(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit Arithmetic(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  // The accessors below only report state computed by InferShape, so they are
  // const; values are meaningful only after a successful InferShape call.
  bool Broadcasting() const { return this->broadcasting_; }
  int NDims() const { return this->ndim_; }
  std::vector<int> InShape0() const { return this->in_shape0_; }
  std::vector<int> InShape1() const { return this->in_shape1_; }
  std::vector<int> OutputShape() const { return this->out_shape_; }

 protected:
  bool broadcasting_ = false;   // true when the two input shapes differ in any dim
  int ndim_ = 0;                // broadcast rank; 0 until InferShape runs (was uninitialized)
  std::vector<int> in_shape0_;  // input 0 shape, left-padded with 1s
  std::vector<int> in_shape1_;  // input 1 shape, left-padded with 1s
  std::vector<int> out_shape_;  // broadcast result shape
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_

+ 34
- 0
mindspore/lite/c_ops/arithmetic_self.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/arithmetic_self.h"

namespace mindspore {
// Shape inference for unary element-wise ops: the output tensor has exactly
// the same format, shape and data type as the single input.
int ArithmeticSelf::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
                               std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  // Guard before dereferencing front(): MS_ASSERT alone is compiled out in
  // release builds, and front() on an empty vector is undefined behaviour.
  if (inputs_.empty() || outputs_.empty()) {
    MS_LOG(ERROR) << "input or output tensors are empty.";
    return 1;
  }
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  // Unary ops preserve all tensor metadata unchanged.
  output->SetFormat(input->GetFormat());
  output->set_shape(input->shape());
  output->set_data_type(input->data_type());

  return 0;
}
} // namespace mindspore

+ 42
- 0
mindspore/lite/c_ops/arithmetic_self.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif
#ifndef LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_
#define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_

namespace mindspore {
// Primitive wrapper for unary element-wise ops; the output mirrors the single
// input's shape, format and data type (see InferShape in arithmetic_self.cc).
class ArithmeticSelf : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit ArithmeticSelf(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit ArithmeticSelf(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_

+ 31
- 0
mindspore/lite/c_ops/batch_norm.cc View File

@@ -0,0 +1,31 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/batch_norm.h"

namespace mindspore {
// epsilon attribute accessors for BatchNorm.
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: the attribute lives on the mutable schema::PrimitiveT.
float BatchNorm::GetEpsilon() const { return this->primitive->value.AsBatchNorm()->epsilon; }

void BatchNorm::SetEpsilon(float epsilon) { this->primitive->value.AsBatchNorm()->epsilon = epsilon; }

#else

// Read-only build: the value is fetched from the flatbuffer.
float BatchNorm::GetEpsilon() const { return this->primitive->value_as_BatchNorm()->epsilon(); }

// Flatbuffers are immutable at runtime, so the setter is a deliberate no-op.
void BatchNorm::SetEpsilon(float epsilon) {}
#endif
} // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/batch_norm.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_
#define LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_

namespace mindspore {
// BatchNorm primitive wrapper; exposes the op's epsilon attribute.
class BatchNorm : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit BatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit BatchNorm(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  float GetEpsilon() const;
  // No-op in read-only (flatbuffer) builds.
  void SetEpsilon(float epsilon);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_

+ 114
- 0
mindspore/lite/c_ops/batch_to_space.cc View File

@@ -0,0 +1,114 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/batch_to_space.h"

namespace mindspore {
// blockShape / crops attribute accessors for BatchToSpace.
#ifdef PRIMITIVE_WRITEABLE
std::vector<int> BatchToSpace::GetBlockShape() const { return this->primitive->value.AsBatchToSpace()->blockShape; }
std::vector<int> BatchToSpace::GetCrops() const { return this->primitive->value.AsBatchToSpace()->crops; }

void BatchToSpace::SetBlockShape(const std::vector<int> &block_shape) {
  this->primitive->value.AsBatchToSpace()->blockShape = block_shape;
}
void BatchToSpace::SetCrops(const std::vector<int> &crops) { this->primitive->value.AsBatchToSpace()->crops = crops; }

#else

// Read-only build: copy the values out of the flatbuffer vectors.
std::vector<int> BatchToSpace::GetBlockShape() const {
  auto fb_vector = this->primitive->value_as_BatchToSpace()->blockShape();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}
std::vector<int> BatchToSpace::GetCrops() const {
  auto fb_vector = this->primitive->value_as_BatchToSpace()->crops();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}

// Flatbuffers are immutable at runtime, so both setters are deliberate no-ops.
void BatchToSpace::SetBlockShape(const std::vector<int> &block_shape) {}
void BatchToSpace::SetCrops(const std::vector<int> &crops) {}
#endif
// Expected tensor counts and attribute sizes for the 4-D (NHWC) op.
namespace {
constexpr int kBatchToSpaceOutputNum = 1;
constexpr int kBatchToSpaceInputNum = 1;
constexpr int kBlockShapeSize = 2;
constexpr int kCropsSize = 4;
}  // namespace

// Infers the NHWC output shape of BatchToSpace: N shrinks by the product of
// the block dims while H/W grow by block[0]/block[1], minus the crops.
// Returns 0 on success, 1 on any validation failure.
int BatchToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
  MS_ASSERT(this->primitive != nullptr);
  if (outputs.size() != kBatchToSpaceOutputNum || inputs.size() != kBatchToSpaceInputNum) {
    MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
    return 1;
  }

  auto input = inputs.at(0);
  if (input->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "batch_to_space only support NHWC now!";
    return 1;
  }
  auto input_shape = input->shape();
  if (input_shape.size() != kDimension_4d) {
    MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d;
    return 1;
  }

  auto block_shape = GetBlockShape();
  if (block_shape.size() != kBlockShapeSize) {
    MS_LOG(ERROR) << "Block shape size should be " << kBlockShapeSize;
    return 1;
  }
  auto crops = GetCrops();
  if (crops.size() != kCropsSize) {
    MS_LOG(ERROR) << "Crops size should be " << kCropsSize;
    return 1;
  }

  int mul_block_shape = 1;  // int loop index/product: avoids signed/unsigned mixing
  for (int i = 0; i < kBlockShapeSize; ++i) {
    if (block_shape[i] <= 0) {
      MS_LOG(ERROR) << "Input block_shape should > 0!";
      return 1;
    }
    mul_block_shape *= block_shape[i];
  }
  // N must be divisible by the *product* of the block dims. Checking each dim
  // separately (as before) wrongly accepted e.g. N=6 with block_shape {2, 2}.
  if (input_shape[NHWC_N] % mul_block_shape) {
    MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " can not divide block shape product "
                  << mul_block_shape;
    return 1;
  }
  if (input_shape[NHWC_N] < mul_block_shape) {
    MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " < product of block shape!";
    return 1;
  }
  for (int i = 0; i < kCropsSize; ++i) {
    if (crops[i] < 0) {
      MS_LOG(ERROR) << "Input crops should >= 0";
      return 1;
    }
  }
  std::vector<int32_t> output_shape(input_shape.size());
  output_shape[NHWC_N] = input_shape[NHWC_N] / mul_block_shape;
  output_shape[NHWC_H] = input_shape[NHWC_H] * block_shape[0] - crops[0] - crops[1];
  output_shape[NHWC_W] = input_shape[NHWC_W] * block_shape[1] - crops[2] - crops[3];
  output_shape[NHWC_C] = input_shape[NHWC_C];
  // Oversized crops would otherwise silently produce a non-positive spatial dim.
  if (output_shape[NHWC_H] <= 0 || output_shape[NHWC_W] <= 0) {
    MS_LOG(ERROR) << "crops too large: output spatial dimensions must be > 0";
    return 1;
  }

  outputs[0]->SetFormat(input->GetFormat());
  outputs[0]->set_shape(output_shape);
  outputs[0]->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 47
- 0
mindspore/lite/c_ops/batch_to_space.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_
#define LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_

namespace mindspore {
// BatchToSpace primitive wrapper; exposes blockShape/crops attributes and
// implements NHWC shape inference (see batch_to_space.cc).
class BatchToSpace : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit BatchToSpace(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit BatchToSpace(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  std::vector<int> GetBlockShape() const;
  std::vector<int> GetCrops() const;
  // Setters are no-ops in read-only (flatbuffer) builds.
  void SetBlockShape(const std::vector<int> &block_shape);
  void SetCrops(const std::vector<int> &crops);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_

+ 34
- 0
mindspore/lite/c_ops/bias_add.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/bias_add.h"

namespace mindspore {
// axis attribute accessors for BiasAdd.
#ifdef PRIMITIVE_WRITEABLE
std::vector<int> BiasAdd::GetAxis() const { return this->primitive->value.AsBiasAdd()->axis; }

void BiasAdd::SetAxis(const std::vector<int> &axis) { this->primitive->value.AsBiasAdd()->axis = axis; }

#else

// Read-only build: copy the axis values out of the flatbuffer vector.
std::vector<int> BiasAdd::GetAxis() const {
  auto fb_vector = this->primitive->value_as_BiasAdd()->axis();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}

// Flatbuffers are immutable at runtime, so the setter is a deliberate no-op.
void BiasAdd::SetAxis(const std::vector<int> &axis) {}
#endif
} // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/bias_add.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_
#define LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_

namespace mindspore {
// BiasAdd primitive wrapper; exposes the op's axis attribute.
class BiasAdd : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit BiasAdd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit BiasAdd(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  std::vector<int> GetAxis() const;
  // No-op in read-only (flatbuffer) builds.
  void SetAxis(const std::vector<int> &axis);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_

+ 34
- 0
mindspore/lite/c_ops/bias_grad.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/bias_grad.h"

namespace mindspore {
// axis attribute accessors for BiasGrad.
#ifdef PRIMITIVE_WRITEABLE
std::vector<int> BiasGrad::GetAxis() const { return this->primitive->value.AsBiasGrad()->axis; }

void BiasGrad::SetAxis(const std::vector<int> &axis) { this->primitive->value.AsBiasGrad()->axis = axis; }

#else

// Read-only build: copy the axis values out of the flatbuffer vector.
std::vector<int> BiasGrad::GetAxis() const {
  auto fb_vector = this->primitive->value_as_BiasGrad()->axis();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}

// Flatbuffers are immutable at runtime, so the setter is a deliberate no-op.
void BiasGrad::SetAxis(const std::vector<int> &axis) {}
#endif
} // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/bias_grad.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_BIAS_GRAD_H_
#define LITE_MINDSPORE_LITE_C_OPS_BIAS_GRAD_H_

namespace mindspore {
// BiasGrad primitive wrapper; exposes the op's axis attribute.
class BiasGrad : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit BiasGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit BiasGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  std::vector<int> GetAxis() const;
  // No-op in read-only (flatbuffer) builds.
  void SetAxis(const std::vector<int> &axis);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_BIAS_GRAD_H_

+ 35
- 0
mindspore/lite/c_ops/bn_grad_input.cc View File

@@ -0,0 +1,35 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/bn_grad_input.h"

namespace mindspore {
// eps / channels attribute accessors for BNGradInput.
#ifdef PRIMITIVE_WRITEABLE
float BNGradInput::GetEps() const { return this->primitive->value.AsBNGradInput()->eps; }
int BNGradInput::GetChannels() const { return this->primitive->value.AsBNGradInput()->channels; }

void BNGradInput::SetEps(float eps) { this->primitive->value.AsBNGradInput()->eps = eps; }
void BNGradInput::SetChannels(int channels) { this->primitive->value.AsBNGradInput()->channels = channels; }

#else

// Read-only build: values come from the flatbuffer.
float BNGradInput::GetEps() const { return this->primitive->value_as_BNGradInput()->eps(); }
int BNGradInput::GetChannels() const { return this->primitive->value_as_BNGradInput()->channels(); }

// Flatbuffers are immutable at runtime, so both setters are deliberate no-ops.
void BNGradInput::SetEps(float eps) {}
void BNGradInput::SetChannels(int channels) {}
#endif
} // namespace mindspore

+ 46
- 0
mindspore/lite/c_ops/bn_grad_input.h View File

@@ -0,0 +1,46 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_B_N_GRAD_INPUT_H_
#define LITE_MINDSPORE_LITE_C_OPS_B_N_GRAD_INPUT_H_

namespace mindspore {
// BNGradInput primitive wrapper; exposes the op's eps and channels attributes.
class BNGradInput : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit BNGradInput(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit BNGradInput(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  float GetEps() const;
  int GetChannels() const;
  // Setters are no-ops in read-only (flatbuffer) builds.
  void SetEps(float eps);
  void SetChannels(int channels);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_B_N_GRAD_INPUT_H_

+ 79
- 0
mindspore/lite/c_ops/broadcast_to.cc View File

@@ -0,0 +1,79 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/broadcast_to.h"

namespace mindspore {
// dst_shape attribute accessors for BroadcastTo.
#ifdef PRIMITIVE_WRITEABLE
std::vector<int> BroadcastTo::GetDstShape() const { return this->primitive->value.AsBroadcastTo()->dst_shape; }

void BroadcastTo::SetDstShape(const std::vector<int> &dst_shape) {
  this->primitive->value.AsBroadcastTo()->dst_shape = dst_shape;
}

#else

// Read-only build: copy the target shape out of the flatbuffer vector.
std::vector<int> BroadcastTo::GetDstShape() const {
  auto fb_vector = this->primitive->value_as_BroadcastTo()->dst_shape();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}

// Flatbuffers are immutable at runtime, so the setter is a deliberate no-op.
void BroadcastTo::SetDstShape(const std::vector<int> &dst_shape) {}
#endif
// Expected tensor counts for BroadcastTo.
namespace {
constexpr int kBroadcastToInputNum = 1;
constexpr int kBroadcastToOutputNum = 1;
}  // namespace

// Computes the output shape of BroadcastTo by right-aligning the input shape
// against dst_shape: each input dim must equal the target dim or be 1.
// Returns 0 on success, 1 on any validation failure.
int BroadcastTo::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs.size() != kBroadcastToInputNum || outputs.size() != kBroadcastToOutputNum) {
    MS_LOG(ERROR) << "input size:" << inputs.size() << ", output size:" << outputs.size();
    return 1;
  }
  auto input = inputs.at(0);
  // Use GetDstShape() instead of value_as_BroadcastTo() directly: the raw
  // flatbuffer accessor only exists in read-only builds, so the previous code
  // did not compile under PRIMITIVE_WRITEABLE.
  auto dst_shape = GetDstShape();
  auto input_shape = input->shape();
  if (input_shape.size() > dst_shape.size()) {
    MS_LOG(ERROR) << "input shape size " << input_shape.size() << " should <= broadcast to shape size "
                  << dst_shape.size() << "!";
    return 1;
  }

  std::vector<int> shape(dst_shape.size());
  // Walk both shapes from the trailing dimension (numpy-style alignment).
  // Cast before subtracting: size() - 1 on an empty vector wraps in size_t.
  int input_shape_index = static_cast<int>(input_shape.size()) - 1;
  for (int i = static_cast<int>(dst_shape.size()) - 1; i >= 0; --i) {
    if (dst_shape[i] < 0) {
      MS_LOG(ERROR) << "shape[" << i << "] = " << dst_shape[i] << " ] should be > 0!";
      return 1;
    }
    if (input_shape_index >= 0) {
      auto dim = input_shape[input_shape_index];
      if (dim != dst_shape[i] && dim != 1) {
        MS_LOG(ERROR) << "Invalid broadcast shape!";
        return 1;
      }
    }
    shape[i] = dst_shape[i];
    --input_shape_index;
  }
  outputs[0]->SetFormat(input->GetFormat());
  outputs[0]->set_shape(shape);
  outputs[0]->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/broadcast_to.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_
#define LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_

namespace mindspore {
// BroadcastTo primitive wrapper; exposes the dst_shape attribute and infers
// the broadcast output shape (see broadcast_to.cc).
class BroadcastTo : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit BroadcastTo(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit BroadcastTo(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  std::vector<int> GetDstShape() const;
  // No-op in read-only (flatbuffer) builds.
  void SetDstShape(const std::vector<int> &dst_shape);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_

+ 33
- 0
mindspore/lite/c_ops/caffe_p_relu.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/caffe_p_relu.h"

namespace mindspore {
// channelShared attribute accessors for CaffePReLU.
#ifdef PRIMITIVE_WRITEABLE
bool CaffePReLU::GetChannelShared() const { return this->primitive->value.AsCaffePReLU()->channelShared; }

void CaffePReLU::SetChannelShared(bool channel_shared) {
  this->primitive->value.AsCaffePReLU()->channelShared = channel_shared;
}

#else

// Read-only build: the flag is read from the flatbuffer.
bool CaffePReLU::GetChannelShared() const { return this->primitive->value_as_CaffePReLU()->channelShared(); }

// Flatbuffers are immutable at runtime, so the setter is a deliberate no-op.
void CaffePReLU::SetChannelShared(bool channel_shared) {}
#endif
} // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/caffe_p_relu.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#include "c_ops/activation.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_
#define LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_

namespace mindspore {
// PReLU imported from Caffe. Extends Activation with the channelShared flag
// (presumably whether one slope is shared across all channels — confirm
// against the schema definition).
class CaffePReLU : public Activation {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit CaffePReLU(schema::PrimitiveT *primitive) : Activation(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit CaffePReLU(schema::Primitive *primitive) : Activation(primitive) {}
#endif
  bool GetChannelShared() const;
  // No-op in read-only (flatbuffer) builds.
  void SetChannelShared(bool channel_shared);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_

+ 64
- 0
mindspore/lite/c_ops/cast.cc View File

@@ -0,0 +1,64 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/cast.h"

namespace mindspore {
// srcT / dstT (source / destination data type) accessors for Cast.
#ifdef PRIMITIVE_WRITEABLE
int Cast::GetSrcT() const { return this->primitive->value.AsCast()->srcT; }
int Cast::GetDstT() const { return this->primitive->value.AsCast()->dstT; }

void Cast::SetSrcT(int src_t) { this->primitive->value.AsCast()->srcT = src_t; }
void Cast::SetDstT(int dst_t) { this->primitive->value.AsCast()->dstT = dst_t; }

#else

// Read-only build: values come from the flatbuffer.
int Cast::GetSrcT() const { return this->primitive->value_as_Cast()->srcT(); }
int Cast::GetDstT() const { return this->primitive->value_as_Cast()->dstT(); }

// Flatbuffers are immutable at runtime, so both setters are deliberate no-ops.
void Cast::SetSrcT(int src_t) {}
void Cast::SetDstT(int dst_t) {}
#endif
// Shape inference for Cast: validates the source/destination types and copies
// the input tensor's metadata to the output. Returns 0 on success, 1 on error.
int Cast::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  // Check tensor counts BEFORE calling front(): the original dereferenced
  // front() first, which is undefined behaviour on an empty vector.
  if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "tensor number is error.";
    return 1;
  }
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  // (Removed a stray MS_ASSERT on the undeclared identifier `cast_prim`.)
  if (input->data_type() != GetSrcT()) {
    MS_LOG(ERROR) << "input dataType is error";
    return 1;
  }
  if (kSupportDataType.find(input->data_type()) == kSupportDataType.end()) {
    MS_LOG(ERROR) << "Unsupport input data type " << input->data_type();
    return 1;
  }
  // Only float32 destination types are accepted for now.
  if (GetDstT() != kNumberTypeFloat && GetDstT() != kNumberTypeFloat32) {
    MS_LOG(ERROR) << "Invalid output datatype " << GetDstT();
    return 1;
  }
  output->SetFormat(input->GetFormat());
  output->set_shape(input->shape());
  // NOTE(review): the output keeps the *input* data type even though the op
  // casts to GetDstT(); confirm downstream relies on this before changing it.
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 47
- 0
mindspore/lite/c_ops/cast.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_CAST_H_
#define LITE_MINDSPORE_LITE_C_OPS_CAST_H_

namespace mindspore {
// Cast primitive wrapper; exposes the source/destination data-type attributes
// and implements shape inference (see cast.cc).
class Cast : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds wrap the mutable object API (schema::PrimitiveT).
  explicit Cast(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Release builds wrap the read-only flatbuffer (schema::Primitive).
  explicit Cast(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  int GetSrcT() const;
  int GetDstT() const;
  // Setters are no-ops in read-only (flatbuffer) builds.
  void SetSrcT(int src_t);
  void SetDstT(int dst_t);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_CAST_H_

+ 35
- 0
mindspore/lite/c_ops/clip.cc View File

@@ -0,0 +1,35 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/clip.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
float Clip::GetMax() const { return this->primitive->value.AsClip()->max; }
float Clip::GetMin() const { return this->primitive->value.AsClip()->min; }

void Clip::SetMax(float max) { this->primitive->value.AsClip()->max = max; }
void Clip::SetMin(float min) { this->primitive->value.AsClip()->min = min; }

#else

float Clip::GetMax() const { return this->primitive->value_as_Clip()->max(); }
float Clip::GetMin() const { return this->primitive->value_as_Clip()->min(); }

void Clip::SetMax(float max) {}
void Clip::SetMin(float min) {}
#endif
} // namespace mindspore

+ 46
- 0
mindspore/lite/c_ops/clip.h View File

@@ -0,0 +1,46 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_CLIP_H_
#define LITE_MINDSPORE_LITE_C_OPS_CLIP_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Clip operator: clamps every element of the input into the range [min, max].
class Clip : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Clip(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Clip(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Clamp bounds. NOTE: setters are no-ops when built against the
  // read-only schema.
  float GetMax() const;
  float GetMin() const;
  void SetMax(float max);
  void SetMin(float min);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_CLIP_H_

+ 93
- 0
mindspore/lite/c_ops/concat.cc View File

@@ -0,0 +1,93 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/concat.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
int Concat::GetAxis() const { return this->primitive->value.AsConcat()->axis; }
int Concat::GetN() const { return this->primitive->value.AsConcat()->n; }

void Concat::SetAxis(int axis) { this->primitive->value.AsConcat()->axis = axis; }
void Concat::SetN(int n) { this->primitive->value.AsConcat()->n = n; }

#else

int Concat::GetAxis() const { return this->primitive->value_as_Concat()->axis(); }
int Concat::GetN() const { return this->primitive->value_as_Concat()->n(); }

void Concat::SetAxis(int axis) {}
void Concat::SetN(int n) {}
#endif
namespace {
constexpr int kConcatOutputNum = 1;
}  // namespace
// Infers the output shape of Concat. All inputs must share data type, format
// and every dimension except `axis`; the output's axis dimension is the sum
// of the inputs' axis dimensions. Returns 0 on success, 1 on any error.
int Concat::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  if (this->primitive == nullptr) {
    MS_LOG(ERROR) << "primitive is nullptr!";
    return 1;
  }
  // Guard before dereferencing inputs_.front()/outputs_.front().
  if (inputs_.empty()) {
    MS_LOG(ERROR) << "input size is error";
    return 1;
  }
  if (outputs_.size() != kConcatOutputNum) {
    MS_LOG(ERROR) << "output size is error";
    return 1;
  }
  auto input0 = inputs_.front();
  auto output = outputs_.front();
  MS_ASSERT(input0 != nullptr);
  MS_ASSERT(output != nullptr);
  auto input0_shape = input0->shape();
  // A negative axis counts from the last dimension.
  int axis = GetAxis() < 0 ? GetAxis() + static_cast<int>(input0_shape.size()) : GetAxis();
  if (axis < 0 || axis >= static_cast<int>(input0_shape.size())) {
    MS_LOG(ERROR) << "Invalid axis: " << axis;
    return 1;
  }

  // Shape of input 0 with the concat axis removed, used to verify that every
  // other input matches on all non-axis dimensions.
  auto input0_shape_without_axis = input0_shape;
  input0_shape_without_axis.erase(input0_shape_without_axis.begin() + axis);
  auto input0_data_type = input0->data_type();
  schema::Format input0_format = input0->GetFormat();
  int output_axis_dim = input0_shape.at(axis);
  for (size_t i = 1; i < inputs_.size(); ++i) {
    if (inputs_.at(i)->data_type() != input0_data_type) {
      MS_LOG(ERROR) << "All inputs should have the same data type!";
      return 1;
    }
    if (inputs_.at(i)->GetFormat() != input0_format) {
      MS_LOG(ERROR) << "All input format should be the same!";
      return 1;
    }
    auto shape_tmp = inputs_.at(i)->shape();
    if (shape_tmp.size() != input0_shape.size()) {
      MS_LOG(ERROR) << "All inputs should have the same dim num!";
      return 1;
    }
    auto axis_tmp = shape_tmp[axis];
    shape_tmp.erase(shape_tmp.begin() + axis);
    if (input0_shape_without_axis != shape_tmp) {
      MS_LOG(ERROR) << "Inputs should have the same dim except axis!";
      return 1;
    }
    output_axis_dim += axis_tmp;
  }
  auto output_shape = input0_shape;
  output_shape[axis] = output_axis_dim;
  output->set_shape(output_shape);
  output->set_data_type(input0->data_type());
  output->SetFormat(input0->GetFormat());

  return 0;
}
} // namespace mindspore

+ 47
- 0
mindspore/lite/c_ops/concat.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_
#define LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Concat operator: joins a list of tensors along one axis. All inputs must
// agree on data type, format and every dimension except the concat axis.
class Concat : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Concat(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Concat(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Infers the output shape/format/dtype; returns 0 on success.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  // Concat axis (may be negative, counting from the back) and input count.
  // NOTE: setters are no-ops when built against the read-only schema.
  int GetAxis() const;
  int GetN() const;
  void SetAxis(int axis);
  void SetN(int n);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_

+ 163
- 0
mindspore/lite/c_ops/conv2d.cc View File

@@ -0,0 +1,163 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "mindspore/lite/c_ops/conv2d.h"
namespace mindspore {
// Effective paddings used by shape inference. ConvInferShape() recomputes
// these when padMode is SAME, so they can differ from the raw GetPad*()
// attribute values.
int Conv2D::PadUp() const { return this->pad_u_; }
int Conv2D::PadDown() const { return this->pad_d_; }
int Conv2D::PadLeft() const { return this->pad_l_; }
int Conv2D::PadRight() const { return this->pad_r_; }
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes live in the mutable schema::PrimitiveT, so the
// setters below write through to the underlying Conv2DT table.
int Conv2D::GetFormat() const { return this->primitive->value.AsConv2D()->format; }
int Conv2D::GetGroup() const { return this->primitive->value.AsConv2D()->group; }
int Conv2D::GetChannelIn() const { return this->primitive->value.AsConv2D()->channelIn; }
int Conv2D::GetChannelOut() const { return this->primitive->value.AsConv2D()->channelOut; }
int Conv2D::GetKernelW() const { return this->primitive->value.AsConv2D()->kernelW; }
int Conv2D::GetKernelH() const { return this->primitive->value.AsConv2D()->kernelH; }
int Conv2D::GetStrideW() const { return this->primitive->value.AsConv2D()->strideW; }
int Conv2D::GetStrideH() const { return this->primitive->value.AsConv2D()->strideH; }
int Conv2D::GetPadMode() const { return this->primitive->value.AsConv2D()->padMode; }
int Conv2D::GetPadUp() const { return this->primitive->value.AsConv2D()->padUp; }
int Conv2D::GetPadDown() const { return this->primitive->value.AsConv2D()->padDown; }
int Conv2D::GetPadLeft() const { return this->primitive->value.AsConv2D()->padLeft; }
int Conv2D::GetPadRight() const { return this->primitive->value.AsConv2D()->padRight; }
int Conv2D::GetDilateW() const { return this->primitive->value.AsConv2D()->dilateW; }
int Conv2D::GetDilateH() const { return this->primitive->value.AsConv2D()->dilateH; }
bool Conv2D::GetHasBias() const { return this->primitive->value.AsConv2D()->hasBias; }
int Conv2D::GetActivationType() const { return this->primitive->value.AsConv2D()->activationType; }

void Conv2D::SetFormat(int format) { this->primitive->value.AsConv2D()->format = (schema::Format)format; }
void Conv2D::SetGroup(int group) { this->primitive->value.AsConv2D()->group = group; }
void Conv2D::SetChannelIn(int channel_in) { this->primitive->value.AsConv2D()->channelIn = channel_in; }
void Conv2D::SetChannelOut(int channel_out) { this->primitive->value.AsConv2D()->channelOut = channel_out; }
void Conv2D::SetKernelW(int kernel_w) { this->primitive->value.AsConv2D()->kernelW = kernel_w; }
void Conv2D::SetKernelH(int kernel_h) { this->primitive->value.AsConv2D()->kernelH = kernel_h; }
void Conv2D::SetStrideW(int stride_w) { this->primitive->value.AsConv2D()->strideW = stride_w; }
void Conv2D::SetStrideH(int stride_h) { this->primitive->value.AsConv2D()->strideH = stride_h; }
void Conv2D::SetPadMode(int pad_mode) { this->primitive->value.AsConv2D()->padMode = (schema::PadMode)pad_mode; }
void Conv2D::SetPadUp(int pad_up) { this->primitive->value.AsConv2D()->padUp = pad_up; }
void Conv2D::SetPadDown(int pad_down) { this->primitive->value.AsConv2D()->padDown = pad_down; }
void Conv2D::SetPadLeft(int pad_left) { this->primitive->value.AsConv2D()->padLeft = pad_left; }
void Conv2D::SetPadRight(int pad_right) { this->primitive->value.AsConv2D()->padRight = pad_right; }
void Conv2D::SetDilateW(int dilate_w) { this->primitive->value.AsConv2D()->dilateW = dilate_w; }
void Conv2D::SetDilateH(int dilate_h) { this->primitive->value.AsConv2D()->dilateH = dilate_h; }
void Conv2D::SetHasBias(bool has_bias) { this->primitive->value.AsConv2D()->hasBias = has_bias; }
void Conv2D::SetActivationType(int activation_type) {
  this->primitive->value.AsConv2D()->activationType = (schema::ActivationType)activation_type;
}

#else

// Read-only build: attributes come from the immutable schema::Primitive.
int Conv2D::GetFormat() const { return this->primitive->value_as_Conv2D()->format(); }
int Conv2D::GetGroup() const { return this->primitive->value_as_Conv2D()->group(); }
int Conv2D::GetChannelIn() const { return this->primitive->value_as_Conv2D()->channelIn(); }
int Conv2D::GetChannelOut() const { return this->primitive->value_as_Conv2D()->channelOut(); }
int Conv2D::GetKernelW() const { return this->primitive->value_as_Conv2D()->kernelW(); }
int Conv2D::GetKernelH() const { return this->primitive->value_as_Conv2D()->kernelH(); }
int Conv2D::GetStrideW() const { return this->primitive->value_as_Conv2D()->strideW(); }
int Conv2D::GetStrideH() const { return this->primitive->value_as_Conv2D()->strideH(); }
int Conv2D::GetPadMode() const { return this->primitive->value_as_Conv2D()->padMode(); }
int Conv2D::GetPadUp() const { return this->primitive->value_as_Conv2D()->padUp(); }
int Conv2D::GetPadDown() const { return this->primitive->value_as_Conv2D()->padDown(); }
int Conv2D::GetPadLeft() const { return this->primitive->value_as_Conv2D()->padLeft(); }
int Conv2D::GetPadRight() const { return this->primitive->value_as_Conv2D()->padRight(); }
int Conv2D::GetDilateW() const { return this->primitive->value_as_Conv2D()->dilateW(); }
int Conv2D::GetDilateH() const { return this->primitive->value_as_Conv2D()->dilateH(); }
bool Conv2D::GetHasBias() const { return this->primitive->value_as_Conv2D()->hasBias(); }
int Conv2D::GetActivationType() const { return this->primitive->value_as_Conv2D()->activationType(); }

// Setters are intentionally no-ops for the read-only schema.
void Conv2D::SetFormat(int format) {}
void Conv2D::SetGroup(int group) {}
void Conv2D::SetChannelIn(int channel_in) {}
void Conv2D::SetChannelOut(int channel_out) {}
void Conv2D::SetKernelW(int kernel_w) {}
void Conv2D::SetKernelH(int kernel_h) {}
void Conv2D::SetStrideW(int stride_w) {}
void Conv2D::SetStrideH(int stride_h) {}
void Conv2D::SetPadMode(int pad_mode) {}
void Conv2D::SetPadUp(int pad_up) {}
void Conv2D::SetPadDown(int pad_down) {}
void Conv2D::SetPadLeft(int pad_left) {}
void Conv2D::SetPadRight(int pad_right) {}
void Conv2D::SetDilateW(int dilate_w) {}
void Conv2D::SetDilateH(int dilate_h) {}
void Conv2D::SetHasBias(bool has_bias) {}
void Conv2D::SetActivationType(int activation_type) {}
#endif
// Computes the spatial output size (output_h, output_w) for the configured
// kernel/stride/dilation/padding and caches the effective paddings in
// pad_{u,d,l,r}_ for later retrieval via PadUp()/PadDown()/PadLeft()/PadRight().
void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output_w) {
  MS_ASSERT(this->primitive != nullptr);
  int kernel_w = GetKernelW();
  int kernel_h = GetKernelH();
  int stride_w = GetStrideW();
  int stride_h = GetStrideH();
  int dilate_w = GetDilateW();
  int dilate_h = GetDilateH();
  // Start from the attribute paddings; the SAME branch below overwrites them.
  pad_l_ = GetPadLeft();
  pad_u_ = GetPadUp();
  pad_d_ = GetPadDown();
  pad_r_ = GetPadRight();

  if (GetPadMode() == schema::PadMode_SAME) {
    // SAME padding: output = ceil(input / stride); total padding is whatever
    // is needed for that many windows, split as evenly as possible with the
    // extra row/column going to the bottom/right.
    *output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(stride_w));
    *output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(stride_h));
    auto pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h);
    auto pad_w_all = ((*output_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - input_w);
    pad_u_ = pad_h_all / 2;
    pad_d_ = pad_h_all - pad_u_;
    pad_l_ = pad_w_all / 2;
    pad_r_ = pad_w_all - pad_l_;
  } else {
    // Other pad modes use the attribute paddings directly:
    // out = ceil((in + pads - (kernel - 1) * dilation) / stride), which equals
    // the usual floor((in + pads - effective_kernel) / stride) + 1.
    *output_w = std::ceil((static_cast<float>(input_w) + pad_l_ + pad_r_ -
                           (static_cast<float>(kernel_w) - 1) * static_cast<float>(dilate_w)) /
                          static_cast<float>(stride_w));
    *output_h = std::ceil((static_cast<float>(input_h) + pad_u_ + pad_d_ -
                           (static_cast<float>(kernel_h) - 1) * static_cast<float>(dilate_h)) /
                          static_cast<float>(stride_h));
  }
}

// Infers the output shape of Conv2D from the input and weight tensors.
// inputs_: {input, weight[, bias]}; outputs_: {output}.
// Returns 0 on success, 1 on any error.
int Conv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  // NOTE: the original messages said "Add" — a copy-paste slip; fixed here.
  if (inputs_.size() != 2 && inputs_.size() != 3) {
    MS_LOG(ERROR) << "Conv2D should have two or three inputs";
    return 1;
  }
  if (outputs_.size() != 1) {
    MS_LOG(ERROR) << "Conv2D should have one output";
    return 1;
  }
  auto *input_tensor = inputs_.front();
  auto *weight_tensor = inputs_.at(1);
  auto *out_tensor = outputs_.front();
  MS_ASSERT(input_tensor != nullptr);
  MS_ASSERT(weight_tensor != nullptr);
  MS_ASSERT(out_tensor != nullptr);

  auto in_shape = input_tensor->shape();
  // Indices 1/2/3 below imply an NHWC layout — TODO(review): confirm the
  // converter always feeds NHWC here.
  if (in_shape.size() != 4) {
    MS_LOG(ERROR) << "Conv2D input should be 4-dimensional, got " << in_shape.size();
    return 1;
  }
  int input_h = in_shape.at(1);
  int input_w = in_shape.at(2);
  int output_w = 0, output_h = 0;

  this->ConvInferShape(input_h, input_w, &output_h, &output_w);

  std::vector<int> out_shape{input_tensor->shape()};
  out_shape.at(1) = output_h;
  out_shape.at(2) = output_w;
  out_shape.at(3) = weight_tensor->shape()[0];  // output channels = number of filters
  out_tensor->set_shape(out_shape);
  out_tensor->SetFormat(input_tensor->GetFormat());
  out_tensor->set_data_type(input_tensor->data_type());
  return 0;
}
} // namespace mindspore

+ 91
- 0
mindspore/lite/c_ops/conv2d.h View File

@@ -0,0 +1,91 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_
#define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// 2D convolution primitive. Wraps the schema attributes and implements
// output-shape inference, including SAME-padding resolution.
class Conv2D : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Conv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Conv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Infers the output shape/format/dtype; returns 0 on success.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  // Effective paddings computed by ConvInferShape(); may differ from the
  // GetPad*() attribute values when padMode is SAME.
  int PadUp() const;
  int PadDown() const;
  int PadLeft() const;
  int PadRight() const;

  // Raw schema attribute accessors. NOTE: setters are no-ops when built
  // against the read-only schema.
  int GetFormat() const;
  int GetGroup() const;
  int GetChannelIn() const;
  int GetChannelOut() const;
  int GetKernelW() const;
  int GetKernelH() const;
  int GetStrideW() const;
  int GetStrideH() const;
  int GetPadMode() const;
  int GetPadUp() const;
  int GetPadDown() const;
  int GetPadLeft() const;
  int GetPadRight() const;
  int GetDilateW() const;
  int GetDilateH() const;
  bool GetHasBias() const;
  int GetActivationType() const;
  void SetFormat(int format);
  void SetGroup(int group);
  void SetChannelIn(int channel_in);
  void SetChannelOut(int channel_out);
  void SetKernelW(int kernel_w);
  void SetKernelH(int kernel_h);
  void SetStrideW(int stride_w);
  void SetStrideH(int stride_h);
  void SetPadMode(int pad_mode);
  void SetPadUp(int pad_up);
  void SetPadDown(int pad_down);
  void SetPadLeft(int pad_left);
  void SetPadRight(int pad_right);
  void SetDilateW(int dilate_w);
  void SetDilateH(int dilate_h);
  void SetHasBias(bool has_bias);
  void SetActivationType(int activation_type);

 protected:
  // Computes output H/W and caches the effective paddings in pad_{u,d,l,r}_.
  void ConvInferShape(int input_h, int input_w, int *output_h, int *output_w);

 protected:
  int pad_u_ = 0;
  int pad_d_ = 0;
  int pad_l_ = 0;
  int pad_r_ = 0;
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_

+ 107
- 0
mindspore/lite/c_ops/conv2d_grad_filter.cc View File

@@ -0,0 +1,107 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/conv2d_grad_filter.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes live in the mutable schema::PrimitiveT, so the
// setters below write through to the underlying Conv2DGradFilterT table.
int Conv2DGradFilter::GetFormat() const { return this->primitive->value.AsConv2DGradFilter()->format; }
int Conv2DGradFilter::GetGroup() const { return this->primitive->value.AsConv2DGradFilter()->group; }
int Conv2DGradFilter::GetChannelIn() const { return this->primitive->value.AsConv2DGradFilter()->channelIn; }
int Conv2DGradFilter::GetChannelOut() const { return this->primitive->value.AsConv2DGradFilter()->channelOut; }
int Conv2DGradFilter::GetKernelW() const { return this->primitive->value.AsConv2DGradFilter()->kernelW; }
int Conv2DGradFilter::GetKernelH() const { return this->primitive->value.AsConv2DGradFilter()->kernelH; }
int Conv2DGradFilter::GetStrideW() const { return this->primitive->value.AsConv2DGradFilter()->strideW; }
int Conv2DGradFilter::GetStrideH() const { return this->primitive->value.AsConv2DGradFilter()->strideH; }
int Conv2DGradFilter::GetPadMode() const { return this->primitive->value.AsConv2DGradFilter()->padMode; }
int Conv2DGradFilter::GetPadUp() const { return this->primitive->value.AsConv2DGradFilter()->padUp; }
int Conv2DGradFilter::GetPadDown() const { return this->primitive->value.AsConv2DGradFilter()->padDown; }
int Conv2DGradFilter::GetPadLeft() const { return this->primitive->value.AsConv2DGradFilter()->padLeft; }
int Conv2DGradFilter::GetPadRight() const { return this->primitive->value.AsConv2DGradFilter()->padRight; }
int Conv2DGradFilter::GetDilateW() const { return this->primitive->value.AsConv2DGradFilter()->dilateW; }
int Conv2DGradFilter::GetDilateH() const { return this->primitive->value.AsConv2DGradFilter()->dilateH; }
bool Conv2DGradFilter::GetHasBias() const { return this->primitive->value.AsConv2DGradFilter()->hasBias; }
int Conv2DGradFilter::GetActivationType() const { return this->primitive->value.AsConv2DGradFilter()->activationType; }

void Conv2DGradFilter::SetFormat(int format) {
  this->primitive->value.AsConv2DGradFilter()->format = (schema::Format)format;
}
void Conv2DGradFilter::SetGroup(int group) { this->primitive->value.AsConv2DGradFilter()->group = group; }
void Conv2DGradFilter::SetChannelIn(int channel_in) {
  this->primitive->value.AsConv2DGradFilter()->channelIn = channel_in;
}
void Conv2DGradFilter::SetChannelOut(int channel_out) {
  this->primitive->value.AsConv2DGradFilter()->channelOut = channel_out;
}
void Conv2DGradFilter::SetKernelW(int kernel_w) { this->primitive->value.AsConv2DGradFilter()->kernelW = kernel_w; }
void Conv2DGradFilter::SetKernelH(int kernel_h) { this->primitive->value.AsConv2DGradFilter()->kernelH = kernel_h; }
void Conv2DGradFilter::SetStrideW(int stride_w) { this->primitive->value.AsConv2DGradFilter()->strideW = stride_w; }
void Conv2DGradFilter::SetStrideH(int stride_h) { this->primitive->value.AsConv2DGradFilter()->strideH = stride_h; }
void Conv2DGradFilter::SetPadMode(int pad_mode) {
  this->primitive->value.AsConv2DGradFilter()->padMode = (schema::PadMode)pad_mode;
}
void Conv2DGradFilter::SetPadUp(int pad_up) { this->primitive->value.AsConv2DGradFilter()->padUp = pad_up; }
void Conv2DGradFilter::SetPadDown(int pad_down) { this->primitive->value.AsConv2DGradFilter()->padDown = pad_down; }
void Conv2DGradFilter::SetPadLeft(int pad_left) { this->primitive->value.AsConv2DGradFilter()->padLeft = pad_left; }
void Conv2DGradFilter::SetPadRight(int pad_right) { this->primitive->value.AsConv2DGradFilter()->padRight = pad_right; }
void Conv2DGradFilter::SetDilateW(int dilate_w) { this->primitive->value.AsConv2DGradFilter()->dilateW = dilate_w; }
void Conv2DGradFilter::SetDilateH(int dilate_h) { this->primitive->value.AsConv2DGradFilter()->dilateH = dilate_h; }
void Conv2DGradFilter::SetHasBias(bool has_bias) { this->primitive->value.AsConv2DGradFilter()->hasBias = has_bias; }
void Conv2DGradFilter::SetActivationType(int activation_type) {
  this->primitive->value.AsConv2DGradFilter()->activationType = (schema::ActivationType)activation_type;
}

#else

// Read-only build: attributes come from the immutable schema::Primitive.
int Conv2DGradFilter::GetFormat() const { return this->primitive->value_as_Conv2DGradFilter()->format(); }
int Conv2DGradFilter::GetGroup() const { return this->primitive->value_as_Conv2DGradFilter()->group(); }
int Conv2DGradFilter::GetChannelIn() const { return this->primitive->value_as_Conv2DGradFilter()->channelIn(); }
int Conv2DGradFilter::GetChannelOut() const { return this->primitive->value_as_Conv2DGradFilter()->channelOut(); }
int Conv2DGradFilter::GetKernelW() const { return this->primitive->value_as_Conv2DGradFilter()->kernelW(); }
int Conv2DGradFilter::GetKernelH() const { return this->primitive->value_as_Conv2DGradFilter()->kernelH(); }
int Conv2DGradFilter::GetStrideW() const { return this->primitive->value_as_Conv2DGradFilter()->strideW(); }
int Conv2DGradFilter::GetStrideH() const { return this->primitive->value_as_Conv2DGradFilter()->strideH(); }
int Conv2DGradFilter::GetPadMode() const { return this->primitive->value_as_Conv2DGradFilter()->padMode(); }
int Conv2DGradFilter::GetPadUp() const { return this->primitive->value_as_Conv2DGradFilter()->padUp(); }
int Conv2DGradFilter::GetPadDown() const { return this->primitive->value_as_Conv2DGradFilter()->padDown(); }
int Conv2DGradFilter::GetPadLeft() const { return this->primitive->value_as_Conv2DGradFilter()->padLeft(); }
int Conv2DGradFilter::GetPadRight() const { return this->primitive->value_as_Conv2DGradFilter()->padRight(); }
int Conv2DGradFilter::GetDilateW() const { return this->primitive->value_as_Conv2DGradFilter()->dilateW(); }
int Conv2DGradFilter::GetDilateH() const { return this->primitive->value_as_Conv2DGradFilter()->dilateH(); }
bool Conv2DGradFilter::GetHasBias() const { return this->primitive->value_as_Conv2DGradFilter()->hasBias(); }
int Conv2DGradFilter::GetActivationType() const {
  return this->primitive->value_as_Conv2DGradFilter()->activationType();
}

// Setters are intentionally no-ops for the read-only schema.
void Conv2DGradFilter::SetFormat(int format) {}
void Conv2DGradFilter::SetGroup(int group) {}
void Conv2DGradFilter::SetChannelIn(int channel_in) {}
void Conv2DGradFilter::SetChannelOut(int channel_out) {}
void Conv2DGradFilter::SetKernelW(int kernel_w) {}
void Conv2DGradFilter::SetKernelH(int kernel_h) {}
void Conv2DGradFilter::SetStrideW(int stride_w) {}
void Conv2DGradFilter::SetStrideH(int stride_h) {}
void Conv2DGradFilter::SetPadMode(int pad_mode) {}
void Conv2DGradFilter::SetPadUp(int pad_up) {}
void Conv2DGradFilter::SetPadDown(int pad_down) {}
void Conv2DGradFilter::SetPadLeft(int pad_left) {}
void Conv2DGradFilter::SetPadRight(int pad_right) {}
void Conv2DGradFilter::SetDilateW(int dilate_w) {}
void Conv2DGradFilter::SetDilateH(int dilate_h) {}
void Conv2DGradFilter::SetHasBias(bool has_bias) {}
void Conv2DGradFilter::SetActivationType(int activation_type) {}
#endif
} // namespace mindspore

+ 76
- 0
mindspore/lite/c_ops/conv2d_grad_filter.h View File

@@ -0,0 +1,76 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_FILTER_H_
#define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_FILTER_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Gradient of Conv2D with respect to the filter (weight) tensor. Pure
// attribute wrapper over the schema; no shape inference is defined here.
class Conv2DGradFilter : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Conv2DGradFilter(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Conv2DGradFilter(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Raw schema attribute accessors. NOTE: setters are no-ops when built
  // against the read-only schema.
  int GetFormat() const;
  int GetGroup() const;
  int GetChannelIn() const;
  int GetChannelOut() const;
  int GetKernelW() const;
  int GetKernelH() const;
  int GetStrideW() const;
  int GetStrideH() const;
  int GetPadMode() const;
  int GetPadUp() const;
  int GetPadDown() const;
  int GetPadLeft() const;
  int GetPadRight() const;
  int GetDilateW() const;
  int GetDilateH() const;
  bool GetHasBias() const;
  int GetActivationType() const;
  void SetFormat(int format);
  void SetGroup(int group);
  void SetChannelIn(int channel_in);
  void SetChannelOut(int channel_out);
  void SetKernelW(int kernel_w);
  void SetKernelH(int kernel_h);
  void SetStrideW(int stride_w);
  void SetStrideH(int stride_h);
  void SetPadMode(int pad_mode);
  void SetPadUp(int pad_up);
  void SetPadDown(int pad_down);
  void SetPadLeft(int pad_left);
  void SetPadRight(int pad_right);
  void SetDilateW(int dilate_w);
  void SetDilateH(int dilate_h);
  void SetHasBias(bool has_bias);
  void SetActivationType(int activation_type);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_FILTER_H_

+ 105
- 0
mindspore/lite/c_ops/conv2d_grad_input.cc View File

@@ -0,0 +1,105 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/conv2d_grad_input.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
int Conv2DGradInput::GetFormat() const { return this->primitive->value.AsConv2DGradInput()->format; }
int Conv2DGradInput::GetGroup() const { return this->primitive->value.AsConv2DGradInput()->group; }
int Conv2DGradInput::GetChannelIn() const { return this->primitive->value.AsConv2DGradInput()->channelIn; }
int Conv2DGradInput::GetChannelOut() const { return this->primitive->value.AsConv2DGradInput()->channelOut; }
int Conv2DGradInput::GetKernelW() const { return this->primitive->value.AsConv2DGradInput()->kernelW; }
int Conv2DGradInput::GetKernelH() const { return this->primitive->value.AsConv2DGradInput()->kernelH; }
int Conv2DGradInput::GetStrideW() const { return this->primitive->value.AsConv2DGradInput()->strideW; }
int Conv2DGradInput::GetStrideH() const { return this->primitive->value.AsConv2DGradInput()->strideH; }
int Conv2DGradInput::GetPadMode() const { return this->primitive->value.AsConv2DGradInput()->padMode; }
int Conv2DGradInput::GetPadUp() const { return this->primitive->value.AsConv2DGradInput()->padUp; }
int Conv2DGradInput::GetPadDown() const { return this->primitive->value.AsConv2DGradInput()->padDown; }
int Conv2DGradInput::GetPadLeft() const { return this->primitive->value.AsConv2DGradInput()->padLeft; }
int Conv2DGradInput::GetPadRight() const { return this->primitive->value.AsConv2DGradInput()->padRight; }
int Conv2DGradInput::GetDilateW() const { return this->primitive->value.AsConv2DGradInput()->dilateW; }
int Conv2DGradInput::GetDilateH() const { return this->primitive->value.AsConv2DGradInput()->dilateH; }
bool Conv2DGradInput::GetHasBias() const { return this->primitive->value.AsConv2DGradInput()->hasBias; }
int Conv2DGradInput::GetActivationType() const { return this->primitive->value.AsConv2DGradInput()->activationType; }

// Conv2DGradInput attribute setters (PRIMITIVE_WRITEABLE build): write through
// to the mutable flatbuffer object. Enum-valued attributes take a plain int
// and are cast to the corresponding schema enum.
void Conv2DGradInput::SetFormat(int format) {
this->primitive->value.AsConv2DGradInput()->format = (schema::Format)format;
}
void Conv2DGradInput::SetGroup(int group) { this->primitive->value.AsConv2DGradInput()->group = group; }
void Conv2DGradInput::SetChannelIn(int channel_in) {
this->primitive->value.AsConv2DGradInput()->channelIn = channel_in;
}
void Conv2DGradInput::SetChannelOut(int channel_out) {
this->primitive->value.AsConv2DGradInput()->channelOut = channel_out;
}
void Conv2DGradInput::SetKernelW(int kernel_w) { this->primitive->value.AsConv2DGradInput()->kernelW = kernel_w; }
void Conv2DGradInput::SetKernelH(int kernel_h) { this->primitive->value.AsConv2DGradInput()->kernelH = kernel_h; }
void Conv2DGradInput::SetStrideW(int stride_w) { this->primitive->value.AsConv2DGradInput()->strideW = stride_w; }
void Conv2DGradInput::SetStrideH(int stride_h) { this->primitive->value.AsConv2DGradInput()->strideH = stride_h; }
void Conv2DGradInput::SetPadMode(int pad_mode) {
this->primitive->value.AsConv2DGradInput()->padMode = (schema::PadMode)pad_mode;
}
void Conv2DGradInput::SetPadUp(int pad_up) { this->primitive->value.AsConv2DGradInput()->padUp = pad_up; }
void Conv2DGradInput::SetPadDown(int pad_down) { this->primitive->value.AsConv2DGradInput()->padDown = pad_down; }
void Conv2DGradInput::SetPadLeft(int pad_left) { this->primitive->value.AsConv2DGradInput()->padLeft = pad_left; }
void Conv2DGradInput::SetPadRight(int pad_right) { this->primitive->value.AsConv2DGradInput()->padRight = pad_right; }
void Conv2DGradInput::SetDilateW(int dilate_w) { this->primitive->value.AsConv2DGradInput()->dilateW = dilate_w; }
void Conv2DGradInput::SetDilateH(int dilate_h) { this->primitive->value.AsConv2DGradInput()->dilateH = dilate_h; }
void Conv2DGradInput::SetHasBias(bool has_bias) { this->primitive->value.AsConv2DGradInput()->hasBias = has_bias; }
void Conv2DGradInput::SetActivationType(int activation_type) {
this->primitive->value.AsConv2DGradInput()->activationType = (schema::ActivationType)activation_type;
}

#else

// Conv2DGradInput attribute getters (read-only build): values are read from
// the immutable flatbuffer table accessors.
int Conv2DGradInput::GetFormat() const { return this->primitive->value_as_Conv2DGradInput()->format(); }
int Conv2DGradInput::GetGroup() const { return this->primitive->value_as_Conv2DGradInput()->group(); }
int Conv2DGradInput::GetChannelIn() const { return this->primitive->value_as_Conv2DGradInput()->channelIn(); }
int Conv2DGradInput::GetChannelOut() const { return this->primitive->value_as_Conv2DGradInput()->channelOut(); }
int Conv2DGradInput::GetKernelW() const { return this->primitive->value_as_Conv2DGradInput()->kernelW(); }
int Conv2DGradInput::GetKernelH() const { return this->primitive->value_as_Conv2DGradInput()->kernelH(); }
int Conv2DGradInput::GetStrideW() const { return this->primitive->value_as_Conv2DGradInput()->strideW(); }
int Conv2DGradInput::GetStrideH() const { return this->primitive->value_as_Conv2DGradInput()->strideH(); }
int Conv2DGradInput::GetPadMode() const { return this->primitive->value_as_Conv2DGradInput()->padMode(); }
int Conv2DGradInput::GetPadUp() const { return this->primitive->value_as_Conv2DGradInput()->padUp(); }
int Conv2DGradInput::GetPadDown() const { return this->primitive->value_as_Conv2DGradInput()->padDown(); }
int Conv2DGradInput::GetPadLeft() const { return this->primitive->value_as_Conv2DGradInput()->padLeft(); }
int Conv2DGradInput::GetPadRight() const { return this->primitive->value_as_Conv2DGradInput()->padRight(); }
int Conv2DGradInput::GetDilateW() const { return this->primitive->value_as_Conv2DGradInput()->dilateW(); }
int Conv2DGradInput::GetDilateH() const { return this->primitive->value_as_Conv2DGradInput()->dilateH(); }
bool Conv2DGradInput::GetHasBias() const { return this->primitive->value_as_Conv2DGradInput()->hasBias(); }
int Conv2DGradInput::GetActivationType() const { return this->primitive->value_as_Conv2DGradInput()->activationType(); }

// Setters are intentional no-ops in the read-only build: the flatbuffer
// table is immutable, so attribute writes are silently discarded here.
void Conv2DGradInput::SetFormat(int format) {}
void Conv2DGradInput::SetGroup(int group) {}
void Conv2DGradInput::SetChannelIn(int channel_in) {}
void Conv2DGradInput::SetChannelOut(int channel_out) {}
void Conv2DGradInput::SetKernelW(int kernel_w) {}
void Conv2DGradInput::SetKernelH(int kernel_h) {}
void Conv2DGradInput::SetStrideW(int stride_w) {}
void Conv2DGradInput::SetStrideH(int stride_h) {}
void Conv2DGradInput::SetPadMode(int pad_mode) {}
void Conv2DGradInput::SetPadUp(int pad_up) {}
void Conv2DGradInput::SetPadDown(int pad_down) {}
void Conv2DGradInput::SetPadLeft(int pad_left) {}
void Conv2DGradInput::SetPadRight(int pad_right) {}
void Conv2DGradInput::SetDilateW(int dilate_w) {}
void Conv2DGradInput::SetDilateH(int dilate_h) {}
void Conv2DGradInput::SetHasBias(bool has_bias) {}
void Conv2DGradInput::SetActivationType(int activation_type) {}
#endif
} // namespace mindspore

+ 76
- 0
mindspore/lite/c_ops/conv2d_grad_input.h View File

@@ -0,0 +1,76 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_INPUT_H_
#define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_INPUT_H_

namespace mindspore {
// Wrapper around the Conv2DGradInput schema primitive (gradient of Conv2D
// with respect to its input). Exposes typed accessors for the convolution
// attributes stored in the underlying flatbuffer; the writeable build wraps
// the mutable schema::PrimitiveT, the read-only build wraps schema::Primitive
// (in which case the setters are no-ops — see the .cc file).
class Conv2DGradInput : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
explicit Conv2DGradInput(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
explicit Conv2DGradInput(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
// Attribute getters: tensor layout, grouping, channel counts, kernel size,
// strides, pad mode and explicit paddings, dilation, bias flag and fused
// activation type. Enum-valued attributes are returned as plain int.
int GetFormat() const;
int GetGroup() const;
int GetChannelIn() const;
int GetChannelOut() const;
int GetKernelW() const;
int GetKernelH() const;
int GetStrideW() const;
int GetStrideH() const;
int GetPadMode() const;
int GetPadUp() const;
int GetPadDown() const;
int GetPadLeft() const;
int GetPadRight() const;
int GetDilateW() const;
int GetDilateH() const;
bool GetHasBias() const;
int GetActivationType() const;
// Attribute setters (effective only in the PRIMITIVE_WRITEABLE build).
void SetFormat(int format);
void SetGroup(int group);
void SetChannelIn(int channel_in);
void SetChannelOut(int channel_out);
void SetKernelW(int kernel_w);
void SetKernelH(int kernel_h);
void SetStrideW(int stride_w);
void SetStrideH(int stride_h);
void SetPadMode(int pad_mode);
void SetPadUp(int pad_up);
void SetPadDown(int pad_down);
void SetPadLeft(int pad_left);
void SetPadRight(int pad_right);
void SetDilateW(int dilate_w);
void SetDilateH(int dilate_h);
void SetHasBias(bool has_bias);
void SetActivationType(int activation_type);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_INPUT_H_

+ 42
- 0
mindspore/lite/c_ops/cos.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic_self.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_COS_H_
#define LITE_MINDSPORE_LITE_C_OPS_COS_H_

namespace mindspore {
// Element-wise cosine op. Carries no attributes of its own; it inherits all
// behavior from ArithmeticSelf and only forwards the primitive to the base.
class Cos : public ArithmeticSelf {
public:
#ifdef PRIMITIVE_WRITEABLE
explicit Cos(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else
explicit Cos(schema::Primitive *primitive) : ArithmeticSelf(primitive) {}
#endif
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_COS_H_

+ 55
- 0
mindspore/lite/c_ops/crop.cc View File

@@ -0,0 +1,55 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/crop.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Crop attribute accessors (PRIMITIVE_WRITEABLE build): axis marks the first
// cropped dimension; offsets holds the per-dimension crop start positions.
long Crop::GetAxis() const { return this->primitive->value.AsCrop()->axis; }
std::vector<long> Crop::GetOffsets() const { return this->primitive->value.AsCrop()->offsets; }

void Crop::SetAxis(long axis) { this->primitive->value.AsCrop()->axis = axis; }
void Crop::SetOffsets(const std::vector<long> &offsets) { this->primitive->value.AsCrop()->offsets = offsets; }

#else

// Crop attribute accessors (read-only build). GetOffsets copies the
// flatbuffer vector into a std::vector; the setters are intentional no-ops.
long Crop::GetAxis() const { return this->primitive->value_as_Crop()->axis(); }
std::vector<long> Crop::GetOffsets() const {
auto fb_vector = this->primitive->value_as_Crop()->offsets();
return std::vector<long>(fb_vector->begin(), fb_vector->end());
}

void Crop::SetAxis(long axis) {}
void Crop::SetOffsets(const std::vector<long> &offsets) {}
#endif
namespace {
// Crop takes exactly two inputs (data, reference-shape tensor) and one output.
constexpr int kCropOutputNum = 1;
constexpr int kCropInputNum = 2;
} // namespace

// Infers the output shape of Crop: the output copies the SECOND input's shape
// (the reference tensor) while taking format and data type from the first
// input. Returns 0 on success, 1 on invalid input/output counts.
int Crop::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
MS_ASSERT(this->primitive != nullptr);
if (outputs.size() != kCropOutputNum || inputs.size() != kCropInputNum) {
MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
return 1;
}
outputs[0]->set_shape(inputs[1]->shape());
outputs[0]->SetFormat(inputs[0]->GetFormat());
outputs[0]->set_data_type(inputs[0]->data_type());

return 0;
}
} // namespace mindspore

+ 47
- 0
mindspore/lite/c_ops/crop.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_CROP_H_
#define LITE_MINDSPORE_LITE_C_OPS_CROP_H_

namespace mindspore {
// Wrapper around the Crop schema primitive (Caffe-style crop). axis is the
// first dimension to crop and offsets gives the crop start per dimension;
// InferShape takes the output shape from the second (reference) input.
// NOTE(review): axis/offsets use `long`, whose width is platform-dependent —
// confirm against the schema's declared integer width.
class Crop : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
explicit Crop(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
explicit Crop(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
long GetAxis() const;
std::vector<long> GetOffsets() const;
// Setters are effective only in the PRIMITIVE_WRITEABLE build.
void SetAxis(long axis);
void SetOffsets(const std::vector<long> &offsets);
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_CROP_H_

+ 145
- 0
mindspore/lite/c_ops/deconv2d.cc View File

@@ -0,0 +1,145 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/deconv2d.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// DeConv2D (transposed convolution) attribute accessors for the
// PRIMITIVE_WRITEABLE build: read/write the mutable flatbuffer object.
// Enum-valued attributes take plain int and are cast to the schema enum.
int DeConv2D::GetFormat() const { return this->primitive->value.AsDeConv2D()->format; }
int DeConv2D::GetGroup() const { return this->primitive->value.AsDeConv2D()->group; }
int DeConv2D::GetChannelIn() const { return this->primitive->value.AsDeConv2D()->channelIn; }
int DeConv2D::GetChannelOut() const { return this->primitive->value.AsDeConv2D()->channelOut; }
int DeConv2D::GetKernelW() const { return this->primitive->value.AsDeConv2D()->kernelW; }
int DeConv2D::GetKernelH() const { return this->primitive->value.AsDeConv2D()->kernelH; }
int DeConv2D::GetStrideW() const { return this->primitive->value.AsDeConv2D()->strideW; }
int DeConv2D::GetStrideH() const { return this->primitive->value.AsDeConv2D()->strideH; }
int DeConv2D::GetPadMode() const { return this->primitive->value.AsDeConv2D()->padMode; }
int DeConv2D::GetPadUp() const { return this->primitive->value.AsDeConv2D()->padUp; }
int DeConv2D::GetPadDown() const { return this->primitive->value.AsDeConv2D()->padDown; }
int DeConv2D::GetPadLeft() const { return this->primitive->value.AsDeConv2D()->padLeft; }
int DeConv2D::GetPadRight() const { return this->primitive->value.AsDeConv2D()->padRight; }
int DeConv2D::GetDilateW() const { return this->primitive->value.AsDeConv2D()->dilateW; }
int DeConv2D::GetDilateH() const { return this->primitive->value.AsDeConv2D()->dilateH; }
bool DeConv2D::GetHasBias() const { return this->primitive->value.AsDeConv2D()->hasBias; }
int DeConv2D::GetActivationType() const { return this->primitive->value.AsDeConv2D()->activationType; }

void DeConv2D::SetFormat(int format) { this->primitive->value.AsDeConv2D()->format = (schema::Format)format; }
void DeConv2D::SetGroup(int group) { this->primitive->value.AsDeConv2D()->group = group; }
void DeConv2D::SetChannelIn(int channel_in) { this->primitive->value.AsDeConv2D()->channelIn = channel_in; }
void DeConv2D::SetChannelOut(int channel_out) { this->primitive->value.AsDeConv2D()->channelOut = channel_out; }
void DeConv2D::SetKernelW(int kernel_w) { this->primitive->value.AsDeConv2D()->kernelW = kernel_w; }
void DeConv2D::SetKernelH(int kernel_h) { this->primitive->value.AsDeConv2D()->kernelH = kernel_h; }
void DeConv2D::SetStrideW(int stride_w) { this->primitive->value.AsDeConv2D()->strideW = stride_w; }
void DeConv2D::SetStrideH(int stride_h) { this->primitive->value.AsDeConv2D()->strideH = stride_h; }
void DeConv2D::SetPadMode(int pad_mode) { this->primitive->value.AsDeConv2D()->padMode = (schema::PadMode)pad_mode; }
void DeConv2D::SetPadUp(int pad_up) { this->primitive->value.AsDeConv2D()->padUp = pad_up; }
void DeConv2D::SetPadDown(int pad_down) { this->primitive->value.AsDeConv2D()->padDown = pad_down; }
void DeConv2D::SetPadLeft(int pad_left) { this->primitive->value.AsDeConv2D()->padLeft = pad_left; }
void DeConv2D::SetPadRight(int pad_right) { this->primitive->value.AsDeConv2D()->padRight = pad_right; }
void DeConv2D::SetDilateW(int dilate_w) { this->primitive->value.AsDeConv2D()->dilateW = dilate_w; }
void DeConv2D::SetDilateH(int dilate_h) { this->primitive->value.AsDeConv2D()->dilateH = dilate_h; }
void DeConv2D::SetHasBias(bool has_bias) { this->primitive->value.AsDeConv2D()->hasBias = has_bias; }
void DeConv2D::SetActivationType(int activation_type) {
this->primitive->value.AsDeConv2D()->activationType = (schema::ActivationType)activation_type;
}

#else

// DeConv2D attribute accessors for the read-only build: getters read the
// immutable flatbuffer table; setters are intentional no-ops.
int DeConv2D::GetFormat() const { return this->primitive->value_as_DeConv2D()->format(); }
int DeConv2D::GetGroup() const { return this->primitive->value_as_DeConv2D()->group(); }
int DeConv2D::GetChannelIn() const { return this->primitive->value_as_DeConv2D()->channelIn(); }
int DeConv2D::GetChannelOut() const { return this->primitive->value_as_DeConv2D()->channelOut(); }
int DeConv2D::GetKernelW() const { return this->primitive->value_as_DeConv2D()->kernelW(); }
int DeConv2D::GetKernelH() const { return this->primitive->value_as_DeConv2D()->kernelH(); }
int DeConv2D::GetStrideW() const { return this->primitive->value_as_DeConv2D()->strideW(); }
int DeConv2D::GetStrideH() const { return this->primitive->value_as_DeConv2D()->strideH(); }
int DeConv2D::GetPadMode() const { return this->primitive->value_as_DeConv2D()->padMode(); }
int DeConv2D::GetPadUp() const { return this->primitive->value_as_DeConv2D()->padUp(); }
int DeConv2D::GetPadDown() const { return this->primitive->value_as_DeConv2D()->padDown(); }
int DeConv2D::GetPadLeft() const { return this->primitive->value_as_DeConv2D()->padLeft(); }
int DeConv2D::GetPadRight() const { return this->primitive->value_as_DeConv2D()->padRight(); }
int DeConv2D::GetDilateW() const { return this->primitive->value_as_DeConv2D()->dilateW(); }
int DeConv2D::GetDilateH() const { return this->primitive->value_as_DeConv2D()->dilateH(); }
bool DeConv2D::GetHasBias() const { return this->primitive->value_as_DeConv2D()->hasBias(); }
int DeConv2D::GetActivationType() const { return this->primitive->value_as_DeConv2D()->activationType(); }

void DeConv2D::SetFormat(int format) {}
void DeConv2D::SetGroup(int group) {}
void DeConv2D::SetChannelIn(int channel_in) {}
void DeConv2D::SetChannelOut(int channel_out) {}
void DeConv2D::SetKernelW(int kernel_w) {}
void DeConv2D::SetKernelH(int kernel_h) {}
void DeConv2D::SetStrideW(int stride_w) {}
void DeConv2D::SetStrideH(int stride_h) {}
void DeConv2D::SetPadMode(int pad_mode) {}
void DeConv2D::SetPadUp(int pad_up) {}
void DeConv2D::SetPadDown(int pad_down) {}
void DeConv2D::SetPadLeft(int pad_left) {}
void DeConv2D::SetPadRight(int pad_right) {}
void DeConv2D::SetDilateW(int dilate_w) {}
void DeConv2D::SetDilateH(int dilate_h) {}
void DeConv2D::SetHasBias(bool has_bias) {}
void DeConv2D::SetActivationType(int activation_type) {}
#endif
// Infers the output shape of the transposed convolution.
// inputs_: [input, weight(, ...)]; outputs_: [output].
// Output batch comes from the input, channel from the weight tensor, and the
// spatial dims from the pad-mode-specific transposed-convolution formula.
// Also caches the explicit paddings into pad_u_/pad_d_/pad_l_/pad_r_.
// Returns 0 on success, 1 on an unsupported pad mode.
int DeConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto weight = inputs_.at(1);
  MS_ASSERT(weight != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  int32_t input_h = input->Height();
  int32_t input_w = input->Width();

  int32_t output_n = input->Batch();
  int32_t output_h = 0;
  int32_t output_w = 0;
  int32_t output_c = weight->Channel();

  int kernel_w = GetKernelW();
  int kernel_h = GetKernelH();
  int stride_w = GetStrideW();
  int stride_h = GetStrideH();
  int dilate_w = GetDilateW();
  int dilate_h = GetDilateH();
  pad_l_ = GetPadLeft();
  pad_u_ = GetPadUp();
  pad_d_ = GetPadDown();
  pad_r_ = GetPadRight();
  auto pad_mode = (schema::PadMode)GetPadMode();

  if (pad_mode == schema::PadMode_CAFFE) {
    // Explicit padding: stride*(in-1) + effective (dilated) kernel - pads.
    output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_;
    output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_;
  } else if (pad_mode == schema::PadMode_SAME) {
    output_h = input_h * stride_h;
    output_w = input_w * stride_w;
  } else if (pad_mode == schema::PadMode_VALID) {
    // NOTE(review): this branch ignores dilation, unlike the CAFFE branch —
    // confirm whether VALID deconv is ever used with dilation > 1.
    output_h = (input_h - 1) * stride_h + kernel_h;
    output_w = (input_w - 1) * stride_w + kernel_w;
  } else {
    MS_LOG(ERROR) << "unsupported pad mode for deconv";
    // Fix: previously execution fell through after the error and returned 0
    // with a 0x0 spatial shape, silently propagating a bogus output tensor.
    return 1;
  }

  std::vector<int> out_shape = {output_n, output_h, output_w, output_c};
  output->set_shape(out_shape);
  output->SetFormat(input->GetFormat());
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 88
- 0
mindspore/lite/c_ops/deconv2d.h View File

@@ -0,0 +1,88 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_
#define LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_

namespace mindspore {
// Wrapper around the DeConv2D (transposed convolution) schema primitive.
// Exposes typed accessors for the convolution attributes and implements
// output-shape inference; InferShape also caches the explicit paddings in
// pad_u_/pad_d_/pad_l_/pad_r_, exposed via the Pad*() readers below.
class DeConv2D : public PrimitiveC {
public:
#ifdef PRIMITIVE_WRITEABLE
explicit DeConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
explicit DeConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
// Attribute getters (layout, grouping, channels, kernel, stride, padding,
// dilation, bias flag, fused activation).
int GetFormat() const;
int GetGroup() const;
int GetChannelIn() const;
int GetChannelOut() const;
int GetKernelW() const;
int GetKernelH() const;
int GetStrideW() const;
int GetStrideH() const;
int GetPadMode() const;
int GetPadUp() const;
int GetPadDown() const;
int GetPadLeft() const;
int GetPadRight() const;
int GetDilateW() const;
int GetDilateH() const;
bool GetHasBias() const;
int GetActivationType() const;
// Attribute setters (effective only in the PRIMITIVE_WRITEABLE build).
void SetFormat(int format);
void SetGroup(int group);
void SetChannelIn(int channel_in);
void SetChannelOut(int channel_out);
void SetKernelW(int kernel_w);
void SetKernelH(int kernel_h);
void SetStrideW(int stride_w);
void SetStrideH(int stride_h);
void SetPadMode(int pad_mode);
void SetPadUp(int pad_up);
void SetPadDown(int pad_down);
void SetPadLeft(int pad_left);
void SetPadRight(int pad_right);
void SetDilateW(int dilate_w);
void SetDilateH(int dilate_h);
void SetHasBias(bool has_bias);
void SetActivationType(int activation_type);

// Cached paddings; populated by InferShape (zero before it runs).
int PadUp() const { return this->pad_u_; }
int PadDown() const { return this->pad_d_; }
int PadLeft() const { return this->pad_l_; }
int PadRight() const { return this->pad_r_; }

protected:
int pad_u_ = 0;
int pad_d_ = 0;
int pad_l_ = 0;
int pad_r_ = 0;
};
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_

+ 161
- 0
mindspore/lite/c_ops/dedepthwise_conv2d.cc View File

@@ -0,0 +1,161 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/dedepthwise_conv2d.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// DeDepthwiseConv2D (depthwise transposed convolution) attribute accessors
// for the PRIMITIVE_WRITEABLE build: read/write the mutable flatbuffer
// object. Enum-valued attributes take plain int and are cast to schema enums.
int DeDepthwiseConv2D::GetFormat() const { return this->primitive->value.AsDeDepthwiseConv2D()->format; }
int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive->value.AsDeDepthwiseConv2D()->channelIn; }
int DeDepthwiseConv2D::GetChannelMultiplier() const {
return this->primitive->value.AsDeDepthwiseConv2D()->channelMultiplier;
}
int DeDepthwiseConv2D::GetKernelW() const { return this->primitive->value.AsDeDepthwiseConv2D()->kernelW; }
int DeDepthwiseConv2D::GetKernelH() const { return this->primitive->value.AsDeDepthwiseConv2D()->kernelH; }
int DeDepthwiseConv2D::GetStrideW() const { return this->primitive->value.AsDeDepthwiseConv2D()->strideW; }
int DeDepthwiseConv2D::GetStrideH() const { return this->primitive->value.AsDeDepthwiseConv2D()->strideH; }
int DeDepthwiseConv2D::GetPadMode() const { return this->primitive->value.AsDeDepthwiseConv2D()->padMode; }
int DeDepthwiseConv2D::GetPadUp() const { return this->primitive->value.AsDeDepthwiseConv2D()->padUp; }
int DeDepthwiseConv2D::GetPadDown() const { return this->primitive->value.AsDeDepthwiseConv2D()->padDown; }
int DeDepthwiseConv2D::GetPadLeft() const { return this->primitive->value.AsDeDepthwiseConv2D()->padLeft; }
int DeDepthwiseConv2D::GetPadRight() const { return this->primitive->value.AsDeDepthwiseConv2D()->padRight; }
int DeDepthwiseConv2D::GetDilateW() const { return this->primitive->value.AsDeDepthwiseConv2D()->dilateW; }
int DeDepthwiseConv2D::GetDilateH() const { return this->primitive->value.AsDeDepthwiseConv2D()->dilateH; }
bool DeDepthwiseConv2D::GetHasBias() const { return this->primitive->value.AsDeDepthwiseConv2D()->hasBias; }
int DeDepthwiseConv2D::GetActivationType() const {
return this->primitive->value.AsDeDepthwiseConv2D()->activationType;
}

void DeDepthwiseConv2D::SetFormat(int format) {
this->primitive->value.AsDeDepthwiseConv2D()->format = (schema::Format)format;
}
void DeDepthwiseConv2D::SetChannelIn(int channel_in) {
this->primitive->value.AsDeDepthwiseConv2D()->channelIn = channel_in;
}
void DeDepthwiseConv2D::SetChannelMultiplier(int channel_multiplier) {
this->primitive->value.AsDeDepthwiseConv2D()->channelMultiplier = channel_multiplier;
}
void DeDepthwiseConv2D::SetKernelW(int kernel_w) { this->primitive->value.AsDeDepthwiseConv2D()->kernelW = kernel_w; }
void DeDepthwiseConv2D::SetKernelH(int kernel_h) { this->primitive->value.AsDeDepthwiseConv2D()->kernelH = kernel_h; }
void DeDepthwiseConv2D::SetStrideW(int stride_w) { this->primitive->value.AsDeDepthwiseConv2D()->strideW = stride_w; }
void DeDepthwiseConv2D::SetStrideH(int stride_h) { this->primitive->value.AsDeDepthwiseConv2D()->strideH = stride_h; }
void DeDepthwiseConv2D::SetPadMode(int pad_mode) {
this->primitive->value.AsDeDepthwiseConv2D()->padMode = (schema::PadMode)pad_mode;
}
void DeDepthwiseConv2D::SetPadUp(int pad_up) { this->primitive->value.AsDeDepthwiseConv2D()->padUp = pad_up; }
void DeDepthwiseConv2D::SetPadDown(int pad_down) { this->primitive->value.AsDeDepthwiseConv2D()->padDown = pad_down; }
void DeDepthwiseConv2D::SetPadLeft(int pad_left) { this->primitive->value.AsDeDepthwiseConv2D()->padLeft = pad_left; }
void DeDepthwiseConv2D::SetPadRight(int pad_right) {
this->primitive->value.AsDeDepthwiseConv2D()->padRight = pad_right;
}
void DeDepthwiseConv2D::SetDilateW(int dilate_w) { this->primitive->value.AsDeDepthwiseConv2D()->dilateW = dilate_w; }
void DeDepthwiseConv2D::SetDilateH(int dilate_h) { this->primitive->value.AsDeDepthwiseConv2D()->dilateH = dilate_h; }
void DeDepthwiseConv2D::SetHasBias(bool has_bias) { this->primitive->value.AsDeDepthwiseConv2D()->hasBias = has_bias; }
void DeDepthwiseConv2D::SetActivationType(int activation_type) {
this->primitive->value.AsDeDepthwiseConv2D()->activationType = (schema::ActivationType)activation_type;
}

#else

// DeDepthwiseConv2D attribute accessors for the read-only build: getters read
// the immutable flatbuffer table; setters are intentional no-ops.
int DeDepthwiseConv2D::GetFormat() const { return this->primitive->value_as_DeDepthwiseConv2D()->format(); }
int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive->value_as_DeDepthwiseConv2D()->channelIn(); }
int DeDepthwiseConv2D::GetChannelMultiplier() const {
return this->primitive->value_as_DeDepthwiseConv2D()->channelMultiplier();
}
int DeDepthwiseConv2D::GetKernelW() const { return this->primitive->value_as_DeDepthwiseConv2D()->kernelW(); }
int DeDepthwiseConv2D::GetKernelH() const { return this->primitive->value_as_DeDepthwiseConv2D()->kernelH(); }
int DeDepthwiseConv2D::GetStrideW() const { return this->primitive->value_as_DeDepthwiseConv2D()->strideW(); }
int DeDepthwiseConv2D::GetStrideH() const { return this->primitive->value_as_DeDepthwiseConv2D()->strideH(); }
int DeDepthwiseConv2D::GetPadMode() const { return this->primitive->value_as_DeDepthwiseConv2D()->padMode(); }
int DeDepthwiseConv2D::GetPadUp() const { return this->primitive->value_as_DeDepthwiseConv2D()->padUp(); }
int DeDepthwiseConv2D::GetPadDown() const { return this->primitive->value_as_DeDepthwiseConv2D()->padDown(); }
int DeDepthwiseConv2D::GetPadLeft() const { return this->primitive->value_as_DeDepthwiseConv2D()->padLeft(); }
int DeDepthwiseConv2D::GetPadRight() const { return this->primitive->value_as_DeDepthwiseConv2D()->padRight(); }
int DeDepthwiseConv2D::GetDilateW() const { return this->primitive->value_as_DeDepthwiseConv2D()->dilateW(); }
int DeDepthwiseConv2D::GetDilateH() const { return this->primitive->value_as_DeDepthwiseConv2D()->dilateH(); }
bool DeDepthwiseConv2D::GetHasBias() const { return this->primitive->value_as_DeDepthwiseConv2D()->hasBias(); }
int DeDepthwiseConv2D::GetActivationType() const {
return this->primitive->value_as_DeDepthwiseConv2D()->activationType();
}

void DeDepthwiseConv2D::SetFormat(int format) {}
void DeDepthwiseConv2D::SetChannelIn(int channel_in) {}
void DeDepthwiseConv2D::SetChannelMultiplier(int channel_multiplier) {}
void DeDepthwiseConv2D::SetKernelW(int kernel_w) {}
void DeDepthwiseConv2D::SetKernelH(int kernel_h) {}
void DeDepthwiseConv2D::SetStrideW(int stride_w) {}
void DeDepthwiseConv2D::SetStrideH(int stride_h) {}
void DeDepthwiseConv2D::SetPadMode(int pad_mode) {}
void DeDepthwiseConv2D::SetPadUp(int pad_up) {}
void DeDepthwiseConv2D::SetPadDown(int pad_down) {}
void DeDepthwiseConv2D::SetPadLeft(int pad_left) {}
void DeDepthwiseConv2D::SetPadRight(int pad_right) {}
void DeDepthwiseConv2D::SetDilateW(int dilate_w) {}
void DeDepthwiseConv2D::SetDilateH(int dilate_h) {}
void DeDepthwiseConv2D::SetHasBias(bool has_bias) {}
void DeDepthwiseConv2D::SetActivationType(int activation_type) {}
#endif
// Infers the NHWC output shape of the depthwise transposed convolution.
// inputs_: [input, weight(, bias)]; outputs_: [output].
// Validates input/output counts and the channel-multiplier constraint, caches
// the explicit paddings in pad_u_/pad_d_/pad_l_/pad_r_, and derives the output
// spatial size. Returns 0 on success, 1 on invalid arguments.
int DeDepthwiseConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
                                  std::vector<lite::tensor::Tensor *> outputs_) {
  if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) {
    MS_LOG(ERROR) << "inputs number is invalid";
    return 1;
  }
  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "output number is invalid";
    return 1;
  }
  MS_ASSERT(this->primitive != nullptr);
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto weight = inputs_.at(1);
  MS_ASSERT(weight != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  auto in_shape = input->shape();
  int input_h = in_shape.at(1);
  int input_w = in_shape.at(2);
  int input_channel = in_shape.at(3);
  int output_w = 0, output_h = 0;

  pad_l_ = GetPadLeft();
  pad_u_ = GetPadUp();
  pad_d_ = GetPadDown();
  pad_r_ = GetPadRight();
  // Transposed-convolution output size: stride * (in - 1) + kernel - pads.
  // Fix: the original multiplied by the kernel size (stride*(in-1)*kernel),
  // inconsistent with DeConv2D::InferShape in this same module.
  output_h = GetStrideH() * (input_h - 1) + GetKernelH() - pad_u_ - pad_d_;
  output_w = GetStrideW() * (input_w - 1) + GetKernelW() - pad_l_ - pad_r_;
  // Compensate for rows/columns the forward convolution would have dropped.
  // Fix: the height adjustment previously mixed in the left/right pads
  // (copy-paste from the width branch); it must use the up/down pads.
  if ((output_h + GetPadUp() + GetPadDown() - GetKernelH()) % GetStrideH() != 0) {
    output_h += (output_h + GetPadUp() + GetPadDown() - GetKernelH()) % GetStrideH();
  }
  if ((output_w + GetPadLeft() + GetPadRight() - GetKernelW()) % GetStrideW() != 0) {
    output_w += (output_w + GetPadLeft() + GetPadRight() - GetKernelW()) % GetStrideW();
  }
  std::vector<int> out_shape{input->shape()};
  out_shape.at(1) = output_h;
  out_shape.at(2) = output_w;
  if (GetChannelMultiplier() * input_channel != weight->shape()[0]) {
    MS_LOG(ERROR) << "Conv depthwise only support group equals output channel.";
    return 1;
  }
  out_shape.at(3) = weight->shape()[0] * weight->shape()[3];  // in_channel * out_channel

  output->set_shape(out_shape);
  output->SetFormat(input->GetFormat());
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 86
- 0
mindspore/lite/c_ops/dedepthwise_conv2d.h View File

@@ -0,0 +1,86 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_DE_DEPTHWISE_CONV2_D_H_
#define LITE_MINDSPORE_LITE_C_OPS_DE_DEPTHWISE_CONV2_D_H_

// Fixed: the include guard previously came AFTER the includes, leaving the
// include list (and the schema #ifdef selection) outside the guard. The guard
// now wraps the entire header, per convention.
#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Depthwise transposed (de-)convolution primitive wrapper.
// Exposes the schema attributes via typed accessors; InferShape computes the
// output tensor shape and caches the effective paddings in pad_u_/pad_d_/pad_l_/pad_r_.
class DeDepthwiseConv2D : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit DeDepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit DeDepthwiseConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Computes the output shape from input/weight shapes and the stride/kernel/pad
  // attributes. Returns 0 on success, 1 on invalid inputs.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  // Schema attribute accessors (setters are no-ops in the read-only build).
  int GetFormat() const;
  int GetChannelIn() const;
  int GetChannelMultiplier() const;
  int GetKernelW() const;
  int GetKernelH() const;
  int GetStrideW() const;
  int GetStrideH() const;
  int GetPadMode() const;
  int GetPadUp() const;
  int GetPadDown() const;
  int GetPadLeft() const;
  int GetPadRight() const;
  int GetDilateW() const;
  int GetDilateH() const;
  bool GetHasBias() const;
  int GetActivationType() const;
  void SetFormat(int format);
  void SetChannelIn(int channel_in);
  void SetChannelMultiplier(int channel_multiplier);
  void SetKernelW(int kernel_w);
  void SetKernelH(int kernel_h);
  void SetStrideW(int stride_w);
  void SetStrideH(int stride_h);
  void SetPadMode(int pad_mode);
  void SetPadUp(int pad_up);
  void SetPadDown(int pad_down);
  void SetPadLeft(int pad_left);
  void SetPadRight(int pad_right);
  void SetDilateW(int dilate_w);
  void SetDilateH(int dilate_h);
  void SetHasBias(bool has_bias);
  void SetActivationType(int activation_type);

  // Effective paddings cached by InferShape (valid only after InferShape ran).
  int PadUp() const { return this->pad_u_; }
  int PadDown() const { return this->pad_d_; }
  int PadLeft() const { return this->pad_l_; }
  int PadRight() const { return this->pad_r_; }

 protected:
  int pad_u_ = 0;
  int pad_d_ = 0;
  int pad_l_ = 0;
  int pad_r_ = 0;
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_DE_DEPTHWISE_CONV2_D_H_

+ 75
- 0
mindspore/lite/c_ops/depth_to_space.cc View File

@@ -0,0 +1,75 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/depth_to_space.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
int DepthToSpace::GetBlockSize() const { return this->primitive->value.AsDepthToSpace()->blockSize; }
int DepthToSpace::GetFormat() const { return this->primitive->value.AsDepthToSpace()->format; }

void DepthToSpace::SetBlockSize(int block_size) { this->primitive->value.AsDepthToSpace()->blockSize = block_size; }
void DepthToSpace::SetFormat(int format) { this->primitive->value.AsDepthToSpace()->format = format; }

#else

int DepthToSpace::GetBlockSize() const { return this->primitive->value_as_DepthToSpace()->blockSize(); }
int DepthToSpace::GetFormat() const { return this->primitive->value_as_DepthToSpace()->format(); }

void DepthToSpace::SetBlockSize(int block_size) {}
void DepthToSpace::SetFormat(int format) {}
#endif
namespace {
constexpr int kDepthToSpaceOutputNum = 1;
constexpr int kDepthToSpaceInputNum = 1;
}  // namespace

// Infers the output shape of DepthToSpace: [N, H, W, C] -> [N, H*b, W*b, C/(b*b)].
// Only 4-D NHWC input is supported. Returns 0 on success, 1 on any validation error.
int DepthToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
  MS_ASSERT(this->primitive != nullptr);
  if (outputs.size() != kDepthToSpaceOutputNum || inputs.size() != kDepthToSpaceInputNum) {
    MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
    return 1;
  }

  auto input = inputs.at(0);
  if (input->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "depth_to_space only support NHWC now!";
    return 1;
  }
  auto input_shape = input->shape();
  if (input_shape.size() != kDimension_4d) {
    MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d;
    return 1;
  }

  int32_t block_size = GetBlockSize();
  // Guard against a non-positive block size before using it as a divisor below
  // (the original code would divide/modulo by zero on a malformed model).
  if (block_size <= 0) {
    MS_LOG(ERROR) << "block_size should be > 0, but got " << block_size;
    return 1;
  }
  if (input_shape[NHWC_C] % (block_size * block_size) != 0 || input_shape[NHWC_C] == 0) {
    // Message fixed: typo "mulitple" and unbalanced parentheses in the original.
    MS_LOG(ERROR) << "input dimension c size " << input_shape[NHWC_C] << " should be a multiple of block_size("
                  << block_size << ") * block_size!";
    return 1;
  }
  // Channel data is rearranged into spatial blocks: H and W grow by block_size,
  // C shrinks by block_size^2.
  std::vector<int32_t> output_shape(input_shape.size());
  output_shape[NHWC_N] = input_shape[NHWC_N];
  output_shape[NHWC_H] = input_shape[NHWC_H] * block_size;
  output_shape[NHWC_W] = input_shape[NHWC_W] * block_size;
  output_shape[NHWC_C] = input_shape[NHWC_C] / (block_size * block_size);
  outputs[0]->set_shape(output_shape);
  outputs[0]->set_data_type(input->data_type());
  outputs[0]->SetFormat(input->GetFormat());

  return 0;
}
} // namespace mindspore

+ 47
- 0
mindspore/lite/c_ops/depth_to_space.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_
#define LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_

// Fixed: the include guard previously came AFTER the includes; it now wraps the
// entire header, per convention.
#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// DepthToSpace primitive: rearranges channel data into spatial blocks.
// InferShape maps [N, H, W, C] -> [N, H*b, W*b, C/(b*b)] for block size b.
class DepthToSpace : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit DepthToSpace(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit DepthToSpace(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on invalid inputs (non-NHWC, wrong rank, bad channel count).
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  int GetBlockSize() const;
  int GetFormat() const;
  void SetBlockSize(int block_size);  // no-op in the read-only build
  void SetFormat(int format);         // no-op in the read-only build
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_

+ 164
- 0
mindspore/lite/c_ops/depthwise_conv2d.cc View File

@@ -0,0 +1,164 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/depthwise_conv2d.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
int DepthwiseConv2D::GetFormat() const { return this->primitive->value.AsDepthwiseConv2D()->format; }
int DepthwiseConv2D::GetChannelIn() const { return this->primitive->value.AsDepthwiseConv2D()->channelIn; }
int DepthwiseConv2D::GetChannelMultiplier() const {
return this->primitive->value.AsDepthwiseConv2D()->channelMultiplier;
}
int DepthwiseConv2D::GetKernelW() const { return this->primitive->value.AsDepthwiseConv2D()->kernelW; }
int DepthwiseConv2D::GetKernelH() const { return this->primitive->value.AsDepthwiseConv2D()->kernelH; }
int DepthwiseConv2D::GetStrideW() const { return this->primitive->value.AsDepthwiseConv2D()->strideW; }
int DepthwiseConv2D::GetStrideH() const { return this->primitive->value.AsDepthwiseConv2D()->strideH; }
int DepthwiseConv2D::GetPadMode() const { return this->primitive->value.AsDepthwiseConv2D()->padMode; }
int DepthwiseConv2D::GetPadUp() const { return this->primitive->value.AsDepthwiseConv2D()->padUp; }
int DepthwiseConv2D::GetPadDown() const { return this->primitive->value.AsDepthwiseConv2D()->padDown; }
int DepthwiseConv2D::GetPadLeft() const { return this->primitive->value.AsDepthwiseConv2D()->padLeft; }
int DepthwiseConv2D::GetPadRight() const { return this->primitive->value.AsDepthwiseConv2D()->padRight; }
int DepthwiseConv2D::GetDilateW() const { return this->primitive->value.AsDepthwiseConv2D()->dilateW; }
int DepthwiseConv2D::GetDilateH() const { return this->primitive->value.AsDepthwiseConv2D()->dilateH; }
bool DepthwiseConv2D::GetHasBias() const { return this->primitive->value.AsDepthwiseConv2D()->hasBias; }
int DepthwiseConv2D::GetActivationType() const { return this->primitive->value.AsDepthwiseConv2D()->activationType; }

void DepthwiseConv2D::SetFormat(int format) {
this->primitive->value.AsDepthwiseConv2D()->format = (schema::Format)format;
}
void DepthwiseConv2D::SetChannelIn(int channel_in) {
this->primitive->value.AsDepthwiseConv2D()->channelIn = channel_in;
}
void DepthwiseConv2D::SetChannelMultiplier(int channel_multiplier) {
this->primitive->value.AsDepthwiseConv2D()->channelMultiplier = channel_multiplier;
}
void DepthwiseConv2D::SetKernelW(int kernel_w) { this->primitive->value.AsDepthwiseConv2D()->kernelW = kernel_w; }
void DepthwiseConv2D::SetKernelH(int kernel_h) { this->primitive->value.AsDepthwiseConv2D()->kernelH = kernel_h; }
void DepthwiseConv2D::SetStrideW(int stride_w) { this->primitive->value.AsDepthwiseConv2D()->strideW = stride_w; }
void DepthwiseConv2D::SetStrideH(int stride_h) { this->primitive->value.AsDepthwiseConv2D()->strideH = stride_h; }
void DepthwiseConv2D::SetPadMode(int pad_mode) {
this->primitive->value.AsDepthwiseConv2D()->padMode = (schema::PadMode)pad_mode;
}
void DepthwiseConv2D::SetPadUp(int pad_up) { this->primitive->value.AsDepthwiseConv2D()->padUp = pad_up; }
void DepthwiseConv2D::SetPadDown(int pad_down) { this->primitive->value.AsDepthwiseConv2D()->padDown = pad_down; }
void DepthwiseConv2D::SetPadLeft(int pad_left) { this->primitive->value.AsDepthwiseConv2D()->padLeft = pad_left; }
void DepthwiseConv2D::SetPadRight(int pad_right) { this->primitive->value.AsDepthwiseConv2D()->padRight = pad_right; }
void DepthwiseConv2D::SetDilateW(int dilate_w) { this->primitive->value.AsDepthwiseConv2D()->dilateW = dilate_w; }
void DepthwiseConv2D::SetDilateH(int dilate_h) { this->primitive->value.AsDepthwiseConv2D()->dilateH = dilate_h; }
void DepthwiseConv2D::SetHasBias(bool has_bias) { this->primitive->value.AsDepthwiseConv2D()->hasBias = has_bias; }
void DepthwiseConv2D::SetActivationType(int activation_type) {
this->primitive->value.AsDepthwiseConv2D()->activationType = (schema::ActivationType)activation_type;
}

#else

int DepthwiseConv2D::GetFormat() const { return this->primitive->value_as_DepthwiseConv2D()->format(); }
int DepthwiseConv2D::GetChannelIn() const { return this->primitive->value_as_DepthwiseConv2D()->channelIn(); }
int DepthwiseConv2D::GetChannelMultiplier() const {
return this->primitive->value_as_DepthwiseConv2D()->channelMultiplier();
}
int DepthwiseConv2D::GetKernelW() const { return this->primitive->value_as_DepthwiseConv2D()->kernelW(); }
int DepthwiseConv2D::GetKernelH() const { return this->primitive->value_as_DepthwiseConv2D()->kernelH(); }
int DepthwiseConv2D::GetStrideW() const { return this->primitive->value_as_DepthwiseConv2D()->strideW(); }
int DepthwiseConv2D::GetStrideH() const { return this->primitive->value_as_DepthwiseConv2D()->strideH(); }
int DepthwiseConv2D::GetPadMode() const { return this->primitive->value_as_DepthwiseConv2D()->padMode(); }
int DepthwiseConv2D::GetPadUp() const { return this->primitive->value_as_DepthwiseConv2D()->padUp(); }
int DepthwiseConv2D::GetPadDown() const { return this->primitive->value_as_DepthwiseConv2D()->padDown(); }
int DepthwiseConv2D::GetPadLeft() const { return this->primitive->value_as_DepthwiseConv2D()->padLeft(); }
int DepthwiseConv2D::GetPadRight() const { return this->primitive->value_as_DepthwiseConv2D()->padRight(); }
int DepthwiseConv2D::GetDilateW() const { return this->primitive->value_as_DepthwiseConv2D()->dilateW(); }
int DepthwiseConv2D::GetDilateH() const { return this->primitive->value_as_DepthwiseConv2D()->dilateH(); }
bool DepthwiseConv2D::GetHasBias() const { return this->primitive->value_as_DepthwiseConv2D()->hasBias(); }
int DepthwiseConv2D::GetActivationType() const { return this->primitive->value_as_DepthwiseConv2D()->activationType(); }

void DepthwiseConv2D::SetFormat(int format) {}
void DepthwiseConv2D::SetChannelIn(int channel_in) {}
void DepthwiseConv2D::SetChannelMultiplier(int channel_multiplier) {}
void DepthwiseConv2D::SetKernelW(int kernel_w) {}
void DepthwiseConv2D::SetKernelH(int kernel_h) {}
void DepthwiseConv2D::SetStrideW(int stride_w) {}
void DepthwiseConv2D::SetStrideH(int stride_h) {}
void DepthwiseConv2D::SetPadMode(int pad_mode) {}
void DepthwiseConv2D::SetPadUp(int pad_up) {}
void DepthwiseConv2D::SetPadDown(int pad_down) {}
void DepthwiseConv2D::SetPadLeft(int pad_left) {}
void DepthwiseConv2D::SetPadRight(int pad_right) {}
void DepthwiseConv2D::SetDilateW(int dilate_w) {}
void DepthwiseConv2D::SetDilateH(int dilate_h) {}
void DepthwiseConv2D::SetHasBias(bool has_bias) {}
void DepthwiseConv2D::SetActivationType(int activation_type) {}
#endif
// Infers the output tensor shape for depthwise convolution.
// inputs_: [input, weight] or [input, weight, bias]; outputs_: [output].
// Side effect: caches the effective paddings in pad_u_/pad_d_/pad_l_/pad_r_
// (recomputed for SAME padding) for later retrieval via PadUp()/PadDown()/etc.
// Returns 0 on success, 1 on validation failure.
int DepthwiseConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
                                std::vector<lite::tensor::Tensor *> outputs_) {
  if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) {
    MS_LOG(ERROR) << "inputs number is invalid";
    return 1;
  }
  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "output number is invalid";
    return 1;
  }
  MS_ASSERT(this->primitive != nullptr);
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto weight = inputs_.at(1);
  MS_ASSERT(weight != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  // Input is indexed as [N, H, W, C] here (dims 1/2/3 = H/W/C).
  auto in_shape = input->shape();
  int input_h = in_shape.at(1);
  int input_w = in_shape.at(2);
  int input_channel = in_shape.at(3);
  int output_w = 0, output_h = 0;

  // Start from the explicit pad attributes; SAME mode overwrites them below.
  pad_l_ = GetPadLeft();
  pad_u_ = GetPadUp();
  pad_d_ = GetPadDown();
  pad_r_ = GetPadRight();
  if (GetPadMode() == schema::PadMode_SAME) {
    // SAME padding: output spatial size is ceil(input / stride); total padding
    // is derived from that and split asymmetrically (extra pixel goes to the
    // bottom/right side).
    output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
    output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
    auto pad_h_all = ((output_h - 1) * GetStrideH() + (GetKernelH() - 1) * GetDilateH() + 1 - input_h);
    auto pad_w_all = ((output_w - 1) * GetStrideW() + (GetKernelW() - 1) * GetDilateW() + 1 - input_w);
    pad_u_ = pad_h_all / 2;
    pad_d_ = pad_h_all - pad_u_;
    pad_l_ = pad_w_all / 2;
    pad_r_ = pad_w_all - pad_l_;
  } else {
    // Explicit padding: standard conv output-size formula with dilation
    // (ceil((in + pads - (k-1)*dilate) / stride)).
    output_h = std::ceil((static_cast<float>(input_h) + pad_u_ + pad_d_ -
                          (static_cast<float>(GetKernelH()) - 1) * static_cast<float>(GetDilateH())) /
                         static_cast<float>(GetStrideH()));
    output_w = std::ceil((static_cast<float>(input_w) + pad_l_ + pad_r_ -
                          (static_cast<float>(GetKernelW()) - 1) * static_cast<float>(GetDilateW())) /
                         static_cast<float>(GetStrideW()));
  }
  std::vector<int> out_shape{input->shape()};
  out_shape.at(1) = output_h;
  out_shape.at(2) = output_w;
  // NOTE(review): this rejects weights unless channel_multiplier * in_channel
  // == weight dim 0, yet the output channel below is computed as
  // weight dim 0 * weight dim 3 — these two only agree for a specific weight
  // layout; TODO confirm the expected weight tensor layout against the kernels.
  if (GetChannelMultiplier() * input_channel != weight->shape()[0]) {
    MS_LOG(ERROR) << "Conv depthwise only support group equals output channel.";
    return 1;
  }
  // Presumably weight dims 0 and 3 hold channel-multiplier and in-channel
  // factors, so their product is the output channel count — TODO confirm.
  out_shape.at(3) = weight->shape()[0] * weight->shape()[3];  // in_channel * out_channel

  output->set_shape(out_shape);
  output->SetFormat(input->GetFormat());
  output->set_data_type(input->data_type());
  return 0;
}
} // namespace mindspore

+ 86
- 0
mindspore/lite/c_ops/depthwise_conv2d.h View File

@@ -0,0 +1,86 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_DEPTHWISE_CONV2_D_H_
#define LITE_MINDSPORE_LITE_C_OPS_DEPTHWISE_CONV2_D_H_

// Fixed: the include guard previously came AFTER the includes; it now wraps the
// entire header, per convention.
#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Depthwise convolution primitive wrapper.
// Exposes the schema attributes via typed accessors; InferShape computes the
// output tensor shape and caches the effective paddings in pad_u_/pad_d_/pad_l_/pad_r_.
class DepthwiseConv2D : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit DepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit DepthwiseConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Computes the output shape from input/weight shapes and the stride/kernel/pad
  // attributes. Returns 0 on success, 1 on invalid inputs.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  // Schema attribute accessors (setters are no-ops in the read-only build).
  int GetFormat() const;
  int GetChannelIn() const;
  int GetChannelMultiplier() const;
  int GetKernelW() const;
  int GetKernelH() const;
  int GetStrideW() const;
  int GetStrideH() const;
  int GetPadMode() const;
  int GetPadUp() const;
  int GetPadDown() const;
  int GetPadLeft() const;
  int GetPadRight() const;
  int GetDilateW() const;
  int GetDilateH() const;
  bool GetHasBias() const;
  int GetActivationType() const;
  void SetFormat(int format);
  void SetChannelIn(int channel_in);
  void SetChannelMultiplier(int channel_multiplier);
  void SetKernelW(int kernel_w);
  void SetKernelH(int kernel_h);
  void SetStrideW(int stride_w);
  void SetStrideH(int stride_h);
  void SetPadMode(int pad_mode);
  void SetPadUp(int pad_up);
  void SetPadDown(int pad_down);
  void SetPadLeft(int pad_left);
  void SetPadRight(int pad_right);
  void SetDilateW(int dilate_w);
  void SetDilateH(int dilate_h);
  void SetHasBias(bool has_bias);
  void SetActivationType(int activation_type);

  // Effective paddings cached by InferShape (valid only after InferShape ran).
  int PadUp() const { return this->pad_u_; }
  int PadDown() const { return this->pad_d_; }
  int PadLeft() const { return this->pad_l_; }
  int PadRight() const { return this->pad_r_; }

 protected:
  int pad_u_ = 0;
  int pad_d_ = 0;
  int pad_l_ = 0;
  int pad_r_ = 0;
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_DEPTHWISE_CONV2_D_H_

+ 131
- 0
mindspore/lite/c_ops/detection_post_process.cc View File

@@ -0,0 +1,131 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/detection_post_process.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writable build: attributes are read from / written to the mutable PrimitiveT table.
int DetectionPostProcess::GetFormat() const { return this->primitive->value.AsDetectionPostProcess()->format; }
int DetectionPostProcess::GetInputSize() const { return this->primitive->value.AsDetectionPostProcess()->inputSize; }
float DetectionPostProcess::GetHScale() const { return this->primitive->value.AsDetectionPostProcess()->hScale; }
float DetectionPostProcess::GetWScale() const { return this->primitive->value.AsDetectionPostProcess()->wScale; }
float DetectionPostProcess::GetXScale() const { return this->primitive->value.AsDetectionPostProcess()->xScale; }
float DetectionPostProcess::GetYScale() const { return this->primitive->value.AsDetectionPostProcess()->yScale; }
float DetectionPostProcess::GetNmsIouThreshold() const {
  return this->primitive->value.AsDetectionPostProcess()->NmsIouThreshold;
}
float DetectionPostProcess::GetNmsScoreThreshold() const {
  return this->primitive->value.AsDetectionPostProcess()->NmsScoreThreshold;
}
long DetectionPostProcess::GetMaxDetections() const {
  return this->primitive->value.AsDetectionPostProcess()->MaxDetections;
}
long DetectionPostProcess::GetDetectionsPreClass() const {
  return this->primitive->value.AsDetectionPostProcess()->DetectionsPreClass;
}
long DetectionPostProcess::GetMaxClassesPreDetection() const {
  return this->primitive->value.AsDetectionPostProcess()->MaxClassesPreDetection;
}
long DetectionPostProcess::GetNumClasses() const { return this->primitive->value.AsDetectionPostProcess()->NumClasses; }
bool DetectionPostProcess::GetUseRegularNms() const {
  return this->primitive->value.AsDetectionPostProcess()->UseRegularNms;
}

void DetectionPostProcess::SetFormat(int format) {
  this->primitive->value.AsDetectionPostProcess()->format = (schema::Format)format;
}
void DetectionPostProcess::SetInputSize(int input_size) {
  this->primitive->value.AsDetectionPostProcess()->inputSize = input_size;
}
void DetectionPostProcess::SetHScale(float h_scale) {
  this->primitive->value.AsDetectionPostProcess()->hScale = h_scale;
}
void DetectionPostProcess::SetWScale(float w_scale) {
  this->primitive->value.AsDetectionPostProcess()->wScale = w_scale;
}
void DetectionPostProcess::SetXScale(float x_scale) {
  this->primitive->value.AsDetectionPostProcess()->xScale = x_scale;
}
void DetectionPostProcess::SetYScale(float y_scale) {
  this->primitive->value.AsDetectionPostProcess()->yScale = y_scale;
}
void DetectionPostProcess::SetNmsIouThreshold(float nms_iou_threshold) {
  this->primitive->value.AsDetectionPostProcess()->NmsIouThreshold = nms_iou_threshold;
}
void DetectionPostProcess::SetNmsScoreThreshold(float nms_score_threshold) {
  this->primitive->value.AsDetectionPostProcess()->NmsScoreThreshold = nms_score_threshold;
}
void DetectionPostProcess::SetMaxDetections(long max_detections) {
  // BUGFIX: this setter previously wrote MaxClassesPreDetection (copy-paste
  // error), silently dropping the configured detection limit and clobbering
  // the classes-per-detection attribute.
  this->primitive->value.AsDetectionPostProcess()->MaxDetections = max_detections;
}
void DetectionPostProcess::SetDetectionsPreClass(long detections_pre_class) {
  this->primitive->value.AsDetectionPostProcess()->DetectionsPreClass = detections_pre_class;
}
void DetectionPostProcess::SetMaxClassesPreDetection(long max_classes_pre_detection) {
  this->primitive->value.AsDetectionPostProcess()->MaxClassesPreDetection = max_classes_pre_detection;
}
void DetectionPostProcess::SetNumClasses(long num_classes) {
  this->primitive->value.AsDetectionPostProcess()->NumClasses = num_classes;
}
void DetectionPostProcess::SetUseRegularNms(bool use_regular_nms) {
  this->primitive->value.AsDetectionPostProcess()->UseRegularNms = use_regular_nms;
}

#else

// Read-only build: attributes come from the immutable flatbuffer; setters are no-ops.
int DetectionPostProcess::GetFormat() const { return this->primitive->value_as_DetectionPostProcess()->format(); }
int DetectionPostProcess::GetInputSize() const { return this->primitive->value_as_DetectionPostProcess()->inputSize(); }
float DetectionPostProcess::GetHScale() const { return this->primitive->value_as_DetectionPostProcess()->hScale(); }
float DetectionPostProcess::GetWScale() const { return this->primitive->value_as_DetectionPostProcess()->wScale(); }
float DetectionPostProcess::GetXScale() const { return this->primitive->value_as_DetectionPostProcess()->xScale(); }
float DetectionPostProcess::GetYScale() const { return this->primitive->value_as_DetectionPostProcess()->yScale(); }
float DetectionPostProcess::GetNmsIouThreshold() const {
  return this->primitive->value_as_DetectionPostProcess()->NmsIouThreshold();
}
float DetectionPostProcess::GetNmsScoreThreshold() const {
  return this->primitive->value_as_DetectionPostProcess()->NmsScoreThreshold();
}
long DetectionPostProcess::GetMaxDetections() const {
  return this->primitive->value_as_DetectionPostProcess()->MaxDetections();
}
long DetectionPostProcess::GetDetectionsPreClass() const {
  return this->primitive->value_as_DetectionPostProcess()->DetectionsPreClass();
}
long DetectionPostProcess::GetMaxClassesPreDetection() const {
  return this->primitive->value_as_DetectionPostProcess()->MaxClassesPreDetection();
}
long DetectionPostProcess::GetNumClasses() const {
  return this->primitive->value_as_DetectionPostProcess()->NumClasses();
}
bool DetectionPostProcess::GetUseRegularNms() const {
  return this->primitive->value_as_DetectionPostProcess()->UseRegularNms();
}

void DetectionPostProcess::SetFormat(int format) {}
void DetectionPostProcess::SetInputSize(int input_size) {}
void DetectionPostProcess::SetHScale(float h_scale) {}
void DetectionPostProcess::SetWScale(float w_scale) {}
void DetectionPostProcess::SetXScale(float x_scale) {}
void DetectionPostProcess::SetYScale(float y_scale) {}
void DetectionPostProcess::SetNmsIouThreshold(float nms_iou_threshold) {}
void DetectionPostProcess::SetNmsScoreThreshold(float nms_score_threshold) {}
void DetectionPostProcess::SetMaxDetections(long max_detections) {}
void DetectionPostProcess::SetDetectionsPreClass(long detections_pre_class) {}
void DetectionPostProcess::SetMaxClassesPreDetection(long max_classes_pre_detection) {}
void DetectionPostProcess::SetNumClasses(long num_classes) {}
void DetectionPostProcess::SetUseRegularNms(bool use_regular_nms) {}
#endif
} // namespace mindspore

+ 68
- 0
mindspore/lite/c_ops/detection_post_process.h View File

@@ -0,0 +1,68 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_
#define LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_

// Fixed: the include guard previously came AFTER the includes; it now wraps the
// entire header, per convention.
#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// SSD-style detection post-processing primitive wrapper (box decoding + NMS
// parameters). Pure attribute accessors; setters are no-ops in the read-only
// build. NOTE(review): "Pre" in DetectionsPreClass/MaxClassesPreDetection looks
// like it means "Per" — the schema field names cannot be changed here.
class DetectionPostProcess : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit DetectionPostProcess(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit DetectionPostProcess(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int GetFormat() const;
  int GetInputSize() const;
  float GetHScale() const;
  float GetWScale() const;
  float GetXScale() const;
  float GetYScale() const;
  float GetNmsIouThreshold() const;
  float GetNmsScoreThreshold() const;
  long GetMaxDetections() const;
  long GetDetectionsPreClass() const;
  long GetMaxClassesPreDetection() const;
  long GetNumClasses() const;
  bool GetUseRegularNms() const;
  void SetFormat(int format);
  void SetInputSize(int input_size);
  void SetHScale(float h_scale);
  void SetWScale(float w_scale);
  void SetXScale(float x_scale);
  void SetYScale(float y_scale);
  void SetNmsIouThreshold(float nms_iou_threshold);
  void SetNmsScoreThreshold(float nms_score_threshold);
  void SetMaxDetections(long max_detections);
  void SetDetectionsPreClass(long detections_pre_class);
  void SetMaxClassesPreDetection(long max_classes_pre_detection);
  void SetNumClasses(long num_classes);
  void SetUseRegularNms(bool use_regular_nms);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_

+ 33
- 0
mindspore/lite/c_ops/div.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/div.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
int Div::GetActivationType() const { return this->primitive->value.AsDiv()->activationType; }

void Div::SetActivationType(int activation_type) {
this->primitive->value.AsDiv()->activationType = (schema::ActivationType)activation_type;
}

#else

int Div::GetActivationType() const { return this->primitive->value_as_Div()->activationType(); }

void Div::SetActivationType(int activation_type) {}
#endif
} // namespace mindspore

+ 46
- 0
mindspore/lite/c_ops/div.h View File

@@ -0,0 +1,46 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_DIV_H_
#define LITE_MINDSPORE_LITE_C_OPS_DIV_H_

// Fixed: the include guard previously came AFTER the includes; it now wraps the
// entire header, per convention.
#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"

#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Element-wise division primitive; shape inference is inherited from the
// broadcasting logic declared on the Arithmetic base.
class Div : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Div(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  explicit Div(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  int GetActivationType() const;
  void SetActivationType(int activation_type);  // no-op in the read-only build
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_DIV_H_

+ 31
- 0
mindspore/lite/c_ops/dropout.cc View File

@@ -0,0 +1,31 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/dropout.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
float Dropout::GetRatio() const { return this->primitive->value.AsDropout()->ratio; }

void Dropout::SetRatio(float ratio) { this->primitive->value.AsDropout()->ratio = ratio; }

#else

float Dropout::GetRatio() const { return this->primitive->value_as_Dropout()->ratio(); }

void Dropout::SetRatio(float ratio) {}
#endif
} // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/dropout.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_DROPOUT_H_
#define LITE_MINDSPORE_LITE_C_OPS_DROPOUT_H_

// Fixed: the include guard previously came AFTER the includes; it now wraps the
// entire header, per convention.
#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Dropout primitive wrapper exposing the drop ratio attribute.
class Dropout : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Dropout(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Dropout(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  float GetRatio() const;
  void SetRatio(float ratio);  // no-op in the read-only build
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_DROPOUT_H_

+ 31
- 0
mindspore/lite/c_ops/eltwise.cc View File

@@ -0,0 +1,31 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/eltwise.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: the attribute lives in the mutable PrimitiveT union.
int Eltwise::GetMode() const {
  auto attr = this->primitive->value.AsEltwise();
  return attr->mode;
}

void Eltwise::SetMode(int mode) {
  auto attr = this->primitive->value.AsEltwise();
  attr->mode = static_cast<schema::EltwiseMode>(mode);
}
#else
// Read-only build: the attribute is fetched from the flatbuffer table.
int Eltwise::GetMode() const {
  auto attr = this->primitive->value_as_Eltwise();
  return attr->mode();
}

// Flatbuffer-backed primitives are immutable, so this setter is a deliberate no-op.
void Eltwise::SetMode(int mode) {}
#endif
}  // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/eltwise.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_
#define LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema Eltwise primitive; exposes the `mode` attribute.
class Eltwise : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Eltwise(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Eltwise(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns the element-wise mode (schema::EltwiseMode stored as int).
  int GetMode() const;
  // Effective only in PRIMITIVE_WRITEABLE builds; no-op otherwise.
  void SetMode(int mode);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_

+ 31
- 0
mindspore/lite/c_ops/elu.cc View File

@@ -0,0 +1,31 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/elu.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: the attribute lives in the mutable PrimitiveT union.
float Elu::GetAlpha() const {
  auto attr = this->primitive->value.AsElu();
  return attr->alpha;
}

void Elu::SetAlpha(float alpha) {
  auto attr = this->primitive->value.AsElu();
  attr->alpha = alpha;
}
#else
// Read-only build: the attribute is fetched from the flatbuffer table.
float Elu::GetAlpha() const {
  auto attr = this->primitive->value_as_Elu();
  return attr->alpha();
}

// Flatbuffer-backed primitives are immutable, so this setter is a deliberate no-op.
void Elu::SetAlpha(float alpha) {}
#endif
}  // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/elu.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_ELU_H_
#define LITE_MINDSPORE_LITE_C_OPS_ELU_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema Elu primitive; exposes the `alpha` attribute.
class Elu : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Elu(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Elu(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  float GetAlpha() const;
  // Effective only in PRIMITIVE_WRITEABLE builds; no-op otherwise.
  void SetAlpha(float alpha);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_ELU_H_

+ 72
- 0
mindspore/lite/c_ops/embedding_lookup.cc View File

@@ -0,0 +1,72 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/embedding_lookup.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
float EmbeddingLookup::GetMaxNorm() const { return this->primitive->value.AsEmbeddingLookup()->maxNorm; }

void EmbeddingLookup::SetMaxNorm(float max_norm) { this->primitive->value.AsEmbeddingLookup()->maxNorm = max_norm; }

#else

float EmbeddingLookup::GetMaxNorm() const { return this->primitive->value_as_EmbeddingLookup()->maxNorm(); }

// Flatbuffer-backed primitives are read-only; the setter is a deliberate no-op.
void EmbeddingLookup::SetMaxNorm(float max_norm) {}
#endif
// Infers the output shape for EmbeddingLookup.
// inputs_: one or more embedding tables followed by the ids tensor (last element).
// outputs_: a single tensor shaped ids.shape + table.shape[1:].
// Returns 0 on success, 1 on validation failure.
int EmbeddingLookup::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
                                std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs_.size() < kDoubleNum) {
    MS_LOG(ERROR) << "Embedding Lookup should have at least two inputs";
    return 1;
  }

  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "Embedding Lookup should have one outputs";
    return 1;
  }

  auto params_ = inputs_.front();
  MS_ASSERT(params_ != nullptr);
  auto ids = inputs_.back();
  MS_ASSERT(ids != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  // Per-row shape of the first embedding table (drop the leading vocab dimension).
  auto embedding_shape = params_->shape();
  embedding_shape.erase(embedding_shape.begin());

  // Output shape = ids shape followed by the per-row embedding shape.
  std::vector<int> output_shape(ids->shape());
  for (size_t i = 0; i < embedding_shape.size(); ++i) {
    output_shape.push_back(embedding_shape.at(i));
  }

  // Every additional embedding table (inputs 1 .. n-2) must share the per-row shape.
  // size_t index fixes the signed/unsigned comparison of the original `int` loop.
  for (size_t i = 1; i + 1 < inputs_.size(); ++i) {
    auto embedding_shape_t = inputs_.at(i)->shape();
    embedding_shape_t.erase(embedding_shape_t.begin());
    if (embedding_shape_t != embedding_shape) {
      MS_LOG(ERROR) << "The embedded layers should have the same shape";
      return 1;
    }
  }

  output->set_shape(output_shape);
  output->set_data_type(params_->data_type());
  return 0;
}
}  // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/embedding_lookup.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_
#define LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema EmbeddingLookup primitive; exposes `maxNorm`
// and shape inference (output = ids.shape + table.shape[1:]).
class EmbeddingLookup : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit EmbeddingLookup(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit EmbeddingLookup(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  float GetMaxNorm() const;
  // Effective only in PRIMITIVE_WRITEABLE builds; no-op otherwise.
  void SetMaxNorm(float max_norm);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_

+ 57
- 0
mindspore/lite/c_ops/embedding_lookup_sparse.cc View File

@@ -0,0 +1,57 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/embedding_lookup_sparse.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes live in the mutable PrimitiveT union.
std::vector<int> EmbeddingLookupSparse::GetSpIds() const {
  auto attr = this->primitive->value.AsEmbeddingLookupSparse();
  return attr->spIds;
}

std::vector<float> EmbeddingLookupSparse::GetSpWeights() const {
  auto attr = this->primitive->value.AsEmbeddingLookupSparse();
  return attr->spWeights;
}

// NOTE(review): "Nortm" mirrors the schema field name `maxNortm` (sic);
// renaming would break the schema-facing interface.
float EmbeddingLookupSparse::GetMaxNortm() const {
  auto attr = this->primitive->value.AsEmbeddingLookupSparse();
  return attr->maxNortm;
}

void EmbeddingLookupSparse::SetSpIds(const std::vector<int> &sp_ids) {
  auto attr = this->primitive->value.AsEmbeddingLookupSparse();
  attr->spIds = sp_ids;
}

void EmbeddingLookupSparse::SetSpWeights(const std::vector<float> &sp_weights) {
  auto attr = this->primitive->value.AsEmbeddingLookupSparse();
  attr->spWeights = sp_weights;
}

void EmbeddingLookupSparse::SetMaxNortm(float max_nortm) {
  auto attr = this->primitive->value.AsEmbeddingLookupSparse();
  attr->maxNortm = max_nortm;
}

#else

// Read-only build: vector attributes are copied out of the flatbuffer table.
std::vector<int> EmbeddingLookupSparse::GetSpIds() const {
  auto fb_vector = this->primitive->value_as_EmbeddingLookupSparse()->spIds();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}

std::vector<float> EmbeddingLookupSparse::GetSpWeights() const {
  auto fb_vector = this->primitive->value_as_EmbeddingLookupSparse()->spWeights();
  return std::vector<float>(fb_vector->begin(), fb_vector->end());
}

float EmbeddingLookupSparse::GetMaxNortm() const {
  return this->primitive->value_as_EmbeddingLookupSparse()->maxNortm();
}

// Flatbuffer-backed primitives are immutable, so the setters are deliberate no-ops.
void EmbeddingLookupSparse::SetSpIds(const std::vector<int> &sp_ids) {}
void EmbeddingLookupSparse::SetSpWeights(const std::vector<float> &sp_weights) {}
void EmbeddingLookupSparse::SetMaxNortm(float max_nortm) {}
#endif
}  // namespace mindspore

+ 48
- 0
mindspore/lite/c_ops/embedding_lookup_sparse.h View File

@@ -0,0 +1,48 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_SPARSE_H_
#define LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_SPARSE_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema EmbeddingLookupSparse primitive; exposes the
// `spIds`, `spWeights` and `maxNortm` attributes ("Nortm" mirrors the schema
// field name, sic).
class EmbeddingLookupSparse : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit EmbeddingLookupSparse(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit EmbeddingLookupSparse(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  std::vector<int> GetSpIds() const;
  std::vector<float> GetSpWeights() const;
  float GetMaxNortm() const;
  // Setters are effective only in PRIMITIVE_WRITEABLE builds; no-ops otherwise.
  void SetSpIds(const std::vector<int> &sp_ids);
  void SetSpWeights(const std::vector<float> &sp_weights);
  void SetMaxNortm(float max_nortm);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_SPARSE_H_

+ 38
- 0
mindspore/lite/c_ops/equal.h View File

@@ -0,0 +1,38 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_
#define LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Element-wise equality; broadcasting/shape inference is inherited from Arithmetic.
class Equal : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable builds construct ops from the mutable schema::PrimitiveT, exactly
  // like every sibling op (Exp, FloorDiv, FloorMod, ...). The original class
  // only declared the schema::Primitive* ctor and could not compile when
  // PRIMITIVE_WRITEABLE is defined.
  explicit Equal(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  explicit Equal(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_

+ 42
- 0
mindspore/lite/c_ops/exp.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_EXP_H_
#define LITE_MINDSPORE_LITE_C_OPS_EXP_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic_self.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Element-wise exponential; unary shape inference is inherited from ArithmeticSelf.
class Exp : public ArithmeticSelf {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Exp(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else
  explicit Exp(schema::Primitive *primitive) : ArithmeticSelf(primitive) {}
#endif
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_EXP_H_

+ 60
- 0
mindspore/lite/c_ops/expand_dims.cc View File

@@ -0,0 +1,60 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/expand_dims.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
int ExpandDims::GetDim() const { return this->primitive->value.AsExpandDims()->dim; }

void ExpandDims::SetDim(int dim) { this->primitive->value.AsExpandDims()->dim = dim; }

#else

int ExpandDims::GetDim() const { return this->primitive->value_as_ExpandDims()->dim(); }

// Flatbuffer-backed primitives are read-only; the setter is a deliberate no-op.
void ExpandDims::SetDim(int dim) {}
#endif
// Infers the output shape: inserts an axis of size 1 at position `dim`
// (negative dims count from the end of the OUTPUT rank, hence the +1).
// Returns 0 on success, 1 on validation failure.
int ExpandDims::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  // Validate counts before touching front(): the original dereferenced front()
  // first and, on a size mismatch, only logged without returning an error.
  if (inputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "input size is invalid";
    return 1;
  }
  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "output size is invalid";
    return 1;
  }
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  int dim = GetDim();
  const int rank = static_cast<int>(input->shape().size());
  if (dim < 0) {
    dim += rank + 1;
  }
  // static_cast avoids the signed/unsigned comparison of the original code,
  // and dim < 0 now also rejects overly negative attributes.
  if (dim < 0 || dim > rank) {
    MS_LOG(ERROR) << "attribute dim out of range";
    return 1;
  }
  auto out_shape = input->shape();
  out_shape.insert(out_shape.begin() + dim, 1, 1);
  output->set_shape(out_shape);
  output->set_data_type(input->data_type());
  output->SetFormat(input->GetFormat());

  return 0;
}
}  // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/expand_dims.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_
#define LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema ExpandDims primitive; inserts an axis of size 1
// at the position given by the `dim` attribute (negative values count from the end).
class ExpandDims : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit ExpandDims(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit ExpandDims(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  int GetDim() const;
  // Effective only in PRIMITIVE_WRITEABLE builds; no-op otherwise.
  void SetDim(int dim);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_

+ 45
- 0
mindspore/lite/c_ops/fake_quant_with_min_max_vars.cc View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/fake_quant_with_min_max_vars.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes live in the mutable PrimitiveT union.
bool FakeQuantWithMinMaxVars::GetNarrowRange() const {
  auto attr = this->primitive->value.AsFakeQuantWithMinMaxVars();
  return attr->narrowRange;
}

int FakeQuantWithMinMaxVars::GetNumBits() const {
  auto attr = this->primitive->value.AsFakeQuantWithMinMaxVars();
  return attr->numBits;
}

void FakeQuantWithMinMaxVars::SetNarrowRange(bool narrow_range) {
  auto attr = this->primitive->value.AsFakeQuantWithMinMaxVars();
  attr->narrowRange = narrow_range;
}

void FakeQuantWithMinMaxVars::SetNumBits(int num_bits) {
  auto attr = this->primitive->value.AsFakeQuantWithMinMaxVars();
  attr->numBits = num_bits;
}

#else

// Read-only build: attributes are fetched from the flatbuffer table.
bool FakeQuantWithMinMaxVars::GetNarrowRange() const {
  return this->primitive->value_as_FakeQuantWithMinMaxVars()->narrowRange();
}

int FakeQuantWithMinMaxVars::GetNumBits() const {
  return this->primitive->value_as_FakeQuantWithMinMaxVars()->numBits();
}

// Flatbuffer-backed primitives are immutable, so the setters are deliberate no-ops.
void FakeQuantWithMinMaxVars::SetNarrowRange(bool narrow_range) {}
void FakeQuantWithMinMaxVars::SetNumBits(int num_bits) {}
#endif
}  // namespace mindspore

+ 46
- 0
mindspore/lite/c_ops/fake_quant_with_min_max_vars.h View File

@@ -0,0 +1,46 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_
#define LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema FakeQuantWithMinMaxVars primitive; exposes the
// `narrowRange` and `numBits` attributes.
class FakeQuantWithMinMaxVars : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit FakeQuantWithMinMaxVars(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit FakeQuantWithMinMaxVars(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  bool GetNarrowRange() const;
  int GetNumBits() const;
  // Setters are effective only in PRIMITIVE_WRITEABLE builds; no-ops otherwise.
  void SetNarrowRange(bool narrow_range);
  void SetNumBits(int num_bits);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_

+ 56
- 0
mindspore/lite/c_ops/fill.cc View File

@@ -0,0 +1,56 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/fill.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
std::vector<int> Fill::GetDims() const { return this->primitive->value.AsFill()->dims; }

void Fill::SetDims(const std::vector<int> &dims) { this->primitive->value.AsFill()->dims = dims; }

#else

std::vector<int> Fill::GetDims() const {
  auto fb_vector = this->primitive->value_as_Fill()->dims();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}

// Flatbuffer-backed primitives are read-only; the setter is a deliberate no-op.
void Fill::SetDims(const std::vector<int> &dims) {}
#endif
// Infers the output shape for Fill: the output shape is the `dims` attribute;
// dtype and format follow the input. Returns 0 on success, 1 on failure.
int Fill::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  // Validate counts before calling front(): the original dereferenced front()
  // of possibly-empty vectors before checking sizes (undefined behavior).
  if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size();
    return 1;
  }
  auto input = inputs_.front();
  auto output = outputs_.front();
  if (input == nullptr || output == nullptr) {
    MS_LOG(ERROR) << "Fill input or output is null!";
    return 1;
  }

  // Call GetDims() exactly once: the original passed GetDims().begin() and
  // GetDims().end() — iterators into two DIFFERENT temporary vectors, which
  // is undefined behavior.
  auto dims = GetDims();
  std::vector<int> output_shape(dims.begin(), dims.end());
  output->set_shape(output_shape);
  output->set_data_type(input->data_type());
  output->SetFormat(input->GetFormat());

  return 0;
}
}  // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/fill.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_FILL_H_
#define LITE_MINDSPORE_LITE_C_OPS_FILL_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema Fill primitive; the `dims` attribute defines
// the output tensor shape.
class Fill : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Fill(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Fill(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  std::vector<int> GetDims() const;
  // Effective only in PRIMITIVE_WRITEABLE builds; no-op otherwise.
  void SetDims(const std::vector<int> &dims);
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_FILL_H_

+ 47
- 0
mindspore/lite/c_ops/flatten.cc View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/flatten.h"

namespace mindspore {
// Infers the output shape for Flatten: keeps dimension 0 and collapses all
// remaining dimensions into a single one (output rank is always 2).
// dtype and format follow the input. Returns 0 on success, 1 on failure.
int Flatten::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  // Validate counts before calling front(): the original dereferenced front()
  // of possibly-empty vectors before checking sizes (undefined behavior).
  if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size();
    return 1;
  }
  auto input = inputs_.front();
  auto output = outputs_.front();
  if (input == nullptr || output == nullptr) {
    MS_LOG(ERROR) << "Flatten input or output is null!";
    return 1;
  }

  auto input_shape = input->shape();
  // Guard input_shape[0]: the original indexed element 0 without checking rank.
  if (input_shape.empty()) {
    MS_LOG(ERROR) << "Flatten input shape is empty";
    return 1;
  }
  std::vector<int> output_shape(2);
  output_shape[0] = input_shape[0];
  output_shape[1] = 1;
  // size_t index fixes the signed/unsigned comparison of the original int loop.
  for (size_t i = 1; i < input_shape.size(); i++) {
    output_shape[1] *= input_shape[i];
  }
  output->set_shape(output_shape);
  output->set_data_type(input->data_type());
  output->SetFormat(input->GetFormat());

  return 0;
}
}  // namespace mindspore

+ 43
- 0
mindspore/lite/c_ops/flatten.h View File

@@ -0,0 +1,43 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_
#define LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// C++ wrapper over the schema Flatten primitive; keeps dim 0 and collapses
// the remaining dimensions into one.
class Flatten : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Flatten(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  explicit Flatten(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_

+ 42
- 0
mindspore/lite/c_ops/floor.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_
#define LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic_self.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Element-wise floor; unary shape inference is inherited from ArithmeticSelf.
class Floor : public ArithmeticSelf {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit Floor(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else
  explicit Floor(schema::Primitive *primitive) : ArithmeticSelf(primitive) {}
#endif
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_

+ 42
- 0
mindspore/lite/c_ops/floor_div.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_
#define LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

namespace mindspore {
// Element-wise floor division; broadcasting/shape inference is inherited from Arithmetic.
class FloorDiv : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  explicit FloorDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  explicit FloorDiv(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_

+ 42
- 0
mindspore/lite/c_ops/floor_mod.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_
#define LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_

namespace mindspore {
// Element-wise FloorMod op. No attributes of its own; the schema primitive is
// forwarded to Arithmetic, which supplies the binary-op shape inference.
class FloorMod : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit FloorMod(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit FloorMod(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_

+ 80
- 0
mindspore/lite/c_ops/full_connection.cc View File

@@ -0,0 +1,80 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/full_connection.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
bool FullConnection::GetHasBias() const { return this->primitive->value.AsFullConnection()->hasBias; }
int FullConnection::GetAxis() const { return this->primitive->value.AsFullConnection()->axis; }
bool FullConnection::GetUseAxis() const { return this->primitive->value.AsFullConnection()->useAxis; }

void FullConnection::SetHasBias(bool has_bias) { this->primitive->value.AsFullConnection()->hasBias = has_bias; }
void FullConnection::SetAxis(int axis) { this->primitive->value.AsFullConnection()->axis = axis; }
void FullConnection::SetUseAxis(bool use_axis) { this->primitive->value.AsFullConnection()->useAxis = use_axis; }

#else

// Read-only build: attributes come from the immutable flatbuffer table,
// so the setters are intentionally no-ops.
bool FullConnection::GetHasBias() const { return this->primitive->value_as_FullConnection()->hasBias(); }
int FullConnection::GetAxis() const { return this->primitive->value_as_FullConnection()->axis(); }
bool FullConnection::GetUseAxis() const { return this->primitive->value_as_FullConnection()->useAxis(); }

void FullConnection::SetHasBias(bool has_bias) {}
void FullConnection::SetAxis(int axis) {}
void FullConnection::SetUseAxis(bool use_axis) {}
#endif
// Infers the output shape of a fully-connected layer.
// inputs_:  [0] data tensor, [1] weight tensor (out_channels x new_k),
//           [2] optional bias (present only when hasBias is set).
// outputs_: [0] result; input0 dims before `axis` followed by out_channels.
// Returns 0 on success, 1 on any validation failure.
int FullConnection::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
                               std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  // Validate tensor counts BEFORE dereferencing inputs_.at(1): the original
  // accessed the second input first, which throws/UB when it is missing
  // instead of taking the guarded error path below.
  if ((GetHasBias() && inputs_.size() != kMultiNum) || (!GetHasBias() && inputs_.size() != kDoubleNum)) {
    MS_LOG(ERROR) << "Input tensors num error";
    return 1;
  }
  auto input0 = inputs_.front();
  MS_ASSERT(input0 != nullptr);
  auto input1 = inputs_.at(1);
  MS_ASSERT(input1 != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  // `axis` splits input0 into [kept dims | contracted dims]; it must lie
  // inside the tensor's rank (cast avoids a signed/unsigned comparison).
  if (GetAxis() < 1 || GetAxis() > static_cast<int>(input0->shape().size())) {
    MS_LOG(ERROR) << "FullConnection axis invalid";
    return 1;
  }
  // new_k = product of all input0 dims from `axis` on: the length of the
  // vectors the weight matrix multiplies.
  int new_k = 1;
  for (size_t i = GetAxis(); i < input0->shape().size(); ++i) {
    new_k *= input0->shape().at(i);
  }
  // The weight must be 2-D and its second dim must match the contracted size
  // (rank check prevents .at(1) throwing instead of returning the error code).
  if (input1->shape().size() < 2 || new_k != input1->shape().at(1)) {
    MS_LOG(ERROR) << "Input1 size invalid";
    return 1;
  }
  if (GetHasBias()) {
    // Bias length must equal the number of output channels.
    if (inputs_.at(2)->shape()[0] != input1->shape()[0]) {
      MS_LOG(ERROR) << "bias size invalid";
      return 1;
    }
  }
  // Output keeps input0's leading dims and replaces everything from `axis`
  // on with the weight's out_channels.
  std::vector<int> out_shape{inputs_[0]->shape()};
  out_shape.resize(GetAxis() + 1);
  out_shape[GetAxis()] = input1->shape()[0];
  output->set_shape(out_shape);
  output->set_data_type(input0->data_type());
  output->SetFormat(input0->GetFormat());

  return 0;
}
} // namespace mindspore

+ 49
- 0
mindspore/lite/c_ops/full_connection.h View File

@@ -0,0 +1,49 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_
#define LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_

namespace mindspore {
// FullConnection (fully-connected / inner-product) primitive wrapper.
// Attributes: hasBias (third input present), axis (first contracted dim of
// input0), useAxis. InferShape validates inputs and derives the output shape.
class FullConnection : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit FullConnection(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit FullConnection(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  bool GetHasBias() const;
  int GetAxis() const;
  bool GetUseAxis() const;
  // Setters only take effect in PRIMITIVE_WRITEABLE builds.
  void SetHasBias(bool has_bias);
  void SetAxis(int axis);
  void SetUseAxis(bool use_axis);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_

+ 39
- 0
mindspore/lite/c_ops/fused_batchnorm.cc View File

@@ -0,0 +1,39 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/fused_batchnorm.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
float FusedBatchNorm::GetEpsilon() const { return this->primitive->value.AsFusedBatchNorm()->epsilon; }
float FusedBatchNorm::GetMomentum() const { return this->primitive->value.AsFusedBatchNorm()->momentum; }
int FusedBatchNorm::GetSpatial() const { return this->primitive->value.AsFusedBatchNorm()->spatial; }

void FusedBatchNorm::SetEpsilon(float epsilon) { this->primitive->value.AsFusedBatchNorm()->epsilon = epsilon; }
void FusedBatchNorm::SetMomentum(float momentum) { this->primitive->value.AsFusedBatchNorm()->momentum = momentum; }
void FusedBatchNorm::SetSpatial(int spatial) { this->primitive->value.AsFusedBatchNorm()->spatial = spatial; }

#else

// Read-only build: attributes come from the immutable flatbuffer table,
// so the setters are intentionally no-ops.
float FusedBatchNorm::GetEpsilon() const { return this->primitive->value_as_FusedBatchNorm()->epsilon(); }
float FusedBatchNorm::GetMomentum() const { return this->primitive->value_as_FusedBatchNorm()->momentum(); }
int FusedBatchNorm::GetSpatial() const { return this->primitive->value_as_FusedBatchNorm()->spatial(); }

void FusedBatchNorm::SetEpsilon(float epsilon) {}
void FusedBatchNorm::SetMomentum(float momentum) {}
void FusedBatchNorm::SetSpatial(int spatial) {}
#endif
}  // namespace mindspore

+ 48
- 0
mindspore/lite/c_ops/fused_batchnorm.h View File

@@ -0,0 +1,48 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_FUSED_BATCH_NORM_H_
#define LITE_MINDSPORE_LITE_C_OPS_FUSED_BATCH_NORM_H_

namespace mindspore {
// FusedBatchNorm primitive wrapper exposing the schema attributes
// epsilon, momentum and spatial.
class FusedBatchNorm : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit FusedBatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit FusedBatchNorm(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  float GetEpsilon() const;
  float GetMomentum() const;
  int GetSpatial() const;
  // Setters only take effect in PRIMITIVE_WRITEABLE builds.
  void SetEpsilon(float epsilon);
  void SetMomentum(float momentum);
  void SetSpatial(int spatial);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_FUSED_BATCH_NORM_H_

+ 87
- 0
mindspore/lite/c_ops/gather.cc View File

@@ -0,0 +1,87 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/gather.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
int Gather::GetAxis() const { return this->primitive->value.AsGather()->axis; }
int Gather::GetBatchDims() const { return this->primitive->value.AsGather()->batchDims; }

void Gather::SetAxis(int axis) { this->primitive->value.AsGather()->axis = axis; }
void Gather::SetBatchDims(int batch_dims) { this->primitive->value.AsGather()->batchDims = batch_dims; }

#else

// Read-only build: attributes come from the immutable flatbuffer table,
// so the setters are intentionally no-ops.
int Gather::GetAxis() const { return this->primitive->value_as_Gather()->axis(); }
int Gather::GetBatchDims() const { return this->primitive->value_as_Gather()->batchDims(); }

void Gather::SetAxis(int axis) {}
void Gather::SetBatchDims(int batch_dims) {}
#endif
// Infers the Gather output shape: input dims with dim `axis` replaced by the
// whole indices shape. Only batchDims == 0 is supported.
// Returns 0 on success, 1 on any validation failure.
int Gather::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs_.size() != kDoubleNum) {
    MS_LOG(ERROR) << "Gather should have two inputs";
    return 1;
  }
  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "Gather should have one outputs";
    return 1;
  }

  auto input = inputs_.at(0);
  MS_ASSERT(input != nullptr);
  auto indices = inputs_.at(1);
  MS_ASSERT(indices != nullptr);  // was re-asserting `input` (copy-paste)
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  int axis = GetAxis();
  int batch_dims = GetBatchDims();
  // A negative axis counts from the back of input's rank.
  if (axis < 0) {
    axis += input->shape().size();
  }
  auto indices_shape = indices->shape();
  int indices_rank = indices_shape.size();
  if (indices_rank < batch_dims + 1) {
    MS_LOG(ERROR) << "input[1]'s rank is less than batchDim + 1";
    return 1;
  }
  if (batch_dims != 0) {
    MS_LOG(ERROR) << "batchDims " << batch_dims << " != 0, which is not support";
    return 1;
  }
  auto in_shape = input->shape();
  int in_rank = in_shape.size();
  if (in_rank < axis + 1) {
    MS_LOG(ERROR) << "input[0]'s rank is less than axis + 1";
    return 1;
  }

  std::vector<int> out_shape{in_shape};
  out_shape.erase(out_shape.begin() + axis);
  // Insert starting from the LAST indices dim so that repeated insertion at
  // position `axis` leaves the dims in their original order. The previous
  // forward loop produced them reversed for indices of rank > 1 (and mixed
  // size_t/int in the loop condition).
  for (int i = indices_rank - 1; i >= 0; --i) {
    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
  }

  output->set_shape(out_shape);
  output->set_data_type(input->data_type());
  output->SetFormat(input->GetFormat());

  return 0;
}
} // namespace mindspore

+ 47
- 0
mindspore/lite/c_ops/gather.h View File

@@ -0,0 +1,47 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_GATHER_H_
#define LITE_MINDSPORE_LITE_C_OPS_GATHER_H_

namespace mindspore {
// Gather primitive wrapper exposing the schema attributes axis and batchDims.
// InferShape derives the output shape from the data and indices tensors.
class Gather : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit Gather(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit Gather(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  int GetAxis() const;
  int GetBatchDims() const;
  // Setters only take effect in PRIMITIVE_WRITEABLE builds.
  void SetAxis(int axis);
  void SetBatchDims(int batch_dims);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_GATHER_H_

+ 74
- 0
mindspore/lite/c_ops/gather_nd.cc View File

@@ -0,0 +1,74 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/gather_nd.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: the attribute is read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
int GatherNd::GetBatchDims() const { return this->primitive->value.AsGatherNd()->batchDims; }

void GatherNd::SetBatchDims(int batch_dims) { this->primitive->value.AsGatherNd()->batchDims = batch_dims; }

#else

// Read-only build: the attribute comes from the immutable flatbuffer table,
// so the setter is intentionally a no-op.
int GatherNd::GetBatchDims() const { return this->primitive->value_as_GatherNd()->batchDims(); }

void GatherNd::SetBatchDims(int batch_dims) {}
#endif
// Infers the GatherNd output shape: the indices' leading (batch) dims
// followed by the input dims not addressed by the last indices dim.
// Returns 0 on success, 1 on any validation failure.
int GatherNd::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  if (inputs_.size() != kDoubleNum) {
    MS_LOG(ERROR) << "GatherNd should have two inputs";
    return 1;
  }
  if (outputs_.size() != kSingleNum) {
    MS_LOG(ERROR) << "GatherNd should have one outputs";
    return 1;
  }

  auto input = inputs_.at(0);
  MS_ASSERT(input != nullptr);
  auto indices = inputs_.at(1);
  MS_ASSERT(indices != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  auto in_shape = input->shape();
  int in_rank = in_shape.size();
  auto indices_shape = indices->shape();
  int indices_rank = indices_shape.size();

  // Guard before reading indices_shape[indices_rank - 1]: a scalar (rank-0)
  // indices tensor would otherwise index out of bounds.
  if (indices_rank < 1) {
    MS_LOG(ERROR) << "Input of indices data is error!";
    return 1;
  }
  // The last indices dim selects coordinates into `input`; it cannot address
  // more dims than the input has.
  if (indices_shape[indices_rank - 1] > in_rank) {
    MS_LOG(ERROR) << "Input of indices data is error!";
    return 1;
  }

  // Output = indices batch dims + the input dims left un-indexed.
  std::vector<int> out_shape;
  for (int i = 0; i < indices_rank - 1; ++i) {
    out_shape.emplace_back(indices_shape[i]);
  }
  for (int i = indices_shape[indices_rank - 1]; i < in_rank; ++i) {
    out_shape.emplace_back(in_shape[i]);
  }

  output->set_shape(out_shape);
  output->set_data_type(input->data_type());
  output->SetFormat(input->GetFormat());

  return 0;
}
} // namespace mindspore

+ 45
- 0
mindspore/lite/c_ops/gather_nd.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_
#define LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_

namespace mindspore {
// GatherNd primitive wrapper exposing the schema attribute batchDims.
// InferShape derives the output shape from the data and indices tensors.
class GatherNd : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit GatherNd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit GatherNd(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  // Returns 0 on success, 1 on validation failure.
  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
  int GetBatchDims() const;
  // Setter only takes effect in PRIMITIVE_WRITEABLE builds.
  void SetBatchDims(int batch_dims);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_

+ 42
- 0
mindspore/lite/c_ops/greater.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_GREATER_H_
#define LITE_MINDSPORE_LITE_C_OPS_GREATER_H_

namespace mindspore {
// Greater comparison op wrapper. No attributes of its own; the schema
// primitive is forwarded to Arithmetic, which supplies shape inference.
class Greater : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit Greater(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit Greater(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_GREATER_H_

+ 42
- 0
mindspore/lite/c_ops/greater_equal.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_
#define LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_

namespace mindspore {
// GreaterEqual comparison op wrapper. No attributes of its own; the schema
// primitive is forwarded to Arithmetic, which supplies shape inference.
class GreaterEqual : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit GreaterEqual(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit GreaterEqual(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_

+ 38
- 0
mindspore/lite/c_ops/l2_norm.cc View File

@@ -0,0 +1,38 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/l2_norm.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
std::vector<int> L2Norm::GetAxis() const { return this->primitive->value.AsL2Norm()->axis; }
float L2Norm::GetEpsilon() const { return this->primitive->value.AsL2Norm()->epsilon; }

void L2Norm::SetAxis(const std::vector<int> &axis) { this->primitive->value.AsL2Norm()->axis = axis; }
void L2Norm::SetEpsilon(float epsilon) { this->primitive->value.AsL2Norm()->epsilon = epsilon; }

#else

// Read-only build: the axis list is copied out of the flatbuffer vector;
// the setters are intentionally no-ops.
std::vector<int> L2Norm::GetAxis() const {
  auto fb_vector = this->primitive->value_as_L2Norm()->axis();
  return std::vector<int>(fb_vector->begin(), fb_vector->end());
}
float L2Norm::GetEpsilon() const { return this->primitive->value_as_L2Norm()->epsilon(); }

void L2Norm::SetAxis(const std::vector<int> &axis) {}
void L2Norm::SetEpsilon(float epsilon) {}
#endif
}  // namespace mindspore

+ 46
- 0
mindspore/lite/c_ops/l2_norm.h View File

@@ -0,0 +1,46 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_
#define LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_

namespace mindspore {
// L2Norm primitive wrapper exposing the schema attributes axis (list of
// dims to normalize over) and epsilon.
class L2Norm : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit L2Norm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit L2Norm(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  std::vector<int> GetAxis() const;
  float GetEpsilon() const;
  // Setters only take effect in PRIMITIVE_WRITEABLE builds.
  void SetAxis(const std::vector<int> &axis);
  void SetEpsilon(float epsilon);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_

+ 33
- 0
mindspore/lite/c_ops/leaky_relu.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/leaky_relu.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: the attribute is read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
float LeakyReLU::GetNegativeSlope() const { return this->primitive->value.AsLeakyReLU()->negativeSlope; }

void LeakyReLU::SetNegativeSlope(float negative_slope) {
  this->primitive->value.AsLeakyReLU()->negativeSlope = negative_slope;
}

#else

// Read-only build: the attribute comes from the immutable flatbuffer table,
// so the setter is intentionally a no-op.
float LeakyReLU::GetNegativeSlope() const { return this->primitive->value_as_LeakyReLU()->negativeSlope(); }

void LeakyReLU::SetNegativeSlope(float negative_slope) {}
#endif
}  // namespace mindspore

+ 44
- 0
mindspore/lite/c_ops/leaky_relu.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_
#define LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_

namespace mindspore {
// LeakyReLU primitive wrapper exposing the schema attribute negativeSlope.
class LeakyReLU : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit LeakyReLU(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit LeakyReLU(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  float GetNegativeSlope() const;
  // Setter only takes effect in PRIMITIVE_WRITEABLE builds.
  void SetNegativeSlope(float negative_slope);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_

+ 42
- 0
mindspore/lite/c_ops/less.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_LESS_H_
#define LITE_MINDSPORE_LITE_C_OPS_LESS_H_

namespace mindspore {
// Less comparison op wrapper. No attributes of its own; the schema
// primitive is forwarded to Arithmetic, which supplies shape inference.
class Less : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit Less(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit Less(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_LESS_H_

+ 42
- 0
mindspore/lite/c_ops/less_equal.h View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "c_ops/arithmetic.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_
#define LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_

namespace mindspore {
// LessEqual comparison op wrapper. No attributes of its own; the schema
// primitive is forwarded to Arithmetic, which supplies shape inference.
class LessEqual : public Arithmetic {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit LessEqual(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit LessEqual(schema::Primitive *primitive) : Arithmetic(primitive) {}
#endif
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_

+ 67
- 0
mindspore/lite/c_ops/local_response_normalization.cc View File

@@ -0,0 +1,67 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "c_ops/local_response_normalization.h"

namespace mindspore {
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read from / written to the mutable
// flatbuffer object (schema::PrimitiveT).
int LocalResponseNormalization::GetDepthRadius() const {
  return this->primitive->value.AsLocalResponseNormalization()->depth_radius;
}
float LocalResponseNormalization::GetBias() const {
  return this->primitive->value.AsLocalResponseNormalization()->bias;
}
float LocalResponseNormalization::GetAlpha() const {
  return this->primitive->value.AsLocalResponseNormalization()->alpha;
}
float LocalResponseNormalization::GetBeta() const {
  return this->primitive->value.AsLocalResponseNormalization()->beta;
}

void LocalResponseNormalization::SetDepthRadius(int depth_radius) {
  this->primitive->value.AsLocalResponseNormalization()->depth_radius = depth_radius;
}
void LocalResponseNormalization::SetBias(float bias) {
  this->primitive->value.AsLocalResponseNormalization()->bias = bias;
}
void LocalResponseNormalization::SetAlpha(float alpha) {
  this->primitive->value.AsLocalResponseNormalization()->alpha = alpha;
}
void LocalResponseNormalization::SetBeta(float beta) {
  this->primitive->value.AsLocalResponseNormalization()->beta = beta;
}

#else

// Read-only build: attributes come from the immutable flatbuffer table,
// so the setters are intentionally no-ops.
int LocalResponseNormalization::GetDepthRadius() const {
  return this->primitive->value_as_LocalResponseNormalization()->depth_radius();
}
float LocalResponseNormalization::GetBias() const {
  return this->primitive->value_as_LocalResponseNormalization()->bias();
}
float LocalResponseNormalization::GetAlpha() const {
  return this->primitive->value_as_LocalResponseNormalization()->alpha();
}
float LocalResponseNormalization::GetBeta() const {
  return this->primitive->value_as_LocalResponseNormalization()->beta();
}

void LocalResponseNormalization::SetDepthRadius(int depth_radius) {}
void LocalResponseNormalization::SetBias(float bias) {}
void LocalResponseNormalization::SetAlpha(float alpha) {}
void LocalResponseNormalization::SetBeta(float beta) {}
#endif
}  // namespace mindspore

+ 50
- 0
mindspore/lite/c_ops/local_response_normalization.h View File

@@ -0,0 +1,50 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "mindspore/lite/c_ops/primitive_c.h"
#ifdef PRIMITIVE_WRITEABLE
#include "schema/inner/model_generated.h"
#else
#include "schema/model_generated.h"
#endif

#ifndef LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_
#define LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_

namespace mindspore {
// LocalResponseNormalization (LRN) primitive wrapper exposing the schema
// attributes depth_radius, bias, alpha and beta.
class LocalResponseNormalization : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  // Writeable build: wraps the mutable flatbuffer object (schema::PrimitiveT).
  explicit LocalResponseNormalization(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  // Read-only build: wraps the immutable flatbuffer table (schema::Primitive).
  explicit LocalResponseNormalization(schema::Primitive *primitive) : PrimitiveC(primitive) {}
#endif
  int GetDepthRadius() const;
  float GetBias() const;
  float GetAlpha() const;
  float GetBeta() const;
  // Setters only take effect in PRIMITIVE_WRITEABLE builds.
  void SetDepthRadius(int depth_radius);
  void SetBias(float bias);
  void SetAlpha(float alpha);
  void SetBeta(float beta);
};
}  // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save