
!8833 [MS][LITE] Add parsers for Adder and Range; fix Clip and Range

From: @gongdaguo
Reviewed-by: 
Signed-off-by:
tags/v1.1.0
mindspore-ci-bot · Gitee · 5 years ago · commit 9c03f0e67c
16 changed files with 429 additions and 28 deletions
  1. mindspore/lite/nnacl/adder.c (+18, -0)
  2. mindspore/lite/nnacl/adder.h (+34, -0)
  3. mindspore/lite/nnacl/fp32/activation_fp32.c (+12, -8)
  4. mindspore/lite/schema/model.fbs (+1, -0)
  5. mindspore/lite/schema/ops.fbs (+5, -2)
  6. mindspore/lite/src/ops/adder.cc (+71, -0)
  7. mindspore/lite/src/ops/adder.h (+43, -0)
  8. mindspore/lite/src/ops/populate/adder_populate.cc (+38, -0)
  9. mindspore/lite/src/ops/primitive_c.cc (+3, -0)
  10. mindspore/lite/src/ops/range.cc (+8, -3)
  11. mindspore/lite/src/runtime/kernel/arm/fp32/range_fp32.cc (+13, -1)
  12. mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.cc (+47, -0)
  13. mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.h (+34, -0)
  14. mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.cc (+48, -0)
  15. mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.h (+34, -0)
  16. mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc (+20, -14)

mindspore/lite/nnacl/adder.c (+18, -0)

@@ -0,0 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/adder.h"
#include "nnacl/errorcode.h"

mindspore/lite/nnacl/adder.h (+34, -0)

@@ -0,0 +1,34 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_NNACL_ADDER_H_
#define MINDSPORE_LITE_NNACL_ADDER_H_

#include <math.h>
#include "nnacl/op_base.h"
#include "nnacl/quantization/fixed_point.h"

typedef struct AdderParameter {
  OpParameter op_parameter_;
} AdderParameter;

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __cplusplus
}
#endif
#endif // MINDSPORE_LITE_NNACL_ADDER_H_

mindspore/lite/nnacl/fp32/activation_fp32.c (+12, -8)

@@ -15,6 +15,7 @@
  */
 
 #include "nnacl/fp32/activation_fp32.h"
+#include <float.h>
 #include "nnacl/errorcode.h"
 
 int Fp32Relu(const float *src, int length, float *dst) {
@@ -150,14 +151,17 @@ int HardTanh(const float *src, int length, float *dst, float min_val, float max_val) {
     return NNACL_ERR;
   }
   int i = 0;
-  for (i = 0; i < length; ++i) {
-    float in = src[i];
-    if (in < min_val) {
-      dst[i] = min_val;
-    } else if (in > max_val) {
-      dst[i] = max_val;
-    } else {
-      dst[i] = in;
-    }
-  }
+  if (min_val == FLT_MIN) {
+    for (i = 0; i < length; ++i) {
+      dst[i] = src[i] > max_val ? max_val : src[i];
+    }
+  } else if (max_val == FLT_MAX) {
+    for (i = 0; i < length; ++i) {
+      dst[i] = src[i] < min_val ? min_val : src[i];
+    }
+  } else {
+    for (i = 0; i < length; ++i) {
+      dst[i] = src[i] < min_val ? min_val : (src[i] > max_val ? max_val : src[i]);
+    }
+  }
   return NNACL_OK;
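
Note on the HardTanh rewrite: FLT_MIN and FLT_MAX are used as sentinels meaning "no lower bound" / "no upper bound", which is what the updated clip pass at the bottom of this change passes in for one-sided Clip nodes. A minimal standalone sketch of the same clamping rule (an illustration in C++, not the nnacl code itself):

#include <cfloat>
#include <iostream>

// Sketch of the clamping rule in the rewritten HardTanh (illustration only):
// min_val == FLT_MIN means "no lower bound", max_val == FLT_MAX means "no upper bound".
static float ClampLikeHardTanh(float in, float min_val, float max_val) {
  if (min_val == FLT_MIN) {
    return in > max_val ? max_val : in;
  }
  if (max_val == FLT_MAX) {
    return in < min_val ? min_val : in;
  }
  return in < min_val ? min_val : (in > max_val ? max_val : in);
}

int main() {
  std::cout << ClampLikeHardTanh(8.0f, FLT_MIN, 6.0f) << std::endl;   // 6: only the upper bound clips
  std::cout << ClampLikeHardTanh(-2.0f, 0.0f, FLT_MAX) << std::endl;  // 0: only the lower bound clips
  return 0;
}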


mindspore/lite/schema/model.fbs (+1, -0)

@@ -251,6 +251,7 @@ union PrimitiveType {
     TensorListReserve,
     All,
     Assert,
+    Adder,
 }
 
 enum QuantType: int {


mindspore/lite/schema/ops.fbs (+5, -2)

@@ -674,7 +674,7 @@ table Range {
     dType: int;
     start: int;
     limit: int;
-    delta: int;
+    delta: int = 1;
 }
 
 table ExpandDims {
@@ -1176,4 +1176,7 @@ table All {
 
 table Assert {
     summarize : int;
 }
+
+table Adder {
+}

mindspore/lite/src/ops/adder.cc (+71, -0)

@@ -0,0 +1,71 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/ops/adder.h"

#ifndef PRIMITIVE_WRITEABLE
#include "src/ops/ops_register.h"
#endif

namespace mindspore {
namespace lite {
#ifdef PRIMITIVE_WRITEABLE

#else
int Adder::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto attr = primitive->value_as_Adder();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Adder return nullptr";
    return RET_ERROR;
  }
  auto val_offset = schema::CreateAdder(*fbb);
  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Adder, val_offset.o);
  fbb->Finish(prim_offset);
  return RET_OK;
}

PrimitiveC *AdderCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Adder>(primitive); }
Registry AdderRegistry(schema::PrimitiveType_Adder, AdderCreator);
#endif

int Adder::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
  MS_ASSERT(this->primitive_ != nullptr);
  MS_ASSERT(inputs_.size() == 2);
  auto input0 = inputs_.front();
  MS_ASSERT(input0 != nullptr);
  MS_ASSERT(input0->shape().size() == 2);
  auto input1 = inputs_.at(1);
  MS_ASSERT(input1 != nullptr);
  MS_ASSERT(input1->shape().size() == 2);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);

  output->set_data_type(input0->data_type());
  output->set_format(input0->format());
  if (!infer_flag()) {
    return RET_OK;
  }
  std::vector<int> in_shape;
  in_shape.push_back(input0->shape().at(0));
  in_shape.push_back(input1->shape().at(1));
  output->set_shape(in_shape);

  return RET_OK;
}
} // namespace lite
} // namespace mindspore
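
Shape inference for Adder follows a matmul-style rule: both inputs are asserted to be 2-D, and the output takes input0's row count and input1's column count. A small illustrative example (the shapes below are made up):

#include <iostream>
#include <vector>

// Illustration of the rule in Adder::InferShape: output = {input0.shape[0], input1.shape[1]}.
int main() {
  std::vector<int> input0_shape = {32, 128};  // e.g. 32 rows of 128 features
  std::vector<int> input1_shape = {128, 10};  // e.g. a 128 x 10 weight matrix
  std::vector<int> out_shape = {input0_shape.at(0), input1_shape.at(1)};
  std::cout << out_shape[0] << " x " << out_shape[1] << std::endl;  // prints "32 x 10"
  return 0;
}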

mindspore/lite/src/ops/adder.h (+43, -0)

@@ -0,0 +1,43 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_MINDSPORE_LITE_C_OPS_ADDER_H_
#define LITE_MINDSPORE_LITE_C_OPS_ADDER_H_

#include <vector>
#include <set>
#include <cmath>
#include "src/ops/primitive_c.h"

namespace mindspore {
namespace lite {
class Adder : public PrimitiveC {
 public:
  Adder() = default;
  ~Adder() = default;
#ifdef PRIMITIVE_WRITEABLE
  MS_DECLARE_PARENT(Adder, PrimitiveC);
  explicit Adder(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}

#else
  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
};
} // namespace lite
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_ADDER_H_

mindspore/lite/src/ops/populate/adder_populate.cc (+38, -0)

@@ -0,0 +1,38 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/ops/adder.h"
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
#include "nnacl/adder.h"

namespace mindspore {
namespace lite {

OpParameter *PopulateAdderParameter(const mindspore::lite::PrimitiveC *primitive) {
  auto *adder_param = reinterpret_cast<AdderParameter *>(malloc(sizeof(AdderParameter)));
  if (adder_param == nullptr) {
    MS_LOG(ERROR) << "malloc AdderParameter failed.";
    return nullptr;
  }
  memset(adder_param, 0, sizeof(AdderParameter));
  adder_param->op_parameter_.type_ = primitive->Type();
  return reinterpret_cast<OpParameter *>(adder_param);
}
Registry AdderParameterRegistry(schema::PrimitiveType_Adder, PopulateAdderParameter);

} // namespace lite
} // namespace mindspore

mindspore/lite/src/ops/primitive_c.cc (+3, -0)

@@ -132,6 +132,7 @@
#include "src/ops/hashtable_lookup.h" #include "src/ops/hashtable_lookup.h"
#include "src/ops/skip_gram.h" #include "src/ops/skip_gram.h"
#include "src/ops/clip.h" #include "src/ops/clip.h"
#include "src/ops/adder.h"
#include "src/ops/custom_predict.h" #include "src/ops/custom_predict.h"
#include "src/ops/custom_normalize.h" #include "src/ops/custom_normalize.h"
#include "src/ops/custom_extract_features.h" #include "src/ops/custom_extract_features.h"
@@ -858,6 +859,8 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
return new (std::nothrow) SkipGram(primitive); return new (std::nothrow) SkipGram(primitive);
case schema::PrimitiveType_Clip: case schema::PrimitiveType_Clip:
return new (std::nothrow) Clip(primitive); return new (std::nothrow) Clip(primitive);
case schema::PrimitiveType_Adder:
return new (std::nothrow) Adder(primitive);
case schema::PrimitiveType_CustomPredict: case schema::PrimitiveType_CustomPredict:
return new (std::nothrow) CustomPredict(primitive); return new (std::nothrow) CustomPredict(primitive);
case schema::PrimitiveType_CustomNormalize: case schema::PrimitiveType_CustomNormalize:


mindspore/lite/src/ops/range.cc (+8, -3)

@@ -64,14 +64,19 @@ int Range::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
 
-  output->set_data_type(input->data_type());
+  output->set_data_type(mindspore::kNumberTypeFloat32);
   output->set_format(input->format());
   if (!infer_flag()) {
     return RET_OK;
   }
 
-  int shape_size = std::ceil(static_cast<float>(GetLimit() - GetStart()) / GetDelta());
-  std::vector<int> in_shape(1);
+  int shape_size = 0;
+  if (inputs_.size() == 3) {
+    shape_size = -1;
+  } else {
+    shape_size = std::ceil(static_cast<float>(GetLimit() - GetStart()) / GetDelta());
+  }
+  std::vector<int> in_shape;
   in_shape.push_back(shape_size);
   output->set_shape(in_shape);
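
Reviewer note on the shape rule: when start/limit/delta still come from the primitive attributes, the output length is ceil((limit - start) / delta); with three runtime input tensors the length cannot be known at infer time, so it is left as -1. A small illustrative computation (the values are made up):

#include <cmath>
#include <iostream>

// Illustration of the attribute-based branch in Range::InferShape:
// shape_size = ceil((limit - start) / delta).
int main() {
  int start = 3, limit = 18, delta = 4;
  int shape_size = static_cast<int>(std::ceil(static_cast<float>(limit - start) / delta));
  std::cout << shape_size << std::endl;  // ceil(15 / 4) = 4, i.e. the values 3, 7, 11, 15
  return 0;
}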




mindspore/lite/src/runtime/kernel/arm/fp32/range_fp32.cc (+13, -1)

@@ -35,7 +35,19 @@ int RangeCPUKernel::Run() {
   size_t start = (reinterpret_cast<RangeParameter *>(op_parameter_))->start_;
   size_t limit = (reinterpret_cast<RangeParameter *>(op_parameter_))->limit_;
   size_t delta = (reinterpret_cast<RangeParameter *>(op_parameter_))->delta_;
-  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
+  if (in_tensors_.size() == 3) {
+    if ((in_tensors_.at(0)->data_type() == mindspore::kNumberTypeInt32) &&
+        (in_tensors_.at(1)->data_type() == mindspore::kNumberTypeInt32) &&
+        (in_tensors_.at(2)->data_type() == mindspore::kNumberTypeInt32)) {
+      start = *reinterpret_cast<int *>(in_tensors_.at(0)->data_c());
+      limit = *reinterpret_cast<int *>(in_tensors_.at(1)->data_c());
+      delta = *reinterpret_cast<int *>(in_tensors_.at(2)->data_c());
+    } else {
+      MS_LOG(ERROR) << "Unsupported parameter type : " << in_tensors_.at(0)->data_type() << ".";
+      return RET_ERROR;
+    }
+  }
+  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->data_c());
   Range(output_ptr, start, limit, delta);
   return RET_OK;
 }
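
The nnacl Range() kernel itself is not touched by this diff; as a rough sketch of what the call above presumably does (an assumption, not the actual implementation), it fills the output with the arithmetic sequence start, start + delta, ... below limit:

#include <iostream>
#include <vector>

// Hypothetical stand-in for nnacl's Range(output, start, limit, delta): fills the output
// with start, start + delta, start + 2 * delta, ... for values below limit.
void RangeSketch(std::vector<float> *output, int start, int limit, int delta) {
  for (int v = start; v < limit; v += delta) {
    output->push_back(static_cast<float>(v));
  }
}

int main() {
  std::vector<float> out;
  RangeSketch(&out, 3, 18, 4);
  for (float v : out) {
    std::cout << v << " ";  // prints "3 7 11 15"
  }
  std::cout << std::endl;
  return 0;
}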


mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.cc (+47, -0)

@@ -0,0 +1,47 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "tools/converter/parser/onnx/onnx_adder_parser.h"
#include <memory>

namespace mindspore {
namespace lite {
STATUS OnnxAdderParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                              schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx AdderParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (op->primitive == nullptr) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }

  auto attr = std::make_unique<schema::AdderT>();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  op->primitive->value.type = schema::PrimitiveType_Adder;
  op->primitive->value.value = attr.release();
  return RET_OK;
}

OnnxNodeRegistrar g_onnxAdderParser("adder_f", new OnnxAdderParser());
} // namespace lite
} // namespace mindspore

mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.h (+34, -0)

@@ -0,0 +1,34 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ADDER_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ADDER_PARSER_H

#include "tools/converter/parser/onnx/onnx_node_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"

namespace mindspore {
namespace lite {
class OnnxAdderParser : public OnnxNodeParser {
 public:
  OnnxAdderParser() : OnnxNodeParser("Adder") {}
  ~OnnxAdderParser() override = default;

  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ADDER_PARSER_H

mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.cc (+48, -0)

@@ -0,0 +1,48 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "tools/converter/parser/onnx/onnx_range_parser.h"
#include <memory>

namespace mindspore {
namespace lite {
STATUS OnnxRangeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                              schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx RangeParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (op->primitive == nullptr) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }

  std::unique_ptr<schema::RangeT> attr = std::make_unique<schema::RangeT>();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  attr->dType = 0;
  op->primitive->value.type = schema::PrimitiveType_Range;
  op->primitive->value.value = attr.release();
  return RET_OK;
}

OnnxNodeRegistrar g_onnxRangeParser("Range", new OnnxRangeParser());
} // namespace lite
} // namespace mindspore

mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.h (+34, -0)

@@ -0,0 +1,34 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RANGE_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RANGE_PARSER_H

#include "tools/converter/parser/onnx/onnx_node_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"

namespace mindspore {
namespace lite {
class OnnxRangeParser : public OnnxNodeParser {
 public:
  OnnxRangeParser() : OnnxNodeParser("Range") {}
  ~OnnxRangeParser() override = default;

  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RANGE_PARSER_H

mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc (+20, -14)

@@ -42,9 +42,7 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
       continue;
     }
     auto clip_cnode = node->cast<CNodePtr>();
-    MS_ASSERT(clip_cnode->inputs().size() > kClipMinIndex);
-    MS_ASSERT(clip_cnode->inputs().size() > kClipMaxIndex);
-
+    MS_ASSERT(clip_cnode->size() >= kClipMinIndex);
     auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(clip_cnode->input(0));
     MS_ASSERT(primitive_c != nullptr);
     auto primT = primitive_c->primitiveT();
@@ -55,19 +53,27 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
     float max = primT->value.AsClip()->max;
     float min = primT->value.AsClip()->min;
     if ((min == -1) && (max == -1)) {
-      if (clip_cnode->size() != 4) {
-        MS_LOG(ERROR) << "Clip param invalid";
-        return false;
+      if (clip_cnode->size() > kClipMinIndex) {
+        auto min_param_value = GetLiteParamValue(clip_cnode->input(kClipMinIndex));
+        if (min_param_value->tensor_type() != mindspore::kNumberTypeFloat32) {
+          MS_LOG(ERROR) << "Clip param type invalid";
+          return false;
+        }
+        min = *reinterpret_cast<float *>(min_param_value->tensor_addr());
+      } else {
+        min = FLT_MIN;
       }
-      auto min_param_value = GetLiteParamValue(clip_cnode->input(kClipMinIndex));
-      auto max_param_value = GetLiteParamValue(clip_cnode->input(kClipMaxIndex));
-      if ((min_param_value->tensor_type() != mindspore::kNumberTypeFloat32) ||
-          (max_param_value->tensor_type() != mindspore::kNumberTypeFloat32)) {
-        MS_LOG(ERROR) << "Clip param type invalid";
-        return false;
+
+      if (clip_cnode->size() > kClipMaxIndex) {
+        auto max_param_value = GetLiteParamValue(clip_cnode->input(kClipMaxIndex));
+        if (max_param_value->tensor_type() != mindspore::kNumberTypeFloat32) {
+          MS_LOG(ERROR) << "Clip param type invalid";
+          return false;
+        }
+        max = *reinterpret_cast<float *>(max_param_value->tensor_addr());
+      } else {
+        max = FLT_MAX;
       }
-      min = *reinterpret_cast<float *>(min_param_value->tensor_addr());
-      max = *reinterpret_cast<float *>(max_param_value->tensor_addr());
     }
 
     auto manager = graph->manager();
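
Taken together with the HardTanh change above, a Clip node that provides only one bound can now be converted: the missing bound falls back to FLT_MIN or FLT_MAX instead of failing the pass, so the resulting activation still clamps only the side that was actually specified. A hedged sketch of the fallback rule (ResolveClipBounds is a made-up helper name for illustration, not part of the pass):

#include <cfloat>
#include <iostream>
#include <utility>

// Illustration of the fallback introduced in ClipConvertActivationPass::Run:
// a bound with no corresponding input keeps its sentinel value (FLT_MIN / FLT_MAX).
std::pair<float, float> ResolveClipBounds(bool has_min, float min_in, bool has_max, float max_in) {
  float min = has_min ? min_in : FLT_MIN;
  float max = has_max ? max_in : FLT_MAX;
  return {min, max};
}

int main() {
  auto relu6_like = ResolveClipBounds(true, 0.0f, true, 6.0f);   // both bounds given
  auto upper_only = ResolveClipBounds(false, 0.0f, true, 6.0f);  // max input only
  std::cout << relu6_like.first << " " << relu6_like.second << std::endl;  // 0 6
  std::cout << upper_only.first << " " << upper_only.second << std::endl;  // FLT_MIN 6
  return 0;
}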



