Browse Source

!21005 Support BERT export to ONNX and some code checks for the master branch.

Merge pull request !21005 from liuyang/code_check_master
tags/v1.5.0-rc1
i-robot Gitee 4 years ago
parent
commit
82b2493a2d
27 changed files with 613 additions and 109 deletions
  1. +572
    -88
      mindspore/ccsrc/transform/express_ir/onnx_exporter.cc
  2. +3
    -0
      mindspore/core/ops/apply_momentum.cc
  3. +1
    -0
      mindspore/core/ops/arg_min.cc
  4. +1
    -0
      mindspore/core/ops/asin.cc
  5. +3
    -0
      mindspore/core/ops/assert.cc
  6. +1
    -1
      mindspore/core/ops/batch_to_space_nd.cc
  7. +1
    -1
      mindspore/core/ops/batch_to_space_nd.h
  8. +3
    -0
      mindspore/core/ops/conv2d.cc
  9. +1
    -1
      mindspore/core/ops/cos.cc
  10. +1
    -1
      mindspore/core/ops/fake_quant_with_min_max_vars.cc
  11. +1
    -0
      mindspore/core/ops/logical_not.cc
  12. +2
    -1
      mindspore/core/ops/lrn.cc
  13. +2
    -1
      mindspore/core/ops/max_pool.cc
  14. +2
    -0
      mindspore/core/ops/ones_like.cc
  15. +1
    -0
      mindspore/core/ops/pack.cc
  16. +1
    -0
      mindspore/core/ops/rank.cc
  17. +1
    -0
      mindspore/core/ops/round.cc
  18. +1
    -1
      mindspore/core/ops/space_to_batch_nd.cc
  19. +1
    -1
      mindspore/core/ops/space_to_batch_nd.h
  20. +1
    -1
      mindspore/core/ops/squeeze.cc
  21. +3
    -0
      mindspore/core/ops/stack.cc
  22. +3
    -0
      mindspore/core/ops/topk.cc
  23. +1
    -0
      mindspore/core/ops/unpack.cc
  24. +3
    -0
      mindspore/core/ops/unsorted_segment_sum.cc
  25. +1
    -0
      mindspore/core/ops/unstack.cc
  26. +0
    -8
      mindspore/nn/wrap/grad_reducer.py
  27. +2
    -4
      mindspore/nn/wrap/loss_scale.py

+ 572
- 88
mindspore/ccsrc/transform/express_ir/onnx_exporter.cc
File diff suppressed because it is too large
View File


+ 3
- 0
mindspore/core/ops/apply_momentum.cc View File

@@ -63,6 +63,9 @@ AbstractBasePtr ApplyMomentumInfer(const abstract::AnalysisEnginePtr &, const Pr
auto prim_name = primitive->name();
(void)CheckAndConvertUtils::CheckInteger("apply_momentum_infer", SizeToLong(input_args.size()), kEqual, 5, prim_name);

for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
// Infer shape
auto v_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];



+ 1
- 0
mindspore/core/ops/arg_min.cc View File

@@ -42,6 +42,7 @@ AbstractBasePtr ArgMinInfer(const abstract::AnalysisEnginePtr &, const Primitive

// Infer shape
auto axis = GetValue<int64_t>(primitive->GetAttr(kAxis));
MS_EXCEPTION_IF_NULL(input_args[0]);
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
auto x_rank = SizeToLong(x_shape.size());
CheckAndConvertUtils::CheckInRange<int64_t>("axis", axis, kIncludeLeft, {-x_rank, x_rank}, prim_name);


+ 1
- 0
mindspore/core/ops/asin.cc View File

@@ -32,6 +32,7 @@ AbstractBasePtr AsinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
auto infer_shape = std::make_shared<abstract::Shape>(x_shape);

MS_EXCEPTION_IF_NULL(input_args[0]);
// Infer Type
auto dtype = input_args[0]->BuildType();
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kInt32};


+ 3
- 0
mindspore/core/ops/assert.cc View File

@@ -38,6 +38,9 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto op_name = primitive->name();
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
TypePtr condition;
if (!(input_args[0]->BuildType()->type_id() == kObjectTypeTensorType)) {
auto condition_values = GetValue<std::vector<bool>>(input_args[0]->BuildValue());


+ 1
- 1
mindspore/core/ops/batch_to_space_nd.cc View File

@@ -92,7 +92,7 @@ std::vector<int64_t> BatchToSpaceND::get_block_shape() const {
return GetValue<std::vector<int64_t>>(value_ptr);
}

void BatchToSpaceND::Init(std::vector<int64_t> block_shape, std::vector<std::vector<int64_t>> crops) {
void BatchToSpaceND::Init(const std::vector<int64_t> block_shape, const std::vector<std::vector<int64_t>> crops) {
this->set_crops(crops);
this->set_block_shape(block_shape);
}


+ 1
- 1
mindspore/core/ops/batch_to_space_nd.h View File

@@ -33,7 +33,7 @@ class BatchToSpaceND : public PrimitiveC {
BatchToSpaceND() : PrimitiveC(kNameBatchToSpaceND) {}
~BatchToSpaceND() = default;
MS_DECLARE_PARENT(BatchToSpaceND, PrimitiveC);
void Init(std::vector<int64_t> block_shape, std::vector<std::vector<int64_t>> crops);
void Init(const std::vector<int64_t> block_shape, const std::vector<std::vector<int64_t>> crops);
void set_crops(std::vector<std::vector<int64_t>> crops);
void set_block_shape(std::vector<int64_t> block_shape);
std::vector<int64_t> get_block_shape() const;


+ 3
- 0
mindspore/core/ops/conv2d.cc View File

@@ -144,6 +144,9 @@ void Conv2DPadFunction(std::vector<int64_t> *output_hw, std::vector<int64_t> *pa
abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto x_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
auto w_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape());
auto x_shape = x_shape_map[kShape];


+ 1
- 1
mindspore/core/ops/cos.cc View File

@@ -32,7 +32,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
}

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) {
if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr arg) { return arg == nullptr; })) {
MS_LOG(EXCEPTION) << "nullptr";
}
std::map<std::string, TypePtr> types;


+ 1
- 1
mindspore/core/ops/fake_quant_with_min_max_vars.cc View File

@@ -47,7 +47,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr arg) { return arg == nullptr; })) {
if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr arg) { return arg == nullptr; })) {
MS_LOG(EXCEPTION) << "nullptr";
}
std::map<std::string, TypePtr> types;


+ 1
- 0
mindspore/core/ops/logical_not.cc View File

@@ -32,6 +32,7 @@ abstract::ShapePtr LogicalNotInferShape(const PrimitivePtr &primitive, const std
TypePtr LogicalNotInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
auto op_name = prim->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
auto infer_dtype = input_args[0]->BuildType();
std::set<TypePtr> local_bool = {kBool};
return CheckAndConvertUtils::CheckTensorTypeValid("x", infer_dtype, local_bool, op_name);


+ 2
- 1
mindspore/core/ops/lrn.cc View File

@@ -86,10 +86,11 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) {
if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr arg) { return arg == nullptr; })) {
MS_LOG(EXCEPTION) << "nullptr";
}
std::map<std::string, TypePtr> types;
MS_EXCEPTION_IF_NULL(input_args[0]);
types.emplace("x", input_args[0]->BuildType());
return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
}


+ 2
- 1
mindspore/core/ops/max_pool.cc View File

@@ -82,6 +82,7 @@ namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto op_name = primitive->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
auto format = Format(GetValue<int64_t>(primitive->GetAttr(kFormat)));
if (format == NHWC) {
@@ -123,7 +124,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
}

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) {
if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr arg) { return arg == nullptr; })) {
MS_LOG(EXCEPTION) << "nullptr";
}
auto input_type = input_args[0]->BuildType();


+ 2
- 0
mindspore/core/ops/ones_like.cc View File

@@ -34,7 +34,9 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
}

TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto op_name = primitive->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
auto infer_type = input_args[0]->BuildType();
auto valid_type = common_valid_types;
valid_type.insert(kBool);


+ 1
- 0
mindspore/core/ops/pack.cc View File

@@ -58,6 +58,7 @@ AbstractBasePtr PackInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();

MS_EXCEPTION_IF_NULL(input_args[0]);
auto x_shapes = input_args[0]->BuildShape()->cast<abstract::TupleShapePtr>()->shape();
auto x_types = input_args[0]->BuildType()->cast<TuplePtr>()->elements();
auto all_shape = _get_pack_shape(x_shapes, x_types, GetValue<int64_t>(primitive->GetAttr(kAxis)), prim_name);


+ 1
- 0
mindspore/core/ops/rank.cc View File

@@ -22,6 +22,7 @@ namespace {
TypePtr RankInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
auto op_name = prim->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
auto infer_dtype = input_args[0]->BuildType();
CheckAndConvertUtils::CheckTensorTypeValid("x", infer_dtype, {kTensorType}, op_name);
return kTypeNone;


+ 1
- 0
mindspore/core/ops/round.cc View File

@@ -28,6 +28,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
}

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(input_args[0]);
auto infer_type = input_args[0]->BuildType();
return CheckAndConvertUtils::CheckTensorTypeValid("x", infer_type, common_valid_types, prim->name());
}


+ 1
- 1
mindspore/core/ops/space_to_batch_nd.cc View File

@@ -89,7 +89,7 @@ std::vector<int64_t> SpaceToBatchND::get_block_shape() const {
return GetValue<std::vector<int64_t>>(GetAttr(kBlockShape));
}

void SpaceToBatchND::Init(std::vector<int64_t> block_shape, std::vector<std::vector<int64_t>> paddings) {
void SpaceToBatchND::Init(const std::vector<int64_t> block_shape, const std::vector<std::vector<int64_t>> paddings) {
this->set_paddings(paddings);
this->set_block_shape(block_shape);
}


+ 1
- 1
mindspore/core/ops/space_to_batch_nd.h View File

@@ -33,7 +33,7 @@ class SpaceToBatchND : public PrimitiveC {
SpaceToBatchND() : PrimitiveC(kNameSpaceToBatchND) {}
~SpaceToBatchND() = default;
MS_DECLARE_PARENT(SpaceToBatchND, PrimitiveC);
void Init(std::vector<int64_t> block_shape, const std::vector<std::vector<int64_t>> paddings);
void Init(const std::vector<int64_t> block_shape, const std::vector<std::vector<int64_t>> paddings);
void set_paddings(const std::vector<std::vector<int64_t>> paddings);
void set_block_shape(std::vector<int64_t> block_shape);
std::vector<int64_t> get_block_shape() const;


+ 1
- 1
mindspore/core/ops/squeeze.cc View File

@@ -54,7 +54,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
}

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
if (std::any_of(input_args.begin(), input_args.end(), [](AbstractBasePtr a) { return a == nullptr; })) {
if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr arg) { return arg == nullptr; })) {
MS_LOG(EXCEPTION) << "nullptr";
}
return input_args[0]->BuildType();


+ 3
- 0
mindspore/core/ops/stack.cc View File

@@ -28,6 +28,9 @@ abstract::AbstractBasePtr StackInfer(const PrimitivePtr &primitive, const std::v
if (input_args.size() < 1) {
MS_LOG(ERROR) << "Invalid input size " << input_args.size();
}
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
for (int64_t i = 1; i < SizeToLong(input_args.size()); ++i) {
auto input_shape_tmp = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[i]->BuildShape())[kShape];


+ 3
- 0
mindspore/core/ops/topk.cc View File

@@ -35,6 +35,9 @@ AbstractBasePtr TopKInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt
(void)CheckAndConvertUtils::CheckInteger("top_k_infer", SizeToLong(input_args.size()), kEqual, 2, prim_name);

// Infer dtype
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto output1_type = kInt32;
const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
auto output0_type =


+ 1
- 0
mindspore/core/ops/unpack.cc View File

@@ -26,6 +26,7 @@ AbstractBasePtr UnpackInfer(const abstract::AnalysisEnginePtr &, const Primitive
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
CheckAndConvertUtils::CheckSubClass("x", input_args[0]->BuildType(), {TypeIdToType(kObjectTypeTensorType)},
prim_name);
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];


+ 3
- 0
mindspore/core/ops/unsorted_segment_sum.cc View File

@@ -31,6 +31,9 @@ AbstractBasePtr UnsortedSegmentSumInfer(const abstract::AnalysisEnginePtr &, con
auto prim_name = primitive->name();

// Infer type
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto x_type = input_args[0]->BuildType()->cast<TensorTypePtr>()->element();
// Infer shape
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];


+ 1
- 0
mindspore/core/ops/unstack.cc View File

@@ -25,6 +25,7 @@ AbstractBasePtr UnstackInfer(const abstract::AnalysisEnginePtr &, const Primitiv
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
MS_EXCEPTION_IF_NULL(input_args[0]);
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
int64_t dim = x_shape.size();
int64_t axis = GetValue<int64_t>(primitive->GetAttr(kAxis));


+ 0
- 8
mindspore/nn/wrap/grad_reducer.py View File

@@ -101,7 +101,6 @@ def _tensors_allreduce(degree, mean, allgather, allreduce, allreduce_filter, gra


@reduce_opt.register("Tensor", "Bool", "Bool", "Tensor")

def _tensors_allreduce_post(degree, mean, allreduce_filter, grad):
"""
Apply allreduce on gradient in PyNative mode.
@@ -125,7 +124,6 @@ def _tensors_allreduce_post(degree, mean, allreduce_filter, grad):


@reduce_opt.register("Tensor", "Bool", "Function", "Function", "Bool", "Tensor", "Bool")

def _tensors_allreduce_ps(degree, mean, allgather, allreduce, allreduce_filter, grad, ps_parameter):
"""
Apply allreduce on gradient.
@@ -154,7 +152,6 @@ def _tensors_allreduce_ps(degree, mean, allgather, allreduce, allreduce_filter,


@reduce_opt.register("Tensor", "Bool", "Function", "Function", "Bool", "RowTensor")

def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce_filter, grad):
"""
Apply allgather on gradient instead of allreduce for sparse feature.
@@ -181,7 +178,6 @@ def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce


@reduce_opt.register("Tensor", "Bool", "Function", "Function", "Bool", "RowTensor", "Bool")

def _tensors_allreduce_with_sparse_ps(degree, mean, allgather, allreduce, allreduce_filter, grad, ps_parameter):
"""
Apply allgather on gradient instead of allreduce for sparse feature.
@@ -215,7 +211,6 @@ _get_datatype = C.MultitypeFuncGraph("_get_datatype")


@_get_datatype.register("Tensor")

def _tensors_get_datatype(grad):
"""
Acquire gradient datatype.
@@ -230,7 +225,6 @@ def _tensors_get_datatype(grad):


@_get_datatype.register("RowTensor")

def _tensors_get_datatype_with_sparse(grad):
"""
Acquire gradient datatype.
@@ -248,7 +242,6 @@ _cast_datatype = C.MultitypeFuncGraph("_cast_datatype")


@_cast_datatype.register("TypeType", "Tensor")

def _tensors_cast_datatype(datatype, grad):
"""
Cast gradient to datatype.
@@ -264,7 +257,6 @@ def _tensors_cast_datatype(datatype, grad):


@_cast_datatype.register("TypeType", "RowTensor")

def _tensors_cast_datatype_with_sparse(datatype, grad):
"""
Cast gradient to datatype.


+ 2
- 4
mindspore/nn/wrap/loss_scale.py View File

@@ -30,12 +30,11 @@ reciprocal = P.Reciprocal()


@_grad_scale.register("Tensor", "Tensor")

def tensor_grad_scale(scale, grad):
return grad * F.cast(reciprocal(scale), F.dtype(grad))

@_grad_scale.register("Tensor", "RowTensor")

@_grad_scale.register("Tensor", "RowTensor")
def tensor_grad_scale_row_tensor(scale, grad):
return RowTensor(grad.indices,
grad.values * F.cast(reciprocal(scale), F.dtype(grad.values)),
@@ -46,12 +45,11 @@ grad_overflow = P.FloatStatus()


@_grad_overflow.register("Tensor")

def _tensor_grad_overflow(grad):
return grad_overflow(grad)

@_grad_overflow.register("RowTensor")

@_grad_overflow.register("RowTensor")
def _tensor_grad_overflow_row_tensor(grad):
return grad_overflow(grad.values)



Loading…
Cancel
Save