diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h index 6ee45e7624..ef14444f0c 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h @@ -173,9 +173,9 @@ class BroadcastOpGpuKernel : public GpuKernel { BroadcastOpType op_type_; bool need_broadcast_; bool is_comp_op_; - int input1_num_; - int input2_num_; - int output_num_; + size_t input1_num_; + size_t input2_num_; + size_t output_num_; std::vector<size_t> lhs_shape_; std::vector<size_t> rhs_shape_; std::vector<size_t> output_shape_; diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc index f4f19b6ff3..ebe8fc39b2 100644 --- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc @@ -47,18 +47,18 @@ constexpr size_t kNopNodeRealInputIndex = 1; bool IsShapeDynamic(const abstract::ShapePtr &shape) { MS_EXCEPTION_IF_NULL(shape); - return std::any_of(shape->shape().begin(), shape->shape().end(), [](int s) { return s < 0; }); + return std::any_of(shape->shape().begin(), shape->shape().end(), [](int64_t s) { return s < 0; }); } bool IsShapeDynamic(const std::vector<int64_t> &shape) { - return std::any_of(shape.begin(), shape.end(), [](int s) { return s < 0; }); + return std::any_of(shape.begin(), shape.end(), [](int64_t s) { return s < 0; }); } std::vector<size_t> TransShapeToSizet(const abstract::ShapePtr &shape) { MS_EXCEPTION_IF_NULL(shape); std::vector<size_t> shape_size_t; if (IsShapeDynamic(shape)) { - if (std::all_of(shape->max_shape().begin(), shape->max_shape().end(), [](int s) { return s >= 0; })) { + if (std::all_of(shape->max_shape().begin(), shape->max_shape().end(), [](int64_t s) { return s >= 0; })) { std::transform(shape->max_shape().begin(), shape->max_shape().end(), 
std::back_inserter(shape_size_t), LongToSize); } else {