Browse Source

Fix code-checking warnings for r1.2

tags/v1.3.0
l00591931 4 years ago
parent
commit
319c20824e
12 changed files with 111 additions and 102 deletions
  1. +7
    -8
      mindspore/_extends/parse/parser.py
  2. +3
    -0
      mindspore/_extends/remote/kernel_build_server.py
  3. +4
    -0
      mindspore/_extends/remote/kernel_build_server_ascend.py
  4. +2
    -3
      mindspore/ccsrc/debug/data_dump/dump_utils.cc
  5. +6
    -6
      mindspore/ccsrc/debug/debugger/debugger.cc
  6. +3
    -3
      mindspore/ccsrc/frontend/operator/composite/map.cc
  7. +15
    -15
      mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc
  8. +3
    -3
      mindspore/core/abstract/abstract_value.cc
  9. +0
    -1
      mindspore/core/abstract/analysis_context.cc
  10. +60
    -57
      mindspore/core/abstract/prim_maths.cc
  11. +4
    -2
      mindspore/core/abstract/prim_nn.cc
  12. +4
    -4
      mindspore/core/utils/log_adapter.cc

+ 7
- 8
mindspore/_extends/parse/parser.py View File

@@ -101,14 +101,13 @@ def get_parse_method_of_class(obj, parse_method=None):
method_name = None
if parse_method is not None:
method_name = parse_method
else:
if isinstance(obj, nn.Cell):
if obj.enable_hook:
if context.get_context("mode") == context.GRAPH_MODE:
raise ValueError("The graph mode does not support hook function.")
method_name = "_hook_construct"
else:
method_name = "construct"
elif isinstance(obj, nn.Cell):
if obj.enable_hook:
if context.get_context("mode") == context.GRAPH_MODE:
raise ValueError("The graph mode does not support hook function.")
method_name = "_hook_construct"
else:
method_name = "construct"
if method_name is not None:
if hasattr(obj, method_name):
method = getattr(obj, method_name)


+ 3
- 0
mindspore/_extends/remote/kernel_build_server.py View File

@@ -17,6 +17,7 @@ import os
from mindspore import log as logger
from mindspore._extends.parallel_compile.akg_compiler.akg_process import create_akg_parallel_process


class AkgBuilder:
"""Akg building wrapper"""

@@ -32,7 +33,9 @@ class AkgBuilder:
def compile(self):
return self.akg_builder.compile()


class Messager:

'''Messager'''

def __init__(self, fdin, fdout):


+ 4
- 0
mindspore/_extends/remote/kernel_build_server_ascend.py View File

@@ -19,6 +19,7 @@ from mindspore._extends.remote.kernel_build_server import Messager, get_logger,
from mindspore._extends.parallel_compile.tbe_compiler.tbe_process import create_tbe_parallel_process, op_select_format
from mindspore._extends.parallel_compile.tbe_compiler.tbe_process import check_supported


class TbeBuilder:
"""Tbe building wrapper"""

@@ -43,7 +44,9 @@ class TbeBuilder:
def exit(self):
self.tbe_builder.exit()


class AscendMessager(Messager):

'''
Ascend Messager
It works as a server, communicating with c++ client.
@@ -140,6 +143,7 @@ class AscendMessager(Messager):
get_logger().info("[TRACE] Ascend Messager Exit...")
exit()


if __name__ == '__main__':
warnings.simplefilter("ignore")
if len(sys.argv) != 3:


+ 2
- 3
mindspore/ccsrc/debug/data_dump/dump_utils.cc View File

@@ -71,8 +71,7 @@ void SetConstNodeId(const AnfNodePtr &node, std::map<std::string, size_t> *const
}
}

void GetCNodeConstantId(const session::KernelGraph *graph, const CNodePtr &node,
std::map<std::string, size_t> *const_map) {
void GetCNodeConstantId(const CNodePtr &node, std::map<std::string, size_t> *const_map) {
auto &inputs = node->inputs();
if (inputs.empty()) {
MS_LOG(EXCEPTION) << "Inputs of apply node is empty";
@@ -99,7 +98,7 @@ void GetConstantId(const session::KernelGraph *graph, std::map<std::string, size
}
auto cnode = node->cast<CNodePtr>();
if (cnode != graph->get_return()) {
GetCNodeConstantId(graph, cnode, const_map);
GetCNodeConstantId(cnode, const_map);
} else {
SetConstNodeId(cnode->input(1), const_map);
}


+ 6
- 6
mindspore/ccsrc/debug/debugger/debugger.cc View File

@@ -1053,12 +1053,12 @@ void Debugger::SetStepNum(int32_t cur_num_step) {
int32_t Debugger::step_num() const { return num_step_; }

uint64_t BytestoInt64(const std::vector<char> &buffer) {
uint64_t ret;
ret = ((uint64_t)buffer[7] << 56) | ((uint64_t)buffer[6] << 48) | ((uint64_t)buffer[5] << 40) |
((uint64_t)buffer[4] << 32) | ((uint64_t)buffer[3] << 24) | ((uint64_t)buffer[2] << 16) |
((uint64_t)buffer[1] << 8) | ((uint64_t)buffer[0]);
uint64_t ret = (uint64_t)buffer[0];
const int SHIFT = 8;
const int MAX_INDEX = 8;
for (int i = 1; i < MAX_INDEX; i++) {
ret = ((uint64_t)buffer[i] << (i * SHIFT)) | ret;
}
return ret;
}



+ 3
- 3
mindspore/ccsrc/frontend/operator/composite/map.cc View File

@@ -186,10 +186,10 @@ AnfNodePtr Map::Make(const FuncGraphPtr &func_graph, const AnfNodePtr &fn_arg, c
bool found = false;
TypeId id = kObjectTypeEnd;
std::pair<AnfNodePtr, TypePtr> pair;
for (auto &item : arg_pairs) {
pair = item;
for (auto &arg_pair : arg_pairs) {
pair = arg_pair;
MS_LOG(DEBUG) << "Map " << pair.second->ToString();
id = item.second->type_id();
id = arg_pair.second->type_id();
if (nonleaf_.count(id)) {
found = true;
break;


+ 15
- 15
mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc View File

@@ -562,12 +562,12 @@ class SideEffectFinder {
return {EffectInfo::kDetected, false, false, false};
}
// Pop out tuple index.
auto index = tuple_indexes->top();
auto top_index = tuple_indexes->top();
tuple_indexes->pop();
// Follow the tuple item according the index.
size_t input_index = static_cast<size_t>(index) + 1;
size_t input_index = static_cast<size_t>(top_index) + 1;
if (input_index >= cnode->size()) {
MS_LOG(EXCEPTION) << "Invalid make_tuple: " << cnode->DebugString() << " index=" << index;
MS_LOG(EXCEPTION) << "Invalid make_tuple: " << cnode->DebugString() << " index=" << top_index;
}
if (tuple_indexes->empty()) {
// Trace non-tuple.
@@ -734,12 +734,12 @@ class SideEffectFinder {
}

int GetParameterIndex(const FuncGraphPtr &func_graph, const ParameterPtr &para) {
int index = 0;
int parameter_index = 0;
for (auto &parameter : func_graph->parameters()) {
if (para == parameter) {
return index;
return parameter_index;
}
++index;
++parameter_index;
}
MS_LOG(EXCEPTION) << "Parameter not found: " << (para ? para->DebugString() : "<null>");
}
@@ -1130,9 +1130,9 @@ class AutoMonadConverter {
(void)GetUniverse();
bool load_with_primitive = (info.load && IsPrimitiveCNode(cnode));
if (!cnode->IsEffectHandled() && !load_with_primitive) {
auto u = NewValueNode(kUMonad);
u->set_abstract(kUMonad->ToAbstract());
cnode->add_input(u);
auto u_node = NewValueNode(kUMonad);
u_node->set_abstract(kUMonad->ToAbstract());
cnode->add_input(u_node);
}
}
if (info.io) {
@@ -1209,9 +1209,9 @@ class AutoMonadConverter {
return;
}
// Current u monad.
auto u = GetUniverse();
auto current_u = GetUniverse();
// Create Load cnodes.
auto loads = MakeLoads(cnode, ref_inputs, u);
auto loads = MakeLoads(cnode, ref_inputs, current_u);
if (loads.empty() || !update_state) {
// Skip UpdateState insertion.
return;
@@ -1219,7 +1219,7 @@ class AutoMonadConverter {
// Insert UpdateState if required.
if (loads.size() == 1) {
// One Load, no make_tuple needed.
u_ = UpdateState(u, loads.front());
u_ = UpdateState(current_u, loads.front());
return;
}
// Multiple Loads, Create a MakeTuple before UpdateState.
@@ -1229,7 +1229,7 @@ class AutoMonadConverter {
loads.insert(loads.begin(), NewValueNode(prim::kPrimMakeTuple));
auto make_tuple = func_graph_->NewCNode(loads);
make_tuple->set_abstract(std::make_shared<abstract::AbstractTuple>(load_abstracts));
u_ = UpdateState(u, make_tuple);
u_ = UpdateState(current_u, make_tuple);
}

std::vector<AnfNodePtr> MakeLoads(const CNodePtr &cnode, const RefInputs &ref_inputs, const AnfNodePtr &u) {
@@ -1391,8 +1391,8 @@ class AutoMonadConverter {
}

bool HasSideEffect(const CNodePtr &cnode) const {
const auto &info = GetEffectInfo(cnode);
return (info.memory || info.load || info.io);
const auto &cnode_info = GetEffectInfo(cnode);
return (cnode_info.memory || cnode_info.load || cnode_info.io);
}

// The func graph to be converted.


+ 3
- 3
mindspore/core/abstract/abstract_value.cc View File

@@ -459,12 +459,12 @@ BaseShapePtr AbstractTensor::BuildShape() const {

AbstractBasePtr AbstractTensor::Join(const AbstractBasePtr &other) {
if (other->BuildType()->type_id() == kObjectTypeUndeterminedType) {
auto other_tensor = dyn_cast<AbstractUndetermined>(other);
auto element = element_->Join(other_tensor->element());
auto other_undetermined_tensor = dyn_cast<AbstractUndetermined>(other);
auto element = element_->Join(other_undetermined_tensor->element());
if (element == nullptr) {
return nullptr;
}
auto shape = ShapeJoin(this->shape(), other_tensor->shape());
auto shape = ShapeJoin(this->shape(), other_undetermined_tensor->shape());
auto ret = std::make_shared<AbstractUndetermined>(element, shape);
return ret;
}


+ 0
- 1
mindspore/core/abstract/analysis_context.cc View File

@@ -147,7 +147,6 @@ bool AnalysisContext::operator==(const AnalysisContext &other) const {
}

// brief The key which controls the graph cloning in Specialize.
//
// Originally, specialize use context directly as the key for cloning graph. The graph will be cloned multiple times
// for different context, which means the graph is called from different node with different arguments and different
// free values. In order to decrease the number of cloned graphs, we add this `SpecializeKey` method to control what


+ 60
- 57
mindspore/core/abstract/prim_maths.cc View File

@@ -106,6 +106,61 @@ AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &pr
return ret;
}

int64_t InferImplReduceFuncCheckAxis(const int64_t &axis, const size_t dim) {
int64_t dim_ = static_cast<int64_t>(dim);
if (axis < -dim_ || axis >= dim_) {
MS_LOG(EXCEPTION) << "axis should be in [" << -dim_ << ", " << dim_ << "). But got axis = " << axis;
}
int64_t ret_axis = axis;
if (axis >= -dim_ && axis < 0) {
ret_axis += dim_;
}
return ret_axis;
}

void InferImplReduceFuncCalShape(ShapeVector *shape, const ShapeVector &x_shape, const ValuePtr &axis,
bool keep_dims_value) {
if (axis->isa<ValueTuple>() || axis->isa<ValueList>()) {
auto axis_ptr_list =
axis->isa<ValueTuple>() ? axis->cast<ValueTuplePtr>()->value() : axis->cast<ValueListPtr>()->value();
if (!axis_ptr_list.size()) {
if (keep_dims_value) shape->insert(shape->end(), x_shape.size(), 1);
} else {
shape->insert(shape->end(), x_shape.begin(), x_shape.end());
ValuePtrList axis_items = axis_ptr_list;
ValuePtrList::iterator it;
ValuePtrList::reverse_iterator it_re;
int64_t axis_value;
if (keep_dims_value) {
for (it = axis_items.begin(); it != axis_items.end(); ++it) {
axis_value = GetValue<int64_t>(*it);
axis_value = InferImplReduceFuncCheckAxis(axis_value, x_shape.size());
shape->at(axis_value) = 1;
}
} else {
std::sort(axis_items.begin(), axis_items.end());
for (it_re = axis_items.rbegin(); it_re != axis_items.rend(); ++it_re) {
axis_value = GetValue<int64_t>(*it_re);
axis_value = InferImplReduceFuncCheckAxis(axis_value, x_shape.size());
shape->erase(shape->begin() + axis_value);
}
}
}
} else if (axis->isa<Int32Imm>() || axis->isa<Int64Imm>()) {
shape->insert(shape->end(), x_shape.begin(), x_shape.end());
int64_t axis_value = GetValue<int64_t>(axis);
axis_value = InferImplReduceFuncCheckAxis(axis_value, x_shape.size());
if (keep_dims_value) {
shape->at(axis_value) = 1;
} else {
shape->erase(shape->begin() + axis_value);
}
} else {
MS_LOG(EXCEPTION) << "Axis should be one of types: [int/tuple/list].";
}
return;
}

// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum,
// ReduceAll, ReduceAny, ReduceMax, ReduceMin.
AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
@@ -126,62 +181,9 @@ AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePt
ValuePtr axis = primitive->GetAttr("axis");
MS_EXCEPTION_IF_NULL(axis);

auto check_axis = [](int64_t &axis, const size_t dim) -> void {
int64_t dim_ = static_cast<int64_t>(dim);
if (axis < -dim_ || axis >= dim_) {
MS_LOG(EXCEPTION) << "axis should be in [" << -dim_ << ", " << dim_ << "). But got axis = " << axis;
}
if (axis >= -dim_ && axis < 0) {
axis += dim_;
}
return;
};

auto cal_shape = [axis, keep_dims_value, check_axis](ShapeVector &shape, const ShapeVector &x_shape) -> void {
if (axis->isa<ValueTuple>() || axis->isa<ValueList>()) {
auto axis_ptr_list =
axis->isa<ValueTuple>() ? axis->cast<ValueTuplePtr>()->value() : axis->cast<ValueListPtr>()->value();
if (!axis_ptr_list.size()) {
if (keep_dims_value) shape.insert(shape.end(), x_shape.size(), 1);
} else {
shape.insert(shape.end(), x_shape.begin(), x_shape.end());
ValuePtrList axis_items = axis_ptr_list;
ValuePtrList::iterator it;
ValuePtrList::reverse_iterator it_re;
int64_t axis_value;
if (keep_dims_value) {
for (it = axis_items.begin(); it != axis_items.end(); ++it) {
axis_value = GetValue<int64_t>(*it);
check_axis(axis_value, x_shape.size());
shape[axis_value] = 1;
}
} else {
std::sort(axis_items.begin(), axis_items.end());
for (it_re = axis_items.rbegin(); it_re != axis_items.rend(); ++it_re) {
axis_value = GetValue<int64_t>(*it_re);
check_axis(axis_value, x_shape.size());
shape.erase(std::begin(shape) + axis_value);
}
}
}
} else if (axis->isa<Int32Imm>() || axis->isa<Int64Imm>()) {
shape.insert(shape.end(), x_shape.begin(), x_shape.end());
int64_t axis_value = GetValue<int64_t>(axis);
check_axis(axis_value, x_shape.size());
if (keep_dims_value) {
shape[axis_value] = 1;
} else {
shape.erase(std::begin(shape) + axis_value);
}
} else {
MS_LOG(EXCEPTION) << "Axis should be one of types: [int/tuple/list].";
}
return;
};

ShapeVector shape = {};
ShapeVector x_shape = input_x->shape()->shape();
cal_shape(shape, x_shape);
InferImplReduceFuncCalShape(&shape, x_shape, axis, keep_dims_value);

bool x_is_dyn = (!input_x->shape()->min_shape().empty() && !input_x->shape()->max_shape().empty());
if (x_is_dyn) {
@@ -189,8 +191,8 @@ AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePt
ShapeVector shape_max = {};
ShapeVector x_shape_min = input_x->shape()->min_shape();
ShapeVector x_shape_max = input_x->shape()->max_shape();
cal_shape(shape_min, x_shape_min);
cal_shape(shape_max, x_shape_max);
InferImplReduceFuncCalShape(&shape_min, x_shape_min, axis, keep_dims_value);
InferImplReduceFuncCalShape(&shape_max, x_shape_max, axis, keep_dims_value);
return std::make_shared<AbstractTensor>(input_x->element(), std::make_shared<Shape>(shape, shape_min, shape_max));
}
return std::make_shared<AbstractTensor>(input_x->element(), std::make_shared<Shape>(shape));
@@ -326,7 +328,8 @@ AbstractBasePtr InferImplMatMul(const AnalysisEnginePtr &, const PrimitivePtr &p
MS_EXCEPTION_IF_NULL(y->shape());
auto x_shp = x->shape()->shape();
auto y_shp = y->shape()->shape();
if (x_shp.size() != 2 || y_shp.size() != 2) {
const size_t SHAPE_SIZE = 2;
if (x_shp.size() != SHAPE_SIZE || y_shp.size() != SHAPE_SIZE) {
MS_LOG(EXCEPTION) << "MatMul inputs should have the same dimension size and equal to 2.";
}
ValuePtr transpose_a_ptr = primitive->GetAttr("transpose_a");


+ 4
- 2
mindspore/core/abstract/prim_nn.cc View File

@@ -52,8 +52,10 @@ AbstractBasePtr InferImplPooling(const AnalysisEnginePtr &, const PrimitivePtr &
if (input_shape->shape().size() != input_shape_size) {
MS_LOG(EXCEPTION) << "Pooling input should be a 4-D tensor.";
}
int64_t h_input = input_shape->shape()[2];
int64_t w_input = input_shape->shape()[3];
const size_t H_INDEX = 2;
const size_t W_INDEX = 3;
int64_t h_input = input_shape->shape()[H_INDEX];
int64_t w_input = input_shape->shape()[W_INDEX];

int64_t window = primitive->GetAttr("window")->cast<Int64ImmPtr>()->value();
int64_t stride = primitive->GetAttr("stride")->cast<Int64ImmPtr>()->value();


+ 4
- 4
mindspore/core/utils/log_adapter.cc View File

@@ -28,11 +28,11 @@ namespace mindspore {
#define google mindspore_private
static std::string GetProcName() {
#if defined(__APPLE__) || defined(__FreeBSD__)
const char *appname = getprogname();
const std::string appname = getprogname();
#elif defined(_GNU_SOURCE)
const char *appname = program_invocation_name;
const std::string appname = program_invocation_name;
#else
const char *appname = "?";
const std::string appname = "?";
#endif
// some times, the appname is an absolute path, its too long
std::string app_name(appname);
@@ -76,7 +76,7 @@ static int GetGlogLevel(MsLogLevel level) {
}

// get threshold level
static int GetThresholdLevel(std::string threshold) {
static int GetThresholdLevel(const std::string &threshold) {
if (threshold.empty()) {
return google::GLOG_WARNING;
} else if (threshold == std::to_string(DEBUG) || threshold == std::to_string(INFO)) {


Loading…
Cancel
Save