diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_parallel.cc
index d475bf02fd..fadd62c2c1 100644
--- a/mindspore/ccsrc/frontend/parallel/step_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_parallel.cc
@@ -1287,7 +1287,8 @@ void ApplyParallelOptOnParam(TensorLayout *tensor_layout, const OperatorInfoPtr
     auto allgather = cnode->input(index)->cast<CNodePtr>();
     auto prim = GetValueNode<PrimitivePtr>(allgather->input(0));
     auto attrs = prim->attrs();
-    attrs["fusion"] = MakeValue(1);
+    // enable fusion flag later when it's supported in backend
+    attrs["fusion"] = MakeValue(0);
     prim->SetAttrs(attrs);
     MS_LOG(INFO) << "Parallel optimizer is applied on " << parameter->ToString();
   } else {
diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py
index fe383a4098..f29e19f4c2 100755
--- a/mindspore/nn/optim/optimizer.py
+++ b/mindspore/nn/optim/optimizer.py
@@ -151,8 +151,8 @@ class Optimizer(Cell):
         if context.get_auto_parallel_context("enable_parallel_optimizer"):
             if _get_parallel_mode() == ParallelMode.DATA_PARALLEL:
                 self.use_parallel = True
-            elif _get_parallel_mode() == ParallelMode.STAND_ALONE:
-                raise RuntimeError("Parallel optimizer is not supported in stand alone mode.")
+            elif _get_parallel_mode() in (ParallelMode.STAND_ALONE, ParallelMode.HYBRID_PARALLEL):
+                raise RuntimeError("Parallel optimizer is not supported in {}.".format(_get_parallel_mode()))
             else:
                 self.use_parallel = False
         else:
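
A minimal sketch of how the optimizer.py change would surface to a user, assuming the check runs in `Optimizer.__init__` and that a trivial `nn.Dense` cell with a `Momentum` optimizer is enough to trigger it; the cell and optimizer choice here are illustrative, not part of the patch:

```python
# Illustrative sketch only (not part of the patch).
import mindspore.nn as nn
from mindspore import context

# Enable the parallel optimizer under a mode the patch now rejects.
context.set_auto_parallel_context(parallel_mode="hybrid_parallel",
                                  enable_parallel_optimizer=True)

net = nn.Dense(4, 4)  # any cell with trainable parameters would do
try:
    opt = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
except RuntimeError as err:
    # Expected after this patch: the message names the rejected parallel mode
    # instead of the old fixed "stand alone mode" wording.
    print(err)
```

With this change, HYBRID_PARALLEL fails fast at optimizer construction rather than silently falling through to `self.use_parallel = False`, and the error message reports whichever unsupported mode was actually set.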