
Code alarm clearance for parallel modules.

Branch: feature/build-system-rewrite
Author: liuluobin, 4 years ago
Commit: 9d6a6b7959
8 changed files with 21 additions and 8 deletions:

  1. mindspore/ccsrc/frontend/parallel/ops_info/batchnorm_info.cc (+1, -1)
  2. mindspore/ccsrc/frontend/parallel/ops_info/iou_info.cc (+1, -1)
  3. mindspore/python/mindspore/parallel/nn/__init__.py (+5, -1)
  4. mindspore/python/mindspore/parallel/nn/layers.py (+2, -1)
  5. mindspore/python/mindspore/parallel/nn/loss.py (+2, -1)
  6. mindspore/python/mindspore/parallel/nn/moe.py (+2, -1)
  7. mindspore/python/mindspore/parallel/nn/op_parallel_config.py (+2, -1)
  8. mindspore/python/mindspore/parallel/nn/transformer.py (+6, -1)

mindspore/ccsrc/frontend/parallel/ops_info/batchnorm_info.cc (+1, -1)

@@ -166,7 +166,7 @@ Status BatchNormInfo::InferAllReduceGroupBySize() {
   RankList group_rank_list;
   for (size_t i = 0; i < LongToSize(group_size_); ++i) {
-    group_rank_list.push_back(tmp + i);
+    group_rank_list.push_back(tmp + SizeToLong(i));
   }
   MS_LOG(INFO) << name_ << ": The group rank list is " << group_rank_list;



mindspore/ccsrc/frontend/parallel/ops_info/iou_info.cc (+1, -1)

@@ -52,7 +52,7 @@ Status IOUInfo::InferTensorMap() {
   inputs_tensor_map_.emplace_back(TensorMap({1, -1}));
   inputs_tensor_map_.emplace_back(TensorMap({0, -1}));
-  outputs_tensor_map_.emplace_back(TensorMap({1, 0}));
+  outputs_tensor_map_.emplace_back(TensorMap({0, 1}));
   return SUCCESS;
 }
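The corrected map matches the IOU operator's documented output layout: the result has shape (M, N), one value per (ground-truth box, anchor box) pair, so each output dimension should inherit the sharding of the input dimension it comes from. A minimal NumPy sketch of that reasoning (illustration only, with a made-up shard layout; not MindSpore internals):

import numpy as np

# Hypothetical 2-D device matrix: device dim 0 and device dim 1.
anchor_boxes = np.zeros((8, 4))  # shape (N, 4); dim 0 sharded on device dim 1 -> tensor map {1, -1}
gt_boxes = np.zeros((6, 4))      # shape (M, 4); dim 0 sharded on device dim 0 -> tensor map {0, -1}

# IOU produces one value per (gt box, anchor box) pair, i.e. shape (M, N).
iou = np.zeros((gt_boxes.shape[0], anchor_boxes.shape[0]))

# Output dim 0 (M) follows gt_boxes     -> device dim 0
# Output dim 1 (N) follows anchor_boxes -> device dim 1
# Consistent tensor map: {0, 1}; the old {1, 0} swapped the two.
print(iou.shape)  # (6, 8)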



mindspore/python/mindspore/parallel/nn/__init__.py (+5, -1)

@@ -27,11 +27,15 @@ NOTE:
from mindspore.nn.transformer import Transformer
"""
from mindspore import log
# pylint: disable=W0614,W0401,W0611
from mindspore.nn.transformer import AttentionMask, VocabEmbedding, MultiHeadAttention, FeedForward, \
    TransformerEncoder, TransformerDecoder, TransformerEncoderLayer, TransformerDecoderLayer, Transformer, \
    TransformerOpParallelConfig, \
    EmbeddingOpParallelConfig, TransformerRecomputeConfig, MoEConfig, FixedSparseAttention, CrossEntropyLoss, \
    OpParallelConfig

__all__ = ["AttentionMask", "VocabEmbedding", "MultiHeadAttention", "FeedForward", "TransformerEncoder",
           "TransformerDecoder", "TransformerEncoderLayer", "TransformerDecoderLayer", "Transformer",
           "TransformerOpParallelConfig", "EmbeddingOpParallelConfig", "TransformerRecomputeConfig", "MoEConfig",
           "FixedSparseAttention", "CrossEntropyLoss", "OpParallelConfig"]

log.warning("'mindspore.parallel.nn' will be deprecated in the future. Please use 'mindspore.nn.transformer' instead.")

mindspore/python/mindspore/parallel/nn/layers.py (+2, -1)

@@ -16,5 +16,6 @@
The basic layer of the Transformer Networks. This is an experimental interface that is subject to
change or deletion.
"""
# pylint: disable=W0614,W0401,W0611
from mindspore.nn.transformer.layers import FixedSparseAttention

__all__ = ["FixedSparseAttention"]

mindspore/python/mindspore/parallel/nn/loss.py (+2, -1)

@@ -16,5 +16,6 @@
Parallel Loss for the Parallel Training
This is an experimental interface that is subject to change or deletion.
"""
# pylint: disable=W0614,W0401,W0611
from mindspore.nn.transformer.loss import CrossEntropyLoss

__all__ = ["CrossEntropyLoss"]

mindspore/python/mindspore/parallel/nn/moe.py (+2, -1)

@@ -15,5 +15,6 @@
"""
Note: Mixture of Expert (MoE) structure. This is an experimental interface that is subject to change or deletion.
"""
# pylint: disable=W0614,W0401,W0611
from mindspore.nn.transformer.moe import MoEConfig, default_moe_config

__all__ = ["MoEConfig", "default_moe_config"]

mindspore/python/mindspore/parallel/nn/op_parallel_config.py (+2, -1)

@@ -16,5 +16,6 @@
Parallel Config for the Parallel Training
This is an experimental interface that is subject to change and/or deletion.
"""
# pylint: disable=W0614,W0401,W0611
from mindspore.nn.transformer.op_parallel_config import OpParallelConfig, default_dpmp_config

__all__ = ["OpParallelConfig", "default_dpmp_config"]

mindspore/python/mindspore/parallel/nn/transformer.py (+6, -1)

@@ -16,10 +16,15 @@
Note:
Transformer Networks. This is an experimental interface that is subject to change or deletion.
"""
# pylint: disable=W0614,W0401,W0611
from mindspore.nn.transformer.transformer import AttentionMask, VocabEmbedding, MultiHeadAttention, FeedForward, \
    TransformerEncoder, TransformerDecoder, TransformerEncoderLayer, TransformerDecoderLayer, Transformer, \
    TransformerOpParallelConfig, \
    EmbeddingOpParallelConfig, TransformerRecomputeConfig, \
    default_transformer_config, default_embedding_parallel_config, default_dpmp_config, default_moe_config, \
    default_transformer_recompute_config

__all__ = ["AttentionMask", "VocabEmbedding", "MultiHeadAttention", "FeedForward", "TransformerEncoder",
"TransformerDecoder", "TransformerEncoderLayer", "TransformerDecoderLayer", "Transformer",
"TransformerOpParallelConfig", "EmbeddingOpParallelConfig", "TransformerRecomputeConfig",
"default_transformer_config", "default_embedding_parallel_config", "default_dpmp_config",
"default_moe_config", "default_transformer_recompute_config"]
