!1197 clean pylint in test dir

Merge pull request !1197 from jinyaohui/pylint1
tags/v0.3.0-alpha
mindspore-ci-bot (Gitee) committed 5 years ago
commit a3e29e61f5
100 changed files with 514 additions and 262 deletions
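
Note on the changes: almost every hunk below is one of a handful of mechanical pylint fixes rather than a behavioural change: two blank lines between top-level classes and functions, one blank line between a class docstring and its first method, a space after commas and around binary operators, and continuation lines aligned under the statement they continue. A condensed, hypothetical snippet written in the enforced style (illustration only, not code from this PR):

    class ExampleBlock:
        """Docstring, then one blank line before the first method."""

        def __init__(self, data=(-1000.0, )):  # space after the trailing comma
            self.data = data


    def error_ratio(total_count, count):
        # spaces around binary operators; two blank lines above top-level defs
        return (total_count - count) / total_count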
  1. +4 -2 tests/mindspore_test_framework/apps/bert_attention_submodules.py
  2. +4 -4 tests/mindspore_test_framework/apps/test_bert_check_gradient.py
  3. +9 -8 tests/mindspore_test_framework/apps/test_bert_compare_with_npy.py
  4. +1 -0 tests/mindspore_test_framework/apps/test_bert_ops_check_gradient.py
  5. +3 -0 tests/mindspore_test_framework/apps/test_check_exception.py
  6. +1 -0 tests/mindspore_test_framework/apps/test_lamb_check_loss.py
  7. +1 -0 tests/mindspore_test_framework/apps/test_model_loss.py
  8. +2 -0 tests/mindspore_test_framework/apps/test_no_facade.py
  9. +1 -0 tests/mindspore_test_framework/apps/test_reid_gradient_compare_with_pytorch.py
  10. +2 -0 tests/mindspore_test_framework/components/executor/check_exceptions.py
  11. +2 -0 tests/mindspore_test_framework/components/executor/check_gradient_for_scalar_func.py
  12. +2 -0 tests/mindspore_test_framework/components/executor/check_gradient_wrt_inputs.py
  13. +2 -0 tests/mindspore_test_framework/components/executor/check_gradient_wrt_params.py
  14. +2 -0 tests/mindspore_test_framework/components/executor/check_jacobian_for_scalar_func.py
  15. +2 -0 tests/mindspore_test_framework/components/executor/check_jacobian_wrt_inputs.py
  16. +2 -0 tests/mindspore_test_framework/components/executor/check_jacobian_wrt_params.py
  17. +2 -0 tests/mindspore_test_framework/components/executor/exec_and_verify_model_loss.py
  18. +2 -0 tests/mindspore_test_framework/components/executor/exec_forward.py
  19. +2 -0 tests/mindspore_test_framework/components/executor/exec_gradient.py
  20. +2 -0 tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_group_for_expect_result.py
  21. +2 -0 tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_id_for_expect_result.py
  22. +2 -0 tests/mindspore_test_framework/components/facade/me_facade.py
  23. +2 -0 tests/mindspore_test_framework/components/function/compile_block.py
  24. +2 -0 tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py
  25. +2 -0 tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py
  26. +2 -0 tests/mindspore_test_framework/components/function/get_function_from_config.py
  27. +2 -0 tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_block.py
  28. +1 -0 tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py
  29. +1 -0 tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py
  30. +2 -0 tests/mindspore_test_framework/components/function/run_block.py
  31. +1 -0 tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py
  32. +1 -0 tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py
  33. +1 -0 tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_group_for_function_inputs.py
  34. +2 -0 tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_id_for_function_inputs.py
  35. +9 -0 tests/mindspore_test_framework/components/icomponent.py
  36. +2 -0 tests/mindspore_test_framework/components/inputs/generate_dataset_for_linear_regression.py
  37. +2 -0 tests/mindspore_test_framework/components/inputs/generate_inputs_from_shape.py
  38. +2 -0 tests/mindspore_test_framework/components/inputs/get_inputs_from_config.py
  39. +2 -0 tests/mindspore_test_framework/components/inputs/load_inputs_from_npy.py
  40. +2 -0 tests/mindspore_test_framework/components/verifier/compare_forward.py
  41. +2 -0 tests/mindspore_test_framework/components/verifier/compare_gradient.py
  42. +2 -0 tests/mindspore_test_framework/components/verifier/verify_expect_from_npy.py
  43. +2 -0 tests/mindspore_test_framework/components/verifier/verify_shapetype.py
  44. +4 -2 tests/mindspore_test_framework/mindspore_test.py
  45. +2 -2 tests/mindspore_test_framework/pipeline/forward/compare_forward.py
  46. +5 -5 tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py
  47. +2 -2 tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py
  48. +22 -0 tests/mindspore_test_framework/utils/block_util.py
  49. +5 -0 tests/mindspore_test_framework/utils/bprop_util.py
  50. +4 -1 tests/mindspore_test_framework/utils/check_gradient.py
  51. +1 -0 tests/mindspore_test_framework/utils/compare_util.py
  52. +4 -0 tests/mindspore_test_framework/utils/config_util.py
  53. +1 -0 tests/mindspore_test_framework/utils/dataset_util.py
  54. +5 -1 tests/mindspore_test_framework/utils/debug_util.py
  55. +4 -2 tests/mindspore_test_framework/utils/facade_util.py
  56. +3 -0 tests/mindspore_test_framework/utils/keyword.py
  57. +11 -0 tests/mindspore_test_framework/utils/model_util.py
  58. +2 -0 tests/mindspore_test_framework/utils/npy_util.py
  59. +6 -1 tests/mindspore_test_framework/utils/other_util.py
  60. +6 -4 tests/mindspore_test_framework/utils/verifier_util.py
  61. +18 -1 tests/ops_common.py
  62. +1 -1 tests/st/auto_parallel/test_expand_loss.py
  63. +1 -1 tests/st/auto_parallel/test_model_parallel_onehot.py
  64. +12 -3 tests/st/control/test_cont_break.py
  65. +6 -2 tests/st/gnn/aggregator.py
  66. +1 -0 tests/st/gnn/test_gnn_aggregator.py
  67. +4 -4 tests/st/nccl/test_nccl_all.py
  68. +2 -2 tests/st/nccl/test_nccl_lenet.py
  69. +1 -0 tests/st/nccl/test_nccl_reduce_scatter_op.py
  70. +15 -6 tests/st/networks/models/bert/bert_tdt_lossscale.py
  71. +2 -2 tests/st/networks/test_gpu_alexnet.py
  72. +5 -4 tests/st/networks/test_gpu_lstm.py
  73. +5 -4 tests/st/networks/test_gpu_resnet.py
  74. +5 -5 tests/st/ops/cpu/test_argmax_op.py
  75. +12 -8 tests/st/ops/cpu/test_bias_add.py
  76. +10 -6 tests/st/ops/cpu/test_bias_add_grad.py
  77. +24 -21 tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
  78. +22 -19 tests/st/ops/cpu/test_conv2d_backprop_input_op.py
  79. +5 -5 tests/st/ops/cpu/test_conv2d_op.py
  80. +4 -5 tests/st/ops/cpu/test_equalcount_op.py
  81. +1 -2 tests/st/ops/cpu/test_maxpool_grad_op.py
  82. +5 -0 tests/st/ops/cpu/test_maxpool_op.py
  83. +5 -3 tests/st/ops/cpu/test_momentum_op.py
  84. +16 -14 tests/st/ops/cpu/test_mul_op.py
  85. +4 -1 tests/st/ops/cpu/test_relu_grad_op.py
  86. +6 -3 tests/st/ops/cpu/test_relu_op.py
  87. +4 -3 tests/st/ops/cpu/test_softmax_op.py
  88. +11 -10 tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py
  89. +4 -1 tests/st/ops/custom_ops_tbe/conv2d.py
  90. +19 -14 tests/st/ops/custom_ops_tbe/conv_layer.py
  91. +6 -3 tests/st/ops/custom_ops_tbe/conv_layer_fast.py
  92. +5 -3 tests/st/ops/custom_ops_tbe/cus_conv2d.py
  93. +1 -0 tests/st/ops/custom_ops_tbe/cus_square.py
  94. +12 -9 tests/st/ops/custom_ops_tbe/test_cus_conv.py
  95. +5 -2 tests/st/ops/custom_ops_tbe/test_square.py
  96. +1 -0 tests/st/ops/gpu/test_addn_op.py
  97. +11 -9 tests/st/ops/gpu/test_argmax_op.py
  98. +13 -11 tests/st/ops/gpu/test_assign_add_op.py
  99. +2 -0 tests/st/ops/gpu/test_assign_op.py
  100. +45 -41 tests/st/ops/gpu/test_batch_matmul.py

+4 -2 tests/mindspore_test_framework/apps/bert_attention_submodules.py

@@ -166,7 +166,7 @@ class BertAttentionMask(nn.Cell):

super(BertAttentionMask, self).__init__()
self.has_attention_mask = has_attention_mask
self.multiply_data = Tensor([-1000.0,], dtype=dtype)
self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
self.multiply = P.Mul()

if self.has_attention_mask:
@@ -189,6 +189,7 @@ class BertAttentionMask(nn.Cell):

return attention_scores


class BertAttentionMaskBackward(nn.Cell):
def __init__(self,
attention_mask_shape,
@@ -196,7 +197,7 @@ class BertAttentionMaskBackward(nn.Cell):
dtype=mstype.float32):
super(BertAttentionMaskBackward, self).__init__()
self.has_attention_mask = has_attention_mask
self.multiply_data = Tensor([-1000.0,], dtype=dtype)
self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
self.multiply = P.Mul()
self.attention_mask = Tensor(np.ones(shape=attention_mask_shape).astype(np.float32))
if self.has_attention_mask:
@@ -218,6 +219,7 @@ class BertAttentionMaskBackward(nn.Cell):
attention_scores = self.add(adder, attention_scores)
return attention_scores


class BertAttentionSoftmax(nn.Cell):
def __init__(self,
batch_size,


+4 -4 tests/mindspore_test_framework/apps/test_bert_check_gradient.py

@@ -20,7 +20,7 @@ import numpy as np
from mindspore.model_zoo.Bert_NEZHA import GetNextSentenceOutput, BertNetworkWithLoss
from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \
EmbeddingLookup, EmbeddingPostprocessor, BertOutput, \
BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal,\
BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal, \
BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel

from mindspore import context, nn
@@ -373,9 +373,9 @@ verification_set = {
'id': 'BertDense_CICase',
'group': 'bert',
'block': BertDense(
hidden_size=8,
intermediate_size=8,
initializer_range=0.02),
hidden_size=8,
intermediate_size=8,
initializer_range=0.02),
'reduce_output': False
},
{


+9 -8 tests/mindspore_test_framework/apps/test_bert_compare_with_npy.py

@@ -19,17 +19,18 @@ import numpy as np
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore.model_zoo.Bert_NEZHA.bert_model import BertAttention, SaturateCast, \
EmbeddingLookup, BertModel, \
BertConfig, EmbeddingPostprocessor, \
BertTransformer, BertEncoderCell, \
BertSelfAttention, CreateAttentionMaskFromInputMask, \
RelaPosMatrixGenerator, BertOutput, \
RelaPosEmbeddingsGenerator
EmbeddingLookup, BertModel, \
BertConfig, EmbeddingPostprocessor, \
BertTransformer, BertEncoderCell, \
BertSelfAttention, CreateAttentionMaskFromInputMask, \
RelaPosMatrixGenerator, BertOutput, \
RelaPosEmbeddingsGenerator

from ..mindspore_test import mindspore_test
from ..pipeline.forward.compare_forward import pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy
from ..pipeline.forward.compare_forward import \
pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy
from .bert_attention_submodules import BertAttentionQueryKeyMul, BertAttentionRelativePositionKeys, BertAttentionMask, \
BertAttentionSoftmax, BertAttentionRelativePositionValues, BertDense
BertAttentionSoftmax, BertAttentionRelativePositionValues, BertDense

verification_set = {
'inputs': [


+1 -0 tests/mindspore_test_framework/apps/test_bert_ops_check_gradient.py

@@ -22,6 +22,7 @@ from ..pipeline.gradient.compare_gradient import \
pipeline_for_compare_inputs_grad_with_numerical_diff_for_group_by_group_config, \
pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_group_by_group_config
from ..mindspore_test import mindspore_test

# from ...vm_impl import *




+3 -0 tests/mindspore_test_framework/apps/test_check_exception.py

@@ -18,9 +18,11 @@
from ..mindspore_test import mindspore_test
from ..pipeline.forward.verify_exception import pipeline_for_verify_exception_for_case_by_case_config


def func_raise_exception(x, y):
raise ValueError()


verification_set = [
('func_raise_exception', {
'block': (func_raise_exception, {'exception': ValueError}),
@@ -28,6 +30,7 @@ verification_set = [
})
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
return verification_set

+1 -0 tests/mindspore_test_framework/apps/test_lamb_check_loss.py

@@ -42,6 +42,7 @@ verification_set = [
})
]


@mindspore_test(pipeline_for_check_model_loss_for_case_by_case_config)
def test_lamb_loss():
context.set_context(mode=context.GRAPH_MODE)


+1 -0 tests/mindspore_test_framework/apps/test_model_loss.py

@@ -40,6 +40,7 @@ verification_set = [
})
]


@mindspore_test(pipeline_for_check_model_loss_for_case_by_case_config)
def test_model_loss():
context.set_context(mode=context.GRAPH_MODE)


+2 -0 tests/mindspore_test_framework/apps/test_no_facade.py

@@ -21,6 +21,8 @@ import numpy as np

from ..mindspore_test import mindspore_test
from ..pipeline.forward.verify_shapetype import pipeline_for_verify_shapetype_for_group_by_group_config


# from ...vm_impl import *

# functions could be operations or NN cell


+1 -0 tests/mindspore_test_framework/apps/test_reid_gradient_compare_with_pytorch.py

@@ -53,6 +53,7 @@ verification_set = [
})
]


@mindspore_test(pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config)
def test_reid_check_gradient():
context.set_context(mode=context.PYNATIVE_MODE)


+2 -0 tests/mindspore_test_framework/components/executor/check_exceptions.py

@@ -21,6 +21,7 @@ import pytest
from ...components.icomponent import IExectorComponent
from ...utils import keyword


class CheckExceptionsEC(IExectorComponent):
"""
Check if the function raises the expected Exception and the error message contains specified keywords if not None.
@@ -32,6 +33,7 @@ class CheckExceptionsEC(IExectorComponent):
'error_keywords': ['TensorAdd', 'shape']
}
"""

def __call__(self):
f = self.function[keyword.block]
args = self.inputs[keyword.desc_inputs]


+2 -0 tests/mindspore_test_framework/components/executor/check_gradient_for_scalar_func.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.check_gradient import check_gradient, ScalarGradChecker
from ...utils.config_util import get_grad_checking_options


class CheckGradientForScalarFunctionEC(IExectorComponent):
"""
Check gradient against numeric with respect to inputs for scalar function, execute and verify.
@@ -26,6 +27,7 @@ class CheckGradientForScalarFunctionEC(IExectorComponent):
Examples:
'block': scalar_function
"""

def __call__(self):
f, args, delta, max_error, input_selector, output_selector, sampling_times, _ = \
get_grad_checking_options(self.function, self.inputs)


+2 -0 tests/mindspore_test_framework/components/executor/check_gradient_wrt_inputs.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.check_gradient import check_gradient, OperationGradChecker
from ...utils.config_util import get_grad_checking_options


class CheckGradientWrtInputsEC(IExectorComponent):
"""
Check gradient against numeric with respect to inputs, execute and verify.
@@ -35,6 +36,7 @@ class CheckGradientWrtInputsEC(IExectorComponent):
key_act=None,
initializer_range=0.02)
"""

def __call__(self):
f, args, delta, max_error, input_selector, output_selector, \
sampling_times, reduce_output = get_grad_checking_options(self.function, self.inputs)


+2 -0 tests/mindspore_test_framework/components/executor/check_gradient_wrt_params.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.check_gradient import check_gradient, NNGradChecker
from ...utils.config_util import get_grad_checking_options


class CheckGradientWrtParamsEC(IExectorComponent):
"""
Check gradient against numeric with respect to params, execute and verify.
@@ -35,6 +36,7 @@ class CheckGradientWrtParamsEC(IExectorComponent):
key_act=None,
initializer_range=0.02)
"""

def __call__(self):
f, args, delta, max_error, input_selector, output_selector, \
sampling_times, reduce_output = get_grad_checking_options(self.function, self.inputs)


+2 -0 tests/mindspore_test_framework/components/executor/check_jacobian_for_scalar_func.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.check_gradient import check_jacobian, ScalarGradChecker
from ...utils.config_util import get_grad_checking_options


class CheckJacobianForScalarFunctionEC(IExectorComponent):
"""
Check jacobian against numeric with respect to inputs for scalar_func, execute and verify.
@@ -26,6 +27,7 @@ class CheckJacobianForScalarFunctionEC(IExectorComponent):
Examples:
'block': scalar_function
"""

def __call__(self):
f, args, delta, max_error, input_selector, output_selector, _, _ = \
get_grad_checking_options(self.function, self.inputs)


+2 -0 tests/mindspore_test_framework/components/executor/check_jacobian_wrt_inputs.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.check_gradient import check_jacobian, OperationGradChecker
from ...utils.config_util import get_grad_checking_options


class CheckJacobianWrtInputsEC(IExectorComponent):
"""
Check jacobian against numeric with respect to inputs, execute and verify.
@@ -35,6 +36,7 @@ class CheckJacobianWrtInputsEC(IExectorComponent):
key_act=None,
initializer_range=0.02)
"""

def __call__(self):
f, args, delta, max_error, input_selector, output_selector, _, _ = \
get_grad_checking_options(self.function, self.inputs)


+2 -0 tests/mindspore_test_framework/components/executor/check_jacobian_wrt_params.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.check_gradient import check_jacobian, NNGradChecker
from ...utils.config_util import get_grad_checking_options


class CheckJacobianWrtParamsEC(IExectorComponent):
"""
Check jacobian against numeric with respect to params, execute and verify.
@@ -35,6 +36,7 @@ class CheckJacobianWrtParamsEC(IExectorComponent):
key_act=None,
initializer_range=0.02)
"""

def __call__(self):
f, args, delta, max_error, input_selector, output_selector, _, _ = \
get_grad_checking_options(self.function, self.inputs)


+2 -0 tests/mindspore_test_framework/components/executor/exec_and_verify_model_loss.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
from ...utils.model_util import Model
from ...utils import keyword


class LossVerifierEC(IExectorComponent):
"""
Verify if the model can converge to expected loss.
@@ -32,6 +33,7 @@ class LossVerifierEC(IExectorComponent):
'loss_upper_bound': 0.03,
}
"""

def __call__(self):
model = self.function[keyword.block][keyword.model]
loss = self.function[keyword.block][keyword.loss]


+2 -0 tests/mindspore_test_framework/components/executor/exec_forward.py

@@ -18,10 +18,12 @@
from ...components.icomponent import IExectorComponent
from ...utils import keyword


class IdentityEC(IExectorComponent):
"""
Execute function/inputs.
"""

def __call__(self):
result_id = self.function[keyword.id] + '-' + self.inputs[keyword.id]
group = self.function[keyword.group] + '-' + self.inputs[keyword.group]


+2 -0 tests/mindspore_test_framework/components/executor/exec_gradient.py

@@ -18,10 +18,12 @@
from ...components.icomponent import IExectorComponent
from ...utils import keyword


class IdentityBackwardEC(IExectorComponent):
"""
Execute function/inputs, with all bprops attached, the bprop function created by BC should handle these bprops.
"""

def __call__(self):
result_id = self.function[keyword.id] + '-' + self.inputs[keyword.id]
group = self.function[keyword.group] + '-' + self.inputs[keyword.group]


+2 -0 tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_group_for_expect_result.py

@@ -18,10 +18,12 @@
from ...components.icomponent import IERPolicyComponent
from ...utils import keyword


class GroupCartesianProductERPC(IERPolicyComponent):
"""
Combine expect/result by do cartesian product on group.
"""

def __call__(self):
ret = [(s1, s2) for s1 in self.expect for s2 in self.result if s1[keyword.group] == s2[keyword.group]]
return ret

+2 -0 tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_id_for_expect_result.py

@@ -18,10 +18,12 @@
from ...components.icomponent import IERPolicyComponent
from ...utils import keyword


class IdCartesianProductERPC(IERPolicyComponent):
"""
Combine expect/result by do cartesian product on id.
"""

def __call__(self):
ret = [(s1, s2) for s1 in self.expect for s2 in self.result if s1[keyword.id] == s2[keyword.id]]
return ret

+2 -0 tests/mindspore_test_framework/components/facade/me_facade.py

@@ -21,6 +21,7 @@ from ...components.icomponent import IFacadeComponent
from ...utils.facade_util import get_block_config, fill_block_config
from ...utils import keyword


class MeFacadeFC(IFacadeComponent):
"""
Transform ME style config to mindspore_test_framework style.
@@ -47,6 +48,7 @@ class MeFacadeFC(IFacadeComponent):
}
})
"""

def __call__(self):
ret = get_block_config()
for config in self.verification_set:


+2 -0 tests/mindspore_test_framework/components/function/compile_block.py

@@ -18,6 +18,7 @@
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import compile_block, gen_net, create_funcs


class CompileBlockBC(IBuilderComponent):
"""
Build a function that do mindspore compile.
@@ -42,5 +43,6 @@ class CompileBlockBC(IBuilderComponent):
dtype=mstype.float32,
compute_type=mstype.float32)
"""

def __call__(self):
return create_funcs(self.verification_set, gen_net, compile_block)

+2 -0 tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import compile_block, gen_grad_net, create_funcs


class CompileBackwardBlockWrtInputsBC(IBuilderComponent):
"""
Build a function that do mindspore gradient compile with respect to inputs.
@@ -43,6 +44,7 @@ class CompileBackwardBlockWrtInputsBC(IBuilderComponent):
dtype=mstype.float32,
compute_type=mstype.float32)
"""

def __call__(self):
grad_op = GradOperation('grad', get_all=True, sens_param=True)
return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op)

+2 -0 tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import compile_block, gen_grad_net, create_funcs


class CompileBackwardBlockWrtParamsBC(IBuilderComponent):
"""
Build a function that do mindspore gradient compile with respect to params.
@@ -43,6 +44,7 @@ class CompileBackwardBlockWrtParamsBC(IBuilderComponent):
dtype=mstype.float32,
compute_type=mstype.float32)
"""

def __call__(self, verification_set):
grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op)

+2 -0 tests/mindspore_test_framework/components/function/get_function_from_config.py

@@ -18,6 +18,7 @@
from ...components.icomponent import IBuilderComponent
from ...utils import keyword


class IdentityBC(IBuilderComponent):
"""
Return function.
@@ -25,5 +26,6 @@ class IdentityBC(IBuilderComponent):
Examples:
'function': Add
"""

def __call__(self):
return self.verification_set[keyword.function]

+2 -0 tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_block.py

@@ -18,6 +18,7 @@
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import run_block, get_uniform_with_shape, gen_net, create_funcs


class RunBlockWithRandParamBC(IBuilderComponent):
"""
Build a function with uniformed params that run mindspore pipeline.
@@ -42,5 +43,6 @@ class RunBlockWithRandParamBC(IBuilderComponent):
dtype=mstype.float32,
compute_type=mstype.float32)
"""

def __call__(self):
return create_funcs(self.verification_set, gen_net, run_block, default_rand_func=get_uniform_with_shape)

+1 -0 tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_uniform_with_shape


class RunBackwardBlockWrtInputsWithRandParamBC(IBuilderComponent):
def __call__(self):
grad_op = GradOperation('grad', get_all=True, sens_param=True)


+1 -0 tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_uniform_with_shape


class RunBackwardBlockWrtParamsWithRandParamBC(IBuilderComponent):
def __call__(self):
grad_op = GradOperation('grad', get_by_list=True, sens_param=True)


+2 -0 tests/mindspore_test_framework/components/function/run_block.py

@@ -18,6 +18,7 @@
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import run_block, gen_net, create_funcs


class RunBlockBC(IBuilderComponent):
"""
Build a function that run mindspore pipeline.
@@ -42,5 +43,6 @@ class RunBlockBC(IBuilderComponent):
dtype=mstype.float32,
compute_type=mstype.float32)
"""

def __call__(self):
return create_funcs(self.verification_set, gen_net, run_block)

+1 -0 tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import run_block, gen_grad_net, create_funcs


class RunBackwardBlockWrtInputsBC(IBuilderComponent):
def __call__(self):
grad_op = GradOperation('grad', get_all=True, sens_param=True)


+1 -0 tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from ...components.icomponent import IBuilderComponent
from ...utils.block_util import run_block, gen_grad_net, create_funcs


class RunBackwardBlockWrtParamsBC(IBuilderComponent):
def __call__(self):
grad_op = GradOperation('grad', get_by_list=True, sens_param=True)


+1 -0 tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_group_for_function_inputs.py

@@ -23,6 +23,7 @@ class GroupCartesianProductFIPC(IFIPolicyComponent):
"""
Combine function/inputs by do cartesian product on group.
"""

def __call__(self):
ret = [(s1, s2) for s1 in self.function for s2 in self.inputs if s1[keyword.group] == s2[keyword.group]]
return ret

+2 -0 tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_id_for_function_inputs.py

@@ -18,10 +18,12 @@
from ...components.icomponent import IFIPolicyComponent
from ...utils import keyword


class IdCartesianProductFIPC(IFIPolicyComponent):
"""
Combine function/inputs by do cartesian product on id.
"""

def __call__(self):
ret = [(s1, s2) for s1 in self.function for s2 in self.inputs if s1[keyword.id] == s2[keyword.id]]
return ret

+9 -0 tests/mindspore_test_framework/components/icomponent.py

@@ -15,8 +15,10 @@

"""Component interfaces."""


class IComponent:
"""Component interfaces."""

def __init__(self, verification_set):
self.verification_set = verification_set

@@ -26,18 +28,21 @@ class IComponent:

class IDataComponent(IComponent):
"""Create inputs for verification_set."""

def __call__(self):
raise NotImplementedError


class IBuilderComponent(IComponent):
"""Build system under test."""

def __call__(self):
raise NotImplementedError


class IExectorComponent(IComponent):
"""Execute sut, take (function, input) pairs as input."""

def __init__(self, verification_set, function, inputs):
super(IExectorComponent, self).__init__(verification_set)
self.function = function
@@ -49,6 +54,7 @@ class IExectorComponent(IComponent):

class IVerifierComponent(IComponent):
"""Verify sut result, take (expect, result) pairs as input."""

def __init__(self, verification_set, expect, result):
super(IVerifierComponent, self).__init__(verification_set)
self.expect = expect
@@ -60,6 +66,7 @@ class IVerifierComponent(IComponent):

class IFIPolicyComponent(IComponent):
"""Combine functions/inputs."""

def __init__(self, verification_set, function, inputs):
super(IFIPolicyComponent, self).__init__(verification_set)
self.function = function
@@ -71,6 +78,7 @@ class IFIPolicyComponent(IComponent):

class IERPolicyComponent(IComponent):
"""Combine expects and results."""

def __init__(self, verification_set, expect, result):
super(IERPolicyComponent, self).__init__(verification_set)
self.expect = expect
@@ -82,5 +90,6 @@ class IERPolicyComponent(IComponent):

class IFacadeComponent(IComponent):
"""Adapt verification_set."""

def __call__(self):
raise NotImplementedError
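
For orientation: the interfaces above are the abstract hooks the test pipeline is assembled from, and a concrete component just subclasses one and implements __call__. A minimal hypothetical sketch (the class name and import path are assumptions based on this PR's directory layout, not code from it):

    from mindspore_test_framework.components.icomponent import IDataComponent


    class FixedInputsDC(IDataComponent):
        """Return a hard-coded inputs list for every verification set."""

        def __call__(self):
            return [1.0, 2.0]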

+2 -0 tests/mindspore_test_framework/components/inputs/generate_dataset_for_linear_regression.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IDataComponent
from ...utils.dataset_util import generate_dataset_for_linear_regression
from ...utils import keyword


class GenerateDataSetForLRDC(IDataComponent):
"""
Create dataset for linear regression, with salt from normal distribution.
@@ -30,6 +31,7 @@ class GenerateDataSetForLRDC(IDataComponent):
'batch_size': 20,
}
"""

def __call__(self):
result = []
for config in self.verification_set[keyword.inputs]:


+2 -0 tests/mindspore_test_framework/components/inputs/generate_inputs_from_shape.py

@@ -23,6 +23,7 @@ from ...utils.other_util import shape2tensor
from ...utils.config_util import get_input_config
from ...utils import keyword


class GenerateFromShapeDC(IDataComponent):
"""
Generate inputs from shape, desc_inputs must be configured, desc_bprop is optional.
@@ -41,6 +42,7 @@ class GenerateFromShapeDC(IDataComponent):
([1, 16, 128, 64], np.float32, 6), # (inputs, dtype, scale)
]
"""

def __call__(self):
result = []
for config in self.verification_set[keyword.inputs]:


+2 -0 tests/mindspore_test_framework/components/inputs/get_inputs_from_config.py

@@ -17,6 +17,7 @@

from ...components.icomponent import IDataComponent


class IdentityDC(IDataComponent):
"""
Return inputs.
@@ -26,5 +27,6 @@ class IdentityDC(IDataComponent):
np.array([[2, 2], [2, 2]]).astype(np.float32)
]
"""

def __call__(self):
return self.verification_set['inputs']

+2 -0 tests/mindspore_test_framework/components/inputs/load_inputs_from_npy.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IDataComponent
from ...utils.npy_util import load_data_from_npy_or_shape
from ...utils import keyword


class LoadFromNpyDC(IDataComponent):
"""
Load inputs from npy data, inputs could be shape/tensor/np.ndarray/file path.
@@ -43,6 +44,7 @@ class LoadFromNpyDC(IDataComponent):
([2, 2], np.float32, 6)
]
"""

def __call__(self):
result = []
for config in self.verification_set[keyword.inputs]:


+2 -0 tests/mindspore_test_framework/components/verifier/compare_forward.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IVerifierComponent
from ...utils.compare_util import compare
from ...utils import keyword


class CompareWithVC(IVerifierComponent):
"""
Compare the result with baseline functions configured in 'compare' config item.
@@ -41,5 +42,6 @@ class CompareWithVC(IVerifierComponent):
'max_error': 1e-3
}
"""

def __call__(self):
compare(self.expect, self.func_result, baseline=keyword.compare_with)

+2 -0 tests/mindspore_test_framework/components/verifier/compare_gradient.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IVerifierComponent
from ...utils.compare_util import compare
from ...utils import keyword


class CompareGradientWithVC(IVerifierComponent):
"""
Compare the result with baseline functions configured in 'compare_gradient_with' config item.
@@ -35,5 +36,6 @@ class CompareGradientWithVC(IVerifierComponent):
'max_error': 1e-3
}
"""

def __call__(self):
compare(self.expect, self.func_result, baseline=keyword.compare_gradient_with)

+2 -0 tests/mindspore_test_framework/components/verifier/verify_expect_from_npy.py

@@ -22,6 +22,7 @@ from ...utils.npy_util import load_data_from_npy_or_shape
from ...utils.verifier_util import tolerance_assert
from ...utils import keyword


class LoadFromNpyVC(IVerifierComponent):
"""
Verify if the results are like expects from npy data, expects could be shape/tensor/np.ndarray/file path.
@@ -37,6 +38,7 @@ class LoadFromNpyVC(IVerifierComponent):
([2, 2], np.float32, 6, 1e-3) # (shape, dtype, scale, max_error)
]
"""

def __call__(self):
dpaths = self.expect.get(keyword.desc_expect)
expects = load_data_from_npy_or_shape(dpaths, False)


+2 -0 tests/mindspore_test_framework/components/verifier/verify_shapetype.py

@@ -19,6 +19,7 @@ from ...components.icomponent import IVerifierComponent
from ...utils.other_util import to_numpy_list
from ...utils import keyword


class ShapeTypeVC(IVerifierComponent):
"""
Verify if the result's shape and type are correct.
@@ -33,6 +34,7 @@ class ShapeTypeVC(IVerifierComponent):
]
}
"""

def __call__(self):
results = to_numpy_list(self.func_result[keyword.result])
expects = self.expect[keyword.desc_expect][keyword.shape_type]


+4 -2 tests/mindspore_test_framework/mindspore_test.py

@@ -18,10 +18,11 @@
import logging
import pytest
from .components.icomponent import IDataComponent, IBuilderComponent, IExectorComponent, \
IVerifierComponent, IFIPolicyComponent, IERPolicyComponent, IComponent, \
IFacadeComponent
IVerifierComponent, IFIPolicyComponent, IERPolicyComponent, IComponent, \
IFacadeComponent
from .utils import keyword


def mindspore_test(verification_pipeline):
"""
Run verification pipeline.
@@ -31,6 +32,7 @@ def mindspore_test(verification_pipeline):

Returns:
"""

def decorate(get_verification_set):
verification_set = get_verification_set()



+2 -2 tests/mindspore_test_framework/pipeline/forward/compare_forward.py

@@ -107,7 +107,7 @@ Example:
]
}
"""
pipeline_for_compare_forward_with_npy_for_group_by_group_config =\
pipeline_for_compare_forward_with_npy_for_group_by_group_config = \
[LoadFromNpyDC, RunBlockWithRandParamBC, IdCartesianProductFIPC,
IdentityEC, IdCartesianProductERPC, LoadFromNpyVC]

@@ -161,7 +161,7 @@ Example:
]
}
"""
pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy =\
pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy = \
[LoadFromNpyDC, RunBlockWithRandParamBC,
GroupCartesianProductFIPC, IdentityEC,
IdCartesianProductERPC, LoadFromNpyVC]

+5 -5 tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py

@@ -49,7 +49,7 @@ Example:
})
]
"""
pipeline_for_compare_inputs_grad_with_user_defined_for_case_by_case_config =\
pipeline_for_compare_inputs_grad_with_user_defined_for_case_by_case_config = \
[MeFacadeFC, GenerateFromShapeDC,
RunBackwardBlockWrtInputsBC, IdCartesianProductFIPC,
IdentityBackwardEC, IdCartesianProductERPC,
@@ -89,7 +89,7 @@ Example:
})
]
"""
pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config =\
pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config = \
[MeFacadeFC, LoadFromNpyDC, RunBackwardBlockWrtInputsBC,
IdCartesianProductFIPC, IdentityBackwardEC,
IdCartesianProductERPC, LoadFromNpyVC]
@@ -128,7 +128,7 @@ Example:
})
]
"""
pipeline_for_compare_params_grad_with_npy_for_case_by_case_config =\
pipeline_for_compare_params_grad_with_npy_for_case_by_case_config = \
[MeFacadeFC, LoadFromNpyDC, RunBackwardBlockWrtParamsBC,
IdCartesianProductFIPC, IdentityBackwardEC,
IdCartesianProductERPC, LoadFromNpyVC]
@@ -146,7 +146,7 @@ Example:
})
]
"""
pipeline_for_compare_inputs_grad_with_numerical_diff_for_case_by_case_config =\
pipeline_for_compare_inputs_grad_with_numerical_diff_for_case_by_case_config = \
[MeFacadeFC, GenerateFromShapeDC, IdentityBC,
IdCartesianProductFIPC,
CheckGradientWrtInputsEC]
@@ -244,7 +244,7 @@ Example:
}),
]
"""
pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_case_by_case_config =\
pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_case_by_case_config = \
[MeFacadeFC, GenerateFromShapeDC, IdentityBC,
IdCartesianProductFIPC,
CheckJacobianWrtInputsEC]


+2 -2 tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py

@@ -35,7 +35,7 @@ Example:
})
]
"""
pipeline_for_compile_grad_anf_graph_for_case_by_case_config =\
pipeline_for_compile_grad_anf_graph_for_case_by_case_config = \
[MeFacadeFC, GenerateFromShapeDC, CompileBackwardBlockWrtInputsBC,
IdCartesianProductFIPC, IdentityBackwardEC]

@@ -51,6 +51,6 @@ Example:
})
]
"""
pipeline_for_compile_grad_ge_graph_for_case_by_case_config =\
pipeline_for_compile_grad_ge_graph_for_case_by_case_config = \
[MeFacadeFC, GenerateFromShapeDC, RunBackwardBlockWrtInputsBC,
IdCartesianProductFIPC, IdentityBackwardEC]

+22 -0 tests/mindspore_test_framework/utils/block_util.py

@@ -28,10 +28,12 @@ from mindspore.ops import operations as P
from mindspore import ParameterTuple
from . import keyword


def get_uniform_with_shape(shape):
np.random.seed(1)
return np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)


def set_block_param_with_rand(net, rand_func=None):
if not isinstance(net, nn.Cell) or rand_func is None:
return
@@ -39,11 +41,13 @@ def set_block_param_with_rand(net, rand_func=None):
for param in net.trainable_params():
param.default_input = Tensor(rand_func(param.default_input.asnumpy().shape))


def compile_block(net, *inputs, rand_func=None, training=True):
set_block_training(net, training)
set_block_param_with_rand(net, rand_func)
return _executor.compile(net, *inputs)


def run_block(net, *inputs, rand_func=None, training=True):
set_block_training(net, training)
set_block_param_with_rand(net, rand_func)
@@ -52,10 +56,13 @@ def run_block(net, *inputs, rand_func=None, training=True):
@ms_function
def _func_pynative(*inputs):
return net(*inputs)

return _func_pynative(*inputs)

return func_pynative(*inputs)
return net(*inputs)


class IthOutputCell(nn.Cell):
def __init__(self, network, output_index):
if isinstance(network, nn.Cell):
@@ -69,12 +76,14 @@ class IthOutputCell(nn.Cell):
predict = self.network(*inputs)[self.output_index]
return predict


def get_output_cell(network, num_input, output_index, training=True):
_ = num_input
net = IthOutputCell(network, output_index)
set_block_training(net, training)
return net


class OutputReduceSumCell(nn.Cell):
def __init__(self, network, output_num):
super(OutputReduceSumCell, self).__init__()
@@ -92,11 +101,13 @@ class OutputReduceSumCell(nn.Cell):
ret = ret + F.make_tuple(predict_reduce)
return ret


def get_output_reduce_cell(network, output_num, training=True):
net = OutputReduceSumCell(network, output_num)
set_block_training(net, training)
return net


class InputOpNet(nn.Cell):
def __init__(self, op, c1=None, c2=None, c3=None, c4=None):
super(InputOpNet, self).__init__()
@@ -112,6 +123,7 @@ class InputOpNet(nn.Cell):
def construct0_c0_fake(self, data):
x = self.op() + data
return x

def construct0_c1_fake(self, data):
x = self.op(self.c1) + data
return x
@@ -212,6 +224,7 @@ class InputOpNet(nn.Cell):
x = self.op(x1, x2, x3, x4, x5, self.c1, self.c2, self.c3, self.c4)
return x


def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False):
if isinstance(op, nn.Cell):
return op
@@ -227,6 +240,7 @@ def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_
set_block_training(net, training)
return net


class OperationBackward(nn.Cell):
def __init__(self, network, grad_op, sens):
if isinstance(network, nn.Cell):
@@ -240,6 +254,7 @@ class OperationBackward(nn.Cell):
def construct(self, *inputs):
return self.grad(self.network)(*inputs, self.sens)


class OperationBackwardWithNoSens(nn.Cell):
def __init__(self, network, grad_op):
if isinstance(network, nn.Cell):
@@ -252,6 +267,7 @@ class OperationBackwardWithNoSens(nn.Cell):
def construct(self, *inputs):
return self.grad(self.network)(*inputs)


class NNBackward(nn.Cell):
def __init__(self, network, grad_op, sens):
if isinstance(network, nn.Cell):
@@ -266,6 +282,7 @@ class NNBackward(nn.Cell):
def construct(self, *inputs):
return self.grad(self.network, self.params)(*inputs, self.sens)


class NNBackwardWithNoSens(nn.Cell):
def __init__(self, network, grad_op):
if isinstance(network, nn.Cell):
@@ -279,6 +296,7 @@ class NNBackwardWithNoSens(nn.Cell):
def construct(self, *inputs):
return self.grad(self.network, self.params)(*inputs)


def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(),
const_first=False, add_fake_input=False):
if not isinstance(net, nn.Cell):
@@ -296,14 +314,17 @@ def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(
set_block_training(net, training)
return net


def set_block_training(net, training=True):
if isinstance(net, nn.Cell):
net.set_train(training)


def set_block_phase(net, phase='train'):
if isinstance(net, nn.Cell):
net.phase = phase


def create_funcs(verification_set, block_generator, block_runner, grad_op=None, default_rand_func=None):
def create_func(block, num_outputs, rand_func, desc_const, const_first, add_fake_input, split_outputs):
def function(*inputs):
@@ -347,6 +368,7 @@ def create_funcs(verification_set, block_generator, block_runner, grad_op=None,
b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first,
add_fake_input=add_fake_input)
return block_runner(b, *inputs, rand_func=rand_func)

return function

bc_configs = verification_set[keyword.function]


+5 -0 tests/mindspore_test_framework/utils/bprop_util.py

@@ -21,10 +21,12 @@ from mindspore.nn import Cell
from mindspore.common import ParameterTuple
from mindspore.common.api import ms_function


class Bprop(Cell):
"""
The gradient wraper.
"""

def __init__(self, func, wrt_params, params, grad_op, sens):
super(Bprop, self).__init__(auto_prefix=False)
self.func = func
@@ -50,6 +52,7 @@ class Bprop(Cell):
else:
return self.grad(self.func)(*inputs)


def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list = None):
"""
Compute gradients of function.
@@ -90,6 +93,8 @@ def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list
@ms_function
def _func_pynative(*inputs):
return grad(*inputs)

return _func_pynative(*inputs)

return func_pynative(*inputs)
return grad(*inputs)

+4 -1 tests/mindspore_test_framework/utils/check_gradient.py

@@ -27,6 +27,7 @@ import mindspore._c_expression as _c_expression
from .block_util import get_output_cell, gen_net, gen_grad_net, \
get_uniform_with_shape, set_block_phase, get_output_reduce_cell, set_block_param_with_rand


class _GradChecker:
"""
Check the theoretical Jacobian against numeric
@@ -130,6 +131,7 @@ class _GradChecker:
@ms_function
def _func_pynative(*inputs):
return net(*inputs)

return _func_pynative(*inputs)

return func_forward_pynative
@@ -277,7 +279,7 @@ class _GradChecker:
print('GradChecker.compute_theoretical.args', args)
gout = self.wrap(self.gfns[out_index](*args))
gout = [self.to_numpy_and_scale(g) if isinstance(g, _c_expression.Tensor) \
else self.to_numpy_and_scale(np.array(g)) for g in gout]
else self.to_numpy_and_scale(np.array(g)) for g in gout]
print('GradChecker.compute_theoretical.gout', gout)
dy_mask.ravel().view()[jacobian_col] = 0.0

@@ -433,6 +435,7 @@ def check_gradient(fn, *args, delta=1e-3, max_error=1e-3,
reduce_output=reduce_output)
grad_checker.assert_match()


def check_jacobian(fn, *args, delta=1e-3, max_error=1e-3,
grad_checker_class=OperationGradChecker,
input_selector=None,


+1 -0 tests/mindspore_test_framework/utils/compare_util.py

@@ -19,6 +19,7 @@ import numpy as np
from .other_util import to_numpy_list
from . import keyword


def compare(expect, func_result, baseline):
"""
Compare results of function with baseline functions.


+4 -0 tests/mindspore_test_framework/utils/config_util.py

@@ -20,6 +20,7 @@ import numpy as np
from . import keyword
from .other_util import select_from_config_tuple


def get_input_config(d):
"""
Get input config.
@@ -38,6 +39,7 @@ def get_input_config(d):
scale = ext_config.get(keyword.scale, 1)
return s, dtype, scale


def get_expect_config(d):
"""
Get input config.
@@ -66,6 +68,7 @@ def get_expect_config(d):
absolute_tolerance = ext_config.get(keyword.absolute_tolerance, 0.0)
return s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance


def get_function_config(function):
"""
Get input config.
@@ -91,6 +94,7 @@ def get_function_config(function):
return delta, max_error, input_selector, output_selector, sampling_times, \
reduce_output, init_param_with, split_outputs, exception, error_keywords


def get_grad_checking_options(function, inputs):
"""
Get input config.


+1 -0 tests/mindspore_test_framework/utils/dataset_util.py

@@ -19,6 +19,7 @@ import random
import numpy as np
from mindspore import Tensor


def generate_dataset_for_linear_regression(true_w, true_b, num_samples, batch_size):
features = np.random.normal(scale=1, size=(num_samples, len(true_w)))
labels = np.matmul(features, np.reshape(np.array(true_w), (-1, 1))) + true_b


+5 -1 tests/mindspore_test_framework/utils/debug_util.py

@@ -24,9 +24,10 @@ from mindspore.ops._grad.grad_base import bprop_getters
from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer

logging.basicConfig(level=logging.DEBUG, format=
'[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
'[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
logger = logging.getLogger(__name__)


class PrintShapeType(PrimitiveWithInfer):
"""
PrintShapeType input's shape and type.
@@ -78,14 +79,17 @@ class PrintShapeType(PrimitiveWithInfer):
@bprop_getters.register(PrintShapeType)
def get_bprop_print_shape_type(self):
"""Generate bprop for PrintShapeType"""

def bprop(x, out, dout):
return (dout,)

return bprop


class PrintShapeTypeCell(nn.Cell):
def __init__(self):
super(PrintShapeTypeCell, self).__init__()

def construct(self, msg, x):
PrintShapeType(msg)(x)
return x


+4 -2 tests/mindspore_test_framework/utils/facade_util.py

@@ -18,6 +18,7 @@
from . import keyword
from .config_util import get_function_config


def get_block_config():
"""
Get Empty function config.
@@ -28,6 +29,7 @@ def get_block_config():
ret[keyword.expect] = []
return ret


def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, expect,
desc_const, const_first, add_fake_input, fake_input_type):
"""
@@ -95,7 +97,7 @@ def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, ex

if expect:
expect_list.append({
keyword.id: tid+'-'+tid,
keyword.group: group+'-'+group,
keyword.id: tid + '-' + tid,
keyword.group: group + '-' + group,
keyword.desc_expect: expect
})

+3 -0 tests/mindspore_test_framework/utils/keyword.py

@@ -17,11 +17,14 @@

import sys


class _MindsporeTestFrameworkkeyword:
def __setattr__(self, name, value):
if name in self.__dict__:
raise TypeError("can not rebind keyword (%s)" % name)
self.__dict__[name] = value


keyword = _MindsporeTestFrameworkkeyword()

keyword.function = "function"
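
The __setattr__ guard above makes the keyword registry write-once: the first binding of a name succeeds and any rebind raises. A self-contained restatement of the pattern:

    class _Keywords:
        """Write-once attribute registry, same pattern as keyword.py above."""

        def __setattr__(self, name, value):
            if name in self.__dict__:
                raise TypeError("can not rebind keyword (%s)" % name)
            self.__dict__[name] = value


    kw = _Keywords()
    kw.function = "function"  # first binding succeeds
    # kw.function = "fn"      # would raise TypeError: can not rebind keyword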


+11 -0 tests/mindspore_test_framework/utils/model_util.py

@@ -24,8 +24,10 @@ from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C


class SquaredLoss(nn.Cell):
"""Squared loss function."""

def __init__(self):
super(SquaredLoss, self).__init__()
self.reshape = P.Reshape()
@@ -37,7 +39,10 @@ class SquaredLoss(nn.Cell):
ret = y_hat - self.reshape(y, self.shape(y_hat))
return self.reduce_sum((ret * ret) / self.two, (0,))


opt_step = C.MultitypeFuncGraph("opt_step")


@opt_step.register("Tensor", "Tensor",
"Tensor", "Tensor")
def update_opt_step(learning_rate, batch_size, parameter, gradient):
@@ -56,8 +61,10 @@ def update_opt_step(learning_rate, batch_size, parameter, gradient):
F.assign(parameter, next_param)
return next_param


class SGD(nn.Cell):
"""SGD optimizer."""

def __init__(self, parameters, learning_rate=0.001, batch_size=1):
super(SGD, self).__init__()
self.parameters = ParameterTuple(parameters)
@@ -73,8 +80,10 @@ class SGD(nn.Cell):
self.parameters, gradients)
return success


class Linreg(nn.Cell):
"""Linear regression model."""

def __init__(self, num_features):
super(Linreg, self).__init__()
self.matmul = P.MatMul()
@@ -84,8 +93,10 @@ class Linreg(nn.Cell):
def construct(self, x):
return self.matmul(x, self.w) + self.b


class Model:
"""Simplified model."""

def __init__(self, network, loss_fn, optimizer):
self.optimizer = optimizer
self.step = nn.TrainOneStepCell(nn.WithLossCell(network, loss_fn), self.optimizer)
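
update_opt_step above is registered for (learning_rate, batch_size, parameter, gradient) and ends with F.assign(parameter, next_param); the update formula itself is not shown in this hunk, but the signature suggests plain mini-batch SGD. A numpy restatement under that assumption (hypothetical, not the PR's code):

    import numpy as np


    def sgd_step(parameter, gradient, learning_rate=0.001, batch_size=1):
        # assumed rule: parameter <- parameter - lr * gradient / batch_size
        return parameter - learning_rate * gradient / batch_size


    w = np.array([1.0, -2.0])
    print(sgd_step(w, np.array([0.5, 0.5]), learning_rate=0.1))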


+2 -0 tests/mindspore_test_framework/utils/npy_util.py

@@ -22,6 +22,7 @@ from mindspore.common.tensor import Tensor
from .other_util import shape2tensor
from .config_util import get_expect_config


def load_npy(p):
s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance = get_expect_config(p)
if isinstance(s, str):
@@ -33,6 +34,7 @@ def load_npy(p):
ret = shape2tensor(s, dtype, scale)
return ret, max_error, check_tolerance, relative_tolerance, absolute_tolerance


def load_data_from_npy_or_shape(dpaths, skip_expect_config=True):
ret = []
for p in dpaths:


+6 -1 tests/mindspore_test_framework/utils/other_util.py

@@ -20,11 +20,13 @@ import numpy as np
import mindspore._c_expression as _c_expression
from mindspore.common.tensor import Tensor


def wrap(x):
if isinstance(x, (tuple, list)):
return x
return (x,)


def to_numpy_list(tl):
tl = wrap(tl)
ret = []
@@ -35,11 +37,13 @@ def to_numpy_list(tl):
ret.append(x)
return ret


def to_numpy(x):
if isinstance(x, (Tensor, _c_expression.Tensor)):
return x.asnumpy()
return x


def shape2tensor(shp, dtype=np.float32, scale=6):
if isinstance(shp, list):
if not shp:
@@ -47,11 +51,12 @@ def shape2tensor(shp, dtype=np.float32, scale=6):
return Tensor((np.random.rand(*shp) * scale).astype(dtype))
return shp


def select_from_config_tuple(t, index, default):
if not isinstance(t, tuple):
return default
if not isinstance(t[-1], dict):
return default
if index > len(t)-1:
if index > len(t) - 1:
return default
return t[index]

+6 -4 tests/mindspore_test_framework/utils/verifier_util.py

@@ -17,6 +17,7 @@

import numpy as np


def count_unequal_element(expect, result, rtol, atol):
"""
Count unequal element.
@@ -33,15 +34,16 @@ def count_unequal_element(expect, result, rtol, atol):
raise ValueError(f'expect.shape {expect.shape}, result.shape {result.shape}')
total_count = len(expect.flatten())
error = np.abs(expect - result)
count = np.count_nonzero(np.less_equal(error, atol + np.abs(result)*rtol))
if ((total_count-count)/total_count) >= rtol:
count = np.count_nonzero(np.less_equal(error, atol + np.abs(result) * rtol))
if ((total_count - count) / total_count) >= rtol:
raise ValueError(f'expect {expect}, but got {result}, '
f'{total_count-count} / {total_count} elements out of tolerance, '
f'{total_count - count} / {total_count} elements out of tolerance, '
f'absolute_tolerance {atol}, relative_tolerance {rtol}')
print(f'expect {expect}, got {result}, '
f'{total_count-count} / {total_count} elements out of tolerance, '
f'{total_count - count} / {total_count} elements out of tolerance, '
f'absolute_tolerance {atol}, relative_tolerance {rtol}')


def tolerance_assert(expect, result, rtol, atol):
"""
Verify if results are in expected tolerance.
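
The per-element test in count_unequal_element above, error <= atol + abs(result) * rtol, is the same criterion numpy.isclose uses; a standalone check of that equivalence:

    import numpy as np

    expect = np.array([1.0, 2.0, 3.0])
    result = np.array([1.0, 2.0, 3.0001])
    rtol, atol = 1e-3, 1e-4
    within = np.less_equal(np.abs(expect - result), atol + np.abs(result) * rtol)
    assert (within == np.isclose(expect, result, rtol=rtol, atol=atol)).all()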


+18 -1 tests/ops_common.py

@@ -21,8 +21,10 @@ import mindspore.ops.operations as P
from mindspore import Tensor
from mindspore.common.api import _executor


class InputBackward(nn.Cell):
""" InputBackward definition """

def __init__(self, network, c1=None, c2=None):
super(InputBackward, self).__init__()
self.network = network
@@ -58,6 +60,7 @@ class InputBackward(nn.Cell):

class InputOpNet(nn.Cell):
""" InputOpNet definition """

def __init__(self, op, get_first=False,
c1=None, c2=None, c3=None, c4=None):
super(InputOpNet, self).__init__()
@@ -76,6 +79,7 @@ class InputOpNet(nn.Cell):
if self.get_first:
x = x[0]
return x

def construct0_c1_fack(self, data):
x = self.op(self.c1) + data
if self.get_first:
@@ -148,7 +152,6 @@ class InputOpNet(nn.Cell):
x = x[0]
return x


def construct2_c1(self, x1, x2):
x = self.op(x1, x2, self.c1)
if self.get_first:
@@ -203,8 +206,10 @@ class InputOpNet(nn.Cell):
x = x[0]
return x


class NetOutputAsLoss(nn.Cell):
""" NetOutputAsLoss definition """

def __init__(self, network, output_index):
super(NetOutputAsLoss, self).__init__()
self.network = network
@@ -233,18 +238,21 @@ class NetOutputAsLoss(nn.Cell):
predict = self.network(x1, x2, x3, x4, x5)[self.output_index]
return predict


def get_loss_fun(construct_net, num_input, output_index):
net = NetOutputAsLoss(construct_net, output_index)
f = getattr(net, 'construct%d' % num_input)
setattr(net, "construct", f)
return net


def build_construct_graph(net, *inputs, execute=True):
net.set_train()
_executor.compile(net, *inputs)
if execute:
_executor(net, inputs)


def build_backward_graph(net, output_shapes, inputs, execute=True):
inputs = append_sens_to_inputs(output_shapes, inputs)
net = gen_backward_net(net, len(inputs) - 1)
@@ -253,6 +261,7 @@ def build_backward_graph(net, output_shapes, inputs, execute=True):
if execute:
_executor(net, inputs)


def convert(shp, dtype=np.float32, scale=6):
if isinstance(shp, list):
if not shp:
@@ -260,12 +269,14 @@ def convert(shp, dtype=np.float32, scale=6):
return Tensor((np.random.rand(*shp) * scale).astype(dtype))
return shp


def gen_inputs(input_shapes, config):
add_fack_input = config.get('add_fack_input', False)
if not input_shapes and add_fack_input:
return [Tensor(np.array([1.0]).astype(config.get('fack_input_type', np.float32)))]
return [convert(shp) for shp in input_shapes]


def gen_backward_inputs(input_shapes, output_shapes, config):
add_fack_input = config.get('add_fack_input', False)
if not input_shapes and add_fack_input:
@@ -276,11 +287,13 @@ def gen_backward_inputs(input_shapes, output_shapes, config):
sens = convert(sens_shape)
return inputs + [sens]


def append_sens_to_inputs(output_shapes, inputs):
inputs = inputs
sens = Tensor(np.random.normal(0, 1, output_shapes).astype(np.float32))
return inputs + [sens]


def gen_net(shapes, config, get_first=False):
"""
gen_net function
@@ -313,14 +326,17 @@ def gen_backward_net(construct_net, input_num):
setattr(net, "construct", f)
return net


def batch_tuple_tensor(data, batch_size):
ret = [Tensor(np.tile(d.asnumpy(), (batch_size, 1))) for d in data]
return tuple(ret)


class OutPutWrap(nn.Cell):
"""
OutPutWrap definition
"""

def __init__(self, network, num_output, output_is_tuple):
super(OutPutWrap, self).__init__()
self.network = network
@@ -387,6 +403,7 @@ class OutPutWrap(nn.Cell):
ret = ret + F.make_tuple(predict[i] * self.cast(self.one, self.dtype(predict[i])))
return ret


def get_output_wrap(network, num_input, num_output, output_is_tuple=0):
net = OutPutWrap(network, num_output, output_is_tuple)
f = getattr(net, 'construct%d' % num_input)
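
convert() above (like shape2tensor() in other_util.py) materializes a shape spec into a random Tensor; stripped of the mindspore wrapper, the numpy core is just:

    import numpy as np


    def random_array(shape, dtype=np.float32, scale=6):
        # uniform values in [0, scale), cast to the requested dtype
        return (np.random.rand(*shape) * scale).astype(dtype)


    print(random_array([2, 3]).shape)  # (2, 3)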


+1 -1 tests/st/auto_parallel/test_expand_loss.py

@@ -23,4 +23,4 @@ import pytest
def test_expand_loss():
sh_path = os.path.split(os.path.realpath(__file__))[0]
ret = os.system(f"sh {sh_path}/run_auto_parallel_loss_expand.sh")
assert(ret == 0)
assert (ret == 0)

+1 -1 tests/st/auto_parallel/test_model_parallel_onehot.py

@@ -19,4 +19,4 @@ import pytest
def test_expand_loss():
ret = os.system("sh run_onehot_model_parallel.sh")
assert(ret == 0)
assert (ret == 0)
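
The fix above only inserts a space, but the style rule guards a real pitfall: assert is a statement, so the parentheses are plain grouping, and putting a message inside them would create a two-element tuple that is always truthy. Illustration:

    ret = 0
    assert (ret == 0)               # grouping only; asserts the comparison
    # assert (ret == 0, "nonzero")  # BUG: non-empty tuple, never fails
    assert ret == 0, "nonzero"      # correct way to attach a message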

+12 -3 tests/st/control/test_cont_break.py

@@ -18,6 +18,7 @@ import numpy as np
from mindspore.nn import Cell
from mindspore import Tensor, Model, context


def run_test(netclass, count, dev):
context.set_context(mode=context.GRAPH_MODE, device_target=dev)
net = netclass()
@@ -25,10 +26,11 @@ def run_test(netclass, count, dev):
for _ in range(count):
input_np = np.random.randn(2, 3).astype(np.float32)
input_ms = Tensor(input_np)
output_np = net.construct(input_np) # run python
output_ms = model.predict(input_ms) # run graph
output_np = net.construct(input_np) # run python
output_ms = model.predict(input_ms) # run graph
np.testing.assert_array_almost_equal(output_np, output_ms.asnumpy(), decimal=3)


class for_loop_with_break(Cell):
def __init__(self):
super().__init__()
@@ -42,6 +44,7 @@ class for_loop_with_break(Cell):
pass
return x


class for_loop_with_continue(Cell):
def __init__(self):
super().__init__()
@@ -54,6 +57,7 @@ class for_loop_with_continue(Cell):
x = x * 2
return x


class for_loop_with_cont_break(Cell):
def __init__(self):
super().__init__()
@@ -71,6 +75,7 @@ class for_loop_with_cont_break(Cell):
pass
return x


class for_nested_loop_with_break(Cell):
def __init__(self):
super().__init__()
@@ -84,6 +89,7 @@ class for_nested_loop_with_break(Cell):
x = x * 1.5
return x


class while_with_break(Cell):
def __init__(self):
super().__init__()
@@ -98,6 +104,7 @@ class while_with_break(Cell):
i += 1
return x


class while_with_continue(Cell):
def __init__(self):
super().__init__()
@@ -113,6 +120,7 @@ class while_with_continue(Cell):
i += 1
return x


class while_for_nested(Cell):
def __init__(self):
super().__init__()
@@ -131,6 +139,7 @@ class while_for_nested(Cell):
i += 1
return x


class pass_branch(Cell):
def __init__(self):
super().__init__()
@@ -145,6 +154,7 @@ class pass_branch(Cell):
i += 1
return x


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -159,4 +169,3 @@ def test_cont_break():
run_test(while_with_continue, count, dev)
run_test(while_for_nested, count, dev)
run_test(pass_branch, count, dev)
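
Each of these control-flow cells goes through the same run_test harness: execute the cell once as ordinary Python and once as a compiled graph, then compare the two outputs. A condensed sketch of that pattern with a hypothetical Double cell (imports and calls mirror the diff above):

    import numpy as np
    from mindspore import Tensor, Model, context
    from mindspore.nn import Cell

    class Double(Cell):  # hypothetical toy cell
        def construct(self, x):
            return x * 2

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    net = Double()
    x_np = np.random.randn(2, 3).astype(np.float32)
    out_py = net.construct(x_np)               # run python
    out_ms = Model(net).predict(Tensor(x_np))  # run graph
    np.testing.assert_array_almost_equal(out_py, out_ms.asnumpy(), decimal=3)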


+6 -2 tests/st/gnn/aggregator.py

@@ -64,6 +64,7 @@ class GNNFeatureTransform(nn.Cell):
[[ 2.5246444 2.2738023 0.5711005 -3.9399147 ]
[ 1.0739875 4.0155234 0.94188046 -5.459526 ]]
"""

@cell_attr_register
def __init__(self,
in_channels,
@@ -78,7 +79,7 @@ class GNNFeatureTransform(nn.Cell):

if isinstance(weight_init, Tensor):
if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
weight_init.shape()[1] != in_channels:
weight_init.shape()[1] != in_channels:
raise ValueError("weight_init shape error")

self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
@@ -104,7 +105,7 @@ class GNNFeatureTransform(nn.Cell):

def extend_repr(self):
str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
.format(self.in_channels, self.out_channels, self.weight, self.has_bias)
.format(self.in_channels, self.out_channels, self.weight, self.has_bias)
if self.has_bias:
str_info = str_info + ', bias={}'.format(self.bias)

@@ -136,6 +137,7 @@ class _BaseAggregator(nn.Cell):
>>> def construct(self, x):
>>> return self.reduce_mean(x, 1)
"""

def __init__(self,
feature_in_dim,
feature_out_dim,
@@ -191,6 +193,7 @@ class MeanAggregator(_BaseAggregator):
>>> input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtypy=np.float32))
>>> output = net(input_data)
"""

def __init__(self,
feature_in_dim,
feature_out_dim,
@@ -349,6 +352,7 @@ class AttentionAggregator(nn.Cell):
8)
>>> net(input_data, biases)
"""

def __init__(self,
in_channels,
out_channels,


+1 -0 tests/st/gnn/test_gnn_aggregator.py

@@ -27,6 +27,7 @@ context.set_context(mode=context.GRAPH_MODE)

class MeanAggregatorGrad(nn.Cell):
"""Backward of MeanAggregator"""

def __init__(self, network):
super(MeanAggregatorGrad, self).__init__()
self.grad_op = C.grad_all_with_sens


+4 -4 tests/st/nccl/test_nccl_all.py

@@ -21,7 +21,7 @@ import pytest
@pytest.mark.env_single
def test_nccl_lenet():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_lenet.py")
assert(return_code == 0)
assert (return_code == 0)


@pytest.mark.level0
@@ -29,7 +29,7 @@ def test_nccl_lenet():
@pytest.mark.env_single
def test_nccl_all_reduce_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_reduce_op.py")
assert(return_code == 0)
assert (return_code == 0)


@pytest.mark.level0
@@ -37,7 +37,7 @@ def test_nccl_all_reduce_op():
@pytest.mark.env_single
def test_nccl_all_gather_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_gather_op.py")
assert(return_code == 0)
assert (return_code == 0)


@pytest.mark.level0
@@ -45,4 +45,4 @@ def test_nccl_all_gather_op():
@pytest.mark.env_single
def test_nccl_reduce_scatter_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_reduce_scatter_op.py")
assert(return_code == 0)
assert (return_code == 0)
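
All four NCCL tests reduce to the same launcher idiom: spawn the real test under mpirun and assert on the exit status, since os.system returns the child's status and zero means every rank's pytest run passed. A hedged sketch with a hypothetical helper:

    import os

    def run_distributed(script, ranks=8):  # hypothetical helper, not in the diff
        return os.system(f"mpirun -n {ranks} pytest -s {script}")

    def test_nccl_lenet_sketch():
        assert run_distributed("test_nccl_lenet.py") == 0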

+2 -2 tests/st/nccl/test_nccl_lenet.py

@@ -71,7 +71,7 @@ class LeNet(nn.Cell):
def multisteplr(total_steps, gap, base_lr=0.9, gamma=0.1, dtype=mstype.float32):
lr = []
for step in range(total_steps):
lr_ = base_lr * gamma ** (step//gap)
lr_ = base_lr * gamma ** (step // gap)
lr.append(lr_)
return Tensor(np.array(lr), dtype)

@@ -104,4 +104,4 @@ def test_lenet_nccl():
with open("ms_loss.txt", "w") as fo2:
fo2.write("loss:")
fo2.write(str(losses[-5:]))
assert(losses[-1] < 0.01)
assert (losses[-1] < 0.01)
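
The reformatted step // gap makes the schedule easier to read: multisteplr holds the rate for gap steps, then decays it by gamma. A quick numeric check in plain Python:

    base_lr, gamma, gap = 0.9, 0.1, 2
    lr = [base_lr * gamma ** (step // gap) for step in range(6)]
    print(lr)  # [0.9, 0.9, 0.09, 0.09, 0.009, 0.009] up to float rounding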

+1 -0 tests/st/nccl/test_nccl_reduce_scatter_op.py

@@ -20,6 +20,7 @@ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size

context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

init('nccl')


+15 -6 tests/st/networks/models/bert/bert_tdt_lossscale.py

@@ -30,10 +30,12 @@ from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepWithLossScaleCell
from mindspore.nn.optim import Momentum
from mindspore import log as logger

_current_dir = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json"


def get_config(version='base', batch_size=1):
"""get config"""
if version == 'base':
@@ -80,13 +82,14 @@ def get_config(version='base', batch_size=1):
bert_config = BertConfig(batch_size=batch_size)
return bert_config


def me_de_train_dataset():
"""test me de train dataset"""
# apply repeat operations
repeat_count = 1
ds = de.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids",
"next_sentence_labels", "masked_lm_positions",
"masked_lm_ids", "masked_lm_weights"], shuffle=False)
"next_sentence_labels", "masked_lm_positions",
"masked_lm_ids", "masked_lm_weights"], shuffle=False)
type_cast_op = C.TypeCast(mstype.int32)
ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
@@ -100,12 +103,14 @@ def me_de_train_dataset():
ds = ds.repeat(repeat_count)
return ds


def weight_variable(shape):
"""weight variable"""
np.random.seed(1)
ones = np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)
return Tensor(ones)


class ModelCallback(Callback):
def __init__(self):
super(ModelCallback, self).__init__()
@@ -120,6 +125,7 @@ class ModelCallback(Callback):
self.lossscale_list.append(cb_params.net_outputs[2].asnumpy())
print("epoch: {}, outputs are: {}".format(cb_params.cur_epoch_num, str(cb_params.net_outputs)))


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@@ -134,8 +140,9 @@ def test_bert_tdt():
netwithloss = BertNetworkWithLoss(config, True)
optimizer = Momentum(netwithloss.trainable_params(), learning_rate=2e-5, momentum=0.9)
scale_window = 3
scale_manager = DynamicLossScaleManager(2**16, 2, scale_window)
netwithgrads = BertTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer, scale_update_cell=scale_manager.get_update_cell())
scale_manager = DynamicLossScaleManager(2 ** 16, 2, scale_window)
netwithgrads = BertTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer,
scale_update_cell=scale_manager.get_update_cell())
netwithgrads.set_train(True)
model = Model(netwithgrads)
callback = ModelCallback()
@@ -162,10 +169,11 @@ def test_bert_tdt():

# assertion occurs while the loss value, overflow state or loss_scale value is wrong
loss_value = np.array(callback.loss_list)
expect_loss_value = [12.1918125, 11.966035, 11.972114, 11.982189, 11.973948, 12.610932, 12.17564, 12.840248, 12.40294, 12.621653]
expect_loss_value = [12.1918125, 11.966035, 11.972114, 11.982189, 11.973948, 12.610932, 12.17564, 12.840248,
12.40294, 12.621653]
print("loss value: {}".format(loss_value))
assert np.allclose(loss_value, expect_loss_value, 0.00001, 0.00001)
overflow = np.array(callback.overflow_list)
expect_overflow = [True, True, False, False, False, True, False, False, False, True]
print("overflow: {}".format(overflow))
@@ -176,5 +184,6 @@ def test_bert_tdt():
print("loss scale: {}".format(loss_scale))
assert np.allclose(loss_scale, expect_loss_scale, 0.00001, 0.00001)


if __name__ == '__main__':
test_bert_tdt()
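
The loss-scale assertions above hinge on how DynamicLossScaleManager(2 ** 16, 2, scale_window) evolves the scale. A rough sketch of the usual dynamic loss-scaling rule, assuming halve-on-overflow and double-after-scale_window-clean-steps (the exact update lives in mindspore.train.loss_scale_manager):

    def update_scale(scale, overflow, good_steps, factor=2, window=3):
        # assumption: halve on overflow, double after `window` clean steps
        if overflow:
            return max(scale / factor, 1), 0
        good_steps += 1
        if good_steps >= window:
            return scale * factor, 0
        return scale, good_steps

    scale, good = 2 ** 16, 0
    for overflow in [True, True, False, False, False, True]:
        scale, good = update_scale(scale, overflow, good)
        print(overflow, scale)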

+2 -2 tests/st/networks/test_gpu_alexnet.py

@@ -42,7 +42,7 @@ class AlexNet(nn.Cell):
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
self.flatten = nn.Flatten()
self.fc1 = nn.Dense(6*6*256, 4096)
self.fc1 = nn.Dense(6 * 6 * 256, 4096)
self.fc2 = nn.Dense(4096, 4096)
self.fc3 = nn.Dense(4096, num_classes)

@@ -87,4 +87,4 @@ def test_trainTensor(num_classes=10, epoch=15, batch_size=32):
label = Tensor(np.ones([batch_size]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)
assert(losses[-1].asnumpy() < 0.01)
assert (losses[-1].asnumpy() < 0.01)

+5 -4 tests/st/networks/test_gpu_lstm.py

@@ -25,7 +25,6 @@ from mindspore.nn import Dense
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter


context.set_context(mode=context.GRAPH_MODE, device_target="GPU")


@@ -104,6 +103,8 @@ class SentimentNet(nn.Cell):


batch_size = 64


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -117,9 +118,9 @@ def test_LSTM():
vocab_size = 252193
max_len = 500

weight = np.ones((vocab_size+1, embed_size)).astype(np.float32)
weight = np.ones((vocab_size + 1, embed_size)).astype(np.float32)

net = SentimentNet(vocab_size=(vocab_size+1), embed_size=embed_size,
net = SentimentNet(vocab_size=(vocab_size + 1), embed_size=embed_size,
num_hiddens=num_hiddens, num_layers=num_layers,
bidirectional=bidirectional, weight=weight,
labels=labels, batch_size=batch_size)
@@ -140,4 +141,4 @@ def test_LSTM():
loss = train_network(train_features, train_labels)
losses.append(loss)
print("loss:", loss.asnumpy())
assert(losses[-1].asnumpy() < 0.01)
assert (losses[-1].asnumpy() < 0.01)

+5 -4 tests/st/networks/test_gpu_resnet.py

@@ -340,7 +340,8 @@ def test_trainTensor(num_classes=10, epoch=8, batch_size=1):
label = Tensor(np.ones([batch_size]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)
assert(losses[-1].asnumpy() < 1)
assert (losses[-1].asnumpy() < 1)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@@ -359,6 +360,6 @@ def test_trainTensor_amp(num_classes=10, epoch=18, batch_size=16):
label = Tensor(np.ones([batch_size]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)
assert(losses[-1][0].asnumpy() < 1)
assert(losses[-1][1].asnumpy() == False)
assert(losses[-1][2].asnumpy() > 1)
assert (losses[-1][0].asnumpy() < 1)
assert (losses[-1][1].asnumpy() == False)
assert (losses[-1][2].asnumpy() > 1)

+5 -5 tests/st/ops/cpu/test_argmax_op.py

@@ -25,27 +25,27 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class NetArgmax(nn.Cell):
def __init__( self):
def __init__(self):
super(NetArgmax, self).__init__()
self.argmax = P.Argmax(output_type=mstype.int32)
x = Tensor(np.array([[1., 20., 5.],
[67., 8., 9.],
[130., 24., 15.]]).astype(np.float32))
self.x = Parameter(initializer(x, x.shape()), name ='x')
self.x = Parameter(initializer(x, x.shape()), name='x')
def construct(self):
return self.argmax(self.x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_argmax():
Argmax = NetArgmax()
output = Argmax()
print("================================")
expect = np.array([1,0,0]).astype(np.float32)
expect = np.array([1, 0, 0]).astype(np.float32)
print(output)
assert (output.asnumpy() == expect).all()

+12 -8 tests/st/ops/cpu/test_bias_add.py

@@ -18,8 +18,10 @@ from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@@ -28,26 +30,28 @@ class Net(nn.Cell):
def construct(self, x, b):
return self.bias_add(x, b)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add1():
x = np.ones([2,3,4,4]).astype(np.float32)
b = np.array([1,1,1]).astype(np.float32)
x = np.ones([2, 3, 4, 4]).astype(np.float32)
b = np.array([1, 1, 1]).astype(np.float32)
bias_add = Net()
output = bias_add(Tensor(x), Tensor(b))
expect_output = np.ones([2,3,4,4]).astype(np.float32)*2
expect_output = np.ones([2, 3, 4, 4]).astype(np.float32) * 2
print(output)
assert np.all(output.asnumpy()==expect_output), "bias_add execute failed, please check current code commit"
assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add2():
x = np.ones([2,3]).astype(np.float32)
b = np.array([1,1,1]).astype(np.float32)
x = np.ones([2, 3]).astype(np.float32)
b = np.array([1, 1, 1]).astype(np.float32)
bias_add = Net()
output = bias_add(Tensor(x), Tensor(b))
expect_output = np.ones([2,3]).astype(np.float32)*2
expect_output = np.ones([2, 3]).astype(np.float32) * 2
print(output)
assert np.all(output.asnumpy()==expect_output), "bias_add execute failed, please check current code commit"
assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
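
The expected value in both tests follows from broadcasting: BiasAdd adds the 1-D bias along the channel axis, so ones plus a bias of ones is 2 everywhere. A numpy-only re-derivation of the [2, 3, 4, 4] case (assuming NCHW layout, channel axis 1):

    import numpy as np

    x = np.ones([2, 3, 4, 4], np.float32)
    b = np.array([1, 1, 1], np.float32)
    expect = x + b.reshape(1, 3, 1, 1)  # broadcast bias over N, H, W
    assert (expect == 2).all()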

+10 -6 tests/st/ops/cpu/test_bias_add_grad.py

@@ -19,8 +19,10 @@ from mindspore.ops.operations import _grad_ops as G
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@@ -29,24 +31,26 @@ class Net(nn.Cell):
def construct(self, dout):
return self.bias_add_grad(dout)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add_grad1():
dout = np.ones([2,3]).astype(np.float32)
dout = np.ones([2, 3]).astype(np.float32)
bias_add_grad = Net()
output = bias_add_grad(Tensor(dout))
expect_output = np.array([2.,2.,2.]).astype(np.float32)
expect_output = np.array([2., 2., 2.]).astype(np.float32)
print(output.asnumpy())
assert np.all(output.asnumpy()==expect_output), "bias_add_grad execute failed, please check current code commit"
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add_grad2():
dout = np.ones([2,3,4,4]).astype(np.float32)
dout = np.ones([2, 3, 4, 4]).astype(np.float32)
bias_add_grad = Net()
output = bias_add_grad(Tensor(dout))
expect_output = np.array([32.,32.,32.]).astype(np.float32)
expect_output = np.array([32., 32., 32.]).astype(np.float32)
print(output.asnumpy())
assert np.all(output.asnumpy()==expect_output), "bias_add_grad execute failed, please check current code commit"
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
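
The expected gradients here are a reduction: BiasAddGrad sums the incoming gradient over every axis except the channel axis, so ones of shape [2, 3, 4, 4] give 2 * 4 * 4 = 32 per channel. Checked with numpy (again assuming NCHW):

    import numpy as np

    dout = np.ones([2, 3, 4, 4], np.float32)
    db = dout.sum(axis=(0, 2, 3))  # keep channel axis 1
    assert (db == np.array([32., 32., 32.], np.float32)).all()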

+24 -21 tests/st/ops/cpu/test_conv2d_backprop_filter_op.py

@@ -25,32 +25,35 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net4(nn.Cell):
def __init__(self):
super(Net4, self).__init__()
out_channel = 4
kernel_size = 1
self.conv_filter = G.Conv2DBackpropFilter(out_channel,
kernel_size,
pad_mode="valid",
pad=0,
mode=1,
stride=(1, 1),
dilation=(1, 1, 1, 1),
group=1)
self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w')
kernel_size,
pad_mode="valid",
pad=0,
mode=1,
stride=(1, 1),
dilation=(1, 1, 1, 1),
group=1)
self.w = Parameter(
initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]),
name='w')
self.x = Parameter(initializer(Tensor(np.array([[[
[3, 0, 1, 2, 7, 4],
[1, 5, 8, 9, 3, 1],
[2, 7, 2, 5, 1, 3],
[0, 1, 3, 1, 7, 8],
[4, 2, 1, 6, 2, 8],
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1,1,6,6]), name='x')
[3, 0, 1, 2, 7, 4],
[1, 5, 8, 9, 3, 1],
[2, 7, 2, 5, 1, 3],
[0, 1, 3, 1, 7, 8],
[4, 2, 1, 6, 2, 8],
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x')
self.out = Parameter(initializer(Tensor(np.array([[[
[ -5, -4, 0, 8],
[-10, -2, 2, 3],
[ 0, -2, -4, -7],
[ -3, -2, -3, -16]]]]).astype(np.float32)),[1,1,4,4]), name='y')
[-5, -4, 0, 8],
[-10, -2, 2, 3],
[0, -2, -4, -7],
[-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y')
self.get_shape = P.Shape()

def construct(self):
@@ -70,8 +73,8 @@ def test_conv2d_backprop_filter():
[-104, -211, -322]
[-102, -144, -248]]]]
"""
expect = np.array([[[[ -60, -142, -265],
[-104, -211, -322],
[-102, -144, -248]]]]).astype(np.float32)
expect = np.array([[[[-60, -142, -265],
[-104, -211, -322],
[-102, -144, -248]]]]).astype(np.float32)
print(output)
assert (output.asnumpy() == expect).all()

+22 -19 tests/st/ops/cpu/test_conv2d_backprop_input_op.py

@@ -24,32 +24,35 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net5(nn.Cell):
def __init__(self):
super(Net5, self).__init__()
out_channel = 4
kernel_size = 1
self.conv_input = P.Conv2DBackpropInput(out_channel,
kernel_size,
pad_mode="valid",
pad=0,
mode=1,
stride=1,
dilation=1,
group=1)
self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w')
kernel_size,
pad_mode="valid",
pad=0,
mode=1,
stride=1,
dilation=1,
group=1)
self.w = Parameter(
initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]),
name='w')
self.x = Parameter(initializer(Tensor(np.array([[[
[3, 0, 1, 2, 7, 4],
[1, 5, 8, 9, 3, 1],
[2, 7, 2, 5, 1, 3],
[0, 1, 3, 1, 7, 8],
[4, 2, 1, 6, 2, 8],
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1,1,6,6]), name='x')
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x')
self.out = Parameter(initializer(Tensor(np.array([[[
[ -5, -4, 0, 8],
[-10, -2, 2, 3],
[ 0, -2, -4, -7],
[ -3, -2, -3, -16]]]]).astype(np.float32)),[1,1,4,4]), name='y')
[-5, -4, 0, 8],
[-10, -2, 2, 3],
[0, -2, -4, -7],
[-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y')
self.get_shape = P.Shape()

def construct(self):
@@ -72,11 +75,11 @@ def test_conv2d_backprop_input():
[ -3, -4, -4, -19, 7, 23]
[ -3, -2, 0, -14, 3, 16]]]]
"""
expect = np.array([[[[ -5, -4, 5, 12, 0, -8],
[-15, -6, 17, 17, -2, -11],
[-15, -8, 13, 12, 2, -4],
[-13, -6, 8, -14, 5, 20],
[ -3, -4, -4, -19, 7, 23],
[ -3, -2, 0, -14, 3, 16]]]]).astype(np.float32)
expect = np.array([[[[-5, -4, 5, 12, 0, -8],
[-15, -6, 17, 17, -2, -11],
[-15, -8, 13, 12, 2, -4],
[-13, -6, 8, -14, 5, 20],
[-3, -4, -4, -19, 7, 23],
[-3, -2, 0, -14, 3, 16]]]]).astype(np.float32)
print(output)
assert (output.asnumpy() == expect).all()

+5 -5 tests/st/ops/cpu/test_conv2d_op.py

@@ -24,8 +24,9 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetConv2d(nn.Cell):
def __init__( self):
def __init__(self):
super(NetConv2d, self).__init__()
out_channel = 2
kernel_size = 1
@@ -42,7 +43,6 @@ class NetConv2d(nn.Cell):
self.x = Parameter(initializer(
Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32)), [1, 3, 3, 3]), name='x')


def construct(self):
return self.conv(self.x, self.w)

@@ -64,9 +64,9 @@ def test_conv2d():
[162. 174. 186.]
[198. 210. 222.]]]]
"""
expect = np.array([[[[ 45, 48, 51],
[ 54, 57, 60],
[ 63, 66, 69]],
expect = np.array([[[[45, 48, 51],
[54, 57, 60],
[63, 66, 69]],
[[126, 138, 150],
[162, 174, 186],
[198, 210, 222]]]]).astype(np.float32)


+4 -5 tests/st/ops/cpu/test_equalcount_op.py

@@ -24,14 +24,15 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class NetEqualCount(nn.Cell):
def __init__( self):
def __init__(self):
super(NetEqualCount, self).__init__()
self.equalcount = P.EqualCount()
x = Tensor(np.array([1, 20, 5]).astype(np.int32))
y = Tensor(np.array([2, 20, 5]).astype(np.int32))
self.x = Parameter(initializer(x, x.shape()), name ='x')
self.y = Parameter(initializer(y, y.shape()), name ='y')
self.x = Parameter(initializer(x, x.shape()), name='x')
self.y = Parameter(initializer(y, y.shape()), name='y')
def construct(self):
return self.equalcount(self.x, self.y)
@@ -41,11 +42,9 @@ class NetEqualCount(nn.Cell):
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_equalcount():
EqualCount = NetEqualCount()
output = EqualCount()
print("================================")
expect = np.array([2]).astype(np.int32)
print(output)
assert (output.asnumpy() == expect).all()

+1 -2 tests/st/ops/cpu/test_maxpool_grad_op.py

@@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net_Pool_Grad(nn.Cell):
def __init__(self):
super(Net_Pool_Grad, self).__init__()
@@ -56,7 +57,6 @@ class Net_Pool_Grad(nn.Cell):
[31, 33, 35]
]]]).astype(np.float32)), [1, 1, 3, 3]), name='d')


def construct(self):
return self.maxpool_grad_fun(self.x, self.a, self.d)

@@ -78,4 +78,3 @@ def test_maxpool2d_grad():
[0, 31, 0, 33, 0, 35]
]]]))
assert (output.asnumpy() == expect_result).all()


+5 -0 tests/st/ops/cpu/test_maxpool_op.py

@@ -21,20 +21,25 @@ import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class Net_Pool(nn.Cell):
def __init__(self):
super(Net_Pool, self).__init__()
self.maxpool_fun = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="VALID")

def construct(self, x):
return self.maxpool_fun(x)


class Net_Pool2(nn.Cell):
def __init__(self):
super(Net_Pool2, self).__init__()
self.maxpool_fun = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")

def construct(self, x):
return self.maxpool_fun(x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard


+5 -3 tests/st/ops/cpu/test_momentum_op.py

@@ -25,6 +25,7 @@ import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class MomentumNet(nn.Cell):
def __init__(self):
super(MomentumNet, self).__init__()
@@ -39,6 +40,7 @@ class MomentumNet(nn.Cell):
output = self.fc1(output)
return output


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -46,7 +48,7 @@ def test_momentum():
epoch = 13
net = MomentumNet()
learning_rate = 0.1
momentum = 0.9
momentum = 0.9

optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
@@ -55,11 +57,11 @@ def test_momentum():
train_network.set_train()
losses = []
for i in range(epoch):
data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32)*0.01)
data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01)
label = Tensor(np.array([0]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)
print("================================")
print(losses)
"""


+16 -14 tests/st/ops/cpu/test_mul_op.py

@@ -1,17 +1,17 @@
#Copyright 2019 Huawei Technologies Co., Ltd
# Copyright 2019 Huawei Technologies Co., Ltd
#
#Licensed under the Apache License, Version 2.0(the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# Licensed under the Apache License, Version 2.0(the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#http: // www.apache.org/licenses/LICENSE-2.0
# http: // www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#== == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == ==
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == ==

import pytest
from mindspore import Tensor
@@ -23,13 +23,14 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

x = np.random.uniform(-2, 2, (2,3,4,4)).astype(np.float32)
y = np.random.uniform(-2, 2, (1,1,1,1)).astype(np.float32)
x = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32)
y = np.random.uniform(-2, 2, (1, 1, 1, 1)).astype(np.float32)

context.set_context(device_target='CPU')


class Net(nn.Cell):
def __init__( self):
def __init__(self):
super(Net, self).__init__()
self.mul = P.Mul()
self.x = Parameter(initializer(Tensor(x), x.shape), name='x3')
@@ -39,6 +40,7 @@ class Net(nn.Cell):
def construct(self):
return self.mul(self.x, self.y)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard


+4 -1 tests/st/ops/cpu/test_relu_grad_op.py

@@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetReluGrad(nn.Cell):
def __init__(self):
super(NetReluGrad, self).__init__()
@@ -35,16 +36,18 @@ class NetReluGrad(nn.Cell):
self.dy = Parameter(initializer(Tensor(np.array([[[[1, 0, 1],
[0, 1, 0],
[1, 1, 1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='dy')

def construct(self):
return self.rekuGrad(self.dy, self.x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_relu_grad():
relu_grad = NetReluGrad()
output = relu_grad()
expect = np.array([[[ [0, 0, 1,],[0, 0, 0,],[1, 1, 0.] ]]]).astype(np.float32)
expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32)
error = np.ones(shape=[3, 3]) * 1.0e-6
diff = output.asnumpy() - expect
assert np.all(diff < error)

+6 -3 tests/st/ops/cpu/test_relu_op.py

@@ -24,6 +24,7 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetRelu(nn.Cell):
def __init__(self):
super(NetRelu, self).__init__()
@@ -31,17 +32,19 @@ class NetRelu(nn.Cell):
self.x = Parameter(initializer(Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='x')

def construct(self):
return self.relu(self.x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_relu():
relu = NetRelu()
output = relu()
expect = np.array([[[ [0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.float32)
expect = np.array([[[[0, 1, 10, ],
[1, 0, 1, ],
[10, 1, 0.]]]]).astype(np.float32)
print(output)
assert (output.asnumpy() == expect).all()

+4 -3 tests/st/ops/cpu/test_softmax_op.py

@@ -24,18 +24,20 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class NetSoftmax(nn.Cell):
def __init__( self):
def __init__(self):
super(NetSoftmax, self).__init__()
self.softmax = P.Softmax()
x = Tensor(np.array([[0.1, 0.3, 0.6],
[0.2, -0.6, 0.8],
[0.6, 1, 0.4]]).astype(np.float32))
self.x = Parameter(initializer(x, x.shape()), name ='x')
self.x = Parameter(initializer(x, x.shape()), name='x')
def construct(self):
return self.softmax(self.x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -49,4 +51,3 @@ def test_softmax():
diff = np.abs(outputSum - expect)
print(diff)
assert np.all(diff < error)

+11 -10 tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py

@@ -24,15 +24,16 @@ from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetSoftmaxWithCrossEntropy(nn.Cell):
def __init__( self):
def __init__(self):
super(NetSoftmaxWithCrossEntropy, self).__init__()
logits = Tensor(np.array([[1,1,10],
[1,10,1],
[10,1,1]]).astype(np.float32))
self.logits = Parameter(initializer(logits, logits.shape()), name ='logits')
labels = Tensor(np.array([2,1,0]).astype(np.int32))
self.labels = Parameter(initializer(labels, labels.shape()), name ='labels')
logits = Tensor(np.array([[1, 1, 10],
[1, 10, 1],
[10, 1, 1]]).astype(np.float32))
self.logits = Parameter(initializer(logits, logits.shape()), name='logits')
labels = Tensor(np.array([2, 1, 0]).astype(np.int32))
self.labels = Parameter(initializer(labels, labels.shape()), name='labels')
self.SoftmaxWithCrossEntropy = P.SparseSoftmaxCrossEntropyWithLogits(True)

def construct(self):
@@ -45,9 +46,9 @@ class NetSoftmaxWithCrossEntropy(nn.Cell):
def test_net():
SoftmaxWithCrossEntropy = NetSoftmaxWithCrossEntropy()
output = SoftmaxWithCrossEntropy()
expect = np.array([[ 4.1126452e-05, 4.1126452e-05, -8.2234539e-05],
[ 4.1126452e-05, -8.2234539e-05, 4.1126452e-05],
[-8.2234539e-05, 4.1126452e-05, 4.1126452e-05]]).astype(np.float32)
expect = np.array([[4.1126452e-05, 4.1126452e-05, -8.2234539e-05],
[4.1126452e-05, -8.2234539e-05, 4.1126452e-05],
[-8.2234539e-05, 4.1126452e-05, 4.1126452e-05]]).astype(np.float32)
print(output)
error = np.ones(shape=[3, 3]) * 1.0e-6
diff = output.asnumpy() - expect


+4 -1 tests/st/ops/custom_ops_tbe/conv2d.py

@@ -21,6 +21,8 @@ from topi.cce import util
from te import platform as cce

Nonetype = type(None)


# pylint: disable=unused-argument, no-value-for-parameter, too-many-branches
@fusion_manager.register("conv2d")
def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations,
@@ -103,6 +105,7 @@ def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations,

return res


@util.check_input_type(dict, dict, (dict, Nonetype), dict, (tuple, list), (tuple, list), (tuple, list),
str)
def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations,
@@ -189,7 +192,7 @@ def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations,
if cce.CceProductParams().cce_product == "5.10":
conv_layer_fast_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype,
padh, padw, strideh, stridew, bias=use_bias,
kernel_name=kernel_name, need_build=True, need_print=False)
kernel_name=kernel_name, need_build=True, need_print=False)
else:
conv_layer_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype,
padh, padw, strideh, stridew,


+19 -14 tests/st/ops/custom_ops_tbe/conv_layer.py

@@ -18,14 +18,16 @@ from te.platform import CUBE_MKN
from topi import generic
from topi.cce import util
from topi.cce.util import is_v200_version

# pylint: disable=R0912,R0913,R0914,R0915,E1101
# the dim of shape in conv must be 4
PAD_SHAPE_DIM = 2

NONETYPE = type(None)


@util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int),
int, int,(list, tuple), (list, tuple),
int, int, (list, tuple), (list, tuple),
str, str, str,
str, str, str,
str, bool, str)
@@ -57,9 +59,9 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p

if quantize_config[0] == 0:
if is_v200_version():
util.check_dtype_rule(in_dtype, ('int8', ))
util.check_dtype_rule(w_dtype, ('int8', ))
util.check_dtype_rule(res_dtype, ('int32', ))
util.check_dtype_rule(in_dtype, ('int8',))
util.check_dtype_rule(w_dtype, ('int8',))
util.check_dtype_rule(res_dtype, ('int32',))
else:
util.check_dtype_rule(in_dtype, ['float16'])
util.check_dtype_rule(w_dtype, ['float16'])
@@ -117,7 +119,7 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p

if isinstance(padh, list):
if len(padh) != PAD_SHAPE_DIM:
raise RuntimeError("Dimension must be %d when padh is a list."%PAD_SHAPE_DIM)
raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM)
pad_top = padh[0]
pad_bottom = padh[1]
else:
@@ -126,7 +128,7 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p

if isinstance(padw, list):
if len(padw) != PAD_SHAPE_DIM:
raise RuntimeError("Dimension must be %d when padw is a list."%PAD_SHAPE_DIM)
raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM)
pad_left = padw[0]
pad_right = padw[1]
else:
@@ -134,8 +136,8 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p
pad_right = padw

shape_in, shape_w = te.lang.cce.check_conv_shape(shape_in, shape_w, pad_top, pad_bottom, \
pad_left, pad_right, strideh, \
stridew, in_dtype, w_dtype, res_dtype)
pad_left, pad_right, strideh, \
stridew, in_dtype, w_dtype, res_dtype)

return shape_in, shape_w

@@ -248,9 +250,12 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw,
shape_in = list(shape_in)
shape_w = list(shape_w)

shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, stridew,
quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype, scale_dq_dtype,
scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype, bias, kernel_name)
shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh,
stridew,
quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype,
scale_dq_dtype,
scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype,
bias, kernel_name)

# quantize switch on
if quantize_config[0] == 1:
@@ -338,7 +343,7 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw,
if is_quantize:
scale_q = tvm.placeholder(
(CUBE_MKN[scale_q_dtype]['mac'][1],), name='scaleQ', dtype=scale_q_dtype)
if quantize_algorithm ==1:
if quantize_algorithm == 1:
offset_q = tvm.placeholder(
(CUBE_MKN[offset_q_dtype]['mac'][1],), name='offsetQ', dtype=offset_q_dtype)

@@ -353,13 +358,13 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw,
else (out_channel,)
scale_rq = tvm.placeholder(
scale_rq_shape, name='scaleRq', dtype=scale_rq_dtype)
if quantize_algorithm ==1:
if quantize_algorithm == 1:
offset_rq_shape = (CUBE_MKN[offset_rq_dtype]['mac'][1],)
offset_rq = tvm.placeholder(
offset_rq_shape, name='offsetRq', dtype=offset_rq_dtype)

# need offset_pad , for half offset
if quantize_algorithm ==1:
if quantize_algorithm == 1:
offset_pad = tvm.placeholder(
(CUBE_MKN[offset_pad_dtype]['mac'][1],), name='offset_pad',
dtype=offset_pad_dtype)
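
The padh/padw checks reformatted above repeat one normalization pattern: accept an int for symmetric padding or a [before, after] list of length PAD_SHAPE_DIM, and reject anything else. In isolation:

    PAD_SHAPE_DIM = 2  # the dim of pad shape in conv, as in the module above

    def normalize_pad(pad):
        if isinstance(pad, list):
            if len(pad) != PAD_SHAPE_DIM:
                raise RuntimeError("Dimension must be %d when pad is a list." % PAD_SHAPE_DIM)
            return pad[0], pad[1]
        return pad, pad

    print(normalize_pad(1))       # (1, 1)
    print(normalize_pad([0, 2]))  # (0, 2)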


+6 -3 tests/st/ops/custom_ops_tbe/conv_layer_fast.py

@@ -17,12 +17,14 @@ from te import tvm
from te.platform import CUBE_MKN
from topi import generic
from topi.cce import util

# pylint: disable=R0913,R0914,R0915,E1101
# the dim of shape in conv must be 4
PAD_SHAPE_DIM = 2

NoneType = type(None)


@util.check_input_type((list, tuple), (list, tuple), str, str, str,
(list, int), (list, int), int, int, bool, str)
def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
@@ -40,7 +42,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty

if isinstance(padh, list):
if len(padh) != PAD_SHAPE_DIM:
raise RuntimeError("Dimension must be %d when padh is a list."%PAD_SHAPE_DIM)
raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM)
pad_top = padh[0]
pad_bottom = padh[1]
else:
@@ -49,7 +51,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty

if isinstance(padw, list):
if len(padw) != PAD_SHAPE_DIM:
raise RuntimeError("Dimension must be %d when padw is a list."%PAD_SHAPE_DIM)
raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM)
pad_left = padw[0]
pad_right = padw[1]
else:
@@ -62,6 +64,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty

return shape_in, shape_w


@util.check_input_type((list, tuple), (list, tuple), str, str, str,
(list, int), (list, int), int, int,
bool, str, bool, bool)
@@ -112,7 +115,7 @@ def conv_layer_fast_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
shape_w = list(shape_w)

shape_in, shape_w = conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
padh, padw, strideh, stridew, bias, kernel_name)
padh, padw, strideh, stridew, bias, kernel_name)

batch_size = shape_in[0]
in_channel = shape_in[1]


+5 -3 tests/st/ops/custom_ops_tbe/cus_conv2d.py

@@ -20,6 +20,8 @@ from mindspore import Tensor
from mindspore._checkparam import ParamValidator as validator
from mindspore._checkparam import Rel, check_bool, check_int_positive, twice
from mindspore.common import dtype as mstype


class Cus_Conv2D(PrimitiveWithInfer):
r"""
Applies 2D convolution for the input.
@@ -92,13 +94,13 @@ class Cus_Conv2D(PrimitiveWithInfer):
validator.check_type('kernel_size', kernel_size, [int, tuple])
if isinstance(kernel_size, int) and kernel_size < 1:
raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed '
+ str(self.kernel_size)+', should be a int or tuple and equal to or greater than 1.')
+ str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.')
if isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or
(not isinstance(kernel_size[0], int)) or
(not isinstance(kernel_size[1], int)) or
kernel_size[0] < 1 or kernel_size[1] < 1):
raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed '
+ str(self.kernel_size)+', should be a int or tuple and equal to or greater than 1.')
+ str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.')
self.stride = validator.check_integer('stride', stride, 1, Rel.GE)
from .cus_conv2d_impl import Cus_Conv2D

@@ -147,4 +149,4 @@ class Cus_Conv2D(PrimitiveWithInfer):
def infer_dtype(self, x_dtype, w_dtype):
args = {'x_dtype': x_dtype, 'w_dtype': w_dtype}
validator.check_type_same(args, [mstype.int8, mstype.int32, mstype.float16, mstype.float32])
return x_dtype
return x_dtype

+1 -0 tests/st/ops/custom_ops_tbe/cus_square.py

@@ -20,6 +20,7 @@ from mindspore import Tensor
# y = x^2
class CusSquare(PrimitiveWithInfer):
"""CusSquare definition"""

@prim_attr_register
def __init__(self):
"""init CusSquare"""


+12 -9 tests/st/ops/custom_ops_tbe/test_cus_conv.py

@@ -20,31 +20,34 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from .cus_conv2d import Cus_Conv2D

context.set_context(device_target="Ascend")


class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
out_channel = 64
kernel_size = 7
self.conv = Cus_Conv2D(out_channel,
kernel_size,
mode=1,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1)
kernel_size,
mode=1,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1)
self.w = Parameter(initializer(
'normal', [64, 3, 7, 7]), name='w')


@ms_function
def construct(self, x):
return self.conv(x, self.w)


def test_net():
np.random.seed(3800)
x = np.random.randn(32,3,224,224).astype(np.float32)
x = np.random.randn(32, 3, 224, 224).astype(np.float32)
conv = Net()
output = conv(Tensor(x))
print(output.asnumpy())

+5 -2 tests/st/ops/custom_ops_tbe/test_square.py

@@ -18,8 +18,10 @@ import mindspore.context as context
from mindspore import Tensor
from cus_square import CusSquare
import pytest

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
"""Net definition"""

@@ -30,6 +32,7 @@ class Net(nn.Cell):
def construct(self, data):
return self.square(data)


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@@ -40,5 +43,5 @@ def test_net():
output = square(Tensor(x))
print(x)
print(output.asnumpy())
expect = np.array([1.0,16.0,81.0]).astype(np.float32)
assert (output.asnumpy() == expect).all()
expect = np.array([1.0, 16.0, 81.0]).astype(np.float32)
assert (output.asnumpy() == expect).all()

+1 -0 tests/st/ops/gpu/test_addn_op.py

@@ -33,6 +33,7 @@ class Net(nn.Cell):
def construct(self, x, y, z):
return self.add((x, y, z))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard


+11 -9 tests/st/ops/gpu/test_argmax_op.py

@@ -21,11 +21,12 @@ from mindspore.common import dtype as mstype
import mindspore.nn as nn
import mindspore.context as context
class NetArgmax(nn.Cell):
def __init__( self):
def __init__(self):
super(NetArgmax, self).__init__()
axis1 = 0
axis2 = -1
axis2 = -1
self.argmax1 = P.Argmax(axis1, output_type=mstype.int32)
self.argmax2 = P.Argmax(axis2, output_type=mstype.int32)
self.argmax3 = P.Argmax(output_type=mstype.int32)
@@ -33,27 +34,28 @@ class NetArgmax(nn.Cell):
def construct(self, x):
return (self.argmax1(x), self.argmax2(x), self.argmax3(x))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_argmax():
x = Tensor(np.array([[1., 20., 5.],
[67., 8., 9.],
[130., 24., 15.],
[0.3, -0.4, -15.]]).astype(np.float32))
expect1 = np.array([2,2,2]).astype(np.int32)
expect2 = np.array([1,0,0,0]).astype(np.int32)
[67., 8., 9.],
[130., 24., 15.],
[0.3, -0.4, -15.]]).astype(np.float32))
expect1 = np.array([2, 2, 2]).astype(np.int32)
expect2 = np.array([1, 0, 0, 0]).astype(np.int32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
Argmax = NetArgmax()
output = Argmax(x)
assert (output[0].asnumpy() == expect1).all()
assert (output[1].asnumpy() == expect2).all()
assert (output[2].asnumpy() == expect2).all()
assert (output[2].asnumpy() == expect2).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
Argmax1 = NetArgmax()
output1 = Argmax(x)
assert (output1[0].asnumpy() == expect1).all()
assert (output1[1].asnumpy() == expect2).all()
assert (output1[2].asnumpy() == expect2).all()
assert (output1[2].asnumpy() == expect2).all()

+13 -11 tests/st/ops/gpu/test_assign_add_op.py

@@ -20,6 +20,7 @@ import mindspore.nn as nn
import numpy as np
import mindspore.context as context


class AssignAdd(nn.Cell):
def __init__(self, value):
super(AssignAdd, self).__init__()
@@ -30,21 +31,22 @@ class AssignAdd(nn.Cell):
res = self.add(self.var, y)
return res


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_assign_add():
expect1 = np.array([[[[ 0, 2, 4.],
[ 6, 8, 10.],
[12, 14, 16.]],
[[18, 20, 22.],
[24, 26, 28.],
[30, 32, 34.]],
[[36, 38, 40.],
[42, 44, 46.],
[48, 50, 52.]]]])
expect2 = np.array([[[[ 0, 3, 6],
[ 9, 12, 15],
expect1 = np.array([[[[0, 2, 4.],
[6, 8, 10.],
[12, 14, 16.]],
[[18, 20, 22.],
[24, 26, 28.],
[30, 32, 34.]],
[[36, 38, 40.],
[42, 44, 46.],
[48, 50, 52.]]]])
expect2 = np.array([[[[0, 3, 6],
[9, 12, 15],
[18, 21, 24]],
[[27, 30, 33],
[36, 39, 42],


+2 -0 tests/st/ops/gpu/test_assign_op.py

@@ -30,9 +30,11 @@ class Net(nn.Cell):
def construct(self, value):
return self.assign(self.var, value)


x = np.array([[1.2, 1], [1, 0]]).astype(np.float32)
value = np.array([[1, 2], [3, 4.0]]).astype(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard


+45 -41 tests/st/ops/gpu/test_batch_matmul.py

@@ -24,6 +24,7 @@ import mindspore.nn as nn
import mindspore.context as context
from mindspore.common import dtype as mstype


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -35,6 +36,7 @@ class BatchMatMulNet(nn.Cell):
def construct(self, x, y):
return self.batch_matmul(x, y)


def test_4D():
input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
@@ -42,15 +44,15 @@ def test_4D():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = BatchMatMulNet()
output = net(input_x, input_y)
expect = [[[[ 20, 23, 26, 29]],
[[ 200, 212, 224, 236]],
[[ 596, 617, 638, 659]],
[[1208, 1238, 1268, 1298]]],
expect = [[[[20, 23, 26, 29]],
[[200, 212, 224, 236]],
[[596, 617, 638, 659]],
[[1208, 1238, 1268, 1298]]],

[[[2036, 2075, 2114, 2153]],
[[3080, 3128, 3176, 3224]],
[[4340, 4397, 4454, 4511]],
[[5816, 5882, 5948, 6014]]]]
[[3080, 3128, 3176, 3224]],
[[4340, 4397, 4454, 4511]],
[[5816, 5882, 5948, 6014]]]]
assert (output.asnumpy() == expect).all()


@@ -58,21 +60,21 @@ def test_4D():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_a():
input_x = Tensor(np.arange(2*4*3*1).reshape(2,4,3,1), mstype.float32)
input_y = Tensor(np.arange(2*4*3*4).reshape(2,4,3,4), mstype.float32)
input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = BatchMatMulNet(transpose_a=True)
output = net(input_x, input_y)
expect = [[[[ 20, 23, 26, 29]],
[[ 200, 212, 224, 236]],
[[ 596, 617, 638, 659]],
[[1208, 1238, 1268, 1298]]],
expect = [[[[20, 23, 26, 29]],
[[200, 212, 224, 236]],
[[596, 617, 638, 659]],
[[1208, 1238, 1268, 1298]]],

[[[2036, 2075, 2114, 2153]],
[[3080, 3128, 3176, 3224]],
[[4340, 4397, 4454, 4511]],
[[5816, 5882, 5948, 6014]]]]
[[3080, 3128, 3176, 3224]],
[[4340, 4397, 4454, 4511]],
[[5816, 5882, 5948, 6014]]]]
assert (output.asnumpy() == expect).all()


@@ -80,21 +82,21 @@ def test_4D_transpose_a():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_b():
input_x = Tensor(np.arange(2*4*1*3).reshape(2,4,1,3), mstype.float32)
input_y = Tensor(np.arange(2*4*4*3).reshape(2,4,4,3), mstype.float32)
input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = BatchMatMulNet(transpose_b=True)
output = net(input_x, input_y)
expect = [[[[ 5, 14, 23, 32]],
[[ 158, 194, 230, 266]],
[[ 527, 590, 653, 716]],
[[1112, 1202, 1292, 1382]]],
expect = [[[[5, 14, 23, 32]],
[[158, 194, 230, 266]],
[[527, 590, 653, 716]],
[[1112, 1202, 1292, 1382]]],

[[[1913, 2030, 2147, 2264]],
[[2930, 3074, 3218, 3362]],
[[4163, 4334, 4505, 4676]],
[[5612, 5810, 6008, 6206]]]]
[[2930, 3074, 3218, 3362]],
[[4163, 4334, 4505, 4676]],
[[5612, 5810, 6008, 6206]]]]
assert (output.asnumpy() == expect).all()


@@ -102,23 +104,24 @@ def test_4D_transpose_b():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_ab():
input_x = Tensor(np.arange(2*4*3*1).reshape(2,4,3,1), mstype.float32)
input_y = Tensor(np.arange(2*4*4*3).reshape(2,4,4,3), mstype.float32)
input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = BatchMatMulNet(transpose_a=True, transpose_b=True)
output = net(input_x, input_y)
expect = [[[[ 5, 14, 23, 32]],
[[ 158, 194, 230, 266]],
[[ 527, 590, 653, 716]],
[[1112, 1202, 1292, 1382]]],
expect = [[[[5, 14, 23, 32]],
[[158, 194, 230, 266]],
[[527, 590, 653, 716]],
[[1112, 1202, 1292, 1382]]],

[[[1913, 2030, 2147, 2264]],
[[2930, 3074, 3218, 3362]],
[[4163, 4334, 4505, 4676]],
[[5612, 5810, 6008, 6206]]]]
[[2930, 3074, 3218, 3362]],
[[4163, 4334, 4505, 4676]],
[[5612, 5810, 6008, 6206]]]]
assert (output.asnumpy() == expect).all()


class BatchMatMulNet(nn.Cell):
def __init__(self, transpose_a=False, transpose_b=False):
super(BatchMatMulNet, self).__init__()
@@ -127,6 +130,7 @@ class BatchMatMulNet(nn.Cell):
def construct(self, x, y):
return self.batch_matmul(x, y)


def test_4D_fp16():
input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16)
input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16)
@@ -134,13 +138,13 @@ def test_4D_fp16():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = BatchMatMulNet()
output = net(input_x, input_y)
expect = [[[[ 20, 23, 26, 29]],
[[ 200, 212, 224, 236]],
[[ 596, 617, 638, 659]],
[[1208, 1238, 1268, 1298]]],
expect = [[[[20, 23, 26, 29]],
[[200, 212, 224, 236]],
[[596, 617, 638, 659]],
[[1208, 1238, 1268, 1298]]],

[[[2036, 2075, 2114, 2153]],
[[3080, 3128, 3176, 3224]],
[[4340, 4397, 4454, 4511]],
[[5816, 5882, 5948, 6014]]]]
[[3080, 3128, 3176, 3224]],
[[4340, 4397, 4454, 4511]],
[[5816, 5882, 5948, 6014]]]]
assert (output.asnumpy() == expect).all()
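
The expect tables in these tests can be cross-checked with numpy, since BatchMatMul multiplies over the trailing two axes (with transpose_a/transpose_b swapping them first). For the plain 4-D case:

    import numpy as np

    x = np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3).astype(np.float32)
    y = np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4).astype(np.float32)
    print(np.matmul(x, y)[0, 0])  # [[20. 23. 26. 29.]] -- matches the first block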

Some files were not shown because too many files changed in this diff
