@@ -244,8 +244,8 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     return True
-# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements
+# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements,
+# pylint: disable=inconsistent-return-statements
 # @util.check_input_type(dict, dict, (dict, NoneType), dict, bool, bool, str)
 @op_info_register(matmul_cube_dense_left_op_info)
 def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False,
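Note: the signature above keeps `output_y={}`, a mutable default argument that Python evaluates once at definition time, so the same dict is shared across calls. A minimal sketch of the usual defensive idiom (names are illustrative, not from this patch):

    def cus_matmul(input_x1, input_x2, output_y=None):
        # A fresh dict per call; nothing leaks between invocations.
        if output_y is None:
            output_y = {}
        output_y["inputs"] = (input_x1, input_x2)
        return output_y

    assert cus_matmul(1, 2) is not cus_matmul(1, 2)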
@@ -40,6 +40,7 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
     .get_op_info()
+# pylint: disable=inconsistent-return-statements
 @op_info_register(matmul_cube_dense_right_op_info)
 def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
                             kernel_name="matmulcube"):
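Note: the `inconsistent-return-statements` disables silence pylint for functions that return a value on some paths and fall off the end on others. A minimal illustration of the pattern being waived (hypothetical, not code from this patch):

    def check_shape(shape):
        # Returns True on one path but implicitly returns None on the other,
        # which is what pylint R1710 (inconsistent-return-statements) flags.
        if len(shape) == 4:
            return True

    assert check_shape([1, 2, 3, 4]) is True
    assert check_shape([]) is None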
@@ -31,6 +31,8 @@ from .transformer_model import TransformerModel
 GRADIENT_CLIP_TYPE = 1
 GRADIENT_CLIP_VALUE = 5.0
+
+# pylint: disable=consider-using-in
 class ClipGradients(nn.Cell):
     """
     Clip gradients.
@@ -48,11 +50,12 @@ class ClipGradients(nn.Cell):
         self.clip_by_norm = nn.ClipByNorm()
         self.cast = P.Cast()
         self.dtype = P.DType()
+
     def construct(self,
                   grads,
                   clip_type,
                   clip_value):
-        #return grads
+        # return grads
         if clip_type != 0 and clip_type != 1:
             return grads
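Note: instead of disabling `consider-using-in`, the guard could be rewritten as the membership test pylint asks for; a hypothetical equivalent:

    def should_skip_clipping(clip_type):
        # Same truth table as `clip_type != 0 and clip_type != 1`.
        return clip_type not in (0, 1)

    assert should_skip_clipping(2) and not should_skip_clipping(1)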
@@ -83,8 +86,8 @@ class TransformerTrainingLoss(nn.Cell):
         super(TransformerTrainingLoss, self).__init__(auto_prefix=False)
         self.vocab_size = config.vocab_size
         self.onehot = P.OneHot()
-        self.on_value = Tensor(float(1-config.label_smoothing), mstype.float32)
-        self.off_value = Tensor(config.label_smoothing/float(self.vocab_size-1), mstype.float32)
+        self.on_value = Tensor(float(1 - config.label_smoothing), mstype.float32)
+        self.off_value = Tensor(config.label_smoothing / float(self.vocab_size - 1), mstype.float32)
         self.reduce_sum = P.ReduceSum()
         self.reduce_mean = P.ReduceMean()
         self.reshape = P.Reshape()
@@ -92,7 +95,7 @@ class TransformerTrainingLoss(nn.Cell):
         self.flatten = P.Flatten()
         self.neg = P.Neg()
         self.cast = P.Cast()
-        self.flat_shape = (config.batch_size*config.seq_length,)
+        self.flat_shape = (config.batch_size * config.seq_length,)
 
     def construct(self, prediction_scores, label_ids, label_weights):
         """Defines the computation performed."""
@@ -217,10 +220,12 @@ class TransformerTrainOneStepCell(nn.Cell):
 grad_scale = C.MultitypeFuncGraph("grad_scale")
 reciprocal = P.Reciprocal()
+
 @grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
     return grad * F.cast(reciprocal(scale), F.dtype(grad))
+
 class TransformerTrainOneStepWithLossScaleCell(nn.Cell):
     """
     Encapsulation class of Transformer network training.
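Note: `tensor_grad_scale` is registered on a `MultitypeFuncGraph` and, in MindSpore training cells, is typically mapped over the gradient tuple to undo loss scaling. A pure-Python analogue of that pattern (a sketch; the real `HyperMap` runs inside the compiled graph):

    def hyper_map(fn, grads):
        # Stand-in for C.HyperMap: apply fn to every element of the tuple.
        return tuple(fn(g) for g in grads)

    scale = 128.0
    grads = (2.0, 4.0, 8.0)  # toy stand-ins for gradient tensors
    unscaled = hyper_map(lambda g: g * (1.0 / scale), grads)
    assert unscaled == (2.0 / 128.0, 4.0 / 128.0, 8.0 / 128.0)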
@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
 _nn_clip_by_norm = nn.ClipByNorm()
 clip_grad = C.MultitypeFuncGraph("clip_grad")
+
+
+# pylint: disable=consider-using-in
 @clip_grad.register("Number", "Number", "Tensor")
 def _clip_grad(clip_type, clip_value, grad):
     """
@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
         new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
     return new_grad
+
 class GetMaskedLMOutput(nn.Cell):
     """
     Get masked lm output.
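Note: `_clip_grad` branches on `clip_type` between clip-by-value and clip-by-norm (the visible line above is the norm branch). A NumPy analogue of the selection, assuming 0 means value clipping and 1 means norm clipping as the constants suggest:

    import numpy as np

    def clip_grad_np(clip_type, clip_value, grad):
        if clip_type not in (0, 1):
            return grad
        if clip_type == 0:
            # Clamp each element into [-clip_value, clip_value].
            return np.clip(grad, -clip_value, clip_value)
        # Rescale the whole tensor if its L2 norm exceeds clip_value.
        norm = np.linalg.norm(grad)
        return grad if norm <= clip_value else grad * (clip_value / norm)

    g = np.array([3.0, 4.0])  # L2 norm 5.0
    assert np.allclose(clip_grad_np(1, 1.0, g), [0.6, 0.8])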
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
         self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                     name="loss_scale")
         self.add_flags(has_effect=True)
+
     def construct(self,
                   input_ids,
                   input_mask,
@@ -15,14 +15,15 @@
 """Test bert submodules."""
-import numpy as np
 import os
-from mindspore import Tensor
-from mindspore import nn, context
+import numpy as np
 from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \
     BertConfig, BertPreTraining, BertNetworkWithLoss
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
+from mindspore import Tensor
+from mindspore import nn, context
 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.compile_forward import pipeline_for_compile_forward_anf_graph_for_case_by_case_config, \
     pipeline_for_compile_forward_ge_graph_for_case_by_case_config
@@ -15,9 +15,10 @@
 """Component that Check if the function raises the expected Exception."""
-import pytest
 import sys
+
+import pytest
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword
@@ -16,9 +16,10 @@
 """Implementation of Numerical gradients checking."""
 # pylint: disable=missing-docstring
-from typing import Callable, List, Any
 import mindspore._c_expression as _c_expression
 import numpy as np
+from typing import Callable, List, Any
+
 from mindspore import ParameterTuple
 from mindspore import Tensor
@@ -15,9 +15,10 @@
 """Dataset utils."""
-import numpy as np
 import random
+
+import numpy as np
 from mindspore import Tensor
@@ -24,8 +24,7 @@ from mindspore.ops import operations as P
 from mindspore.ops._grad.grad_base import bprop_getters
 from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer
-logging.basicConfig(level=logging.DEBUG, format=
-                    '[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
+logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
 logger = logging.getLogger(__name__)
@@ -14,9 +14,8 @@
 # ============================================================================
 """Other utils."""
-import mindspore._c_expression as _c_expression
 import numpy as np
+import mindspore._c_expression as _c_expression
 from mindspore.common.tensor import Tensor
@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
 _nn_clip_by_norm = nn.ClipByNorm()
 clip_grad = C.MultitypeFuncGraph("clip_grad")
+
+
+# pylint: disable=consider-using-in
 @clip_grad.register("Number", "Number", "Tensor")
 def _clip_grad(clip_type, clip_value, grad):
     """
@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
         new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
     return new_grad
+
 class GetMaskedLMOutput(nn.Cell):
     """
     Get masked lm output.
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
         self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                     name="loss_scale")
         self.add_flags(has_effect=True)
+
     def construct(self,
                   input_ids,
                   input_mask,
@@ -23,35 +23,41 @@ from mindspore.ops import functional as F, composite as C
 import mindspore.context as context
 import pytest
+
+
 class TensorIntAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorIntAutoCast, self).__init__()
         self.i = 2
+
     def construct(self, t):
         z = F.tensor_mul(t, self.i)
         return z
 
 
 class TensorFPAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorFPAutoCast, self).__init__()
         self.f = 1.2
+
     def construct(self, t):
         z = F.tensor_mul(t, self.f)
         return z
 
 
 class TensorBoolAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorBoolAutoCast, self).__init__()
         self.f = True
+
     def construct(self, t):
         z = F.tensor_mul(t, self.f)
         return z
 
 
 class TensorAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorAutoCast, self).__init__()
+
     def construct(self, t1, t2):
         z = F.tensor_mul(t1, t2)
         return z
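Note: the assertions that follow exercise implicit type promotion, e.g. `uint8 * int8 -> int16` (the smallest signed type that holds both ranges). NumPy follows the same promotion rule, which makes a handy cross-check:

    import numpy as np

    assert np.promote_types(np.uint8, np.int8) == np.int16
    assert np.promote_types(np.uint8, np.int16) == np.int16
    assert np.promote_types(np.float32, np.float64) == np.float64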
@@ -68,7 +74,7 @@ def test_tensor_auto_cast():
     t_fp16 = Tensor(np.ones([2, 1, 2, 2]), mstype.float16)
     t_fp32 = Tensor(np.ones([2, 1, 2, 2]), mstype.float32)
     t_fp64 = Tensor(np.ones([2, 1, 2, 2]), mstype.float64)
-    net = TensorAutoCast()
+    net = TensorAutoCast()
     rs = net(t_uint8, t_int8)
     assert rs.dtype() == mstype.int16
     rs = net(t_uint8, t_int16)
@@ -96,7 +102,7 @@ def test_tensor_auto_cast():
     assert rs.dtype() == mstype.float64
     rs = net(t_fp32, t_fp64)
     assert rs.dtype() == mstype.float64
-    rs = net(t_uint8, t_fp16)
+    rs = net(t_uint8, t_fp16)
     assert rs.dtype() == mstype.float16
     rs = net(t_uint8, t_fp32)
@@ -210,7 +216,6 @@ def test_tensor_auto_cast():
     with pytest.raises(TypeError):
         net(t_uint64, t_fp64)
     with pytest.raises(TypeError):
-
         tfp(t_uint16)
     with pytest.raises(TypeError):
@@ -21,6 +21,7 @@ import mindspore.common.dtype as mstype
 from mindspore import Tensor
 from mindspore.ops import operations as P
 from mindspore import context
+
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
@@ -29,14 +30,16 @@ def test_cast_op_attr():
         def __init__(self):
             super(CastNet, self).__init__()
             self.cast = P.Cast()
+
         def construct(self, x, t):
             return self.cast(x, t)
 
     class CastTypeTest(nn.Cell):
         def __init__(self, net):
             super(CastTypeTest, self).__init__()
             self.net = net
             self.cast = P.Cast()
+
         def construct(self, x, y, z):
             cast_op = self.cast
             t1 = cast_op(x, mstype.float32)
@@ -46,6 +49,7 @@ def test_cast_op_attr():
             t4 = cast_net(y, mstype.int32)
             t5 = cast_net(z, mstype.float16)
             return (t1, t2, t3, t4, t5)
+
     net = CastTypeTest(CastNet())
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
@@ -142,4 +142,6 @@ def test_transpose():
     assert (output[1].asnumpy() == expect1).all()
     assert (output[2].asnumpy() == expect2).all()
     assert (output[3].asnumpy() == expect3).all()
-test_transpose()
+
+
+test_transpose()
@@ -1043,6 +1043,7 @@ def test_print_tuple_wrapper(tag):
     return fns[tag]
+# pylint: disable=unnecessary-semicolon
 def test_constant_duplicate_mul(tag):
     fns = FnDict()
     Mul = Primitive('Mul');
@@ -152,7 +152,7 @@ def test_dict_set_item():
     x = Tensor(np.ones([2, 2, 3], np.float32))
     net = DictSetNet()
-    out = net(x)
+    _ = net(x)
 
 # if the dictionary item does not exist, create a new one
@@ -168,4 +168,4 @@ def test_dict_set_item_create_new():
         return my_dict
     x = Tensor(np.ones([2, 2, 3], np.float32))
     net = DictSetNet()
-    out = net(x)
+    _ = net(x)
@@ -81,31 +81,3 @@ def test_hypermap_func_const():
     net = NetMap()
     assert net() == (8, 12, 16)
-
-"""
-def test_hypermap_func_variable():
-    class NetMap(Cell):
-        def __init__(self):
-            super(NetMap, self).__init__()
-
-        def double(self, x):
-            return 2 * x
-
-        def triple(self, x):
-            return 3 * x
-
-        def square(self, x):
-            return x * x
-
-        def construct(self, x):
-            _list = [self.double, self.triple, self.square]
-            return map(lambda f: f(x), _list)
-
-    x = Tensor(np.ones([3, 2, 3], np.float32))
-    net = NetMap()
-    with pytest.raises(RuntimeError) as ex:
-        net(x)
-    assert "HyperMap don't support Closure with free variable yet" in str(ex.value)
-"""
@@ -133,7 +133,7 @@ def test_list_append_2():
 class ListOperate(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(ListOperate, self).__init__()
 
     def construct(self, t, l):
@@ -153,7 +153,7 @@ class ListOperate(nn.Cell):
 class InListNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(InListNet, self).__init__()
         self.list_ = [1, 2, 3, 4, 5, "ok"]
@@ -53,7 +53,7 @@ class NestTupleGraphNet(nn.Cell):
 class InTupleNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(InTupleNet, self).__init__()
         self.tuple_ = (1, 2, 3, 4, 5, "ok")
@@ -99,4 +99,4 @@ def test_assignadd_scalar_cast():
     net = AssignAddNet()
     x = Tensor(np.ones([1]).astype(np.int64) * 102)
     # _executor.compile(net, 1)
-    result = net(x)
+    _ = net(x)
@@ -429,9 +429,9 @@ def test_tensor_dtype_np_int64():
 def test_tensor_dtype_fp32_to_bool():
     with pytest.raises(RuntimeError):
-        input = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        input = ms.Tensor(input)
-        input_me = ms.Tensor(input, dtype=ms.bool_)
+        input_ = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        input_ = ms.Tensor(input_)
+        _ = ms.Tensor(input_, dtype=ms.bool_)
 
 def test_tensor_operation():
@@ -41,10 +41,10 @@ class Func(nn.Cell):
     def construct(self, x, y):
         init = self.alloc_status()
-        sum = add(x, y)
+        sum_ = add(x, y)
         product = mul1(x, y)
         flag = self.get_status(init)
-        out = add2(sum, product)
+        out = add2(sum_, product)
         clear = self.clear_status(flag)
         out = F.depend(out, clear)
         return out
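Note: the rename to `sum_` fixes pylint's `redefined-builtin`; shadowing `sum` is more than a style issue once later code needs the builtin:

    sum = 10            # shadows the builtin in this scope
    try:
        sum([1, 2, 3])  # TypeError: 'int' object is not callable
    except TypeError:
        pass
    del sum             # the builtin is reachable again
    assert sum([1, 2, 3]) == 6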
@@ -88,7 +88,7 @@ def test_sens():
     sens = Tensor(np.ones([3, 3]).astype(np.float32))
     net = Net()
     net.add_flags(has_effect=True)
-    out = net(x, y, sens)
+    _ = net(x, y, sens)
 
 class Net_hyper(nn.Cell):
@@ -119,7 +119,7 @@ def test_hyper_add():
     sens = Tensor(np.ones([3, 3]).astype(np.float32))
     net = Net_hyper()
     net.add_flags(has_effect=True)
-    out = net(x, y, sens)
+    _ = net(x, y, sens)
 
 def test_keep_order_io_effect_exception_return_dtype():
@@ -148,9 +148,6 @@ def test_cast():
     _executor.compile(net, x)
 
 
-"""test grad of PReLU, which cause AddN(generated by grad) fail"""
 class IRBlockZ(nn.Cell):
     def __init__(self, inplanes, planes):
         super(IRBlockZ, self).__init__()
@@ -46,6 +46,7 @@ class MaxNet(nn.Cell):
                  kernel_size,
                  stride=None,
                  padding=0):
+        _ = padding
         super(MaxNet, self).__init__()
         self.maxpool = nn.MaxPool2d(kernel_size,
                                     stride)
@@ -73,5 +74,5 @@ class Avg1dNet(nn.Cell):
 def test_avg1d():
     net = Avg1dNet(6, 1)
-    input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
-    _executor.compile(net, input)
+    input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
+    _executor.compile(net, input_)
@@ -52,19 +52,19 @@ def test_compile_psnr_grayscale():
 def test_psnr_max_val_negative():
     max_val = -1
     with pytest.raises(ValueError):
-        net = PSNRNet(max_val)
+        _ = PSNRNet(max_val)
 
 def test_psnr_max_val_bool():
     max_val = True
     with pytest.raises(TypeError):
-        net = PSNRNet(max_val)
+        _ = PSNRNet(max_val)
 
 def test_psnr_max_val_zero():
     max_val = 0
     with pytest.raises(ValueError):
-        net = PSNRNet(max_val)
+        _ = PSNRNet(max_val)
 
 def test_psnr_different_shape():
@@ -51,59 +51,59 @@ def test_compile_grayscale():
 def test_ssim_max_val_negative():
     max_val = -1
     with pytest.raises(ValueError):
-        net = SSIMNet(max_val)
+        _ = SSIMNet(max_val)
 
 def test_ssim_max_val_bool():
     max_val = True
     with pytest.raises(TypeError):
-        net = SSIMNet(max_val)
+        _ = SSIMNet(max_val)
 
 def test_ssim_max_val_zero():
     max_val = 0
     with pytest.raises(ValueError):
-        net = SSIMNet(max_val)
+        _ = SSIMNet(max_val)
 
 def test_ssim_filter_size_float():
     with pytest.raises(TypeError):
-        net = SSIMNet(filter_size=1.1)
+        _ = SSIMNet(filter_size=1.1)
 
 def test_ssim_filter_size_zero():
     with pytest.raises(ValueError):
-        net = SSIMNet(filter_size=0)
+        _ = SSIMNet(filter_size=0)
 
 def test_ssim_filter_sigma_zero():
     with pytest.raises(ValueError):
-        net = SSIMNet(filter_sigma=0.0)
+        _ = SSIMNet(filter_sigma=0.0)
 
 def test_ssim_filter_sigma_negative():
     with pytest.raises(ValueError):
-        net = SSIMNet(filter_sigma=-0.1)
+        _ = SSIMNet(filter_sigma=-0.1)
 
 def test_ssim_k1_k2_wrong_value():
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=1.1)
+        _ = SSIMNet(k1=1.1)
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=1.0)
+        _ = SSIMNet(k1=1.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=0.0)
+        _ = SSIMNet(k1=0.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=-1.0)
+        _ = SSIMNet(k1=-1.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=1.1)
+        _ = SSIMNet(k2=1.1)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=1.0)
+        _ = SSIMNet(k2=1.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=0.0)
+        _ = SSIMNet(k2=0.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=-1.0)
+        _ = SSIMNet(k2=-1.0)
 
 def test_ssim_different_shape():
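Note: binding the constructor result to `_` inside `pytest.raises` makes it explicit that the object is never used; the construction itself is the assertion. The pattern in miniature (hypothetical validator, assumes pytest is installed):

    import pytest

    class StrictK:
        def __init__(self, k):
            if not 0 < k < 1:
                raise ValueError("k must be in (0, 1)")

    with pytest.raises(ValueError):
        _ = StrictK(1.5)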
@@ -64,13 +64,13 @@ class BatchNormTester(nn.Cell):
 def test_batchnorm_train_onnx_export():
     "test onnx export interface does not modify trainable flag of a network"
-    input = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
+    input_ = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
     net = BatchNormTester(3)
     net.set_train()
     if not net.training:
         raise ValueError('netowrk is not in training mode')
     onnx_file = 'batch_norm.onnx'
-    export(net, input, file_name=onnx_file, file_format='ONNX')
+    export(net, input_, file_name=onnx_file, file_format='ONNX')
     if not net.training:
         raise ValueError('netowrk is not in training mode')
@@ -172,6 +172,7 @@ net_cfgs = [
 def get_id(cfg):
+    _ = cfg
     return list(map(lambda x: x[0], net_cfgs))
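Note: `_ = cfg` is the conventional way to mark a parameter that must stay in the signature (pytest passes it) but is deliberately unused; a standalone illustration:

    def describe(cfg):
        _ = cfg  # keep the parameter for the caller's sake, discard it explicitly
        return "default"

    assert describe({"any": "thing"}) == "default"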
@@ -28,7 +28,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
 class AssignAddNet(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(AssignAddNet, self).__init__()
         self.op = P.AssignAdd()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1")
@@ -39,7 +39,7 @@ class AssignAddNet(nn.Cell):
 class AssignSubNet(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(AssignSubNet, self).__init__()
         self.op = P.AssignSub()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1")
@@ -635,7 +635,7 @@ test_case_math_ops = [
         'skip': ['backward']}),
     # type of x and y not match
     ('Greater1', {
-        'block': P.Greater(),
+        'block': P.Greater(),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
     # type of x and y not match
@@ -660,6 +660,7 @@ test_case_math_ops = [
     'skip': ['backward']}),
 ]
+
 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
 def test_check_exception():
     return raise_set
@@ -21,21 +21,25 @@ import mindspore.context as context
 from mindspore import Tensor
 from mindspore.ops import functional as F
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
 
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
 
 
 class FakeOp(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
         """"""
+
     def infer_shape(self, x, y):
         self.second_shape = y
         self.add_prim_attr("second_shape", y)
         return x
+
     def infer_dtype(self, x, y):
         return x
-# test the normal case that should generate independent primitive because of different
+
+
+# test the normal case that should generate independent primitive because of different
 # generated attributes after inference
 def test_conv2d_same_primitive():
     class Conv2DSameNet(nn.Cell):
@@ -43,15 +47,18 @@ def test_conv2d_same_primitive():
             super(Conv2DSameNet, self).__init__()
             self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
             self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
+
         def construct(self, x, y):
             r1 = self.conv1(x)
             r2 = self.conv2(y)
             return (r1, r2)
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = Conv2DSameNet()
     net(t1, t2)
 
+
 # test cell as high order argument
 # The graph with free variables used as argument is not supported yet
 # because of the limit of inference specialize system
@@ -59,18 +66,22 @@ def Xtest_conv2d_op_with_arg():
     class Conv2dNet(nn.Cell):
         def __init__(self):
             super(Conv2dNet, self).__init__()
+
         def construct(self, op, x):
             return op(x)
+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
+
         def construct(self, x, y):
             conv_op = self.conv2
             a = self.opnet(conv_op, x)
             b = self.opnet(conv_op, y)
             return (a, b)
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(Conv2dNet())
@@ -82,23 +93,29 @@ def test_conv2d_op_with_arg():
         def __init__(self):
             super(FackOpNet, self).__init__()
             self.op = FakeOp()
+
         def construct(self, x, y):
             return self.op(x, y)
+
     class OpNet(nn.Cell):
         def __init__(self):
             super(OpNet, self).__init__()
+
         def construct(self, op, x, y):
             return op(x, y)
+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.op = FackOpNet()
+
         def construct(self, x, y):
             op = self.op
             a = self.opnet(op, x, y)
             b = self.opnet(op, y, x)
             return (a, b)
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
@@ -110,63 +127,77 @@ def test_conv2d_op_with_arg_same_input():
         def __init__(self):
             super(FackOpNet, self).__init__()
             self.op = FakeOp()
+
         def construct(self, x, y):
             return self.op(x, y)
+
     class OpNet(nn.Cell):
         def __init__(self):
             super(OpNet, self).__init__()
+
         def construct(self, op, x, y):
             return op(x, y)
+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.op = FackOpNet()
+
         def construct(self, x, y):
             op = self.op
             a = self.opnet(op, x, x)
             b = self.opnet(op, y, x)
             return (a, b)
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
     net(t1, t2)
 
+
 # test op with partial
 def test_op_as_partial():
     class OpAsPartial(nn.Cell):
         def __init__(self):
             super(OpAsPartial, self).__init__()
             self.op = FakeOp()
+
         def construct(self, x, y, z):
             partial_op = F.partial(self.op, x)
             a = partial_op(y)
             b = partial_op(z)
             return a, b
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpAsPartial()
     net(t1, t2, t3)
 
+
 # test op with partial
 def test_op_as_partial_inside():
     class OpAsPartial(nn.Cell):
         def __init__(self):
             super(OpAsPartial, self).__init__()
             self.op = FakeOp()
+
         def construct(self, x, y, z):
             partial_op = F.partial(self.op, x)
             a = partial_op(y)
             b = partial_op(z)
             return a, b
+
     class OuterNet(nn.Cell):
         def __init__(self):
             super(OuterNet, self).__init__()
             self.net = OpAsPartial()
+
         def construct(self, x, y, z):
             a, b = self.net(x, y, z)
             return a, b
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
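Note: `F.partial` behaves like `functools.partial` but inside the compiled graph: `F.partial(self.op, x)(y)` calls `self.op(x, y)`. A plain-Python analogue of the tests above:

    from functools import partial

    def fake_op(x, y):
        return (x, y)

    partial_op = partial(fake_op, "x")
    # Same call shape as a = partial_op(y); b = partial_op(z) above.
    assert partial_op("y") == ("x", "y")
    assert partial_op("z") == ("x", "z")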
@@ -180,12 +211,14 @@ def test_op_as_partial_independent():
         def __init__(self):
             super(OpAsPartial, self).__init__()
             self.op = FakeOp()
+
         def construct(self, x, y, z):
             partial_op1 = F.partial(self.op, x)
             a = partial_op1(y)
             partial_op2 = F.partial(self.op, x)
             b = partial_op2(z)
             return a, b
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
@@ -198,6 +231,7 @@ def test_nest_partial():
         def __init__(self):
             super(NestPartial, self).__init__()
             self.op = FakeOp()
+
         def construct(self, x, y, z):
             partial_op1 = F.partial(self.op)
             partial_op2 = F.partial(partial_op1, x)
@@ -206,54 +240,65 @@ def test_nest_partial():
             partial_op4 = F.partial(partial_op3, x)
             b = partial_op4(z)
             return a, b
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = NestPartial()
     net(t1, t2, t3)
 
+
 # high order argument
 # op and op args as network arguments
 def test_op_with_arg_as_input():
     class WithOpArgNet(nn.Cell):
         def __init__(self):
             super(WithOpArgNet, self).__init__()
+
         def construct(self, op, x, y):
             return op(x, y)
+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.op = FakeOp()
+
         def construct(self, x, y, z):
             op = self.op
             a = self.opnet(op, x, z)
             b = self.opnet(op, x, y)
             return (a, b)
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpsNet(WithOpArgNet())
     net(t1, t2, t3)
 
+
 # The partial application used as argument is not supported yet
 # because of the limit of inference specialize system
 def Xtest_partial_as_arg():
     class PartialArgNet(nn.Cell):
         def __init__(self):
             super(PartialArgNet, self).__init__()
+
         def construct(self, partial_op, y):
             return partial_op(y)
+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.partial_net = net
             self.op = FakeOp()
+
         def construct(self, x, y, z):
             partial_op = F.partial(self.op, x)
             a = self.partial_net(partial_op, z)
             b = self.partial_net(partial_op, y)
             return (a, b)
+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
@@ -32,6 +32,7 @@ from ....mindspore_test_framework.pipeline.forward.verify_exception \
 logging.basicConfig(level=logging.WARNING)
+# pylint: disable=abstract-method
 class NetMissConstruct(nn.Cell):
     """ NetMissConstruct definition """
@@ -46,7 +47,6 @@ class NetMissConstruct(nn.Cell):
         self.max_pool2d = nn.MaxPool2d(kernel_size=2)
         self.flatten = P.Flatten()
-    # pylint: disable=abstract-method
     # TestCase: Mis-spelled 'construct' to 'construtc'
     def construtc(self, x):
         x = self.max_pool2d(self.relu(self.conv1(x)))
@@ -44,7 +44,7 @@ class MockNeg(PrimitiveWithInfer):
     def infer_dtype(self, input_x):
         raise TypeError("InferError")
-        return input_x
+        # return input_x
 
 class MockSub(PrimitiveWithInfer):
@@ -79,8 +79,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
         self.add = P.TensorAdd()
 
-    def construct(self, input):
-        output = self.add(self.matmul(input, self.weight), self.bias)
+    def construct(self, input_):
+        output = self.add(self.matmul(input_, self.weight), self.bias)
         return output
@@ -93,9 +93,9 @@ class NetFP16(nn.Cell):
         self.add = P.TensorAdd()
         self.cast = P.Cast()
 
-    def construct(self, input):
+    def construct(self, input_):
         output = self.cast(
-            self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
+            self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)),
                      self.cast(self.bias, mstype.float16)), mstype.float32)
         return output
@@ -42,10 +42,10 @@ class MindDataSet(MindData):
         if self._size < self._iter_num:
             raise StopIteration
         self._iter_num += 1
-        next = []
-        for shape, type in zip(self._output_shapes, self._np_types):
-            next.append(Tensor(np.ones(shape).astype(type)))
-        return tuple(next)
+        lst = []
+        for shape_, type_ in zip(self._output_shapes, self._np_types):
+            lst.append(Tensor(np.ones(shape_).astype(type_)))
+        return tuple(lst)
 
 class Net(nn.Cell):
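Note: renaming `next`/`shape`/`type` avoids shadowing builtins while producing the same batch; a NumPy-only check of the rewritten loop (the `Tensor` wrapper is omitted here):

    import numpy as np

    output_shapes = [(2, 2), (3,)]
    np_types = [np.float32, np.int32]
    lst = []
    for shape_, type_ in zip(output_shapes, np_types):
        lst.append(np.ones(shape_).astype(type_))
    batch = tuple(lst)
    assert batch[0].dtype == np.float32 and batch[1].shape == (3,)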
@@ -56,8 +56,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
         self.add = P.TensorAdd()
 
-    def construct(self, input):
-        output = self.add(self.matmul(input, self.weight), self.bias)
+    def construct(self, input_):
+        output = self.add(self.matmul(input_, self.weight), self.bias)
         return output
@@ -70,9 +70,9 @@ class NetFP16(nn.Cell):
         self.add = P.TensorAdd()
         self.cast = P.Cast()
 
-    def construct(self, input):
+    def construct(self, input_):
         output = self.cast(
-            self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
+            self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)),
                      self.cast(self.bias, mstype.float16)), mstype.float32)
         return output
@@ -97,8 +97,8 @@ def test_all_to_all():
     print(strategys)
     expect_dict = {'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits'
                    '/SoftmaxCrossEntropyWithLogits-op3': [[8, 1], [8, 1]],
-                   'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [
-                       [8, 1], [], []],
+                   'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/'
+                   'OneHot-op4': [[8, 1], [], []],
                    'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': [
                        [8, 1]],
                    'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': [
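Note: the long dictionary key is wrapped with implicit adjacent-string-literal concatenation, so the runtime value is unchanged; a minimal demonstration:

    key = ('Default/network-_VirtualDatasetCell/'
           'OneHot-op4')
    assert key == 'Default/network-_VirtualDatasetCell/OneHot-op4'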
@@ -170,4 +170,4 @@ def test_matmul_forward_reduce_scatter_transpose():
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 32]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64]), dtype=ms.float32)
-    compile_net(net, x, y, b)
+    compile_net(net, x, y, b)
@@ -280,4 +280,4 @@ def test_mixed_precision_const_parameter():
     x = Tensor(np.ones((1, 3, 28, 28), np.float32))
     y = Tensor(np.ones((1, 3, 14, 14), np.float32))
     z = Tensor(np.ones((1, 3, 28, 28), np.float32))
-    out = net(x, y, z)
+    _ = net(x, y, z)
@@ -39,7 +39,7 @@ def test_net_vargs_expand():
     y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))
     sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))
     net = AddNet()
-    out = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens)
+    _ = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens)
 
 class VarNet(Cell):
@@ -111,7 +111,7 @@ def test_all_var_args_grad_with_sens():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_grad_list_var_args():
@@ -128,7 +128,7 @@ def test_grad_list_var_args():
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y)
+    _ = grad_net(x, y)
 
 def test_grad_all_var_args():
@@ -145,7 +145,7 @@ def test_grad_all_var_args():
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y)
+    _ = grad_net(x, y)
 
 def test_grad_all_var_args_with_sens():
@@ -163,7 +163,7 @@ def test_grad_all_var_args_with_sens():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_grad_var_args_with_sens():
@@ -181,7 +181,7 @@ def test_grad_var_args_with_sens():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_var_args_grad():
@@ -219,7 +219,7 @@ def test_var_args_grad():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_var_args_positional():
@@ -253,7 +253,7 @@ def test_var_args_positional():
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y)
+    _ = grad_net(x, y)
 
 def test_grad_within_if_else():
@@ -271,7 +271,7 @@ def test_grad_within_if_else():
     x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
-    sens = Tensor(1.0, dtype=mstype.float32)
+    _ = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
     out = grad_net(x, y)
| @@ -316,10 +316,10 @@ def test_grad_for_concat(): | |||||
| net = Concat(axis=self.axis) | net = Concat(axis=self.axis) | ||||
| grad_net = GradNet(net) | grad_net = GradNet(net) | ||||
| grad_net.set_train() | grad_net.set_train() | ||||
| input_grad = grad_net(*inputs, Tensor(self.out_grad_np)) | |||||
| _ = grad_net(*inputs, Tensor(self.out_grad_np)) | |||||
| def grad_cmp(self): | def grad_cmp(self): | ||||
| input_grad_mindspore = self.grad_mindspore_impl() | |||||
| self.grad_mindspore_impl() | |||||
| fact = ConcatFactory(input_shape=( | fact = ConcatFactory(input_shape=( | ||||
| (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) | (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) | ||||
| @@ -84,7 +84,7 @@ class for_loop_with_cont_break(Cell): | |||||
| if i > 5: | if i > 5: | ||||
| x *= 3 | x *= 3 | ||||
| break | break | ||||
| x *= 2 | |||||
| # x *= 2 | |||||
| x = x * 2 | x = x * 2 | ||||
| pass | pass | ||||
| return x | return x | ||||
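The loop hunk above replaces the augmented assignment `x *= 2` with the explicit `x = x * 2`; the diff gives no reason, but a plausible one is that the graph compiler of this era handled plain assignment more reliably inside control flow. For ordinary Python values the two spellings are equivalent:

    x = 1
    for i in range(8):
        if i > 5:
            x = x * 3            # explicit form of `x *= 3`
            break
        x = x * 2                # explicit form of `x *= 2`
    assert x == 192              # 1 * 2**6 * 3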
| @@ -123,6 +123,7 @@ def sub(x, y): | |||||
| return x - y | return x - y | ||||
| # pylint: disable=using-constant-test | |||||
| @ms_function | @ms_function | ||||
| def if_always_true(x): | def if_always_true(x): | ||||
| """ if_always_true """ | """ if_always_true """ | ||||
| @@ -870,6 +871,7 @@ def test_grad_refactor_14(): | |||||
| assert C.grad_all(grad_refactor_14)(2, 3) == (3, 9) | assert C.grad_all(grad_refactor_14)(2, 3) == (3, 9) | ||||
| # pylint: disable=using-constant-test | |||||
| class IfDeferInline(nn.Cell): | class IfDeferInline(nn.Cell): | ||||
| def __init__(self, mul_size): | def __init__(self, mul_size): | ||||
| super().__init__() | super().__init__() | ||||
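Both `using-constant-test` disables guard code whose branch condition is constant on purpose. The body of `if_always_true` is not shown in this diff, so the following is only an illustration of what trips the warning:

    # pylint: disable=using-constant-test
    def if_always_true(x):
        """ if_always_true """
        if True:                 # W0125 (using-constant-test) without the disable
            return x
        return 0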
| @@ -1,12 +1,10 @@ | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| import mindspore.ops.operations as P | import mindspore.ops.operations as P | ||||
| from mindspore import context | |||||
| from mindspore.ops import composite as C | from mindspore.ops import composite as C | ||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore import context, Tensor, ParameterTuple | from mindspore import context, Tensor, ParameterTuple | ||||
| from mindspore.common.initializer import TruncatedNormal | from mindspore.common.initializer import TruncatedNormal | ||||
| from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum | |||||
| from mindspore.nn import WithLossCell, Momentum | |||||
| context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") | context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") | ||||
| @@ -18,25 +16,28 @@ def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): | |||||
| kernel_size=kernel_size, stride=stride, padding=padding, | kernel_size=kernel_size, stride=stride, padding=padding, | ||||
| weight_init=weight, has_bias=False, pad_mode="valid") | weight_init=weight, has_bias=False, pad_mode="valid") | ||||
| def fc_with_initialize(input_channels, out_channels): | def fc_with_initialize(input_channels, out_channels): | ||||
| """weight initial for fc layer""" | """weight initial for fc layer""" | ||||
| weight = weight_variable() | weight = weight_variable() | ||||
| bias = weight_variable() | bias = weight_variable() | ||||
| return nn.Dense(input_channels, out_channels, weight, bias) | return nn.Dense(input_channels, out_channels, weight, bias) | ||||
| def weight_variable(): | def weight_variable(): | ||||
| """weight initial""" | """weight initial""" | ||||
| return TruncatedNormal(0.02) | return TruncatedNormal(0.02) | ||||
| def cell_hook_function(cell_id, grad_input, grad_output): | def cell_hook_function(cell_id, grad_input, grad_output): | ||||
| print(cell_id) | print(cell_id) | ||||
| assert(grad_output[0].asnumpy().shape == (32, 6, 14, 14)) | |||||
| assert(grad_input[0].asnumpy().shape == (32, 16, 10, 10)) | |||||
| assert (grad_output[0].asnumpy().shape == (32, 6, 14, 14)) | |||||
| assert (grad_input[0].asnumpy().shape == (32, 16, 10, 10)) | |||||
| def var_hook_function(grad_out): | def var_hook_function(grad_out): | ||||
| print("grad:", grad_out) | print("grad:", grad_out) | ||||
| assert(grad_out[0].asnumpy().shape == (32, 120)) | |||||
| assert (grad_out[0].asnumpy().shape == (32, 120)) | |||||
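The whitespace fixes above (`assert(...)` to `assert (...)`) are cosmetic, but they usefully underline that `assert` is a statement rather than a function call, which is where a classic pitfall lives:

    x = 1
    assert x == 1, "x must be 1"     # preferred statement form
    assert (x == 1)                  # harmless: parentheses around one expression
    # assert (x == 2, "oops")        # if uncommented: asserts a non-empty
    #                                # tuple, which is always truthy, so it
    #                                # can never fail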
| class LeNet5(nn.Cell): | class LeNet5(nn.Cell): | ||||
| @@ -82,7 +83,7 @@ class LeNet5(nn.Cell): | |||||
| x = self.fc3(x) | x = self.fc3(x) | ||||
| return x | return x | ||||
| class GradWrap(nn.Cell): | class GradWrap(nn.Cell): | ||||
| """ GradWrap definition """ | """ GradWrap definition """ | ||||
| def __init__(self, network): | def __init__(self, network): | ||||
| @@ -94,6 +95,7 @@ class GradWrap(nn.Cell): | |||||
| weights = self.weights | weights = self.weights | ||||
| return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) | return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) | ||||
| def test_hook(): | def test_hook(): | ||||
| net = LeNet5() | net = LeNet5() | ||||
| optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) | optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) | ||||
| @@ -101,7 +103,7 @@ def test_hook(): | |||||
| net_with_criterion = WithLossCell(net, criterion) | net_with_criterion = WithLossCell(net, criterion) | ||||
| train_network = GradWrap(net_with_criterion) | train_network = GradWrap(net_with_criterion) | ||||
| train_network.set_train() | train_network.set_train() | ||||
| input_data = Tensor(np.ones([net.batch_size, 1, 32, 32]).astype(np.float32) * 0.01) | input_data = Tensor(np.ones([net.batch_size, 1, 32, 32]).astype(np.float32) * 0.01) | ||||
| label = Tensor(np.ones([net.batch_size, net.num_class]).astype(np.float32)) | label = Tensor(np.ones([net.batch_size, net.num_class]).astype(np.float32)) | ||||
| output = net(Tensor(input_data)) | output = net(Tensor(input_data)) | ||||
| @@ -111,8 +113,6 @@ def test_hook(): | |||||
| print(loss_output.asnumpy().shape) | print(loss_output.asnumpy().shape) | ||||
| class MulAdd(nn.Cell): | class MulAdd(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(MulAdd, self).__init__() | super(MulAdd, self).__init__() | ||||
| @@ -121,12 +121,13 @@ class MulAdd(nn.Cell): | |||||
| return 2 * x + y | return 2 * x + y | ||||
| def bprop(self, x, y, out, dout): | def bprop(self, x, y, out, dout): | ||||
| assert(x == 1) | |||||
| assert(y == 2) | |||||
| assert(out == 4) | |||||
| assert(dout == 1) | |||||
| assert (x == 1) | |||||
| assert (y == 2) | |||||
| assert (out == 4) | |||||
| assert (dout == 1) | |||||
| return 3 * dout, 2 * y | return 3 * dout, 2 * y | ||||
| def test_custom_bprop(): | def test_custom_bprop(): | ||||
| mul_add = MulAdd() | mul_add = MulAdd() | ||||
| mul_add.bprop_debug = True | mul_add.bprop_debug = True | ||||
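For readers skimming the MulAdd hunk: a Cell's custom `bprop` receives the forward inputs, the forward output, and the incoming gradient, and must return one gradient per input. The asserts above pin the exact values, and the deliberately non-analytic return `(3 * dout, 2 * y)` proves the registered bprop is what actually runs. A minimal sketch under the same API (the class name below is illustrative):

    import mindspore.nn as nn

    class MulAddSketch(nn.Cell):         # illustrative twin of MulAdd above
        def construct(self, x, y):
            return 2 * x + y

        def bprop(self, x, y, out, dout):
            # The analytic gradients of 2*x + y are (2 * dout, dout); the
            # test above returns custom values instead, on purpose.
            return 2 * dout, dout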
| @@ -18,10 +18,9 @@ import pytest | |||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Parameter, ParameterTuple, Tensor | |||||
| from mindspore import Parameter, ParameterTuple | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore import context | from mindspore import context | ||||
| from mindspore import context | |||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.ops import composite as C | from mindspore.ops import composite as C | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| @@ -60,6 +60,7 @@ class InvertedResidual(nn.Cell): | |||||
| class MobileNetV2(nn.Cell): | class MobileNetV2(nn.Cell): | ||||
| def __init__(self, num_class=1000, input_size=224, width_mul=1.): | def __init__(self, num_class=1000, input_size=224, width_mul=1.): | ||||
| super(MobileNetV2, self).__init__() | super(MobileNetV2, self).__init__() | ||||
| _ = input_size | |||||
| block = InvertedResidual | block = InvertedResidual | ||||
| input_channel = 32 | input_channel = 32 | ||||
| last_channel = 1280 | last_channel = 1280 | ||||
| @@ -68,6 +68,7 @@ class InvertedResidual(nn.Cell): | |||||
| class MobileNetV2(nn.Cell): | class MobileNetV2(nn.Cell): | ||||
| def __init__(self, num_class=1000, input_size=224, width_mul=1.): | def __init__(self, num_class=1000, input_size=224, width_mul=1.): | ||||
| super(MobileNetV2, self).__init__() | super(MobileNetV2, self).__init__() | ||||
| _ = input_size | |||||
| block = InvertedResidual | block = InvertedResidual | ||||
| input_channel = 32 | input_channel = 32 | ||||
| last_channel = 1280 | last_channel = 1280 | ||||
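The `_ = input_size` line added to both MobileNetV2 hunks silences pylint's unused-argument warning while keeping the constructor signature intact for existing callers; schematically:

    def make_model(num_class=1000, input_size=224):
        _ = input_size       # retained for interface parity, deliberately unused
        return {"num_class": num_class}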
| @@ -63,33 +63,3 @@ class LeNet5(nn.Cell): | |||||
| x = self.fc2(x) | x = self.fc2(x) | ||||
| x = self.fc3(x) | x = self.fc3(x) | ||||
| return x | return x | ||||
| """ | |||||
| def test_qat_lenet(): | |||||
| net = LeNet5() | |||||
| net = qat.convert_quant_network( | |||||
| net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) | |||||
| def test_qat_mobile(): | |||||
| net = MobileNetV2() | |||||
| img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32)) | |||||
| net = qat.convert_quant_network( | |||||
| net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) | |||||
| net(img) | |||||
| def test_qat_mobile_train(): | |||||
| net = MobileNetV2(num_class=10) | |||||
| img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32)) | |||||
| label = Tensor(np.ones((1, 10)).astype(np.float32)) | |||||
| net = qat.convert_quant_network( | |||||
| net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) | |||||
| loss = nn.SoftmaxCrossEntropyWithLogits(reduction='mean') | |||||
| optimizer = nn.Momentum(net.trainable_params(), | |||||
| learning_rate=0.1, momentum=0.9) | |||||
| net = nn.WithLossCell(net, loss) | |||||
| net = nn.TrainOneStepCell(net, optimizer) | |||||
| net(img, label) | |||||
| """ | |||||
| @@ -13,9 +13,10 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| """Summary reader.""" | """Summary reader.""" | ||||
| import mindspore.train.summary_pb2 as summary_pb2 | |||||
| import struct | import struct | ||||
| import mindspore.train.summary_pb2 as summary_pb2 | |||||
| _HEADER_SIZE = 8 | _HEADER_SIZE = 8 | ||||
| _HEADER_CRC_SIZE = 4 | _HEADER_CRC_SIZE = 4 | ||||
| _DATA_CRC_SIZE = 4 | _DATA_CRC_SIZE = 4 | ||||
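The reorder above satisfies pylint's wrong-import-order check: standard-library modules come before third-party ones, per the PEP 8 grouping:

    import struct                                         # 1) standard library

    import mindspore.train.summary_pb2 as summary_pb2     # 2) third-party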
| @@ -25,6 +25,7 @@ from ....dataset_mock import MindData | |||||
| def setup_module(module): | def setup_module(module): | ||||
| _ = module | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| @@ -56,7 +57,7 @@ def test_amp_o0(): | |||||
| optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | ||||
| train_network = amp.build_train_network(net, optimizer, level="O0") | train_network = amp.build_train_network(net, optimizer, level="O0") | ||||
| output = train_network(inputs, label) | |||||
| _ = train_network(inputs, label) | |||||
| def test_amp_o2(): | def test_amp_o2(): | ||||
| @@ -66,7 +67,7 @@ def test_amp_o2(): | |||||
| optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | ||||
| train_network = amp.build_train_network(net, optimizer, level="O2") | train_network = amp.build_train_network(net, optimizer, level="O2") | ||||
| output = train_network(inputs, label) | |||||
| _ = train_network(inputs, label) | |||||
| def test_amp_o2_loss(): | def test_amp_o2_loss(): | ||||
| @@ -76,7 +77,7 @@ def test_amp_o2_loss(): | |||||
| loss = nn.MSELoss() | loss = nn.MSELoss() | ||||
| optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | ||||
| train_network = amp.build_train_network(net, optimizer, loss, level="O2") | train_network = amp.build_train_network(net, optimizer, loss, level="O2") | ||||
| output = train_network(inputs, label) | |||||
| _ = train_network(inputs, label) | |||||
| def test_amp_o0_loss(): | def test_amp_o0_loss(): | ||||
| @@ -86,7 +87,7 @@ def test_amp_o0_loss(): | |||||
| loss = nn.MSELoss() | loss = nn.MSELoss() | ||||
| optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) | ||||
| train_network = amp.build_train_network(net, optimizer, loss) | train_network = amp.build_train_network(net, optimizer, loss) | ||||
| output = train_network(inputs, label) | |||||
| _ = train_network(inputs, label) | |||||
| class MindDataSet(MindData): | class MindDataSet(MindData): | ||||
| @@ -100,10 +101,10 @@ class MindDataSet(MindData): | |||||
| if self._size < self._iter_num: | if self._size < self._iter_num: | ||||
| raise StopIteration | raise StopIteration | ||||
| self._iter_num += 1 | self._iter_num += 1 | ||||
| next = [] | |||||
| for shape, type in zip(self._output_shapes, self._np_types): | |||||
| next.append(Tensor(np.ones(shape).astype(type))) | |||||
| return tuple(next) | |||||
| lst = [] | |||||
| for shape_, type_ in zip(self._output_shapes, self._np_types): | |||||
| lst.append(Tensor(np.ones(shape_).astype(type_))) | |||||
| return tuple(lst) | |||||
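Besides readability, the rename above (`next` to `lst`, `shape, type` to `shape_, type_`) stops the loop from shadowing Python builtins, which would break any later call to them in the same scope:

    gen = iter([1, 2, 3])
    first = next(gen)        # builtin `next` still reachable: returns 1
    assert first == 1
    next = []                # rebinds the name, shadowing the builtin
    # next(gen) here would raise TypeError: 'list' object is not callable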
| def test_compile_model_train_O0(): | def test_compile_model_train_O0(): | ||||
| @@ -151,7 +151,7 @@ def test_eval(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| model2.eval(dataset) | model2.eval(dataset) | ||||
| net3 = LossNet() | |||||
| _ = LossNet() | |||||
| model3 = Model(net2, eval_network=net2, metrics={"loss"}) | model3 = Model(net2, eval_network=net2, metrics={"loss"}) | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| model3.eval(dataset) | model3.eval(dataset) | ||||
| @@ -15,6 +15,7 @@ | |||||
| """test callback function.""" | """test callback function.""" | ||||
| import os | import os | ||||
| import stat | import stat | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| @@ -283,14 +284,14 @@ def test_build_callbacks(): | |||||
| callbacks = [ck_obj, loss_cb_1, 'Error', None] | callbacks = [ck_obj, loss_cb_1, 'Error', None] | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| callback_list = _build_callbacks(callbacks) | |||||
| _ = _build_callbacks(callbacks) | |||||
| def test_RunContext(): | def test_RunContext(): | ||||
| """Test RunContext.""" | """Test RunContext.""" | ||||
| context_err = 666 | context_err = 666 | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| context = RunContext(context_err) | |||||
| _ = RunContext(context_err) | |||||
| cb_params = _InternalCallbackParam() | cb_params = _InternalCallbackParam() | ||||
| cb_params.member1 = 1 | cb_params.member1 = 1 | ||||
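In the two pytest hunks above, the bound name could never be used anyway: the call under `pytest.raises` is expected to raise before it returns, so `_` (or no binding at all) states that intent. For instance:

    import pytest

    def test_type_check():
        with pytest.raises(TypeError):
            _ = int(None)    # raises TypeError; the binding never happens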
| @@ -223,6 +223,7 @@ def vm_impl_avg_pool_grad(self): | |||||
| return vm_impl | return vm_impl | ||||
| # pylint: disable=function-redefined | |||||
| @vm_impl_getters.register(G.FusedBatchNormGrad) | @vm_impl_getters.register(G.FusedBatchNormGrad) | ||||
| def vm_impl_fused_batch_norm_grad(self): | def vm_impl_fused_batch_norm_grad(self): | ||||
| """Generate vm_impl function for FusedBatchNormGrad""" | """Generate vm_impl function for FusedBatchNormGrad""" | ||||
| @@ -239,6 +240,7 @@ def vm_impl_fused_batch_norm_grad(self): | |||||
| return vm_impl | return vm_impl | ||||
| # pylint: disable=function-redefined | |||||
| @vm_impl_getters.register(G.BatchNormGrad) | @vm_impl_getters.register(G.BatchNormGrad) | ||||
| def vm_impl_fused_batch_norm_grad(self): | def vm_impl_fused_batch_norm_grad(self): | ||||
| """Generate vm_impl function for BatchNormGrad""" | """Generate vm_impl function for BatchNormGrad""" | ||||