Merge pull request !1042 from jinyaohui/clean_pylint
@@ -18,13 +18,13 @@
 # pylint: disable=missing-docstring, arguments-differ, W0612
 import os
 import mindspore.common.dtype as mstype
 import mindspore.context as context
 from mindspore import Tensor
-from mindspore.nn.optim import AdamWeightDecayDynamicLR
 from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell, \
     BertTrainOneStepWithLossScaleCell
 from mindspore.nn.wrap.loss_scale import FixedLossScaleUpdateCell
+from mindspore.nn.optim import AdamWeightDecayDynamicLR
 from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from ...dataset_mock import MindData
 from ...ops_common import nn, np, batch_tuple_tensor, build_construct_graph
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 """use ImageNetToMR tool generate mindrecord"""
-import os
 from mindspore.mindrecord import ImageNetToMR

 IMAGENET_MAP_FILE = "../../../ut/data/mindrecord/testImageNetDataWhole/labels_map.txt"
@@ -21,6 +20,7 @@ IMAGENET_IMAGE_DIR = "../../../ut/data/mindrecord/testImageNetDataWhole/images"
 MINDRECORD_FILE = "./imagenet.mindrecord"
 PARTITION_NUMBER = 16

+
 def imagenet_to_mindrecord():
     imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE,
                                         IMAGENET_IMAGE_DIR,
@@ -28,5 +28,6 @@ def imagenet_to_mindrecord():
                                         PARTITION_NUMBER)
     imagenet_transformer.transform()

+
 if __name__ == '__main__':
     imagenet_to_mindrecord()
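
(For orientation, not part of the patch: a hedged sketch of verifying the MindRecord output above with the FileReader API that the performance script later in this diff also uses. The partition suffix "00" and the column names are assumptions based on PARTITION_NUMBER = 16 and the ImageNet schema.)

```python
# Hedged sketch: count the records written to one output partition.
from mindspore.mindrecord import FileReader

reader = FileReader("./imagenet.mindrecord00", columns=["file_name", "label"])
count = 0
for _, _item in enumerate(reader.get_next()):
    count += 1  # each item is a dict keyed by the requested columns
reader.close()
print("records in partition 00:", count)
```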
@@ -15,6 +15,7 @@
 """generate tfrecord"""
 import collections
 import os
+
 import tensorflow as tf

 IMAGENET_MAP_FILE = "../../../ut/data/mindrecord/testImageNetDataWhole/labels_map.txt"
@@ -22,6 +23,7 @@ IMAGENET_IMAGE_DIR = "../../../ut/data/mindrecord/testImageNetDataWhole/images"
 TFRECORD_FILE = "./imagenet.tfrecord"
 PARTITION_NUMBER = 16

+
 def get_imagenet_filename_label_pic(map_file, image_dir):
     """
     Get data from imagenet.
@@ -69,18 +71,22 @@ def get_imagenet_filename_label_pic(map_file, image_dir):
             continue
         yield str(file_name), int(label), image_bytes

+
 def create_int_feature(values):
     feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[values]))
     return feature

+
 def create_string_feature(values):
     feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(values, encoding='utf-8')]))
     return feature

+
 def create_bytes_feature(values):
     feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
     return feature

+
 def imagenet_to_tfrecord():
     writers = []
     for i in range(PARTITION_NUMBER):
@@ -109,5 +115,6 @@ def imagenet_to_tfrecord():
     print("Write {} total examples".format(total_written))

+
 if __name__ == '__main__':
     imagenet_to_tfrecord()
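
(Aside, not part of the patch: the three `create_*_feature` helpers above each wrap one value in a `tf.train.Feature`; a hedged sketch of how they combine into one serialized `tf.train.Example`, with illustrative names for everything not shown in the diff.)

```python
import collections
import tensorflow as tf

def make_example(file_name, label, image_bytes):
    # Assemble one Example from the helpers defined in the script above.
    features = collections.OrderedDict()
    features["file_name"] = create_string_feature(file_name)
    features["label"] = create_int_feature(label)
    features["data"] = create_bytes_feature(image_bytes)
    return tf.train.Example(features=tf.train.Features(feature=features))

# e.g. round-robined across the partition writers:
# writers[total_written % PARTITION_NUMBER].write(make_example(*record).SerializeToString())
```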
@@ -14,17 +14,20 @@
 # ============================================================================
 """test dataset performance about mindspore.MindDataset, mindspore.TFRecordDataset, tf.data.TFRecordDataset"""
 import time

+import mindspore.dataset as ds
+from mindspore.mindrecord import FileReader
+
 import tensorflow as tf
-import mindspore.dataset as ds
-from mindspore.mindrecord import FileReader

 print_step = 5000

+
 def print_log(count):
     if count % print_step == 0:
         print("Read {} rows ...".format(count))

+
 def use_filereader(mindrecord):
     start = time.time()
     columns_list = ["data", "label"]
@@ -38,6 +41,7 @@ def use_filereader(mindrecord):
     end = time.time()
     print("Read by FileReader - total rows: {}, cost time: {}s".format(num_iter, end - start))

+
 def use_minddataset(mindrecord):
     start = time.time()
     columns_list = ["data", "label"]
@@ -51,6 +55,7 @@ def use_minddataset(mindrecord):
     end = time.time()
     print("Read by MindDataset - total rows: {}, cost time: {}s".format(num_iter, end - start))

+
 def use_tfrecorddataset(tfrecord):
     start = time.time()
     columns_list = ["data", "label"]
@@ -66,8 +71,10 @@ def use_tfrecorddataset(tfrecord):
     end = time.time()
     print("Read by TFRecordDataset - total rows: {}, cost time: {}s".format(num_iter, end - start))

+
 def use_tensorflow_tfrecorddataset(tfrecord):
     start = time.time()
+
     def _parse_record(example_photo):
         features = {
             'file_name': tf.io.FixedLenFeature([], tf.string),
@@ -87,6 +94,7 @@ def use_tensorflow_tfrecorddataset(tfrecord):
     end = time.time()
     print("Read by TensorFlow TFRecordDataset - total rows: {}, cost time: {}s".format(num_iter, end - start))

+
 if __name__ == '__main__':
     # use MindDataset
     mindrecord = './imagenet.mindrecord00'
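
(Aside, not part of the patch: a hedged sketch of how `_parse_record` above is typically wired into the `tf.data` pipeline being benchmarked. The feature spec mirrors the snippet in the diff; the 'data' column name and the rest are assumptions.)

```python
import tensorflow as tf

def count_tfrecord_rows(tfrecord_file):
    features = {
        'file_name': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
        'data': tf.io.FixedLenFeature([], tf.string),  # assumed column name
    }
    data_set = tf.data.TFRecordDataset(tfrecord_file)
    data_set = data_set.map(lambda record: tf.io.parse_single_example(record, features))
    num_iter = 0
    for _ in data_set:  # eager iteration, one parsed dict per record
        num_iter += 1
    return num_iter
```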
@@ -18,15 +18,14 @@
 import numpy as np

 import mindspore.nn as nn
-from mindspore.common.api import _executor
+import mindspore.ops.composite as C
 from mindspore import Tensor
-from mindspore.model_zoo.lenet import LeNet
 from mindspore import context
-import mindspore.ops.composite as C
+from mindspore.common.api import _executor
+from mindspore.model_zoo.lenet import LeNet

 context.set_context(mode=context.GRAPH_MODE)
 batch_size = 1
 channel = 1
 height = 32
@@ -36,6 +35,7 @@ num_class = 10
 class LeNetGrad(nn.Cell):
     """Backward of LeNet"""
+
     def __init__(self, network):
         super(LeNetGrad, self).__init__()
         self.grad_op = C.grad_all_with_sens
@@ -17,10 +17,11 @@
 import numpy as np

-from mindspore.common.api import _executor
 from mindspore import Tensor
+from mindspore.common.api import _executor
 from .resnet_example import resnet50

+
 def test_compile():
     net = resnet50()
     inp = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32))
@@ -20,9 +20,9 @@
 import numpy as np

 from mindspore import Tensor
-from ..train_step_wrap import train_step_without_opt
 from .resnet_example import resnet50
 from ..vm_impl import *
+from ..train_step_wrap import train_step_without_opt

 def test_resnet50_pynative():
     net = train_step_without_opt(resnet50())
@@ -17,13 +17,15 @@
 import numpy as np

-from mindspore.common.api import _executor
 import mindspore.context as context
 from mindspore import Tensor
-from ..train_step_wrap import train_step_with_loss_warp
+from mindspore.common.api import _executor
 from .resnet_example import resnet50
+from ..train_step_wrap import train_step_with_loss_warp

 context.set_context(mode=context.GRAPH_MODE)

+
 def test_train_step():
     net = train_step_with_loss_warp(resnet50())
     net.set_train()
@@ -16,15 +16,15 @@
 train step wrap
 """
 import mindspore.nn as nn
-from mindspore.ops import functional as F
-from mindspore import ParameterTuple
+from mindspore import Parameter, ParameterTuple
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

+
 class TrainStepWrap(nn.Cell):
     """
     TrainStepWrap definition
     """
     def __init__(self, network):
         super(TrainStepWrap, self).__init__()
         self.network = network
@@ -39,10 +39,12 @@ class TrainStepWrap(nn.Cell):
         grads = self.grad(self.network, weights)(x, label)
         return self.optimizer(grads)

+
 class NetWithLossClass(nn.Cell):
     """
     NetWithLossClass definition
     """
+
     def __init__(self, network):
         super(NetWithLossClass, self).__init__(auto_prefix=False)
         self.loss = nn.SoftmaxCrossEntropyWithLogits()
@@ -61,6 +63,7 @@ class TrainStepWrap2(nn.Cell):
     """
     TrainStepWrap2 definition
     """
+
     def __init__(self, network, sens):
         super(TrainStepWrap2, self).__init__()
         self.network = network
@@ -76,13 +79,16 @@ class TrainStepWrap2(nn.Cell):
         grads = self.grad(self.network, weights)(x, self.sens)
         return self.optimizer(grads)

+
 def train_step_with_sens(network, sens):
     return TrainStepWrap2(network, sens)

+
 class TrainStepWrapWithoutOpt(nn.Cell):
     """
     TrainStepWrapWithoutOpt definition
     """
+
     def __init__(self, network):
         super(TrainStepWrapWithoutOpt, self).__init__()
         self.network = network
@@ -93,5 +99,6 @@ class TrainStepWrapWithoutOpt(nn.Cell):
         grads = self.grad(self.network, self.weights)(x, label)
         return grads

+
 def train_step_without_opt(network):
     return TrainStepWrapWithoutOpt(NetWithLossClass(network))
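
(Aside, not part of the patch: a hedged usage sketch of the wrappers defined in train_step_wrap above, with `LeNet` standing in for any `nn.Cell`; it mirrors how the resnet tests earlier in this diff call `train_step_with_loss_warp` and `train_step_without_opt`. Input and label shapes are illustrative.)

```python
import numpy as np
from mindspore import Tensor
from mindspore.model_zoo.lenet import LeNet

# Loss + optimizer in one step (TrainStepWrap under the hood):
net = train_step_with_loss_warp(LeNet())
net.set_train()
loss = net(Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)),
           Tensor(np.zeros([1, 10]).astype(np.float32)))

# Gradients only, no parameter update (TrainStepWrapWithoutOpt):
grad_net = train_step_without_opt(LeNet())
grads = grad_net(Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)),
                 Tensor(np.zeros([1, 10]).astype(np.float32)))
```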
@@ -28,6 +28,7 @@ context.set_context(mode=context.GRAPH_MODE)
 def Xtest_arg_dict():
     class DictNet(Cell):
         """DictNet definition"""
+
         def __init__(self):
             super(DictNet, self).__init__()
             self.max = P.Maximum()
@@ -48,6 +49,7 @@ def Xtest_arg_dict():
 def test_const_dict():
     class DictNet(Cell):
         """DictNet1 definition"""
+
         def __init__(self):
             super(DictNet, self).__init__()
             self.max = P.Maximum()
@@ -58,6 +60,7 @@ def test_const_dict():
             a = self.max(self.dictionary["x"], self.dictionary["y"])
             b = self.min(self.dictionary["x"], self.dictionary["y"])
             return a + b

+
     net = DictNet()
     net()
@@ -65,6 +68,7 @@ def test_const_dict():
 def test_dict_set_or_get_item():
     class DictNet(Cell):
         """DictNet1 definition"""
+
         def __init__(self):
             super(DictNet, self).__init__()
             self.dict_ = {"x": 1, "y": 2}
@@ -91,6 +95,7 @@ def test_dict_set_or_get_item():
 def test_dict_set_or_get_item_2():
     class DictNet(Cell):
         """DictNet1 definition"""
+
         def __init__(self):
             super(DictNet, self).__init__()
@@ -117,6 +122,7 @@ def test_dict_set_or_get_item_2():
 def test_dict_set_or_get_item_3():
     class DictNet(Cell):
         """DictNet1 definition"""
+
         def __init__(self):
             super(DictNet, self).__init__()
             self.dict_ = {"x": Tensor(np.ones([2, 2, 3], np.float32)), "y": 1}
@@ -130,5 +136,3 @@ def test_dict_set_or_get_item_3():
     net = DictNet()
     assert net() == Tensor(np.ones([4, 2, 3], np.float32))
-
-
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
-import pytest

 from mindspore import Tensor, context
 from mindspore.nn import Cell
@@ -15,6 +15,7 @@
 """setup for pytest"""
 import mindspore.context as context

+
 # pylint: disable=unused-argument
 def setup_module(module):
     context.set_context(mode=context.GRAPH_MODE)
@@ -16,6 +16,7 @@
 resnet50 example
 """
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import operations as P
@@ -16,19 +16,21 @@
 test assign add
 """
 import numpy as np
+
+import mindspore as ms
+import mindspore.context as context
 import mindspore.nn as nn
-from mindspore.ops import operations as P
-from mindspore.common.initializer import initializer
 from mindspore import Tensor, Parameter
-import mindspore as ms
+from mindspore.common.initializer import initializer
+from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine
-from mindspore.common.api import _executor
-import mindspore.context as context
-import pytest

 context.set_context(mode=context.GRAPH_MODE)
+
+
 class Net(nn.Cell):
     """Net definition"""
+
     def __init__(self):
         super(Net, self).__init__()
         self.AssignAdd = P.AssignAdd()
@@ -39,18 +41,19 @@ class Net(nn.Cell):
         out = self.AssignAdd(self.inputdata, x)
         return out

+
 @non_graph_engine
 def test_AssignAdd_1():
     """test AssignAdd 1"""
     import mindspore.context as context
     context.set_context(mode=context.GRAPH_MODE)
     net = Net()
-    x = Tensor(np.ones([1]).astype(np.int64)*100)
+    x = Tensor(np.ones([1]).astype(np.int64) * 100)
     print("MyPrintResult dataX:", x)
     result = net(x)
     print("MyPrintResult data::", result)
-    expect = np.ones([1]).astype(np.int64)*101
+    expect = np.ones([1]).astype(np.int64) * 101
     diff = result.asnumpy() - expect
     print("MyPrintExpect:", expect)
@@ -58,18 +61,19 @@ def test_AssignAdd_1():
     error = np.ones(shape=[1]) * 1.0e-3
     assert np.all(diff < error)

+
 @non_graph_engine
 def test_AssignAdd_2():
     """test AssignAdd 2"""
     import mindspore.context as context
     context.set_context(mode=context.GRAPH_MODE)
     net = Net()
-    x = Tensor(np.ones([1]).astype(np.int64)*102)
+    x = Tensor(np.ones([1]).astype(np.int64) * 102)
     print("MyPrintResult dataX:", x)
     result = net(x)
     print("MyPrintResult data::", result.asnumpy())
-    expect = np.ones([1]).astype(np.int64)*103
+    expect = np.ones([1]).astype(np.int64) * 103
     diff = result.asnumpy() - expect
     print("MyPrintExpect:", expect)
@@ -77,8 +81,10 @@ def test_AssignAdd_2():
     error = np.ones(shape=[1]) * 1.0e-3
     assert np.all(diff < error)

+
 class AssignAddNet(nn.Cell):
     """Net definition"""
+
     def __init__(self):
         super(AssignAddNet, self).__init__()
         self.AssignAdd = P.AssignAdd()
@@ -89,9 +95,10 @@ class AssignAddNet(nn.Cell):
         z1 = self.AssignAdd(self.inputdata, self.one)
         return z1

+
 @non_graph_engine
 def test_assignadd_scalar_cast():
     net = AssignAddNet()
-    x = Tensor(np.ones([1]).astype(np.int64)*102)
-    #_executor.compile(net, 1)
+    x = Tensor(np.ones([1]).astype(np.int64) * 102)
+    # _executor.compile(net, 1)
     result = net(x)
@@ -14,6 +14,7 @@
 # ============================================================================
 """ test Activations """
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor
 from ..ut_filter import non_graph_engine
@@ -16,15 +16,17 @@
 test assign sub
 """
 import numpy as np
+
+import mindspore.context as context
 import mindspore.nn as nn
 import mindspore.ops.operations as P
 from mindspore import Tensor
-import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter

 context.set_context(mode=context.GRAPH_MODE)

+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -14,6 +14,7 @@
 # ============================================================================
 """ut for batchnorm layer"""
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor
 from ..ut_filter import non_graph_engine
@@ -14,14 +14,17 @@
 # ============================================================================
 """ test BiasAdd """
 import numpy as np
+
 import mindspore.nn as nn
-from mindspore.ops import operations as P
-from mindspore.common.initializer import initializer
 from mindspore import Tensor, Parameter
+from mindspore.common.initializer import initializer
+from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine

+
 class Net(nn.Cell):
     """Net definition"""
+
     def __init__(self,
                  output_channels,
                  bias_init='zeros',
@@ -14,6 +14,7 @@
 # ============================================================================
 """test conv"""
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor
 from ..ut_filter import non_graph_engine
@@ -25,6 +26,7 @@ out_channels = 64

 class Net(nn.Cell):
     """Net definition"""
+
     def __init__(self,
                  cin,
                  cout,
@@ -70,6 +72,7 @@ def test_compile2():
     output = net(input_data)
     print(output.asnumpy())

+
 @non_graph_engine
 def test_compile3():
     net = Net(3, 1, (3, 3), weight_init='ONES')
@@ -14,12 +14,15 @@
 # ============================================================================
 """ test Dense """
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor
 from ..ut_filter import non_graph_engine

+
 class Net(nn.Cell):
     """Net definition"""
+
     def __init__(self,
                  input_channels,
                  output_channels,
@@ -14,11 +14,12 @@
 # ============================================================================
 """test eval"""
 import numpy as np
+
 import mindspore as ms
 import mindspore.nn as nn
-from mindspore.common.api import _executor
 from mindspore import Tensor
 from mindspore import context
+from mindspore.common.api import _executor
 from ..ut_filter import non_graph_engine
@@ -16,8 +16,8 @@
 import numpy as np

 import mindspore.nn as nn
-from mindspore.ops import operations as P
 from mindspore import Tensor
+from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine
@@ -15,12 +15,12 @@
 """
 test pooling api
 """
-import numpy as np
 import mindspore.nn as nn
-from mindspore import Tensor

+
 class MaxNet(nn.Cell):
     """MaxNet definition"""
+
     def __init__(self,
                  kernel_size,
                  stride=None):
@@ -16,9 +16,11 @@
 test softmax api
 """
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor

+
 class Net(nn.Cell):
     def __init__(self, dim):
         super(Net, self).__init__()
@@ -14,10 +14,12 @@
 # ============================================================================
 """ test TensorAdd """
 import numpy as np
+
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import operations as P

+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -14,29 +14,33 @@
 # ============================================================================
 """ test model train """
 import numpy as np
+
 import mindspore.nn as nn
-from mindspore.ops import operations as P
-from mindspore.common.initializer import initializer
 from mindspore import Tensor, Parameter, Model
+from mindspore.common.initializer import initializer
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim import Momentum
+from mindspore.ops import operations as P

+
 # fn is a funcation use i as input
 def lr_gen(fn, epoch_size):
     for i in range(epoch_size):
         yield fn(i)

+
 def me_train_tensor(net, input_np, label_np, epoch_size=2):
     """me_train_tensor"""
     loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
-    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9, 0.01, 1024)
+    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9,
+                   0.01, 1024)
     Model(net, loss, opt)
     _network = nn.WithLossCell(net, loss)
     _train_net = nn.TrainOneStepCell(_network, opt)
     _train_net.set_train()
     label_np = np.argmax(label_np, axis=-1).astype(np.int32)
     for epoch in range(0, epoch_size):
-        print(f"epoch %d"%(epoch))
+        print(f"epoch %d" % (epoch))
         _train_net(Tensor(input_np), Tensor(label_np))
@@ -52,6 +56,7 @@ def test_bias_add(test_with_simu):
     class Net(nn.Cell):
         """Net definition"""
+
         def __init__(self,
                      output_channels,
                      bias_init='zeros',
@@ -87,6 +92,7 @@ def test_conv(test_with_simu):
     class Net(nn.Cell):
         "Net definition"""
+
         def __init__(self,
                      cin,
                      cout,
@@ -116,6 +122,7 @@ def test_net():
     class Net(nn.Cell):
         """Net definition"""
+
         def __init__(self):
             super(Net, self).__init__()
             Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01)
@@ -141,6 +148,7 @@ def test_net():
     label_np = np.ones([32, 12]).astype(np.int32)
     me_train_tensor(net, input_np, label_np)

+
 def test_bn():
     """test_bn"""
     import mindspore.context as context
@@ -151,6 +159,7 @@ def test_bn():
     class Net(nn.Cell):
         """Net definition"""
+
         def __init__(self, cin, cout):
             super(Net, self).__init__()
             self.bn = nn.BatchNorm2d(cin)
@@ -23,6 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P

+
 def get_reordered_parameters(parameters):
     """get_reordered_parameters"""
     # put the bias parameter to the end
@@ -36,12 +37,15 @@ def get_reordered_parameters(parameters):
     reordered_params = tuple(non_bias_param + bias_param)
     return len(non_bias_param), len(reordered_params), reordered_params

+
 def get_net_trainable_reordered_params(net):
     params = net.trainable_params()
     return get_reordered_parameters(params)

+
 class TrainOneStepWithLarsCell(nn.Cell):
     """TrainOneStepWithLarsCell definition"""
+
     def __init__(self, network, optimizer, sens=1.0):
         super(TrainOneStepWithLarsCell, self).__init__(auto_prefix=False)
         self.network = network
@@ -66,11 +70,13 @@ class TrainOneStepWithLarsCell(nn.Cell):
         new_grads = lars_grads + bias_grads
         return F.depend(loss, self.optimizer(new_grads))

+
 # fn is a funcation use i as input
 def lr_gen(fn, epoch_size):
     for i in range(epoch_size):
         yield fn(i)

+
 def me_train_tensor(net, input_np, label_np, epoch_size=2):
     """me_train_tensor"""
     loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
@@ -14,12 +14,14 @@
 # ============================================================================
 """test_dtype"""
 from dataclasses import dataclass
+
 import numpy as np
 import pytest

 import mindspore as ms
 from mindspore.common import dtype

+
 def test_dtype_to_nptype():
     """test_dtype2nptype"""
     assert ms.dtype_to_nptype(ms.bool_) == np.bool_
@@ -59,6 +61,7 @@ def test_dtype_to_pytype():
 @dataclass
 class Foo:
     x: int
+
     def inf(self):
         return self.x
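
(Aside, not part of the patch: the mappings these dtype tests exercise, shown with the public helpers the asserts above already rely on.)

```python
import numpy as np
import mindspore as ms

assert ms.dtype_to_nptype(ms.bool_) == np.bool_      # MindSpore dtype -> NumPy type
assert ms.dtype_to_nptype(ms.float32) == np.float32
assert ms.dtype_to_pytype(ms.int64) == int           # MindSpore dtype -> Python type
assert ms.dtype_to_pytype(ms.bool_) == bool
```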
@@ -25,25 +25,27 @@ import mindspore as ms
 import mindspore.common.api as me
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer
+from mindspore.common.parameter import Parameter
 from ..ut_filter import non_graph_engine

 ndarr = np.ones((2, 3))

+
 def test_tensor_flatten():
     with pytest.raises(AttributeError):
-        lst = [1, 2, 3, 4,]
+        lst = [1, 2, 3, 4, ]
         tensor_list = ms.Tensor(lst, ms.float32)
         tensor_list = tensor_list.Flatten()
     print(tensor_list)

+
 def test_tensor_list():
     lst = [[1.0, 2.0, 1.0], [1.0, 10.0, 9.0]]
     tensor_list = ms.Tensor(lst, ms.float32)
     print(tensor_list)

 def test_tensor():
     """test_tensor"""
     t1 = ms.Tensor(ndarr)
@@ -63,6 +65,7 @@ def test_tensor():
     assert isinstance(t4, ms.Tensor)
     assert t4.dtype() == ms.int64

+
 def test_tensor_type_float16():
     t_float16 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float16))
     assert isinstance(t_float16, ms.Tensor)
@@ -107,6 +110,7 @@ def test_tensor_type_float64_user_define():
     assert t_float64.shape() == (2, 3)
     assert t_float64.dtype() == ms.float64

+
 def test_tensor_type_bool():
     # init a tensor with bool type
     ts_bool_array = ms.Tensor(np.zeros([2, 3], np.bool), ms.bool_)
@@ -122,6 +126,7 @@ def test_tensor_type_bool():
     assert t_bool_array.shape() == (2, 3)
     assert t_bool_array.dtype() == ms.bool_

+
 def test_tensor_type_int8():
     t_int8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
     assert isinstance(t_int8_array, ms.Tensor)
@@ -154,6 +159,7 @@ def test_tensor_type_int64():
     assert t_int64.shape() == (2, 3)
     assert t_int64.dtype() == ms.int64

+
 def test_tensor_type_uint8():
     t_uint8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8))
     assert isinstance(t_uint8_array, ms.Tensor)
@@ -181,6 +187,7 @@ def test_tensor_type_uint64():
     assert t_uint64.shape() == (2, 3)
     assert t_uint64.dtype() == ms.uint64

+
 def test_set_type():
     t = ms.Tensor(ndarr)
     t.set_dtype(ms.float32)
@@ -202,15 +209,17 @@ def test_sub():
     z = x - y
     assert isinstance(z, ms.Tensor)

+
 @non_graph_engine
 def test_div():
-    x = ms.Tensor(np.array([[2,6,10],[12, 4, 8]]).astype(np.float32))
-    y = ms.Tensor(np.array([[2,2,5],[6, 1, 2]]).astype(np.float32))
+    x = ms.Tensor(np.array([[2, 6, 10], [12, 4, 8]]).astype(np.float32))
+    y = ms.Tensor(np.array([[2, 2, 5], [6, 1, 2]]).astype(np.float32))
     z = x / y
     z2 = x / 2
     assert isinstance(z, ms.Tensor)
     assert isinstance(z2, ms.Tensor)

+
 @non_graph_engine
 def test_parameter():
     x = Parameter(initializer(1, [1], ms.float32), name="beta1_power")
@@ -220,6 +229,7 @@ def test_parameter():

 class Net(nn.Cell):
     """Net definition"""
+
     def __init__(self, dim):
         super(Net, self).__init__()
         self.dim = dim
@@ -266,6 +276,7 @@ def test_tensor_contiguous():
     assert True, rt_f.flags['C_CONTIGUOUS']
     print("rt_f flags = ", rt_f.flags)

+
 def test_tensor_contiguous2():
     input_data = np.random.randn(32, 112, 112, 3).astype(np.float32)
     input_me = input_data.transpose(0, 3, 1, 2)
@@ -274,36 +285,43 @@ def test_tensor_contiguous2():
     out_f = tensor_f_float32.asnumpy()
     print("out_f flags = ", out_f.flags)

+
 def test_tensor_input_string():
     with pytest.raises(TypeError):
         input_data = 'ccc'
         ms.Tensor(input_data)

+
 def test_tensor_input_tuple_string():
     with pytest.raises(TypeError):
         input_data = (2, 3, '4', 5)
         ms.Tensor(input_data)

+
 def test_tensor_input_list_string():
     with pytest.raises(TypeError):
         input_data = [[2, 3, '4', 5], [1, 2, 3, 4]]
         ms.Tensor(input_data)

+
 def test_tensor_input_none():
     with pytest.raises(TypeError):
         input_data = None
         ms.Tensor(input_data, np.int64)

+
 # pylint: disable=no-value-for-parameter
 def test_tensor_input_empty():
     with pytest.raises(TypeError):
         ms.Tensor()

+
 def test_tensor_input_ndarray_str():
     with pytest.raises(TypeError):
         inp = np.array(["88", 2, 4])
         ms.Tensor(inp)

+
 def test_tensor_input_ndarray_bool():
     inp = np.array([True, 2, 4])
     ms.Tensor(inp)
@@ -311,86 +329,103 @@ def test_tensor_input_ndarray_bool():
     inp = np.array([False, 2, 4])
     ms.Tensor(inp)

+
 def test_tensor_input_ndarray_complex():
     with pytest.raises(TypeError):
         inp = np.array([20j, 2, 4])
         ms.Tensor(inp)

+
 def test_tensor_input_ndarray_none():
     with pytest.raises(TypeError):
         inp = np.array([None, 2, 4])
         ms.Tensor(inp)

+
 def test_tensor_input_ndarray_dict():
     with pytest.raises(TypeError):
         inp = {'a': 6, 'b': 7}
         ms.Tensor(inp)

+
 def test_tensor_input_np_nan():
     with pytest.raises(TypeError):
         input_data = (1, 2, 3, np.nan)
         ms.Tensor(input_data, np.int64)

+
 def test_tensor_input_tuple_inf():
     with pytest.raises(TypeError):
         input_data = (1, 2, 3, float("inf"))
         ms.Tensor(input_data, np.int64)

+
 def test_tensor_input_dict():
     with pytest.raises(TypeError):
         input_data = {'a': 6, 'b': 7}
         ms.Tensor(input_data, np.int64)

+
 def test_tensor_input_complex():
     with pytest.raises(TypeError):
         input_data = (1, 2j, 3)
         ms.Tensor(input_data, np.int64)

+
 def test_tensor_dtype_np_float():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.float)
         ms.Tensor(input_data, np.float)

+
 def test_tensor_dtype_np_float16():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.float16)
         ms.Tensor(input_data, np.float16)

+
 def test_tensor_dtype_np_float32():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.float32)
         ms.Tensor(input_data, np.float32)

+
 def test_tensor_dtype_np_float64():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.float64)
         ms.Tensor(input_data, np.float64)

+
 def test_tensor_dtype_np_int():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.int)
         ms.Tensor(input_data, np.int)

+
 def test_tensor_dtype_np_int8():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.int8)
         ms.Tensor(input_data, np.int8)

+
 def test_tensor_dtype_np_int16():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.int16)
         ms.Tensor(input_data, np.int16)

+
 def test_tensor_dtype_np_int32():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.int32)
         ms.Tensor(input_data, np.int32)

+
 def test_tensor_dtype_np_int64():
     with pytest.raises(TypeError):
         input_data = np.random.randn(32, 112, 112, 3).astype(np.int64)
         ms.Tensor(input_data, np.int64)

+
 def test_tensor_dtype_fp32_to_bool():
     with pytest.raises(RuntimeError):
         input = np.random.randn(2, 3, 4, 5).astype(np.float32)
@@ -399,7 +434,7 @@ def test_tensor_dtype_fp32_to_bool():


 def test_tensor_operation():
-    x = Tensor(np.ones((3,3)) * 4)
+    x = Tensor(np.ones((3, 3)) * 4)
     res = x + 1
     assert np.all(res.asnumpy() == np.ones((3, 3)) * 5)
     res = 1 + x
@@ -14,10 +14,11 @@
 # ============================================================================
 """test tensor py"""
 import numpy as np

+import mindspore as ms
+from mindspore.common.api import _executor
 from mindspore.nn import Cell
 from mindspore.ops import operations as P
-from mindspore.common.api import _executor
-import mindspore as ms
 from ..ut_filter import non_graph_engine
@@ -93,6 +94,7 @@ def test_float():
 def test_tensor_method_sub():
     """test_tensor_method_sub"""

+
     class Net(Cell):
         def __init__(self):
             super(Net, self).__init__()
@@ -111,6 +113,7 @@ def test_tensor_method_sub():
 def test_tensor_method_mul():
     """test_tensor_method_mul"""

+
     class Net(Cell):
         def __init__(self):
             super(Net, self).__init__()
@@ -129,6 +132,7 @@ def test_tensor_method_mul():
 def test_tensor_method_div():
     """test_tensor_method_div"""

+
     class Net(Cell):
         def __init__(self):
             super(Net, self).__init__()
@@ -13,24 +13,25 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
+
-from mindspore.common.api import ms_function
-from mindspore.common.tensor import Tensor
-from mindspore.ops import operations as P
-import mindspore.ops.functional as F
 import mindspore.context as context
-from mindspore.ops import composite as C
-from mindspore.ops.composite import core
-from mindspore.common import dtype as mstype
 import mindspore.nn as nn
+import mindspore.ops.functional as F
+from mindspore.common import dtype as mstype
+from mindspore.common.tensor import Tensor
+from mindspore.ops import composite as C
+from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE)

 add1 = P.TensorAdd()
 mul1 = P.MatMul()
 add2 = P.TensorAdd()

+
 def add(x, y):
     return add1(x, y)

+
 class Func(nn.Cell):
     def __init__(self):
         super(Func, self).__init__()
| @@ -48,7 +49,10 @@ class Func(nn.Cell): | |||||
| out = F.depend(out, clear) | out = F.depend(out, clear) | ||||
| return out | return out | ||||
| grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True) | grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True) | ||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| @@ -69,6 +73,7 @@ class Net(nn.Cell): | |||||
| out = F.depend(out, clear) | out = F.depend(out, clear) | ||||
| return out | return out | ||||
| def test_add(): | def test_add(): | ||||
| x = Tensor(np.ones([3, 3]).astype(np.float32)) | x = Tensor(np.ones([3, 3]).astype(np.float32)) | ||||
| y = Tensor(np.ones([3, 3]).astype(np.float32)) | y = Tensor(np.ones([3, 3]).astype(np.float32)) | ||||
| @@ -76,6 +81,7 @@ def test_add(): | |||||
| func.add_flags(has_effect=True) | func.add_flags(has_effect=True) | ||||
| func(x, y) | func(x, y) | ||||
| def test_sens(): | def test_sens(): | ||||
| x = Tensor(np.ones([3, 3]).astype(np.float32)) | x = Tensor(np.ones([3, 3]).astype(np.float32)) | ||||
| y = Tensor(np.ones([3, 3]).astype(np.float32)) | y = Tensor(np.ones([3, 3]).astype(np.float32)) | ||||
| @@ -84,6 +90,7 @@ def test_sens(): | |||||
| net.add_flags(has_effect=True) | net.add_flags(has_effect=True) | ||||
| out = net(x, y, sens) | out = net(x, y, sens) | ||||
| class Net_hyper(nn.Cell): | class Net_hyper(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net_hyper, self).__init__() | super(Net_hyper, self).__init__() | ||||
| @@ -105,6 +112,7 @@ class Net_hyper(nn.Cell): | |||||
| out = F.depend(out, clear) | out = F.depend(out, clear) | ||||
| return out | return out | ||||
| def test_hyper_add(): | def test_hyper_add(): | ||||
| x = Tensor(np.ones([3, 3]).astype(np.float32)) | x = Tensor(np.ones([3, 3]).astype(np.float32)) | ||||
| y = Tensor(np.ones([3, 3]).astype(np.float32)) | y = Tensor(np.ones([3, 3]).astype(np.float32)) | ||||
| @@ -113,10 +121,11 @@ def test_hyper_add(): | |||||
| net.add_flags(has_effect=True) | net.add_flags(has_effect=True) | ||||
| out = net(x, y, sens) | out = net(x, y, sens) | ||||
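Editor's note: test_sens above relies on grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True), meaning the caller passes the output sensitivity (the initial gradient) as an extra trailing argument instead of an implicit all-ones tensor. A hypothetical sketch of that calling convention, mirroring the wrapper pattern used in this file:

import numpy as np
import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore.ops import composite as C

grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True)

class AddNet(nn.Cell):
    # hypothetical stand-in for the Net defined in the hunks above
    def construct(self, x, y):
        return x + y

class GradWrap(nn.Cell):
    def __init__(self, net):
        super(GradWrap, self).__init__()
        self.net = net

    def construct(self, x, y, sens):
        return grad_s(self.net)(x, y, sens)  # sens: caller-supplied dL/dout

x = Tensor(np.ones([3, 3]).astype(np.float32))
y = Tensor(np.ones([3, 3]).astype(np.float32))
sens = Tensor(np.ones([3, 3]).astype(np.float32))
grads = GradWrap(AddNet())(x, y, sens)  # tuple of (dL/dx, dL/dy)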
| def test_keep_order_io_effect_exception_return_dtype(): | def test_keep_order_io_effect_exception_return_dtype(): | ||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super().__init__() | |||||
| super().__init__() | |||||
| self.alloc_status = P.NPUAllocFloatStatus() | self.alloc_status = P.NPUAllocFloatStatus() | ||||
| self.get_status = P.NPUGetFloatStatus() | self.get_status = P.NPUGetFloatStatus() | ||||
| self.clear_status = P.NPUClearFloatStatus() | self.clear_status = P.NPUClearFloatStatus() | ||||
| @@ -126,16 +135,16 @@ def test_keep_order_io_effect_exception_return_dtype(): | |||||
| self.neg = P.Neg() | self.neg = P.Neg() | ||||
| self.add_flags(has_effect=True) | self.add_flags(has_effect=True) | ||||
| def construct(self, x): | |||||
| def construct(self, x): | |||||
| init = self.alloc_status() | init = self.alloc_status() | ||||
| self.clear_status(init) | self.clear_status(init) | ||||
| res = self.sub(x, self.neg(x)) | |||||
| res = self.sub(x, self.neg(x)) | |||||
| self.get_status(init) | self.get_status(init) | ||||
| dtype = self.dtype(res) | dtype = self.dtype(res) | ||||
| return dtype | return dtype | ||||
| value = 655 | |||||
| value = 655 | |||||
| data = np.full((8, 5, 3, 1), value, dtype=np.float16) | data = np.full((8, 5, 3, 1), value, dtype=np.float16) | ||||
| x = Tensor(data, dtype=mstype.float16) | x = Tensor(data, dtype=mstype.float16) | ||||
| net = Net() | net = Net() | ||||
| data = net(x) | |||||
| data = net(x) | |||||
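Editor's note: test_keep_order_io_effect_exception_return_dtype builds the Ascend overflow-check idiom: allocate a float-status buffer, clear it, run the watched computation, then read the status back, with add_flags(has_effect=True) (and F.depend elsewhere in this file) forcing those side-effecting ops to stay ordered. A condensed sketch of the idiom, assuming an Ascend target and the operator names used in the hunk:

import mindspore.nn as nn
from mindspore.ops import functional as F
from mindspore.ops import operations as P

class OverflowCheck(nn.Cell):
    # hypothetical condensed version of the Net above
    def __init__(self):
        super(OverflowCheck, self).__init__()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.sub = P.Sub()
        self.neg = P.Neg()
        self.add_flags(has_effect=True)  # keep side effects in program order

    def construct(self, x):
        init = self.alloc_status()      # allocate the status buffer
        self.clear_status(init)         # reset it before computing
        res = self.sub(x, self.neg(x))  # watched computation: x - (-x)
        self.get_status(init)           # read overflow flags back into init
        return F.depend(res, init)      # tie the status read to the output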
| @@ -14,10 +14,13 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test accuracy""" | """test accuracy""" | ||||
| import math | import math | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import Accuracy | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import Accuracy | |||||
| def test_classification_accuracy(): | def test_classification_accuracy(): | ||||
| """test_classification_accuracy""" | """test_classification_accuracy""" | ||||
| @@ -29,8 +32,9 @@ def test_classification_accuracy(): | |||||
| metric.update(x, y) | metric.update(x, y) | ||||
| accuracy = metric.eval() | accuracy = metric.eval() | ||||
| accuracy2 = metric(x, y2) | accuracy2 = metric(x, y2) | ||||
| assert math.isclose(accuracy, 2/3) | |||||
| assert math.isclose(accuracy2, 2/3) | |||||
| assert math.isclose(accuracy, 2 / 3) | |||||
| assert math.isclose(accuracy2, 2 / 3) | |||||
| def test_multilabel_accuracy(): | def test_multilabel_accuracy(): | ||||
| x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) | x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) | ||||
| @@ -39,7 +43,8 @@ def test_multilabel_accuracy(): | |||||
| metric.clear() | metric.clear() | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| accuracy = metric.eval() | accuracy = metric.eval() | ||||
| assert accuracy == 1/3 | |||||
| assert accuracy == 1 / 3 | |||||
| def test_shape_accuracy(): | def test_shape_accuracy(): | ||||
| x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) | x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) | ||||
| @@ -49,6 +54,7 @@ def test_shape_accuracy(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| def test_shape_accuracy2(): | def test_shape_accuracy2(): | ||||
| x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) | x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) | ||||
| y = Tensor(np.array([0, 1, 1, 1])) | y = Tensor(np.array([0, 1, 1, 1])) | ||||
| @@ -57,6 +63,7 @@ def test_shape_accuracy2(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| def test_shape_accuracy3(): | def test_shape_accuracy3(): | ||||
| x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) | x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) | ||||
| y = Tensor(np.array([[1, 0, 1], [1, 1, 1]])) | y = Tensor(np.array([[1, 0, 1], [1, 1, 1]])) | ||||
| @@ -65,6 +72,7 @@ def test_shape_accuracy3(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| def test_shape_accuracy4(): | def test_shape_accuracy4(): | ||||
| x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) | x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) | ||||
| y = Tensor(np.array(1)) | y = Tensor(np.array(1)) | ||||
| @@ -73,6 +81,7 @@ def test_shape_accuracy4(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| def test_type_accuracy(): | def test_type_accuracy(): | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Accuracy('test') | Accuracy('test') | ||||
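Editor's note: every metric in this file follows the same clear() / update() / eval() protocol, and calling the metric object itself (metric(x, y2) above) is shorthand for update-then-eval on fresh inputs. A minimal usage sketch; the inputs are hypothetical but chosen to reproduce the 2/3 the test asserts, assuming the 'classification' eval type these cases exercise:

import numpy as np
from mindspore import Tensor
from mindspore.nn.metrics import Accuracy

x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))  # class scores
y = Tensor(np.array([1, 0, 1]))                             # labels
metric = Accuracy('classification')
metric.clear()        # reset the internal counters
metric.update(x, y)   # accumulate one batch
print(metric.eval())  # 2/3: rows 0 and 1 are argmax-correct, row 2 is not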
| @@ -14,10 +14,12 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test error""" | """test error""" | ||||
| import math | import math | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import MAE, MSE | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import MAE, MSE | |||||
| def test_MAE(): | def test_MAE(): | ||||
| @@ -27,7 +29,7 @@ def test_MAE(): | |||||
| error.clear() | error.clear() | ||||
| error.update(x, y) | error.update(x, y) | ||||
| result = error.eval() | result = error.eval() | ||||
| assert math.isclose(result, 0.15/4) | |||||
| assert math.isclose(result, 0.15 / 4) | |||||
| def test_input_MAE(): | def test_input_MAE(): | ||||
| @@ -52,7 +54,7 @@ def test_MSE(): | |||||
| error.clear() | error.clear() | ||||
| error.update(x, y) | error.update(x, y) | ||||
| result = error.eval() | result = error.eval() | ||||
| assert math.isclose(result, 0.0125/4) | |||||
| assert math.isclose(result, 0.0125 / 4) | |||||
| def test_input_MSE(): | def test_input_MSE(): | ||||
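Editor's note: test_MAE asserts 0.15 / 4 and test_MSE asserts 0.0125 / 4, i.e. both metrics divide the accumulated absolute (resp. squared) error by the element count. A worked sketch with hypothetical four-element inputs whose elementwise errors (0, 0.05, 0.1, 0) reproduce both totals:

import numpy as np
from mindspore import Tensor
from mindspore.nn.metrics import MAE, MSE

x = Tensor(np.array([0.1, 0.2, 0.6, 0.9]))   # hypothetical predictions
y = Tensor(np.array([0.1, 0.25, 0.7, 0.9]))  # targets
mae = MAE()
mae.clear()
mae.update(x, y)
assert np.isclose(mae.eval(), 0.15 / 4)      # (0 + 0.05 + 0.1 + 0) / 4
mse = MSE()
mse.clear()
mse.update(x, y)
assert np.isclose(mse.eval(), 0.0125 / 4)    # (0 + 0.0025 + 0.01 + 0) / 4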
| @@ -13,11 +13,11 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| # """test_fbeta""" | # """test_fbeta""" | ||||
| import math | |||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import get_metric_fn, Fbeta | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import get_metric_fn, Fbeta | |||||
| def test_classification_fbeta(): | def test_classification_fbeta(): | ||||
| @@ -32,9 +32,9 @@ def test_classification_fbeta(): | |||||
| fbeta_mean = metric.eval(True) | fbeta_mean = metric.eval(True) | ||||
| fbeta2 = metric(x, y2) | fbeta2 = metric(x, y2) | ||||
| assert np.allclose(fbeta, np.array([2/3, 2/3])) | |||||
| assert np.allclose(fbeta2, np.array([2/3, 2/3])) | |||||
| assert np.allclose(fbeta_mean, 2/3) | |||||
| assert np.allclose(fbeta, np.array([2 / 3, 2 / 3])) | |||||
| assert np.allclose(fbeta2, np.array([2 / 3, 2 / 3])) | |||||
| assert np.allclose(fbeta_mean, 2 / 3) | |||||
| def test_fbeta_update1(): | def test_fbeta_update1(): | ||||
| @@ -46,6 +46,7 @@ def test_fbeta_update1(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| def test_fbeta_update2(): | def test_fbeta_update2(): | ||||
| x1 = Tensor(np.array([[0.2, 0.5, 0.7], [0.3, 0.1, 0.2], [0.9, 0.6, 0.5]])) | x1 = Tensor(np.array([[0.2, 0.5, 0.7], [0.3, 0.1, 0.2], [0.9, 0.6, 0.5]])) | ||||
| y1 = Tensor(np.array([1, 0, 2])) | y1 = Tensor(np.array([1, 0, 2])) | ||||
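Editor's note: Fbeta's eval() returns a per-class array while eval(True) returns the scalar mean, which is why the hunk above checks both against np.allclose. A minimal sketch with hypothetical inputs that yield the asserted [2/3, 2/3] per-class scores, assuming beta=1 (the F1 case):

import numpy as np
from mindspore import Tensor
from mindspore.nn.metrics import Fbeta

x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))  # class scores
y = Tensor(np.array([1, 0, 1]))                             # labels
metric = Fbeta(1)            # beta = 1, i.e. the F1 score
metric.clear()
metric.update(x, y)
per_class = metric.eval()    # array([2/3, 2/3]) for this data
mean_f1 = metric.eval(True)  # scalar mean across classes: 2/3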
| @@ -15,8 +15,9 @@ | |||||
| """test loss""" | """test loss""" | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import Loss | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import Loss | |||||
| def test_loss_inputs_error(): | def test_loss_inputs_error(): | ||||
| @@ -14,9 +14,11 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test_metric_factory""" | """test_metric_factory""" | ||||
| import math | import math | ||||
| import numpy as np | import numpy as np | ||||
| from mindspore.nn.metrics import get_metric_fn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import get_metric_fn | |||||
| def test_classification_accuracy(): | def test_classification_accuracy(): | ||||
| @@ -26,7 +28,7 @@ def test_classification_accuracy(): | |||||
| metric.clear() | metric.clear() | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| accuracy = metric.eval() | accuracy = metric.eval() | ||||
| assert math.isclose(accuracy, 2/3) | |||||
| assert math.isclose(accuracy, 2 / 3) | |||||
| def test_classification_accuracy_by_alias(): | def test_classification_accuracy_by_alias(): | ||||
| @@ -36,7 +38,7 @@ def test_classification_accuracy_by_alias(): | |||||
| metric.clear() | metric.clear() | ||||
| metric.update(x, y) | metric.update(x, y) | ||||
| accuracy = metric.eval() | accuracy = metric.eval() | ||||
| assert math.isclose(accuracy, 2/3) | |||||
| assert math.isclose(accuracy, 2 / 3) | |||||
| def test_classification_precision(): | def test_classification_precision(): | ||||
| @@ -14,10 +14,12 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test_precision""" | """test_precision""" | ||||
| import math | import math | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import Precision | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import Precision | |||||
| def test_classification_precision(): | def test_classification_precision(): | ||||
| @@ -43,7 +45,7 @@ def test_multilabel_precision(): | |||||
| metric.update(x, y) | metric.update(x, y) | ||||
| precision = metric.eval() | precision = metric.eval() | ||||
| assert np.equal(precision, np.array([1, 2/3, 1])).all() | |||||
| assert np.equal(precision, np.array([1, 2 / 3, 1])).all() | |||||
| def test_average_precision(): | def test_average_precision(): | ||||
| @@ -54,7 +56,7 @@ def test_average_precision(): | |||||
| metric.update(x, y) | metric.update(x, y) | ||||
| precision = metric.eval(True) | precision = metric.eval(True) | ||||
| assert math.isclose(precision, (1 + 2/3 + 1) / 3) | |||||
| assert math.isclose(precision, (1 + 2 / 3 + 1) / 3) | |||||
| def test_num_precision(): | def test_num_precision(): | ||||
| @@ -14,10 +14,12 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test recall""" | """test recall""" | ||||
| import math | import math | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import Recall | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import Recall | |||||
| def test_classification_recall(): | def test_classification_recall(): | ||||
| @@ -43,7 +45,7 @@ def test_multilabel_recall(): | |||||
| metric.update(x, y) | metric.update(x, y) | ||||
| recall = metric.eval() | recall = metric.eval() | ||||
| assert np.equal(recall, np.array([2/3, 2/3, 1])).all() | |||||
| assert np.equal(recall, np.array([2 / 3, 2 / 3, 1])).all() | |||||
| def test_average_recall(): | def test_average_recall(): | ||||
| @@ -54,7 +56,7 @@ def test_average_recall(): | |||||
| metric.update(x, y) | metric.update(x, y) | ||||
| recall = metric.eval(True) | recall = metric.eval(True) | ||||
| assert math.isclose(recall, (2/3 + 2/3 + 1) / 3) | |||||
| assert math.isclose(recall, (2 / 3 + 2 / 3 + 1) / 3) | |||||
| def test_num_recall(): | def test_num_recall(): | ||||
| @@ -14,10 +14,12 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test topk""" | """test topk""" | ||||
| import math | import math | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.nn.metrics import TopKCategoricalAccuracy, Top1CategoricalAccuracy, Top5CategoricalAccuracy | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.metrics import TopKCategoricalAccuracy, Top1CategoricalAccuracy, Top5CategoricalAccuracy | |||||
| def test_type_topk(): | def test_type_topk(): | ||||
| @@ -54,8 +56,8 @@ def test_topk(): | |||||
| topk.update(x, y) | topk.update(x, y) | ||||
| result = topk.eval() | result = topk.eval() | ||||
| result2 = topk(x, y2) | result2 = topk(x, y2) | ||||
| assert math.isclose(result, 2/3) | |||||
| assert math.isclose(result2, 2/3) | |||||
| assert math.isclose(result, 2 / 3) | |||||
| assert math.isclose(result2, 2 / 3) | |||||
| def test_zero_topk(): | def test_zero_topk(): | ||||
| @@ -79,8 +81,8 @@ def test_top1(): | |||||
| topk.update(x, y) | topk.update(x, y) | ||||
| result = topk.eval() | result = topk.eval() | ||||
| result2 = topk(x, y2) | result2 = topk(x, y2) | ||||
| assert math.isclose(result, 1/3) | |||||
| assert math.isclose(result2, 1/3) | |||||
| assert math.isclose(result, 1 / 3) | |||||
| assert math.isclose(result2, 1 / 3) | |||||
| def test_top5(): | def test_top5(): | ||||
| @@ -97,5 +99,5 @@ def test_top5(): | |||||
| topk.update(x, y) | topk.update(x, y) | ||||
| result = topk.eval() | result = topk.eval() | ||||
| result2 = topk(x, y2) | result2 = topk(x, y2) | ||||
| assert math.isclose(result, 2/3) | |||||
| assert math.isclose(result2, 2/3) | |||||
| assert math.isclose(result, 2 / 3) | |||||
| assert math.isclose(result2, 2 / 3) | |||||
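Editor's note: TopKCategoricalAccuracy counts a sample as correct when its label lands among the k highest-scoring classes; Top1CategoricalAccuracy and Top5CategoricalAccuracy are fixed-k conveniences, which is why the top-k results above (2/3) can exceed the top-1 result (1/3) on the same data. A minimal sketch with hypothetical three-class scores:

import numpy as np
from mindspore import Tensor
from mindspore.nn.metrics import TopKCategoricalAccuracy

x = Tensor(np.array([[0.2, 0.5, 0.3],
                     [0.1, 0.35, 0.25],
                     [0.9, 0.05, 0.05]]))  # hypothetical class scores
y = Tensor(np.array([2, 1, 0]))            # labels
topk = TopKCategoricalAccuracy(2)          # correct if label is in the top 2
topk.clear()
topk.update(x, y)
print(topk.eval())  # 1.0: every label here falls within the top two scores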
| @@ -15,6 +15,7 @@ | |||||
| """setup for pytest""" | """setup for pytest""" | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| # pylint: disable=unused-argument | # pylint: disable=unused-argument | ||||
| def setup_module(module): | def setup_module(module): | ||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| @@ -17,10 +17,10 @@ resnet50 example | |||||
| """ | """ | ||||
| import numpy as np | import numpy as np | ||||
| from mindspore.common.api import _executor | |||||
| import mindspore.nn as nn # pylint: disable=C0414 | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.ops.operations import TensorAdd | from mindspore.ops.operations import TensorAdd | ||||
| import mindspore.nn as nn # pylint: disable=C0414 | |||||
| from ...train_step_wrap import train_step_with_loss_warp | from ...train_step_wrap import train_step_with_loss_warp | ||||
| @@ -15,9 +15,8 @@ | |||||
| """ test bert cell """ | """ test bert cell """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore import Model | |||||
| from mindspore.nn.optim import AdamWeightDecay | |||||
| from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertModel, BertNetworkWithLoss, BertTrainOneStepCell | |||||
| from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertModel | |||||
| from ....dataset_mock import MindData | from ....dataset_mock import MindData | ||||
| @@ -14,26 +14,30 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test bert of graph compile """ | """ test bert of graph compile """ | ||||
| import functools | import functools | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| import mindspore.nn as nn | |||||
| import mindspore.ops.composite as C | |||||
| from mindspore.common.initializer import TruncatedNormal | |||||
| from mindspore.common.parameter import ParameterTuple | |||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| from mindspore.model_zoo.Bert_NEZHA import BertPretrainingLoss, GetNextSentenceOutput | |||||
| from mindspore.model_zoo.Bert_NEZHA.bert_for_pre_training import ClipGradients | |||||
| from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \ | from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \ | ||||
| EmbeddingLookup, EmbeddingPostprocessor, BertOutput, RelaPosMatrixGenerator, \ | EmbeddingLookup, EmbeddingPostprocessor, BertOutput, RelaPosMatrixGenerator, \ | ||||
| RelaPosEmbeddingsGenerator, SaturateCast, BertAttention, BertSelfAttention, \ | RelaPosEmbeddingsGenerator, SaturateCast, BertAttention, BertSelfAttention, \ | ||||
| BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel | BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel | ||||
| from mindspore.nn.layer.basic import Norm | from mindspore.nn.layer.basic import Norm | ||||
| from mindspore.model_zoo.Bert_NEZHA import BertPretrainingLoss, GetNextSentenceOutput | |||||
| import mindspore.nn as nn | |||||
| from mindspore.common.initializer import TruncatedNormal | |||||
| from mindspore.common.parameter import ParameterTuple | |||||
| from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR | from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR | ||||
| from mindspore.model_zoo.Bert_NEZHA.bert_for_pre_training import ClipGradients | |||||
| import mindspore.ops.composite as C | |||||
| from mindspore.ops import functional as F | |||||
| from ....ops_common import convert | |||||
| from ....mindspore_test_framework.mindspore_test import mindspore_test | from ....mindspore_test_framework.mindspore_test import mindspore_test | ||||
| from ....mindspore_test_framework.pipeline.forward.compile_forward import pipeline_for_compile_forward_ge_graph_for_case_by_case_config | |||||
| from ....mindspore_test_framework.pipeline.gradient.compile_gradient import pipeline_for_compile_grad_ge_graph_for_case_by_case_config | |||||
| from ....mindspore_test_framework.pipeline.forward.compile_forward import \ | |||||
| pipeline_for_compile_forward_ge_graph_for_case_by_case_config | |||||
| from ....mindspore_test_framework.pipeline.gradient.compile_gradient import \ | |||||
| pipeline_for_compile_grad_ge_graph_for_case_by_case_config | |||||
| from ....ops_common import convert | |||||
| def bert_trans(): | def bert_trans(): | ||||
| """bert_trans""" | """bert_trans""" | ||||
| @@ -53,10 +57,12 @@ def bert_trans(): | |||||
| net.set_train() | net.set_train() | ||||
| return net | return net | ||||
| def set_train(net): | def set_train(net): | ||||
| net.set_train() | net.set_train() | ||||
| return net | return net | ||||
| class NetForAdam(nn.Cell): | class NetForAdam(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(NetForAdam, self).__init__() | super(NetForAdam, self).__init__() | ||||
| @@ -66,8 +72,10 @@ class NetForAdam(nn.Cell): | |||||
| x = self.dense(x) | x = self.dense(x) | ||||
| return x | return x | ||||
| class TrainStepWrapForAdam(nn.Cell): | class TrainStepWrapForAdam(nn.Cell): | ||||
| """TrainStepWrapForAdam definition""" | """TrainStepWrapForAdam definition""" | ||||
| def __init__(self, network): | def __init__(self, network): | ||||
| super(TrainStepWrapForAdam, self).__init__() | super(TrainStepWrapForAdam, self).__init__() | ||||
| self.network = network | self.network = network | ||||
| @@ -81,8 +89,10 @@ class TrainStepWrapForAdam(nn.Cell): | |||||
| grads = self.clip_gradients(grads, 1, 1.0) | grads = self.clip_gradients(grads, 1, 1.0) | ||||
| return self.optimizer(grads) | return self.optimizer(grads) | ||||
| class TrainStepWrapForAdamDynamicLr(nn.Cell): | class TrainStepWrapForAdamDynamicLr(nn.Cell): | ||||
| """TrainStepWrapForAdamDynamicLr definition""" | """TrainStepWrapForAdamDynamicLr definition""" | ||||
| def __init__(self, network): | def __init__(self, network): | ||||
| super(TrainStepWrapForAdamDynamicLr, self).__init__() | super(TrainStepWrapForAdamDynamicLr, self).__init__() | ||||
| self.network = network | self.network = network | ||||
| @@ -95,16 +105,19 @@ class TrainStepWrapForAdamDynamicLr(nn.Cell): | |||||
| grads = C.grad_by_list_with_sens(self.network, weights)(x, self.sens) | grads = C.grad_by_list_with_sens(self.network, weights)(x, self.sens) | ||||
| return self.optimizer(grads) | return self.optimizer(grads) | ||||
| class TempC2Wrap(nn.Cell): | class TempC2Wrap(nn.Cell): | ||||
| def __init__(self, op, c1=None, c2=None,): | |||||
| def __init__(self, op, c1=None, c2=None, ): | |||||
| super(TempC2Wrap, self).__init__() | super(TempC2Wrap, self).__init__() | ||||
| self.op = op | self.op = op | ||||
| self.c1 = c1 | self.c1 = c1 | ||||
| self.c2 = c2 | self.c2 = c2 | ||||
| def construct(self, x1): | def construct(self, x1): | ||||
| x = self.op(x1, self.c1, self.c2) | x = self.op(x1, self.c1, self.c2) | ||||
| return x | return x | ||||
| test_case_cell_ops = [ | test_case_cell_ops = [ | ||||
| ('Norm_keepdims', { | ('Norm_keepdims', { | ||||
| 'block': Norm(keep_dims=True), | 'block': Norm(keep_dims=True), | ||||
| @@ -373,7 +386,7 @@ test_case_cell_ops = [ | |||||
| 'block': set_train(nn.Dense(in_channels=768, | 'block': set_train(nn.Dense(in_channels=768, | ||||
| out_channels=3072, | out_channels=3072, | ||||
| activation='gelu', | activation='gelu', | ||||
| weight_init=TruncatedNormal(0.02),)), | |||||
| weight_init=TruncatedNormal(0.02), )), | |||||
| 'desc_inputs': [[3, 768]], | 'desc_inputs': [[3, 768]], | ||||
| 'desc_bprop': [[3, 3072]]}), | 'desc_bprop': [[3, 3072]]}), | ||||
| ('GetNextSentenceOutput', { | ('GetNextSentenceOutput', { | ||||
| @@ -396,26 +409,28 @@ test_case_cell_ops = [ | |||||
| 'block': TempC2Wrap(ClipGradients(), 1, 1.0), | 'block': TempC2Wrap(ClipGradients(), 1, 1.0), | ||||
| 'desc_inputs': [tuple(convert(shp) for shp in [[1], [1], [1]])], | 'desc_inputs': [tuple(convert(shp) for shp in [[1], [1], [1]])], | ||||
| 'skip': ['backward', 'exec']}), | 'skip': ['backward', 'exec']}), | ||||
| ] | |||||
| ] | |||||
| test_case = functools.reduce(lambda x, y: x+y, [test_case_cell_ops]) | |||||
| test_case = functools.reduce(lambda x, y: x + y, [test_case_cell_ops]) | |||||
| # use -k to select certain testcase | # use -k to select certain testcase | ||||
| # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | ||||
| test_exec_case = filter(lambda x: 'skip' not in x[1] or | test_exec_case = filter(lambda x: 'skip' not in x[1] or | ||||
| 'exec' not in x[1]['skip'], test_case) | |||||
| 'exec' not in x[1]['skip'], test_case) | |||||
| test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or | test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or | ||||
| 'backward' not in x[1]['skip'] and 'backward_exec' | |||||
| not in x[1]['skip'], test_case) | |||||
| 'backward' not in x[1]['skip'] and 'backward_exec' | |||||
| not in x[1]['skip'], test_case) | |||||
| test_check_gradient_case = filter(lambda x: 'skip' not in x[1] or | test_check_gradient_case = filter(lambda x: 'skip' not in x[1] or | ||||
| 'backward' not in x[1]['skip'] and 'backward_exec' | |||||
| not in x[1]['skip'], test_case) | |||||
| 'backward' not in x[1]['skip'] and 'backward_exec' | |||||
| not in x[1]['skip'], test_case) | |||||
| @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | ||||
| def test_exec(): | def test_exec(): | ||||
| return test_exec_case | return test_exec_case | ||||
| @mindspore_test(pipeline_for_compile_grad_ge_graph_for_case_by_case_config) | @mindspore_test(pipeline_for_compile_grad_ge_graph_for_case_by_case_config) | ||||
| def test_backward_exec(): | def test_backward_exec(): | ||||
| return test_backward_exec_case | return test_backward_exec_case | ||||
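Editor's note: this file drives compilation from a declarative case table: each entry pairs a name with a dict holding the 'block' (a cell or wrapped op), 'desc_inputs' shapes, optional 'desc_bprop' gradient shapes, and a 'skip' list, and the filter(...) expressions above carve the table into forward/backward subsets before the @mindspore_test pipelines consume them. A schematic extra entry, hypothetical but shaped like the ones in test_case_cell_ops:

('Dense_gelu_extra', {
    'block': set_train(nn.Dense(in_channels=768, out_channels=3072,
                                activation='gelu')),
    'desc_inputs': [[3, 768]],   # input shapes fed to the block
    'desc_bprop': [[3, 3072]],   # output-gradient shapes for the backward pass
    'skip': ['exec']}),          # compile forward only; skip execution stages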
| @@ -15,16 +15,19 @@ | |||||
| """test lenet""" | """test lenet""" | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| import mindspore.context as context | |||||
| from ....train_step_wrap import train_step_with_loss_warp, train_step_with_sens | from ....train_step_wrap import train_step_with_loss_warp, train_step_with_sens | ||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| class LeNet5(nn.Cell): | class LeNet5(nn.Cell): | ||||
| """LeNet5 definition""" | """LeNet5 definition""" | ||||
| def __init__(self): | def __init__(self): | ||||
| super(LeNet5, self).__init__() | super(LeNet5, self).__init__() | ||||
| self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') | self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') | ||||
| @@ -14,8 +14,8 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test_lenet_core_after_exception""" | """test_lenet_core_after_exception""" | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| @@ -25,6 +25,7 @@ from ....train_step_wrap import train_step_with_loss_warp | |||||
| class LeNet5(nn.Cell): | class LeNet5(nn.Cell): | ||||
| """LeNet5 definition""" | """LeNet5 definition""" | ||||
| def __init__(self): | def __init__(self): | ||||
| super(LeNet5, self).__init__() | super(LeNet5, self).__init__() | ||||
| self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid") | self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid") | ||||
| @@ -15,23 +15,24 @@ | |||||
| """test_mix_precision""" | """test_mix_precision""" | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | |||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor, context | |||||
| from mindspore.common import ParameterTuple | |||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.common.parameter import Parameter | from mindspore.common.parameter import Parameter | ||||
| from mindspore.common import ParameterTuple | |||||
| from mindspore import Tensor, context | |||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops import composite as C | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | |||||
| from mindspore.nn import Momentum | from mindspore.nn import Momentum | ||||
| from ....train_step_wrap import train_step_with_loss_warp | |||||
| from tests.ops_common import convert | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | |||||
| from mindspore.ops import composite as C | |||||
| from mindspore.ops import operations as P | |||||
| from mindspore.train.parallel_utils import ParallelMode | from mindspore.train.parallel_utils import ParallelMode | ||||
| from tests.ops_common import convert | |||||
| from ....train_step_wrap import train_step_with_loss_warp | |||||
| class LeNet5(nn.Cell): | class LeNet5(nn.Cell): | ||||
| """LeNet5""" | """LeNet5""" | ||||
| def __init__(self): | def __init__(self): | ||||
| super(LeNet5, self).__init__() | super(LeNet5, self).__init__() | ||||
| self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') | self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') | ||||
| @@ -148,10 +149,13 @@ def test_cast(): | |||||
| """test grad of PReLU, which cause AddN(generated by grad) fail""" | """test grad of PReLU, which cause AddN(generated by grad) fail""" | ||||
| class IRBlockZ(nn.Cell): | class IRBlockZ(nn.Cell): | ||||
| def __init__(self, inplanes, planes): | def __init__(self, inplanes, planes): | ||||
| super(IRBlockZ, self).__init__() | super(IRBlockZ, self).__init__() | ||||
| self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, pad_mode="same", group=1, has_bias=False, dilation=1) | |||||
| self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, pad_mode="same", group=1, has_bias=False, | |||||
| dilation=1) | |||||
| self.act_layer = nn.PReLU(planes) | self.act_layer = nn.PReLU(planes) | ||||
| def construct(self, x): | def construct(self, x): | ||||
| @@ -14,6 +14,7 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """test_dataset_utils""" | """test_dataset_utils""" | ||||
| import pytest | import pytest | ||||
| import mindspore as ms | import mindspore as ms | ||||
| from mindspore.train._utils import _construct_tensor_list | from mindspore.train._utils import _construct_tensor_list | ||||
| @@ -15,6 +15,7 @@ | |||||
| """setup for pytest""" | """setup for pytest""" | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| # pylint: disable=unused-argument | # pylint: disable=unused-argument | ||||
| def setup_module(module): | def setup_module(module): | ||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| @@ -15,16 +15,18 @@ | |||||
| """ test adam """ | """ test adam """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR | from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR | ||||
| from mindspore.ops import operations as P | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| """ Net definition """ | """ Net definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | ||||
| @@ -15,6 +15,7 @@ | |||||
| """ test FTRL """ | """ test FTRL """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| @@ -47,4 +48,3 @@ def test_ftrl(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| train_network = TrainOneStepCell(net_with_loss, optimizer) | train_network = TrainOneStepCell(net_with_loss, optimizer) | ||||
| _executor.compile(train_network, inputs, label) | _executor.compile(train_network, inputs, label) | ||||
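Editor's note: the optimizer tests in this directory all end with the same three-step harness: wrap the network plus loss in WithLossCell, wrap that plus the optimizer in TrainOneStepCell, then graph-compile a single step with _executor.compile. A minimal sketch of the harness with a hypothetical toy Dense net, reusing only classes imported by these files:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum

net = nn.Dense(16, 10)                    # hypothetical toy network
loss = nn.SoftmaxCrossEntropyWithLogits()
optimizer = Momentum(net.trainable_params(), 0.1, 0.9)

inputs = Tensor(np.ones([32, 16]).astype(np.float32))
label = Tensor(np.zeros([32, 10]).astype(np.float32))   # one-hot targets
net_with_loss = WithLossCell(net, loss)                 # net + loss -> scalar
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)         # compile one step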
| @@ -14,16 +14,18 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test lamb """ | """ test lamb """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn.optim import Lamb | from mindspore.nn.optim import Lamb | ||||
| from mindspore.ops import operations as P | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| """ Net definition """ | """ Net definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | ||||
| @@ -38,6 +40,7 @@ class Net(nn.Cell): | |||||
| class NetWithoutWeight(nn.Cell): | class NetWithoutWeight(nn.Cell): | ||||
| """ NetWithoutWeight definition """ | """ NetWithoutWeight definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(NetWithoutWeight, self).__init__() | super(NetWithoutWeight, self).__init__() | ||||
| self.matmul = P.MatMul() | self.matmul = P.MatMul() | ||||
| @@ -12,15 +12,17 @@ | |||||
| # See the License for the specific language governing permissions and | # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| from collections import Counter | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.nn.optim import LARS, Momentum | from mindspore.nn.optim import LARS, Momentum | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.common import dtype as mstype | |||||
| from collections import Counter | |||||
| def multisteplr(total_steps, milestone, base_lr=0.9, gamma=0.1, dtype=mstype.float32): | def multisteplr(total_steps, milestone, base_lr=0.9, gamma=0.1, dtype=mstype.float32): | ||||
| @@ -56,7 +58,7 @@ def test_lars_multi_step_lr(): | |||||
| lr = multisteplr(10, [2, 6]) | lr = multisteplr(10, [2, 6]) | ||||
| SGD = Momentum(net.trainable_params(), lr, 0.9) | SGD = Momentum(net.trainable_params(), lr, 0.9) | ||||
| optimizer = LARS(SGD, epsilon=1e-08, hyperpara=0.02, decay_filter=lambda x: 'bn' not in x.name, | optimizer = LARS(SGD, epsilon=1e-08, hyperpara=0.02, decay_filter=lambda x: 'bn' not in x.name, | ||||
| lars_filter=lambda x: 'bn' not in x.name) | |||||
| lars_filter=lambda x: 'bn' not in x.name) | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| train_network = TrainOneStepCell(net_with_loss, optimizer) | train_network = TrainOneStepCell(net_with_loss, optimizer) | ||||
| @@ -77,4 +79,4 @@ def test_lars_float_lr(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| train_network = TrainOneStepCell(net_with_loss, optimizer) | train_network = TrainOneStepCell(net_with_loss, optimizer) | ||||
| _executor.compile(train_network, inputs, label) | |||||
| _executor.compile(train_network, inputs, label) | |||||
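Editor's note: LARS here acts as a wrapper optimizer: it takes an already-built Momentum instance (named SGD above) plus two predicates selecting which parameters receive weight decay and layer-wise rate scaling; excluding batch-norm parameters via 'bn' not in x.name is the usual choice. A sketch of the wrapping call, assuming net is a Cell and reusing the multisteplr helper defined at the top of this file:

from mindspore.nn.optim import LARS, Momentum

lr = multisteplr(10, [2, 6])                  # stepped schedule over 10 steps
sgd = Momentum(net.trainable_params(), lr, 0.9)
optimizer = LARS(sgd, epsilon=1e-08, hyperpara=0.02,
                 decay_filter=lambda x: 'bn' not in x.name,  # skip BN decay
                 lars_filter=lambda x: 'bn' not in x.name)   # skip BN scaling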
| @@ -14,19 +14,17 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test_lr_schedule """ | """ test_lr_schedule """ | ||||
| import numpy as np | import numpy as np | ||||
| from mindspore.nn import Cell | |||||
| from mindspore.ops.operations import BiasAdd, MatMul | |||||
| from mindspore import Parameter, ParameterTuple, Tensor | from mindspore import Parameter, ParameterTuple, Tensor | ||||
| from mindspore.nn import WithLossCell | |||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore.nn import SoftmaxCrossEntropyWithLogits | |||||
| from mindspore.ops.composite import grad_by_list | |||||
| from mindspore.ops import functional as F | |||||
| from mindspore.nn import Cell | |||||
| from mindspore.nn.optim import Optimizer | from mindspore.nn.optim import Optimizer | ||||
| from mindspore.ops.composite import grad_by_list | |||||
| from mindspore.ops.operations import BiasAdd, MatMul | |||||
| class Net(Cell): | class Net(Cell): | ||||
| """ Net definition """ | """ Net definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight") | self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight") | ||||
| @@ -41,6 +39,7 @@ class Net(Cell): | |||||
| class _TrainOneStepCell(Cell): | class _TrainOneStepCell(Cell): | ||||
| """ _TrainOneStepCell definition """ | """ _TrainOneStepCell definition """ | ||||
| def __init__(self, network, optimizer): | def __init__(self, network, optimizer): | ||||
| """ | """ | ||||
| Append an optimizer to the training network, after which the construct | Append an optimizer to the training network, after which the construct | ||||
| @@ -67,4 +66,3 @@ class _TrainOneStepCell(Cell): | |||||
| if self.lr_schedule: | if self.lr_schedule: | ||||
| self.schedule.update_lr(*args) | self.schedule.update_lr(*args) | ||||
| return self.optimizer(grads) | return self.optimizer(grads) | ||||
| @@ -14,16 +14,18 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test momentum """ | """ test momentum """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn.optim import Momentum | from mindspore.nn.optim import Momentum | ||||
| from mindspore.ops import operations as P | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| """ Net definition """ | """ Net definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | ||||
| @@ -15,9 +15,10 @@ | |||||
| """ test optimizer """ | """ test optimizer """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR | |||||
| from mindspore.common.parameter import Parameter | from mindspore.common.parameter import Parameter | ||||
| from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR | |||||
| class IterableObjc: | class IterableObjc: | ||||
| @@ -30,6 +31,7 @@ class IterableObjc: | |||||
| params = IterableObjc() | params = IterableObjc() | ||||
| class TestOptimizer(): | class TestOptimizer(): | ||||
| def test_init(self): | def test_init(self): | ||||
| Optimizer(0.5, params) | Optimizer(0.5, params) | ||||
| @@ -44,6 +46,7 @@ class TestOptimizer(): | |||||
| class TestAdam(): | class TestAdam(): | ||||
| """ TestAdam definition """ | """ TestAdam definition """ | ||||
| def test_init(self): | def test_init(self): | ||||
| Adam(params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, | Adam(params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, | ||||
| use_nesterov=False, weight_decay=0.0, loss_scale=1.0) | use_nesterov=False, weight_decay=0.0, loss_scale=1.0) | ||||
| @@ -58,6 +61,7 @@ class TestAdam(): | |||||
| class TestSGD(): | class TestSGD(): | ||||
| """ TestSGD definition """ | """ TestSGD definition """ | ||||
| def test_init(self): | def test_init(self): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| SGD(params, learning_rate=0.1, momentum=-0.1, dampening=0, weight_decay=0, nesterov=False) | SGD(params, learning_rate=0.1, momentum=-0.1, dampening=0, weight_decay=0, nesterov=False) | ||||
| @@ -68,6 +72,7 @@ class TestSGD(): | |||||
| class TestNullParam(): | class TestNullParam(): | ||||
| """ TestNullParam definition """ | """ TestNullParam definition """ | ||||
| def test_optim_init(self): | def test_optim_init(self): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| Optimizer(0.1, None) | Optimizer(0.1, None) | ||||
| @@ -84,8 +89,10 @@ class TestNullParam(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| SGD(None) | SGD(None) | ||||
| class TestUnsupportParam(): | class TestUnsupportParam(): | ||||
| """ TestUnsupportParam definition """ | """ TestUnsupportParam definition """ | ||||
| def test_optim_init(self): | def test_optim_init(self): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| Optimizer(0.1, (1, 2, 3)) | Optimizer(0.1, (1, 2, 3)) | ||||
| @@ -15,16 +15,18 @@ | |||||
| """ test adam """ | """ test adam """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn.optim import RMSProp | from mindspore.nn.optim import RMSProp | ||||
| from mindspore.ops import operations as P | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| """ Net definition """ | """ Net definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") | ||||
| @@ -59,4 +61,3 @@ def test_rmsprop_e(): | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| RMSProp(net.get_parameters(), momentum=1, learning_rate=0.1) | RMSProp(net.get_parameters(), momentum=1, learning_rate=0.1) | ||||
| @@ -14,11 +14,13 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test Activations """ | """ test Activations """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| class SoftmaxNet(nn.Cell): | class SoftmaxNet(nn.Cell): | ||||
| def __init__(self, dim): | def __init__(self, dim): | ||||
| super(SoftmaxNet, self).__init__() | super(SoftmaxNet, self).__init__() | ||||
| @@ -17,8 +17,8 @@ import numpy as np | |||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | |||||
| def test_bn_pars_valid1(): | def test_bn_pars_valid1(): | ||||
| @@ -62,11 +62,12 @@ class GroupNet(nn.Cell): | |||||
| def __init__(self): | def __init__(self): | ||||
| super(GroupNet, self).__init__() | super(GroupNet, self).__init__() | ||||
| self.group_bn = nn.GroupNorm() | self.group_bn = nn.GroupNorm() | ||||
| def construct(self, x): | def construct(self, x): | ||||
| return self.group_bn(x) | return self.group_bn(x) | ||||
| def test_compile_groupnorm(): | def test_compile_groupnorm(): | ||||
| net = nn.GroupNorm(16, 64) | net = nn.GroupNorm(16, 64) | ||||
| input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) | |||||
| input_data = Tensor(np.random.rand(1, 64, 256, 256).astype(np.float32)) | |||||
| _executor.compile(net, input_data) | _executor.compile(net, input_data) | ||||
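Editor's note: nn.GroupNorm(16, 64) splits the 64 channels of an NCHW input into 16 groups of 4 and normalizes within each group, so the channel dimension of the input must match the second argument. A minimal compile sketch mirroring the test above:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor

net = nn.GroupNorm(16, 64)  # 16 groups over 64 channels, 4 channels per group
x = Tensor(np.random.rand(1, 64, 256, 256).astype(np.float32))  # NCHW input
_executor.compile(net, x)   # graph-compile only, as in the test above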
| @@ -15,11 +15,10 @@ | |||||
| """ test cell """ | """ test cell """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from ..ut_filter import non_graph_engine | |||||
| class ModA(nn.Cell): | class ModA(nn.Cell): | ||||
| @@ -90,7 +89,7 @@ class ConvNet(nn.Cell): | |||||
| self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") | ||||
| self.flatten = nn.Flatten() | self.flatten = nn.Flatten() | ||||
| self.fc = nn.Dense( | self.fc = nn.Dense( | ||||
| int(ConvNet.image_h*ConvNet.image_w*ConvNet.output_ch/(4*4)), | |||||
| int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch / (4 * 4)), | |||||
| num_classes) | num_classes) | ||||
| def construct(self, x): | def construct(self, x): | ||||
| @@ -12,14 +12,15 @@ | |||||
| # See the License for the specific language governing permissions and | # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import pytest | |||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.nn import TrainOneStepCell, WithLossCell, ParameterUpdate | from mindspore.nn import TrainOneStepCell, WithLossCell, ParameterUpdate | ||||
| from mindspore.nn.optim import Momentum | from mindspore.nn.optim import Momentum | ||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| @@ -14,10 +14,10 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test checkparameter """ | """ test checkparameter """ | ||||
| import pytest | import pytest | ||||
| from mindspore._checkparam import check_int, check_int_positive, \ | from mindspore._checkparam import check_int, check_int_positive, \ | ||||
| check_input_format, check_bool, twice | check_input_format, check_bool, twice | ||||
| kernel_size = 5 | kernel_size = 5 | ||||
| kernel_size1 = twice(kernel_size) | kernel_size1 = twice(kernel_size) | ||||
| assert kernel_size1 == (5, 5) | assert kernel_size1 == (5, 5) | ||||
| @@ -14,11 +14,12 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test clip_by_norm """ | """ test clip_by_norm """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| @non_graph_engine | @non_graph_engine | ||||
| def test_clip_by_norm(): | def test_clip_by_norm(): | ||||
| clip_by_norm = nn.ClipByNorm() | clip_by_norm = nn.ClipByNorm() | ||||
| @@ -14,12 +14,13 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test container """ | """ test container """ | ||||
| from collections import OrderedDict | from collections import OrderedDict | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| weight = Tensor(np.ones([2, 2])) | weight = Tensor(np.ones([2, 2])) | ||||
| conv2 = nn.Conv2d(3, 64, (3, 3), stride=2, padding=0) | conv2 = nn.Conv2d(3, 64, (3, 3), stride=2, padding=0) | ||||
| @@ -31,6 +32,7 @@ avg_pool = nn.AvgPool2d(kernel_size, stride) | |||||
| class TestSequentialCell(): | class TestSequentialCell(): | ||||
| """ TestSequentialCell """ | """ TestSequentialCell """ | ||||
| def test_SequentialCell_init(self): | def test_SequentialCell_init(self): | ||||
| m = nn.SequentialCell() | m = nn.SequentialCell() | ||||
| assert type(m).__name__ == 'SequentialCell' | assert type(m).__name__ == 'SequentialCell' | ||||
| @@ -86,6 +88,7 @@ class TestSequentialCell(): | |||||
| class TestCellList(): | class TestCellList(): | ||||
| """ TestCellList """ | """ TestCellList """ | ||||
| def test_init1(self): | def test_init1(self): | ||||
| cell_list = nn.CellList([conv2, avg_pool]) | cell_list = nn.CellList([conv2, avg_pool]) | ||||
| assert len(cell_list) == 2 | assert len(cell_list) == 2 | ||||
| @@ -118,7 +121,6 @@ class TestCellList(): | |||||
| cell = item | cell = item | ||||
| assert type(cell).__name__ == 'AvgPool2d' | assert type(cell).__name__ == 'AvgPool2d' | ||||
| def test_add(self): | def test_add(self): | ||||
| cell_list = nn.CellList([conv2, avg_pool]) | cell_list = nn.CellList([conv2, avg_pool]) | ||||
| cell_list += [conv2] | cell_list += [conv2] | ||||
| @@ -15,10 +15,11 @@ | |||||
| """ test nn.Dense """ | """ test nn.Dense """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | |||||
| from mindspore.common.api import _executor | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| @@ -68,6 +69,7 @@ def test_dense_channels_error(): | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| """ Net definition """ | """ Net definition """ | ||||
| def __init__(self, | def __init__(self, | ||||
| input_channels, | input_channels, | ||||
| output_channels, | output_channels, | ||||
| @@ -15,12 +15,14 @@ | |||||
| """ Test Dropout """ | """ Test Dropout """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore import context | from mindspore import context | ||||
| context.set_context(device_target="Ascend") | context.set_context(device_target="Ascend") | ||||
| def test_check_dropout_3(): | def test_check_dropout_3(): | ||||
| Tensor(np.ones([20, 16, 50]).astype(np.int32)) | Tensor(np.ones([20, 16, 50]).astype(np.int32)) | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| @@ -14,7 +14,7 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ Test Dynamic Learning Rate """ | """ Test Dynamic Learning Rate """ | ||||
| import pytest | import pytest | ||||
| import mindspore | |||||
| from mindspore.nn import dynamic_lr as dr | from mindspore.nn import dynamic_lr as dr | ||||
| milestone = [10, 20, 30] | milestone = [10, 20, 30] | ||||
| @@ -29,8 +29,9 @@ min_lr = 0.01 | |||||
| max_lr = 0.1 | max_lr = 0.1 | ||||
| power = 0.5 | power = 0.5 | ||||
| class TestInputs: | class TestInputs: | ||||
| def test_milestone1(self): | |||||
| milestone1 = 1 | milestone1 = 1 | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| dr.piecewise_constant_lr(milestone1, learning_rates) | dr.piecewise_constant_lr(milestone1, learning_rates) | ||||
| @@ -58,7 +59,7 @@ class TestInputs: | |||||
| lr = True | lr = True | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| dr.exponential_decay_lr(lr, decay_rate, total_step, step_per_epoch, decay_epoch) | dr.exponential_decay_lr(lr, decay_rate, total_step, step_per_epoch, decay_epoch) | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) | dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) | ||||
| @@ -71,7 +72,7 @@ class TestInputs: | |||||
| dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) | dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) | ||||
| def test_end_learning_rate_type(self): | def test_end_learning_rate_type(self): | ||||
| lr = True | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| dr.polynomial_decay_lr(learning_rate, lr, total_step, step_per_epoch, decay_epoch, power) | dr.polynomial_decay_lr(learning_rate, lr, total_step, step_per_epoch, decay_epoch, power) | ||||
| @@ -127,7 +128,7 @@ class TestInputs: | |||||
| step_per_epoch1 = -1 | step_per_epoch1 = -1 | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch1, decay_epoch) | dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch1, decay_epoch) | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch1, decay_epoch) | dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch1, decay_epoch) | ||||
| @@ -226,9 +227,10 @@ def test_cosine_decay(): | |||||
| lr = dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch) | lr = dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch) | ||||
| assert len(lr) == total_step | assert len(lr) == total_step | ||||
| def test_polynomial_decay(): | def test_polynomial_decay(): | ||||
| lr1 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) | lr1 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) | ||||
| assert len(lr1) == total_step | assert len(lr1) == total_step | ||||
| lr2 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power, | lr2 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power, | ||||
| True) | |||||
| assert len(lr2) == total_step | assert len(lr2) == total_step | ||||
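The decay helpers exercised above all return a plain Python list with one learning rate per global step, which is what the length assertions check. A small sketch; only milestone, min_lr, and max_lr are visible in this hunk, so the other values are assumed:

    from mindspore.nn import dynamic_lr as dr

    milestone = [10, 20, 30]
    learning_rates = [0.1, 0.05, 0.01]    # assumed values, one rate per milestone
    lr = dr.piecewise_constant_lr(milestone, learning_rates)
    assert len(lr) == milestone[-1]       # 30 entries; steps 0-9 use 0.1, and so on

    cos = dr.cosine_decay_lr(0.01, 0.1, total_step=40, step_per_epoch=2, decay_epoch=2)
    assert len(cos) == 40                 # total_step/step_per_epoch/decay_epoch assumed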
| @@ -14,11 +14,10 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test_embedding """ | """ test_embedding """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore import dtype as mstype | from mindspore import dtype as mstype | ||||
| from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor | |||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| @@ -39,6 +38,7 @@ def test_check_embedding_lookup_2(): | |||||
| use_one_hot_embeddings=True) | use_one_hot_embeddings=True) | ||||
| m(Tensor(np.ones([128]), mstype.int32)) | m(Tensor(np.ones([128]), mstype.int32)) | ||||
| @non_graph_engine | @non_graph_engine | ||||
| def test_check_embedding_lookup_3(): | def test_check_embedding_lookup_3(): | ||||
| m = EmbeddingLookup(vocab_size=32000, | m = EmbeddingLookup(vocab_size=32000, | ||||
| @@ -48,11 +48,12 @@ def test_check_embedding_lookup_3(): | |||||
| initializer_range=0.01) | initializer_range=0.01) | ||||
| m(Tensor(np.ones([128]), mstype.int32)) | m(Tensor(np.ones([128]), mstype.int32)) | ||||
| @non_graph_engine | @non_graph_engine | ||||
| def test_embedding_post_1(): | def test_embedding_post_1(): | ||||
| m = EmbeddingPostprocessor(embedding_size=768, | m = EmbeddingPostprocessor(embedding_size=768, | ||||
| embedding_shape=[1, 128, 768], | |||||
| use_token_type=True) | |||||
| m(Tensor(np.ones([128]), mstype.int32), Tensor(np.ones([1, 128, 768]), mstype.float32)) | m(Tensor(np.ones([128]), mstype.int32), Tensor(np.ones([1, 128, 768]), mstype.float32)) | ||||
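For context on the call that closes this hunk: EmbeddingPostprocessor adds token-type (and position) embeddings on top of existing word embeddings. A sketch; reading the two inputs as token-type ids and word embeddings is an assumption based on their shapes:

    import numpy as np
    from mindspore import Tensor
    from mindspore import dtype as mstype
    from mindspore.model_zoo.Bert_NEZHA import EmbeddingPostprocessor

    m = EmbeddingPostprocessor(embedding_size=768,
                               embedding_shape=[1, 128, 768],
                               use_token_type=True)
    token_type_ids = Tensor(np.ones([128]), mstype.int32)        # assumed meaning
    word_embeddings = Tensor(np.ones([1, 128, 768]), mstype.float32)
    out = m(token_type_ids, word_embeddings)                     # follows embedding_shape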
| @@ -16,9 +16,10 @@ | |||||
| test flatten api | test flatten api | ||||
| """ | """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| @@ -15,14 +15,17 @@ | |||||
| """ test image gradients """ | """ test image gradients """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | |||||
| import mindspore.context as context | |||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| import mindspore.context as context | |||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| context.set_context(device_target="Ascend") | context.set_context(device_target="Ascend") | ||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| @@ -32,9 +35,10 @@ class Net(nn.Cell): | |||||
| def construct(self, x): | def construct(self, x): | ||||
| return self.image_gradients(x) | return self.image_gradients(x) | ||||
| def test_compile(): | def test_compile(): | ||||
| # input shape 1 x 1 x 2 x 2 | # input shape 1 x 1 x 2 x 2 | ||||
| image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) | |||||
| image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32) | |||||
| net = Net() | net = Net() | ||||
| _executor.compile(net, image) | _executor.compile(net, image) | ||||
| @@ -42,16 +46,17 @@ def test_compile(): | |||||
| def test_compile_multi_channel(): | def test_compile_multi_channel(): | ||||
| # input shape 4 x 2 x 2 x 2 | # input shape 4 x 2 x 2 x 2 | ||||
| dtype = mstype.int32 | dtype = mstype.int32 | ||||
| image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]], | |||||
| [[[3,5],[7,9]], [[11,13],[15,17]]], | |||||
| [[[5,10],[15,20]], [[25,30],[35,40]]], | |||||
| [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype) | |||||
| image = Tensor(np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], | |||||
| [[[3, 5], [7, 9]], [[11, 13], [15, 17]]], | |||||
| [[[5, 10], [15, 20]], [[25, 30], [35, 40]]], | |||||
| [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype) | |||||
| net = Net() | net = Net() | ||||
| _executor.compile(net, image) | _executor.compile(net, image) | ||||
| def test_invalid_5d_input(): | def test_invalid_5d_input(): | ||||
| dtype = mstype.float32 | dtype = mstype.float32 | ||||
| image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype) | image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype) | ||||
| net = Net() | net = Net() | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| _executor.compile(net, image) | |||||
| @@ -14,12 +14,12 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test loss """ | """ test loss """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| import mindspore | |||||
| def test_L1Loss(): | def test_L1Loss(): | ||||
| loss = nn.L1Loss() | loss = nn.L1Loss() | ||||
| @@ -60,5 +60,5 @@ def test_SoftmaxCrossEntropyExpand(): | |||||
| loss = nn.SoftmaxCrossEntropyExpand() | loss = nn.SoftmaxCrossEntropyExpand() | ||||
| logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32)) | logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32)) | ||||
| labels = Tensor(np.random.randint(0, 9, [10,]).astype(np.float32)) | |||||
| labels = Tensor(np.random.randint(0, 9, [10, ]).astype(np.float32)) | |||||
| _executor.compile(loss, logits, labels) | _executor.compile(loss, logits, labels) | ||||
| @@ -14,6 +14,7 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test lstm """ | """ test lstm """ | ||||
| import pytest | import pytest | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| from mindspore import nn | from mindspore import nn | ||||
| from ..ut_filter import run_on_gpu | from ..ut_filter import run_on_gpu | ||||
| @@ -22,6 +23,7 @@ from ....ops_common import convert | |||||
| class LstmTestNet(nn.Cell): | class LstmTestNet(nn.Cell): | ||||
| """ LstmTestNet definition """ | """ LstmTestNet definition """ | ||||
| def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional): | def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional): | ||||
| super(LstmTestNet, self).__init__() | super(LstmTestNet, self).__init__() | ||||
| self.lstm = nn.LSTM(input_size=input_size, | self.lstm = nn.LSTM(input_size=input_size, | ||||
| @@ -32,7 +34,6 @@ class LstmTestNet(nn.Cell): | |||||
| bidirectional=bidirectional, | bidirectional=bidirectional, | ||||
| dropout=0.0) | dropout=0.0) | ||||
| def construct(self, inp, h0, c0): | def construct(self, inp, h0, c0): | ||||
| return self.lstm(inp, (h0, c0)) | return self.lstm(inp, (h0, c0)) | ||||
| @@ -86,6 +87,7 @@ def test_compile(args): | |||||
| out = net(*inputs) | out = net(*inputs) | ||||
| print(f"out: {out}") | print(f"out: {out}") | ||||
| @run_on_gpu | @run_on_gpu | ||||
| @pytest.mark.parametrize('args', test_case_cell_ops, ids=lambda x: x[0]) | @pytest.mark.parametrize('args', test_case_cell_ops, ids=lambda x: x[0]) | ||||
| def test_execute(args): | def test_execute(args): | ||||
| @@ -14,10 +14,11 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test nn embedding """ | """ test nn embedding """ | ||||
| import numpy as np | import numpy as np | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common import dtype | from mindspore.common import dtype | ||||
| from mindspore.nn import Embedding | |||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.nn import Embedding | |||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| @@ -41,6 +42,7 @@ def test_check_embedding_3(): | |||||
| input_data = Tensor(np.ones([8, 128]), dtype.int32) | input_data = Tensor(np.ones([8, 128]), dtype.int32) | ||||
| _executor.compile(net, input_data) | _executor.compile(net, input_data) | ||||
| @non_graph_engine | @non_graph_engine | ||||
| def test_print_embedding(): | def test_print_embedding(): | ||||
| net = Embedding(20000, 768, False) | net = Embedding(20000, 768, False) | ||||
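The print test above builds the layer positionally; spelled out, the three arguments are vocab_size, embedding_size, and use_one_hot (naming per the nn.Embedding signature), and compilation only traces the graph:

    import numpy as np
    from mindspore import Tensor
    from mindspore.common import dtype
    from mindspore.common.api import _executor
    from mindspore.nn import Embedding

    net = Embedding(20000, 768, False)            # vocab_size, embedding_size, use_one_hot
    ids = Tensor(np.ones([8, 128]), dtype.int32)  # a batch of token-id sequences
    _executor.compile(net, ids)                   # graph compile only, no device run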
| @@ -13,13 +13,12 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test nn pad """ | """ test nn pad """ | ||||
| from mindspore import Tensor | |||||
| from mindspore.ops import operations as P | |||||
| import numpy as np | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.ops.composite import GradOperation | |||||
| from mindspore import Tensor | |||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| import numpy as np | |||||
| import mindspore.context as context | |||||
| from mindspore.ops.composite import GradOperation | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| @@ -14,6 +14,7 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test norm """ | """ test norm """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| @@ -15,10 +15,11 @@ | |||||
| """ test parameter """ | """ test parameter """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore import Tensor, Parameter, ParameterTuple | from mindspore import Tensor, Parameter, ParameterTuple | ||||
| from mindspore._checkparam import _check_str_by_regular | |||||
| from mindspore.common import dtype as mstype | from mindspore.common import dtype as mstype | ||||
| from mindspore.common.initializer import initializer | from mindspore.common.initializer import initializer | ||||
| from mindspore._checkparam import _check_str_by_regular | |||||
| def test_parameter_init(): | def test_parameter_init(): | ||||
| @@ -30,7 +31,7 @@ def test_parameter_init(): | |||||
| def test_parameter_tuple_illegal(): | def test_parameter_tuple_illegal(): | ||||
| p1 = Parameter(initializer(0, [1], mstype.int32), name="global_step1") | p1 = Parameter(initializer(0, [1], mstype.int32), name="global_step1") | ||||
| p2 = Parameter(initializer(0, [1], mstype.int32), name="global_step2") | p2 = Parameter(initializer(0, [1], mstype.int32), name="global_step2") | ||||
| plist = [p1,p2] | |||||
| plist = [p1, p2] | |||||
| plist2 = [p1, "str"] | plist2 = [p1, "str"] | ||||
| ptuple = (p1, p2) | ptuple = (p1, p2) | ||||
| ptuple_str = ("2", "1") | ptuple_str = ("2", "1") | ||||
| @@ -100,21 +101,21 @@ def test_parameter_init_illegal(): | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_tuple) | Parameter(tensor, name=data_str, requires_grad=data_tuple) | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_bool) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_bool) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=dat) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=dat) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=tensor) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=tensor) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_none) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_none) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_str) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_str) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_int) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_int) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_list) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_list) | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_tuple) | |||||
| Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_tuple) | |||||
| def test_check_str_by_regular(): | def test_check_str_by_regular(): | ||||
| @@ -16,9 +16,10 @@ | |||||
| test pooling api | test pooling api | ||||
| """ | """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| class AvgNet(nn.Cell): | class AvgNet(nn.Cell): | ||||
| @@ -40,6 +41,7 @@ def test_compile_avg(): | |||||
| class MaxNet(nn.Cell): | class MaxNet(nn.Cell): | ||||
| """ MaxNet definition """ | """ MaxNet definition """ | ||||
| def __init__(self, | def __init__(self, | ||||
| kernel_size, | kernel_size, | ||||
| stride=None, | stride=None, | ||||
| @@ -68,7 +70,8 @@ class Avg1dNet(nn.Cell): | |||||
| def construct(self, x): | def construct(self, x): | ||||
| return self.avg1d(x) | return self.avg1d(x) | ||||
| def test_avg1d(): | def test_avg1d(): | ||||
| net = Avg1dNet(6, 1) | net = Avg1dNet(6, 1) | ||||
| input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32)) | input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32)) | ||||
| _executor.compile(net, input) | |||||
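As a shape sanity check for the 1d case above: with kernel_size=6 and stride=1 over a length-6 axis, the pooled output length is (6 - 6)/1 + 1 = 1. A sketch reusing the wrapper from this hunk (assuming Avg1dNet wraps nn.AvgPool1d, as its name suggests):

    import numpy as np
    from mindspore import Tensor
    from mindspore.common.api import _executor

    net = Avg1dNet(6, 1)   # kernel_size=6, stride=1
    x = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
    _executor.compile(net, x)   # pooled length along the last axis: (6-6)//1 + 1 = 1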
| @@ -17,11 +17,11 @@ test psnr | |||||
| """ | """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | |||||
| from mindspore.common import dtype as mstype | from mindspore.common import dtype as mstype | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore import Tensor | |||||
| class PSNRNet(nn.Cell): | class PSNRNet(nn.Cell): | ||||
| @@ -40,6 +40,7 @@ def test_compile_psnr(): | |||||
| img2 = Tensor(np.random.random((8, 3, 16, 16))) | img2 = Tensor(np.random.random((8, 3, 16, 16))) | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_compile_psnr_grayscale(): | def test_compile_psnr_grayscale(): | ||||
| max_val = 255 | max_val = 255 | ||||
| net = PSNRNet(max_val) | net = PSNRNet(max_val) | ||||
| @@ -47,21 +48,25 @@ def test_compile_psnr_grayscale(): | |||||
| img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) | img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_psnr_max_val_negative(): | def test_psnr_max_val_negative(): | ||||
| max_val = -1 | max_val = -1 | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = PSNRNet(max_val) | net = PSNRNet(max_val) | ||||
| def test_psnr_max_val_bool(): | def test_psnr_max_val_bool(): | ||||
| max_val = True | max_val = True | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| net = PSNRNet(max_val) | net = PSNRNet(max_val) | ||||
| def test_psnr_max_val_zero(): | def test_psnr_max_val_zero(): | ||||
| max_val = 0 | max_val = 0 | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = PSNRNet(max_val) | net = PSNRNet(max_val) | ||||
| def test_psnr_different_shape(): | def test_psnr_different_shape(): | ||||
| shape_1 = (8, 3, 16, 16) | shape_1 = (8, 3, 16, 16) | ||||
| shape_2 = (8, 3, 8, 8) | shape_2 = (8, 3, 8, 8) | ||||
| @@ -71,6 +76,7 @@ def test_psnr_different_shape(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_psnr_different_dtype(): | def test_psnr_different_dtype(): | ||||
| dtype_1 = mstype.float32 | dtype_1 = mstype.float32 | ||||
| dtype_2 = mstype.float16 | dtype_2 = mstype.float16 | ||||
| @@ -80,6 +86,7 @@ def test_psnr_different_dtype(): | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_psnr_invalid_5d_input(): | def test_psnr_invalid_5d_input(): | ||||
| shape_1 = (8, 3, 16, 16) | shape_1 = (8, 3, 16, 16) | ||||
| shape_2 = (8, 3, 8, 8) | shape_2 = (8, 3, 8, 8) | ||||
| @@ -17,10 +17,11 @@ test ssim | |||||
| """ | """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | |||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| from mindspore.common.api import _executor | |||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import _executor | |||||
| class SSIMNet(nn.Cell): | class SSIMNet(nn.Cell): | ||||
| @@ -38,44 +39,53 @@ def test_compile(): | |||||
| img2 = Tensor(np.random.random((8, 3, 16, 16))) | img2 = Tensor(np.random.random((8, 3, 16, 16))) | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_compile_grayscale(): | def test_compile_grayscale(): | ||||
| max_val = 255 | max_val = 255 | ||||
| net = SSIMNet(max_val = max_val) | |||||
| net = SSIMNet(max_val=max_val) | |||||
| img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) | img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) | ||||
| img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) | img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_ssim_max_val_negative(): | def test_ssim_max_val_negative(): | ||||
| max_val = -1 | max_val = -1 | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(max_val) | net = SSIMNet(max_val) | ||||
| def test_ssim_max_val_bool(): | def test_ssim_max_val_bool(): | ||||
| max_val = True | max_val = True | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| net = SSIMNet(max_val) | net = SSIMNet(max_val) | ||||
| def test_ssim_max_val_zero(): | def test_ssim_max_val_zero(): | ||||
| max_val = 0 | max_val = 0 | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(max_val) | net = SSIMNet(max_val) | ||||
| def test_ssim_filter_size_float(): | def test_ssim_filter_size_float(): | ||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| net = SSIMNet(filter_size=1.1) | net = SSIMNet(filter_size=1.1) | ||||
| def test_ssim_filter_size_zero(): | def test_ssim_filter_size_zero(): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(filter_size=0) | net = SSIMNet(filter_size=0) | ||||
| def test_ssim_filter_sigma_zero(): | def test_ssim_filter_sigma_zero(): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(filter_sigma=0.0) | net = SSIMNet(filter_sigma=0.0) | ||||
| def test_ssim_filter_sigma_negative(): | def test_ssim_filter_sigma_negative(): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(filter_sigma=-0.1) | net = SSIMNet(filter_sigma=-0.1) | ||||
| def test_ssim_k1_k2_wrong_value(): | def test_ssim_k1_k2_wrong_value(): | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(k1=1.1) | net = SSIMNet(k1=1.1) | ||||
| @@ -95,6 +105,7 @@ def test_ssim_k1_k2_wrong_value(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| net = SSIMNet(k2=-1.0) | net = SSIMNet(k2=-1.0) | ||||
| def test_ssim_different_shape(): | def test_ssim_different_shape(): | ||||
| shape_1 = (8, 3, 16, 16) | shape_1 = (8, 3, 16, 16) | ||||
| shape_2 = (8, 3, 8, 8) | shape_2 = (8, 3, 8, 8) | ||||
| @@ -104,6 +115,7 @@ def test_ssim_different_shape(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_ssim_different_dtype(): | def test_ssim_different_dtype(): | ||||
| dtype_1 = mstype.float32 | dtype_1 = mstype.float32 | ||||
| dtype_2 = mstype.float16 | dtype_2 = mstype.float16 | ||||
| @@ -113,6 +125,7 @@ def test_ssim_different_dtype(): | |||||
| with pytest.raises(TypeError): | with pytest.raises(TypeError): | ||||
| _executor.compile(net, img1, img2) | _executor.compile(net, img1, img2) | ||||
| def test_ssim_invalid_5d_input(): | def test_ssim_invalid_5d_input(): | ||||
| shape_1 = (8, 3, 16, 16) | shape_1 = (8, 3, 16, 16) | ||||
| shape_2 = (8, 3, 8, 8) | shape_2 = (8, 3, 8, 8) | ||||
| @@ -17,9 +17,9 @@ test_structure_output | |||||
| """ | """ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.ops.operations as P | |||||
| from mindspore import Tensor, context | from mindspore import Tensor, context | ||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| import mindspore.ops.operations as P | |||||
| from mindspore.ops.functional import depend | from mindspore.ops.functional import depend | ||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| @@ -13,4 +13,3 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| """ init vm impl """ | """ init vm impl """ | ||||
| from ....vm_impl import vm | |||||
| @@ -13,27 +13,26 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | |||||
| import pytest | import pytest | ||||
| from mindspore import context | |||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore import context | |||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore.nn.wrap.cell_wrapper import WithLossCell | from mindspore.nn.wrap.cell_wrapper import WithLossCell | ||||
| from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager | |||||
| from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell | from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore.ops import functional as F | from mindspore.ops import functional as F | ||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.train import Model | |||||
| from ....dataset_mock import MindData | |||||
| from mindspore.nn.optim import Lamb | |||||
| from mindspore.ops._utils import _get_broadcast_shape | |||||
| from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, prim_attr_register | |||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops._grad.grad_base import bprop_getters | from mindspore.ops._grad.grad_base import bprop_getters | ||||
| from mindspore.ops._grad.grad_math_ops import binop_grad_common | from mindspore.ops._grad.grad_math_ops import binop_grad_common | ||||
| from mindspore.ops._utils import _get_broadcast_shape | |||||
| from mindspore.ops.primitive import PrimitiveWithInfer, prim_attr_register | |||||
| from mindspore.train.loss_scale_manager import DynamicLossScaleManager | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| class MockNeg(PrimitiveWithInfer): | class MockNeg(PrimitiveWithInfer): | ||||
| @prim_attr_register | @prim_attr_register | ||||
| def __init__(self): | def __init__(self): | ||||
| @@ -47,6 +46,7 @@ class MockNeg(PrimitiveWithInfer): | |||||
| raise TypeError("InferError") | raise TypeError("InferError") | ||||
| return input_x | return input_x | ||||
| class MockSub(PrimitiveWithInfer): | class MockSub(PrimitiveWithInfer): | ||||
| @prim_attr_register | @prim_attr_register | ||||
| def __init__(self): | def __init__(self): | ||||
| @@ -59,6 +59,7 @@ class MockSub(PrimitiveWithInfer): | |||||
| def infer_dtype(self, x_dtype, y_dtype): | def infer_dtype(self, x_dtype, y_dtype): | ||||
| return x_dtype | return x_dtype | ||||
| @bprop_getters.register(MockSub) | @bprop_getters.register(MockSub) | ||||
| def get_bprop_mock_sub(self): | def get_bprop_mock_sub(self): | ||||
| """Grad definition for `MockSub` operation.""" | """Grad definition for `MockSub` operation.""" | ||||
| @@ -66,8 +67,10 @@ def get_bprop_mock_sub(self): | |||||
| def bprop(x, y, out, dout): | def bprop(x, y, out, dout): | ||||
| return binop_grad_common(x, y, dout, neg_func(dout)) | return binop_grad_common(x, y, dout, neg_func(dout)) | ||||
| return bprop | return bprop | ||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| def __init__(self, in_features, out_features): | def __init__(self, in_features, out_features): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| @@ -80,6 +83,7 @@ class Net(nn.Cell): | |||||
| output = self.add(self.matmul(input, self.weight), self.bias) | output = self.add(self.matmul(input, self.weight), self.bias) | ||||
| return output | return output | ||||
| class NetFP16(nn.Cell): | class NetFP16(nn.Cell): | ||||
| def __init__(self, in_features, out_features): | def __init__(self, in_features, out_features): | ||||
| super(NetFP16, self).__init__() | super(NetFP16, self).__init__() | ||||
| @@ -90,16 +94,19 @@ class NetFP16(nn.Cell): | |||||
| self.cast = P.Cast() | self.cast = P.Cast() | ||||
| def construct(self, input): | def construct(self, input): | ||||
| output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), | |||||
| self.cast(self.bias, mstype.float16)), mstype.float32) | |||||
| output = self.cast( | |||||
| self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), | |||||
| self.cast(self.bias, mstype.float16)), mstype.float32) | |||||
| return output | return output | ||||
| def get_axis(x): | def get_axis(x): | ||||
| shape = F.shape(x) | shape = F.shape(x) | ||||
| length = F.tuple_len(shape) | length = F.tuple_len(shape) | ||||
| perm = F.make_range(0, length) | perm = F.make_range(0, length) | ||||
| return perm | return perm | ||||
| class MSELoss(nn.Cell): | class MSELoss(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(MSELoss, self).__init__() | super(MSELoss, self).__init__() | ||||
| @@ -107,17 +114,21 @@ class MSELoss(nn.Cell): | |||||
| self.square = P.Square() | self.square = P.Square() | ||||
| self.reduce_mean = P.ReduceMean() | self.reduce_mean = P.ReduceMean() | ||||
| self.sub = MockSub() | self.sub = MockSub() | ||||
| def construct(self, data, label): | def construct(self, data, label): | ||||
| diff = self.sub(data, label) | diff = self.sub(data, label) | ||||
| return self.reduce_mean(self.square(diff), get_axis(diff)) | return self.reduce_mean(self.square(diff), get_axis(diff)) | ||||
| class NegCell(nn.Cell): | class NegCell(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(NegCell, self).__init__() | super(NegCell, self).__init__() | ||||
| self.neg = MockNeg() | self.neg = MockNeg() | ||||
| def construct(self, x): | def construct(self, x): | ||||
| return self.neg(x) | return self.neg(x) | ||||
| class Net3(nn.Cell): | class Net3(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super().__init__() | super().__init__() | ||||
| @@ -146,6 +157,7 @@ class SequenceNet(nn.Cell): | |||||
| x = self.seq(x) + bbb | x = self.seq(x) + bbb | ||||
| return x | return x | ||||
| def test_sequential_resolve_error(): | def test_sequential_resolve_error(): | ||||
| input_np = np.random.randn(2, 3, 4, 5).astype(np.float32) | input_np = np.random.randn(2, 3, 4, 5).astype(np.float32) | ||||
| input_me = Tensor(input_np) | input_me = Tensor(input_np) | ||||
| @@ -153,6 +165,7 @@ def test_sequential_resolve_error(): | |||||
| with pytest.raises(RuntimeError) as e: | with pytest.raises(RuntimeError) as e: | ||||
| net(input_me) | net(input_me) | ||||
| def test_compile_grad_error(): | def test_compile_grad_error(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -164,9 +177,8 @@ def test_compile_grad_error(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| scale_manager = DynamicLossScaleManager() | scale_manager = DynamicLossScaleManager() | ||||
| update_cell = scale_manager.get_update_cell() | update_cell = scale_manager.get_update_cell() | ||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) | |||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) | |||||
| train_network.set_train() | train_network.set_train() | ||||
| with pytest.raises(TypeError) as e: | with pytest.raises(TypeError) as e: | ||||
| train_network(inputs, label) | train_network(inputs, label) | ||||
| print (e) | |||||
| print(e) | |||||
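The failure-path test above uses the same wiring as every loss-scale test in this file: a manager owns the scaling policy, its update cell is handed to the one-step wrapper, and the wrapper adjusts the scale whenever a step overflows. A condensed sketch; Net and MSELoss are the classes defined in the hunks above, and the Momentum hyperparameters are assumed:

    import numpy as np
    from mindspore import Tensor
    from mindspore.nn.optim import Momentum
    from mindspore.nn.wrap.cell_wrapper import WithLossCell
    from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
    from mindspore.train.loss_scale_manager import DynamicLossScaleManager

    net = Net(16, 16)
    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    scale_manager = DynamicLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,
                                                  scale_update_cell=update_cell)
    train_network.set_train()
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    output = train_network(inputs, label)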
| @@ -13,19 +13,20 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import context | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore import context | |||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.nn.optim import Lamb | |||||
| from mindspore.nn.optim import Momentum, Adam | |||||
| from mindspore.nn.wrap.cell_wrapper import WithLossCell | from mindspore.nn.wrap.cell_wrapper import WithLossCell | ||||
| from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell | from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell | ||||
| from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager | |||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn.optim import Momentum, Adam | |||||
| from mindspore.ops import functional as F | from mindspore.ops import functional as F | ||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.ops import operations as P | |||||
| from mindspore.train import Model | from mindspore.train import Model | ||||
| from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager | |||||
| from ....dataset_mock import MindData | from ....dataset_mock import MindData | ||||
| from mindspore.nn.optim import Lamb | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| @@ -36,6 +37,7 @@ class MindDataSet(MindData): | |||||
| np_types=dataset_types, | np_types=dataset_types, | ||||
| output_shapes=dataset_shapes, | output_shapes=dataset_shapes, | ||||
| input_indexs=(0, 1)) | input_indexs=(0, 1)) | ||||
| def __next__(self): | def __next__(self): | ||||
| if self._size < self._iter_num: | if self._size < self._iter_num: | ||||
| raise StopIteration | raise StopIteration | ||||
| @@ -45,6 +47,7 @@ class MindDataSet(MindData): | |||||
| next.append(Tensor(np.ones(shape).astype(type))) | next.append(Tensor(np.ones(shape).astype(type))) | ||||
| return tuple(next) | return tuple(next) | ||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| def __init__(self, in_features, out_features): | def __init__(self, in_features, out_features): | ||||
| super(Net, self).__init__() | super(Net, self).__init__() | ||||
| @@ -57,6 +60,7 @@ class Net(nn.Cell): | |||||
| output = self.add(self.matmul(input, self.weight), self.bias) | output = self.add(self.matmul(input, self.weight), self.bias) | ||||
| return output | return output | ||||
| class NetFP16(nn.Cell): | class NetFP16(nn.Cell): | ||||
| def __init__(self, in_features, out_features): | def __init__(self, in_features, out_features): | ||||
| super(NetFP16, self).__init__() | super(NetFP16, self).__init__() | ||||
| @@ -67,10 +71,12 @@ class NetFP16(nn.Cell): | |||||
| self.cast = P.Cast() | self.cast = P.Cast() | ||||
| def construct(self, input): | def construct(self, input): | ||||
| output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), | |||||
| self.cast(self.bias, mstype.float16)), mstype.float32) | |||||
| output = self.cast( | |||||
| self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), | |||||
| self.cast(self.bias, mstype.float16)), mstype.float32) | |||||
| return output | return output | ||||
| def get_axis(x): | def get_axis(x): | ||||
| shape_op = P.Shape() | shape_op = P.Shape() | ||||
| shape = shape_op(x) | shape = shape_op(x) | ||||
| @@ -78,6 +84,7 @@ def get_axis(x): | |||||
| perm = F.make_range(0, length) | perm = F.make_range(0, length) | ||||
| return perm | return perm | ||||
| class MSELoss(nn.Cell): | class MSELoss(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(MSELoss, self).__init__() | super(MSELoss, self).__init__() | ||||
| @@ -89,6 +96,7 @@ class MSELoss(nn.Cell): | |||||
| diff = data - label | diff = data - label | ||||
| return self.reduce_mean(self.square(diff), get_axis(diff)) | return self.reduce_mean(self.square(diff), get_axis(diff)) | ||||
| def test_momentum_compile(): | def test_momentum_compile(): | ||||
| inputs = Tensor(np.ones([15, 1]).astype(np.float32)) | inputs = Tensor(np.ones([15, 1]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([15, 1]).astype(np.float32)) | label = Tensor(np.zeros([15, 1]).astype(np.float32)) | ||||
| @@ -104,6 +112,7 @@ def test_momentum_compile(): | |||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_not_overflow(): | def test_compile_fp16_not_overflow(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -119,6 +128,7 @@ def test_compile_fp16_not_overflow(): | |||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_lr_overflow(): | def test_compile_fp16_lr_overflow(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -134,6 +144,7 @@ def test_compile_fp16_lr_overflow(): | |||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_overflow(): | def test_compile_fp16_overflow(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -148,6 +159,7 @@ def test_compile_fp16_overflow(): | |||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_lr_overflow_with_lossscale_update(): | def test_compile_fp16_lr_overflow_with_lossscale_update(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -165,6 +177,7 @@ def test_compile_fp16_lr_overflow_with_lossscale_update(): | |||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_f16_model_train(): | def test_compile_f16_model_train(): | ||||
| dataset_types = (np.float32, np.float32) | dataset_types = (np.float32, np.float32) | ||||
| dataset_shapes = ((16, 16), (16, 16)) | dataset_shapes = ((16, 16), (16, 16)) | ||||
| @@ -205,11 +218,12 @@ def test_compile_fp16_lr_overflow_fixed_feed(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| scale_manager = FixedLossScaleManager() | scale_manager = FixedLossScaleManager() | ||||
| update_cell = scale_manager.get_update_cell() | update_cell = scale_manager.get_update_cell() | ||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) | |||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) | |||||
| train_network.set_train() | train_network.set_train() | ||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_lr_overflow_dynamic_feed(): | def test_compile_fp16_lr_overflow_dynamic_feed(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -222,11 +236,12 @@ def test_compile_fp16_lr_overflow_dynamic_feed(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| scale_manager = DynamicLossScaleManager() | scale_manager = DynamicLossScaleManager() | ||||
| update_cell = scale_manager.get_update_cell() | update_cell = scale_manager.get_update_cell() | ||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) | |||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) | |||||
| train_network.set_train() | train_network.set_train() | ||||
| output = train_network(inputs, label, scaling_sens) | output = train_network(inputs, label, scaling_sens) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_lr_overflow_fixed_graph(): | def test_compile_fp16_lr_overflow_fixed_graph(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -238,11 +253,12 @@ def test_compile_fp16_lr_overflow_fixed_graph(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| scale_manager = FixedLossScaleManager(drop_overflow_update=True) | scale_manager = FixedLossScaleManager(drop_overflow_update=True) | ||||
| update_cell = scale_manager.get_update_cell() | update_cell = scale_manager.get_update_cell() | ||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) | |||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) | |||||
| train_network.set_train() | train_network.set_train() | ||||
| output = train_network(inputs, label) | output = train_network(inputs, label) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_compile_fp16_lr_overflow_dynamic_graph(): | def test_compile_fp16_lr_overflow_dynamic_graph(): | ||||
| inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | inputs = Tensor(np.ones([16, 16]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([16, 16]).astype(np.float32)) | label = Tensor(np.zeros([16, 16]).astype(np.float32)) | ||||
| @@ -254,11 +270,12 @@ def test_compile_fp16_lr_overflow_dynamic_graph(): | |||||
| net_with_loss = WithLossCell(net, loss) | net_with_loss = WithLossCell(net, loss) | ||||
| scale_manager = DynamicLossScaleManager() | scale_manager = DynamicLossScaleManager() | ||||
| update_cell = scale_manager.get_update_cell() | update_cell = scale_manager.get_update_cell() | ||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) | |||||
| train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) | |||||
| train_network.set_train() | train_network.set_train() | ||||
| output = train_network(inputs, label) | output = train_network(inputs, label) | ||||
| print("the result is ", output) | print("the result is ", output) | ||||
| def test_adam_compile(): | def test_adam_compile(): | ||||
| inputs = Tensor(np.ones([15, 1]).astype(np.float32)) | inputs = Tensor(np.ones([15, 1]).astype(np.float32)) | ||||
| label = Tensor(np.zeros([15, 1]).astype(np.float32)) | label = Tensor(np.zeros([15, 1]).astype(np.float32)) | ||||
| @@ -14,20 +14,22 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam | |||||
| from mindspore import context | from mindspore import context | ||||
| from mindspore.common.api import _executor | from mindspore.common.api import _executor | ||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam | |||||
| from mindspore.ops import operations as P | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| class LeNet5(nn.Cell): | class LeNet5(nn.Cell): | ||||
| """ LeNet5 definition """ | """ LeNet5 definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(LeNet5, self).__init__() | super(LeNet5, self).__init__() | ||||
| self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') | self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') | ||||
| @@ -13,16 +13,17 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.ops.composite as C | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| import mindspore.ops.composite as C | |||||
| context.set_context(mode=context.GRAPH_MODE, save_graphs=True) | context.set_context(mode=context.GRAPH_MODE, save_graphs=True) | ||||
| def test_parser_three_default_mixed_args_subnet(): | |||||
| class SubNetDefaultMixedArgs(Cell): | class SubNetDefaultMixedArgs(Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super().__init__() | super().__init__() | ||||
| @@ -55,7 +56,7 @@ def test_net_vararg_kwonlyarg_kwarg(): | |||||
| super(FirstNet, self).__init__() | super(FirstNet, self).__init__() | ||||
| self.net = SecondNet() | self.net = SecondNet() | ||||
| def construct(self, x=1, z=2+2+4, y=3): | |||||
| def construct(self, x=1, z=2 + 2 + 4, y=3): | |||||
| c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40) | c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40) | ||||
| return c | return c | ||||
| @@ -74,13 +75,14 @@ def test_net_vararg_kwonlyarg_kwarg(): | |||||
| net = FirstNet() | net = FirstNet() | ||||
| net() | net() | ||||
| def test_net_vararg_normal_input(): | def test_net_vararg_normal_input(): | ||||
| class FirstNet(Cell): | class FirstNet(Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(FirstNet, self).__init__() | super(FirstNet, self).__init__() | ||||
| self.net = SecondNet() | self.net = SecondNet() | ||||
| def construct(self, x=1, z=2+2+4, y=3): | |||||
| def construct(self, x=1, z=2 + 2 + 4, y=3): | |||||
| c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40) | c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40) | ||||
| return c | return c | ||||
| @@ -95,10 +97,12 @@ def test_net_vararg_normal_input(): | |||||
| d = var[0] * var[1] * var[2] * var[3] | d = var[0] * var[1] * var[2] * var[3] | ||||
| e = key1 - key2 - kwargs["key3"] + kwargs["key4"] | e = key1 - key2 - kwargs["key3"] + kwargs["key4"] | ||||
| return a + b + c + d + e | return a + b + c + d + e | ||||
| x = Tensor(np.ones((2, 3, 4), np.int32)) | x = Tensor(np.ones((2, 3, 4), np.int32)) | ||||
| net = FirstNet() | net = FirstNet() | ||||
| net(x, x, x) | net(x, x, x) | ||||
| def test_prim_vararg_kwonlyarg(): | def test_prim_vararg_kwonlyarg(): | ||||
| class FirstNet(Cell): | class FirstNet(Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| @@ -201,9 +205,11 @@ def test_net_variable_and_weights(): | |||||
| z = Tensor(np.ones((4,), np.float32)) | z = Tensor(np.ones((4,), np.float32)) | ||||
| net(x, y, z) | net(x, y, z) | ||||
| def test_net_vargs_expand(): | def test_net_vargs_expand(): | ||||
| class InputBackward(Cell): | class InputBackward(Cell): | ||||
| """ InputBackward definition """ | """ InputBackward definition """ | ||||
| def __init__(self, network, c1=None, c2=None): | def __init__(self, network, c1=None, c2=None): | ||||
| super(InputBackward, self).__init__() | super(InputBackward, self).__init__() | ||||
| self.network = network | self.network = network | ||||
| @@ -214,9 +220,11 @@ def test_net_vargs_expand(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return self.grad(self.network)(*inputs) | return self.grad(self.network)(*inputs) | ||||
| class AddNet(Cell): | class AddNet(Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(AddNet, self).__init__() | super(AddNet, self).__init__() | ||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| return x + y | return x + y | ||||
| @@ -227,7 +235,7 @@ def test_net_vargs_expand(): | |||||
| net.set_train() | net.set_train() | ||||
| net(x, y, sens) | net(x, y, sens) | ||||
| def test_mixed_precision_const_parameter(): | def test_mixed_precision_const_parameter(): | ||||
| class NetLoss(Cell): | class NetLoss(Cell): | ||||
| @@ -237,6 +245,7 @@ def test_mixed_precision_const_parameter(): | |||||
| self.up_sample1 = P.ResizeBilinear((14, 14)) | self.up_sample1 = P.ResizeBilinear((14, 14)) | ||||
| self.up_sample2 = P.ResizeBilinear((28, 28)) | self.up_sample2 = P.ResizeBilinear((28, 28)) | ||||
| self.up_sample3 = P.ResizeBilinear((36, 36)) | self.up_sample3 = P.ResizeBilinear((36, 36)) | ||||
| def construct(self, x, y, z, *args): | def construct(self, x, y, z, *args): | ||||
| ret = 0 | ret = 0 | ||||
| if args[0] == self.shape(z)[2]: | if args[0] == self.shape(z)[2]: | ||||
| @@ -250,20 +259,23 @@ def test_mixed_precision_const_parameter(): | |||||
| ret = x * y | ret = x * y | ||||
| ret = ret * z | ret = ret * z | ||||
| return ret | return ret | ||||
| class NetMain(Cell): | class NetMain(Cell): | ||||
| def __init__(self, loss_fn): | def __init__(self, loss_fn): | ||||
| super(NetMain, self).__init__() | super(NetMain, self).__init__() | ||||
| self.loss_fn = loss_fn | self.loss_fn = loss_fn | ||||
| self.shape = P.Shape() | self.shape = P.Shape() | ||||
| def construct(self, x, y, z): | def construct(self, x, y, z): | ||||
| size_x = self.shape(x)[2] | size_x = self.shape(x)[2] | ||||
| size_y = self.shape(y)[2] | size_y = self.shape(y)[2] | ||||
| ret = self.loss_fn(x, y, z, size_x, size_y) | ret = self.loss_fn(x, y, z, size_x, size_y) | ||||
| return ret | return ret | ||||
| loss_fn = NetLoss() | loss_fn = NetLoss() | ||||
| net = NetMain(loss_fn) | net = NetMain(loss_fn) | ||||
| net.add_flags_recursive(fp32=True) | net.add_flags_recursive(fp32=True) | ||||
| x = Tensor(np.ones((1, 3, 28, 28), np.float32)) | x = Tensor(np.ones((1, 3, 28, 28), np.float32)) | ||||
| y = Tensor(np.ones((1, 3, 14, 14), np.float32)) | y = Tensor(np.ones((1, 3, 14, 14), np.float32)) | ||||
| z = Tensor(np.ones((1, 3, 28, 28), np.float32)) | z = Tensor(np.ones((1, 3, 28, 28), np.float32)) | ||||
| out = net(x, y, z) | |||||
| @@ -13,14 +13,14 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| from mindspore import context | |||||
| import mindspore.ops.composite as C | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore import context | |||||
| from mindspore.common import dtype as mstype | |||||
| from mindspore.common.parameter import ParameterTuple | |||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| import mindspore.ops.composite as C | |||||
| from mindspore.common.api import _executor | |||||
| from mindspore.common.parameter import ParameterTuple | |||||
| from mindspore.common import dtype as mstype | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
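Most hunks in this change are the same mechanical fix: imports regrouped and alphabetized the way pylint's import-order checks expect. For reference, the target layout is:

```python
# Standard library first, alphabetized
import logging
import os

# Third-party packages second
import numpy as np

# First-party / local imports last
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
```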
| @@ -34,6 +34,7 @@ def test_net_vargs_expand(): | |||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| return x + y | return x + y | ||||
| x = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) | x = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) | ||||
| y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) | y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) | ||||
| sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) | sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) | ||||
| @@ -51,7 +52,7 @@ class VarNet(Cell): | |||||
| self.net = net | self.net = net | ||||
| def construct(self, *args): | def construct(self, *args): | ||||
| return self.net(*args)*self.w + self.b | |||||
| return self.net(*args) * self.w + self.b | |||||
| class SecondNet(Cell): | class SecondNet(Cell): | ||||
| @@ -95,6 +96,7 @@ class Bprop(Cell): | |||||
| def test_all_var_args_grad_with_sens(): | def test_all_var_args_grad_with_sens(): | ||||
| """"test grad_by_list_with_sens with all var args input""" | """"test grad_by_list_with_sens with all var args input""" | ||||
| class GradNet(Cell): | class GradNet(Cell): | ||||
| def __init__(self, net): | def __init__(self, net): | ||||
| super(GradNet, self).__init__() | super(GradNet, self).__init__() | ||||
| @@ -103,6 +105,7 @@ def test_all_var_args_grad_with_sens(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return C.grad_by_list_with_sens(self.net, self.weights)(*inputs) | return C.grad_by_list_with_sens(self.net, self.weights)(*inputs) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| sens = Tensor(1.0, dtype=mstype.float32) | sens = Tensor(1.0, dtype=mstype.float32) | ||||
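A hypothetical minimal sketch of the pattern under test: gradients with respect to a cell's weights via `C.grad_by_list_with_sens`, with the sensitivity (initial output gradient) appended as the final call argument. `ScaleNet` is an invented toy net; everything else mirrors the test:

```python
import numpy as np
import mindspore.ops.composite as C
from mindspore import Tensor, Parameter
from mindspore.common import dtype as mstype
from mindspore.common.parameter import ParameterTuple
from mindspore.nn import Cell

class ScaleNet(Cell):  # invented toy net with one trainable parameter
    def __init__(self):
        super(ScaleNet, self).__init__()
        self.w = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), name="w")

    def construct(self, x, y):
        return (x + y) * self.w

class GradNet(Cell):
    def __init__(self, net):
        super(GradNet, self).__init__()
        self.net = net
        self.weights = ParameterTuple(net.trainable_params())

    def construct(self, *inputs):
        return C.grad_by_list_with_sens(self.net, self.weights)(*inputs)

x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
sens = Tensor(1.0, dtype=mstype.float32)
grads = GradNet(ScaleNet())(x, y, sens)  # one gradient per weight
```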
| @@ -120,6 +123,7 @@ def test_grad_list_var_args(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return C.grad_by_list(self.net, self.weights)(*inputs) | return C.grad_by_list(self.net, self.weights)(*inputs) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| net = VarNet(SecondNet()) | net = VarNet(SecondNet()) | ||||
| @@ -136,6 +140,7 @@ def test_grad_all_var_args(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return C.grad_all(self.net)(*inputs) | return C.grad_all(self.net)(*inputs) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| net = VarNet(SecondNet()) | net = VarNet(SecondNet()) | ||||
| @@ -152,6 +157,7 @@ def test_grad_all_var_args_with_sens(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return C.grad_all_with_sens(self.net)(*inputs) | return C.grad_all_with_sens(self.net)(*inputs) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| sens = Tensor(1.0, dtype=mstype.float32) | sens = Tensor(1.0, dtype=mstype.float32) | ||||
| @@ -169,6 +175,7 @@ def test_grad_var_args_with_sens(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return C.grad_with_sens(self.net)(*inputs) | return C.grad_with_sens(self.net)(*inputs) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| sens = Tensor(1.0, dtype=mstype.float32) | sens = Tensor(1.0, dtype=mstype.float32) | ||||
| @@ -206,6 +213,7 @@ def test_var_args_grad(): | |||||
| def construct(self, x, y, sens): | def construct(self, x, y, sens): | ||||
| return C.grad_by_list_with_sens(self.net, self.weights)(x, y, sens) | return C.grad_by_list_with_sens(self.net, self.weights)(x, y, sens) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| sens = Tensor(1.0, dtype=mstype.float32) | sens = Tensor(1.0, dtype=mstype.float32) | ||||
| @@ -216,13 +224,14 @@ def test_var_args_grad(): | |||||
| def test_var_args_positional(): | def test_var_args_positional(): | ||||
| """"test grad_all with var args in inner graph""" | """"test grad_all with var args in inner graph""" | ||||
| class VarNet(Cell): | class VarNet(Cell): | ||||
| def __init__(self, net): | def __init__(self, net): | ||||
| super(VarNet, self).__init__() | super(VarNet, self).__init__() | ||||
| self.net = net | self.net = net | ||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| return self.net(x, y)*x | |||||
| return self.net(x, y) * x | |||||
| class SecondNet(Cell): | class SecondNet(Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| @@ -239,6 +248,7 @@ def test_var_args_positional(): | |||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| return C.grad_all(self.net)(x, y) | return C.grad_all(self.net)(x, y) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| net = VarNet(SecondNet()) | net = VarNet(SecondNet()) | ||||
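For contrast with the by-list variants, `C.grad_all` differentiates with respect to every positional input. A self-contained toy (`MulNet` is invented):

```python
import numpy as np
import mindspore.ops.composite as C
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import Cell

class MulNet(Cell):  # invented toy net
    def construct(self, x, y):
        return x * y

x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
dx, dy = C.grad_all(MulNet())(x, y)  # one gradient per input
```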
| @@ -258,6 +268,7 @@ def test_grad_within_if_else(): | |||||
| def construct(self, *inputs): | def construct(self, *inputs): | ||||
| return self.grad(*inputs) | return self.grad(*inputs) | ||||
| x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) | ||||
| sens = Tensor(1.0, dtype=mstype.float32) | sens = Tensor(1.0, dtype=mstype.float32) | ||||
| @@ -309,6 +320,7 @@ def test_grad_for_concat(): | |||||
| def grad_cmp(self): | def grad_cmp(self): | ||||
| input_grad_mindspore = self.grad_mindspore_impl() | input_grad_mindspore = self.grad_mindspore_impl() | ||||
| fact = ConcatFactory(input_shape=( | fact = ConcatFactory(input_shape=( | ||||
| (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) | (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) | ||||
| fact.grad_cmp() | fact.grad_cmp() | ||||
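The concat gradient check above reduces to a shape argument: the backward pass of concat routes the matching slice of the output gradient back to each input. Plain NumPy illustration (shapes shrunk from the test's):

```python
import numpy as np

a = np.ones((2, 3, 1))
b = np.ones((2, 5, 1))
out_grad = np.random.normal(0, 1, (2, 8, 1))   # gradient w.r.t. concat output
grad_a, grad_b = np.split(out_grad, [3], axis=1)  # split along the concat axis
assert grad_a.shape == a.shape and grad_b.shape == b.shape
```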
| @@ -15,12 +15,13 @@ | |||||
| """ | """ | ||||
| log test | log test | ||||
| """ | """ | ||||
| import logging | |||||
| import os | import os | ||||
| import sys | |||||
| import time | |||||
| import re | import re | ||||
| import shutil | import shutil | ||||
| import logging | |||||
| import sys | |||||
| import time | |||||
| def test_log_stdout(): | def test_log_stdout(): | ||||
| # Clean up environment variables | # Clean up environment variables | ||||
| @@ -50,8 +51,8 @@ def test_log_setlevel(): | |||||
| _rm_env_config() | _rm_env_config() | ||||
| os.environ['GLOG_v'] = '0' | os.environ['GLOG_v'] = '0' | ||||
| from mindspore import log as logger | from mindspore import log as logger | ||||
| #logger_instance = logger._get_logger() | |||||
| #del logger_instance | |||||
| # logger_instance = logger._get_logger() | |||||
| # del logger_instance | |||||
| loglevel = logger.get_level() | loglevel = logger.get_level() | ||||
| log_str = 'print debug information' | log_str = 'print debug information' | ||||
| logger.debug("5 test log message debug:%s", log_str) | logger.debug("5 test log message debug:%s", log_str) | ||||
| @@ -87,7 +88,7 @@ def test_log_file(): | |||||
| "\[.*:.*[0-9]\] test log message warning" | "\[.*:.*[0-9]\] test log message warning" | ||||
| match_obj = re.match(pattern, result) | match_obj = re.match(pattern, result) | ||||
| #Clear test file | |||||
| # Clear test file | |||||
| if os.path.exists(file_path): | if os.path.exists(file_path): | ||||
| shutil.rmtree(file_path) | shutil.rmtree(file_path) | ||||
| @@ -100,7 +101,7 @@ def test_log_backup_count(): | |||||
| """ | """ | ||||
| test backup count | test backup count | ||||
| """ | """ | ||||
| #logger.reset_log_config(level=logging.INFO, console=False, | |||||
| # logger.reset_log_config(level=logging.INFO, console=False, | |||||
| # filepath=file_path, maxBytes=1000, backupCount=10) | # filepath=file_path, maxBytes=1000, backupCount=10) | ||||
| _rm_env_config() | _rm_env_config() | ||||
| file_path = '/tmp/log/mindspore_test' | file_path = '/tmp/log/mindspore_test' | ||||
| @@ -141,12 +142,12 @@ def test_log_verify_envconfig(): | |||||
| # level is not a number | # level is not a number | ||||
| _rm_env_config() | _rm_env_config() | ||||
| os.environ['GLOG_v'] = 'test' | os.environ['GLOG_v'] = 'test' | ||||
| verify_dict_0 = logger._get_env_config() | |||||
| verify_dict_0 = logger._get_env_config() | |||||
| # level is not in range | # level is not in range | ||||
| _rm_env_config() | _rm_env_config() | ||||
| os.environ['GLOG_v'] = '100' | os.environ['GLOG_v'] = '100' | ||||
| verify_dict_1 = logger._get_env_config() | |||||
| verify_dict_1 = logger._get_env_config() | |||||
| # console is not a number | # console is not a number | ||||
| _rm_env_config() | _rm_env_config() | ||||
| @@ -236,7 +237,6 @@ def test_log_repeated_print(): | |||||
| logger._global_logger = None | logger._global_logger = None | ||||
| def test_log_getconfig(): | def test_log_getconfig(): | ||||
| _rm_env_config() | _rm_env_config() | ||||
| os.environ['GLOG_v'] = '3' | os.environ['GLOG_v'] = '3' | ||||
| @@ -307,7 +307,7 @@ def test_log_ms_import(): | |||||
| targetdict = {'GLOG_v': '2', 'GLOG_logtostderr': '1'} | targetdict = {'GLOG_v': '2', 'GLOG_logtostderr': '1'} | ||||
| level = ms.get_level() | level = ms.get_level() | ||||
| assert configdict == targetdict and level == '2' | assert configdict == targetdict and level == '2' | ||||
| def _rm_env_config(): | def _rm_env_config(): | ||||
| envlist = ['GLOG_v', 'GLOG_logtostderr', 'GLOG_log_dir', 'logger_maxBytes', 'logger_backupCount'] | envlist = ['GLOG_v', 'GLOG_logtostderr', 'GLOG_log_dir', 'logger_maxBytes', 'logger_backupCount'] | ||||
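The `_rm_env_config` helper is only shown truncated here; a plausible body consistent with its uses above (an assumption, not the PR's code):

```python
import os

def _rm_env_config():
    # Drop logging-related env vars so each test starts from a clean slate.
    envlist = ['GLOG_v', 'GLOG_logtostderr', 'GLOG_log_dir',
               'logger_maxBytes', 'logger_backupCount']
    for name in envlist:
        os.environ.pop(name, None)  # ignore vars that are not set
```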
| @@ -15,6 +15,7 @@ | |||||
| """setup for pytest""" | """setup for pytest""" | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| # pylint: disable=unused-argument | # pylint: disable=unused-argument | ||||
| def setup_module(module): | def setup_module(module): | ||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| @@ -13,15 +13,16 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test_graph_summary """ | """ test_graph_summary """ | ||||
| import os | |||||
| import logging | import logging | ||||
| import os | |||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore import Model, context | from mindspore import Model, context | ||||
| from mindspore.train.summary.summary_record import SummaryRecord | |||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore.train.callback import SummaryStep | from mindspore.train.callback import SummaryStep | ||||
| from mindspore.train.summary.summary_record import SummaryRecord | |||||
| from .....dataset_mock import MindData | from .....dataset_mock import MindData | ||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
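A hedged sketch of the `SummaryRecord` + `SummaryStep` pairing these summary tests revolve around; the meaning of the second `SummaryStep` argument is inferred from `test_validate` below (it must be an int):

```python
import os
from mindspore.train.callback import SummaryStep
from mindspore.train.summary.summary_record import SummaryRecord

summary_dir = os.path.join(os.getcwd(), "test_temp_summary_event_file")
record = SummaryRecord(summary_dir)  # writes event files under summary_dir
step_cb = SummaryStep(record, 2)     # assumed: record once every 2 steps
record.record(1)                     # explicit write at integer step 1
```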
| @@ -21,8 +21,8 @@ import tempfile | |||||
| import numpy as np | import numpy as np | ||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data | |||||
| from mindspore.train.summary._summary_adapter import _calc_histogram_bins | from mindspore.train.summary._summary_adapter import _calc_histogram_bins | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data | |||||
| from .summary_reader import SummaryReader | from .summary_reader import SummaryReader | ||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
| @@ -18,16 +18,18 @@ | |||||
| @Date : 2019-07-4 | @Date : 2019-07-4 | ||||
| @Desc : test summary function | @Desc : test summary function | ||||
| """ | """ | ||||
| import os | |||||
| import logging | import logging | ||||
| import os | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, \ | |||||
| _cache_summary_tensor_data | |||||
| from mindspore import Model, context | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.optim import Momentum | from mindspore.nn.optim import Momentum | ||||
| from mindspore import Model, context | |||||
| from mindspore.train.callback import SummaryStep | from mindspore.train.callback import SummaryStep | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, \ | |||||
| _cache_summary_tensor_data | |||||
| from .....dataset_mock import MindData | from .....dataset_mock import MindData | ||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
| @@ -18,16 +18,18 @@ | |||||
| @Date : 2019-07-4 | @Date : 2019-07-4 | ||||
| @Desc : test summary function | @Desc : test summary function | ||||
| """ | """ | ||||
| import os | |||||
| import logging | import logging | ||||
| import os | |||||
| import random | import random | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data | |||||
| from mindspore.train.callback import SummaryStep | |||||
| from mindspore.common.tensor import Tensor | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.tensor import Tensor | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.train.callback import SummaryStep | |||||
| from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data | |||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
| SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | ||||
| @@ -129,7 +131,8 @@ def test_scalar_summary_sample_with_shape_1(): | |||||
| # Test: test with ge | # Test: test with ge | ||||
| class SummaryDemo(nn.Cell): | class SummaryDemo(nn.Cell): | ||||
| """ SummaryDemo definition """ | """ SummaryDemo definition """ | ||||
| def __init__(self,): | |||||
| def __init__(self, ): | |||||
| super(SummaryDemo, self).__init__() | super(SummaryDemo, self).__init__() | ||||
| self.s = P.ScalarSummary() | self.s = P.ScalarSummary() | ||||
| self.histogram_summary = P.HistogramSummary() | self.histogram_summary = P.HistogramSummary() | ||||
| @@ -218,9 +221,9 @@ def test_validate(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| sr.record(2.0) | sr.record(2.0) | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| sr.record((1,3)) | |||||
| sr.record((1, 3)) | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| sr.record([2,3]) | |||||
| sr.record([2, 3]) | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| sr.record("str") | sr.record("str") | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| @@ -235,8 +238,8 @@ def test_validate(): | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| SummaryStep(sr, "str") | SummaryStep(sr, "str") | ||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| SummaryStep(sr, (1,2)) | |||||
| SummaryStep(sr, (1, 2)) | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| SummaryStep(sr, [3,4]) | |||||
| SummaryStep(sr, [3, 4]) | |||||
| with pytest.raises(ValueError): | with pytest.raises(ValueError): | ||||
| SummaryStep(sr, sr) | SummaryStep(sr, sr) | ||||
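The validation block above follows the standard `pytest.raises` pattern: assert that bad argument types are rejected with `ValueError`. A generic, self-contained version (the checker function is hypothetical):

```python
import pytest

def check_positive_int(value):  # hypothetical validator, for illustration
    if not isinstance(value, int) or isinstance(value, bool) or value <= 0:
        raise ValueError("expected a positive int, got %r" % (value,))

with pytest.raises(ValueError):
    check_positive_int((1, 3))
with pytest.raises(ValueError):
    check_positive_int("str")
```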
| @@ -18,11 +18,13 @@ | |||||
| @Date : 2019-08-5 | @Date : 2019-08-5 | ||||
| @Desc : test summary function of abnormal input | @Desc : test summary function of abnormal input | ||||
| """ | """ | ||||
| import os | |||||
| import logging | import logging | ||||
| import os | |||||
| import numpy as np | import numpy as np | ||||
| from mindspore.train.summary.summary_record import SummaryRecord | |||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| from mindspore.train.summary.summary_record import SummaryRecord | |||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
| SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | ||||
| @@ -65,6 +67,7 @@ def test_summaryrecord_input_null_string(): | |||||
| assert False | assert False | ||||
| log.debug("finished test_summaryrecord_input_null_string") | log.debug("finished test_summaryrecord_input_null_string") | ||||
| def test_summaryrecord_input_None(): | def test_summaryrecord_input_None(): | ||||
| log.debug("begin test_summaryrecord_input_None") | log.debug("begin test_summaryrecord_input_None") | ||||
| # step 0: create the thread | # step 0: create the thread | ||||
| @@ -76,6 +79,7 @@ def test_summaryrecord_input_None(): | |||||
| assert False | assert False | ||||
| log.debug("finished test_summaryrecord_input_None") | log.debug("finished test_summaryrecord_input_None") | ||||
| def test_summaryrecord_input_relative_dir_1(): | def test_summaryrecord_input_relative_dir_1(): | ||||
| log.debug("begin test_summaryrecord_input_relative_dir_1") | log.debug("begin test_summaryrecord_input_relative_dir_1") | ||||
| # step 0: create the thread | # step 0: create the thread | ||||
| @@ -18,16 +18,17 @@ | |||||
| @Date : 2019-08-5 | @Date : 2019-08-5 | ||||
| @Desc : test summary function of ops params valid check | @Desc : test summary function of ops params valid check | ||||
| """ | """ | ||||
| import os | |||||
| import logging | import logging | ||||
| import os | |||||
| import random | import random | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| from mindspore.train.summary.summary_record import SummaryRecord | |||||
| from mindspore.common.tensor import Tensor | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.tensor import Tensor | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.train.summary.summary_record import SummaryRecord | |||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
| SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | ||||
| @@ -38,6 +39,7 @@ log.setLevel(level=logging.ERROR) | |||||
| class SummaryDemoTag(nn.Cell): | class SummaryDemoTag(nn.Cell): | ||||
| """ SummaryDemoTag definition """ | """ SummaryDemoTag definition """ | ||||
| def __init__(self, tag1, tag2, tag3): | def __init__(self, tag1, tag2, tag3): | ||||
| super(SummaryDemoTag, self).__init__() | super(SummaryDemoTag, self).__init__() | ||||
| self.s = P.ScalarSummary() | self.s = P.ScalarSummary() | ||||
| @@ -58,6 +60,7 @@ class SummaryDemoTag(nn.Cell): | |||||
| class SummaryDemoTagForSet(nn.Cell): | class SummaryDemoTagForSet(nn.Cell): | ||||
| """ SummaryDemoTagForSet definition """ | """ SummaryDemoTagForSet definition """ | ||||
| def __init__(self, tag_tuple): | def __init__(self, tag_tuple): | ||||
| super(SummaryDemoTagForSet, self).__init__() | super(SummaryDemoTagForSet, self).__init__() | ||||
| self.s = P.ScalarSummary() | self.s = P.ScalarSummary() | ||||
| @@ -75,6 +78,7 @@ class SummaryDemoTagForSet(nn.Cell): | |||||
| class SummaryDemoValue(nn.Cell): | class SummaryDemoValue(nn.Cell): | ||||
| """ SummaryDemoValue definition """ | """ SummaryDemoValue definition """ | ||||
| def __init__(self, value): | def __init__(self, value): | ||||
| super(SummaryDemoValue, self).__init__() | super(SummaryDemoValue, self).__init__() | ||||
| self.s = P.ScalarSummary() | self.s = P.ScalarSummary() | ||||
| @@ -88,8 +92,10 @@ class SummaryDemoValue(nn.Cell): | |||||
| self.s("y", self.v) | self.s("y", self.v) | ||||
| return z | return z | ||||
| class SummaryDemoValueForSet(nn.Cell): | class SummaryDemoValueForSet(nn.Cell): | ||||
| """ SummaryDemoValueForSet definition """ | """ SummaryDemoValueForSet definition """ | ||||
| def __init__(self, value, tag_tuple): | def __init__(self, value, tag_tuple): | ||||
| super(SummaryDemoValueForSet, self).__init__() | super(SummaryDemoValueForSet, self).__init__() | ||||
| self.s = P.ScalarSummary() | self.s = P.ScalarSummary() | ||||
| @@ -106,11 +112,12 @@ class SummaryDemoValueForSet(nn.Cell): | |||||
| class HistogramSummaryNet(nn.Cell): | class HistogramSummaryNet(nn.Cell): | ||||
| "HistogramSummaryNet definition" | "HistogramSummaryNet definition" | ||||
| def __init__(self, value): | def __init__(self, value): | ||||
| self.histogram_summary = P.HistogramSummary() | self.histogram_summary = P.HistogramSummary() | ||||
| self.add = P.TensorAdd() | self.add = P.TensorAdd() | ||||
| self.value = value | self.value = value | ||||
| def construct(self, tensors1, tensor2): | def construct(self, tensors1, tensor2): | ||||
| self.histogram_summary("value", self.value) | self.histogram_summary("value", self.value) | ||||
| return self.add(tensors1, tensor2) | return self.add(tensors1, tensor2) | ||||
| @@ -246,7 +253,7 @@ def test_histogram_summary_use_valid_value(): | |||||
| """Test histogram summary with valid value""" | """Test histogram summary with valid value""" | ||||
| log.debug("Begin test_histogram_summary_use_valid_value") | log.debug("Begin test_histogram_summary_use_valid_value") | ||||
| try: | try: | ||||
| net = HistogramSummaryNet(Tensor(np.array([1,2,3]))) | |||||
| net = HistogramSummaryNet(Tensor(np.array([1, 2, 3]))) | |||||
| run_case(net) | run_case(net) | ||||
| except: | except: | ||||
| assert True | assert True | ||||
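Worth noting: `HistogramSummaryNet` above never calls `super().__init__()`, which is presumably why `run_case` is wrapped in try/except and every case lands in `assert True`. A version that would initialize cleanly (a sketch, not part of this PR):

```python
import numpy as np
import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P

class HistogramSummaryNet(nn.Cell):
    """ HistogramSummaryNet definition """
    def __init__(self, value):
        super(HistogramSummaryNet, self).__init__()  # the missing call
        self.histogram_summary = P.HistogramSummary()
        self.add = P.TensorAdd()
        self.value = value

    def construct(self, tensors1, tensor2):
        self.histogram_summary("value", self.value)  # tag, then tensor
        return self.add(tensors1, tensor2)

net = HistogramSummaryNet(Tensor(np.array([1, 2, 3])))
```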
| @@ -18,13 +18,15 @@ | |||||
| @Date : 2019-07-4 | @Date : 2019-07-4 | ||||
| @Desc : test summary function | @Desc : test summary function | ||||
| """ | """ | ||||
| import os | |||||
| import logging | import logging | ||||
| import os | |||||
| import numpy as np | import numpy as np | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data | |||||
| from mindspore.common.tensor import Tensor | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore.common.tensor import Tensor | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data | |||||
| CUR_DIR = os.getcwd() | CUR_DIR = os.getcwd() | ||||
| SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" | ||||
| @@ -93,7 +95,6 @@ def test_tensor_summary_sample(): | |||||
| log.debug("finished test_tensor_summary_sample") | log.debug("finished test_tensor_summary_sample") | ||||
| def get_test_data_check(step): | def get_test_data_check(step): | ||||
| """ get_test_data_check """ | """ get_test_data_check """ | ||||
| test_data_list = [] | test_data_list = [] | ||||
| @@ -111,7 +112,8 @@ def get_test_data_check(step): | |||||
| # Test: test with ge | # Test: test with ge | ||||
| class SummaryDemo(nn.Cell): | class SummaryDemo(nn.Cell): | ||||
| """ SummaryDemo definition """ | """ SummaryDemo definition """ | ||||
| def __init__(self,): | |||||
| def __init__(self, ): | |||||
| super(SummaryDemo, self).__init__() | super(SummaryDemo, self).__init__() | ||||
| self.s = P.TensorSummary() | self.s = P.TensorSummary() | ||||
| self.add = P.TensorAdd() | self.add = P.TensorAdd() | ||||
| @@ -123,6 +125,7 @@ class SummaryDemo(nn.Cell): | |||||
| self.s("y1", y) | self.s("y1", y) | ||||
| return z | return z | ||||
| def test_tensor_summary_with_ge(): | def test_tensor_summary_with_ge(): | ||||
| """ test_tensor_summary_with_ge """ | """ test_tensor_summary_with_ge """ | ||||
| log.debug("begin test_tensor_summary_with_ge") | log.debug("begin test_tensor_summary_with_ge") | ||||
| @@ -140,7 +143,7 @@ def test_tensor_summary_with_ge(): | |||||
| steps = 100 | steps = 100 | ||||
| for i in range(1, steps): | for i in range(1, steps): | ||||
| x = Tensor(np.array([[i], [i]]).astype(np.float32)) | x = Tensor(np.array([[i], [i]]).astype(np.float32)) | ||||
| y = Tensor(np.array([[i+1], [i+1]]).astype(np.float32)) | |||||
| y = Tensor(np.array([[i + 1], [i + 1]]).astype(np.float32)) | |||||
| net(x, y) | net(x, y) | ||||
| test_writer.record(i) | test_writer.record(i) | ||||
| @@ -15,12 +15,11 @@ | |||||
| """ auto mixed precision """ | """ auto mixed precision """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.context as context | |||||
| from mindspore import Tensor | |||||
| from mindspore import amp | from mindspore import amp | ||||
| from mindspore import nn | from mindspore import nn | ||||
| from mindspore import Tensor | |||||
| from mindspore.common import dtype as mstype | |||||
| import mindspore.context as context | |||||
| from mindspore.model_zoo.resnet import resnet50 | |||||
| from mindspore.train import Model | from mindspore.train import Model | ||||
| from ....dataset_mock import MindData | from ....dataset_mock import MindData | ||||
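A hedged sketch of what the O0/O2 tests below compile: `amp.build_train_network` wraps a network, optimizer, and loss at a given mixed-precision level. The O2 description in the comment (network cast to fp16, loss kept in fp32, loss scaling applied) is the conventional meaning and should be checked against this MindSpore version:

```python
import mindspore.nn as nn
from mindspore import amp
from mindspore.nn.optim import Momentum

net = nn.Dense(16, 16)
loss = nn.MSELoss()
opt = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
# O2: fp16 network, fp32 loss, loss scaling (assumed semantics; verify locally)
train_net = amp.build_train_network(net, opt, loss, level="O2")
```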
| @@ -96,6 +95,7 @@ class MindDataSet(MindData): | |||||
| np_types=dataset_types, | np_types=dataset_types, | ||||
| output_shapes=dataset_shapes, | output_shapes=dataset_shapes, | ||||
| input_indexs=(0, 1)) | input_indexs=(0, 1)) | ||||
| def __next__(self): | def __next__(self): | ||||
| if self._size < self._iter_num: | if self._size < self._iter_num: | ||||
| raise StopIteration | raise StopIteration | ||||
| @@ -122,6 +122,7 @@ def test_compile_model_train_O0(): | |||||
| # not actual run, the metrics step will fail, check if compile ok. | # not actual run, the metrics step will fail, check if compile ok. | ||||
| model.eval(dataset) | model.eval(dataset) | ||||
| def test_compile_model_train_O2(): | def test_compile_model_train_O2(): | ||||
| dataset_types = (np.float32, np.float32) | dataset_types = (np.float32, np.float32) | ||||
| dataset_shapes = ((16, 16), (16, 16)) | dataset_shapes = ((16, 16), (16, 16)) | ||||
| @@ -14,6 +14,7 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test_run_config """ | """ test_run_config """ | ||||
| import pytest | import pytest | ||||
| from mindspore.train.callback import CheckpointConfig | from mindspore.train.callback import CheckpointConfig | ||||
| @@ -14,12 +14,14 @@ | |||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test_training """ | """ test_training """ | ||||
| import logging | import logging | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Model, context | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn.optim import Momentum | from mindspore.nn.optim import Momentum | ||||
| from mindspore import Model, context | |||||
| from mindspore.train.callback import SummaryStep | from mindspore.train.callback import SummaryStep | ||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| from ....dataset_mock import MindData | from ....dataset_mock import MindData | ||||
| @@ -19,12 +19,12 @@ | |||||
| @Desc : test mindspore compile method | @Desc : test mindspore compile method | ||||
| """ | """ | ||||
| import logging | import logging | ||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor, Parameter, Model | |||||
| from mindspore import Tensor, Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from ..ut_filter import non_graph_engine | |||||
| log = logging.getLogger("test") | log = logging.getLogger("test") | ||||
| log.setLevel(level=logging.ERROR) | log.setLevel(level=logging.ERROR) | ||||
| @@ -104,6 +104,7 @@ class ResidualBlock(nn.Cell): | |||||
| class ResNet(nn.Cell): | class ResNet(nn.Cell): | ||||
| """ ResNet definition """ | """ ResNet definition """ | ||||
| def __init__(self, tensor): | def __init__(self, tensor): | ||||
| super(ResNet, self).__init__() | super(ResNet, self).__init__() | ||||
| self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) | ||||
| @@ -118,6 +119,7 @@ class ResNet(nn.Cell): | |||||
| class LeNet(nn.Cell): | class LeNet(nn.Cell): | ||||
| """ LeNet definition """ | """ LeNet definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(LeNet, self).__init__() | super(LeNet, self).__init__() | ||||
| self.relu = nn.ReLU() | self.relu = nn.ReLU() | ||||
| @@ -165,4 +167,3 @@ class Net(nn.Cell): | |||||
| def construct(self, input_x): | def construct(self, input_x): | ||||
| return self.softmax(input_x) | return self.softmax(input_x) | ||||
| @@ -15,19 +15,19 @@ | |||||
| """test callback function.""" | """test callback function.""" | ||||
| import os | import os | ||||
| import stat | import stat | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore.nn as nn | |||||
| import mindspore.common.dtype as mstype | import mindspore.common.dtype as mstype | ||||
| from mindspore import context | |||||
| import mindspore.nn as nn | |||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore.nn import TrainOneStepCell, WithLossCell | from mindspore.nn import TrainOneStepCell, WithLossCell | ||||
| from mindspore.nn.optim import Momentum | |||||
| from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \ | from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \ | ||||
| LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \ | LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \ | ||||
| _build_callbacks, CheckpointConfig, _set_cur_net | _build_callbacks, CheckpointConfig, _set_cur_net | ||||
| from mindspore.common.api import ms_function | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
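A hedged sketch of the checkpoint-callback wiring these callback tests target; argument names follow the public API of this era and may differ in later versions:

```python
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint

config = CheckpointConfig(save_checkpoint_steps=10, keep_checkpoint_max=5)
ckpt_cb = ModelCheckpoint(prefix="test", directory="./ckpt", config=config)
```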
| @@ -15,6 +15,7 @@ | |||||
| """ test_checkparam """ | """ test_checkparam """ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | import pytest | ||||
| import mindspore | import mindspore | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Model, context | from mindspore import Model, context | ||||
| @@ -23,6 +24,7 @@ from mindspore.common.tensor import Tensor | |||||
| class LeNet5(nn.Cell): | class LeNet5(nn.Cell): | ||||
| """ LeNet5 definition """ | """ LeNet5 definition """ | ||||
| def __init__(self): | def __init__(self): | ||||
| super(LeNet5, self).__init__() | super(LeNet5, self).__init__() | ||||
| self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid") | self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid") | ||||