@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import os
-import pytest
 
 
 def test_expand_loss():

@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import os
-import pytest
 
 
 def test_expand_loss():
@@ -14,7 +14,6 @@
 # ============================================================================
 """train_multinpu."""
 
 import os
-import sys
 import numpy as np

@@ -35,7 +34,6 @@ context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
 init()
 
 
-
 def get_WideDeep_net(config):
     WideDeep_net = WideDeepModel(config)
     loss_net = NetWithLossClass(WideDeep_net, config)

@@ -48,6 +46,7 @@ class ModelBuilder():
     """
    ModelBuilder
     """
+
     def __init__(self):
         pass
@@ -101,14 +100,13 @@ def test_train_eval():
     print("=====" * 5 + "model.eval() initialized: {}".format(out))
     model.train(epochs, ds_train,
                 callbacks=[TimeMonitor(ds_train.get_dataset_size()), eval_callback, callback, ckpoint_cb])
-    expect_out0 = [0.792634,0.799862,0.803324]
-    expect_out6 = [0.796580,0.803908,0.807262]
+    expect_out0 = [0.792634, 0.799862, 0.803324]
+    expect_out6 = [0.796580, 0.803908, 0.807262]
     if get_rank() == 0:
         assert np.allclose(eval_callback.eval_values, expect_out0)
     if get_rank() == 6:
         assert np.allclose(eval_callback.eval_values, expect_out6)
-
 
 if __name__ == "__main__":
     test_train_eval()
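A note on the assertion pattern above: `np.allclose` is the right comparison for per-rank metric sequences, since floating-point training metrics are reproducible only up to rounding. A minimal, self-contained sketch (the values are illustrative, not taken from the test):

```python
import numpy as np

# Recorded metric values (e.g. AUC after each epoch) vs. an expected baseline.
recorded = [0.792634, 0.799862, 0.803324]
baseline = [0.792634, 0.799863, 0.803324]  # differs in the 6th decimal place

# allclose passes element-wise when |a - b| <= atol + rtol * |b|.
assert np.allclose(recorded, baseline)  # default rtol=1e-05, atol=1e-08
assert not np.allclose(recorded, baseline, rtol=0, atol=1e-9)  # tighter tolerance fails
```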
@@ -16,8 +16,10 @@
 """train bert network without lossscale"""
 import os
-import pytest
 import numpy as np
+import pytest
+from src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepWithLossScaleCell
+from src.bert_model import BertConfig
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine.datasets as de

@@ -25,14 +27,11 @@ import mindspore.dataset.transforms.c_transforms as C
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common.tensor import Tensor
+from mindspore.nn import learning_rate_schedule as lr_schedules
 from mindspore.nn.optim import Lamb
 from mindspore.train.callback import Callback
 from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from mindspore.train.model import Model
-from mindspore.nn import learning_rate_schedule as lr_schedules
-from src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepWithLossScaleCell
-from src.bert_model import BertConfig
 
 DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
 SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json"
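The import reshuffle above follows the usual pylint/isort grouping for this repository: standard library first, then external packages (the test-local `src` helpers sort with them as unknown third-party), then the repository's own `mindspore` package. A hedged sketch of the target layout:

```python
# Standard library
import os

# External / unknown third-party (the bert test's local src package lands here)
import numpy as np
import pytest
from src.bert_model import BertConfig  # assumes src/ is on sys.path, as in these tests

# The repository's own package comes last
import mindspore.common.dtype as mstype
```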
@@ -23,10 +23,8 @@ import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.initializer import initializer
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.optim import Momentum
-from mindspore.ops import operations as P
 
 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
@@ -21,7 +21,6 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
-from mindspore.nn import Dense
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.optim import Momentum
 from mindspore.ops import operations as P
@@ -18,7 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import operations as P
-from mindspore.common import dtype as mstype
 
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@@ -54,6 +54,7 @@ def test_slice_grad():
     print("output:\n", output)
     assert (output.asnumpy() == expect).all()
 
+
 class SliceGrad2(nn.Cell):
     def __init__(self):
         super(SliceGrad2, self).__init__()

@@ -62,6 +63,7 @@ class SliceGrad2(nn.Cell):
     def construct(self, dy, x):
         return self.slicegrad(dy, x, (0, 1, 0), (2, 2, 2))
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard

@@ -71,10 +73,11 @@ def test_slice_grad2():
     grad = SliceGrad2()
     output = grad(dy, x)
     print("output:\n", output)
     expect = [[[0., 0.], [2., 3.], [4., 5.]],
               [[0., 0.], [8., 9.], [10., 11.]]]
     assert (output.asnumpy() == expect).all()
 
+
 if __name__ == '__main__':
     test_slice_grad()
     test_slice_grad2()
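The expected gradient in test_slice_grad2 can be reproduced with plain numpy: the backward of Slice scatters the upstream gradient dy into a zero tensor at the sliced region. A minimal sketch, assuming begin=(0, 1, 0) and size=(2, 2, 2) as in the test, with dy chosen so the result matches the expect array above:

```python
import numpy as np

x = np.arange(2 * 3 * 2).reshape(2, 3, 2).astype(np.float32)
begin, size = (0, 1, 0), (2, 2, 2)
region = tuple(slice(b, b + s) for b, s in zip(begin, size))

dy = x[region]  # upstream gradient for the sliced output

# Backward of Slice: place dy back at the sliced region, zeros elsewhere.
dx = np.zeros_like(x)
dx[region] = dy
print(dx)
# [[[ 0.  0.] [ 2.  3.] [ 4.  5.]]
#  [[ 0.  0.] [ 8.  9.] [10. 11.]]]
```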
@@ -21,10 +21,10 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common import dtype as mstype
 from mindspore.ops import operations as P
-from mindspore.ops.operations import _grad_ops as G
 
 context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
+
 
 class Slice(nn.Cell):
     def __init__(self):
         super(Slice, self).__init__()

@@ -33,6 +33,7 @@ class Slice(nn.Cell):
     def construct(self, x):
         return self.slice(x, (0, 1, 0), (2, 1, 3))
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard

@@ -47,6 +48,7 @@ def test_slice():
     print("output:\n", output)
     assert (output.asnumpy() == expect).all()
 
+
 class Slice2(nn.Cell):
     def __init__(self):
         super(Slice2, self).__init__()

@@ -55,12 +57,13 @@ class Slice2(nn.Cell):
     def construct(self, x):
         return self.slice(x, (1, 0, 0), (1, 2, 3))
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_slice2():
     x = Tensor(np.arange(3 * 2 * 3).reshape(3, 2, 3), mstype.float32)
     expect = [[[6., 7., 8.],
                [9., 10., 11.]]]
     slice_op = Slice2()

@@ -68,6 +71,7 @@ def test_slice2():
     print("output:\n", output)
     assert (output.asnumpy() == expect).all()
 
+
 if __name__ == '__main__':
     test_slice()
     test_slice2()
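The Slice2 expectation follows directly from the begin/size slicing semantics: begin=(1, 0, 0) with size=(1, 2, 3) selects x[1:2, 0:2, 0:3]. A quick numpy cross-check:

```python
import numpy as np

x = np.arange(3 * 2 * 3).reshape(3, 2, 3).astype(np.float32)
out = x[1:2, 0:2, 0:3]  # each dimension: begin : begin + size
print(out)  # [[[ 6.  7.  8.]  [ 9. 10. 11.]]], matching the test's expect
```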
@@ -14,9 +14,11 @@
 # ============================================================================
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
 
+
 # sum = input1 + input2 + const_bias
 class CusAdd3(PrimitiveWithInfer):
     """Custom add3 definition"""
+
     @prim_attr_register
     def __init__(self, const_bias=0.0):
         self.init_prim_io_names(inputs=['input1', 'input2'], outputs=['sum3'])
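For readers unfamiliar with custom primitives: a PrimitiveWithInfer subclass normally also defines infer_shape and infer_dtype so the compiler can propagate shapes and types through the graph. The diff does not show those methods, so the bodies below are an assumption sketched from the element-wise add semantics stated in the comment:

```python
from mindspore.ops import prim_attr_register, PrimitiveWithInfer


class CusAdd3Sketch(PrimitiveWithInfer):
    """Illustrative only; the infer_* bodies are assumed, not taken from the diff."""

    @prim_attr_register
    def __init__(self, const_bias=0.0):
        self.init_prim_io_names(inputs=['input1', 'input2'], outputs=['sum3'])

    def infer_shape(self, input1_shape, input2_shape):
        # Element-wise add plus a scalar bias: output shape follows input1.
        return input1_shape

    def infer_dtype(self, input1_dtype, input2_dtype):
        return input1_dtype
```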
@@ -24,8 +24,8 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import operations as P
-from tests.summary_utils import SummaryReader
 from mindspore.train.summary.summary_record import SummaryRecord
+from tests.summary_utils import SummaryReader
 
 context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
@@ -16,17 +16,15 @@
 This is the test module for mindrecord
 """
 import collections
-import json
-import numpy as np
 import os
-import pytest
 import re
 import string
+
+import numpy as np
+import pytest
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.vision.c_transforms as vision
 from mindspore import log as logger
-from mindspore.dataset.transforms.vision import Inter
 from mindspore.mindrecord import FileWriter
 
 FILES_NUM = 4
@@ -52,9 +50,9 @@ def add_and_remove_cv_file():
     writer = FileWriter(CV_FILE_NAME, FILES_NUM)
     data = get_data(CV_DIR_NAME)
     cv_schema_json = {"id": {"type": "int32"},
-                          "file_name": {"type": "string"},
-                          "label": {"type": "int32"},
-                          "data": {"type": "bytes"}}
+                      "file_name": {"type": "string"},
+                      "label": {"type": "int32"},
+                      "data": {"type": "bytes"}}
     writer.add_schema(cv_schema_json, "img_schema")
     writer.add_index(["file_name", "label"])
     writer.write_raw_data(data)
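The schema edit above changes indentation only: pylint's bad-continuation check wants continuation lines aligned with the opening brace (or a clean hanging indent). A generic illustration of the two accepted styles:

```python
# Aligned with the opening delimiter, as the diff does:
schema = {"id": {"type": "int32"},
          "file_name": {"type": "string"},
          "label": {"type": "int32"}}

# Equally acceptable: hanging indent with nothing after the opening brace.
schema = {
    "id": {"type": "int32"},
    "file_name": {"type": "string"},
    "label": {"type": "int32"},
}
```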
@@ -85,14 +83,14 @@ def add_and_remove_nlp_file():
     writer = FileWriter(NLP_FILE_NAME, FILES_NUM)
     data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]
     nlp_schema_json = {"id": {"type": "string"}, "label": {"type": "int32"},
-                          "rating": {"type": "float32"},
-                          "input_ids": {"type": "int64",
-                                        "shape": [-1]},
-                          "input_mask": {"type": "int64",
-                                         "shape": [1, -1]},
-                          "segment_ids": {"type": "int64",
-                                          "shape": [2, -1]}
-                          }
+                       "rating": {"type": "float32"},
+                       "input_ids": {"type": "int64",
+                                     "shape": [-1]},
+                       "input_mask": {"type": "int64",
+                                      "shape": [1, -1]},
+                       "segment_ids": {"type": "int64",
+                                       "shape": [2, -1]}
+                       }
     writer.set_header_size(1 << 14)
     writer.set_page_size(1 << 15)
     writer.add_schema(nlp_schema_json, "nlp_schema")

@@ -110,6 +108,7 @@ def add_and_remove_nlp_file():
         os.remove("{}".format(x))
         os.remove("{}.db".format(x))
 
+
 def test_cv_minddataset_reader_basic_padded_samples(add_and_remove_cv_file):
     """tutorial for cv minderdataset."""
     columns_list = ["label", "file_name", "data"]
@@ -130,7 +129,7 @@ def test_cv_minddataset_reader_basic_padded_samples(add_and_remove_cv_file):
         if item['label'] == -1:
             num_padded_iter += 1
             assert item['file_name'] == bytes(padded_sample['file_name'],
-                                                      encoding='utf8')
+                                              encoding='utf8')
             assert item['label'] == padded_sample['label']
             assert (item['data'] == np.array(list(padded_sample['data']))).all()
         num_iter += 1
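The explicit utf8 encode in the assertion matters because, as the comparisons here imply, the dataset iterator yields string columns as raw bytes, and bytes never compare equal to str in Python 3:

```python
s = "dummy.jpg"
assert s != b"dummy.jpg"  # str vs bytes: always unequal in Python 3
assert bytes(s, encoding='utf8') == b"dummy.jpg"  # encode first, then compare
```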
@@ -177,6 +176,7 @@ def test_cv_minddataset_partition_padded_samples(add_and_remove_cv_file):
     partitions(5, 5, 3)
     partitions(9, 8, 2)
 
+
 def test_cv_minddataset_partition_padded_samples_multi_epoch(add_and_remove_cv_file):
     """tutorial for cv minddataset."""
     columns_list = ["data", "file_name", "label"]

@@ -248,6 +248,7 @@ def test_cv_minddataset_partition_padded_samples_multi_epoch(add_and_remove_cv_file):
     partitions(5, 5, 3)
     partitions(9, 8, 2)
 
+
 def test_cv_minddataset_partition_padded_samples_no_dividsible(add_and_remove_cv_file):
     """tutorial for cv minddataset."""
     columns_list = ["data", "file_name", "label"]

@@ -273,6 +274,7 @@ def test_cv_minddataset_partition_padded_samples_no_dividsible(add_and_remove_cv_file):
     with pytest.raises(RuntimeError):
         partitions(4, 1)
 
+
 def test_cv_minddataset_partition_padded_samples_dataset_size_no_divisible(add_and_remove_cv_file):
     columns_list = ["data", "file_name", "label"]

@@ -291,8 +293,10 @@ def test_cv_minddataset_partition_padded_samples_dataset_size_no_divisible(add_and_remove_cv_file):
                                   num_padded=num_padded)
         with pytest.raises(RuntimeError):
             data_set.get_dataset_size() == 3
+
     partitions(4, 1)
 
+
 def test_cv_minddataset_partition_padded_samples_no_equal_column_list(add_and_remove_cv_file):
     columns_list = ["data", "file_name", "label"]
@@ -314,9 +318,11 @@ def test_cv_minddataset_partition_padded_samples_no_equal_column_list(add_and_remove_cv_file):
                 logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                 logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                 logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
+
     with pytest.raises(Exception, match="padded_sample cannot match columns_list."):
         partitions(4, 2)
 
+
 def test_cv_minddataset_partition_padded_samples_no_column_list(add_and_remove_cv_file):
     data = get_data(CV_DIR_NAME)
     padded_sample = data[0]

@@ -336,9 +342,11 @@ def test_cv_minddataset_partition_padded_samples_no_column_list(add_and_remove_cv_file):
                 logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                 logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                 logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
+
     with pytest.raises(Exception, match="padded_sample is specified and requires columns_list as well."):
         partitions(4, 2)
 
+
 def test_cv_minddataset_partition_padded_samples_no_num_padded(add_and_remove_cv_file):
     columns_list = ["data", "file_name", "label"]
     data = get_data(CV_DIR_NAME)

@@ -357,9 +365,11 @@ def test_cv_minddataset_partition_padded_samples_no_num_padded(add_and_remove_cv_file):
                 logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                 logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                 logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
+
     with pytest.raises(Exception, match="padded_sample is specified and requires num_padded as well."):
         partitions(4, 2)
 
+
 def test_cv_minddataset_partition_padded_samples_no_padded_samples(add_and_remove_cv_file):
     columns_list = ["data", "file_name", "label"]
     data = get_data(CV_DIR_NAME)

@@ -378,18 +388,18 @@ def test_cv_minddataset_partition_padded_samples_no_padded_samples(add_and_remove_cv_file):
                 logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                 logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                 logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
 
     with pytest.raises(Exception, match="num_padded is specified but padded_sample is not."):
         partitions(4, 2)
 
 
 def test_nlp_minddataset_reader_basic_padded_samples(add_and_remove_nlp_file):
     columns_list = ["input_ids", "id", "rating"]
     data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]
     padded_sample = data[0]
     padded_sample['id'] = "-1"
-    padded_sample['input_ids'] = np.array([-1,-1,-1,-1], dtype=np.int64)
+    padded_sample['input_ids'] = np.array([-1, -1, -1, -1], dtype=np.int64)
     padded_sample['rating'] = 1.0
     num_readers = 4
@@ -406,7 +416,9 @@ def test_nlp_minddataset_reader_basic_padded_samples(add_and_remove_nlp_file):
             for item in data_set.create_dict_iterator():
                 logger.info("-------------- item[id]: {} ------------------------".format(item["id"]))
                 logger.info("-------------- item[rating]: {} --------------------".format(item["rating"]))
-                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(item["input_ids"], item["input_ids"].shape))
+                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(
+                    item["input_ids"],
+                    item["input_ids"].shape))
                 if item['id'] == bytes('-1', encoding='utf-8'):
                     num_padded_iter += 1
                     assert item['id'] == bytes(padded_sample['id'], encoding='utf-8')

@@ -420,13 +432,14 @@ def test_nlp_minddataset_reader_basic_padded_samples(add_and_remove_nlp_file):
     partitions(5, 5, 3)
     partitions(9, 8, 2)
 
+
 def test_nlp_minddataset_reader_basic_padded_samples_multi_epoch(add_and_remove_nlp_file):
     columns_list = ["input_ids", "id", "rating"]
     data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]
     padded_sample = data[0]
     padded_sample['id'] = "-1"
-    padded_sample['input_ids'] = np.array([-1,-1,-1,-1], dtype=np.int64)
+    padded_sample['input_ids'] = np.array([-1, -1, -1, -1], dtype=np.int64)
     padded_sample['rating'] = 1.0
     num_readers = 4
     repeat_size = 3

@@ -451,7 +464,9 @@ def test_nlp_minddataset_reader_basic_padded_samples_multi_epoch(add_and_remove_nlp_file):
             for item in data_set.create_dict_iterator():
                 logger.info("-------------- item[id]: {} ------------------------".format(item["id"]))
                 logger.info("-------------- item[rating]: {} --------------------".format(item["rating"]))
-                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(item["input_ids"], item["input_ids"].shape))
+                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(
+                    item["input_ids"],
+                    item["input_ids"].shape))
                 if item['id'] == bytes('-1', encoding='utf-8'):
                     num_padded_iter += 1
                     assert item['id'] == bytes(padded_sample['id'], encoding='utf-8')

@@ -488,7 +503,7 @@ def test_nlp_minddataset_reader_basic_padded_samples_check_whole_reshuffle_result_per_epoch(add_and_remove_nlp_file):
     padded_sample = {}
     padded_sample['id'] = "-1"
-    padded_sample['input_ids'] = np.array([-1,-1,-1,-1], dtype=np.int64)
+    padded_sample['input_ids'] = np.array([-1, -1, -1, -1], dtype=np.int64)
     padded_sample['rating'] = 1.0
     num_readers = 4
     repeat_size = 3
@@ -512,14 +527,15 @@ def test_nlp_minddataset_reader_basic_padded_samples_check_whole_reshuffle_result_per_epoch(add_and_remove_nlp_file):
                 logger.info("-------------- item[id]: {} ------------------------".format(item["id"]))
                 logger.info("-------------- item[rating]: {} --------------------".format(item["rating"]))
                 logger.info("-------------- item[input_ids]: {}, shape: {} -----------------"
-                                .format(item["input_ids"], item["input_ids"].shape))
+                            .format(item["input_ids"], item["input_ids"].shape))
                 if item['id'] == bytes('-1', encoding='utf-8'):
                     num_padded_iter += 1
                     assert item['id'] == bytes(padded_sample['id'], encoding='utf-8')
                     assert (item['input_ids'] == padded_sample['input_ids']).all()
                     assert (item['rating'] == padded_sample['rating']).all()
                 # save epoch result
-                epoch_result[partition_id][int(inner_num_iter / dataset_size)][inner_num_iter % dataset_size] = item["id"]
+                epoch_result[partition_id][int(inner_num_iter / dataset_size)][inner_num_iter % dataset_size] = item[
+                    "id"]
                 num_iter += 1
                 inner_num_iter += 1
             assert epoch_result[partition_id][0] not in (epoch_result[partition_id][1], epoch_result[partition_id][2])

@@ -651,6 +667,7 @@ def inputs(vectors, maxlen=50):
     segment = [0] * maxlen
     return input_, mask, segment
 
+
 if __name__ == '__main__':
     test_cv_minddataset_reader_basic_padded_samples(add_and_remove_cv_file)
     test_cv_minddataset_partition_padded_samples(add_and_remove_cv_file)
@@ -216,6 +216,7 @@ def test_sampler_chain():
     assert test_config(5, 3) == [3]
     assert test_config(5, 4) == [4]
 
+
 def test_add_sampler_invalid_input():
     manifest_file = "../data/dataset/testManifestData/test5trainimgs.json"
     _ = {(172876, 0): 0, (54214, 0): 1, (54214, 1): 2, (173673, 0): 3, (64631, 1): 4}
@@ -231,7 +232,7 @@ def test_add_sampler_invalid_input():
     sampler = ds.SequentialSampler()
     with pytest.raises(ValueError) as info:
-      data2 = ds.ManifestDataset(manifest_file, sampler=sampler, num_samples=20)
+        data2 = ds.ManifestDataset(manifest_file, sampler=sampler, num_samples=20)
     assert "Conflicting arguments during sampler assignments" in str(info.value)
@@ -19,7 +19,10 @@ import filecmp
 import glob
 import json
 import os
+
 import numpy as np
+from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME
+from util import config_get_set_num_parallel_workers
 
 import mindspore.dataset as ds
 import mindspore.dataset.transforms.c_transforms as c

@@ -27,8 +30,6 @@ import mindspore.dataset.transforms.vision.c_transforms as vision
 from mindspore import log as logger
 from mindspore.dataset.transforms.vision import Inter
-from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME
-from util import config_get_set_num_parallel_workers
 
 
 def test_imagefolder(remove_json_files=True):
     """
@@ -29,7 +29,6 @@ from mindspore.common import ms_function
 context.set_context(mode=context.GRAPH_MODE)
 
-
 grad_by_list = C.GradOperation(get_by_list=True)
 grad_all = C.GradOperation(get_all=True)
 grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

@@ -123,6 +122,7 @@ def test_if_none():
     net = Net(z)
     assert np.all(net(x, y).asnumpy() == y.asnumpy())
 
+
 def test_if_str_is_not_none_right():
     class Net(nn.Cell):
         def __init__(self, z: str):
@@ -455,8 +455,10 @@ def test_parser_switch_layer_switch_in_bprop():
             super(OneInputBprop, self).__init__()
             self.op = P.ReLU()
             self.funcs = funcs
+
         def construct(self, i, x):
-             return self.op(x)
+            return self.op(x)
+
         def bprop(self, i, x, out, dout):
             return i, self.funcs[i](x, dout)

@@ -475,6 +477,7 @@ def test_parser_switch_layer_switch_in_bprop():
         def construct(self, x, y):
             return self.op(x, y)
 
+
     func1 = Add()
     func2 = Mul()
     funcs = (func1, func2)
@@ -572,6 +575,7 @@ def test_switch_layer_env_eliminate():
             weights = self.weights
             grad = self.grad_op(self.net, weights)(x, index)
             return grad
+
     net = Net()
     net2 = NetGrad(net)
     x = Tensor(np.ones((3, 1, 12, 12)), ms.float32)

@@ -601,6 +605,7 @@ def test_switch_layer_single_layer():
             weights = self.weights
             grad = self.grad_op(self.net, weights)(x, index)
             return grad
+
     net = Net()
     net2 = NetGrad(net)
     x = Tensor(np.ones((3, 1, 12, 12)), ms.float32)

@@ -638,6 +643,7 @@ def test_if_nested_compile():
             else:
                 res = self.squre(self.value)
             return res
+
     x = Tensor(1.0, dtype=ms.float32)
     y = Tensor(2.0, dtype=ms.float32)
     net = Net()

@@ -660,6 +666,7 @@ def test_if_inside_for():
             else:
                 res = res - y
             return res
+
     c1 = Tensor(1, dtype=ms.int32)
     c2 = Tensor(1, dtype=ms.int32)
     net = Net()

@@ -671,6 +678,7 @@ def test_while_in_while():
     c2 = Tensor(2, dtype=ms.int32)
     c3 = Tensor(3, dtype=ms.int32)
     c4 = Tensor(4, dtype=ms.int32)
+
     @ms_function
     def while_in_while(x, y, z, u):
         out = c4

@@ -683,6 +691,7 @@ def test_while_in_while():
             out = out + 3
         return out
+
     while_in_while(c1, c2, c3, c4)
@@ -692,6 +701,7 @@ def test_tensor_cond():
             super(Net, self).__init__()
             self.t = Tensor(np.array(0, np.bool))
             self.t1 = Tensor(np.array([True], np.bool))
+
         def construct(self, x, y):
             t = 0
             if self.t:

@@ -703,18 +713,19 @@ def test_tensor_cond():
             else:
                 t = t + x * y
             return t
+
     x = Tensor(np.ones([6, 8, 10], np.int32))
     y = Tensor(np.ones([6, 8, 10], np.int32))
     net = Net()
     out = net(x, y)
 
 
 def test_tensor_cond_exception():
     class Net(nn.Cell):
         def __init__(self):
             super(Net, self).__init__()
             self.t = Tensor(np.array([True, False], np.bool))
 
         def construct(self, x, y):
             t = 0
             if self.t:

@@ -722,19 +733,20 @@ def test_tensor_cond_exception():
             else:
                 t = t - x / y
             return t
+
     x = Tensor(np.ones([6, 8, 10], np.int32))
     y = Tensor(np.ones([6, 8, 10], np.int32))
     net = Net()
     with pytest.raises(ValueError):
         out = net(x, y)
 
 
 def test_while_scalar():
     class Net(nn.Cell):
         def __init__(self):
             super(Net, self).__init__()
             self.x = 10
 
         def construct(self, x, y):
             i = 0
             t = 0
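test_tensor_cond_exception expects a ValueError because branching on a multi-element tensor is ambiguous. numpy arrays raise the same way, which makes the intent easy to demonstrate outside the framework:

```python
import numpy as np
import pytest

t = np.array([True, False])
with pytest.raises(ValueError):
    # "The truth value of an array with more than one element is ambiguous"
    if t:
        pass
```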
@@ -742,17 +754,20 @@ def test_while_scalar():
                 t = t + x + y
                 i = i + 1
             return t
+
     net = Net()
     x = Tensor(np.ones([6, 8, 10], np.int32))
     y = Tensor(np.ones([6, 8, 10], np.int32))
     out = net(x, y)
 
+
 def test_while_tensor():
     class Net(nn.Cell):
         def __init__(self):
             super(Net, self).__init__()
             self.t = Tensor(np.ones([6, 8, 10], np.int32))
             self.count = Tensor(np.array([10], np.int32))
+
         def construct(self, x, y):
             i = 0
             t = self.t

@@ -760,6 +775,7 @@ def test_while_tensor():
                 t = t + x + y
                 i = i + 1
             return t
+
     net = Net()
     x = Tensor(np.ones([6, 8, 10], np.int32))
     y = Tensor(np.ones([6, 8, 10], np.int32))

@@ -770,7 +786,7 @@ def test_large_for_loop():
     class Net(nn.Cell):
         def __init__(self):
             super(Net, self).__init__()
-            self.flatten = P.ReLU() #nn.Flatten()
+            self.flatten = P.ReLU()  # nn.Flatten()
 
         def construct(self, x):
             for elem in range(1, 1900):

@@ -791,7 +807,7 @@ def test_large_for_loop_with_continue_break():
     class Net(nn.Cell):
         def __init__(self):
             super(Net, self).__init__()
-            self.flatten = P.ReLU() #nn.Flatten()
+            self.flatten = P.ReLU()  # nn.Flatten()
 
         def construct(self, x):
             idx = 0

@@ -854,7 +870,7 @@ def test_tensor_all_construct_lack_branch():
             if input1.all():
                 return self.logicaland(input1, input2)
             while input1.any():
-                 return self.logicalor(input1, input2)
+                return self.logicalor(input1, input2)
             # NOTICE: here missing return statement, default return None
 
     input_np_1 = np.random.choice([True], size=(2, 3, 4, 5))
@@ -891,28 +907,29 @@ def test_parser_switch_layer_func_primitive():
 def test_recursive_call():
     class Net(nn.Cell):
         """ Net definition """
+
         def __init__(self):
             super(Net, self).__init__()
             self.fc = nn.Dense(10, 10)  # padding=0
-            #self.net2 = Net2()
+            # self.net2 = Net2()
 
         def construct(self, x):
             net2 = Net2()
             x = net2(x)
             out = self.fc(x)
             return out
 
     class Net2(nn.Cell):
         def __init__(self):
             super(Net2, self).__init__()
             self.net = Net()
             self.fc = nn.Dense(10, 10)
 
         def construct(self, x):
             x = self.net(x)
             out = self.fc(x)
             return out
 
     context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
     old_max_call_depth = context.get_context('max_call_depth')
     context.set_context(max_call_depth=80)
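test_recursive_call shrinks max_call_depth to 80 so the mutually recursive Net/Net2 pair hits the graph compiler's call-depth guard quickly instead of exhausting the native stack. Plain Python has the analogous interpreter guard; a rough analogy, not MindSpore API:

```python
import sys


def ping(n):
    return pong(n + 1)


def pong(n):
    return ping(n + 1)


old_limit = sys.getrecursionlimit()
sys.setrecursionlimit(80)  # analogous to context.set_context(max_call_depth=80)
try:
    ping(0)
except RecursionError as err:
    print("recursion bounded:", err)
finally:
    sys.setrecursionlimit(old_limit)
```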
@@ -949,7 +966,6 @@ def test_switch_layer_shape_join_failed():
     funcs = (func1, func2)
     net = AddFuncNet(funcs, func3)
-
     inp = Tensor(np.random.randn(2, 3, 4, 5).astype(np.float32))

@@ -980,7 +996,6 @@ def test_switch_layer_dtype_join_failed():
             x = self.op(x)
             return x
 
-
     func1 = nn.ReLU()
     func2 = Cast(mstype.int32)
     funcs = (func1, func2)
@@ -14,8 +14,8 @@
 # ============================================================================
 """ test math ops """
 import functools
+
 import numpy as np
-import pytest
 
 import mindspore as ms
 import mindspore.context as context

@@ -31,6 +31,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
     import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
 from ....mindspore_test_framework.pipeline.forward.verify_exception \
     import pipeline_for_verify_exception_for_case_by_case_config
+
 context.set_context(mode=context.GRAPH_MODE)
 
 # pylint: disable=W0613

@@ -35,7 +35,6 @@ from ....mindspore_test_framework.pipeline.gradient.compile_gradient \
     import pipeline_for_compile_grad_ge_graph_for_case_by_case_config
 from ....ops_common import convert
 
-
 grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)
@@ -266,6 +265,7 @@ class ScatterNdSub(nn.Cell):
         out = self.scatter_nd_sub(self.ref, indices, updates)
         return out
 
+
 class ScatterNdAdd(nn.Cell):
     """ScatterNdAdd net definition"""

@@ -311,7 +311,7 @@ class ScatterDiv(nn.Cell):
     def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
         super(ScatterDiv, self).__init__()
         self.scatter_div = P.ScatterDiv(use_locking)
-        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)*10), name="ref")
+        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype) * 10), name="ref")
 
     def construct(self, indices, updates):
         out = self.scatter_div(self.ref, indices, updates)

@@ -633,7 +633,7 @@ class CTCGreedyDecoderNet(nn.Cell):
         self.assert_op = P.Assert(300)
 
     def construct(self, inputs, sequence_length):
-        out = self.ctc_greedy_decoder(inputs,sequence_length)
+        out = self.ctc_greedy_decoder(inputs, sequence_length)
         self.assert_op(True, (out[0], out[1], out[2], out[3]))
         return out[2]

@@ -711,12 +711,13 @@ class BasicLSTMCellNet(nn.Cell):
     def construct(self, x, h, c, w, b):
         return self.lstm(x, h, c, w, b)
 
+
 class EditDistance(nn.Cell):
     def __init__(self, hypothesis_shape, truth_shape, normalize=True):
         super(EditDistance, self).__init__()
         self.edit_distance = P.EditDistance(normalize)
         self.hypothesis_shape = hypothesis_shape
-        self.truth_shape =truth_shape
+        self.truth_shape = truth_shape
 
     def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):
         return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,
@@ -20,13 +20,11 @@ import mindspore.context as context
 from mindspore import Tensor
 from mindspore import amp
 from mindspore import nn
-from mindspore.train import Model
+from mindspore.communication.management import init
 from mindspore.context import ParallelMode
-from mindspore.common import dtype as mstype
+from mindspore.train import Model
+
 from ....dataset_mock import MindData
-from mindspore.parallel._auto_parallel_context import auto_parallel_context
-from mindspore.communication.management import init
-from tests.ut.python.model.resnet import resnet50
 
 
 def setup_module(module):
     _ = module

@@ -144,6 +142,7 @@ def test_compile_model_train_O2():
     # not actual run, the metrics step will fail, check if compile ok.
     model.eval(dataset)
 
+
 def test_compile_model_train_O2_parallel():
     dataset_types = (np.float32, np.float32)
     dataset_shapes = ((16, 16), (16, 16))
@@ -141,6 +141,7 @@ def test_init_abnormal():
     with py.raises(TypeError):
         init.initializer([''], [5, 4], ms.float32)
 
+
 def test_initializer_reinit():
     weights = init.initializer("XavierUniform", shape=(10, 1, 10, 10), dtype=ms.float16)
     assert weights.dtype == ms.float16

@@ -152,7 +153,8 @@ def test_initializer_reinit():
     weights = init.initializer(weights, (10, 1))
     assert weights.dtype == ms.float16
     assert weights.shape == (10, 1)
 
+
 def test_init_xavier_uniform():
     """ test_init_xavier_uniform """
     gain = 1.2