@@ -42,7 +42,6 @@ class for_loop_with_break(Cell):
x *= 3
break
x = x * 2
pass
return x
@@ -71,9 +70,7 @@ class for_loop_with_cont_break(Cell):
if i > 5:
x *= 3
break
x *= 2
x = x * 2
pass
return x
@@ -82,7 +79,7 @@ class for_nested_loop_with_break(Cell):
super().__init__()
def construct(self, x):
for i in range(3):
for _ in range(3):
for j in range(5):
if j > 3:
x *= 2
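The `for i in range(3)` → `for _ in range(3)` change above is pylint's unused-variable cleanup (W0612): the outer counter is never read, so it is renamed to `_`. A minimal runnable sketch of the convention, using the same loop shape (the values here are illustrative, not from the test):

```python
x = 1
for _ in range(3):          # counter is never read, so `_` signals intent
    for j in range(5):      # `j` keeps its name because the body uses it
        if j > 3:           # true only for j == 4: one doubling per pass
            x *= 2
print(x)  # 2**3 == 8
```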
@@ -12,13 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, ms_function
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
@ms_function
@@ -37,7 +33,7 @@ def test_net():
c3 = Tensor([1], mstype.int32)
expect = Tensor([21], mstype.int32)
ret = t1_while(c1, c2, c3)
assert (ret == expect)
assert ret == expect
if __name__ == "__main__":
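The `assert (ret == expect)` → `assert ret == expect` change recurs throughout these hunks: `assert` is a statement, not a function, so the parentheses are redundant, and they become a real bug the moment a message is added. A short sketch with illustrative plain-int values (the actual test compares Tensors):

```python
ret, expect = 21, 21
assert ret == expect                  # fixed, idiomatic form
assert (ret == expect)                # legal but misleading; the parens do nothing

# The trap the cleanup avoids: add a message and the parentheses turn
# the assertion into a 2-tuple, which is always truthy:
# assert (ret == expect, "mismatch")  # would never fail
```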
@@ -17,7 +17,7 @@ import numpy as np
import mindspore.common.dtype as mstype
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, ms_function
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_id=5, device_target="Ascend")
@@ -16,10 +16,8 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter, Model, ms_function
from mindspore.common.initializer import initializer
from mindspore import Tensor, Model, ms_function
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
context.set_context(device_target="Ascend")
@@ -19,8 +19,8 @@ curr_path = os.path.abspath(os.curdir)
file_memreuse = curr_path + "/mem_reuse_check/memreuse.ir"
file_normal = curr_path + "/mem_reuse_check/normal_mem.ir"
checker = os.path.exists(file_memreuse)
assert (checker, True)
assert checker == True
checker = os.path.exists(file_normal)
assert (checker, True)
assert checker == True
checker = filecmp.cmp(file_memreuse, file_normal)
assert (checker, True)
assert checker == True
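Unlike the cosmetic paren removal, `assert (checker, True)` was a live bug: it asserts a two-element tuple, and a non-empty tuple is always truthy, so the check could never fail. A runnable sketch of the difference:

```python
checker = False

assert (checker, True)      # old form: passes even though checker is False
                            # (CPython also emits a SyntaxWarning for this)
try:
    assert checker == True  # new form: actually evaluates the flag
except AssertionError:
    print("failure is now detected")
```

Pylint would still flag the replacement (C0121, singleton comparison); `assert checker` is the fully idiomatic form, but the diff stops at making the assertion live.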
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
import os
import random
import argparse
import numpy as np
from resnet import resnet50
import mindspore.common.dtype as mstype
@@ -31,7 +31,6 @@ from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train.model import Model, ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
random.seed(1)
np.random.seed(1)
@@ -143,11 +142,8 @@ if __name__ == '__main__':
model.train(epoch_size, dataset, callbacks=[ckpoint_cb, loss_cb])
if args_opt.do_eval:
# if args_opt.checkpoint_path:
# param_dict = load_checkpoint(args_opt.checkpoint_path)
# load_param_into_net(net, param_dict)
eval_dataset = create_dataset(1, training=False)
res = model.eval(eval_dataset)
print("result: ", res)
checker = os.path.exists("./memreuse.ir")
assert (checker, True)
assert checker == True
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
import os
import random
import argparse
import numpy as np
from resnet import resnet50
import mindspore.common.dtype as mstype
@@ -31,7 +31,6 @@ from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train.model import Model, ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
random.seed(1)
np.random.seed(1)
@@ -143,11 +142,8 @@ if __name__ == '__main__':
model.train(epoch_size, dataset, callbacks=[ckpoint_cb, loss_cb])
if args_opt.do_eval:
# if args_opt.checkpoint_path:
# param_dict = load_checkpoint(args_opt.checkpoint_path)
# load_param_into_net(net, param_dict)
eval_dataset = create_dataset(1, training=False)
res = model.eval(eval_dataset)
print("result: ", res)
checker = os.path.exists("./normal_memreuse.ir")
assert (checker, True)
assert checker == True
@@ -21,7 +21,7 @@ import pytest
@pytest.mark.env_single
def test_nccl_lenet():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_lenet.py")
assert (return_code == 0)
assert return_code == 0
@pytest.mark.level0
@@ -29,7 +29,7 @@ def test_nccl_lenet():
@pytest.mark.env_single
def test_nccl_all_reduce_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_reduce_op.py")
assert (return_code == 0)
assert return_code == 0
@pytest.mark.level0
@@ -37,7 +37,7 @@ def test_nccl_all_reduce_op():
@pytest.mark.env_single
def test_nccl_all_gather_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_gather_op.py")
assert (return_code == 0)
assert return_code == 0
@pytest.mark.level0
@@ -45,4 +45,4 @@ def test_nccl_all_gather_op():
@pytest.mark.env_single
def test_nccl_reduce_scatter_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_reduce_scatter_op.py")
assert (return_code == 0)
assert return_code == 0
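These NCCL tests shell out with `os.system`, which returns the command's wait status: 0 only when the `mpirun ... pytest` run exits cleanly, nonzero otherwise. Dropping the parentheses changes nothing semantically; `assert return_code == 0` remains the success check. A minimal sketch (assumes a POSIX shell providing `true` and `false`):

```python
import os

assert os.system("true") == 0    # clean exit -> status 0
assert os.system("false") != 0   # exit code 1 -> nonzero wait status
```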
@@ -51,4 +51,4 @@ def test_AllGather():
diff = output.asnumpy() - expect
error = np.ones(shape=expect.shape) * 1.0e-5
assert np.all(diff < error)
assert (output.shape() == expect.shape)
assert output.shape() == expect.shape
@@ -19,7 +19,7 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.communication.management import init, get_group_size
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
@@ -94,8 +94,8 @@ def test_lenet_nccl():
data = Tensor(np.ones([net.batch_size, 3, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([net.batch_size]).astype(np.int32))
start = datetime.datetime.now()
for i in range(epoch):
for step in range(mini_batch):
for _ in range(epoch):
for _ in range(mini_batch):
loss = train_network(data, label)
losses.append(loss.asnumpy())
end = datetime.datetime.now()
@@ -105,4 +105,4 @@ def test_lenet_nccl():
with open("ms_loss.txt", "w") as fo2:
fo2.write("loss:")
fo2.write(str(losses[-5:]))
assert (losses[-1] < 0.01)
assert losses[-1] < 0.01
@@ -54,23 +54,23 @@ def test_ReduceScatter():
reduce_scatter = Net()
output = reduce_scatter()
sum = np.ones([size, 1, 3, 3]).astype(np.float32) * 0
sum_ones = np.ones([size, 1, 3, 3]).astype(np.float32) * 0
for i in range(size):
sum += np.ones([size, 1, 3, 3]).astype(np.float32) * 0.01 * (i + 1)
expect0 = sum[rank: rank + 1]
sum_ones += np.ones([size, 1, 3, 3]).astype(np.float32) * 0.01 * (i + 1)
expect0 = sum_ones[rank: rank + 1]
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert (output[0].shape() == expect0.shape)
assert output[0].shape() == expect0.shape
expect1 = np.ones([1, 1, 3, 3]).astype(np.float32) * 0.01 * size
diff1 = output[1].asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert (output[1].shape() == expect1.shape)
assert output[1].shape() == expect1.shape
expect2 = np.ones([1, 1, 3, 3]).astype(np.float32) * 0.01 * 1
diff2 = output[2].asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert (output[2].shape() == expect2.shape)
assert output[2].shape() == expect2.shape
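The `sum` → `sum_ones` rename fixes pylint's redefined-builtin warning (W0622): binding the name `sum` shadows the builtin for the rest of the scope. A small sketch of the renamed accumulator pattern (shapes copied from the hunk, values illustrative):

```python
import numpy as np

size = 4
sum_ones = np.zeros([size, 1, 3, 3], dtype=np.float32)   # was named `sum`
for i in range(size):
    sum_ones += np.ones([size, 1, 3, 3], dtype=np.float32) * 0.01 * (i + 1)

print(sum(range(size)))  # builtin sum() is still reachable -> 6
```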
@@ -13,7 +13,6 @@
# limitations under the License.
# ============================================================================
import mindspore.nn as nn
from mindspore.nn import Dense
from mindspore.ops import operations as P
@@ -15,10 +15,9 @@
"""train bert network without lossscale"""
import numpy as np
import os
import pytest
from numpy import allclose
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset.engine.datasets as de
@@ -28,7 +27,7 @@ from mindspore import log as logger
from mindspore.common.tensor import Tensor
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepWithLossScaleCell
from mindspore.nn.optim import Momentum
from mindspore.train.callback import Callback, LossMonitor
from mindspore.train.callback import Callback
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindspore.train.model import Model
@@ -12,11 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.nn import Dense
from mindspore.ops import operations as P
@@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import time
import numpy as np
import pytest
import time
import mindspore.nn as nn
from mindspore import context, Tensor, ParameterTuple
@@ -152,10 +151,10 @@ def test_ascend_pynative_lenet():
fw_output = net(inputs)
loss_output = criterion(fw_output, labels)
grads = train_network(inputs, labels)
success = optimizer(grads)
optimizer(grads)
end_time = time.time()
cost_time = end_time - start_time
total_time = total_time + cost_time
print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(), " cost time: ", cost_time)
assert (loss_output.asnumpy() < 0.1)
assert loss_output.asnumpy() < 0.1
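`success = optimizer(grads)` bound a result the test never read, so the hunk keeps the call purely for its side effect. A tiny sketch with a hypothetical stand-in for the optimizer call:

```python
def apply_update(grads):     # hypothetical stand-in for optimizer(grads)
    return len(grads) > 0    # the real call updates parameters as a side effect

apply_update([0.1, 0.2])     # result deliberately discarded: no unused name left
```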
@@ -13,18 +13,15 @@
# limitations under the License.
# ============================================================================
""" test model train """
import numpy as np
import os
import numpy as np
from apply_momentum import ApplyMomentum
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn as wrap
from mindspore import Tensor, Parameter, Model
from mindspore import Tensor, Model
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.train.summary.summary_record import SummaryRecord
@@ -12,12 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import os
import pytest
import random
import shutil
import time
import pytest
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
@@ -76,7 +75,7 @@ class SummaryNet(nn.Cell):
return z
def train_summary_record_scalar_for_1(test_writer, steps, fwd_x, fwd_y):
def train_summary_record_scalar_for_1(test_writer, steps):
net = SummaryNet()
out_me_dict = {}
for i in range(0, steps):
@@ -89,12 +88,9 @@ def train_summary_record_scalar_for_1(test_writer, steps, fwd_x, fwd_y):
return out_me_dict
def me_scalar_summary(steps, tag=None, value=None):
def me_scalar_summary(steps):
with SummaryRecord(SUMMARY_DIR_ME_TEMP) as test_writer:
x = Tensor(np.array([1.1]).astype(np.float32))
y = Tensor(np.array([1.2]).astype(np.float32))
out_me_dict = train_summary_record_scalar_for_1(test_writer, steps, x, y)
out_me_dict = train_summary_record_scalar_for_1(test_writer, steps)
return out_me_dict
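The two hunks above drop parameters the function bodies never read (`fwd_x`/`fwd_y`, `tag`/`value`), pylint's unused-argument warning (W0613). The fix has two halves: trim the signature and trim every call site, including values built only to feed the dead parameters. A sketch with hypothetical names:

```python
def record_scalars(writer, steps):   # was (writer, steps, fwd_x, fwd_y)
    return {step: writer for step in range(steps)}

# Call sites shed the now-dead arguments (and, as in the hunk above,
# the Tensors that existed only to supply them):
print(record_scalars("test_writer", 2))
```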
@@ -13,7 +13,7 @@
# limitations under the License.
# ============================================================================
import numpy as np
import os
from resnet_torch import resnet50
from mindspore import Tensor
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
import os
import random
import argparse
import numpy as np
from resnet import resnet50
import mindspore.common.dtype as mstype
@@ -13,11 +13,11 @@
# limitations under the License.
# ============================================================================
import numpy as np
import os
import pytest
import random
import time
import pytest
import numpy as np
from resnet import resnet50
import mindspore.common.dtype as mstype
@@ -134,7 +134,7 @@ class LossGet(Callback):
return self._loss
def train_process(device_id, epoch_size, num_classes, device_num, batch_size):
def train_process(device_id, epoch_size, num_classes, batch_size):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@@ -181,15 +181,14 @@ def eval(batch_size, num_classes):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_resnet_cifar_1p():
device_num = 1
epoch_size = 1
num_classes = 10
batch_size = 32
device_id = 0
train_process(device_id, epoch_size, num_classes, device_num, batch_size)
train_process(device_id, epoch_size, num_classes, batch_size)
time.sleep(3)
acc = eval(batch_size, num_classes)
os.chdir("../")
os.system("rm -rf " + str(device_id))
print("End training...")
assert (acc['acc'] > 0.35)
assert acc['acc'] > 0.35
@@ -13,10 +13,10 @@
# limitations under the License.
# ============================================================================
import numpy as np
import os
import pytest
import random
import numpy as np
import pytest
from multiprocessing import Process, Queue
from resnet import resnet50
@@ -168,7 +168,7 @@ def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size,
dataset = create_dataset(epoch_size, training=True,
batch_size=batch_size, rank_id=device_id, rank_size=device_num,
enable_hccl=enable_hccl)
batch_num = dataset.get_dataset_size()
loss_cb = LossGet()
model.train(epoch_size, dataset, callbacks=[loss_cb])
q.put(loss_cb.get_loss())
@@ -207,4 +207,4 @@ def test_resnet_cifar_8p():
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
assert (loss < 2.0)
assert loss < 2.0