You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_gpu_lenet.py 6.4 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import os
  17. import pytest
  18. import mindspore.context as context
  19. import mindspore.dataset as ds
  20. import mindspore.dataset.transforms.c_transforms as C
  21. import mindspore.dataset.transforms.vision.c_transforms as CV
  22. import mindspore.nn as nn
  23. from mindspore import Tensor
  24. from mindspore.common import dtype as mstype
  25. from mindspore.common.initializer import initializer
  26. from mindspore.dataset.transforms.vision import Inter
  27. from mindspore.model_zoo.lenet import LeNet5
  28. from mindspore.nn import Dense, TrainOneStepCell, WithLossCell
  29. from mindspore.nn.metrics import Accuracy
  30. from mindspore.nn.optim import Momentum
  31. from mindspore.ops import operations as P
  32. from mindspore.train import Model
  33. from mindspore.train.callback import LossMonitor
# Module-level setup: every test in this file runs in graph mode on the GPU backend.
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  35. class LeNet(nn.Cell):
  36. def __init__(self):
  37. super(LeNet, self).__init__()
  38. self.relu = P.ReLU()
  39. self.batch_size = 1
  40. weight1 = Tensor(np.ones([6, 3, 5, 5]).astype(np.float32) * 0.01)
  41. weight2 = Tensor(np.ones([16, 6, 5, 5]).astype(np.float32) * 0.01)
  42. self.conv1 = nn.Conv2d(3, 6, (5, 5), weight_init=weight1, stride=1, padding=0, pad_mode='valid')
  43. self.conv2 = nn.Conv2d(6, 16, (5, 5), weight_init=weight2, pad_mode='valid', stride=1, padding=0)
  44. self.pool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="valid")
  45. self.reshape = P.Reshape()
  46. self.reshape1 = P.Reshape()
  47. self.fc1 = Dense(400, 120)
  48. self.fc2 = Dense(120, 84)
  49. self.fc3 = Dense(84, 10)
  50. def construct(self, input_x):
  51. output = self.conv1(input_x)
  52. output = self.relu(output)
  53. output = self.pool(output)
  54. output = self.conv2(output)
  55. output = self.relu(output)
  56. output = self.pool(output)
  57. output = self.reshape(output, (self.batch_size, -1))
  58. output = self.fc1(output)
  59. output = self.fc2(output)
  60. output = self.fc3(output)
  61. return output
  62. def multisteplr(total_steps, gap, base_lr=0.9, gamma=0.1, dtype=mstype.float32):
  63. lr = []
  64. for step in range(total_steps):
  65. lr_ = base_lr * gamma ** (step // gap)
  66. lr.append(lr_)
  67. return Tensor(np.array(lr), dtype)
  68. @pytest.mark.level0
  69. @pytest.mark.platform_x86_gpu_training
  70. @pytest.mark.env_onecard
  71. def test_train_lenet():
  72. epoch = 100
  73. net = LeNet()
  74. momentum = initializer(Tensor(np.array([0.9]).astype(np.float32)), [1])
  75. learning_rate = multisteplr(epoch, 30)
  76. optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
  77. criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
  78. net_with_criterion = WithLossCell(net, criterion)
  79. train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer
  80. train_network.set_train()
  81. losses = []
  82. for i in range(epoch):
  83. data = Tensor(np.ones([net.batch_size, 3, 32, 32]).astype(np.float32) * 0.01)
  84. label = Tensor(np.ones([net.batch_size]).astype(np.int32))
  85. loss = train_network(data, label)
  86. losses.append(loss)
  87. print(losses)
  88. def create_dataset(data_path, batch_size=32, repeat_size=1,
  89. num_parallel_workers=1):
  90. """
  91. create dataset for train or test
  92. """
  93. # define dataset
  94. mnist_ds = ds.MnistDataset(data_path)
  95. resize_height, resize_width = 32, 32
  96. rescale = 1.0 / 255.0
  97. shift = 0.0
  98. rescale_nml = 1 / 0.3081
  99. shift_nml = -1 * 0.1307 / 0.3081
  100. # define map operations
  101. resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode
  102. rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
  103. rescale_op = CV.Rescale(rescale, shift)
  104. hwc2chw_op = CV.HWC2CHW()
  105. type_cast_op = C.TypeCast(mstype.int32)
  106. # apply map operations on images
  107. mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
  108. mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
  109. mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
  110. mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
  111. mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
  112. # apply DatasetOps
  113. buffer_size = 10000
  114. mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
  115. mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
  116. mnist_ds = mnist_ds.repeat(repeat_size)
  117. return mnist_ds
  118. @pytest.mark.level0
  119. @pytest.mark.platform_x86_gpu_training
  120. @pytest.mark.env_onecard
  121. def test_train_and_eval_lenet():
  122. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  123. network = LeNet5(10)
  124. net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
  125. net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
  126. model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
  127. print("============== Starting Training ==============")
  128. ds_train = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist', "train"), 32, 1)
  129. model.train(1, ds_train, callbacks=[LossMonitor()], dataset_sink_mode=True)
  130. print("============== Starting Testing ==============")
  131. ds_eval = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist', "test"), 32, 1)
  132. acc = model.eval(ds_eval, dataset_sink_mode=True)
  133. print("============== Accuracy:{} ==============".format(acc))