You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_gpu_lenet.py 6.3 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import os
  16. import numpy as np
  17. import pytest
  18. import mindspore.context as context
  19. import mindspore.dataset as ds
  20. import mindspore.dataset.transforms.c_transforms as C
  21. import mindspore.dataset.transforms.vision.c_transforms as CV
  22. import mindspore.nn as nn
  23. from mindspore import Tensor
  24. from mindspore.common import dtype as mstype
  25. from mindspore.dataset.transforms.vision import Inter
  26. from mindspore.model_zoo.lenet import LeNet5
  27. from mindspore.nn import Dense, TrainOneStepCell, WithLossCell
  28. from mindspore.nn.metrics import Accuracy
  29. from mindspore.nn.optim import Momentum
  30. from mindspore.ops import operations as P
  31. from mindspore.train import Model
  32. from mindspore.train.callback import LossMonitor
# Run every test in this file in graph mode on the GPU backend.
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  34. class LeNet(nn.Cell):
  35. def __init__(self):
  36. super(LeNet, self).__init__()
  37. self.relu = P.ReLU()
  38. self.batch_size = 1
  39. weight1 = Tensor(np.ones([6, 3, 5, 5]).astype(np.float32) * 0.01)
  40. weight2 = Tensor(np.ones([16, 6, 5, 5]).astype(np.float32) * 0.01)
  41. self.conv1 = nn.Conv2d(3, 6, (5, 5), weight_init=weight1, stride=1, padding=0, pad_mode='valid')
  42. self.conv2 = nn.Conv2d(6, 16, (5, 5), weight_init=weight2, pad_mode='valid', stride=1, padding=0)
  43. self.pool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="valid")
  44. self.reshape = P.Reshape()
  45. self.reshape1 = P.Reshape()
  46. self.fc1 = Dense(400, 120)
  47. self.fc2 = Dense(120, 84)
  48. self.fc3 = Dense(84, 10)
  49. def construct(self, input_x):
  50. output = self.conv1(input_x)
  51. output = self.relu(output)
  52. output = self.pool(output)
  53. output = self.conv2(output)
  54. output = self.relu(output)
  55. output = self.pool(output)
  56. output = self.reshape(output, (self.batch_size, -1))
  57. output = self.fc1(output)
  58. output = self.fc2(output)
  59. output = self.fc3(output)
  60. return output
  61. def multisteplr(total_steps, gap, base_lr=0.9, gamma=0.1, dtype=mstype.float32):
  62. lr = []
  63. for step in range(total_steps):
  64. lr_ = base_lr * gamma ** (step // gap)
  65. lr.append(lr_)
  66. return Tensor(np.array(lr), dtype)
  67. @pytest.mark.level0
  68. @pytest.mark.platform_x86_gpu_training
  69. @pytest.mark.env_onecard
  70. def test_train_lenet():
  71. epoch = 100
  72. net = LeNet()
  73. momentum = 0.9
  74. learning_rate = multisteplr(epoch, 30)
  75. optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
  76. criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
  77. net_with_criterion = WithLossCell(net, criterion)
  78. train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer
  79. train_network.set_train()
  80. losses = []
  81. for i in range(epoch):
  82. data = Tensor(np.ones([net.batch_size, 3, 32, 32]).astype(np.float32) * 0.01)
  83. label = Tensor(np.ones([net.batch_size]).astype(np.int32))
  84. loss = train_network(data, label)
  85. losses.append(loss)
  86. print(losses)
  87. def create_dataset(data_path, batch_size=32, repeat_size=1,
  88. num_parallel_workers=1):
  89. """
  90. create dataset for train or test
  91. """
  92. # define dataset
  93. mnist_ds = ds.MnistDataset(data_path)
  94. resize_height, resize_width = 32, 32
  95. rescale = 1.0 / 255.0
  96. shift = 0.0
  97. rescale_nml = 1 / 0.3081
  98. shift_nml = -1 * 0.1307 / 0.3081
  99. # define map operations
  100. resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode
  101. rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
  102. rescale_op = CV.Rescale(rescale, shift)
  103. hwc2chw_op = CV.HWC2CHW()
  104. type_cast_op = C.TypeCast(mstype.int32)
  105. # apply map operations on images
  106. mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
  107. mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
  108. mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
  109. mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
  110. mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
  111. # apply DatasetOps
  112. buffer_size = 10000
  113. mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
  114. mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
  115. mnist_ds = mnist_ds.repeat(repeat_size)
  116. return mnist_ds
  117. @pytest.mark.level0
  118. @pytest.mark.platform_x86_gpu_training
  119. @pytest.mark.env_onecard
  120. def test_train_and_eval_lenet():
  121. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  122. network = LeNet5(10)
  123. net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
  124. net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
  125. model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
  126. print("============== Starting Training ==============")
  127. ds_train = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist', "train"), 32, 1)
  128. model.train(1, ds_train, callbacks=[LossMonitor()], dataset_sink_mode=True)
  129. print("============== Starting Testing ==============")
  130. ds_eval = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist', "test"), 32, 1)
  131. acc = model.eval(ds_eval, dataset_sink_mode=True)
  132. print("============== Accuracy:{} ==============".format(acc))