You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_gpu_svi_vae.py 4.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import os
  16. import mindspore.common.dtype as mstype
  17. import mindspore.dataset as ds
  18. import mindspore.dataset.transforms.vision.c_transforms as CV
  19. import mindspore.nn as nn
  20. from mindspore import context, Tensor
  21. from mindspore.ops import operations as P
  22. from mindspore.nn.probability.dpn import VAE
  23. from mindspore.nn.probability.infer import ELBO, SVI
# Run in graph mode on the GPU backend; graph dumping is disabled.
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
# (batch, channels, height, width); -1 lets Reshape infer the batch dimension.
IMAGE_SHAPE = (-1, 1, 32, 32)
# Path to the MNIST training split on the test host.
image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train")
  27. class Encoder(nn.Cell):
  28. def __init__(self):
  29. super(Encoder, self).__init__()
  30. self.fc1 = nn.Dense(1024, 800)
  31. self.fc2 = nn.Dense(800, 400)
  32. self.relu = nn.ReLU()
  33. self.flatten = nn.Flatten()
  34. def construct(self, x):
  35. x = self.flatten(x)
  36. x = self.fc1(x)
  37. x = self.relu(x)
  38. x = self.fc2(x)
  39. x = self.relu(x)
  40. return x
  41. class Decoder(nn.Cell):
  42. def __init__(self):
  43. super(Decoder, self).__init__()
  44. self.fc1 = nn.Dense(400, 1024)
  45. self.sigmoid = nn.Sigmoid()
  46. self.reshape = P.Reshape()
  47. def construct(self, z):
  48. z = self.fc1(z)
  49. z = self.reshape(z, IMAGE_SHAPE)
  50. z = self.sigmoid(z)
  51. return z
  52. def create_dataset(data_path, batch_size=32, repeat_size=1,
  53. num_parallel_workers=1):
  54. """
  55. create dataset for train or test
  56. """
  57. # define dataset
  58. mnist_ds = ds.MnistDataset(data_path)
  59. resize_height, resize_width = 32, 32
  60. rescale = 1.0 / 255.0
  61. shift = 0.0
  62. # define map operations
  63. resize_op = CV.Resize((resize_height, resize_width)) # Bilinear mode
  64. rescale_op = CV.Rescale(rescale, shift)
  65. hwc2chw_op = CV.HWC2CHW()
  66. # apply map operations on images
  67. mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
  68. mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
  69. mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
  70. # apply DatasetOps
  71. mnist_ds = mnist_ds.batch(batch_size)
  72. mnist_ds = mnist_ds.repeat(repeat_size)
  73. return mnist_ds
  74. def test_svi_vae():
  75. # define the encoder and decoder
  76. encoder = Encoder()
  77. decoder = Decoder()
  78. # define the vae model
  79. vae = VAE(encoder, decoder, hidden_size=400, latent_size=20)
  80. # define the loss function
  81. net_loss = ELBO(latent_prior='Normal', output_prior='Normal')
  82. # define the optimizer
  83. optimizer = nn.Adam(params=vae.trainable_params(), learning_rate=0.001)
  84. # define the training dataset
  85. ds_train = create_dataset(image_path, 128, 1)
  86. net_with_loss = nn.WithLossCell(vae, net_loss)
  87. # define the variational inference
  88. vi = SVI(net_with_loss=net_with_loss, optimizer=optimizer)
  89. # run the vi to return the trained network.
  90. vae = vi.run(train_dataset=ds_train, epochs=5)
  91. # get the trained loss
  92. trained_loss = vi.get_train_loss()
  93. # test function: generate_sample
  94. generated_sample = vae.generate_sample(64, IMAGE_SHAPE)
  95. # test function: reconstruct_sample
  96. for sample in ds_train.create_dict_iterator():
  97. sample_x = Tensor(sample['image'], dtype=mstype.float32)
  98. reconstructed_sample = vae.reconstruct_sample(sample_x)
  99. print('The loss of the trained network is ', trained_loss)
  100. print('The hape of the generated sample is ', generated_sample.shape)
  101. print('The shape of the reconstructed sample is ', reconstructed_sample.shape)