
test_time_distributed_op.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor

context.set_context(mode=context.GRAPH_MODE, device_target='GPU')


class TestTimeDistributed(nn.Cell):
    """Thin wrapper cell that applies nn.TimeDistributed to the given cell."""

    def __init__(self, cell, time_axis, reshape_with_axis=None):
        super(TestTimeDistributed, self).__init__()
        self.time_distributed = nn.TimeDistributed(cell, time_axis, reshape_with_axis)

    def construct(self, inputs):
        return self.time_distributed(inputs)


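# Each case below follows the same pattern: run the cell once on a plain batch
# to get the expected result, tile that batch along a new time axis, feed it
# through TimeDistributed, and check that every time step reproduces the
# expected result. When reshape_with_axis is given, TimeDistributed merges the
# time axis into that axis so the cell can run in a single reshaped call; the
# first Conv2d case compares with a small tolerance rather than exact equality
# for its floating-point outputs.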
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_conv2d():
    inputs = np.random.randint(0, 10, [32, 12, 10, 10])
    conv2d = nn.Conv2d(12, 24, 4, has_bias=False, weight_init='normal')
    output_expect = conv2d(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(conv2d, time_axis=1, reshape_with_axis=0)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
    print("Conv2D layer wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_maxpool2d():
    inputs = np.random.randint(0, 10, [32, 12, 10, 10])
    pool = nn.MaxPool2d(kernel_size=3, stride=1)
    output_expect = pool(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(pool, time_axis=1, reshape_with_axis=0)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("MaxPooling2D layer wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_dense():
    inputs = np.random.randint(0, 10, [32, 10])
    dense = nn.Dense(10, 6)
    output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([32, 1, 10]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(dense, time_axis=1, reshape_with_axis=0)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("Dense layer wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_dense_with_reshape_axis_not_first():
    inputs = np.random.randint(0, 10, [32, 10])
    dense = nn.Dense(10, 6)
    output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([1, 32, 10]).repeat(6, axis=0)
    time_distributed = TestTimeDistributed(dense, time_axis=0, reshape_with_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[0]):
        assert np.all(output[i, :] == output_expect)
    print("Dense layer wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_argmax():
    inputs = np.random.randint(0, 10, [3, 4])
    argmax = ops.Argmax(output_type=mindspore.int32, axis=1)
    output_expect = argmax(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([3, 1, 4]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(argmax, time_axis=1, reshape_with_axis=0)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i] == output_expect)
    print("Argmax op wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_flatten():
    inputs = np.random.randint(0, 10, [3, 4, 5])
    flatten = nn.Flatten()
    output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(flatten, time_axis=1, reshape_with_axis=0)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("Flatten op wrapped successfully")


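# The remaining cases omit reshape_with_axis, exercising the fallback path in
# which TimeDistributed is expected to apply the wrapped cell to each time
# slice separately; exact equality is used throughout these checks.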
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_conv2d_no_reshape_axis():
    inputs = np.random.randint(0, 10, [32, 12, 10, 10])
    conv2d = nn.Conv2d(12, 24, 4, has_bias=False, weight_init='normal')
    output_expect = conv2d(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(conv2d, time_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("Conv2D layer with no reshape axis wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_maxpool2d_no_reshape_axis():
    inputs = np.random.randint(0, 10, [32, 12, 10, 10])
    pool = nn.MaxPool2d(kernel_size=3, stride=1)
    output_expect = pool(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(pool, time_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("MaxPooling2D layer with no reshape axis wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_dense_no_reshape_axis():
    inputs = np.random.randint(0, 10, [32, 10])
    dense = nn.Dense(10, 6)
    output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([32, 1, 10]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(dense, time_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("Dense layer with no reshape axis wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_argmax_no_reshape_axis():
    inputs = np.random.randint(0, 10, [3, 4])
    argmax = ops.Argmax(output_type=mindspore.int32, axis=1)
    output_expect = argmax(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([3, 1, 4]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(argmax, time_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i] == output_expect)
    print("Argmax op with no reshape axis wrapped successfully")


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_flatten_no_reshape_axis():
    inputs = np.random.randint(0, 10, [3, 4, 5])
    flatten = nn.Flatten()
    output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(flatten, time_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("Flatten op with no reshape axis wrapped successfully")


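# These cases are normally collected and run by pytest, e.g.:
#     pytest -sv test_time_distributed_op.py
# The block below is only a minimal sketch for invoking a few cases directly;
# it assumes a GPU device is available, since device_target is set to 'GPU'
# above.
if __name__ == "__main__":
    test_time_distributed_conv2d()
    test_time_distributed_dense()
    test_time_distributed_flatten()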