You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_batch_matmul.py 5.5 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
  25. @pytest.mark.level0
  26. @pytest.mark.platform_x86_gpu_training
  27. @pytest.mark.env_onecard
  28. class BatchMatMulNet(nn.Cell):
  29. def __init__(self, transpose_a=False, transpose_b=False):
  30. super(BatchMatMulNet, self).__init__()
  31. self.batch_matmul = P.BatchMatMul(transpose_a, transpose_b)
  32. def construct(self, x, y):
  33. return self.batch_matmul(x, y)
  34. def test_4D():
  35. input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
  36. input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
  37. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  38. net = BatchMatMulNet()
  39. output = net(input_x, input_y)
  40. expect = [[[[20, 23, 26, 29]],
  41. [[200, 212, 224, 236]],
  42. [[596, 617, 638, 659]],
  43. [[1208, 1238, 1268, 1298]]],
  44. [[[2036, 2075, 2114, 2153]],
  45. [[3080, 3128, 3176, 3224]],
  46. [[4340, 4397, 4454, 4511]],
  47. [[5816, 5882, 5948, 6014]]]]
  48. assert (output.asnumpy() == expect).all()
  49. @pytest.mark.level0
  50. @pytest.mark.platform_x86_gpu_training
  51. @pytest.mark.env_onecard
  52. def test_4D_transpose_a():
  53. input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
  54. input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
  55. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  56. net = BatchMatMulNet(transpose_a=True)
  57. output = net(input_x, input_y)
  58. expect = [[[[20, 23, 26, 29]],
  59. [[200, 212, 224, 236]],
  60. [[596, 617, 638, 659]],
  61. [[1208, 1238, 1268, 1298]]],
  62. [[[2036, 2075, 2114, 2153]],
  63. [[3080, 3128, 3176, 3224]],
  64. [[4340, 4397, 4454, 4511]],
  65. [[5816, 5882, 5948, 6014]]]]
  66. assert (output.asnumpy() == expect).all()
  67. @pytest.mark.level0
  68. @pytest.mark.platform_x86_gpu_training
  69. @pytest.mark.env_onecard
  70. def test_4D_transpose_b():
  71. input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
  72. input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
  73. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  74. net = BatchMatMulNet(transpose_b=True)
  75. output = net(input_x, input_y)
  76. expect = [[[[5, 14, 23, 32]],
  77. [[158, 194, 230, 266]],
  78. [[527, 590, 653, 716]],
  79. [[1112, 1202, 1292, 1382]]],
  80. [[[1913, 2030, 2147, 2264]],
  81. [[2930, 3074, 3218, 3362]],
  82. [[4163, 4334, 4505, 4676]],
  83. [[5612, 5810, 6008, 6206]]]]
  84. assert (output.asnumpy() == expect).all()
  85. @pytest.mark.level0
  86. @pytest.mark.platform_x86_gpu_training
  87. @pytest.mark.env_onecard
  88. def test_4D_transpose_ab():
  89. input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
  90. input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
  91. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  92. net = BatchMatMulNet(transpose_a=True, transpose_b=True)
  93. output = net(input_x, input_y)
  94. expect = [[[[5, 14, 23, 32]],
  95. [[158, 194, 230, 266]],
  96. [[527, 590, 653, 716]],
  97. [[1112, 1202, 1292, 1382]]],
  98. [[[1913, 2030, 2147, 2264]],
  99. [[2930, 3074, 3218, 3362]],
  100. [[4163, 4334, 4505, 4676]],
  101. [[5612, 5810, 6008, 6206]]]]
  102. assert (output.asnumpy() == expect).all()
  103. class BatchMatMulNet(nn.Cell):
  104. def __init__(self, transpose_a=False, transpose_b=False):
  105. super(BatchMatMulNet, self).__init__()
  106. self.batch_matmul = P.BatchMatMul(transpose_a, transpose_b)
  107. def construct(self, x, y):
  108. return self.batch_matmul(x, y)
  109. def test_4D_fp16():
  110. input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16)
  111. input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16)
  112. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  113. net = BatchMatMulNet()
  114. output = net(input_x, input_y)
  115. expect = [[[[20, 23, 26, 29]],
  116. [[200, 212, 224, 236]],
  117. [[596, 617, 638, 659]],
  118. [[1208, 1238, 1268, 1298]]],
  119. [[[2036, 2075, 2114, 2153]],
  120. [[3080, 3128, 3176, 3224]],
  121. [[4340, 4397, 4454, 4511]],
  122. [[5816, 5882, 5948, 6014]]]]
  123. assert (output.asnumpy() == expect).all()