You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_comparison_function_info.py 9.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import numpy as np
  15. import mindspore as ms
  16. from mindspore import context
  17. import mindspore.nn as nn
  18. from mindspore.ops import operations as P
  19. from mindspore import Tensor
  20. from tests.ut.python.ops.test_math_ops import VirtualLoss
  21. from mindspore.common.api import _executor
  22. from mindspore.ops import composite as C
  23. class NetWithLoss(nn.Cell):
  24. def __init__(self, network):
  25. super(NetWithLoss, self).__init__()
  26. self.loss = VirtualLoss()
  27. self.network = network
  28. def construct(self, x, y, b):
  29. predict = self.network(x, y, b)
  30. return self.loss(predict)
  31. class GradWrap(nn.Cell):
  32. def __init__(self, network):
  33. super(GradWrap, self).__init__()
  34. self.network = network
  35. def construct(self, x, y, b):
  36. return C.grad_all(self.network)(x, y, b)
  37. def test_matmul_equal():
  38. class Net(nn.Cell):
  39. def __init__(self, strategy1, strategy2):
  40. super().__init__()
  41. self.matmul = P.MatMul().set_strategy(strategy1)
  42. self.equal = P.Equal().set_strategy(strategy2)
  43. def construct(self, x, y, b):
  44. out = self.matmul(x, y)
  45. out = self.equal(out, b)
  46. return out
  47. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  48. strategy1 = ((2, 2), (2, 2))
  49. strategy2 = ((4, 2), (4, 2))
  50. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  51. x = Tensor(np.ones([128, 32]), dtype=ms.float32)
  52. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  53. b = Tensor(np.ones([128, 64]), dtype=ms.float32)
  54. _executor.compile(net, x, y, b)
  55. def test_matmul_not_equal():
  56. class Net(nn.Cell):
  57. def __init__(self, strategy1, strategy2):
  58. super().__init__()
  59. self.matmul = P.MatMul().set_strategy(strategy1)
  60. self.notequal = P.NotEqual().set_strategy(strategy2)
  61. def construct(self, x, y, b):
  62. out = self.matmul(x, y)
  63. out = self.notequal(out, b)
  64. return out
  65. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  66. strategy1 = ((2, 2), (2, 2))
  67. strategy2 = ((4, 2), (4, 2))
  68. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  69. x = Tensor(np.ones([128, 32]), dtype=ms.float32)
  70. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  71. b = Tensor(np.ones([128, 64]), dtype=ms.float32)
  72. _executor.compile(net, x, y, b)
  73. def test_matmul_not_equal_repeated_calculation():
  74. class Net(nn.Cell):
  75. def __init__(self, strategy1, strategy2):
  76. super().__init__()
  77. self.matmul = P.MatMul().set_strategy(strategy1)
  78. self.notequal = P.NotEqual().set_strategy(strategy2)
  79. def construct(self, x, y, b):
  80. out = self.matmul(x, y)
  81. out = self.notequal(out, b)
  82. return out
  83. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  84. strategy1 = ((2, 2), (2, 2))
  85. strategy2 = ((4, 1), (4, 1))
  86. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  87. x = Tensor(np.ones([128, 32]), dtype=ms.float32)
  88. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  89. b = Tensor(np.ones([128, 64]), dtype=ms.float32)
  90. _executor.compile(net, x, y, b)
  91. def test_matmul_maximum():
  92. class Net(nn.Cell):
  93. def __init__(self, strategy1, strategy2):
  94. super().__init__()
  95. self.matmul = P.MatMul().set_strategy(strategy1)
  96. self.maximum = P.Maximum().set_strategy(strategy2)
  97. def construct(self, x, y, b):
  98. out = self.matmul(x, y)
  99. out = self.maximum(out, b)
  100. return out
  101. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  102. strategy1 = ((2, 2), (2, 2))
  103. strategy2 = ((4, 2), (4, 2))
  104. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  105. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  106. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  107. b = Tensor(np.ones([64, 64]), dtype=ms.float32)
  108. _executor.compile(net, x, y, b)
  109. def test_matmul_maximum_broadcast():
  110. class Net(nn.Cell):
  111. def __init__(self, strategy1, strategy2):
  112. super().__init__()
  113. self.matmul = P.MatMul().set_strategy(strategy1)
  114. self.maximum = P.Maximum().set_strategy(strategy2)
  115. def construct(self, x, y, b):
  116. out = self.matmul(x, y)
  117. out = self.maximum(out, b)
  118. return out
  119. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  120. strategy1 = ((2, 2), (2, 2))
  121. strategy2 = ((4, 2), (2, ))
  122. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  123. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  124. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  125. b = Tensor(np.ones([64]), dtype=ms.float32)
  126. _executor.compile(net, x, y, b)
  127. def test_matmul_maximum_broadcast2():
  128. class Net(nn.Cell):
  129. def __init__(self, strategy1, strategy2):
  130. super().__init__()
  131. self.matmul = P.MatMul().set_strategy(strategy1)
  132. self.maximum = P.Maximum().set_strategy(strategy2)
  133. def construct(self, x, y, b):
  134. out = self.matmul(x, y)
  135. out = self.maximum(out, b)
  136. return out
  137. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  138. strategy1 = ((2, 4), (4, 1))
  139. strategy2 = ((4, 1), (1, 2))
  140. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  141. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  142. y = Tensor(np.ones([32, 1]), dtype=ms.float32)
  143. b = Tensor(np.ones([1, 64]), dtype=ms.float32)
  144. _executor.compile(net, x, y, b)
  145. def test_matmul_minimum():
  146. class Net(nn.Cell):
  147. def __init__(self, strategy1, strategy2):
  148. super().__init__()
  149. self.matmul = P.MatMul().set_strategy(strategy1)
  150. self.minimum = P.Minimum().set_strategy(strategy2)
  151. def construct(self, x, y, b):
  152. out = self.matmul(x, y)
  153. out = self.minimum(out, b)
  154. return out
  155. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  156. strategy1 = ((2, 2), (2, 2))
  157. strategy2 = ((4, 2), (4, 2))
  158. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  159. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  160. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  161. b = Tensor(np.ones([64, 64]), dtype=ms.float32)
  162. _executor.compile(net, x, y, b)
  163. def test_matmul_minimum_broadcast():
  164. class Net(nn.Cell):
  165. def __init__(self, strategy1, strategy2):
  166. super().__init__()
  167. self.matmul = P.MatMul().set_strategy(strategy1)
  168. self.minimum = P.Maximum().set_strategy(strategy2)
  169. def construct(self, x, y, b):
  170. out = self.matmul(x, y)
  171. out = self.minimum(out, b)
  172. return out
  173. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  174. strategy1 = ((2, 2), (2, 2))
  175. strategy2 = ((4, 2), (2, ))
  176. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  177. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  178. y = Tensor(np.ones([32, 64]), dtype=ms.float32)
  179. b = Tensor(np.ones([64]), dtype=ms.float32)
  180. _executor.compile(net, x, y, b)
  181. def test_matmul_minimum_broadcast2():
  182. class Net(nn.Cell):
  183. def __init__(self, strategy1, strategy2):
  184. super().__init__()
  185. self.matmul = P.MatMul().set_strategy(strategy1)
  186. self.minimum = P.Minimum().set_strategy(strategy2)
  187. def construct(self, x, y, b):
  188. out = self.matmul(x, y)
  189. out = self.minimum(out, b)
  190. return out
  191. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
  192. strategy1 = ((2, 4), (4, 1))
  193. strategy2 = ((4, 1), (1, 2))
  194. net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
  195. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  196. y = Tensor(np.ones([32, 1]), dtype=ms.float32)
  197. b = Tensor(np.ones([1, 64]), dtype=ms.float32)
  198. _executor.compile(net, x, y, b)
  199. def test_matmul_minimum_auto_parallel():
  200. class Net(nn.Cell):
  201. def __init__(self):
  202. super().__init__()
  203. self.matmul = P.MatMul()
  204. self.minimum = P.Minimum()
  205. def construct(self, x, y, b):
  206. out = self.matmul(x, y)
  207. out = self.minimum(out, b)
  208. return out
  209. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
  210. net = GradWrap(NetWithLoss(Net()))
  211. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  212. y = Tensor(np.ones([32, 1]), dtype=ms.float32)
  213. b = Tensor(np.ones([1, 64]), dtype=ms.float32)
  214. _executor.compile(net, x, y, b)