
comm_ops.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""comm_ops"""
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...communication.management import get_rank, get_group_size, GlobalComm, _get_group
from ...common import dtype as mstype
# Tensor is needed by AllReduce.vm_impl below.
from ...common.tensor import Tensor
from ..primitive import PrimitiveWithInfer, prim_attr_register


class ReduceOp:
    """
    Operation options for reducing tensors.

    There are four kinds of operation options: "SUM", "MAX", "MIN" and "PROD".

    - SUM: Take the sum.
    - MAX: Take the maximum.
    - MIN: Take the minimum.
    - PROD: Take the product.
    """
    SUM = "sum"
    MAX = "max"
    MIN = "min"
    PROD = "prod"
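
# Illustration: ReduceOp values are plain strings handed to the collective
# primitives below, e.g. P.AllReduce(ReduceOp.MAX) reduces with the
# element-wise maximum instead of the default sum.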


class AllReduce(PrimitiveWithInfer):
    """
    Reduces the tensor data across all devices in such a way that all devices will get the same final result.

    Note:
        The operation of AllReduce does not support "prod" currently.
        Tensors must have the same shape and format in all processes participating in the collective.

    Args:
        op (str): Specifies an operation used for element-wise reductions,
            like sum, max, and min. Default: ReduceOp.SUM.
        group (str): The communication group to work on. Default: "hccl_world_group".

    Raises:
        TypeError: If any of `op` and `group` is not a str,
            or fusion is not an integer, or the input's dtype is bool.
        ValueError: If `op` is "prod".

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
        The contents depend on the specified operation.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
        >>> from mindspore.communication import init
        >>> from mindspore.ops.operations.comm_ops import ReduceOp
        >>> import mindspore.ops.operations as P
        >>> init('nccl')
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group")
        >>>
        >>>     def construct(self, x):
        >>>         return self.allreduce_sum(x)
        >>>
        >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
        >>> net = Net()
        >>> output = net(input_)
    """

    @prim_attr_register
    def __init__(self, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
        if not isinstance(op, type(ReduceOp.SUM)):
            raise TypeError("The operation of AllReduce should be str.")
        if op == ReduceOp.PROD:
            raise RuntimeError("The operation of AllReduce 'prod' is not supported yet.")
        if not isinstance(_get_group(group), str):
            raise TypeError("The group of AllReduce should be str.")
        self.op = op
        self.add_prim_attr('group', _get_group(group))
        self.add_prim_attr('fusion', 0)

    def vm_impl(self, x):
        """Implement by vm mode."""
        # In single-process vm mode there is nothing to reduce across devices;
        # the input is returned as a fresh Tensor.
        x = x.asnumpy()
        return Tensor(x)

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        if x_dtype.element_type() == mstype.bool_:
            raise TypeError("AllReduce does not support 'Bool' as the dtype of input!")
        return x_dtype
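
# Worked example (illustrative): with 4 devices each holding x = [1.0, 2.0],
# AllReduce with ReduceOp.SUM leaves every device with [4.0, 8.0]; with
# ReduceOp.MAX every device gets [1.0, 2.0]. The output shape always equals
# the input shape (see infer_shape above).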


class AllGather(PrimitiveWithInfer):
    """
    Gathers tensors from the specified communication group.

    Note:
        Tensors must have the same shape and format in all processes participating in the collective.

    Args:
        group (str): The communication group to work on. Default: "hccl_world_group".

    Raises:
        TypeError: If `group` is not a str.
        ValueError: If the local rank id of the calling process in the group
            is larger than the group's rank size.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor. If the number of devices in the group is N,
        then the shape of output is :math:`(N*x_1, x_2, ..., x_R)`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
        >>> from mindspore.communication import init
        >>> import mindspore.ops.operations as P
        >>> init('nccl')
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.allgather = P.AllGather(group="nccl_world_group")
        >>>
        >>>     def construct(self, x):
        >>>         return self.allgather(x)
        >>>
        >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
        >>> net = Net()
        >>> output = net(input_)
    """

    @prim_attr_register
    def __init__(self, group=GlobalComm.WORLD_COMM_GROUP):
        validator.check_value_type('group', _get_group(group), (str,), self.name)
        self.rank = get_rank(_get_group(group))
        self.rank_size = get_group_size(_get_group(group))
        validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
        self.add_prim_attr('rank_size', self.rank_size)
        self.add_prim_attr('group', _get_group(group))

    def infer_shape(self, x_shape):
        # The gathered tensors are concatenated along dimension 0.
        x_shape[0] = x_shape[0] * self.rank_size
        return x_shape

    def infer_dtype(self, x_dtype):
        if x_dtype.element_type() == mstype.bool_:
            raise TypeError(f"{self.name} does not support 'Bool' as the dtype of input!")
        return x_dtype

    def __call__(self, tensor):
        raise NotImplementedError
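
# Worked example (illustrative): with rank_size = 2 and a (2, 8) input on
# each device, infer_shape yields a (4, 8) output -- device 0's rows followed
# by device 1's rows, identical on every device after the collective.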


class ReduceScatter(PrimitiveWithInfer):
    """
    Reduces and scatters tensors from the specified communication group.

    Note:
        The back propagation of the op is not supported yet. Stay tuned for more.
        Tensors must have the same shape and format in all processes participating in the collective.

    Args:
        op (str): Specifies an operation used for element-wise reductions,
            like sum, max, and min. Default: ReduceOp.SUM.
        group (str): The communication group to work on. Default: "hccl_world_group".

    Raises:
        TypeError: If any of `op` and `group` is not a str.
        ValueError: If the first dimension of the input cannot be divided by the rank size.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
        >>> from mindspore.communication import init
        >>> from mindspore.ops.operations.comm_ops import ReduceOp
        >>> import mindspore.ops.operations as P
        >>> init('nccl')
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.reducescatter = P.ReduceScatter(ReduceOp.SUM, group="nccl_world_group")
        >>>
        >>>     def construct(self, x):
        >>>         return self.reducescatter(x)
        >>>
        >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
        >>> net = Net()
        >>> output = net(input_)
    """

    @prim_attr_register
    def __init__(self, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
        validator.check_value_type('op', op, (type(ReduceOp.SUM),), self.name)
        validator.check_value_type('group', _get_group(group), (str,), self.name)
        self.op = op
        self.rank_size = get_group_size(_get_group(group))
        self.add_prim_attr('rank_size', self.rank_size)
        self.add_prim_attr('group', _get_group(group))

    def infer_shape(self, x_shape):
        if x_shape[0] % self.rank_size != 0:
            raise ValueError(f"For '{self.name}' the first dimension of x should be divisible by rank_size.")
        x_shape[0] = int(x_shape[0] / self.rank_size)
        return x_shape

    def infer_dtype(self, x_dtype):
        if x_dtype.element_type() == mstype.bool_:
            raise TypeError(f"{self.name} does not support 'Bool' as the dtype of input!")
        return x_dtype

    def __call__(self, tensor):
        raise NotImplementedError
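
# Worked example (illustrative): with rank_size = 2 and a (4, 8) input on
# each device, the inputs are first reduced element-wise (e.g. summed), then
# the result is split along dimension 0 and scattered: each device receives a
# (2, 8) slice. Shape-wise, ReduceScatter is the inverse of AllGather.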


class Broadcast(PrimitiveWithInfer):
    """
    Broadcasts the tensor to the whole group.

    Note:
        Tensors must have the same shape and format in all processes participating in the collective.

    Args:
        root_rank (int): Source rank. Required in all processes except the one
            that is sending the data.
        group (str): The communication group to work on. Default: "hccl_world_group".

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
        The contents depend on the data of the `root_rank` device.

    Raises:
        TypeError: If `root_rank` is not an integer or `group` is not a str.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
        >>> from mindspore.communication import init
        >>> import mindspore.ops.operations as P
        >>> init('nccl')
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.broadcast = P.Broadcast(1)
        >>>
        >>>     def construct(self, x):
        >>>         return self.broadcast((x,))
        >>>
        >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
        >>> net = Net()
        >>> output = net(input_)
    """

    @prim_attr_register
    def __init__(self, root_rank, group=GlobalComm.WORLD_COMM_GROUP):
        validator.check_value_type('root_rank', root_rank, (int,), self.name)
        validator.check_value_type('group', _get_group(group), (str,), self.name)
        self.add_prim_attr('group', _get_group(group))

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        if not isinstance(x_dtype, tuple):
            raise TypeError(f"{self.name}'s input should be a tuple!")
        for _ele in x_dtype:
            if _ele.element_type() == mstype.bool_:
                raise TypeError(f"{self.name} does not support 'Bool' as the dtype of input!")
        return x_dtype
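
# Note (illustrative): as infer_dtype above shows, Broadcast takes a *tuple*
# of tensors, which is why the example calls self.broadcast((x,)). With
# root_rank=1, every device's output tuple equals device 1's input tuple.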


class _AlltoAll(PrimitiveWithInfer):
    """
    AlltoAll is a collective operation.

    AlltoAll sends data from all processes to all processes in the specified group. It has two phases:

    - The scatter phase: On each process, the operand is split into split_count blocks along
      split_dim, and the blocks are scattered to all processes, e.g., the ith block is sent to the ith process.
    - The gather phase: Each process concatenates the received blocks along concat_dim.

    Note:
        Tensors must have the same shape and format in all processes participating in the collective.

    Args:
        split_count (int): On each process, the number of blocks to split the operand into.
        split_dim (int): On each process, the dimension along which to split the operand.
        concat_dim (int): On each process, the dimension along which to concatenate the received blocks.
        group (str): The communication group to work on. Default: "hccl_world_group".

    Raises:
        TypeError: If `group` is not a str.
    """

    @prim_attr_register
    def __init__(self, split_count, split_dim, concat_dim, group=GlobalComm.WORLD_COMM_GROUP):
        """init AlltoAll"""
        validator.check_value_type('group', _get_group(group), (str,), self.name)
        self.split_count = split_count
        self.split_dim = split_dim
        self.concat_dim = concat_dim
        self.add_prim_attr('group', _get_group(group))

    def infer_shape(self, x_shape):
        # Received blocks grow concat_dim; splitting shrinks split_dim.
        x_shape[self.concat_dim] = x_shape[self.concat_dim] * self.split_count
        x_shape[self.split_dim] = int(x_shape[self.split_dim] / self.split_count)
        return x_shape

    def infer_dtype(self, x_dtype):
        if x_dtype.element_type() == mstype.bool_:
            raise TypeError(f"{self.name} does not support 'Bool' as the dtype of input!")
        return x_dtype

    def __call__(self, tensor):
        return
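
# Worked shape example (illustrative): with split_count=4, split_dim=1 and
# concat_dim=0, a (2, 8) operand is cut into four (2, 2) blocks along
# dimension 1; after the exchange each process concatenates four received
# blocks along dimension 0, giving an (8, 2) result -- exactly what
# infer_shape above computes.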


class _MirrorOperator(PrimitiveWithInfer):
    """
    Auto parallel virtual operator. Does nothing in forward; does AllReduce and mean in backward. It is only for
    internal use of parallel modules and cannot be called by users.

    Args:
        group (str): The communication group to work on. Default: None.
        dev_num (int): The device number of the group. Default: None.
        mean_flag (bool): Whether to use mean in backward. Default: None.
    """

    @prim_attr_register
    def __init__(self, group=None, dev_num=None, mean_flag=None):
        self.group = group
        self.dev_num = dev_num
        self.mean_flag = mean_flag

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return x_dtype


mirror = _MirrorOperator()


class _VirtualDiv(PrimitiveWithInfer):
    """
    Auto parallel virtual operator. Does nothing in forward; does Div in backward.

    Args:
        divisor (float32): The divisor applied in backward. Default: None.
    """

    @prim_attr_register
    def __init__(self, divisor=None):
        self.divisor = divisor

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return x_dtype


virtual_div = _VirtualDiv()


class _VirtualDataset(PrimitiveWithInfer):
    """
    Auto parallel virtual dataset operator.

    It inserts a Broadcast operator in forward computation and is deleted before backward computation.
    """

    @prim_attr_register
    def __init__(self):
        """init"""

    def infer_shape(self, *args):
        if len(args) == 1:
            return args[0]
        return args

    def infer_dtype(self, *args):
        if len(args) == 1:
            return args[0]
        return args


virtual_dataset = _VirtualDataset()


class _GetTensorSlice(PrimitiveWithInfer):
    """
    Gets a tensor slice by device matrix and tensor map.

    Args:
        dev_mat (tuple): The device matrix of the slice tensor.
        tensor_map (tuple): The tensor map of the slice tensor.
    """

    @prim_attr_register
    def __init__(self):
        """init _GetTensorSlice"""

    def infer_value(self, x, dev_mat, tensor_map):
        from mindspore.parallel._tensor import _load_tensor
        validator.check_value_type("dev_mat", dev_mat, [tuple], self.name)
        validator.check_value_type("tensor_map", tensor_map, [tuple], self.name)
        return _load_tensor(x, dev_mat, tensor_map)
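
# Illustrative note (assumptions about the layout convention): dev_mat
# describes how devices are arranged, e.g. dev_mat=(2, 4) arranges 8 devices
# in a 2x4 grid, and tensor_map says which device-matrix dimension each
# tensor dimension is sliced along, with -1 conventionally meaning the
# dimension is not sliced. The exact index convention is defined by
# mindspore.parallel._tensor._load_tensor.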