
_utils.py 6.7 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils of auto parallel"""

import numpy as np
from mindspore._c_expression import reset_op_id
from mindspore.common.tensor import Tensor
from mindspore.common.dtype import dtype_to_nptype
from mindspore.common import dtype as mstype
from mindspore.communication.management import get_group_size, get_rank
from mindspore.parallel._auto_parallel_context import auto_parallel_context


def _get_parallel_mode():
    """Get parallel mode."""
    return auto_parallel_context().get_parallel_mode()


def _get_full_batch():
    """Get whether to use full_batch."""
    return auto_parallel_context().get_full_batch()


def _need_to_full():
    """Check whether the input needs to be expanded to the full-batch shape or tensor."""
    parallel_mode = _get_parallel_mode()
    full_batch = _get_full_batch()
    need = ((parallel_mode in ("semi_auto_parallel", "auto_parallel"))
            and (not full_batch))
    return need


def _to_full_shapes(shapes, device_num):
    """Expand the batch dimension of each shape by device_num, adapting to the MindSpore minddata graph solution."""
    new_shapes = []
    for shape in shapes:
        new_shape = ()
        for i, item in enumerate(shape):
            if i == 0:
                new_shape += (item * device_num,)
            else:
                new_shape += (item,)
        new_shapes.append(new_shape)
    return new_shapes
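
# Illustrative sketch (assumed shapes, not part of the original file): with 8 devices,
# _to_full_shapes([(32, 3, 224, 224), (32,)], 8) returns [(256, 3, 224, 224), (256,)];
# only the leading (batch) dimension is multiplied by device_num.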


def _to_full_tensor(elem, device_num, global_rank, scaling_sens=None):
    """Convert numpy arrays to tensors and expand the batch dimension by device_num, adapting to the
    feed-data-from-host solution."""
    lst = []
    if not isinstance(elem, (tuple, list)):
        elem = [elem]
    if global_rank >= device_num:
        raise ValueError("The global rank must be smaller than device number, the global rank is {}, "
                         "the device num is {}".format(global_rank, device_num))
    for data in elem:
        if isinstance(data, np.ndarray):
            data = Tensor(data)
        if not isinstance(data, Tensor):
            raise ValueError("elements in tensors must be Tensor")
        shape_ = data.shape
        type_ = data.dtype
        new_shape = ()
        batchsize_per_device = 1
        for i, item in enumerate(shape_):
            if i == 0:
                new_shape += (item * device_num,)
                batchsize_per_device = item
            else:
                new_shape += (item,)
        new_tensor_numpy = np.zeros(new_shape, dtype_to_nptype(type_))
        start = global_rank * batchsize_per_device
        new_tensor_numpy[start: start + batchsize_per_device] = data.asnumpy()
        new_tensor = Tensor(new_tensor_numpy)
        lst.append(new_tensor)
    if scaling_sens:
        lst.append(Tensor(scaling_sens, mstype.float32))
    return tuple(lst)
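
# Illustrative sketch (assumed values, not part of the original file): with device_num=4 and
# global_rank=1, a local batch of shape (8, 10) is copied into rows 8..15 of a zero-filled
# (32, 10) tensor, so each rank contributes only its own slice of the full batch.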


def _get_mirror_mean():
    """Get whether mirror_mean is enabled."""
    return auto_parallel_context().get_mirror_mean()


def _get_device_num():
    """Get the device num."""
    parallel_mode = auto_parallel_context().get_parallel_mode()
    if parallel_mode == "stand_alone":
        device_num = 1
        return device_num

    if auto_parallel_context().get_device_num_is_set() is False:
        device_num = get_group_size()
    else:
        device_num = auto_parallel_context().get_device_num()

    return device_num


def _get_global_rank():
    """Get the global rank."""
    parallel_mode = auto_parallel_context().get_parallel_mode()
    if parallel_mode == "stand_alone":
        global_rank = 0
        return global_rank

    if auto_parallel_context().get_global_rank_is_set() is False:
        global_rank = get_rank()
    else:
        global_rank = auto_parallel_context().get_global_rank()

    return global_rank


def _get_parameter_broadcast():
    """Get the parameter broadcast."""
    parallel_mode = auto_parallel_context().get_parallel_mode()
    if parallel_mode == "stand_alone":
        parameter_broadcast = False
        return parameter_broadcast

    if auto_parallel_context().get_parameter_broadcast_is_set() is True:
        parameter_broadcast = auto_parallel_context().get_parameter_broadcast()
    elif parallel_mode in ("data_parallel", "hybrid_parallel"):
        parameter_broadcast = True
    else:
        parameter_broadcast = False

    return parameter_broadcast
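
# For example (assumed behavior summary, not part of the original file): in "data_parallel" mode
# with no explicit setting, _get_parameter_broadcast() returns True; in "auto_parallel" mode it
# returns False unless parameter_broadcast was set explicitly in the auto-parallel context.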


def _device_number_check(parallel_mode, device_number):
    """
    Check the device number.

    Args:
        parallel_mode (str): The parallel mode.
        device_number (int): The device number.
    """
    if parallel_mode == "stand_alone" and device_number != 1:
        raise ValueError("If parallel_mode is stand_alone, device_number must be 1, "
                         "device_number: {0}, parallel_mode:{1}".format(device_number, parallel_mode))


def _parameter_broadcast_check(parallel_mode, parameter_broadcast):
    """
    Check parameter broadcast.

    Note:
        If the parallel mode is semi_auto_parallel or auto_parallel, parameter broadcast is not supported. Use the
        same random seed to make sure that parameters on multiple devices are the same.

    Args:
        parallel_mode (str): The parallel mode.
        parameter_broadcast (bool): The parameter broadcast.

    Raises:
        ValueError: If parameter broadcast is enabled but the parallel mode is "stand_alone",
            "semi_auto_parallel" or "auto_parallel".
    """
    if parameter_broadcast is True and parallel_mode in ("stand_alone", "semi_auto_parallel", "auto_parallel"):
        raise ValueError("stand_alone, semi_auto_parallel and auto_parallel "
                         "do not support parameter broadcast, parallel_mode: {0}, parameter_broadcast:{1}"
                         .format(parallel_mode, parameter_broadcast))


def _get_python_op(op_name, op_path, instance_name, arglist):
    """Get python operator."""
    module = __import__(op_path, fromlist=["None"])
    cls = getattr(module, op_name)
    op = cls(*arglist)
    op.set_prim_instance_name(instance_name)
    return op
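
# Illustrative usage sketch (assumed operator name and path, not part of the original file):
#   relu = _get_python_op("ReLU", "mindspore.ops.operations", "parallel_relu", ())
# This imports mindspore.ops.operations, instantiates ReLU(), and tags the instance with the
# name "parallel_relu" via set_prim_instance_name.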


def _reset_op_id():
    """Reset op id."""
    reset_op_id()