# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The context of mindspore, used to configure the current execution environment,
includes the execution mode, execution backend and other feature switches.
"""
import json
import os
import time
import threading
from collections import namedtuple
from types import FunctionType

from mindspore import log as logger
from mindspore._c_expression import MSContext, ms_ctx_param
from mindspore._checkparam import args_type_check, Validator
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
    _reset_auto_parallel_context
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context
from .default_config import __device_target__, __package_name__

__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
           'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
           'get_ps_context', 'reset_ps_context']

GRAPH_MODE = 0
PYNATIVE_MODE = 1
_DEVICE_APP_MEMORY_SIZE = 31  # The max memory size of graph plus variable.
_re_pattern = r'[1-9][0-9]*(\.)?[0-9]*GB|0\.[0-9]*GB'
_k_context = None


def _make_directory(path):
    """Make directory."""
    real_path = None
    if path is None or not isinstance(path, str) or path.strip() == "":
        raise ValueError(f"Input path `{path}` is an invalid type.")

    # convert the relative path to an absolute path
    path = os.path.realpath(path)
    logger.debug("The absolute path is %r", path)

    # check whether the path already exists and has write permissions
    if os.path.exists(path):
        real_path = path
    else:
        # All exceptions need to be caught because creating a directory may be restricted (e.g. permissions)
        logger.debug("The directory(%s) doesn't exist, will create it", path)
        try:
            os.makedirs(path)
            real_path = path
        except PermissionError as e:
            logger.error(f"No write permission on the directory `{path}`, error = {e}")
            raise ValueError(f"No write permission on the directory `{path}`.")
    return real_path


def _get_print_file_name(file_name):
    """Add timestamp suffix to file name. Rename the file name: file_name + "." + time(seconds)."""
    time_second = str(int(time.time()))
    file_name = file_name + "." + time_second
    if os.path.exists(file_name):
        raise ValueError("This file {} already exists.".format(file_name))
    return file_name
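
# Illustrative note (not part of the original comments): for a base name such as "print.pb" this
# helper is expected to return something like "print.pb.1616650000", i.e. the name plus a
# "."-separated Unix-time suffix; the exact number depends on the clock at call time.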


class _ThreadLocalInfo(threading.local):
    """
    Thread-local info used to store thread-local attributes.
    """

    def __init__(self):
        super(_ThreadLocalInfo, self).__init__()
        self._reserve_class_name_in_scope = True

    @property
    def reserve_class_name_in_scope(self):
        """Gets whether to save the network class name in the scope."""
        return self._reserve_class_name_in_scope

    @reserve_class_name_in_scope.setter
    def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
        """Sets whether to save the network class name in the scope."""
        if not isinstance(reserve_class_name_in_scope, bool):
            raise ValueError(
                "Set reserve_class_name_in_scope value must be bool!")
        self._reserve_class_name_in_scope = reserve_class_name_in_scope


_ContextRecord = namedtuple(
    "_ContextRecord", ["is_pynative_mode", "switch_context_fn"])


class _ContextSwitchInfo(threading.local):
    """
    Record of context switch information.

    Args:
        is_pynative (bool): Whether to adopt the PyNative mode.
    """

    def __init__(self, is_pynative):
        super(_ContextSwitchInfo, self).__init__()
        self.context_stack = []
        if is_pynative:
            self.push(True, None)

    def push(self, is_pynative, switch_context_fn):
        """
        Push a context switch record onto the stack.

        Args:
            is_pynative (bool): Whether the context switches to PyNative mode.
            switch_context_fn (Function): A callable that executes the context switch.
        """
        if isinstance(switch_context_fn, FunctionType):
            switch_context_fn()
        self.context_stack.append(
            _ContextRecord(is_pynative, switch_context_fn))

    def pop(self):
        self.context_stack.pop()
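
# Usage sketch (illustrative only): each call to _Context.set_mode() below pushes one record onto
# this thread-local stack, e.g. set_mode(GRAPH_MODE) appends _ContextRecord(False, None) and
# set_mode(PYNATIVE_MODE) appends _ContextRecord(True, None); pop() unwinds the most recent switch.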


class _Context:
    """
    _Context is the environment in which operations are executed.

    Note:
        Creating a context by instantiating the Context object directly is not recommended.
        Use _context() to get the global context instead, since Context is a singleton.
    """
    _instance = None
    _instance_lock = threading.Lock()

    def __init__(self):
        self._thread_local_info = _ThreadLocalInfo()
        self._context_switches = _ContextSwitchInfo(True)
        self._context_handle = MSContext.get_instance()

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance_lock.acquire()
            cls._instance = object.__new__(cls)
            cls._instance_lock.release()
        return cls._instance

    def __getattribute__(self, attr):
        value = object.__getattribute__(self, attr)
        if attr == "_context_handle" and value is None:
            raise ValueError("Context handle is none in context!")
        return value

    def get_param(self, param):
        return self._context_handle.get_param(param)

    def set_param(self, param, value):
        self._context_handle.set_param(param, value)

    def set_mode(self, mode):
        """
        Switch between Graph mode and PyNative mode.

        Args:
            mode (int): GRAPH_MODE or PYNATIVE_MODE.
        """
        if mode == PYNATIVE_MODE:
            if self.enable_debug_runtime:
                self.set_backend_policy("vm")
            self._context_switches.push(True, None)
        elif mode == GRAPH_MODE:
            if self.enable_debug_runtime:
                self.set_backend_policy("ge")
            self._context_switches.push(False, None)
        else:
            raise ValueError(f'The execution mode {mode} is invalid!')
        self.set_param(ms_ctx_param.mode, mode)

    def set_backend_policy(self, policy):
        success = self._context_handle.set_backend_policy(policy)
        if not success:
            raise RuntimeError("Backend policy must be one of ge, vm, ms.")

    def set_save_graphs_path(self, save_graphs_path):
        self.set_param(ms_ctx_param.save_graphs_path, _make_directory(save_graphs_path))

    def set_device_target(self, target):
        valid_targets = ["CPU", "GPU", "Ascend", "Davinci"]
        if target not in valid_targets:
            raise ValueError(f"Target device name {target} is invalid! It must be one of {valid_targets}")
        if target == "Davinci":
            target = "Ascend"
        self.set_param(ms_ctx_param.device_target, target)
        if self.enable_debug_runtime and target == "CPU":
            self.set_backend_policy("vm")

    def set_device_id(self, device_id):
        if device_id < 0 or device_id > 4095:
            raise ValueError(f"Device id must be in [0, 4095], but got {device_id}")
        self.set_param(ms_ctx_param.device_id, device_id)

    def set_max_call_depth(self, max_call_depth):
        if max_call_depth <= 0:
            raise ValueError(f"Max call depth must be greater than 0, but got {max_call_depth}")
        self.set_param(ms_ctx_param.max_call_depth, max_call_depth)

    def set_profiling_options(self, option):
        if not isinstance(option, str):
            raise TypeError("The parameter option must be str.")
        self.set_param(ms_ctx_param.profiling_options, option)

    def set_variable_memory_max_size(self, variable_memory_max_size):
        """Set values of variable_memory_max_size and graph_memory_max_size."""
        if not Validator.check_str_by_regular(variable_memory_max_size, _re_pattern):
            raise ValueError("Context param variable_memory_max_size should be in correct format! Such as \"5GB\"")
        if int(variable_memory_max_size[:-2]) > _DEVICE_APP_MEMORY_SIZE:
            raise ValueError("Context param variable_memory_max_size should not be greater than 31GB.")
        variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
        graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
        graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
        self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
        # pylint: disable=protected-access
        self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)
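
    # Illustrative arithmetic (not part of the original comments): the 31GB device app memory is
    # split between the two pools, so set_variable_memory_max_size("6GB") is expected to reserve
    # 6GB for variables and the remaining 25GB (31 - 6) for the graph.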

    def set_max_device_memory(self, max_device_memory):
        if not Validator.check_str_by_regular(max_device_memory, _re_pattern):
            raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
        max_device_memory_value = float(max_device_memory[:-2])
        if max_device_memory_value == 0:
            raise ValueError("Context param max_device_memory should not be \"0GB\".")
        self.set_param(ms_ctx_param.max_device_memory, max_device_memory_value)

    def set_print_file_path(self, file_path):
        """Add timestamp suffix to file name. Sets print file path."""
        print_file_path = os.path.realpath(file_path)
        if os.path.isdir(print_file_path):
            raise IOError("Print_file_path should be a file path, but got {}.".format(file_path))

        if os.path.exists(print_file_path):
            _path, _file_name = os.path.split(print_file_path)
            path = _make_directory(_path)
            file_name = _get_print_file_name(_file_name)
            full_file_name = os.path.join(path, file_name)
        else:
            full_file_name = print_file_path
        self.set_param(ms_ctx_param.print_file_path, full_file_name)

    def set_env_config_path(self, env_config_path):
        """Check and set env_config_path."""
        if not self._context_handle.enable_dump_ir():
            raise ValueError("The 'env_config_path' is not supported, please enable ENABLE_DUMP_IR "
                             "with '-D on' and recompile source.")
        env_config_path = os.path.realpath(env_config_path)
        if not os.path.isfile(env_config_path):
            raise ValueError("The %r set by 'env_config_path' should be an existing json file." % env_config_path)
        try:
            with open(env_config_path, 'r') as f:
                json.load(f)
        except (TypeError, ValueError) as exo:
            raise ValueError("The %r set by 'env_config_path' should be a json file. "
                             "Detail: %s." % (env_config_path, str(exo)))
        self.set_param(ms_ctx_param.env_config_path, env_config_path)

    setters = {
        'mode': set_mode,
        'save_graphs_path': set_save_graphs_path,
        'device_target': set_device_target,
        'device_id': set_device_id,
        'max_call_depth': set_max_call_depth,
        'profiling_options': set_profiling_options,
        'variable_memory_max_size': set_variable_memory_max_size,
        'max_device_memory': set_max_device_memory,
        'print_file_path': set_print_file_path,
        'env_config_path': set_env_config_path
    }
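
    # Dispatch note (illustrative): set_context() consults this table for keys that need extra
    # validation, so e.g. set_context(max_call_depth=2000) ends up calling
    # set_max_call_depth(ctx, 2000) through setters['max_call_depth'].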

    @property
    def reserve_class_name_in_scope(self):
        """Gets whether to save the network class name in the scope."""
        return self._thread_local_info.reserve_class_name_in_scope

    @reserve_class_name_in_scope.setter
    def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
        """Sets whether to save the network class name in the scope."""
        self._thread_local_info.reserve_class_name_in_scope = reserve_class_name_in_scope

    @property
    def enable_ge(self):
        return self._context_handle.get_backend_policy() == 'ge'

    @property
    def enable_debug_runtime(self):
        return self._thread_local_info.debug_runtime

    @enable_debug_runtime.setter
    def enable_debug_runtime(self, enable):
        thread_info = self._thread_local_info
        thread_info.debug_runtime = enable


def _context():
    """
    Get the global _context. If the context has not been created yet, create a new one.

    Returns:
        _Context, the global context in PyNative mode.
    """
    global _k_context
    if _k_context is None:
        default_backend = 'debug'
        try:
            from mindspore import default_config
            default_backend = default_config.__backend__
        except ImportError:
            logger.error("Import default config failed.")
        _k_context = _Context()
        _k_context.enable_debug_runtime = False
        if default_backend == 'debug':
            _k_context.enable_debug_runtime = True
            default_backend = 'vm'
        _k_context.set_backend_policy(default_backend)
    return _k_context


@args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
                 auto_parallel_search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
                 strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool,
                 all_reduce_fusion_config=list, pipeline_stages=int)
def set_auto_parallel_context(**kwargs):
    r"""
    Set auto parallel context, which is valid only for Ascend and GPU targets.

    Auto parallel context should be configured before the initialization of your network.

    Note:
        Attribute name is required for setting attributes.
        If a program has tasks with different parallel modes, then before setting a new parallel mode for the
        next task, interface mindspore.context.reset_auto_parallel_context() needs to be called to reset
        the configuration.
        Setting or changing parallel modes must be done before creating any Initializer, otherwise
        RuntimeError may be raised when compiling the network.

    Some configurations are parallel mode specific, see the below table for details:

    =========================== ===========================
    Common                      AUTO_PARALLEL
    =========================== ===========================
    device_num                  gradient_fp32_sync
    global_rank                 loss_repeated_mean
    gradients_mean              auto_parallel_search_mode
    parallel_mode               strategy_ckpt_load_file
    all_reduce_fusion_config    strategy_ckpt_save_file
    enable_parallel_optimizer   full_batch
    \                           pipeline_stages
    =========================== ===========================

    Args:
        device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
        global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
        gradients_mean (bool): Whether to perform the mean operator after allreduce of gradients.
            "stand_alone" does not support gradients_mean. Default: False.
        gradient_fp32_sync (bool): Run allreduce of gradients in fp32. "stand_alone", "data_parallel"
            and "hybrid_parallel" do not support gradient_fp32_sync. Default: True.
        parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
            "hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Default: "stand_alone".

            - stand_alone: Only one processor is working.
            - data_parallel: Distributes the data across different processors.
            - hybrid_parallel: Achieves data parallelism and model parallelism manually.
            - semi_auto_parallel: Achieves data parallelism and model parallelism by
              setting parallel strategies.
            - auto_parallel: Achieves parallelism automatically.
        auto_parallel_search_mode (str): There are two kinds of shard strategy search modes,
            "recursive_programming" and "dynamic_programming". Default: "dynamic_programming".

            - recursive_programming: Recursive programming search mode.
            - dynamic_programming: Dynamic programming search mode.
        parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order
            to have the same network initialization parameter values for all devices, the parameters on device 0
            are broadcast to the other devices. Parameter broadcasting differs between parallel modes: in
            data_parallel mode, all parameters are broadcast except for the parameters whose attribute
            layerwise_parallel is True; in hybrid_parallel, semi_auto_parallel and auto_parallel mode, the
            segmented parameters do not participate in broadcasting. Default: False.
        strategy_ckpt_load_file (str): The path to load the parallel strategy checkpoint. Default: ''
        strategy_ckpt_save_file (str): The path to save the parallel strategy checkpoint. Default: ''
        full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
            should be set to True. Default: False.
        enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation
            for data parallel training, saving time and memory. Currently, auto and semi auto parallel mode support
            all optimizers on both Ascend and GPU. Data parallel mode only supports `Lamb` and `AdamWeightDecay`
            on Ascend. Default: False.
        all_reduce_fusion_config (list): Set allreduce fusion strategy by parameter indices. Only ReduceOp.SUM
            and HCCL_WORLD_GROUP/NCCL_WORLD_GROUP are supported. No default; if it is not set, fusion is disabled.
        pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how the devices
            are distributed along the pipeline. The total devices will be divided into 'pipeline_stages' stages.
            This currently can only be used when the parallel mode semi_auto_parallel is enabled. Default: 1.

    Raises:
        ValueError: If input key is not attribute in auto parallel context.

    Examples:
        >>> context.set_auto_parallel_context(device_num=8)
        >>> context.set_auto_parallel_context(global_rank=0)
        >>> context.set_auto_parallel_context(gradients_mean=True)
        >>> context.set_auto_parallel_context(gradient_fp32_sync=False)
        >>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
        >>> context.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
        >>> context.set_auto_parallel_context(parameter_broadcast=False)
        >>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
        >>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
        >>> context.set_auto_parallel_context(full_batch=True)
        >>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
        >>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
        >>> context.set_auto_parallel_context(pipeline_stages=2)
    """
    _set_auto_parallel_context(**kwargs)


def get_auto_parallel_context(attr_key):
    """
    Gets auto parallel context attribute value according to the key.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Returns attribute value according to the key.

    Raises:
        ValueError: If input key is not attribute in auto parallel context.
    """
    return _get_auto_parallel_context(attr_key)


def reset_auto_parallel_context():
    """
    Reset auto parallel context attributes to the default values:

    - device_num: 1.
    - global_rank: 0.
    - gradients_mean: False.
    - gradient_fp32_sync: True.
    - parallel_mode: 'stand_alone'.
    - auto_parallel_search_mode: 'dynamic_programming'.
    - parameter_broadcast: False.
    - strategy_ckpt_load_file: ''.
    - strategy_ckpt_save_file: ''.
    - full_batch: False.
    - enable_parallel_optimizer: False.
    - pipeline_stages: 1.
    """
    _reset_auto_parallel_context()


def _check_target_specific_cfgs(device, arg_key):
    """Checking whether a config is suitable for a specified device."""
    device_cfgs = {
        'enable_auto_mixed_precision': ['Ascend'],
        'enable_dump': ['Ascend'],
        'save_dump_path': ['Ascend'],
        'enable_graph_kernel': ['Ascend', 'GPU'],
        'enable_reduce_precision': ['Ascend'],
        'enable_profiling': ['Ascend'],
        'profiling_options': ['Ascend'],
        'print_file_path': ['Ascend'],
        'variable_memory_max_size': ['Ascend'],
        'max_device_memory': ['GPU']
    }
    # configs not in the device_cfgs map are supposed to be suitable for all devices
    if arg_key not in device_cfgs:
        return True
    supported_devices = device_cfgs[arg_key]
    if device in supported_devices:
        return True
    logger.warning(f"Config '{arg_key}' only supports devices in {supported_devices}, current device is '{device}'"
                   ", ignore it.")
    return False
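
# Illustrative example (matching the check above, not additional behavior): with device_target
# "Ascend", set_context(max_device_memory="3.5GB") would only log a warning and skip the key,
# because 'max_device_memory' is declared GPU-only in device_cfgs.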


@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
                 save_graphs_path=str, enable_dump=bool,
                 save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
                 enable_profiling=bool, profiling_options=str, enable_auto_mixed_precision=bool,
                 enable_graph_kernel=bool, check_bprop=bool, max_device_memory=str, print_file_path=str,
                 enable_sparse=bool, max_call_depth=int, env_config_path=str)
def set_context(**kwargs):
    """
    Sets context for running environment.

    Context should be configured before running your program. If there is no configuration,
    the "Ascend" device target will be used by default. GRAPH_MODE or PYNATIVE_MODE can be set
    by the `mode` attribute and both modes support all backends; the default mode is PYNATIVE_MODE.

    When the `save_graphs` attribute is set to True, the `save_graphs_path` attribute is used to set the
    intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
    For other configurations and arguments, please refer to the corresponding module
    description; the configuration is optional and can be enabled when needed.

    Note:
        Attribute name is required for setting attributes.
        Changing the mode after the net has been initialized is not recommended, because the implementations of
        some operations are different in graph mode and pynative mode. Default: PYNATIVE_MODE.

    Some configurations are device specific, see the below table for details:

    =========================== =========================== =================
    Common(CPU/GPU/Ascend)      Ascend                      GPU
    =========================== =========================== =================
    check_bprop                 print_file_path             max_device_memory
    device_id                   enable_dump                 enable_graph_kernel
    device_target               save_dump_path
    enable_sparse               enable_graph_kernel
    max_call_depth              enable_reduce_precision
    mode                        enable_profiling
    reserve_class_name_in_scope profiling_options
    save_graphs                 variable_memory_max_size
    save_graphs_path
    env_config_path
    grad_for_scalar
    =========================== =========================== =================

    Args:
        mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: PYNATIVE_MODE(1).
        device_target (str): The target device to run, supports "Ascend", "GPU", and "CPU". Default: "Ascend".
        device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
            while device_num_per_host should be no more than 4096. Default: 0.
        save_graphs (bool): Whether to save graphs. Default: False.
        save_graphs_path (str): Path to save graphs. Default: "."
        enable_graph_kernel (bool): Whether to enable composition of basic primitives. These primitives would be
            compiled into a fused kernel automatically. Default: False.
        reserve_class_name_in_scope (bool): Whether to save the network class name in the scope. Default: True.
        enable_reduce_precision (bool): Whether to enable precision reduction. Default: True.
        enable_dump (bool): Whether to enable dump. Default: False.
        save_dump_path (str): When the program is executed on Ascend, operators can dump data in this path.
            The root dump path is configured in /home/HwHiAiUser/ide_daemon/ide_daemon.cfg.
            So the real dump path is "{configured root dump path}/{`save_dump_path`}". Default: ".".
        variable_memory_max_size (str): Set the maximum size of the variable memory. Default: "0GB".
        enable_profiling (bool): Whether to open profiling. Default: False.
        profiling_options (str): Set profiling collection options; operators can collect profiling data here.
            The values of the profiling collection options are as follows, and multiple kinds of data can be
            collected at the same time.

            - output: the path for saving the profiling collection result file. The directory specified by this
              parameter needs to be created in advance on the training environment (container or host side), and
              the running user configured during installation must have read and write permissions. It supports
              absolute or relative paths (relative to the current path when executing the command line).
              An absolute path starts with '/', for example: /home/data/output.
              A relative path starts directly with the directory name, for example: output.
            - training_trace: collect iterative trajectory data, that is, the training task and software
              information of the AI software stack, to achieve performance analysis of the training task,
              focusing on data enhancement, forward and backward calculation, gradient aggregation update and
              other related data. The value is on/off.
            - task_trace: collect task trajectory data, that is, the hardware information of the HWTS/AICore of
              the Ascend 910 processor, and analyze the information of beginning and ending of the task.
              The value is on/off.
            - aicpu: collect profiling data enhanced by aicpu data. The value is on/off.
            - fp_point: specify the start position of the forward operator of the training network iteration
              trajectory, which is used to record the start timestamp of the forward calculation. The
              configuration value is the name of the first operator specified in the forward direction.
              When the value is empty, the system will automatically obtain the forward operator name.
            - bp_point: specify the end position of the backward operator of the training network iteration
              trajectory, which is used to record the end timestamp of the backward calculation. The
              configuration value is the name of the operator after the specified backward operator.
              When the value is empty, the system will automatically obtain the backward operator name.
            - aic_metrics: the values are as follows:
              ArithmeticUtilization: percentage statistics of various calculation indicators.
              PipeUtilization: the time-consuming ratio of the calculation unit and handling unit; this is
              the default value.
              Memory: percentage of external memory read and write instructions.
              MemoryL0: percentage of internal memory read and write instructions.
              ResourceConflictRatio: proportion of pipeline queue instructions.

            The profiling_options is like '{"output":"/home/data/output","training_trace":"on"}'.
        check_bprop (bool): Whether to check bprop. Default: False.
        max_device_memory (str): Sets the maximum memory available for devices.
            Currently, it is only supported on GPU. The format is "xxGB". Default: "1024GB".
        print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
            a file by default, and printing to the screen is turned off. If the file already exists, a timestamp
            suffix is added to the file name. Default: ''.
        enable_sparse (bool): Whether to enable sparsity feature. Default: False.
        max_call_depth (int): Specify the maximum depth of function calls. Default: 1000.
        env_config_path (str): Config path for DFX.
        grad_for_scalar (bool): Whether to get gradient for scalar. Default: False.

    Raises:
        ValueError: If input key is not an attribute in context.

    Examples:
        >>> context.set_context(mode=context.GRAPH_MODE)
        >>> context.set_context(mode=context.PYNATIVE_MODE)
        >>> context.set_context(device_target="Ascend")
        >>> context.set_context(device_id=0)
        >>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
        >>> context.set_context(enable_reduce_precision=True)
        >>> context.set_context(enable_dump=True, save_dump_path=".")
        >>> context.set_context(reserve_class_name_in_scope=True)
        >>> context.set_context(variable_memory_max_size="6GB")
        >>> context.set_context(mode=context.GRAPH_MODE,
        ...                     device_target="Ascend", device_id=0, save_graphs=True,
        ...                     save_graphs_path="/mindspore")
        >>> context.set_context(enable_profiling=True,
        ...                     profiling_options='{"output":"/home/data/output","training_trace":"on"}')
        >>> context.set_context(max_device_memory="3.5GB")
        >>> context.set_context(print_file_path="print.pb")
        >>> context.set_context(max_call_depth=80)
        >>> context.set_context(env_config_path="./env_config.json")
    """
    ctx = _context()
    # set device target first
    if 'device_target' in kwargs:
        ctx.set_device_target(kwargs['device_target'])
        device = ctx.get_param(ms_ctx_param.device_target)
        if device.lower() not in __device_target__:
            raise ValueError(f"Error, package type {__package_name__} supports device type {__device_target__}, "
                             f"but got device target {device}")
    device = ctx.get_param(ms_ctx_param.device_target)
    for key, value in kwargs.items():
        if not _check_target_specific_cfgs(device, key):
            continue
        if hasattr(ctx, key):
            setattr(ctx, key, value)
            continue
        if key in ctx.setters:
            ctx.setters[key](ctx, value)
            continue
        # enum variables beginning with '_' are for internal use
        if key in ms_ctx_param.__members__ and key[0] != '_':
            ctx.set_param(ms_ctx_param.__members__[key], value)
            continue
        raise ValueError("Set context keyword %s is not recognized!" % key)


def get_context(attr_key):
    """
    Gets context attribute value according to the input key.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Object, the value of the given attribute key.

    Raises:
        ValueError: If input key is not an attribute in context.
    """
    ctx = _context()
    device = ctx.get_param(ms_ctx_param.device_target)
    _ = _check_target_specific_cfgs(device, attr_key)
    if hasattr(ctx, attr_key):
        return getattr(ctx, attr_key)
    # enum variables beginning with '_' are for internal use
    if attr_key in ms_ctx_param.__members__ and attr_key[0] != '_':
        return ctx.get_param(ms_ctx_param.__members__[attr_key])
    raise ValueError("Get context keyword %s is not recognized!" % attr_key)


class ParallelMode:
    """
    Parallel mode options.

    There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
    "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".

    - STAND_ALONE: Only one processor is working.
    - DATA_PARALLEL: Distributes the data across different processors.
    - HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
    - SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
    - AUTO_PARALLEL: Achieves parallelism automatically.

    MODE_LIST: The list of all supported parallel modes.
    """
    STAND_ALONE = "stand_alone"
    DATA_PARALLEL = "data_parallel"
    HYBRID_PARALLEL = "hybrid_parallel"
    SEMI_AUTO_PARALLEL = "semi_auto_parallel"
    AUTO_PARALLEL = "auto_parallel"
    MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL, AUTO_PARALLEL]
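

# Usage sketch (illustrative only): these constants map onto the string values accepted by
# set_auto_parallel_context, e.g.
#   context.set_auto_parallel_context(parallel_mode=context.ParallelMode.DATA_PARALLEL)
# is equivalent to passing parallel_mode="data_parallel".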


@args_type_check(enable_ps=bool)
def set_ps_context(**kwargs):
    """
    Set parameter server training mode context.

    Note:
        Some other environment variables should also be set for parameter server training mode.
        These environment variables are listed below:

        .. code-block::

            MS_SERVER_NUM  # Server number
            MS_WORKER_NUM  # Worker number
            MS_SCHED_HOST  # Scheduler IP address
            MS_SCHED_PORT  # Scheduler port
            MS_ROLE        # The role of this process:
                           # MS_SCHED represents the scheduler,
                           # MS_WORKER represents the worker,
                           # MS_PSERVER represents the Server

    Args:
        enable_ps (bool): Whether to enable parameter server training mode.
            The environment variables take effect only after enable_ps is set to True.
            Default: False.

    Raises:
        ValueError: If input key is not the attribute in parameter server training mode context.

    Examples:
        >>> context.set_ps_context(enable_ps=True)
    """
    _set_ps_context(**kwargs)


def get_ps_context(attr_key):
    """
    Get parameter server training mode context attribute value according to the key.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Returns attribute value according to the key.

    Raises:
        ValueError: If input key is not an attribute in parameter server training mode context.
    """
    return _get_ps_context(attr_key)


def reset_ps_context():
    """
    Reset parameter server training mode context attributes to the default values:

    - enable_ps: False.
    """
    _reset_ps_context()
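

# Illustrative setup sketch (the environment variable names come from the set_ps_context docstring
# above; the values shown here are placeholders, not recommendations):
#   export MS_SERVER_NUM=1 MS_WORKER_NUM=1 MS_SCHED_HOST=127.0.0.1 MS_SCHED_PORT=8081 MS_ROLE=MS_WORKER
#   then, in Python: context.set_ps_context(enable_ps=True)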