@@ -61,8 +61,8 @@ def device_placement(framework, device_name='gpu:0'):
     if framework == Frameworks.tf:
         import tensorflow as tf
         if device_type == Devices.gpu and not tf.test.is_gpu_available():
-            logger.warning(
-                'tensorflow cuda is not available, using cpu instead.')
+            logger.debug(
+                'tensorflow: cuda is not available, using cpu instead.')
             device_type = Devices.cpu
         if device_type == Devices.cpu:
             with tf.device('/CPU:0'):
@@ -78,7 +78,8 @@ def device_placement(framework, device_name='gpu:0'):
             if torch.cuda.is_available():
                 torch.cuda.set_device(f'cuda:{device_id}')
             else:
-                logger.warning('cuda is not available, using cpu instead.')
+                logger.debug(
+                    'pytorch: cuda is not available, using cpu instead.')
             yield
         else:
             yield
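For context, device_placement is a generator-based context manager, so the CPU fallback above only affects code run inside the with block. A minimal usage sketch, assuming the import paths modelscope.utils.device and modelscope.utils.constant and the Frameworks.torch member name, none of which appear in this diff:

# Illustrative sketch, not part of the diff; import paths and member names
# are assumptions about the surrounding library.
from modelscope.utils.constant import Frameworks
from modelscope.utils.device import device_placement

# On a machine without CUDA, the hunks above now log at debug level and run
# the block on CPU instead of emitting a warning.
with device_placement(Frameworks.torch, device_name='gpu:0'):
    pass  # run framework ops / model inference here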
@@ -96,9 +97,7 @@ def create_device(device_name):
     if device_type == Devices.gpu:
         use_cuda = True
         if not torch.cuda.is_available():
-            logger.warning(
-                'cuda is not available, create gpu device failed, using cpu instead.'
-            )
+            logger.info('cuda is not available, using cpu instead.')
             use_cuda = False
 
     if use_cuda:
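create_device applies the same fallback outside a context manager; a short sketch of the behaviour this hunk touches, again assuming the modelscope.utils.device import path:

# Illustrative sketch, not part of the diff. create_device is assumed to
# return a torch.device; with the change above, a CUDA-less machine gets an
# info-level message and a CPU device rather than a warning.
from modelscope.utils.device import create_device

device = create_device('gpu:0')  # falls back to torch.device('cpu') without CUDA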
@@ -176,7 +176,7 @@ def build_from_cfg(cfg,
         raise TypeError('default_args must be a dict or None, '
                         f'but got {type(default_args)}')
 
-    # dynamic load installation reqruiements for this module
+    # dynamic load installation requirements for this module
     from modelscope.utils.import_utils import LazyImportModule
     sig = (registry.name.upper(), group_key, cfg['type'])
     LazyImportModule.import_module(sig)
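The sig tuple built here acts as the lazy-import key: the upper-cased registry name, the group key, and the configured type. A hypothetical value, purely for illustration:

# Hypothetical example values; the real ones depend on the registry and cfg.
sig = ('MODELS', 'text-classification', 'bert-sentiment-analysis')
# LazyImportModule.import_module(sig) presumably uses this key to import the
# module that registers the type before the registry lookup below runs.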
@@ -193,8 +193,11 @@ def build_from_cfg(cfg,
     if isinstance(obj_type, str):
         obj_cls = registry.get(obj_type, group_key=group_key)
         if obj_cls is None:
-            raise KeyError(f'{obj_type} is not in the {registry.name}'
-                           f' registry group {group_key}')
+            raise KeyError(
+                f'{obj_type} is not in the {registry.name}'
+                f' registry group {group_key}. Please make'
+                f' sure the correct version of ModelScope library is used.'
+            )
         obj_cls.group_key = group_key
     elif inspect.isclass(obj_type) or inspect.isfunction(obj_type):
         obj_cls = obj_type
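For illustration, this is the call pattern that ends in the extended KeyError: build_from_cfg resolves the 'type' key of a config against a registry group, and an unregistered type now raises the longer hint. The MODELS registry and group name below are assumed examples, not taken from this diff:

# Illustrative sketch; MODELS and the group name are assumptions.
from modelscope.models.builder import MODELS
from modelscope.utils.registry import build_from_cfg

cfg = dict(type='some-unregistered-model')
try:
    model = build_from_cfg(cfg, MODELS, group_key='text-classification')
except KeyError as err:
    print(err)  # prints the extended hint added above, naming the registry and group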