Merge pull request !795 from jinyaohui/clean_pylint_0428
@@ -61,6 +61,7 @@ class Vgg(nn.Cell):
     def __init__(self, base, num_classes=1000, batch_norm=False, batch_size=1):
         super(Vgg, self).__init__()
+        _ = batch_size
         self.layers = _make_layer(base, batch_norm=batch_norm)
         self.flatten = nn.Flatten()
         self.classifier = nn.SequentialCell([
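The added `_ = batch_size` is the usual idiom for a pylint cleanup: it marks an argument as deliberately unused (silencing W0613, unused-argument) while keeping it in the public signature. A minimal self-contained sketch of the same idiom, using a hypothetical function rather than the MindSpore code:

def make_classifier(num_classes=1000, batch_size=1):
    # batch_size is kept for interface compatibility but unused here;
    # binding it to `_` tells pylint the omission is deliberate (W0613).
    _ = batch_size
    return [f"dense_{i}" for i in range(num_classes)][:3]

print(make_classifier(10))  # ['dense_0', 'dense_1', 'dense_2']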
@@ -14,7 +14,6 @@
 # ============================================================================
 """FTRL"""
 from mindspore.ops import functional as F, composite as C, operations as P
-from mindspore.common.parameter import Parameter
 from mindspore.common import Tensor
 import mindspore.common.dtype as mstype
 from mindspore._checkparam import Validator as validator
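This hunk removes an import that is no longer referenced anywhere in the module; pylint reports such imports as W0611 (unused-import). A generic illustration of the warning, in a hypothetical module rather than the MindSpore source:

import os    # referenced below, so pylint is satisfied
import sys   # never referenced -> pylint W0611 (unused-import); delete it

print(os.sep)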
@@ -110,8 +110,8 @@ def _update_run_op(beta1, beta2, eps, lr, weight_decay_tensor, global_step, para
 def _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                        end_learning_rate, power, beta1, beta2, eps, weight_decay, prim_name):
     """Check the type of inputs."""
+    _ = warmup_steps
     validator.check_float_positive('start_learning_rate', start_learning_rate, prim_name)
     validator.check_float_legal_value('start_learning_rate', start_learning_rate, prim_name)
     validator.check_float_positive('end_learning_rate', end_learning_rate, prim_name)
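Here `_ = warmup_steps` again marks an argument that is accepted but not checked. The surrounding `validator` calls enforce that each hyperparameter is a positive, finite float; a pure-Python analogue of that contract (a hypothetical helper, not the real MindSpore Validator API):

import math

def check_float_positive(name, value, prim_name):
    # Mirrors the intent of the validator calls above: reject
    # non-floats, non-positive values, and non-finite values.
    if not isinstance(value, float) or not math.isfinite(value) or value <= 0:
        raise ValueError(f"For '{prim_name}', '{name}' must be a positive finite float, got {value!r}")
    return value

check_float_positive('start_learning_rate', 0.1, 'Lamb')   # passes
# check_float_positive('end_learning_rate', -1.0, 'Lamb')  # would raise ValueError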
@@ -173,8 +173,8 @@ test_sets = [
                            embedding_size=768,
                            embedding_shape=[1, 128, 768],
                            use_one_hot_embeddings=True,
-                           initializer_range=0.02), 1, 1), {
-        'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
+                           initializer_range=0.02), 1, 1),
+        {'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
     'desc_inputs': [input_ids],
     'desc_bprop': [[128]]}),
    ('EmbeddingLookup_multi_outputs_init_param', {
@@ -182,8 +182,8 @@ test_sets = [
                            embedding_size=768,
                            embedding_shape=[1, 128, 768],
                            use_one_hot_embeddings=False,
-                           initializer_range=0.02), {
-        'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
+                           initializer_range=0.02),
+        {'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
     'desc_inputs': [input_ids],
     'desc_bprop': [[1, 128, 768], [128]]}),
    ('EmbeddingLookup_multi_outputs_grad_with_no_sens', {
@@ -191,8 +191,8 @@ test_sets = [
                            embedding_size=768,
                            embedding_shape=[1, 128, 768],
                            use_one_hot_embeddings=False,
-                           initializer_range=0.02), {
-        'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
+                           initializer_range=0.02),
+        {'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
     'desc_inputs': [input_ids]}),
    ('GetMaskedLMOutput_grad_with_no_sens', {
     'block': GetMaskedLMOutput(BertConfig(batch_size=1)),
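The three hunks above make the same cosmetic change: the `init_param_with` dict is moved onto its own line instead of dangling after the closing parentheses, which resolves pylint's complaints about hanging indentation (bad-continuation). The shape of the change, reduced to a runnable toy with hypothetical names:

def build(size, one_hot):
    # stand-in for the BertConfig/embedding constructor used in the tests
    return {'size': size, 'one_hot': one_hot}

# before: the opening brace dangles after the call, so the continuation
# line has no natural alignment point
config = (build(size=768,
                one_hot=True), {
    'init_param_with': lambda shp: [1.0] * shp})

# after: the dict starts on its own line with a conventional indent
config = (build(size=768,
                one_hot=True),
          {'init_param_with': lambda shp: [1.0] * shp})

print(config[1]['init_param_with'](3))  # [1.0, 1.0, 1.0]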
@@ -69,6 +69,7 @@ class IthOutputCell(nn.Cell):
         return predict

 def get_output_cell(network, num_input, output_index, training=True):
+    _ = num_input
     net = IthOutputCell(network, output_index)
     set_block_training(net, training)
     return net
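`get_output_cell` wraps a multi-output network so callers see only the `output_index`-th output; `num_input` is accepted but unused, hence `_ = num_input`. A framework-free sketch of the same wrapper pattern (a hypothetical class, not the MindSpore Cell):

class IthOutput:
    """Forward to `network` and return only its output_index-th output."""
    def __init__(self, network, output_index):
        self.network = network
        self.output_index = output_index

    def __call__(self, *inputs):
        return self.network(*inputs)[self.output_index]

net = IthOutput(lambda x: (x, x * 2, x * 3), output_index=1)
print(net(5))  # 10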