Merge pull request !85 from jxlang910/master (tag: v0.7.0-beta)
@@ -93,7 +93,7 @@ class Fuzzer:
     >>> {'method': 'Translate', 'params': {'x_bias': 0.1, 'y_bias': 0.2}},
     >>> {'method': 'FGSM', 'params': {'eps': 0.1, 'alpha': 0.1}}]
     >>> train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
-    >>> model_fuzz_test = Fuzzer(model, train_images, 1000, 10)
+    >>> model_fuzz_test = Fuzzer(model, train_images, 10, 1000)
     >>> samples, labels, preds, strategies, report = model_fuzz_test.fuzzing(mutate_config, initial_seeds)
     """
@@ -141,8 +141,9 @@ class Fuzzer:
         Args:
             mutate_config (list): Mutate configs. The format is
                 [{'method': 'Blur', 'params': {'auto_param': True}}, {'method': 'Contrast', 'params': {'factor': 2}}].
-                The support methods list is in `self._strategies`, and the params of each
-                method must within the range of changeable parameters.
+                The supported methods list is in `self._strategies`, and the params of each method must within the
+                range of changeable parameters. All supported methods are: 'Contrast', 'Brightness', 'Blur',
+                'Noise', 'Translate', 'Scale', 'Shear', 'Rotate', 'FGSM', 'PGD' and 'MDIIM'.
             initial_seeds (numpy.ndarray): Initial seeds used to generate
                 mutated samples.
             coverage_metric (str): Model coverage metric of neural networks.
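The added sentence enumerates the supported mutation methods. A configuration sketch built only from parameter shapes already shown in these docstrings follows; the values are illustrative, and methods listed above but not shown here would need their own params entries.

    mutate_config = [
        {'method': 'Blur', 'params': {'auto_param': True}},
        {'method': 'Contrast', 'params': {'factor': 2}},
        {'method': 'Translate', 'params': {'x_bias': 0.1, 'y_bias': 0.2}},
        {'method': 'FGSM', 'params': {'eps': 0.1, 'alpha': 0.1}},
    ]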
@@ -56,7 +56,7 @@ class ModelCoverageMetrics:
     >>> test_images = np.random.random((5000, 128)).astype(np.float32)
     >>> model = Model(net)
     >>> model_fuzz_test = ModelCoverageMetrics(model, 10000, 10, train_images)
-    >>> model_fuzz_test.test_adequacy_coverage_calculate(test_images)
+    >>> model_fuzz_test.calculate_coverage(test_images)
     >>> print('KMNC of this test is : %s', model_fuzz_test.get_kmnc())
     >>> print('NBC of this test is : %s', model_fuzz_test.get_nbc())
     >>> print('SNAC of this test is : %s', model_fuzz_test.get_snac())
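This hunk renames test_adequacy_coverage_calculate to calculate_coverage in the example. A before/after sketch for callers, assuming the single-argument signature is otherwise unchanged:

    # Before this change:
    # model_fuzz_test.test_adequacy_coverage_calculate(test_images)
    # After this change:
    model_fuzz_test.calculate_coverage(test_images)
    print('KMNC of this test is : %s' % model_fuzz_test.get_kmnc())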
@@ -78,7 +78,11 @@ class LogUtil:
     def set_level(self, level):
         """
         Set the logging level of this logger, level must be an integer or a
-        string.
+        string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40),
+        'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10).
+        For example, if logger.set_level('WARNING') or logger.set_level(21), then
+        logger.warn() and logger.error() in scripts would be printed while running,
+        while logger.info() or logger.debug() would not be printed.

         Args:
             level (Union[int, str]): Level of logger.
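The expanded docstring spells out the level names, their integer ranges, and the filtering behaviour. A usage sketch of that behaviour is below; the LogUtil.get_instance() accessor and the tag-first logging calls are assumptions based on how this logger is typically obtained and used, and are not part of this hunk.

    logger = LogUtil.get_instance()   # assumed accessor, not shown in this diff
    logger.set_level('WARNING')       # or an integer, e.g. logger.set_level(21)
    TAG = 'demo'
    logger.error(TAG, 'printed: ERROR is above the configured level')
    logger.warn(TAG, 'printed: WARNING is the configured level')
    logger.info(TAG, 'not printed: INFO is below the configured level')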
@@ -98,7 +98,7 @@ class GradWrapWithLoss(Cell):
     Examples:
         >>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01)
-        >>> label = Tensor(np.ones([1, 10]).astype(np.float32))
+        >>> labels = Tensor(np.ones([1, 10]).astype(np.float32))
         >>> net = NET()
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
         >>> loss_net = WithLossCell(net, loss_fn)
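The rename from label to labels only touches the docstring example. If the example continues the way the wrapper is normally used, the renamed tensor would be consumed as sketched here; the (inputs, labels) call signature is assumed from the surrounding example, not shown in this hunk.

    >>> grad_with_loss_net = GradWrapWithLoss(loss_net)
    >>> grad_out = grad_with_loss_net(data, labels)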