Browse Source

!9204 Fix bug: modify API docs to make example executable

From: @lixiaohui33
Reviewed-by: @ouwenchang,@wuxuejian
Signed-off-by: @wuxuejian
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
4638a0f378
7 changed files with 9 additions and 10 deletions
  1. +1
    -1
      mindspore/explainer/benchmark/_attribution/faithfulness.py
  2. +2
    -2
      mindspore/explainer/benchmark/_attribution/localization.py
  3. +1
    -1
      mindspore/explainer/explanation/_attribution/_backprop/gradcam.py
  4. +1
    -1
      mindspore/explainer/explanation/_attribution/_backprop/gradient.py
  5. +2
    -2
      mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
  6. +1
    -1
      mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py
  7. +1
    -2
      mindspore/explainer/explanation/_attribution/_perturbation/rise.py

+ 1
- 1
mindspore/explainer/benchmark/_attribution/faithfulness.py View File

@@ -424,7 +424,7 @@ class Faithfulness(LabelSensitiveMetric):
>>> res = faithfulness.evaluate(gradient, inputs, targets) >>> res = faithfulness.evaluate(gradient, inputs, targets)
>>> # usage 2: input the generated saliency map >>> # usage 2: input the generated saliency map
>>> saliency = gradient(inputs, targets) >>> saliency = gradient(inputs, targets)
>>> res = faithfulenss.evaluate(gradient, inputs, targets, saliency)
>>> res = faithfulness.evaluate(gradient, inputs, targets, saliency)
""" """


self._check_evaluate_param(explainer, inputs, targets, saliency) self._check_evaluate_param(explainer, inputs, targets, saliency)


+ 2
- 2
mindspore/explainer/benchmark/_attribution/localization.py View File

@@ -110,10 +110,10 @@ class Localization(LabelSensitiveMetric):
numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`. numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`.


Examples: Examples:
>>> # init an explainer, the network should contain the output activation function.
>>> # init an explainer with a trained network
>>> gradient = Gradient(network) >>> gradient = Gradient(network)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32) >>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> masks = np.zeros(1, 1, 224, 224)
>>> masks = np.zeros([1, 1, 224, 224])
>>> masks[:, :, 65: 100, 65: 100] = 1 >>> masks[:, :, 65: 100, 65: 100] = 1
>>> targets = 5 >>> targets = 5
>>> # usage 1: input the explainer and the data to be explained, >>> # usage 1: input the explainer and the data to be explained,


+ 1
- 1
mindspore/explainer/explanation/_attribution/_backprop/gradcam.py View File

@@ -111,7 +111,7 @@ class GradCAM(IntermediateLayerAttribution):
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`. Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.


Examples: Examples:
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5 >>> label = 5
>>> # gradcam is a GradCAM object, parse data and the target label to be explained and get the attribution >>> # gradcam is a GradCAM object, parse data and the target label to be explained and get the attribution
>>> saliency = gradcam(inputs, label) >>> saliency = gradcam(inputs, label)


+ 1
- 1
mindspore/explainer/explanation/_attribution/_backprop/gradient.py View File

@@ -89,7 +89,7 @@ class Gradient(Attribution):
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`. Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.


Examples: Examples:
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5 >>> label = 5
>>> # gradient is a Gradient object, parse data and the target label to be explained and get the attribution >>> # gradient is a Gradient object, parse data and the target label to be explained and get the attribution
>>> saliency = gradient(inputs, label) >>> saliency = gradient(inputs, label)


+ 2
- 2
mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py View File

@@ -45,7 +45,7 @@ class ModifiedReLU(Gradient):
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`. Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.


Examples: Examples:
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5 >>> label = 5
>>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be >>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
>>> # explained and get the attribution >>> # explained and get the attribution
@@ -104,7 +104,7 @@ class Deconvolution(ModifiedReLU):
>>> # init Gradient with a trained network. >>> # init Gradient with a trained network.
>>> deconvolution = Deconvolution(net) >>> deconvolution = Deconvolution(net)
>>> # parse data and the target label to be explained and get the saliency map >>> # parse data and the target label to be explained and get the saliency map
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5 >>> label = 5
>>> saliency = deconvolution(inputs, label) >>> saliency = deconvolution(inputs, label)
""" """


+ 1
- 1
mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py View File

@@ -76,7 +76,7 @@ class Occlusion(PerturbationAttribution):
>>> param_dict = load_checkpoint("resnet50.ckpt") >>> param_dict = load_checkpoint("resnet50.ckpt")
>>> load_param_into_net(network, param_dict) >>> load_param_into_net(network, param_dict)
>>> occlusion = Occlusion(network) >>> occlusion = Occlusion(network)
>>> x = Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> x = Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 1 >>> label = 1
>>> saliency = occlusion(x, label) >>> saliency = occlusion(x, label)
""" """


+ 1
- 2
mindspore/explainer/explanation/_attribution/_perturbation/rise.py View File

@@ -114,14 +114,13 @@ class RISE(PerturbationAttribution):


Examples: Examples:
>>> # given an instance of RISE, saliency map can be generate >>> # given an instance of RISE, saliency map can be generate
>>> inputs = ms.Tensor(np.random.rand([2, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(2, 3, 224, 224), ms.float32)
>>> # when `targets` is an integer >>> # when `targets` is an integer
>>> targets = 5 >>> targets = 5
>>> saliency = rise(inputs, targets) >>> saliency = rise(inputs, targets)
>>> # `targets` can also be a tensor >>> # `targets` can also be a tensor
>>> targets = ms.Tensor([[5], [1]]) >>> targets = ms.Tensor([[5], [1]])
>>> saliency = rise(inputs, targets) >>> saliency = rise(inputs, targets)
>>>
""" """
self._verify_data(inputs, targets) self._verify_data(inputs, targets)
height, width = inputs.shape[2], inputs.shape[3] height, width = inputs.shape[2], inputs.shape[3]


Loading…
Cancel
Save