Compare commits

...

19 Commits
master ... r1.2

Author SHA1 Message Date
  mindspore-ci-bot 40880b54d0 !204 Update version number from 1.2.0-rc1 to 1.2.0 5 years ago
  pkuliuliu 2cd249445b update version number 5 years ago
  mindspore-ci-bot 0c59a3f28c !201 Fix a bug of python-api 5 years ago
  jin-xiulang 32c541d39e Fix a bug of python-api 5 years ago
  mindspore-ci-bot 9cc67c9ce1 !199 Fix several indent bugs of mindarmour python-api 5 years ago
  jin-xiulang 4b41a52a3a Fix several issues of python-api 5 years ago
  mindspore-ci-bot 41ee10ef28 !197 Fix several issues of python-api 5 years ago
  jin-xiulang d4a447f518 Fix several issues of python-api 5 years ago
  mindspore-ci-bot bba9f37b7f !195 Update Release Notes 5 years ago
  pkuliuliu 76e1dc9c34 update release notes 5 years ago
  mindspore-ci-bot 45bdd8a505 !194 Fix an issue of api 5 years ago
  jin-xiulang 5b24d1b7b2 Fix an api issue 5 years ago
  mindspore-ci-bot 8a1c8af983 !191 Update version number to rc1 5 years ago
  pkuliuliu b32ab79025 update version number to rc1 5 years ago
  mindspore-ci-bot 8ecc67c80d !188 Remove the use of 'ControlDepend' in Diff privacy codes. 5 years ago
  jin-xiulang e64211ba9b Remove the use of 'ControlDepend' in Diff privacy 5 years ago
  mindspore-ci-bot 46c45e0114 !186 Update Release Notes to 1.2.0 5 years ago
  mindspore-ci-bot 3e47a73439 !186 Update Release Notes to 1.2.0 5 years ago
  pkuliuliu dc85ad8571 Update Release Notes to 1.2.0 5 years ago
6 changed files with 51 additions and 12 deletions
Unified View
  1. +41
    -0
      RELEASE.md
  2. +6
    -6
      mindarmour/adv_robustness/evaluations/attack_evaluation.py
  3. +1
    -1
      mindarmour/adv_robustness/evaluations/defense_evaluation.py
  4. +2
    -2
      mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py
  5. +0
    -2
      mindarmour/privacy/diff_privacy/train/model.py
  6. +1
    -1
      mindarmour/privacy/evaluation/membership_inference.py

+ 41
- 0
RELEASE.md View File

@@ -1,3 +1,44 @@
# MindArmour 1.2.0

## MindArmour 1.2.0 Release Notes

### Major Features and Improvements

#### Privacy

* [STABLE]Tailored-based privacy protection technology (Pynative)
* [STABLE]Model Inversion. Reverse analysis technology of privacy information

### API Change

#### Backwards Incompatible Change

##### C++ API

[Modify] ...
[Add] ...
[Delete] ...

##### Java API

[Add] ...

#### Deprecations

##### C++ API

##### Java API

### Bug fixes

[BUGFIX] ...

### Contributors

Thanks goes to these wonderful people:

han.yin

# MindArmour 1.1.0 Release Notes


## MindArmour


+ 6
- 6
mindarmour/adv_robustness/evaluations/attack_evaluation.py View File

@@ -136,10 +136,10 @@ class AttackEvaluate:
- float, return average l0, l2, or linf distance of all success
adversarial examples, return value includes following cases.


- If return value :math:`>=` 0, average lp distance. The lower,
the more successful the attack is.
- If return value :math:`>=` 0, average lp distance. The lower,
the more successful the attack is.


- If return value is -1, there is no success adversarial examples.
- If return value is -1, there is no success adversarial examples.
""" """
idxes = self._success_idxes idxes = self._success_idxes
success_num = idxes.shape[0] success_num = idxes.shape[0]
@@ -164,10 +164,10 @@ class AttackEvaluate:
Returns:
- float, average structural similarity.


- If return value ranges between (0, 1), the higher, the more
successful the attack is.
- If return value ranges between (0, 1), the higher, the more
successful the attack is.


- If return value is -1: there is no success adversarial examples.
- If return value is -1: there is no success adversarial examples.
""" """
success_num = self._success_idxes.shape[0] success_num = self._success_idxes.shape[0]
if success_num == 0: if success_num == 0:


+ 1
- 1
mindarmour/adv_robustness/evaluations/defense_evaluation.py View File

@@ -106,7 +106,7 @@ class DefenseEvaluate:
Returns:
- float, the lower, the more successful the defense is.


- If return value == -1, len(idxes) == 0.
- If return value == -1, len(idxes) == 0.
""" """
idxes = np.arange(self._num_samples) idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels


+ 2
- 2
mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py View File

@@ -183,8 +183,8 @@ class NoiseGaussianRandom(_Mechanisms):
initial_noise_multiplier(float): Ratio of the standard deviation of
    Gaussian noise divided by the norm_bound, which will be used to
    calculate privacy spent. Default: 1.0.
seed(int): Original random seed, if seed=0 random normal will use secure
random number. IF seed!=0 random normal will generate values using
seed(int): Original random seed, if seed=0, random normal will use secure
random number. If seed!=0, random normal will generate values using
    given seed. Default: 0.
decay_policy(str): Mechanisms parameters update policy. Default: None.




+ 0
- 2
mindarmour/privacy/diff_privacy/train/model.py View File

@@ -38,7 +38,6 @@ from mindspore.ops.operations import NPUAllocFloatStatus
from mindspore.ops.operations import NPUClearFloatStatus
from mindspore.ops.operations import ReduceSum
from mindspore.ops.operations import LessEqual
from mindspore.ops.operations import ControlDepend
from mindspore.parallel._utils import _get_gradients_mean
from mindspore.parallel._utils import _get_device_num
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
@@ -395,7 +394,6 @@ class _TrainOneStepWithLossScaleCell(Cell):
self.reduce_sum = ReduceSum(keep_dims=False)
self.base = Tensor(1, mstype.float32)
self.less_equal = LessEqual()
self.depend_parameter_use = ControlDepend(depend_mode=1)
self.allreduce = P.AllReduce()
self.parallel_mode = _get_parallel_mode()
self.grad_reducer = F.identity


+ 1
- 1
mindarmour/privacy/evaluation/membership_inference.py View File

@@ -95,7 +95,7 @@ def _softmax_cross_entropy(logits, labels):
class MembershipInference:
"""
Evaluation proposed by Shokri, Stronati, Song and Shmatikov is a grey-box attack.
The attack requires obtain loss or logits results of training samples.
The attack requires loss or logits results of training samples.


References: `Reza Shokri, Marco Stronati, Congzheng Song, Vitaly Shmatikov.
Membership Inference Attacks against Machine Learning Models. 2017.


Loading…
Cancel
Save