You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

optimizer.py 4.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """
  15. Differential privacy optimizer.
  16. """
  17. import mindspore as ms
  18. from mindspore import nn
  19. from mindspore import Tensor
  20. from mindarmour.diff_privacy.mechanisms.mechanisms import MechanismsFactory
  21. from mindarmour.utils._check_param import check_int_positive
  22. class DPOptimizerClassFactory:
  23. """
  24. Factory class of Optimizer.
  25. Args:
  26. micro_batches (int): The number of small batches split from an origianl batch. Default: None.
  27. Returns:
  28. Optimizer, Optimizer class
  29. Examples:
  30. >>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2)
  31. >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.5, initial_noise_multiplier=5.0)
  32. >>> net_opt = GaussianSGD.create('SGD')(params=network.trainable_params(),
  33. >>> learning_rate=cfg.lr,
  34. >>> momentum=cfg.momentum)
  35. """
  36. def __init__(self, micro_batches=None):
  37. self._mech_factory = MechanismsFactory()
  38. self.mech = None
  39. self._micro_batches = check_int_positive('micro_batches', micro_batches)
  40. def set_mechanisms(self, policy, *args, **kwargs):
  41. """
  42. Get noise mechanism object.
  43. Args:
  44. policy (str): Choose mechanism type.
  45. """
  46. self.mech = self._mech_factory.create(policy, *args, **kwargs)
  47. def create(self, policy, *args, **kwargs):
  48. """
  49. Create DP optimizer.
  50. Args:
  51. policy (str): Choose original optimizer type.
  52. Returns:
  53. Optimizer, A optimizer with DP.
  54. """
  55. if policy == 'SGD':
  56. cls = self._get_dp_optimizer_class(nn.SGD, self.mech, self._micro_batches, *args, **kwargs)
  57. return cls
  58. if policy == 'Momentum':
  59. cls = self._get_dp_optimizer_class(nn.Momentum, self.mech, self._micro_batches, *args, **kwargs)
  60. return cls
  61. if policy == 'Adam':
  62. cls = self._get_dp_optimizer_class(nn.Adam, self.mech, self._micro_batches, *args, **kwargs)
  63. return cls
  64. if policy == 'AdamWeightDecay':
  65. cls = self._get_dp_optimizer_class(nn.AdamWeightDecay, self.mech, self._micro_batches, *args, **kwargs)
  66. return cls
  67. if policy == 'AdamWeightDecayDynamicLR':
  68. cls = self._get_dp_optimizer_class(nn.AdamWeightDecayDynamicLR,
  69. self.mech,
  70. self._micro_batches,
  71. *args, **kwargs)
  72. return cls
  73. raise NameError("The {} is not implement, please choose ['SGD', 'Momentum', 'AdamWeightDecay', "
  74. "'Adam', 'AdamWeightDecayDynamicLR']".format(policy))
  75. def _get_dp_optimizer_class(self, cls, mech, micro_batches):
  76. """
  77. Wrap original mindspore optimizer with `self._mech`.
  78. """
  79. class DPOptimizer(cls):
  80. """
  81. Initialize the DPOptimizerClass.
  82. Returns:
  83. Optimizer, Optimizer class.
  84. """
  85. def __init__(self, *args, **kwargs):
  86. super(DPOptimizer, self).__init__(*args, **kwargs)
  87. self._mech = mech
  88. def construct(self, gradients):
  89. """
  90. construct a compute flow.
  91. """
  92. g_len = len(gradients)
  93. gradient_noise = list(gradients)
  94. for i in range(g_len):
  95. gradient_noise[i] = gradient_noise[i].asnumpy()
  96. gradient_noise[i] = self._mech(gradient_noise[i].shape).asnumpy() + gradient_noise[i]
  97. gradient_noise[i] = gradient_noise[i] / micro_batches
  98. gradient_noise[i] = Tensor(gradient_noise[i], ms.float32)
  99. gradients = tuple(gradient_noise)
  100. gradients = super(DPOptimizer, self).construct(gradients)
  101. return gradients
  102. return DPOptimizer

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。