You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_gradient_method.py 12 kB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """
  15. Gradient-Attack test.
  16. """
  17. import gc
  18. import numpy as np
  19. import pytest
  20. import mindspore.nn as nn
  21. from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
  22. import mindspore.context as context
  23. from mindspore.ops.composite import GradOperation
  24. from mindarmour.adv_robustness.attacks import FastGradientMethod
  25. from mindarmour.adv_robustness.attacks import FastGradientSignMethod
  26. from mindarmour.adv_robustness.attacks import LeastLikelyClassMethod
  27. from mindarmour.adv_robustness.attacks import RandomFastGradientMethod
  28. from mindarmour.adv_robustness.attacks import RandomFastGradientSignMethod
  29. from mindarmour.adv_robustness.attacks import RandomLeastLikelyClassMethod
  30. # for user
  31. class Net(Cell):
  32. """
  33. Construct the network of target model.
  34. Examples:
  35. >>> net = Net()
  36. """
  37. def __init__(self):
  38. """
  39. Introduce the layers used for network construction.
  40. """
  41. super(Net, self).__init__()
  42. self._relu = nn.ReLU()
  43. def construct(self, inputs):
  44. """
  45. Construct network.
  46. Args:
  47. inputs (Tensor): Input data.
  48. """
  49. out = self._relu(inputs)
  50. return out
  51. class Net2(Cell):
  52. """
  53. Construct the network of target model. A network with multiple input data.
  54. Examples:
  55. >>> net = Net2()
  56. """
  57. def __init__(self):
  58. super(Net2, self).__init__()
  59. self._relu = nn.ReLU()
  60. def construct(self, inputs1, inputs2):
  61. out1 = self._relu(inputs1)
  62. out2 = self._relu(inputs2)
  63. return out1 + out2, out1 - out2
  64. class LossNet(Cell):
  65. """
  66. Loss function for test.
  67. """
  68. def construct(self, loss1, loss2, labels1, labels2):
  69. return loss1 + loss2 - labels1 - labels2
  70. class WithLossCell(Cell):
  71. """Wrap the network with loss function"""
  72. def __init__(self, backbone, loss_fn):
  73. super(WithLossCell, self).__init__(auto_prefix=False)
  74. self._backbone = backbone
  75. self._loss_fn = loss_fn
  76. def construct(self, inputs1, inputs2, labels1, labels2):
  77. out = self._backbone(inputs1, inputs2)
  78. return self._loss_fn(*out, labels1, labels2)
  79. class GradWrapWithLoss(Cell):
  80. """
  81. Construct a network to compute the gradient of loss function in \
  82. input space and weighted by 'weight'.
  83. """
  84. def __init__(self, network):
  85. super(GradWrapWithLoss, self).__init__()
  86. self._grad_all = GradOperation(get_all=True, sens_param=False)
  87. self._network = network
  88. def construct(self, *inputs):
  89. gout = self._grad_all(self._network)(*inputs)
  90. return gout[0]
  91. @pytest.mark.level0
  92. @pytest.mark.platform_arm_ascend_training
  93. @pytest.mark.platform_x86_ascend_training
  94. @pytest.mark.env_card
  95. @pytest.mark.component_mindarmour
  96. def test_fast_gradient_method():
  97. """
  98. Fast gradient method unit test.
  99. """
  100. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  101. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  102. label = np.asarray([2], np.int32)
  103. label = np.eye(3)[label].astype(np.float32)
  104. attack = FastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  105. ms_adv_x = attack.generate(input_np, label)
  106. assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \
  107. ' must not be equal to original value.'
  108. del input_np, label, ms_adv_x
  109. gc.collect()
  110. @pytest.mark.level0
  111. @pytest.mark.platform_x86_gpu_training
  112. @pytest.mark.env_card
  113. @pytest.mark.component_mindarmour
  114. def test_fast_gradient_method_gpu():
  115. """
  116. Fast gradient method unit test.
  117. """
  118. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  119. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  120. label = np.asarray([2], np.int32)
  121. label = np.eye(3)[label].astype(np.float32)
  122. attack = FastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  123. ms_adv_x = attack.generate(input_np, label)
  124. assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \
  125. ' must not be equal to original value.'
  126. del input_np, label, ms_adv_x
  127. gc.collect()
  128. @pytest.mark.level0
  129. @pytest.mark.platform_x86_cpu
  130. @pytest.mark.env_card
  131. @pytest.mark.component_mindarmour
  132. def test_fast_gradient_method_cpu():
  133. """
  134. Fast gradient method unit test.
  135. """
  136. context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
  137. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  138. label = np.asarray([2], np.int32)
  139. loss = SoftmaxCrossEntropyWithLogits(sparse=True)
  140. attack = FastGradientMethod(Net(), loss_fn=loss)
  141. ms_adv_x = attack.generate(input_np, label)
  142. assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \
  143. ' must not be equal to original value.'
  144. del input_np, label, ms_adv_x
  145. gc.collect()
  146. @pytest.mark.level0
  147. @pytest.mark.platform_arm_ascend_training
  148. @pytest.mark.platform_x86_ascend_training
  149. @pytest.mark.env_card
  150. @pytest.mark.component_mindarmour
  151. def test_random_fast_gradient_method():
  152. """
  153. Random fast gradient method unit test.
  154. """
  155. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  156. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  157. label = np.asarray([2], np.int32)
  158. label = np.eye(3)[label].astype(np.float32)
  159. attack = RandomFastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  160. ms_adv_x = attack.generate(input_np, label)
  161. assert np.any(ms_adv_x != input_np), 'Random fast gradient method: ' \
  162. 'generate value must not be equal to' \
  163. ' original value.'
  164. del input_np, label, ms_adv_x
  165. gc.collect()
  166. @pytest.mark.level0
  167. @pytest.mark.platform_arm_ascend_training
  168. @pytest.mark.platform_x86_ascend_training
  169. @pytest.mark.env_card
  170. @pytest.mark.component_mindarmour
  171. def test_fast_gradient_sign_method():
  172. """
  173. Fast gradient sign method unit test.
  174. """
  175. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  176. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  177. label = np.asarray([2], np.int32)
  178. label = np.eye(3)[label].astype(np.float32)
  179. attack = FastGradientSignMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  180. ms_adv_x = attack.generate(input_np, label)
  181. assert np.any(ms_adv_x != input_np), 'Fast gradient sign method: generate' \
  182. ' value must not be equal to' \
  183. ' original value.'
  184. del input_np, label, ms_adv_x
  185. gc.collect()
  186. @pytest.mark.level0
  187. @pytest.mark.platform_arm_ascend_training
  188. @pytest.mark.platform_x86_ascend_training
  189. @pytest.mark.env_card
  190. @pytest.mark.component_mindarmour
  191. def test_random_fast_gradient_sign_method():
  192. """
  193. Random fast gradient sign method unit test.
  194. """
  195. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  196. input_np = np.random.random((1, 28)).astype(np.float32)
  197. label = np.asarray([2], np.int32)
  198. label = np.eye(28)[label].astype(np.float32)
  199. attack = RandomFastGradientSignMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  200. ms_adv_x = attack.generate(input_np, label)
  201. assert np.any(ms_adv_x != input_np), 'Random fast gradient sign method: ' \
  202. 'generate value must not be equal to' \
  203. ' original value.'
  204. del input_np, label, ms_adv_x
  205. gc.collect()
  206. @pytest.mark.level0
  207. @pytest.mark.platform_arm_ascend_training
  208. @pytest.mark.platform_x86_ascend_training
  209. @pytest.mark.env_card
  210. @pytest.mark.component_mindarmour
  211. def test_least_likely_class_method():
  212. """
  213. Least likely class method unit test.
  214. """
  215. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  216. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  217. label = np.asarray([2], np.int32)
  218. label = np.eye(3)[label].astype(np.float32)
  219. attack = LeastLikelyClassMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  220. ms_adv_x = attack.generate(input_np, label)
  221. assert np.any(ms_adv_x != input_np), 'Least likely class method: generate' \
  222. ' value must not be equal to' \
  223. ' original value.'
  224. del input_np, label, ms_adv_x
  225. gc.collect()
  226. @pytest.mark.level0
  227. @pytest.mark.platform_arm_ascend_training
  228. @pytest.mark.platform_x86_ascend_training
  229. @pytest.mark.env_card
  230. @pytest.mark.component_mindarmour
  231. def test_random_least_likely_class_method():
  232. """
  233. Random least likely class method unit test.
  234. """
  235. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  236. input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
  237. label = np.asarray([2], np.int32)
  238. label = np.eye(3)[label].astype(np.float32)
  239. attack = RandomLeastLikelyClassMethod(Net(), eps=0.1, alpha=0.01, \
  240. loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  241. ms_adv_x = attack.generate(input_np, label)
  242. assert np.any(ms_adv_x != input_np), 'Random least likely class method: ' \
  243. 'generate value must not be equal to' \
  244. ' original value.'
  245. del input_np, label, ms_adv_x
  246. gc.collect()
  247. @pytest.mark.level0
  248. @pytest.mark.platform_arm_ascend_training
  249. @pytest.mark.platform_x86_ascend_training
  250. @pytest.mark.env_card
  251. @pytest.mark.component_mindarmour
  252. def test_fast_gradient_method_multi_inputs():
  253. """
  254. Fast gradient method unit test.
  255. """
  256. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  257. inputs1 = np.asarray([[0.1, 0.2, 0.7]]).astype(np.float32)
  258. inputs2 = np.asarray([[0.4, 0.8, 0.5]]).astype(np.float32)
  259. labels1 = np.expand_dims(np.eye(3)[1].astype(np.float32), axis=0)
  260. labels2 = np.expand_dims(np.eye(3)[2].astype(np.float32), axis=0)
  261. with_loss_cell = WithLossCell(Net2(), LossNet())
  262. grad_with_loss_net = GradWrapWithLoss(with_loss_cell)
  263. attack = FastGradientMethod(grad_with_loss_net)
  264. ms_adv_x = attack.generate((inputs1, inputs2), (labels1, labels2))
  265. assert np.any(ms_adv_x != inputs1), 'Fast gradient method: generate value' \
  266. ' must not be equal to original value.'
  267. del inputs1, inputs2, labels1, labels2, ms_adv_x
  268. gc.collect()
  269. @pytest.mark.level0
  270. @pytest.mark.platform_arm_ascend_training
  271. @pytest.mark.platform_x86_ascend_training
  272. @pytest.mark.env_card
  273. @pytest.mark.component_mindarmour
  274. def test_assert_error():
  275. """
  276. Random least likely class method unit test.
  277. """
  278. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  279. with pytest.raises(ValueError) as e:
  280. assert RandomLeastLikelyClassMethod(Net(), eps=0.05, alpha=0.21, \
  281. loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
  282. assert str(e.value) == 'eps must be larger than alpha!'

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。