You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_lbfgs.py 4.0 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """
  15. LBFGS-Attack test.
  16. """
  17. import os
  18. import gc
  19. import numpy as np
  20. import pytest
  21. from mindspore import context
  22. from mindspore.train.serialization import load_checkpoint, load_param_into_net
  23. from mindarmour.adv_robustness.attacks import LBFGS
  24. from mindarmour.utils.logger import LogUtil
  25. from tests.ut.python.utils.mock_net import Net
# Module-level logger and tag shared by every test in this file.
LOGGER = LogUtil.get_instance()
TAG = 'LBFGS_Test'
# DEBUG level so the true/target label messages below are emitted.
LOGGER.set_level('DEBUG')
  29. @pytest.mark.level0
  30. @pytest.mark.platform_arm_ascend_training
  31. @pytest.mark.platform_x86_ascend_training
  32. @pytest.mark.env_card
  33. @pytest.mark.component_mindarmour
  34. def test_lbfgs_attack_ascend():
  35. """
  36. Feature: LBFGS-Attack testfor ascend
  37. Description: make sure that attack.generate works properly
  38. Expectation: attack.generate works properly
  39. """
  40. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  41. np.random.seed(123)
  42. # upload trained network
  43. current_dir = os.path.dirname(os.path.abspath(__file__))
  44. ckpt_path = os.path.join(current_dir,
  45. '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
  46. net = Net()
  47. load_dict = load_checkpoint(ckpt_path)
  48. load_param_into_net(net, load_dict)
  49. # get one mnist image
  50. input_np = np.load(os.path.join(current_dir,
  51. '../../dataset/test_images.npy'))[:1]
  52. label_np = np.load(os.path.join(current_dir,
  53. '../../dataset/test_labels.npy'))[:1]
  54. LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0]))
  55. classes = 10
  56. target_np = np.random.randint(0, classes, 1)
  57. while target_np == label_np[0]:
  58. target_np = np.random.randint(0, classes)
  59. target_np = np.eye(10)[target_np].astype(np.float32)
  60. attack = LBFGS(net, is_targeted=True)
  61. LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0]))
  62. _ = attack.generate(input_np, target_np)
  63. del input_np, label_np, target_np
  64. gc.collect()
  65. @pytest.mark.level0
  66. @pytest.mark.platform_x86_cpu
  67. @pytest.mark.env_card
  68. @pytest.mark.component_mindarmour
  69. def test_lbfgs_attack_cpu():
  70. """
  71. Feature: LBFGS-Attack testfor cpu
  72. Description: make sure that attack.generate works properly
  73. Expectation: attack.generate works properly
  74. """
  75. context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
  76. np.random.seed(123)
  77. # upload trained network
  78. current_dir = os.path.dirname(os.path.abspath(__file__))
  79. ckpt_path = os.path.join(current_dir,
  80. '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
  81. net = Net()
  82. load_dict = load_checkpoint(ckpt_path)
  83. load_param_into_net(net, load_dict)
  84. # get one mnist image
  85. input_np = np.load(os.path.join(current_dir,
  86. '../../dataset/test_images.npy'))[:1]
  87. label_np = np.load(os.path.join(current_dir,
  88. '../../dataset/test_labels.npy'))[:1]
  89. LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0]))
  90. classes = 10
  91. target_np = np.random.randint(0, classes, 1)
  92. while target_np == label_np[0]:
  93. target_np = np.random.randint(0, classes)
  94. target_np = np.eye(10)[target_np].astype(np.float32)
  95. attack = LBFGS(net, is_targeted=True)
  96. LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0]))
  97. _ = attack.generate(input_np, target_np)
  98. del input_np, label_np, target_np
  99. gc.collect()

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。