You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_attacker.py 3.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """
  15. attacker test
  16. """
  17. import pytest
  18. import numpy as np
  19. from sklearn.neighbors import KNeighborsClassifier as knn
  20. from sklearn.linear_model import LogisticRegression
  21. from sklearn.neural_network import MLPClassifier
  22. from sklearn.ensemble import RandomForestClassifier
  23. from mindarmour.diff_privacy.evaluation.attacker import get_attack_model
  24. @pytest.mark.level0
  25. @pytest.mark.platform_x86_ascend_training
  26. @pytest.mark.env_onecard
  27. @pytest.mark.component_mindarmour
  28. def test_get_knn_model():
  29. features = np.random.randint(0, 10, [10, 10])
  30. labels = np.random.randint(0, 2, [10])
  31. config_knn = {
  32. "method": "KNN",
  33. "params": {
  34. "n_neighbors": [3, 5, 7],
  35. }
  36. }
  37. knn_attacker = get_attack_model(features, labels, config_knn)
  38. assert isinstance(knn_attacker, knn)
  39. pred = knn_attacker.predict(features)
  40. assert pred is not None
  41. @pytest.mark.level0
  42. @pytest.mark.platform_x86_ascend_training
  43. @pytest.mark.env_onecard
  44. @pytest.mark.component_mindarmour
  45. def test_get_lr_model():
  46. features = np.random.randint(0, 10, [10, 10])
  47. labels = np.random.randint(0, 2, [10])
  48. config_lr = {
  49. "method": "LR",
  50. "params": {
  51. "C": np.logspace(-4, 2, 10),
  52. }
  53. }
  54. lr_attacker = get_attack_model(features, labels, config_lr)
  55. assert isinstance(lr_attacker, LogisticRegression)
  56. pred = lr_attacker.predict(features)
  57. assert pred is not None
  58. @pytest.mark.level0
  59. @pytest.mark.platform_x86_ascend_training
  60. @pytest.mark.env_onecard
  61. @pytest.mark.component_mindarmour
  62. def test_get_mlp_model():
  63. features = np.random.randint(0, 10, [10, 10])
  64. labels = np.random.randint(0, 2, [10])
  65. config_mlpc = {
  66. "method": "MLP",
  67. "params": {
  68. "hidden_layer_sizes": [(64,), (32, 32)],
  69. "solver": ["adam"],
  70. "alpha": [0.0001, 0.001, 0.01],
  71. }
  72. }
  73. mlpc_attacker = get_attack_model(features, labels, config_mlpc)
  74. assert isinstance(mlpc_attacker, MLPClassifier)
  75. pred = mlpc_attacker.predict(features)
  76. assert pred is not None
  77. @pytest.mark.level0
  78. @pytest.mark.platform_x86_ascend_training
  79. @pytest.mark.env_onecard
  80. @pytest.mark.component_mindarmour
  81. def test_get_rf_model():
  82. features = np.random.randint(0, 10, [10, 10])
  83. labels = np.random.randint(0, 2, [10])
  84. config_rf = {
  85. "method": "RF",
  86. "params": {
  87. "n_estimators": [100],
  88. "max_features": ["auto", "sqrt"],
  89. "max_depth": [5, 10, 20, None],
  90. "min_samples_split": [2, 5, 10],
  91. "min_samples_leaf": [1, 2, 4],
  92. }
  93. }
  94. rf_attacker = get_attack_model(features, labels, config_rf)
  95. assert isinstance(rf_attacker, RandomForestClassifier)
  96. pred = rf_attacker.predict(features)
  97. assert pred is not None

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。