You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

AFR.py 9.2 kB

3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270
  1. # Copyright 2022 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import os
  16. import re
  17. import numpy as np
  18. import mindspore
  19. import mindspore.dataset.vision.py_transforms as P
  20. from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
  21. from mindspore import Parameter, ops, nn, Tensor
  22. from mindspore.dataset.vision.py_transforms import ToTensor
  23. import dlib
  24. import matplotlib.image as mp
  25. import face_recognition as fr
  26. import face_recognition_models as frm
  27. from PIL import Image, ImageDraw
  28. from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
  29. from FaceRecognition.eval import get_net
  30. class FaceAdversarialAttack(object):
  31. """
  32. Class used to create adversarial facial recognition attacks
  33. """
  34. def __init__(self, input_img, target_img, seed=None):
  35. """
  36. Initialization for Attack class.
  37. Args:
  38. input_img : Image to train on.
  39. target_img : Image to target the adversarial attack against.
  40. seed : optional Sets custom seed for reproducability. Default is generated randomly.
  41. """
  42. if (seed is not None): np.random.seed(seed)
  43. self.MEAN = Tensor([0.485, 0.456, 0.406])
  44. self.STD = Tensor([0.229, 0.224, 0.225])
  45. self.LOSS = Tensor(0)
  46. self.expand_dims = mindspore.ops.ExpandDims()
  47. self.imageize = ToPILImage()
  48. self.tensorize = ToTensor()
  49. self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  50. self.resnet = get_net()
  51. self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
  52. self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
  53. mp.imsave('./outputs/input图像.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
  54. mp.imsave('./outputs/target图像.jpg',
  55. np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))
  56. self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
  57. self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
  58. self.adversarial_emb = None
  59. self.mask_tensor = self._create_mask(input_img)
  60. self.ref = self.mask_tensor
  61. self.pm = Parameter(self.mask_tensor)
  62. self.opt = nn.Adam([self.pm], learning_rate=0.01, weight_decay=0.0001)
  63. def train(self, attack_method):
  64. """
  65. Optimized adversarial image.
  66. """
  67. if attack_method == "non-target attack":
  68. LOSS = FaceLossNoTargetAttack(self.target_emb)
  69. if attack_method == "target_attack":
  70. LOSS = FaceLossTargetAttack(self.target_emb)
  71. net_with_criterion = MyWithLossCell(self.resnet, LOSS, self.input_tensor)
  72. train_net = MyTrainOneStepCell(net_with_criterion, self.opt)
  73. for i in range(2000):
  74. self.mask_tensor = Tensor(self.pm)
  75. grads, loss = train_net(self.mask_tensor)
  76. print("epoch %d ,loss: %f \n " % (i, loss.asnumpy().item()))
  77. self.mask_tensor = ops.clip_by_value(
  78. self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))
  79. adversarial_tensor = self._apply(
  80. self.input_tensor,
  81. (self.mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None],
  82. self.ref)
  83. adversarial_tensor = self._reverse_norm(adversarial_tensor)
  84. return adversarial_tensor, self.mask_tensor
  85. def test(self):
  86. """
  87. Test the recognition of adversarial images by the model.
  88. """
  89. adversarial_tensor = self._apply(
  90. self.input_tensor,
  91. (self.mask_tensor - self.MEAN[:, None, None] )/ self.STD[:, None, None],
  92. self.ref)
  93. self.adversarial_emb = self.resnet(self.expand_dims(adversarial_tensor, 0))
  94. self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
  95. self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
  96. adversarial = np.argmax(self.adversarial_emb.asnumpy())
  97. target = np.argmax(self.target_emb.asnumpy())
  98. input = np.argmax(self.input_emb.asnumpy())
  99. print("input:", input)
  100. print("input_confidence:", self.input_emb.asnumpy()[0][input])
  101. print("================================")
  102. print("adversarial:", adversarial)
  103. print("adversarial_confidence:", self.adversarial_emb.asnumpy()[0][adversarial])
  104. print("Confidence changes for target:", self.adversarial_emb.asnumpy()[0][target])
  105. print("Confidence changes for input:", self.adversarial_emb.asnumpy()[0][input])
  106. print("================================")
  107. print("target:", target)
  108. print("target_confidence:", self.target_emb.asnumpy()[0][target])
  109. print("input: %d, target: %d, adversarial: %d" % (input, target, adversarial))
  110. def _reverse_norm(self, image_tensor):
  111. """
  112. Reverses normalization for a given image_tensor
  113. Args:
  114. image_tensor : Tensor
  115. Returns:
  116. Tensor
  117. """
  118. tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
  119. return tensor
  120. def _apply(self,
  121. image_tensor,
  122. mask_tensor,
  123. reference_tensor
  124. ):
  125. """
  126. Apply a mask over an image.
  127. Args:
  128. image_tensor : Canvas to be used to apply mask on.
  129. mask_tensor : Mask to apply over the image.
  130. reference_tensor : Used to reference mask boundaries
  131. Returns:
  132. Tensor
  133. """
  134. tensor = mindspore.numpy.where((reference_tensor == 0), image_tensor, mask_tensor)
  135. return tensor
  136. def _create_mask(self, face_image):
  137. """
  138. Create mask image.
  139. Args:
  140. face_image : image of a detected face.
  141. Returns:
  142. mask_tensor : A mask image.
  143. """
  144. mask = Image.new('RGB', face_image.size, color=(0, 0, 0))
  145. d = ImageDraw.Draw(mask)
  146. landmarks = fr.face_landmarks(np.array(face_image))
  147. area = [landmark
  148. for landmark in landmarks[0]['chin']
  149. if landmark[1] > max(landmarks[0]['nose_tip'])[1]]
  150. area.append(landmarks[0]['nose_bridge'][1])
  151. d.polygon(area, fill=(255, 255, 255))
  152. mask_array = np.array(mask)
  153. mask_array = mask_array.astype(np.float32)
  154. for i in range(mask_array.shape[0]):
  155. for j in range(mask_array.shape[1]):
  156. for k in range(mask_array.shape[2]):
  157. if mask_array[i][j][k] == 255.:
  158. mask_array[i][j][k] = 0.5
  159. else:
  160. mask_array[i][j][k] = 0
  161. mask_tensor = Tensor(mask_array)
  162. mask_tensor = mask_tensor.swapaxes(0, 2).swapaxes(1, 2)
  163. mask_tensor.requires_grad = True
  164. return mask_tensor
  165. def _reverse_norm(self, image_tensor):
  166. """
  167. Reverses normalization for a given image_tensor.
  168. Args:
  169. image_tensor : Tensor.
  170. Returns:
  171. Tensor.
  172. """
  173. tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
  174. return tensor
  175. def detect_face(image_loc):
  176. """
  177. Helper function to run the facial detection and alignment process using
  178. dlib. Detects a given face and aligns it using dlib's 5 point landmark
  179. detector.
  180. Args:
  181. image_loc : image file location.
  182. Returns:
  183. face_image : Resized face image.
  184. """
  185. detector = dlib.get_frontal_face_detector()
  186. shape_predictor = dlib.shape_predictor(frm.pose_predictor_model_location())
  187. image = dlib.load_rgb_image(image_loc)
  188. dets = detector(image, 1)
  189. faces = dlib.full_object_detections()
  190. for detection in dets:
  191. faces.append(shape_predictor(image, detection))
  192. face_image = Image.fromarray(dlib.get_face_chip(image, faces[0], size=112))
  193. return face_image
  194. def load_data(path_to_data):
  195. """
  196. Helper function for loading image data. Allows user to load the input, target,
  197. and test images.
  198. Args:
  199. path_to_data : Path to the given data.
  200. Returns:
  201. list : List of resized face images.
  202. """
  203. img_files = [f for f in os.listdir(path_to_data) if re.search(r'.*\.(jpe?g|png)', f)]
  204. img_files_locs = [os.path.join(path_to_data, f) for f in img_files]
  205. image_list = []
  206. for loc in img_files_locs:
  207. image_list.append(detect_face(loc))
  208. return image_list

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。