robustML/advertrain/dependencies/cleverhans/fast_gradient_method.py

1 """
2 Taken from https://github.com/rwightman/pytorch-image-models
3  
4 The Fast Gradient Method attack.
5  
6 MIT License
7 """
from typing import Optional

import numpy as np
import torch

from robustML.advertrain.dependencies.cleverhans.utils import optimize_linear


def fast_gradient_method(
    model_fn,
    x: torch.Tensor,
    eps: float,
    norm: int,
    clip_min: Optional[float] = None,
    clip_max: Optional[float] = None,
    y: Optional[torch.Tensor] = None,
    targeted: bool = False,
    sanity_checks: bool = False,
) -> torch.Tensor:
27 """
28 PyTorch implementation of the Fast Gradient Method (FGM).
29  
30 Args:
31 model_fn: A callable that takes an input tensor and returns the model logits.
32 x (torch.Tensor): Input tensor.
33 eps (float): Epsilon, the input variation parameter.
34 norm (int): Order of the norm (np.inf, 1, or 2).
35 clip_min (float, optional): Mininum value per input dimension.
36 clip_max (float, optional): Maximum value per input dimension.
37 y (torch.Tensor, optional): Labels or target labels for targeted attack.
38 targeted (bool): Whether to perform a targeted attack or not.
39 sanity_checks (bool): If True, include sanity checks.
40  
41 Returns:
42 torch.Tensor: A tensor containing the adversarial examples.
43 """
    # Validate the attack parameters
    if eps < 0:
        raise ValueError(
            "eps must be greater than or equal to 0, got {} instead".format(eps)
        )
    if eps == 0:
        return x
    if clip_min is not None and clip_max is not None and clip_min > clip_max:
52 raise ValueError(f"clip_min must be less than or equal to clip_max, got clip_min={clip_min}, clip_max={clip_max}.")
53  
    asserts = []

    # If a data range was specified, check that the input lies within it
    if clip_min is not None:
        assert_ge = torch.all(
            torch.ge(x, torch.tensor(clip_min, device=x.device, dtype=x.dtype))
        )
        asserts.append(assert_ge)

    if clip_max is not None:
        assert_le = torch.all(
            torch.le(x, torch.tensor(clip_max, device=x.device, dtype=x.dtype))
        )
        asserts.append(assert_le)

    if sanity_checks:
        assert np.all(asserts)

    # Prepare the input tensor; if no labels were given, use the model's
    # predictions as the labels to avoid label leaking
    x = x.clone().detach().float().requires_grad_(True)
    y = torch.argmax(model_fn(x), dim=1) if y is None else y

    # Compute the loss (negated for a targeted attack) and its gradient w.r.t. x
    loss_fn = torch.nn.CrossEntropyLoss()
    loss = loss_fn(model_fn(x), y) * (-1 if targeted else 1)
    loss.backward()

    # Solve the linear problem for the chosen norm ball and apply the perturbation
    optimal_perturbation = optimize_linear(x.grad, eps, norm)
    adv_x = x + optimal_perturbation

    # Clipping perturbations
    if (clip_min is not None) or (clip_max is not None):
        if clip_min is None or clip_max is None:
            raise ValueError(
                "One of clip_min and clip_max is None but we don't currently support one-sided clipping"
            )
        adv_x = torch.clamp(adv_x, clip_min, clip_max)
    return adv_x
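

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original cleverhans module).
# It wires a throwaway linear classifier to random data purely to show how
# fast_gradient_method is called; the model, input shape, labels and the
# 8/255 epsilon below are arbitrary assumptions, not values used elsewhere
# in robustML.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Tiny stand-in classifier: flattens 3x32x32 inputs into 10 logits.
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
    model.eval()

    # Random batch of "images" in [0, 1] and random labels.
    images = torch.rand(4, 3, 32, 32)
    labels = torch.randint(0, 10, (4,))

    # Untargeted L-inf attack with an 8/255 budget, clipped back to [0, 1].
    adv_images = fast_gradient_method(
        model_fn=model,
        x=images,
        eps=8 / 255,
        norm=np.inf,
        clip_min=0.0,
        clip_max=1.0,
        y=labels,
        targeted=False,
    )
    print("max |adv - x| =", (adv_images - images).abs().max().item())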