1 """
2 Taken from https://github.com/rwightman/pytorch-image-models
3
4 MIT License
5 """
6 import numpy as np
7 import torch
8
9
def clip_eta(eta: torch.Tensor, norm: int, eps: float) -> torch.Tensor:
    """
    Clips the perturbation eta to be within the specified norm ball.

    Args:
        eta (torch.Tensor): The perturbation tensor; dim 0 is treated as the
            batch dimension, the norm is computed per example over the rest.
        norm (int): The norm to use (np.inf, 1, or 2).
        eps (float): Epsilon, the maximum allowed norm of the perturbation.

    Returns:
        torch.Tensor: The clipped perturbation (a new tensor; the input is
        not modified).

    Raises:
        ValueError: If ``norm`` is not np.inf, 1, or 2.
    """
    if norm not in [np.inf, 1, 2]:
        raise ValueError("Norm must be np.inf, 1, or 2.")

    if norm == np.inf:
        # L-inf ball: independent per-coordinate clamp.
        eta = torch.clamp(eta, -eps, eps)
    else:
        # Guard against division by zero when a perturbation is all zeros.
        avoid_zero_div = torch.tensor(1e-12, dtype=eta.dtype, device=eta.device)
        # Reduce over every dimension except the batch dimension.
        reduc_ind = list(range(1, len(eta.size())))
        if norm == 2:
            norm_val = torch.sqrt(torch.sum(eta ** 2, dim=reduc_ind, keepdim=True))
        else:  # norm == 1
            norm_val = torch.sum(torch.abs(eta), dim=reduc_ind, keepdim=True)
        norm_val = torch.max(norm_val, avoid_zero_div)
        # Only shrink perturbations that exceed the ball; never enlarge them.
        factor = torch.min(
            torch.tensor(1.0, dtype=eta.dtype, device=eta.device),
            eps / norm_val,
        )
        # Out-of-place multiply, consistent with the L-inf branch above
        # (the original mutated the caller's tensor only in this branch).
        eta = eta * factor

    return eta
36
37
def optimize_linear(grad: torch.Tensor, eps: float, norm: int = np.inf) -> torch.Tensor:
    """
    Solves for the optimal input to a linear function under a norm constraint.

    Maximizes ``<grad, v>`` subject to ``||v||_norm <= eps``; dim 0 of
    ``grad`` is treated as the batch dimension.

    Args:
        grad (torch.Tensor): Tensor of gradients.
        eps (float): Epsilon, the maximum allowed norm of the perturbation.
        norm (int): The norm to use (np.inf, 1, or 2).

    Returns:
        torch.Tensor: The optimized perturbation, same shape as ``grad``.

    Raises:
        ValueError: If ``norm`` is not np.inf, 1, or 2.
    """
    # Reduce over every dimension except the batch dimension.
    red_ind = list(range(1, len(grad.size())))
    avoid_zero_div = torch.tensor(1e-12, dtype=grad.dtype, device=grad.device)

    if norm == np.inf:
        # L-inf: the optimum puts +/-1 (scaled by eps below) in every coordinate.
        optimal_perturbation = torch.sign(grad)
    elif norm == 1:
        # L1: concentrate all mass on the coordinate(s) with the largest
        # absolute gradient; ties share the mass equally.
        abs_grad = torch.abs(grad)
        sign = torch.sign(grad)
        ori_shape = [1] * len(grad.size())
        ori_shape[0] = grad.size(0)

        max_abs_grad, _ = torch.max(abs_grad.view(grad.size(0), -1), 1)
        max_mask = abs_grad.eq(max_abs_grad.view(ori_shape)).to(torch.float)
        num_ties = max_mask
        for red_scalar in red_ind:
            num_ties = torch.sum(num_ties, red_scalar, keepdim=True)
        optimal_perturbation = sign * max_mask / num_ties
        # Sanity check: unit L1 norm per example before scaling by eps.
        opt_pert_norm = optimal_perturbation.abs().sum(dim=red_ind)
        assert torch.all(opt_pert_norm == torch.ones_like(opt_pert_norm))
    elif norm == 2:
        # L2: normalize the gradient to unit L2 norm per example.
        square = torch.max(avoid_zero_div, torch.sum(grad ** 2, red_ind, keepdim=True))
        optimal_perturbation = grad / torch.sqrt(square)

        # Sanity check: unit norm per example, except where the gradient was
        # (near) zero and avoid_zero_div kicked in.
        opt_pert_norm = (
            optimal_perturbation.pow(2).sum(dim=red_ind, keepdim=True).sqrt()
        )
        one_mask = (square <= avoid_zero_div).to(torch.float) * opt_pert_norm + (
            square > avoid_zero_div
        ).to(torch.float)
        assert torch.allclose(opt_pert_norm, one_mask, rtol=1e-05, atol=1e-08)
    else:
        raise ValueError("Only L-inf, L1 and L2 norms are currently implemented.")

    scaled_perturbation = eps * optimal_perturbation
    return scaled_perturbation