1 """
2 Taken from https://github.com/fra31/auto-attack
3  
4 MIT License
5 """
6 import math
7 import time
8 from typing import Callable, Optional, Tuple
9  
10 import torch
11 import torch.nn as nn
12 import torch.nn.functional as F
13  
14  
def L0_norm(x: torch.Tensor) -> torch.Tensor:
    """
    Calculate the L0 norm of a tensor.

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        torch.Tensor: The L0 norm of the input tensor.
    """
    return (x != 0.).view(x.shape[0], -1).sum(-1)


def L1_norm(x: torch.Tensor, keepdim: bool = False) -> torch.Tensor:
    """
    Calculate the L1 norm of a tensor.

    Args:
        x (torch.Tensor): Input tensor.
        keepdim (bool, optional): Whether to keep the dimensions or not. Defaults to False.

    Returns:
        torch.Tensor: The L1 norm of the input tensor.
    """
    z = x.abs().view(x.shape[0], -1).sum(-1)
    if keepdim:
        z = z.view(-1, *[1] * (len(x.shape) - 1))
    return z


def L2_norm(x: torch.Tensor, keepdim: bool = False) -> torch.Tensor:
    """
    Calculate the L2 norm of a tensor.

    Args:
        x (torch.Tensor): Input tensor.
        keepdim (bool, optional): Whether to keep the dimensions or not. Defaults to False.

    Returns:
        torch.Tensor: The L2 norm of the input tensor.
    """
    z = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
    if keepdim:
        z = z.view(-1, *[1] * (len(x.shape) - 1))
    return z


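# Quick sanity check of the three norm helpers (illustrative only, not part of
# the upstream AutoAttack code); all norms are computed per batch element:
#     >>> x = torch.tensor([[3., 0., -4.]])
#     >>> L0_norm(x)  # number of non-zero entries -> tensor([2])
#     >>> L1_norm(x)  # sum of absolute values     -> tensor([7.])
#     >>> L2_norm(x)  # Euclidean norm             -> tensor([5.])

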
def L1_projection(x2: torch.Tensor, y2: torch.Tensor, eps1: float) -> torch.Tensor:
    """
    Project a point onto an L1 ball.

    Args:
        x2 (torch.Tensor): Center of the L1 ball (bs x input_dim).
        y2 (torch.Tensor): Current perturbation (x2 + y2 is the point to be projected).
        eps1 (float): Radius of the L1 ball.

    Returns:
        torch.Tensor: Delta such that ||y2 + delta||_1 <= eps1 and 0 <= x2 + y2 + delta <= 1.
    """
    x = x2.clone().float().view(x2.shape[0], -1)
    y = y2.clone().float().view(y2.shape[0], -1)
    sigma = y.clone().sign()
    u = torch.min(1 - x - y, x + y)
    u = torch.min(torch.zeros_like(y), u)
    low = -torch.clone(y).abs()
    d = u.clone()
    bs, indbs = torch.sort(-torch.cat((u, low), 1), dim=1)
    bs2 = torch.cat((bs[:, 1:], torch.zeros(bs.shape[0], 1).to(bs.device)), 1)
    inu = 2 * (indbs < u.shape[1]).float() - 1
    size1 = inu.cumsum(dim=1)
    s1 = -u.sum(dim=1)
    c = eps1 - y.clone().abs().sum(dim=1)
    c5 = s1 + c < 0
    c2 = c5.nonzero().squeeze(1)
    s = s1.unsqueeze(-1) + torch.cumsum((bs2 - bs) * size1, dim=1)

    if c2.nelement() != 0:
        lb = torch.zeros_like(c2).float()
        ub = torch.ones_like(lb) * (bs.shape[1] - 1)

        nitermax = torch.ceil(torch.log2(torch.tensor(bs.shape[1]).float()))
        counter2 = torch.zeros_like(lb).long()
        counter = 0
        while counter < nitermax:
            counter4 = torch.floor((lb + ub) / 2.)
            counter2 = counter4.type(torch.LongTensor)
            c8 = s[c2, counter2] + c[c2] < 0
            ind3 = c8.nonzero().squeeze(1)
            ind32 = (~c8).nonzero().squeeze(1)
            if ind3.nelement() != 0:
                lb[ind3] = counter4[ind3]
            if ind32.nelement() != 0:
                ub[ind32] = counter4[ind32]
            counter += 1
        lb2 = lb.long()
        alpha = (-s[c2, lb2] - c[c2]) / size1[c2, lb2 + 1] + bs2[c2, lb2]
        d[c2] = -torch.min(torch.max(-u[c2], alpha.unsqueeze(-1)), -low[c2])
    return (sigma * d).view(x2.shape)


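# Illustrative usage (not part of the upstream file): per the docstring above,
# the returned delta keeps the perturbation inside the L1 ball and the image
# inside the [0, 1] box (up to floating-point error):
#     >>> x = torch.rand(2, 3, 8, 8)
#     >>> delta = torch.randn_like(x)
#     >>> proj = L1_projection(x, delta, eps1=5.)
#     >>> d = delta + proj
#     >>> bool((L1_norm(d) <= 5. + 1e-4).all())                    # True
#     >>> z = x + d
#     >>> bool(((z >= -1e-6) & (z <= 1. + 1e-6)).all())            # True

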
class APGDAttack:
    """
    Implements the Auto-PGD (Auto Projected Gradient Descent) attack method.

    Attributes:
        model (Callable): A function representing the forward pass of the model to be attacked.
        n_iter (int): Number of iterations for the attack.
        norm (str): The type of norm for the attack ('Linf', 'L2', 'L1').
        n_restarts (int): Number of random restarts for the attack.
        eps (float): The maximum perturbation amount allowed.
        seed (int): Random seed for reproducibility.
        loss (str): Type of loss function to use ('ce' for cross-entropy, 'dlr').
        eot_iter (int): Number of iterations for Expectation over Transformation.
        rho (float): Parameter for adjusting step size.
        topk (Optional[float]): Parameter for controlling the sparsity of the attack.
        verbose (bool): If True, prints verbose output during the attack.
        device (Optional[torch.device]): The device on which to perform computations.
        use_largereps (bool): If True, uses larger epsilon values in initial iterations.
        is_tf_model (bool): If True, indicates the model is a TensorFlow model.

    Methods:
        init_hyperparam(x): Initializes hyperparameters based on the input data.
        check_oscillation(...): Checks for oscillation in the optimization process.
        check_shape(x): Ensures the input has the expected shape.
        normalize(x): Normalizes the input tensor.
        lp_norm(x): Computes the Lp norm of the input.
        dlr_loss(x, y): Computes the Difference of Logits Ratio (DLR) loss.
        attack_single_run(x, y, x_init=None): Performs a single run of the attack.
        perturb(x, y=None, best_loss=False, x_init=None): Generates adversarial examples for the given inputs.
        decr_eps_pgd(x, y, epss, iters, use_rs=True): Performs PGD with decreasing epsilon values.
    """
    def __init__(
            self,
            predict: Callable,
            n_iter: int = 100,
            norm: str = 'Linf',
            n_restarts: int = 1,
            eps: Optional[float] = None,
            seed: int = 0,
            loss: str = 'ce',
            eot_iter: int = 1,
            rho: float = .75,
            topk: Optional[float] = None,
            verbose: bool = False,
            device: Optional[torch.device] = None,
            use_largereps: bool = False,
            is_tf_model: bool = False):
        """
        Initializes the APGDAttack object with the given parameters.

        Args:
            predict: A callable representing the forward pass of the model.
            n_iter: Number of iterations for the attack.
            norm: The norm type for the attack ('Linf', 'L2', 'L1').
            n_restarts: Number of random restarts for the attack.
            eps: The maximum perturbation amount allowed.
            seed: Random seed for reproducibility.
            loss: Type of loss function to use.
            eot_iter: Number of iterations for Expectation over Transformation.
            rho: Parameter for adjusting step size.
            topk: Parameter for controlling sparsity in 'L1' norm.
            verbose: If True, enables verbose output.
            device: The device on which to perform computations.
            use_largereps: If True, uses larger epsilon values initially.
            is_tf_model: If True, indicates a TensorFlow model.
        """
        self.model = predict
        self.n_iter = n_iter
        self.eps = eps
        self.norm = norm
        self.n_restarts = n_restarts
        self.seed = seed
        self.loss = loss
        self.eot_iter = eot_iter
        self.thr_decr = rho
        self.topk = topk
        self.verbose = verbose
        self.device = device
        self.use_rs = True
        self.use_largereps = use_largereps
        self.n_iter_orig = n_iter + 0
        self.eps_orig = eps + 0.
        self.is_tf_model = is_tf_model
        self.y_target = None

    def init_hyperparam(self, x: torch.Tensor) -> None:
        """
        Initializes various hyperparameters based on the input data.

        Args:
            x (torch.Tensor): The input data.
        """
        assert self.norm in ['Linf', 'L2', 'L1']
        assert self.eps is not None
        if self.device is None:
            self.device = x.device
        self.orig_dim = list(x.shape[1:])
        self.ndims = len(self.orig_dim)
        if self.seed is None:
            self.seed = time.time()

        # set parameters for checkpoints
        self.n_iter_2 = max(int(0.22 * self.n_iter), 1)
        self.n_iter_min = max(int(0.06 * self.n_iter), 1)
        self.size_decr = max(int(0.03 * self.n_iter), 1)
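        # e.g. n_iter = 100 gives n_iter_2 = 22, n_iter_min = 6, size_decr = 3,
        # the checkpoint constants used by APGD (Croce & Hein, 2020)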

    def check_oscillation(self, x: torch.Tensor, j: int, k: int, y5: torch.Tensor, k3: float = 0.75) -> torch.Tensor:
        """
        Checks for oscillation in the optimization process to adjust step sizes.

        Args:
            x (torch.Tensor): The input tensor.
            j (int): Current iteration index.
            k (int): The number of steps to look back for oscillation.
            y5 (torch.Tensor): The tensor of losses.
            k3 (float, optional): Threshold parameter for oscillation. Defaults to 0.75.

        Returns:
            torch.Tensor: Tensor indicating if oscillation is detected.
        """
        t = torch.zeros(x.shape[1]).to(self.device)
        for counter5 in range(k):
            t += (x[j - counter5] > x[j - counter5 - 1]).float()
        return (t <= k * k3 * torch.ones_like(t)).float()
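        # Note (illustrative): with k3 = 0.75 and a lookback of k = 22 steps,
        # a batch element is flagged as oscillating when the loss improved in
        # at most floor(22 * 0.75) = 16 of those 22 steps.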

    def check_shape(self, x: torch.Tensor) -> torch.Tensor:
        """
        Ensures the input tensor has the correct shape.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The reshaped tensor.
        """
        return x if len(x.shape) > 0 else x.unsqueeze(0)

    def normalize(self, x: torch.Tensor) -> torch.Tensor:
        """
        Normalizes the input tensor based on the specified norm type.

        Args:
            x (torch.Tensor): The input tensor to be normalized.

        Returns:
            torch.Tensor: The normalized tensor.
        """
        if self.norm == 'Linf':
            t = x.abs().view(x.shape[0], -1).max(1)[0]
            return x / (t.view(-1, *([1] * self.ndims)) + 1e-12)
        elif self.norm == 'L2':
            t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
            return x / (t.view(-1, *([1] * self.ndims)) + 1e-12)
        elif self.norm == 'L1':
            try:
                t = x.abs().view(x.shape[0], -1).sum(dim=-1)
            except RuntimeError:
                # .view() fails on non-contiguous tensors; fall back to reshape
                t = x.abs().reshape([x.shape[0], -1]).sum(dim=-1)
            return x / (t.view(-1, *([1] * self.ndims)) + 1e-12)

    def lp_norm(self, x: torch.Tensor) -> torch.Tensor:
        """
        Computes the Lp norm of the input tensor.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The computed Lp norm of the input tensor.
        """
        if self.norm == 'L2':
            t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
            return t.view(-1, *([1] * self.ndims))

    def dlr_loss(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """
        Computes the Difference of Logits Ratio (DLR) loss.

        Args:
            x (torch.Tensor): The logits from the model.
            y (torch.Tensor): The target labels.

        Returns:
            torch.Tensor: The computed DLR loss.
        """
        x_sorted, ind_sorted = x.sort(dim=1)
        ind = (ind_sorted[:, -1] == y).float()
        u = torch.arange(x.shape[0])
        return -(x[u, y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (
            1. - ind)) / (x_sorted[:, -1] - x_sorted[:, -3] + 1e-12)
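        # In the notation of Croce & Hein (2020), with logits z sorted so that
        # z_pi1 >= z_pi2 >= z_pi3 >= ...:
        #     DLR(x, y) = -(z_y - max_{i != y} z_i) / (z_pi1 - z_pi3)
        # the + 1e-12 above only guards against division by zero.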

    def attack_single_run(
            self,
            x: torch.Tensor,
            y: torch.Tensor,
            x_init: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Performs a single run of the attack.

        Args:
            x (torch.Tensor): The input data (clean images).
            y (torch.Tensor): The target labels.
            x_init (Optional[torch.Tensor]): Initial starting point for the attack.

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: A tuple containing the best perturbed
            inputs, the accuracy tensor, the loss tensor, and the best adversarial examples found.
        """
        if len(x.shape) < self.ndims:
            x = x.unsqueeze(0)
            y = y.unsqueeze(0)
        if self.norm == 'Linf':
            t = 2 * torch.rand(x.shape).to(self.device).detach() - 1
            x_adv = x + self.eps * torch.ones_like(x).detach() * self.normalize(t)
        elif self.norm == 'L2':
            t = torch.randn(x.shape).to(self.device).detach()
            x_adv = x + self.eps * torch.ones_like(x).detach() * self.normalize(t)
        elif self.norm == 'L1':
            t = torch.randn(x.shape).to(self.device).detach()
            delta = L1_projection(x, t, self.eps)
            x_adv = x + t + delta
        if x_init is not None:
            x_adv = x_init.clone()
            if self.norm == 'L1' and self.verbose:
                print('[custom init] L1 perturbation {:.5f}'.format(
                    (x_adv - x).abs().view(x.shape[0], -1).sum(1).max()))
        x_adv = x_adv.clamp(0., 1.)
        x_best = x_adv.clone()
        x_best_adv = x_adv.clone()
        loss_steps = torch.zeros([self.n_iter, x.shape[0]]).to(self.device)
        loss_best_steps = torch.zeros([self.n_iter + 1, x.shape[0]]).to(self.device)
        acc_steps = torch.zeros_like(loss_best_steps)
        if not self.is_tf_model:
            if self.loss == 'ce':
                criterion_indiv = nn.CrossEntropyLoss(reduction='none')
            elif self.loss == 'ce-targeted-cfts':
                def criterion_indiv(logits, labels):
                    return -1. * F.cross_entropy(logits, labels, reduction='none')
            elif self.loss == 'dlr':
                criterion_indiv = self.dlr_loss
            elif self.loss == 'dlr-targeted':
                criterion_indiv = self.dlr_loss_targeted
            elif self.loss == 'ce-targeted':
                criterion_indiv = self.ce_loss_targeted
            else:
                raise ValueError('unknown loss')
        else:
            if self.loss == 'ce':
                criterion_indiv = self.model.get_logits_loss_grad_xent
            elif self.loss == 'dlr':
                criterion_indiv = self.model.get_logits_loss_grad_dlr
            elif self.loss == 'dlr-targeted':
                criterion_indiv = self.model.get_logits_loss_grad_target
            else:
                raise ValueError('unknown loss')

        x_adv.requires_grad_()
        grad = torch.zeros_like(x)
        for _ in range(self.eot_iter):
            if not self.is_tf_model:
                with torch.enable_grad():
                    logits = self.model(x_adv)
                    loss_indiv = criterion_indiv(logits, y)
                    loss = loss_indiv.sum()
                grad += torch.autograd.grad(loss, [x_adv])[0].detach()
            else:
                if self.y_target is None:
                    logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y)
                else:
                    logits, loss_indiv, grad_curr = criterion_indiv(
                        x_adv, y, self.y_target)
                grad += grad_curr

        grad /= float(self.eot_iter)
        grad_best = grad.clone()
        acc = logits.detach().max(1)[1] == y
        acc_steps[0] = acc + 0
        loss_best = loss_indiv.detach().clone()
        alpha = 2. if self.norm in ['Linf', 'L2'] else 1. if self.norm in ['L1'] else 2e-2
        step_size = alpha * self.eps * torch.ones([x.shape[0], *(
            [1] * self.ndims)]).to(self.device).detach()
        x_adv_old = x_adv.clone()
        k = self.n_iter_2 + 0
        if self.norm == 'L1':
            k = max(int(.04 * self.n_iter), 1)
            n_fts = math.prod(self.orig_dim)
            if x_init is None:
                topk = .2 * torch.ones([x.shape[0]], device=self.device)
                sp_old = n_fts * torch.ones_like(topk)
            else:
                topk = L0_norm(x_adv - x) / n_fts / 1.5
                sp_old = L0_norm(x_adv - x)

            adasp_redstep = 1.5
            adasp_minstep = 10.

        counter3 = 0
        loss_best_last_check = loss_best.clone()
        reduced_last_check = torch.ones_like(loss_best)
        n_fts = x.shape[-3] * x.shape[-2] * x.shape[-1]
        u = torch.arange(x.shape[0], device=self.device)
        for i in range(self.n_iter):
            # gradient step
            with torch.no_grad():
                x_adv = x_adv.detach()
                grad2 = x_adv - x_adv_old
                x_adv_old = x_adv.clone()
                a = 0.75 if i > 0 else 1.0
                if self.norm == 'Linf':
                    x_adv_1 = x_adv + step_size * torch.sign(grad)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - self.eps), x + self.eps), 0.0, 1.0)
                    x_adv_1 = torch.clamp(torch.min(torch.max(
                        x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a),
                        x - self.eps), x + self.eps), 0.0, 1.0)
                elif self.norm == 'L2':
                    x_adv_1 = x_adv + step_size * self.normalize(grad)
                    x_adv_1 = torch.clamp(x + self.normalize(x_adv_1 - x) * torch.min(
                        self.eps * torch.ones_like(x).detach(),
                        self.lp_norm(x_adv_1 - x)), 0.0, 1.0)
                    x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a)
                    x_adv_1 = torch.clamp(x + self.normalize(x_adv_1 - x) * torch.min(
                        self.eps * torch.ones_like(x).detach(),
                        self.lp_norm(x_adv_1 - x)), 0.0, 1.0)
                elif self.norm == 'L1':
                    grad_topk = grad.abs().view(x.shape[0], -1).sort(-1)[0]
                    topk_curr = torch.clamp((1. - topk) * n_fts, min=0, max=n_fts - 1).long()
                    grad_topk = grad_topk[u, topk_curr].view(-1, *[1] * (len(x.shape) - 1))
                    sparsegrad = grad * (grad.abs() >= grad_topk).float()
                    x_adv_1 = x_adv + step_size * sparsegrad.sign() / (
                        sparsegrad.sign().abs().view(x.shape[0], -1).sum(dim=-1).view(
                            -1, *[1] * (len(x.shape) - 1)) + 1e-10)
                    delta_u = x_adv_1 - x
                    delta_p = L1_projection(x, delta_u, self.eps)
                    x_adv_1 = x + delta_u + delta_p
                x_adv = x_adv_1 + 0.
            # get gradient
            x_adv.requires_grad_()
            grad = torch.zeros_like(x)
            for _ in range(self.eot_iter):
                if not self.is_tf_model:
                    with torch.enable_grad():
                        logits = self.model(x_adv)
                        loss_indiv = criterion_indiv(logits, y)
                        loss = loss_indiv.sum()

                    grad += torch.autograd.grad(loss, [x_adv])[0].detach()
                else:
                    if self.y_target is None:
                        logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y)
                    else:
                        logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y, self.y_target)
                    grad += grad_curr

            grad /= float(self.eot_iter)
            pred = logits.detach().max(1)[1] == y
            acc = torch.min(acc, pred)
            acc_steps[i + 1] = acc + 0
            ind_pred = (pred == 0).nonzero().squeeze()
            x_best_adv[ind_pred] = x_adv[ind_pred] + 0.
            if self.verbose:
                str_stats = ' - step size: {:.5f} - topk: {:.2f}'.format(
                    step_size.mean(), topk.mean() * n_fts) if self.norm in ['L1'] else ''
                print('[m] iteration: {} - best loss: {:.6f} - robust accuracy: {:.2%}{}'.format(
                    i, loss_best.sum(), acc.float().mean(), str_stats))

            # check step size
            with torch.no_grad():
                y1 = loss_indiv.detach().clone()
                loss_steps[i] = y1 + 0
                ind = (y1 > loss_best).nonzero().squeeze()
                x_best[ind] = x_adv[ind].clone()
                grad_best[ind] = grad[ind].clone()
                loss_best[ind] = y1[ind] + 0
                loss_best_steps[i + 1] = loss_best + 0
                counter3 += 1
                if counter3 == k:
                    if self.norm in ['Linf', 'L2']:
                        fl_oscillation = self.check_oscillation(loss_steps, i, k, loss_best, k3=self.thr_decr)
                        fl_reduce_no_impr = (1. - reduced_last_check) * (
                            loss_best_last_check >= loss_best).float()
                        fl_oscillation = torch.max(fl_oscillation, fl_reduce_no_impr)
                        reduced_last_check = fl_oscillation.clone()
                        loss_best_last_check = loss_best.clone()

                        if fl_oscillation.sum() > 0:
                            ind_fl_osc = (fl_oscillation > 0).nonzero().squeeze()
                            step_size[ind_fl_osc] /= 2.0

                            x_adv[ind_fl_osc] = x_best[ind_fl_osc].clone()
                            grad[ind_fl_osc] = grad_best[ind_fl_osc].clone()
                        k = max(k - self.size_decr, self.n_iter_min)

                    elif self.norm == 'L1':
                        sp_curr = L0_norm(x_best - x)
                        fl_redtopk = (sp_curr / sp_old) < .95
                        topk = sp_curr / n_fts / 1.5
                        step_size[fl_redtopk] = alpha * self.eps
                        step_size[~fl_redtopk] /= adasp_redstep
                        step_size.clamp_(alpha * self.eps / adasp_minstep, alpha * self.eps)
                        sp_old = sp_curr.clone()

                        x_adv[fl_redtopk] = x_best[fl_redtopk].clone()
                        grad[fl_redtopk] = grad_best[fl_redtopk].clone()

                    counter3 = 0

        return (x_best, acc, loss_best, x_best_adv)

    def perturb(
            self,
            x: torch.Tensor,
            y: Optional[torch.Tensor] = None,
            best_loss: bool = False,
            x_init: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """
        Generates adversarial examples for the given inputs.

        Args:
            x (torch.Tensor): Clean images.
            y (Optional[torch.Tensor]): Clean labels. If None, predicted labels are used.
            best_loss (bool, optional): If True, returns points with highest loss. Defaults to False.
            x_init (Optional[torch.Tensor]): Initial starting point for the attack.

        Returns:
            torch.Tensor: Adversarial examples.
        """
        assert self.loss in ['ce', 'dlr']
        if y is not None and len(y.shape) == 0:
            x.unsqueeze_(0)
            y.unsqueeze_(0)
        self.init_hyperparam(x)
        x = x.detach().clone().float().to(self.device)
        if not self.is_tf_model:
            y_pred = self.model(x).max(1)[1]
        else:
            y_pred = self.model.predict(x).max(1)[1]
        if y is None:
            y = y_pred.detach().clone().long().to(self.device)
        else:
            y = y.detach().clone().long().to(self.device)
        adv = x.clone()
        if self.loss != 'ce-targeted':
            acc = y_pred == y
        else:
            acc = y_pred != y
        if self.verbose:
            print('-------------------------- ',
                  'running {}-attack with epsilon {:.5f}'.format(self.norm, self.eps),
                  '--------------------------')
            print('initial accuracy: {:.2%}'.format(acc.float().mean()))

        if self.use_largereps:
            epss = [3. * self.eps_orig, 2. * self.eps_orig, 1. * self.eps_orig]
            iters = [.3 * self.n_iter_orig, .3 * self.n_iter_orig,
                     .4 * self.n_iter_orig]
            iters = [math.ceil(c) for c in iters]
            iters[-1] = self.n_iter_orig - sum(iters[:-1])  # make sure to use the given iterations
            if self.verbose:
                print('using schedule [{}x{}]'.format(
                    '+'.join([str(c) for c in epss]),
                    '+'.join([str(c) for c in iters])))
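            # e.g. eps = 0.3 and n_iter = 100 give epss = [0.9, 0.6, 0.3]
            # and iters = [30, 30, 40]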

        startt = time.time()
        if not best_loss:
            torch.random.manual_seed(self.seed)
            torch.cuda.random.manual_seed(self.seed)
            for counter in range(self.n_restarts):
                ind_to_fool = acc.nonzero().squeeze()
                if len(ind_to_fool.shape) == 0:
                    ind_to_fool = ind_to_fool.unsqueeze(0)
                if ind_to_fool.numel() != 0:
                    x_to_fool = x[ind_to_fool].clone()
                    y_to_fool = y[ind_to_fool].clone()

                    if not self.use_largereps:
                        res_curr = self.attack_single_run(x_to_fool, y_to_fool)
                    else:
                        res_curr = self.decr_eps_pgd(x_to_fool, y_to_fool, epss, iters)
                    best_curr, acc_curr, loss_curr, adv_curr = res_curr
                    ind_curr = (acc_curr == 0).nonzero().squeeze()
                    acc[ind_to_fool[ind_curr]] = 0
                    adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone()
                    if self.verbose:
                        print('restart {} - robust accuracy: {:.2%}'.format(
                            counter, acc.float().mean()),
                            '- cum. time: {:.1f} s'.format(
                            time.time() - startt))
            return adv.detach().clone()
        else:
            adv_best = x.detach().clone()
            loss_best = torch.ones([x.shape[0]]).to(
                self.device) * (-float('inf'))
            for counter in range(self.n_restarts):
                best_curr, _, loss_curr, _ = self.attack_single_run(x, y)
                ind_curr = (loss_curr > loss_best).nonzero().squeeze()
                adv_best[ind_curr] = best_curr[ind_curr] + 0.
                loss_best[ind_curr] = loss_curr[ind_curr] + 0.
                if self.verbose:
                    print('restart {} - loss: {:.5f}'.format(counter, loss_best.sum()))
            return adv_best

    def decr_eps_pgd(
            self,
            x: torch.Tensor,
            y: torch.Tensor,
            epss: list,
            iters: list,
            use_rs: bool = True
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Performs PGD with decreasing epsilon values.

        Args:
            x (torch.Tensor): The input data.
            y (torch.Tensor): The target labels.
            epss (list): List of epsilon values to use in the attack.
            iters (list): List of iteration counts corresponding to each epsilon value.
            use_rs (bool, optional): If True, uses random start. Defaults to True.

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: A tuple containing the final perturbed
            inputs, the accuracy tensor, the loss tensor, and the best adversarial examples found.
        """
        assert len(epss) == len(iters)
        assert self.norm in ['L1']
        self.use_rs = False
        if not use_rs:
            x_init = None
        else:
            x_init = x + torch.randn_like(x)
            x_init += L1_projection(x, x_init - x, 1. * float(epss[0]))
        if self.verbose:
            print('total iter: {}'.format(sum(iters)))
        for eps, niter in zip(epss, iters):
            if self.verbose:
                print('using eps: {:.2f}'.format(eps))
            self.n_iter = niter + 0
            self.eps = eps + 0.
            if x_init is not None:
                x_init += L1_projection(x, x_init - x, 1. * eps)
            x_init, acc, loss, x_adv = self.attack_single_run(x, y, x_init=x_init)
        return (x_init, acc, loss, x_adv)
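

if __name__ == "__main__":
    # Minimal smoke test (illustrative only, not part of the upstream
    # AutoAttack code): run APGD with the cross-entropy loss against an
    # untrained toy classifier on random data. The model and shapes below are
    # arbitrary assumptions chosen just to exercise the attack loop.
    torch.manual_seed(0)
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 10))
    x = torch.rand(4, 3, 8, 8)       # 4 random "images" in [0, 1]
    y = torch.randint(0, 10, (4,))   # random labels

    attack = APGDAttack(toy_model, n_iter=10, norm='Linf', eps=8. / 255)
    x_adv = attack.perturb(x, y)
    # perturbations stay inside the Linf ball and the [0, 1] box
    print('max |delta|:', (x_adv - x).abs().max().item())
    print('in [0, 1]:', bool(((x_adv >= 0) & (x_adv <= 1)).all()))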