Coverage for tests/tests_advertrain/test_dependencies/test_cleverhans/test_utils.py: 100%

42 statements  

coverage.py v7.9.2, created at 2025-09-10 08:11 +0000

import numpy as np
import pytest
import torch

from robustML.advertrain.dependencies.cleverhans.utils import (clip_eta,
                                                                optimize_linear)

def test_clip_eta_inf_norm():
    eta = torch.tensor([0.5, -0.5, 1.0, -1.0])
    eps = 0.3
    norm = np.inf

    clipped_eta = clip_eta(eta, norm, eps)
    assert torch.all(clipped_eta <= eps) and torch.all(clipped_eta >= -eps)


def test_clip_eta_l2_norm():
    eta = torch.tensor([0.3, 0.4])
    eps = 0.5
    norm = 2

    clipped_eta = clip_eta(eta, norm, eps)
    assert torch.sqrt(torch.sum(clipped_eta ** 2)) <= eps


def test_clip_eta_error_on_invalid_norm():
    eta = torch.tensor([0.5, -0.5])
    eps = 0.3
    norm = 0

    with pytest.raises(ValueError):
        clip_eta(eta, norm, eps)
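For reference, the behaviour these three clip_eta tests pin down can be summarised by the sketch below. It is illustrative only: the helper name _reference_clip_eta is hypothetical, it reuses the numpy/torch imports above, it only covers the norms exercised here, and it is not the actual cleverhans implementation.

def _reference_clip_eta(eta, norm, eps):
    # Project eta onto the eps-ball of the given norm, as the tests above expect.
    if norm == np.inf:
        # Component-wise clipping keeps every entry within [-eps, eps].
        return torch.clamp(eta, -eps, eps)
    if norm == 2:
        # Shrink eta only if its L2 norm exceeds eps.
        l2 = torch.sqrt(torch.sum(eta ** 2))
        factor = torch.clamp(eps / (l2 + 1e-12), max=1.0)
        return eta * factor
    # Any other norm (e.g. 0) is rejected, matching the ValueError test.
    raise ValueError("norm must be np.inf or 2")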

def test_optimize_linear_inf_norm():
    grad = torch.tensor([0.1, -0.2, 0.3, -0.4])
    eps = 0.5
    norm = np.inf

    optimized_perturbation = optimize_linear(grad, eps, norm)
    assert torch.equal(optimized_perturbation, torch.tensor([0.5, -0.5, 0.5, -0.5]))


def test_optimize_linear_l2_norm():
    grad = torch.tensor([0.3, 0.4])
    eps = 0.5
    norm = 2

    optimized_perturbation = optimize_linear(grad, eps, norm)
    scale = eps / torch.sqrt(torch.sum(grad ** 2))
    expected_perturbation = grad * scale
    assert torch.allclose(optimized_perturbation, expected_perturbation)


def test_optimize_linear_error_on_invalid_norm():
    grad = torch.tensor([0.1, -0.2])
    eps = 0.5
    norm = 0

    with pytest.raises(ValueError):
        optimize_linear(grad, eps, norm)
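
Analogously, the optimize_linear tests pin down the norm-constrained perturbation that maximises the inner product with the gradient. The sketch below is illustrative only: the helper name _reference_optimize_linear is hypothetical, it covers only the norms exercised by these tests, and it is not the actual cleverhans implementation.

def _reference_optimize_linear(grad, eps, norm):
    # Return the perturbation of size eps (in the given norm) most aligned with grad.
    if norm == np.inf:
        # Under the inf-norm budget, the optimum is eps times the sign of each component.
        return eps * torch.sign(grad)
    if norm == 2:
        # Under the L2 budget, the optimum is grad rescaled to length eps.
        l2 = torch.sqrt(torch.sum(grad ** 2))
        return eps * grad / (l2 + 1e-12)
    # Any other norm (e.g. 0) is rejected, matching the ValueError test.
    raise ValueError("norm must be np.inf or 2")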