From 05d2923bcaea0f704075e770caa2f84bd8a72f39 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Wed, 27 Dec 2023 22:04:10 +0100 Subject: [PATCH 01/31] Update docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Beat Buesser Signed-off-by: Élie Goudout --- docs/modules/attacks/evasion.rst | 6 ++++++ docs/modules/defences/trainer.rst | 18 ++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/docs/modules/attacks/evasion.rst b/docs/modules/attacks/evasion.rst index d49b2be0bf..a1f7862af6 100644 --- a/docs/modules/attacks/evasion.rst +++ b/docs/modules/attacks/evasion.rst @@ -86,6 +86,12 @@ Carlini and Wagner ASR Attack :members: :special-members: +Composite Adversarial Attack - PyTorch +-------------------------------------- +.. autoclass:: CompositeAdversarialAttackPyTorch + :members: + :special-members: + Decision Tree Attack -------------------- .. autoclass:: DecisionTreeAttack diff --git a/docs/modules/defences/trainer.rst b/docs/modules/defences/trainer.rst index d476535b8d..cdd78019cc 100644 --- a/docs/modules/defences/trainer.rst +++ b/docs/modules/defences/trainer.rst @@ -20,6 +20,24 @@ Adversarial Training Madry PGD :members: :special-members: +Adversarial Training Adversarial Weight Perturbation (AWP) - PyTorch +-------------------------------------------------------------------- +.. autoclass:: AdversarialTrainerAWPPyTorch + :members: + :special-members: + +Adversarial Training Oracle Aligned Adversarial Training (OAAT) - PyTorch +------------------------------------------------------------------------- +.. autoclass:: AdversarialTrainerOAATPyTorch + :members: + :special-members: + +Adversarial Training TRADES - PyTorch +------------------------------------- +.. autoclass:: AdversarialTrainerTRADESPyTorch + :members: + :special-members: + Base Class Adversarial Training Fast is Better than Free -------------------------------------------------------- .. autoclass:: AdversarialTrainerFBF From ae4b4f0995e43695bcc16a3b588cd4e51204123c Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Wed, 27 Dec 2023 22:07:24 +0100 Subject: [PATCH 02/31] Bump version to ART 1.17.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Beat Buesser Signed-off-by: Élie Goudout --- .github/workflows/dockerhub.yml | 2 +- README-cn.md | 2 +- README.md | 2 +- art/__init__.py | 2 +- docs/conf.py | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/dockerhub.yml b/.github/workflows/dockerhub.yml index 122f5a366f..abe91f711f 100644 --- a/.github/workflows/dockerhub.yml +++ b/.github/workflows/dockerhub.yml @@ -35,7 +35,7 @@ jobs: with: images: adversarialrobustnesstoolbox/releases tags: | - type=raw,value={{branch}}-1.16.0-{{sha}} + type=raw,value={{branch}}-1.17.0-{{sha}} type=semver,pattern={{version}} - name: Build and push Docker image diff --git a/README-cn.md b/README-cn.md index cbb95eb8be..ba6e563612 100644 --- a/README-cn.md +++ b/README-cn.md @@ -1,4 +1,4 @@ -# Adversarial Robustness Toolbox (ART) v1.16 +# Adversarial Robustness Toolbox (ART) v1.17

diff --git a/README.md b/README.md index c63bac70dc..6ec333d643 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Adversarial Robustness Toolbox (ART) v1.16 +# Adversarial Robustness Toolbox (ART) v1.17

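(A quick sanity check added for readers following the release bump — not part of the patch itself; it assumes the new release is installed from PyPI as adversarial-robustness-toolbox.)

import art

assert art.__version__ == "1.17.0"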
diff --git a/art/__init__.py b/art/__init__.py
index 25914028c4..f6335c6bb2 100644
--- a/art/__init__.py
+++ b/art/__init__.py
@@ -12,7 +12,7 @@ from art import preprocessing

 # Semantic Version
-__version__ = "1.16.0"
+__version__ = "1.17.0"

 # pylint: disable=C0103

diff --git a/docs/conf.py b/docs/conf.py
index 2d9b82fe18..3e6663830d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -25,9 +25,9 @@
 author = "Maria-Irina Nicolae"

 # The short X.Y version
-version = "1.16"
+version = "1.17"
 # The full version, including alpha/beta/rc tags
-release = "1.16.0"
+release = "1.17.0"

 # -- General configuration ---------------------------------------------------

From a8cb2d888aa00e3dbc599bfa5e2f3c9855bda7d2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Tue, 9 Jan 2024 14:51:25 +0100
Subject: [PATCH 03/31] Improved L1 extension for FGM evasion attack
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 art/attacks/evasion/fast_gradient.py | 7 ++++++-
 tests/attacks/evasion/test_fast_gradient.py | 1 +
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py
index 637d19f010..0674069fc3 100644
--- a/art/attacks/evasion/fast_gradient.py
+++ b/art/attacks/evasion/fast_gradient.py
@@ -438,7 +438,12 @@ def _apply_norm(norm, grad, object_type=False):
 ind = tuple(range(1, len(x.shape)))
 else:
 ind = None
- grad = grad / (np.sum(np.abs(grad), axis=ind, keepdims=True) + tol)
+ if grad.ndim != 1:
+ raise NotImplementedError("TO DO (grad.ndim != 1)")
+ i_max = np.argmax(np.abs(grad), axis=None)
+ pos = grad[i_max] >= 0
+ grad = np.zeros_like(grad)
+ grad[i_max] = 1 if pos else -1
 elif norm == 2:
 if not object_type:
 ind = tuple(range(1, len(x.shape)))

diff --git a/tests/attacks/evasion/test_fast_gradient.py b/tests/attacks/evasion/test_fast_gradient.py
index 58301ee61b..b7b0f83211 100644
--- a/tests/attacks/evasion/test_fast_gradient.py
+++ b/tests/attacks/evasion/test_fast_gradient.py
@@ -162,6 +162,7 @@ def test_norm_images(art_warning, norm, fix_get_mnist_subset, image_dl_estimator
 }

 elif norm == 1:
+ raise NotImplementedError("TO DO (New expected values to fill)")
 expected_values = {
 "x_test_mean": ExpectedValue(0.00051374, 0.002),
 "x_test_min": ExpectedValue(-0.01486498, 0.001),
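An illustration added alongside the review, not code from the series: PATCH 03's one-hot update and PATCH 04's general formula are the p = 1 and p > 1 cases of the same fact. The direction d maximizing <g, d> subject to ||d||_p <= 1 has coordinates d_i = sign(g_i) * (|g_i| / ||g||_q) ** (q - 1), with dual exponent q = p / (p - 1); it degenerates to a one-hot at argmax |g_i| as p -> 1 and to sign(g) as p -> inf. A minimal NumPy sketch (the helper name is ours):

import numpy as np

def steepest_ascent_direction(g, p):
    # Batch-wise direction of at most unit Lp norm maximizing <g, d>,
    # mirroring the logic PATCH 04 introduces in `_apply_norm`.
    flat = g.reshape(len(g), -1)
    if np.isinf(p):
        d = np.sign(flat)
    elif p == 1:
        i_max = np.argmax(np.abs(flat), axis=1)
        d = np.zeros_like(flat)
        d[range(len(flat)), i_max] = np.sign(flat[range(len(flat)), i_max])
    else:
        q = p / (p - 1)  # dual exponent: 1/p + 1/q = 1
        q_norm = np.linalg.norm(flat, ord=q, axis=1, keepdims=True)
        d = np.sign(flat) * (np.abs(flat) / np.where(q_norm == 0, 1, q_norm)) ** (q - 1)
    return d.reshape(g.shape)

g = np.random.default_rng(0).normal(size=(4, 28, 28))
for p in (1, 1.5, 2, 10, np.inf):
    d = steepest_ascent_direction(g, p)
    # every returned direction lies in the unit Lp ball
    assert np.linalg.norm(d.reshape(len(d), -1), ord=p, axis=1).max() <= 1 + 1e-6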
From 4e2e837dd2dd0124b9cbdb221fd62db7cbafb615 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Sat, 13 Jan 2024 01:15:06 +0100
Subject: [PATCH 04/31] Properly generalize FGM to all p
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 art/attacks/evasion/fast_gradient.py | 35 ++++++++-----------
 .../projected_gradient_descent.py | 9 +++--
 .../projected_gradient_descent_numpy.py | 11 +++---
 .../projected_gradient_descent_pytorch.py | 30 +++++-----------
 ...rojected_gradient_descent_tensorflow_v2.py | 30 ++++++----------
 5 files changed, 48 insertions(+), 67 deletions(-)

diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py
index 0674069fc3..467a862fb0 100644
--- a/art/attacks/evasion/fast_gradient.py
+++ b/art/attacks/evasion/fast_gradient.py
@@ -84,7 +84,7 @@ def __init__(
 Create a :class:`.FastGradientMethod` instance.

 :param estimator: A trained classifier.
- :param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2.
+ :param norm: The norm of the adversarial perturbation. Possible values: "inf", `np.inf` or a real `p >= 1`.
 :param eps: Attack step size (input variation).
 :param eps_step: Step size of input variation for minimal perturbation computation.
 :param targeted: Indicates whether the attack is targeted (True) or untargeted (False)
@@ -334,8 +334,11 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n

 def _check_params(self) -> None:

- if self.norm not in [1, 2, np.inf, "inf"]:
- raise ValueError('Norm order must be either 1, 2, `np.inf` or "inf".')
+ if not (
+ self.norm == "inf"
+ or self.norm >= 1
+ ):
+ raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.')

 if not (
 isinstance(self.eps, (int, float))
@@ -431,25 +434,17 @@ def _apply_norm(norm, grad, object_type=False):
 ).any():
 logger.info("The loss gradient array contains at least one positive or negative infinity.")

+ flat = grad.reshape(1 if object_type else len(grad), -1)
 if norm in [np.inf, "inf"]:
- grad = np.sign(grad)
+ flat = np.ones_like(flat)
 elif norm == 1:
- if not object_type:
- ind = tuple(range(1, len(x.shape)))
- else:
- ind = None
- if grad.ndim != 1:
- raise NotImplementedError("TO DO (grad.ndim != 1)")
- i_max = np.argmax(np.abs(grad), axis=None)
- pos = grad[i_max] >= 0
- grad = np.zeros_like(grad)
- grad[i_max] = 1 if pos else -1
- elif norm == 2:
- if not object_type:
- ind = tuple(range(1, len(x.shape)))
- else:
- ind = None
- grad = grad / (np.sqrt(np.sum(np.square(grad), axis=ind, keepdims=True)) + tol)
+ i_max = np.argmax(np.abs(flat), axis=-1)
+ flat = np.zeros_like(flat)
+ flat[range(len(flat)), i_max] = 1
+ elif norm > 1:
+ q = norm / (norm - 1)
+ flat = (np.abs(flat) / (np.linalg.norm(flat, ord=q, axis=-1, keepdims=True) + tol)) ** (q - 1)
+ grad = flat.reshape(grad.shape) * np.sign(grad)
 return grad

 # Add momentum

diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
index 87ce6fea6c..103a926b4d 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
@@ -95,7 +95,7 @@ def __init__(
 Create a :class:`.ProjectedGradientDescent` instance.

 :param estimator: An trained estimator.
- :param norm: The norm of the adversarial perturbation supporting "inf", np.inf, 1 or 2.
+ :param norm: The norm of the adversarial perturbation supporting "inf", `np.inf` or a real `p >= 1`.
 :param eps: Maximum perturbation that the attacker can introduce.
 :param eps_step: Attack step size (input variation) at each iteration.
 :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution.
The literature @@ -210,8 +210,11 @@ def set_params(self, **kwargs) -> None: def _check_params(self) -> None: - if self.norm not in [1, 2, np.inf, "inf"]: - raise ValueError('Norm order must be either 1, 2, `np.inf` or "inf".') + if not ( + self.norm == "inf" + or self.norm >= 1 + ): + raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') if not ( isinstance(self.eps, (int, float)) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py index 1ecc8b31f1..df798f1624 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py @@ -77,7 +77,7 @@ def __init__( Create a :class:`.ProjectedGradientDescentCommon` instance. :param estimator: A trained classifier. - :param norm: The norm of the adversarial perturbation supporting "inf", np.inf, 1 or 2. + :param norm: The norm of the adversarial perturbation supporting "inf", `np.inf` or a real `p >= 1`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature @@ -179,8 +179,11 @@ def _set_targets(self, x: np.ndarray, y: Optional[np.ndarray], classifier_mixin: def _check_params(self) -> None: # pragma: no cover - if self.norm not in [1, 2, np.inf, "inf"]: - raise ValueError('Norm order must be either 1, 2, `np.inf` or "inf".') + if not ( + self.norm == "inf" + or self.norm >= 1 + ): + raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') if not ( isinstance(self.eps, (int, float)) @@ -263,7 +266,7 @@ def __init__( Create a :class:`.ProjectedGradientDescentNumpy` instance. :param estimator: An trained estimator. - :param norm: The norm of the adversarial perturbation supporting "inf", np.inf, 1 or 2. + :param norm: The norm of the adversarial perturbation supporting "inf", `np.inf` or a real `p >= 1`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 557d8e25de..5d121e4a3a 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -78,7 +78,7 @@ def __init__( Create a :class:`.ProjectedGradientDescentPyTorch` instance. :param estimator: An trained estimator. - :param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2. + :param norm: The norm of the adversarial perturbation. Possible values: "inf", `np.inf` or a real `p >= 1`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. 
The literature @@ -344,10 +344,12 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 grad = grad.sign() elif self.norm == 1: + raise NotImplementedError("TO DO (fix L1)") ind = tuple(range(1, len(x.shape))) grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol) # type: ignore - elif self.norm == 2: + elif self.norm > 1: + raise NotImplementedError("TO DO (properly generalize to `1 < p < inf`)") ind = tuple(range(1, len(x.shape))) grad = grad / (torch.sqrt(torch.sum(grad * grad, axis=ind, keepdims=True)) + tol) # type: ignore @@ -456,7 +458,7 @@ def _projection( :param values: Values to clip. :param eps: Maximum norm allowed. - :param norm_p: L_p norm to use for clipping supporting 1, 2, `np.Inf` and "inf". + :param norm_p: L_p norm to use for clipping supporting "inf", `np.inf` or a real `p >= 1`. :return: Values of `values` after projection. """ import torch @@ -465,31 +467,17 @@ def _projection( tol = 10e-8 values_tmp = values.reshape(values.shape[0], -1) - if norm_p == 2: + if 1 <= norm_p < np.inf: if isinstance(eps, np.ndarray): raise NotImplementedError( - "The parameter `eps` of type `np.ndarray` is not supported to use with norm 2." + "The parameter `eps` of type `np.ndarray` is not supported to use with norm `1 <= p < np.inf`." ) values_tmp = ( values_tmp * torch.min( torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), - eps / (torch.norm(values_tmp, p=2, dim=1) + tol), - ).unsqueeze_(-1) - ) - - elif norm_p == 1: - if isinstance(eps, np.ndarray): - raise NotImplementedError( - "The parameter `eps` of type `np.ndarray` is not supported to use with norm 1." - ) - - values_tmp = ( - values_tmp - * torch.min( - torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), - eps / (torch.norm(values_tmp, p=1, dim=1) + tol), + eps / (torch.norm(values_tmp, p=norm_p, dim=1) + tol), ).unsqueeze_(-1) ) @@ -504,7 +492,7 @@ def _projection( else: raise NotImplementedError( - "Values of `norm_p` different from 1, 2 and `np.inf` are currently not supported." + "Values of `norm_p < 1` are currently not supported." ) values = values_tmp.reshape(values.shape) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 2dddf5b4ec..1435c30981 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -78,7 +78,7 @@ def __init__( Create a :class:`.ProjectedGradientDescentTensorFlowV2` instance. :param estimator: An trained estimator. - :param norm: The norm of the adversarial perturbation. Possible values: np.inf, 1 or 2. + :param norm: The norm of the adversarial perturbation. Possible values: "inf", `np.inf` or a real `p >= 1`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. 
The literature @@ -351,14 +351,16 @@ def _compute_perturbation( # pylint: disable=W0221 momentum += grad # Apply norm bound - if self.norm == np.inf: + if self.norm in [np.inf, "inf"]: grad = tf.sign(grad) elif self.norm == 1: + raise NotImplementedError("TO DO (fix L1)") ind = tuple(range(1, len(x.shape))) grad = tf.divide(grad, (tf.math.reduce_sum(tf.abs(grad), axis=ind, keepdims=True) + tol)) - elif self.norm == 2: + elif self.norm > 1: + raise NotImplementedError("TO DO (properly generalize to `1 < p < inf`)") ind = tuple(range(1, len(x.shape))) grad = tf.divide( grad, (tf.math.sqrt(tf.math.reduce_sum(tf.math.square(grad), axis=ind, keepdims=True)) + tol) @@ -463,7 +465,7 @@ def _projection( :param values: Values to clip. :param eps: Maximum norm allowed. - :param norm_p: L_p norm to use for clipping supporting 1, 2 and `np.Inf`. + :param norm_p: L_p norm to use for clipping supporting "inf", `np.inf` or a real `p >= 1`. :return: Values of `values` after projection. """ import tensorflow as tf @@ -472,27 +474,17 @@ def _projection( tol = 10e-8 values_tmp = tf.reshape(values, (values.shape[0], -1)) - if norm_p == 2: + if 1 <= norm_p < np.inf: if isinstance(eps, np.ndarray): raise NotImplementedError( - "The parameter `eps` of type `np.ndarray` is not supported to use with norm 2." + "The parameter `eps` of type `np.ndarray` is not supported to use with norm `1 <= p < np.inf`." ) values_tmp = values_tmp * tf.expand_dims( - tf.minimum(1.0, eps / (tf.norm(values_tmp, ord=2, axis=1) + tol)), axis=1 + tf.minimum(1.0, eps / (tf.norm(values_tmp, ord=norm_p, axis=1) + tol)), axis=1 ) - elif norm_p == 1: - if isinstance(eps, np.ndarray): - raise NotImplementedError( - "The parameter `eps` of type `np.ndarray` is not supported to use with norm 1." - ) - - values_tmp = values_tmp * tf.expand_dims( - tf.minimum(1.0, eps / (tf.norm(values_tmp, ord=1, axis=1) + tol)), axis=1 - ) - - elif norm_p in ["inf", np.inf]: + elif norm_p in [np.inf, "inf"]: if isinstance(eps, np.ndarray): eps = eps * np.ones(shape=values.shape) eps = eps.reshape([eps.shape[0], -1]) # type: ignore @@ -501,7 +493,7 @@ def _projection( else: raise NotImplementedError( - 'Values of `norm_p` different from 1, 2 "inf" and `np.inf` are currently not supported.' + 'Values of `norm_p < 1` are currently not supported.' ) values = tf.reshape(values_tmp, values.shape) From a772f0d623a8363fefc8ad7fe78c753ab4d0b62f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Sat, 13 Jan 2024 23:35:07 +0100 Subject: [PATCH 05/31] Corrected L1 FGM test. L10 supplementary test (expected to find) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/utils.py | 1 + tests/attacks/evasion/test_fast_gradient.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/art/utils.py b/art/utils.py index 7c4ff28348..b0c2b3bcd5 100644 --- a/art/utils.py +++ b/art/utils.py @@ -563,6 +563,7 @@ def projection(values: np.ndarray, eps: Union[int, float, np.ndarray], norm_p: U values_tmp = np.sign(values_tmp) * np.minimum(abs(values_tmp), eps) else: + raise NotImplementedError("TO DO (properly generalize to any `p >= 1`)") raise NotImplementedError( 'Values of `norm_p` different from 1, 2, `np.inf` and "inf" are currently not ' "supported." 
) diff --git a/tests/attacks/evasion/test_fast_gradient.py b/tests/attacks/evasion/test_fast_gradient.py index b7b0f83211..910e9b35d7 100644 --- a/tests/attacks/evasion/test_fast_gradient.py +++ b/tests/attacks/evasion/test_fast_gradient.py @@ -146,7 +146,7 @@ def test_minimal_perturbations_images(art_warning, fix_get_mnist_subset, image_d art_warning(e) -@pytest.mark.parametrize("norm", [np.inf, 1, 2]) +@pytest.mark.parametrize("norm", [np.inf, 1, 2, 10]) @pytest.mark.skip_framework("pytorch") # temporarily skipping for pytorch until find bug fix in bounded test @pytest.mark.framework_agnostic def test_norm_images(art_warning, norm, fix_get_mnist_subset, image_dl_estimator_for_attack): @@ -162,12 +162,11 @@ def test_norm_images(art_warning, norm, fix_get_mnist_subset, image_dl_estimator } elif norm == 1: - raise NotImplementedError("TO DO (New expected values to fill)") expected_values = { "x_test_mean": ExpectedValue(0.00051374, 0.002), "x_test_min": ExpectedValue(-0.01486498, 0.001), "x_test_max": ExpectedValue(0.014761963, 0.001), - "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4, 4]), 4), + "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 4, 1, 4, 4, 1, 4, 4, 4, 4, 4]), 4), } elif norm == 2: expected_values = { @@ -176,6 +175,13 @@ def test_norm_images(art_warning, norm, fix_get_mnist_subset, image_dl_estimator "x_test_max": ExpectedValue(0.209592223, 0.001), "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 2, 4, 4, 4, 7, 7, 4, 0, 4, 4]), 2), } + elif norm == 10: + expected_values = { + "x_test_mean": ExpectedValue(0.007636416, 0.001), + "x_test_min": ExpectedValue(-0.211054801, 0.001), + "x_test_max": ExpectedValue(0.209592223, 0.001), + "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 2, 4, 4, 4, 7, 7, 4, 0, 4, 4]), 2), + } else: raise ValueError("Value of `expected_values` not recognized.") From 930cbac14a0a891c4bbe1f2f71edb86b572003a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Wed, 17 Jan 2024 15:06:57 +0100 Subject: [PATCH 06/31] projection compatible with all p>=1 in suboptimal mode + renamed test appropriately MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/utils.py | 77 +++++++++++++++++++++------------------------ tests/test_utils.py | 6 +++- 2 files changed, 40 insertions(+), 43 deletions(-) diff --git a/art/utils.py b/art/utils.py index b0c2b3bcd5..355598494c 100644 --- a/art/utils.py +++ b/art/utils.py @@ -519,54 +519,47 @@ def projection_l1_2(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> n return proj -def projection(values: np.ndarray, eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str]) -> np.ndarray: +def projection( + values: np.ndarray, + eps: Union[int, float, np.ndarray], + norm_p: Union[int, float, str], + *, + suboptimal: bool = True, +) -> np.ndarray: """ Project `values` on the L_p norm ball of size `eps`. :param values: Array of perturbations to clip. - :param eps: Maximum norm allowed. - :param norm_p: L_p norm to use for clipping. - Only 1, 2 , `np.Inf` 1.1 and 1.2 supported for now. - 1.1 and 1.2 compute orthogonal projections on l1-ball, using two different algorithms + :param eps: Maximum norm allowed. One scalar or one per sample in `values`. + :param norm_p: L_p norm to use for clipping. Only 1, 2 , `np.inf` and "inf" are supported with `suboptimal=False` + for now. + :param suboptimal: If `True` simply projects by rescaling to Lp ball. 
Fast but may be suboptimal for `norm_p != 2`. + Ignored when `norm_p in [np.inf, "inf"]` because the optimal solution is explicit. Defaults to `True`. :return: Values of `values` after projection. """ - # Pick a small scalar to avoid division by 0 - tol = 10e-8 - values_tmp = values.reshape((values.shape[0], -1)) - - if norm_p == 2: - if isinstance(eps, np.ndarray): - raise NotImplementedError("The parameter `eps` of type `np.ndarray` is not supported to use with norm 2.") - - values_tmp = values_tmp * np.expand_dims( - np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1) + tol)), axis=1 - ) - - elif norm_p == 1: - if isinstance(eps, np.ndarray): - raise NotImplementedError("The parameter `eps` of type `np.ndarray` is not supported to use with norm 1.") - - values_tmp = values_tmp * np.expand_dims( - np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1, ord=1) + tol)), - axis=1, - ) - elif norm_p == 1.1: - values_tmp = projection_l1_1(values_tmp, eps) - elif norm_p == 1.2: - values_tmp = projection_l1_2(values_tmp, eps) - - elif norm_p in [np.inf, "inf"]: - if isinstance(eps, np.ndarray): - eps = eps * np.ones_like(values) - eps = eps.reshape([eps.shape[0], -1]) # type: ignore - - values_tmp = np.sign(values_tmp) * np.minimum(abs(values_tmp), eps) - - else: - raise NotImplementedError("TO DO (properly generalize to any `p >= 1`)") - raise NotImplementedError( - 'Values of `norm_p` different from 1, 2, `np.inf` and "inf" are currently not ' "supported." - ) + p = np.inf if norm_p == "inf" else float(norm_p) + assert p > 0 + + values_tmp = values.reshape(len(np.atleast_2d(values)), -1) # (n_samples, d) + eps = np.atleast_2d(eps).T # (1 or n_samples, 1) + + if (suboptimal or p == 2) and p != np.inf: # Simple rescaling + values_norm = np.linalg.norm(values_tmp, ord=p, axis=1, keepdims=True) # (n_samples, 1) + with np.errstate(divide='ignore'): + values_tmp = values_tmp * np.where(values_norm, np.minimum(1, eps / values_norm), 0) + else: # Optimal + if p == np.inf: # Easy exact case + values_tmp = np.sign(values_tmp) * np.minimum(abs(values_tmp), eps) + elif p == 1: # Harder exact case + projection_l1 = projection_l1_1 if values_tmp.shape[1] > 29 else projection_l1_2 # From weak empirical tests + values_tmp = projection_l1(values_tmp, eps[:, 0]) + elif p > 1: # Convex optim + raise NotImplementedError( + 'Values of `norm_p > 1` different from 2, `np.inf` and "inf" are currently not supported with ' + '`suboptimal=False`.' 
+ )
+ else: # Non-convex optim
+ raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.')

 values = values_tmp.reshape(values.shape)

diff --git a/tests/test_utils.py b/tests/test_utils.py
index 65f8b3d8a7..a1a295ae8d 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -121,7 +121,7 @@ def test_master_seed_tf_v2(self):
 self.assertTrue((xv != yv).any())
 np.testing.assert_array_almost_equal(zv, xv, decimal=4)

- def test_projection(self):
+ def test_projection_norm(self):
 # Get MNIST
 (x, _), (_, _), _, _ = load_mnist()

@@ -134,6 +134,10 @@
 self.assertEqual(x.shape, x_proj.shape)
 self.assertTrue(np.allclose(np.sum(np.abs(x_proj), axis=t), 3.14159, atol=10e-8))

+ x_proj = projection(rand_sign * x, 3.14159, 1, suboptimal=False)
+ self.assertEqual(x.shape, x_proj.shape)
+ self.assertTrue(np.allclose(np.sum(np.abs(x_proj), axis=t), 3.14159, atol=10e-8))
+
 x_proj = projection(rand_sign * x, 3.14159, 2)
 self.assertEqual(x.shape, x_proj.shape)
 self.assertTrue(np.allclose(np.sqrt(np.sum(x_proj ** 2, axis=t)), 3.14159, atol=10e-8))

From f29e5ad93a5571ec08a37a3de52b861bc17f2a03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Wed, 17 Jan 2024 15:23:11 +0100
Subject: [PATCH 07/31] FGM tests ok with added p=10. Cleaner implem without tol
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 art/attacks/evasion/fast_gradient.py | 7 +++----
 tests/attacks/evasion/test_fast_gradient.py | 2 +-
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py
index 467a862fb0..a56b00ee87 100644
--- a/art/attacks/evasion/fast_gradient.py
+++ b/art/attacks/evasion/fast_gradient.py
@@ -394,9 +394,6 @@ def _compute_perturbation(
 decay: Optional[float] = None,
 momentum: Optional[np.ndarray] = None,
 ) -> np.ndarray:
- # Pick a small scalar to avoid division by 0
- tol = 10e-8
-
 # Get gradient wrt loss; invert it if attack is targeted
 grad = self.estimator.loss_gradient(x, y) * (1 - 2 * int(self.targeted))

@@ -443,7 +440,9 @@ def _apply_norm(norm, grad, object_type=False):
 flat[range(len(flat)), i_max] = 1
 elif norm > 1:
 q = norm / (norm - 1)
- flat = (np.abs(flat) / (np.linalg.norm(flat, ord=q, axis=-1, keepdims=True) + tol)) ** (q - 1)
+ q_norm = np.linalg.norm(flat, ord=q, axis=-1, keepdims=True)
+ with np.errstate(divide='ignore'):
+ flat = (np.abs(flat) * np.where(q_norm, 1 / q_norm, 0)) ** (q - 1)
 grad = flat.reshape(grad.shape) * np.sign(grad)
 return grad

diff --git a/tests/attacks/evasion/test_fast_gradient.py b/tests/attacks/evasion/test_fast_gradient.py
index 910e9b35d7..56b4eb1265 100644
--- a/tests/attacks/evasion/test_fast_gradient.py
+++ b/tests/attacks/evasion/test_fast_gradient.py
@@ -180,7 +180,7 @@ def test_norm_images(art_warning, norm, fix_get_mnist_subset, image_dl_estimator
 "x_test_mean": ExpectedValue(0.007636416, 0.001),
 "x_test_min": ExpectedValue(-0.211054801, 0.001),
 "x_test_max": ExpectedValue(0.209592223, 0.001),
- "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 2, 4, 4, 4, 7, 7, 4, 0, 4, 4]), 2),
+ "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 2, 4, 7, 0, 4, 7, 7, 2, 3, 0]), 2),
 }
 else:
 raise ValueError("Value of `expected_values` not recognized.")
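A usage sketch of the API as it stands after PATCH 06/07 (ours, assuming the series is checked out at least this far): `art.utils.projection` now accepts any real `norm_p > 0` and, by default (`suboptimal=True`), only rescales into the Lp ball, while `suboptimal=False` selects the exact orthogonal projection for L1 (the L-infinity case is always exact):

import numpy as np
from art.utils import projection

rng = np.random.default_rng(0)
v = rng.normal(size=(8, 3, 32, 32))

for p in (1, 2, 10, np.inf):
    w = projection(v, 0.5, p)  # default suboptimal=True: plain rescaling
    norms = np.linalg.norm(w.reshape(len(w), -1), ord=p, axis=1)
    assert np.all(norms <= 0.5 + 1e-6)  # every sample inside the Lp ball

# Exact (orthogonal) projection onto the L1 ball instead of rescaling:
w_exact = projection(v, 0.5, 1, suboptimal=False)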
From cc091b3937772f8063c21f4c5af085c0ec579a31 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Sat, 20 Jan 2024 12:38:29 +0100
Subject: [PATCH 08/31] axis=1
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 art/attacks/evasion/fast_gradient.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py
index a56b00ee87..7b5d91be0a 100644
--- a/art/attacks/evasion/fast_gradient.py
+++ b/art/attacks/evasion/fast_gradient.py
@@ -435,12 +435,12 @@ def _apply_norm(norm, grad, object_type=False):
 if norm in [np.inf, "inf"]:
 flat = np.ones_like(flat)
 elif norm == 1:
- i_max = np.argmax(np.abs(flat), axis=-1)
+ i_max = np.argmax(np.abs(flat), axis=1)
 flat = np.zeros_like(flat)
 flat[range(len(flat)), i_max] = 1
 elif norm > 1:
 q = norm / (norm - 1)
- q_norm = np.linalg.norm(flat, ord=q, axis=-1, keepdims=True)
+ q_norm = np.linalg.norm(flat, ord=q, axis=1, keepdims=True)
 with np.errstate(divide='ignore'):
 flat = (np.abs(flat) * np.where(q_norm, 1 / q_norm, 0)) ** (q - 1)
 grad = flat.reshape(grad.shape) * np.sign(grad)

From a6581afb9d797e6c66b9144a65dd1aba4644ef1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Sat, 20 Jan 2024 12:39:02 +0100
Subject: [PATCH 09/31] projection doc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 art/utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/art/utils.py b/art/utils.py
index 355598494c..adfc556c17 100644
--- a/art/utils.py
+++ b/art/utils.py
@@ -531,10 +531,10 @@ def projection(

 :param values: Array of perturbations to clip.
 :param eps: Maximum norm allowed. One scalar or one per sample in `values`.
- :param norm_p: L_p norm to use for clipping. Only 1, 2 , `np.inf` and "inf" are supported with `suboptimal=False`
- for now.
+ :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 1, 2 , `np.inf` and "inf" are currently
+ supported with `suboptimal=False` for now.
 :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`.
- Ignored when `norm_p in [np.inf, "inf"]` because the optimal solution is explicit. Defaults to `True`.
+ Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`.
 :return: Values of `values` after projection.

From 1c41b71c971fc68284c6b5ada01d40240578bb91 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Sat, 20 Jan 2024 12:40:23 +0100
Subject: [PATCH 10/31] PGD norm doc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 .../projected_gradient_descent.py | 4 +++-
 .../projected_gradient_descent_numpy.py | 8 ++++++--
 .../projected_gradient_descent_pytorch.py | 4 +++-
 .../projected_gradient_descent_tensorflow_v2.py | 4 +++-
 4 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
index 103a926b4d..4b76a2cb27 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
@@ -95,7 +95,9 @@ def __init__(
 Create a :class:`.ProjectedGradientDescent` instance.

 :param estimator: An trained estimator.
- :param norm: The norm of the adversarial perturbation supporting "inf", `np.inf` or a real `p >= 1`.
+ :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. + Currently, when `p` is not infinity, the projection step only rescales the noise, which may be + suboptimal for `p != 2`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py index df798f1624..4eb4a90c05 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py @@ -77,7 +77,9 @@ def __init__( Create a :class:`.ProjectedGradientDescentCommon` instance. :param estimator: A trained classifier. - :param norm: The norm of the adversarial perturbation supporting "inf", `np.inf` or a real `p >= 1`. + :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. + Currently, when `p` is not infinity, the projection step only rescales the noise, which may be + suboptimal for `p != 2`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature @@ -266,7 +268,9 @@ def __init__( Create a :class:`.ProjectedGradientDescentNumpy` instance. :param estimator: An trained estimator. - :param norm: The norm of the adversarial perturbation supporting "inf", `np.inf` or a real `p >= 1`. + :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. + Currently, when `p` is not infinity, the projection step only rescales the noise, which may be + suboptimal for `p != 2`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 5d121e4a3a..d6eb0b8cad 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -78,7 +78,9 @@ def __init__( Create a :class:`.ProjectedGradientDescentPyTorch` instance. :param estimator: An trained estimator. - :param norm: The norm of the adversarial perturbation. Possible values: "inf", `np.inf` or a real `p >= 1`. + :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. + Currently, when `p` is not infinity, the projection step only rescales the noise, which may be + suboptimal for `p != 2`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. 
The literature diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 1435c30981..b46398e509 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -78,7 +78,9 @@ def __init__( Create a :class:`.ProjectedGradientDescentTensorFlowV2` instance. :param estimator: An trained estimator. - :param norm: The norm of the adversarial perturbation. Possible values: "inf", `np.inf` or a real `p >= 1`. + :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. + Currently, when `p` is not infinity, the projection step only rescales the noise, which may be + suboptimal for `p != 2`. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature From 7b149b435de828b3a3e4aaabd26707524767d41e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Sat, 20 Jan 2024 12:42:12 +0100 Subject: [PATCH 11/31] TO DO: projection adaptation (see art.utils.projection) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 1 + .../projected_gradient_descent_tensorflow_v2.py | 1 + 2 files changed, 2 insertions(+) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index d6eb0b8cad..00350f40e5 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -469,6 +469,7 @@ def _projection( tol = 10e-8 values_tmp = values.reshape(values.shape[0], -1) + raise NotImplementedError("TO DO (Follow `art.utils.projection implementation)") if 1 <= norm_p < np.inf: if isinstance(eps, np.ndarray): raise NotImplementedError( diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index b46398e509..470d5cc013 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -476,6 +476,7 @@ def _projection( tol = 10e-8 values_tmp = tf.reshape(values, (values.shape[0], -1)) + raise NotImplementedError("TO DO (Follow `art.utils.projection` implementation)") if 1 <= norm_p < np.inf: if isinstance(eps, np.ndarray): raise NotImplementedError( From a1434767869511e7bfa2f237b29a91844fff6691 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Sat, 20 Jan 2024 12:43:20 +0100 Subject: [PATCH 12/31] PGD torch: fix L1 perturbation and extend to all p>=1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 26 +++++++++---------- ...rojected_gradient_descent_tensorflow_v2.py | 25 +++++++++++++----- 2 files changed, 31 insertions(+), 20 deletions(-) diff 
--git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 00350f40e5..d7aabbcabb 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -305,11 +305,8 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 """ import torch - # Pick a small scalar to avoid division by 0 - tol = 10e-8 - # Get gradient wrt loss; invert it if attack is targeted - grad = self.estimator.loss_gradient(x=x, y=y) * (1 - 2 * int(self.targeted)) + grad = self.estimator.loss_gradient(x=x, y=y) * (-1 if self.targeted else 1) # Write summary if self.summary_writer is not None: # pragma: no cover @@ -342,18 +339,19 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 momentum += grad # Apply norm bound - if self.norm in ["inf", np.inf]: - grad = grad.sign() - + flat = grad.reshape(len(grad), -1) + if self.norm in [np.inf, "inf"]: + flat = torch.ones_like(flat) elif self.norm == 1: - raise NotImplementedError("TO DO (fix L1)") - ind = tuple(range(1, len(x.shape))) - grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol) # type: ignore - + i_max = torch.argmax(flat.abs_(), dim=1) + flat = torch.zeros_like(flat) + flat[range(len(flat)), i_max] = 1 elif self.norm > 1: - raise NotImplementedError("TO DO (properly generalize to `1 < p < inf`)") - ind = tuple(range(1, len(x.shape))) - grad = grad / (torch.sqrt(torch.sum(grad * grad, axis=ind, keepdims=True)) + tol) # type: ignore + q = self.norm / (self.norm - 1) + q_norm = torch.linalg.norm(flat, ord=q, dim=1, keepdim=True) + flat = (flat.abs_() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) + + grad = flat.reshape(grad.shape) * grad.sign(grad) assert x.shape == grad.shape diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 470d5cc013..34a9c4d5e9 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -314,12 +314,9 @@ def _compute_perturbation( # pylint: disable=W0221 """ import tensorflow as tf - # Pick a small scalar to avoid division by 0 - tol = 10e-8 - # Get gradient wrt loss; invert it if attack is targeted grad: tf.Tensor = self.estimator.loss_gradient(x, y) * tf.constant( - 1 - 2 * int(self.targeted), dtype=ART_NUMPY_DTYPE + -1 if self.targeted else 1, dtype=ART_NUMPY_DTYPE ) # Write summary @@ -353,6 +350,23 @@ def _compute_perturbation( # pylint: disable=W0221 momentum += grad # Apply norm bound + flat = grad.reshape(len(grad), -1) + if self.norm in [np.inf, "inf"]: + flat = torch.ones_like(flat) + elif self.norm == 1: + i_max = torch.argmax(flat.abs_(), dim=1) + flat = torch.zeros_like(flat) + flat[range(len(flat)), i_max] = 1 + elif self.norm > 1: + q = self.norm / (self.norm - 1) + q_norm = torch.linalg.norm(flat, ord=q, dim=1, keepdim=True) + flat = (flat.abs_() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) + + grad = flat.reshape(grad.shape) * grad.sign(grad) + + assert x.shape == grad.shape + + # OLDDDD if self.norm in [np.inf, "inf"]: grad = tf.sign(grad) @@ -367,8 +381,7 @@ def _compute_perturbation( # pylint: disable=W0221 grad = tf.divide( grad, 
(tf.math.sqrt(tf.math.reduce_sum(tf.math.square(grad), axis=ind, keepdims=True)) + tol) ) - - assert x.shape == grad.shape + # END OLDDDD return grad From dffe33da8b898d9812607095e9089a45c6f3205a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Sun, 21 Jan 2024 22:39:47 +0100 Subject: [PATCH 13/31] PGD tf: fix L1 perturbation and extend to all p>=1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 2 +- ...rojected_gradient_descent_tensorflow_v2.py | 33 +++++-------------- 2 files changed, 9 insertions(+), 26 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index d7aabbcabb..acd7737ef6 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -351,7 +351,7 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 q_norm = torch.linalg.norm(flat, ord=q, dim=1, keepdim=True) flat = (flat.abs_() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) - grad = flat.reshape(grad.shape) * grad.sign(grad) + grad = flat.reshape(grad.shape) * grad.sign() assert x.shape == grad.shape diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 34a9c4d5e9..515b5147b8 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -350,39 +350,22 @@ def _compute_perturbation( # pylint: disable=W0221 momentum += grad # Apply norm bound - flat = grad.reshape(len(grad), -1) + flat = tf.reshape(grad, (len(grad), -1)) if self.norm in [np.inf, "inf"]: - flat = torch.ones_like(flat) + flat = tf.ones_like(flat) elif self.norm == 1: - i_max = torch.argmax(flat.abs_(), dim=1) - flat = torch.zeros_like(flat) - flat[range(len(flat)), i_max] = 1 + flat = tf.abs(flat) + flat = tf.where(flat == tf.reduce_max(flat, axis=1, keepdims=True), 1, 0) + flat /= tf.reduce_sum(flat, axis=1, keepdims=True) elif self.norm > 1: q = self.norm / (self.norm - 1) - q_norm = torch.linalg.norm(flat, ord=q, dim=1, keepdim=True) - flat = (flat.abs_() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) + q_norm = tf.norm(flat, ord=q, axis=1, keepdims=True) + flat = (tf.abs(flat) * tf.where(q_norm == 0, 0, 1 / q_norm)) ** (q - 1) - grad = flat.reshape(grad.shape) * grad.sign(grad) + grad = tf.reshape(flat, grad.shape) * tf.sign(grad) assert x.shape == grad.shape - # OLDDDD - if self.norm in [np.inf, "inf"]: - grad = tf.sign(grad) - - elif self.norm == 1: - raise NotImplementedError("TO DO (fix L1)") - ind = tuple(range(1, len(x.shape))) - grad = tf.divide(grad, (tf.math.reduce_sum(tf.abs(grad), axis=ind, keepdims=True) + tol)) - - elif self.norm > 1: - raise NotImplementedError("TO DO (properly generalize to `1 < p < inf`)") - ind = tuple(range(1, len(x.shape))) - grad = tf.divide( - grad, (tf.math.sqrt(tf.math.reduce_sum(tf.math.square(grad), axis=ind, keepdims=True)) + tol) - ) - # END OLDDDD - return grad def _apply_perturbation( # pylint: disable=W0221 From 97ed4fe481573da38fcbf97abed4f4dc2fc7b8f8 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?=C3=89lie=20Goudout?= Date: Fri, 26 Jan 2024 17:07:40 +0100 Subject: [PATCH 14/31] np.abs instead of built-in abs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/art/utils.py b/art/utils.py index adfc556c17..0b6a6d0966 100644 --- a/art/utils.py +++ b/art/utils.py @@ -531,7 +531,7 @@ def projection( :param values: Array of perturbations to clip. :param eps: Maximum norm allowed. One scalar or one per sample in `values`. - :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 1, 2 , `np.inf` and "inf" are currently + :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 1, 2, `np.inf` and "inf" are currently supported with `suboptimal=False` for now. :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`. Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`. @@ -549,7 +549,7 @@ def projection( values_tmp = values_tmp * np.where(values_norm, np.minimum(1, eps / values_norm), 0) else: # Optimal if p == np.inf: # Easy exact case - values_tmp = np.sign(values_tmp) * np.minimum(abs(values_tmp), eps) + values_tmp = np.sign(values_tmp) * np.minimum(np.abs(values_tmp), eps) elif p == 1: # Harder exact case projection_l1 = projection_l1_1 if values_tmp.shape[1] > 29 else projection_l1_2 # From weak empirical tests values_tmp = projection_l1(values_tmp, eps[:, 0]) From 2695a1f2818435ec64a2fcfe03582b8cfd8f3eb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Fri, 26 Jan 2024 17:11:02 +0100 Subject: [PATCH 15/31] PGD torch: applied _projection mods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 60 +++++++++---------- 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index acd7737ef6..997ee196dd 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -451,50 +451,44 @@ def _compute_pytorch( return x_adv def _projection( - self, values: "torch.Tensor", eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str] + self, + values: "torch.Tensor", + eps: Union[int, float, np.ndarray], + norm_p: Union[int, float, str], + *, + suboptimal: bool = True, ) -> "torch.Tensor": """ Project `values` on the L_p norm ball of size `eps`. :param values: Values to clip. - :param eps: Maximum norm allowed. - :param norm_p: L_p norm to use for clipping supporting "inf", `np.inf` or a real `p >= 1`. + :param eps: Maximum norm allowed. One scalar or one per sample in `values`. + :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 2, `np.inf` and "inf" are supported + with `suboptimal=False` for now. + :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`. + Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`. :return: Values of `values` after projection. 
""" import torch - # Pick a small scalar to avoid division by 0 - tol = 10e-8 - values_tmp = values.reshape(values.shape[0], -1) - - raise NotImplementedError("TO DO (Follow `art.utils.projection implementation)") - if 1 <= norm_p < np.inf: - if isinstance(eps, np.ndarray): + p = np.inf if norm_p == "inf" else float(norm_p) + assert p > 0 + + values_tmp = values.reshape(len(values), -1) # (n_samples, d) + eps = np.atleast_2d(eps).T # (1 or n_samples, 1) + + if (suboptimal or p == 2) and p != np.inf: # Simple rescaling + values_norm = torch.linalg.norm(values_tmp, ord=p, dim=1, keepdim=True) # (n_samples, 1) + values_tmp = values_tmp * values_norm.where(values_norm == 0, torch.minimum(1, eps / values_norm)) + else: # Optimal + if p == np.inf: # Easy exact case + values_tmp = values_tmp.sign() * torch.minimum(values_tmp.abs(), eps) + elif p >= 1: # Convex optim raise NotImplementedError( - "The parameter `eps` of type `np.ndarray` is not supported to use with norm `1 <= p < np.inf`." + 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.' ) - - values_tmp = ( - values_tmp - * torch.min( - torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), - eps / (torch.norm(values_tmp, p=norm_p, dim=1) + tol), - ).unsqueeze_(-1) - ) - - elif norm_p in [np.inf, "inf"]: - if isinstance(eps, np.ndarray): - eps = eps * np.ones_like(values.cpu()) - eps = eps.reshape([eps.shape[0], -1]) # type: ignore - - values_tmp = values_tmp.sign() * torch.min( - values_tmp.abs(), torch.tensor([eps], dtype=torch.float32).to(self.estimator.device) - ) - - else: - raise NotImplementedError( - "Values of `norm_p < 1` are currently not supported." - ) + else: # Non-convex optim + raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.') values = values_tmp.reshape(values.shape) From 93e3add7f8c958001d1b3e86916eb78b92700cb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Fri, 26 Jan 2024 23:23:14 +0100 Subject: [PATCH 16/31] PGD torch: _projection staticmethod MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 997ee196dd..5b1b5b0c73 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -450,8 +450,8 @@ def _compute_pytorch( return x_adv + @staticmethod def _projection( - self, values: "torch.Tensor", eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str], From cbe1fa5e7fb0445b5ac4e9e93c5dbd0092537b01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Sat, 27 Jan 2024 00:36:57 +0100 Subject: [PATCH 17/31] PGD tf: applied _projection mods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- ...rojected_gradient_descent_tensorflow_v2.py | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 515b5147b8..0edacfca38 
100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
@@ -456,44 +456,43 @@ def _compute_tf(

 @staticmethod
 def _projection(
- values: "tf.Tensor", eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str]
- ) -> "tf.Tensor":
+ values: "tf.Tensor",
+ eps: Union[int, float, np.ndarray],
+ norm_p: Union[int, float, str],
+ *,
+ suboptimal: bool = True,
+ ) -> "tf.Tensor":
 """
 Project `values` on the L_p norm ball of size `eps`.

 :param values: Values to clip.
- :param eps: Maximum norm allowed.
- :param norm_p: L_p norm to use for clipping supporting "inf", `np.inf` or a real `p >= 1`.
+ :param eps: Maximum norm allowed. One scalar or one per sample in `values`.
+ :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 2, `np.inf` and "inf" are supported
+ with `suboptimal=False` for now.
+ :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`.
+ Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`.
 :return: Values of `values` after projection.
 """
 import tensorflow as tf

- # Pick a small scalar to avoid division by 0
- tol = 10e-8
- values_tmp = tf.reshape(values, (values.shape[0], -1))
-
- raise NotImplementedError("TO DO (Follow `art.utils.projection` implementation)")
- if 1 <= norm_p < np.inf:
- if isinstance(eps, np.ndarray):
+ p = np.inf if norm_p == "inf" else float(norm_p)
+ assert p > 0
+
+ values_tmp = tf.reshape.reshape(values, (len(values), -1)) # (n_samples, d)
+ eps = np.atleast_2d(eps).T # (1 or n_samples, 1)
+
+ if (suboptimal or p == 2) and p != np.inf: # Simple rescaling
+ values_norm = tf.norm(values_tmp, ord=p, axis=1, keepdims=True) # (n_samples, 1)
+ values_tmp = values_tmp * tf.where(values_norm == 0, 0, tf.minimum(1, eps / values_norm))
+ else: # Optimal
+ if p == np.inf: # Easy exact case
+ values_tmp = tf.sign(values_tmp) * tf.minimum(tf.abs(values_tmp), eps)
+ elif p >= 1: # Convex optim
 raise NotImplementedError(
- "The parameter `eps` of type `np.ndarray` is not supported to use with norm `1 <= p < np.inf`."
+ 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.'
 )
-
- values_tmp = values_tmp * tf.expand_dims(
- tf.minimum(1.0, eps / (tf.norm(values_tmp, ord=norm_p, axis=1) + tol)), axis=1
- )
-
- elif norm_p in [np.inf, "inf"]:
- if isinstance(eps, np.ndarray):
- eps = eps * np.ones(shape=values.shape)
- eps = eps.reshape([eps.shape[0], -1]) # type: ignore
-
- values_tmp = tf.sign(values_tmp) * tf.minimum(tf.math.abs(values_tmp), eps)
-
- else:
- raise NotImplementedError(
- 'Values of `norm_p < 1` are currently not supported.'
- )
+ else: # Non-convex optim
+ raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.')

 values = tf.reshape(values_tmp, values.shape)

From 7c1c93c1093ce643e25def235ad302073ee9642e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Sat, 27 Jan 2024 00:57:43 +0100
Subject: [PATCH 18/31] PGD: debug for tests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 .../projected_gradient_descent_pytorch.py | 4 ++--
 .../projected_gradient_descent_tensorflow_v2.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
index 5b1b5b0c73..9d89a7190c 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
@@ -479,10 +479,10 @@ def _projection(
 if (suboptimal or p == 2) and p != np.inf: # Simple rescaling
 values_norm = torch.linalg.norm(values_tmp, ord=p, dim=1, keepdim=True) # (n_samples, 1)
- values_tmp = values_tmp * values_norm.where(values_norm == 0, torch.minimum(1, eps / values_norm))
+ values_tmp = values_tmp * values_norm.where(values_norm == 0, torch.minimum(torch.ones(1), eps / values_norm))
 else: # Optimal
 if p == np.inf: # Easy exact case
- values_tmp = values_tmp.sign() * torch.minimum(values_tmp.abs(), eps)
+ values_tmp = values_tmp.sign() * torch.minimum(values_tmp.abs(), torch.Tensor(eps))
 elif p >= 1: # Convex optim
 raise NotImplementedError(
 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.'

diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
index 0edacfca38..f33d34cd5e 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
@@ -478,7 +478,7 @@ def _projection(
 p = np.inf if norm_p == "inf" else float(norm_p)
 assert p > 0

- values_tmp = tf.reshape.reshape(values, (len(values), -1)) # (n_samples, d)
+ values_tmp = tf.reshape(values, (len(values), -1)) # (n_samples, d)
 eps = np.atleast_2d(eps).T # (1 or n_samples, 1)
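The fixes above are worth spelling out (our reproduction, not PR code): `torch.minimum` only accepts tensor arguments — hence `torch.ones(1)` and the `torch.Tensor(eps)` cast — and `tf.reshape.reshape` was an attribute typo. The repaired PyTorch rescaling step then behaves as expected:

import torch

values = torch.randn(4, 3 * 32 * 32)  # flattened batch of perturbations
eps = torch.tensor([[0.5]])           # scalar budget, broadcastable over the batch
norm = torch.linalg.norm(values, ord=2, dim=1, keepdim=True)

# torch.minimum(1, eps / norm) raises a TypeError: both arguments must be tensors.
scale = norm.where(norm == 0, torch.minimum(torch.ones(1), eps / norm))
projected = values * scale

assert torch.linalg.norm(projected, ord=2, dim=1).max() <= 0.5 + 1e-5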
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 24 ++++++++++++++----- ...rojected_gradient_descent_tensorflow_v2.py | 24 ++++++++++++++----- art/utils.py | 19 +++++++++++---- 3 files changed, 51 insertions(+), 16 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 9d89a7190c..0183b42e33 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -462,10 +462,15 @@ def _projection( Project `values` on the L_p norm ball of size `eps`. :param values: Values to clip. - :param eps: Maximum norm allowed. One scalar or one per sample in `values`. + :param eps: If a scalar, the norm of the L_p ball onto which samples are projected. Equivalently in general, + can be any array of non-negatives broadcastable with `values`, and the projection occurs onto the + unit ball for the weighted L_{p, w} norm with `w = 1 / eps`. Currently, for any given sample, + non-uniform weights are only supported with infinity norm. Example: To specify sample-wise scalar, + you can provide `eps.shape = (n_samples,) + (1,) * values[0].ndim`. :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 2, `np.inf` and "inf" are supported with `suboptimal=False` for now. - :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`. + :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for + `norm_p != 2`. Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`. :return: Values of `values` after projection. """ @@ -473,10 +478,17 @@ def _projection( p = np.inf if norm_p == "inf" else float(norm_p) assert p > 0 - + values_tmp = values.reshape(len(values), -1) # (n_samples, d) - eps = np.atleast_2d(eps).T # (1 or n_samples, 1) - + + eps = np.broadcast_to(eps, values.shape) + eps = eps.reshape(len(eps), -1) # (n_samples, d) + assert np.all(eps >= 0) + if p != np.inf and not np.all(eps == eps[:, [0]]): + raise NotImplementedError( + 'Projection onto the weighted L_p ball is currently not supported with finite `norm_p`.' + ) + if (suboptimal or p == 2) and p != np.inf: # Simple rescaling values_norm = torch.linalg.norm(values_tmp, ord=p, dim=1, keepdim=True) # (n_samples, 1) values_tmp = values_tmp * values_norm.where(values_norm == 0, torch.minimum(torch.ones(1), eps / values_norm)) @@ -488,7 +500,7 @@ def _projection( 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.' 
) else: # Non-convex optim - raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.') + raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`') values = values_tmp.reshape(values.shape) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index f33d34cd5e..722499691a 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -339,7 +339,7 @@ def _compute_perturbation( # pylint: disable=W0221 # Apply mask if mask is not None: - grad = tf.where(mask == 0.0, 0.0, grad) + grad = tf.where(mask == 0., 0., grad) # Add momentum if decay is not None and momentum is not None: @@ -466,10 +466,15 @@ def _projection( Project `values` on the L_p norm ball of size `eps`. :param values: Values to clip. - :param eps: Maximum norm allowed. One scalar or one per sample in `values`. + :param eps: If a scalar, the norm of the L_p ball onto which samples are projected. Equivalently in general, + can be any array of non-negatives broadcastable with `values`, and the projection occurs onto the + unit ball for the weighted L_{p, w} norm with `w = 1 / eps`. Currently, for any given sample, + non-uniform weights are only supported with infinity norm. Example: To specify sample-wise scalar, + you can provide `eps.shape = (n_samples,) + (1,) * values[0].ndim`. :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 2, `np.inf` and "inf" are supported with `suboptimal=False` for now. - :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`. + :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for + `norm_p != 2`. Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`. :return: Values of `values` after projection. """ @@ -479,8 +484,15 @@ def _projection( assert p > 0 values_tmp = tf.reshape(values, (len(values), -1)) # (n_samples, d) - eps = np.atleast_2d(eps).T # (1 or n_samples, 1) - + + eps = np.broadcast_to(eps, values.shape) + eps = eps.reshape(len(eps), -1) # (n_samples, d) + assert np.all(eps >= 0) + if p != np.inf and not np.all(eps == eps[:, [0]]): + raise NotImplementedError( + 'Projection onto the weighted L_p ball is currently not supported with finite `norm_p`.' + ) + if (suboptimal or p == 2) and p != np.inf: # Simple rescaling values_norm = tf.norm(values_tmp, ord=p, axis=1, keepdims=True) # (n_samples, 1) values_tmp = values_tmp * tf.where(values_norm == 0, 0, tf.minimum(1, eps / values_norm)) @@ -492,7 +504,7 @@ def _projection( 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.' ) else: # Non-convex optim - raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.') + raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`') values = tf.reshape(values_tmp, values.shape) diff --git a/art/utils.py b/art/utils.py index 0b6a6d0966..165b85716b 100644 --- a/art/utils.py +++ b/art/utils.py @@ -530,7 +530,11 @@ def projection( Project `values` on the L_p norm ball of size `eps`. :param values: Array of perturbations to clip. 
- :param eps: Maximum norm allowed. One scalar or one per sample in `values`. + :param eps: If a scalar, the norm of the L_p ball onto which samples are projected. Equivalently in general, can be + any array of non-negatives broadcastable with `values`, and the projection occurs onto the unit ball + for the weighted L_{p, w} norm with `w = 1 / eps`. Currently, for any given sample, non-uniform weights + are only supported with infinity norm. Example: To specify sample-wise scalar, you can provide + `eps.shape = (n_samples,) + (1,) * values[0].ndim`. :param norm_p: Lp norm to use for clipping, with `norm_p > 0`. Only 1, 2, `np.inf` and "inf" are currently supported with `suboptimal=False` for now. :param suboptimal: If `True` simply projects by rescaling to Lp ball. Fast but may be suboptimal for `norm_p != 2`. @@ -540,8 +544,15 @@ def projection( p = np.inf if norm_p == "inf" else float(norm_p) assert p > 0 - values_tmp = values.reshape(len(np.atleast_2d(values)), -1) # (n_samples, d) - eps = np.atleast_2d(eps).T # (1 or n_samples, 1) + values_tmp = values.reshape(len(values), -1) # (n_samples, d) + + eps = np.broadcast_to(eps, values.shape) + eps = eps.reshape(len(eps), -1) # (n_samples, d) + assert np.all(eps >= 0) + if p != np.inf and not np.all(eps == eps[:, [0]]): + raise NotImplementedError( + 'Projection onto the weighted L_p ball is currently not supported with finite `norm_p`.' + ) if (suboptimal or p == 2) and p != np.inf: # Simple rescaling values_norm = np.linalg.norm(values_tmp, ord=p, axis=1, keepdims=True) # (n_samples, 1) @@ -551,7 +562,7 @@ def projection( if p == np.inf: # Easy exact case values_tmp = np.sign(values_tmp) * np.minimum(np.abs(values_tmp), eps) elif p == 1: # Harder exact case - projection_l1 = projection_l1_1 if values_tmp.shape[1] > 29 else projection_l1_2 # From weak empirical tests + projection_l1 = projection_l1_1 if values_tmp.shape[1] > 29 else projection_l1_2 # From empirical tests values_tmp = projection_l1(values_tmp, eps[:, 0]) elif p > 1: # Convex optim raise NotImplementedError( From 6038995f22498f43d21c84cd7ef0a93d033cd934 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Mon, 29 Jan 2024 03:48:17 +0100 Subject: [PATCH 20/31] avoid wrong type casting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_pytorch.py | 4 +++- .../projected_gradient_descent_tensorflow_v2.py | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 0183b42e33..1fafc75f28 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -491,7 +491,9 @@ def _projection( if (suboptimal or p == 2) and p != np.inf: # Simple rescaling values_norm = torch.linalg.norm(values_tmp, ord=p, dim=1, keepdim=True) # (n_samples, 1) - values_tmp = values_tmp * values_norm.where(values_norm == 0, torch.minimum(torch.ones(1), eps / values_norm)) + values_tmp = values_tmp * values_norm.where( + values_norm == 0, torch.minimum(torch.ones(1), torch.Tensor(eps) / values_norm) + ) else: # Optimal if p == np.inf: # Easy exact case values_tmp = values_tmp.sign() * torch.minimum(values_tmp.abs(), torch.Tensor(eps)) diff --git 
a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
index 722499691a..b4e8a894d7 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
@@ -352,15 +352,15 @@ def _compute_perturbation(  # pylint: disable=W0221
         # Apply norm bound
         flat = tf.reshape(grad, (len(grad), -1))
         if self.norm in [np.inf, "inf"]:
-            flat = tf.ones_like(flat)
+            flat = tf.ones_like(flat, dtype=flat.dtype)
         elif self.norm == 1:
             flat = tf.abs(flat)
-            flat = tf.where(flat == tf.reduce_max(flat, axis=1, keepdims=True), 1, 0)
+            flat = tf.where(flat == tf.reduce_max(flat, axis=1, keepdims=True), 1., 0.)
             flat /= tf.reduce_sum(flat, axis=1, keepdims=True)
         elif self.norm > 1:
             q = self.norm / (self.norm - 1)
             q_norm = tf.norm(flat, ord=q, axis=1, keepdims=True)
-            flat = (tf.abs(flat) * tf.where(q_norm == 0, 0, 1 / q_norm)) ** (q - 1)
+            flat = (tf.abs(flat) * tf.where(q_norm == 0, 0., 1 / q_norm)) ** (q - 1)

         grad = tf.reshape(flat, grad.shape) * tf.sign(grad)

From 12e13472bb923d154820a2741e6380734b62e458 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Mon, 29 Jan 2024 17:24:55 +0100
Subject: [PATCH 21/31] don't use in-place abs_() on view
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 .../projected_gradient_descent_pytorch.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
index 1fafc75f28..c39f39bbe9 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
@@ -343,13 +343,13 @@ def _compute_perturbation_pytorch(  # pylint: disable=W0221
         if self.norm in [np.inf, "inf"]:
             flat = torch.ones_like(flat)
         elif self.norm == 1:
-            i_max = torch.argmax(flat.abs_(), dim=1)
+            i_max = torch.argmax(flat.abs(), dim=1)
             flat = torch.zeros_like(flat)
             flat[range(len(flat)), i_max] = 1
         elif self.norm > 1:
             q = self.norm / (self.norm - 1)
             q_norm = torch.linalg.norm(flat, ord=q, dim=1, keepdim=True)
-            flat = (flat.abs_() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1)
+            flat = (flat.abs() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1)

         grad = flat.reshape(grad.shape) * grad.sign()

From d36948c79a659b72056226ff951a87f86a58609c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89lie=20Goudout?=
Date: Fri, 23 Feb 2024 00:32:35 +0100
Subject: [PATCH 22/31] projection: output cast to input values dtype
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Élie Goudout
---
 .../projected_gradient_descent_pytorch.py       | 2 +-
 .../projected_gradient_descent_tensorflow_v2.py | 2 +-
 art/utils.py                                    | 3 +--
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
index c39f39bbe9..58f5a30b0e 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
+++
b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -504,6 +504,6 @@ def _projection( else: # Non-convex optim raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`') - values = values_tmp.reshape(values.shape) + values = values_tmp.reshape(values.shape).to(values.dtype) return values diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index b4e8a894d7..f1a5c239a8 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -506,6 +506,6 @@ def _projection( else: # Non-convex optim raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`') - values = tf.reshape(values_tmp, values.shape) + values = tf.cast(tf.reshape(values_tmp, values.shape), values.dtype) return values diff --git a/art/utils.py b/art/utils.py index 165b85716b..9abf8c4dde 100644 --- a/art/utils.py +++ b/art/utils.py @@ -572,8 +572,7 @@ def projection( else: # Non-convex optim raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.') - values = values_tmp.reshape(values.shape) - + values = values_tmp.reshape(values.shape).astype(values.dtype) return values From 0cfe35ab6a94a61ea9ba2a6bbd966dcd6351b4cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Fri, 23 Feb 2024 04:57:22 +0100 Subject: [PATCH 23/31] Keep tol for momentum. Better naming grad_2d MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/attacks/evasion/fast_gradient.py | 16 ++++++++-------- .../projected_gradient_descent_pytorch.py | 17 +++++++++-------- .../projected_gradient_descent_tensorflow_v2.py | 17 +++++++++-------- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 7b5d91be0a..9af6d1d139 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -431,19 +431,19 @@ def _apply_norm(norm, grad, object_type=False): ).any(): logger.info("The loss gradient array contains at least one positive or negative infinity.") - flat = grad.reshape(1 if object_type else len(grad), -1) + grad_2d = grad.reshape(1 if object_type else len(grad), -1) if norm in [np.inf, "inf"]: - flat = np.ones_like(flat) + grad_2d = np.ones_like(grad_2d) elif norm == 1: - i_max = np.argmax(np.abs(flat), axis=1) - flat = np.zeros_like(flat) - flat[range(len(flat)), i_max] = 1 + i_max = np.argmax(np.abs(grad_2d), axis=1) + grad_2d = np.zeros_like(grad_2d) + grad_2d[range(len(grad_2d)), i_max] = 1 elif norm > 1: q = norm / (norm - 1) - q_norm = np.linalg.norm(flat, ord=q, axis=1, keepdims=True) + q_norm = np.linalg.norm(grad_2d, ord=q, axis=1, keepdims=True) with np.errstate(divide='ignore'): - flat = (np.abs(flat) * np.where(q_norm, 1 / q_norm, 0)) ** (q - 1) - grad = flat.reshape(grad.shape) * np.sign(grad) + grad_2d = (np.abs(grad_2d) * np.where(q_norm, 1 / q_norm, 0)) ** (q - 1) + grad = grad_2d.reshape(grad.shape) * np.sign(grad) return grad # Add momentum diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py 
b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 58f5a30b0e..b46a7ad854 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -332,6 +332,7 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 # Apply momentum if self.decay is not None: + tol = 1e-7 ind = tuple(range(1, len(x.shape))) grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol) # type: ignore grad = self.decay * momentum + grad @@ -339,19 +340,19 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 momentum += grad # Apply norm bound - flat = grad.reshape(len(grad), -1) + grad_2d = grad.reshape(len(grad), -1) if self.norm in [np.inf, "inf"]: - flat = torch.ones_like(flat) + grad_2d = torch.ones_like(grad_2d) elif self.norm == 1: - i_max = torch.argmax(flat.abs(), dim=1) - flat = torch.zeros_like(flat) - flat[range(len(flat)), i_max] = 1 + i_max = torch.argmax(grad_2d.abs(), dim=1) + grad_2d = torch.zeros_like(grad_2d) + grad_2d[range(len(grad_2d)), i_max] = 1 elif self.norm > 1: q = self.norm / (self.norm - 1) - q_norm = torch.linalg.norm(flat, ord=q, dim=1, keepdim=True) - flat = (flat.abs() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) + q_norm = torch.linalg.norm(grad_2d, ord=q, dim=1, keepdim=True) + grad_2d = (grad_2d.abs() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) - grad = flat.reshape(grad.shape) * grad.sign() + grad = grad_2d.reshape(grad.shape) * grad.sign() assert x.shape == grad.shape diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index f1a5c239a8..00c7353012 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -343,6 +343,7 @@ def _compute_perturbation( # pylint: disable=W0221 # Add momentum if decay is not None and momentum is not None: + tol = 1e-7 ind = tuple(range(1, len(x.shape))) grad = tf.divide(grad, (tf.math.reduce_sum(tf.abs(grad), axis=ind, keepdims=True) + tol)) grad = self.decay * momentum + grad @@ -350,19 +351,19 @@ def _compute_perturbation( # pylint: disable=W0221 momentum += grad # Apply norm bound - flat = tf.reshape(grad, (len(grad), -1)) + grad_2d = tf.reshape(grad, (len(grad), -1)) if self.norm in [np.inf, "inf"]: - flat = tf.ones_like(flat, dtype=flat.dtype) + grad_2d = tf.ones_like(grad_2d, dtype=grad_2d.dtype) elif self.norm == 1: - flat = tf.abs(flat) - flat = tf.where(flat == tf.reduce_max(flat, axis=1, keepdims=True), 1., 0.) - flat /= tf.reduce_sum(flat, axis=1, keepdims=True) + grad_2d = tf.abs(grad_2d) + grad_2d = tf.where(grad_2d == tf.reduce_max(grad_2d, axis=1, keepdims=True), 1., 0.) 
+ grad_2d /= tf.reduce_sum(grad_2d, axis=1, keepdims=True) elif self.norm > 1: q = self.norm / (self.norm - 1) - q_norm = tf.norm(flat, ord=q, axis=1, keepdims=True) - flat = (tf.abs(flat) * tf.where(q_norm == 0, 0., 1 / q_norm)) ** (q - 1) + q_norm = tf.norm(grad_2d, ord=q, axis=1, keepdims=True) + grad_2d = (tf.abs(grad_2d) * tf.where(q_norm == 0, 0., 1 / q_norm)) ** (q - 1) - grad = tf.reshape(flat, grad.shape) * tf.sign(grad) + grad = tf.reshape(grad_2d, grad.shape) * tf.sign(grad) assert x.shape == grad.shape From fe7ea47639384e42dbefd3d86e14353a8a44cfb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Mon, 8 Apr 2024 14:01:15 +0200 Subject: [PATCH 24/31] Review pull/2382#pullrequestreview-1985672896 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/attacks/evasion/fast_gradient.py | 36 +++++++-------- .../projected_gradient_descent.py | 6 +-- .../projected_gradient_descent_numpy.py | 6 +-- .../projected_gradient_descent_pytorch.py | 35 ++++++++------- ...rojected_gradient_descent_tensorflow_v2.py | 45 ++++++++++--------- art/utils.py | 24 +++++----- 6 files changed, 75 insertions(+), 77 deletions(-) diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 9af6d1d139..80d818412b 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -288,16 +288,18 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n logger.info( "Success rate of FGM attack: %.2f%%", - rate_best - if rate_best is not None - else 100 - * compute_success( - self.estimator, # type: ignore - x, - y_array, - adv_x_best, - self.targeted, - batch_size=self.batch_size, + ( + rate_best + if rate_best is not None + else 100 + * compute_success( + self.estimator, # type: ignore + x, + y_array, + adv_x_best, + self.targeted, + batch_size=self.batch_size, + ) ), ) @@ -334,10 +336,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n def _check_params(self) -> None: - if not ( - self.norm == "inf" - or self.norm >= 1 - ): + norm: float = np.inf if self.norm == "inf" else float(norm) + if norm < 1: raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') if not ( @@ -439,10 +439,10 @@ def _apply_norm(norm, grad, object_type=False): grad_2d = np.zeros_like(grad_2d) grad_2d[range(len(grad_2d)), i_max] = 1 elif norm > 1: - q = norm / (norm - 1) - q_norm = np.linalg.norm(grad_2d, ord=q, axis=1, keepdims=True) - with np.errstate(divide='ignore'): - grad_2d = (np.abs(grad_2d) * np.where(q_norm, 1 / q_norm, 0)) ** (q - 1) + conjugate = norm / (norm - 1) + q_norm = np.linalg.norm(grad_2d, ord=conjugate, axis=1, keepdims=True) + with np.errstate(divide="ignore"): + grad_2d = (np.abs(grad_2d) * np.where(q_norm, 1 / q_norm, 0)) ** (conjugate - 1) grad = grad_2d.reshape(grad.shape) * np.sign(grad) return grad diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py index 4b76a2cb27..695a2b66b4 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py @@ -212,10 +212,8 @@ def set_params(self, **kwargs) -> None: def _check_params(self) -> None: - if not ( - self.norm == "inf" - or self.norm >= 1 - ): + norm: float = np.inf if self.norm == "inf" else float(norm) + if 
norm < 1: raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') if not ( diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py index 4eb4a90c05..d749d71cf1 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py @@ -181,10 +181,8 @@ def _set_targets(self, x: np.ndarray, y: Optional[np.ndarray], classifier_mixin: def _check_params(self) -> None: # pragma: no cover - if not ( - self.norm == "inf" - or self.norm >= 1 - ): + norm: float = np.inf if self.norm == "inf" else float(norm) + if norm < 1: raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') if not ( diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index b46a7ad854..bff4425fc3 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -187,7 +187,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n adv_x = x.astype(ART_NUMPY_DTYPE) # Compute perturbation with batching - for (batch_id, batch_all) in enumerate( + for batch_id, batch_all in enumerate( tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose) ): @@ -340,17 +340,18 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 momentum += grad # Apply norm bound + norm: float = np.inf if self.norm == "inf" else float(norm) grad_2d = grad.reshape(len(grad), -1) - if self.norm in [np.inf, "inf"]: + if norm == np.inf: grad_2d = torch.ones_like(grad_2d) - elif self.norm == 1: + elif norm == 1: i_max = torch.argmax(grad_2d.abs(), dim=1) grad_2d = torch.zeros_like(grad_2d) grad_2d[range(len(grad_2d)), i_max] = 1 - elif self.norm > 1: - q = self.norm / (self.norm - 1) - q_norm = torch.linalg.norm(grad_2d, ord=q, dim=1, keepdim=True) - grad_2d = (grad_2d.abs() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (q - 1) + elif norm > 1: + conjugate = norm / (norm - 1) + q_norm = torch.linalg.norm(grad_2d, ord=conjugate, dim=1, keepdim=True) + grad_2d = (grad_2d.abs() * q_norm.where(q_norm == 0, 1 / q_norm)) ** (conjugate - 1) grad = grad_2d.reshape(grad.shape) * grad.sign() @@ -477,33 +478,33 @@ def _projection( """ import torch - p = np.inf if norm_p == "inf" else float(norm_p) - assert p > 0 + norm = np.inf if norm_p == "inf" else float(norm_p) + assert norm > 0 values_tmp = values.reshape(len(values), -1) # (n_samples, d) eps = np.broadcast_to(eps, values.shape) eps = eps.reshape(len(eps), -1) # (n_samples, d) assert np.all(eps >= 0) - if p != np.inf and not np.all(eps == eps[:, [0]]): + if norm != np.inf and not np.all(eps == eps[:, [0]]): raise NotImplementedError( - 'Projection onto the weighted L_p ball is currently not supported with finite `norm_p`.' + "Projection onto the weighted L_p ball is currently not supported with finite `norm_p`." 
) - if (suboptimal or p == 2) and p != np.inf: # Simple rescaling - values_norm = torch.linalg.norm(values_tmp, ord=p, dim=1, keepdim=True) # (n_samples, 1) + if (suboptimal or norm == 2) and norm != np.inf: # Simple rescaling + values_norm = torch.linalg.norm(values_tmp, ord=norm, dim=1, keepdim=True) # (n_samples, 1) values_tmp = values_tmp * values_norm.where( values_norm == 0, torch.minimum(torch.ones(1), torch.Tensor(eps) / values_norm) ) else: # Optimal - if p == np.inf: # Easy exact case + if norm == np.inf: # Easy exact case values_tmp = values_tmp.sign() * torch.minimum(values_tmp.abs(), torch.Tensor(eps)) - elif p >= 1: # Convex optim + elif norm >= 1: # Convex optim raise NotImplementedError( - 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.' + "Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`." ) else: # Non-convex optim - raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`') + raise NotImplementedError("Values of `norm_p < 1` are currently not supported with `suboptimal=False`") values = values_tmp.reshape(values.shape).to(values.dtype) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 00c7353012..059bb32b54 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -188,7 +188,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n data_loader = iter(dataset) # Compute perturbation with batching - for (batch_id, batch_all) in enumerate( + for batch_id, batch_all in enumerate( tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose) ): @@ -339,7 +339,7 @@ def _compute_perturbation( # pylint: disable=W0221 # Apply mask if mask is not None: - grad = tf.where(mask == 0., 0., grad) + grad = tf.where(mask == 0.0, 0.0, grad) # Add momentum if decay is not None and momentum is not None: @@ -351,17 +351,18 @@ def _compute_perturbation( # pylint: disable=W0221 momentum += grad # Apply norm bound + norm: float = np.inf if self.norm == "inf" else float(norm) grad_2d = tf.reshape(grad, (len(grad), -1)) - if self.norm in [np.inf, "inf"]: + if norm == np.inf: grad_2d = tf.ones_like(grad_2d, dtype=grad_2d.dtype) - elif self.norm == 1: + elif norm == 1: grad_2d = tf.abs(grad_2d) - grad_2d = tf.where(grad_2d == tf.reduce_max(grad_2d, axis=1, keepdims=True), 1., 0.) 
+ grad_2d = tf.where(grad_2d == tf.reduce_max(grad_2d, axis=1, keepdims=True), 1.0, 0.0) grad_2d /= tf.reduce_sum(grad_2d, axis=1, keepdims=True) - elif self.norm > 1: - q = self.norm / (self.norm - 1) - q_norm = tf.norm(grad_2d, ord=q, axis=1, keepdims=True) - grad_2d = (tf.abs(grad_2d) * tf.where(q_norm == 0, 0., 1 / q_norm)) ** (q - 1) + elif norm > 1: + conjugate = norm / (norm - 1) + q_norm = tf.norm(grad_2d, ord=conjugate, axis=1, keepdims=True) + grad_2d = (tf.abs(grad_2d) * tf.where(q_norm == 0, 0.0, 1 / q_norm)) ** (conjugate - 1) grad = tf.reshape(grad_2d, grad.shape) * tf.sign(grad) @@ -457,12 +458,12 @@ def _compute_tf( @staticmethod def _projection( - values: "torch.Tensor", + values: "tf.Tensor", eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str], *, suboptimal: bool = True, - ) -> "torch.Tensor": + ) -> "tf.Tensor": """ Project `values` on the L_p norm ball of size `eps`. @@ -481,31 +482,31 @@ def _projection( """ import tensorflow as tf - p = np.inf if norm_p == "inf" else float(norm_p) - assert p > 0 - + norm = np.inf if norm_p == "inf" else float(norm_p) + assert norm > 0 + values_tmp = tf.reshape(values, (len(values), -1)) # (n_samples, d) eps = np.broadcast_to(eps, values.shape) eps = eps.reshape(len(eps), -1) # (n_samples, d) assert np.all(eps >= 0) - if p != np.inf and not np.all(eps == eps[:, [0]]): + if norm != np.inf and not np.all(eps == eps[:, [0]]): raise NotImplementedError( - 'Projection onto the weighted L_p ball is currently not supported with finite `norm_p`.' + "Projection onto the weighted L_p ball is currently not supported with finite `norm_p`." ) - if (suboptimal or p == 2) and p != np.inf: # Simple rescaling - values_norm = tf.norm(values_tmp, ord=p, axis=1, keepdims=True) # (n_samples, 1) + if (suboptimal or norm == 2) and norm != np.inf: # Simple rescaling + values_norm = tf.norm(values_tmp, ord=norm, axis=1, keepdims=True) # (n_samples, 1) values_tmp = values_tmp * tf.where(values_norm == 0, 0, tf.minimum(1, eps / values_norm)) else: # Optimal - if p == np.inf: # Easy exact case + if norm == np.inf: # Easy exact case values_tmp = tf.sign(values_tmp) * tf.minimum(tf.abs(values_tmp), eps) - elif p >= 1: # Convex optim + elif norm >= 1: # Convex optim raise NotImplementedError( - 'Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`.' + "Finite values of `norm_p >= 1` are currently not supported with `suboptimal=False`." ) else: # Non-convex optim - raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`') + raise NotImplementedError("Values of `norm_p < 1` are currently not supported with `suboptimal=False`") values = tf.cast(tf.reshape(values_tmp, values.shape), values.dtype) diff --git a/art/utils.py b/art/utils.py index 9abf8c4dde..f3f38b636f 100644 --- a/art/utils.py +++ b/art/utils.py @@ -541,36 +541,36 @@ def projection( Ignored when `norm_p in [np.inf, "inf"]` because optimal solution is fast. Defaults to `True`. :return: Values of `values` after projection. 
""" - p = np.inf if norm_p == "inf" else float(norm_p) - assert p > 0 + norm = np.inf if norm_p == "inf" else float(norm_p) + assert norm > 0 values_tmp = values.reshape(len(values), -1) # (n_samples, d) eps = np.broadcast_to(eps, values.shape) eps = eps.reshape(len(eps), -1) # (n_samples, d) assert np.all(eps >= 0) - if p != np.inf and not np.all(eps == eps[:, [0]]): + if norm != np.inf and not np.all(eps == eps[:, [0]]): raise NotImplementedError( - 'Projection onto the weighted L_p ball is currently not supported with finite `norm_p`.' + "Projection onto the weighted L_p ball is currently not supported with finite `norm_p`." ) - if (suboptimal or p == 2) and p != np.inf: # Simple rescaling - values_norm = np.linalg.norm(values_tmp, ord=p, axis=1, keepdims=True) # (n_samples, 1) - with np.errstate(divide='ignore'): + if (suboptimal or norm == 2) and norm != np.inf: # Simple rescaling + values_norm = np.linalg.norm(values_tmp, ord=norm, axis=1, keepdims=True) # (n_samples, 1) + with np.errstate(divide="ignore"): values_tmp = values_tmp * np.where(values_norm, np.minimum(1, eps / values_norm), 0) else: # Optimal - if p == np.inf: # Easy exact case + if norm == np.inf: # Easy exact case values_tmp = np.sign(values_tmp) * np.minimum(np.abs(values_tmp), eps) - elif p == 1: # Harder exact case + elif norm == 1: # Harder exact case projection_l1 = projection_l1_1 if values_tmp.shape[1] > 29 else projection_l1_2 # From empirical tests values_tmp = projection_l1(values_tmp, eps[:, 0]) - elif p > 1: # Convex optim + elif norm > 1: # Convex optim raise NotImplementedError( 'Values of `norm_p > 1` different from 2, `np.inf` and "inf" are currently not supported with ' - '`suboptimal=False`.' + "`suboptimal=False`." ) else: # Non-convex optim - raise NotImplementedError('Values of `norm_p < 1` are currently not supported with `suboptimal=False`.') + raise NotImplementedError("Values of `norm_p < 1` are currently not supported with `suboptimal=False`.") values = values_tmp.reshape(values.shape).astype(values.dtype) return values From 96a6c93a375c8b7681fddef81aa41eea54e2b36a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Wed, 10 Apr 2024 10:08:36 +0200 Subject: [PATCH 25/31] fixed typo (norm) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/attacks/evasion/fast_gradient.py | 2 +- .../projected_gradient_descent/projected_gradient_descent.py | 2 +- .../projected_gradient_descent_numpy.py | 2 +- .../projected_gradient_descent_pytorch.py | 2 +- .../projected_gradient_descent_tensorflow_v2.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 80d818412b..88ed9f501d 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -336,7 +336,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n def _check_params(self) -> None: - norm: float = np.inf if self.norm == "inf" else float(norm) + norm: float = np.inf if self.norm == "inf" else float(self.norm) if norm < 1: raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py index 695a2b66b4..bb4f58b5fb 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py +++ 
b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py @@ -212,7 +212,7 @@ def set_params(self, **kwargs) -> None: def _check_params(self) -> None: - norm: float = np.inf if self.norm == "inf" else float(norm) + norm: float = np.inf if self.norm == "inf" else float(self.norm) if norm < 1: raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py index d749d71cf1..89da8dc80b 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py @@ -181,7 +181,7 @@ def _set_targets(self, x: np.ndarray, y: Optional[np.ndarray], classifier_mixin: def _check_params(self) -> None: # pragma: no cover - norm: float = np.inf if self.norm == "inf" else float(norm) + norm: float = np.inf if self.norm == "inf" else float(self.norm) if norm < 1: raise ValueError('Norm order must be either "inf", `np.inf` or a real `p >= 1`.') diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index bff4425fc3..214f8b839a 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -340,7 +340,7 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 momentum += grad # Apply norm bound - norm: float = np.inf if self.norm == "inf" else float(norm) + norm: float = np.inf if self.norm == "inf" else float(self.norm) grad_2d = grad.reshape(len(grad), -1) if norm == np.inf: grad_2d = torch.ones_like(grad_2d) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 059bb32b54..27ae98a76a 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -351,7 +351,7 @@ def _compute_perturbation( # pylint: disable=W0221 momentum += grad # Apply norm bound - norm: float = np.inf if self.norm == "inf" else float(norm) + norm: float = np.inf if self.norm == "inf" else float(self.norm) grad_2d = tf.reshape(grad, (len(grad), -1)) if norm == np.inf: grad_2d = tf.ones_like(grad_2d, dtype=grad_2d.dtype) From 7eb30c4978b61527bdedac982092d82f6902e34d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Sat, 27 Apr 2024 14:18:12 +0200 Subject: [PATCH 26/31] Fix momentum computation (wrong formula) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- art/attacks/evasion/fast_gradient.py | 20 +++++++++++++------ .../projected_gradient_descent_pytorch.py | 17 +++++++++------- ...rojected_gradient_descent_tensorflow_v2.py | 17 +++++++++------- .../evasion/test_momentum_iterative_method.py | 4 ++-- 4 files changed, 36 insertions(+), 22 deletions(-) diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 88ed9f501d..060bc68612 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -426,6 +426,7 @@ def 
_compute_perturbation(
         # Apply norm bound
         def _apply_norm(norm, grad, object_type=False):
+            """Returns an x maximizing <x, grad> subject to ||x||_norm <= 1."""
             if (grad.dtype != object and np.isinf(grad).any()) or np.isnan(  # pragma: no cover
                 grad.astype(np.float32)
             ).any():
@@ -441,16 +442,23 @@ def _apply_norm(norm, grad, object_type=False):
                 grad_2d = np.zeros_like(grad_2d)
                 grad_2d[range(len(grad_2d)), i_max] = 1
             elif norm > 1:
                 conjugate = norm / (norm - 1)
                 q_norm = np.linalg.norm(grad_2d, ord=conjugate, axis=1, keepdims=True)
-                with np.errstate(divide="ignore"):
-                    grad_2d = (np.abs(grad_2d) * np.where(q_norm, 1 / q_norm, 0)) ** (conjugate - 1)
+                grad_2d = (np.abs(grad_2d) / np.where(q_norm, q_norm, np.inf)) ** (conjugate - 1)
             grad = grad_2d.reshape(grad.shape) * np.sign(grad)
             return grad

-        # Add momentum
+        # Compute gradient momentum
         if decay is not None and momentum is not None:
-            grad = _apply_norm(norm=1, grad=grad)
-            grad = decay * momentum + grad
-            momentum += grad
+            if x.dtype == object:
+                raise NotImplementedError("Momentum Iterative Method not yet implemented for object type input.")
+            # Update momentum in-place (important).
+            # The L1 normalization for accumulation is an arbitrary choice of the paper.
+            grad_2d = grad.reshape(len(grad), -1)
+            norm1 = np.linalg.norm(grad_2d, ord=1, axis=1, keepdims=True)
+            normalized_grad = (grad_2d / np.where(norm1, norm1, np.inf)).reshape(grad.shape)
+            momentum *= decay
+            momentum += normalized_grad
+            # Use the momentum to compute the perturbation, instead of the gradient
+            grad = momentum

         if x.dtype == object:
             for i_sample in range(x.shape[0]):
diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
index 214f8b839a..e0c840c1db 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
@@ -330,14 +330,17 @@ def _compute_perturbation_pytorch(  # pylint: disable=W0221
         if mask is not None:
             grad = torch.where(mask == 0.0, torch.tensor(0.0).to(self.estimator.device), grad)

-        # Apply momentum
+        # Compute gradient momentum
         if self.decay is not None:
-            tol = 1e-7
-            ind = tuple(range(1, len(x.shape)))
-            grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol)  # type: ignore
-            grad = self.decay * momentum + grad
-            # Accumulate the gradient for the next iter
-            momentum += grad
+            # Update momentum in-place (important).
+            # The L1 normalization for accumulation is an arbitrary choice of the paper.
+ grad_2d = grad.reshape(len(grad), -1) + norm1 = torch.linalg.norm(grad_2d, ord=1, dim=1, keepdim=True) + normalized_grad = (grad_2d * norm1.where(norm1 == 0, 1 / norm1)).reshape(grad.shape) + momentum *= self.decay + momentum += normalized_grad + # Use the momentum to compute the perturbation, instead of the gradient + grad = momentum # Apply norm bound norm: float = np.inf if self.norm == "inf" else float(self.norm) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 27ae98a76a..7fe2fddec8 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -341,14 +341,17 @@ def _compute_perturbation( # pylint: disable=W0221 if mask is not None: grad = tf.where(mask == 0.0, 0.0, grad) - # Add momentum + # Compute gradient momentum if decay is not None and momentum is not None: - tol = 1e-7 - ind = tuple(range(1, len(x.shape))) - grad = tf.divide(grad, (tf.math.reduce_sum(tf.abs(grad), axis=ind, keepdims=True) + tol)) - grad = self.decay * momentum + grad - # Accumulate the gradient for the next iter - momentum += grad + # Update momentum in-place (important). + # The L1 normalization for accumulation is an arbitrary choice of the paper. + grad_2d = tf.reshape(grad, (len(grad), -1)) + norm1 = tf.norm(grad_2d, ord=1, axis=1, keepdims=True) + normalized_grad = tf.reshape((grad_2d * tf.where(norm1 == 0, 0.0, 1 / norm1)), grad.shape) + momentum *= self.decay + momentum += normalized_grad + # Use the momentum to compute the perturbation, instead of the gradient + grad = momentum # Apply norm bound norm: float = np.inf if self.norm == "inf" else float(self.norm) diff --git a/tests/attacks/evasion/test_momentum_iterative_method.py b/tests/attacks/evasion/test_momentum_iterative_method.py index b63a917c69..b27856e85c 100644 --- a/tests/attacks/evasion/test_momentum_iterative_method.py +++ b/tests/attacks/evasion/test_momentum_iterative_method.py @@ -48,7 +48,7 @@ def test_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack x_train_mnist_adv = attack.generate(x=x_train_mnist) assert np.mean(x_train_mnist) == pytest.approx(0.12659499049186707, 0.01) - assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.09437845647335052, abs=0.05) + assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.1288, 0.003) assert np.max(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.3) except ARTTestException as e: @@ -64,7 +64,7 @@ def test_images_targeted(art_warning, fix_get_mnist_subset, image_dl_estimator_f attack = MomentumIterativeMethod(classifier, eps=0.3, eps_step=0.1, decay=1.0, max_iter=10) x_train_mnist_adv = attack.generate(x=x_train_mnist, y=y_train_mnist) - assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.08690829575061798, abs=0.05) + assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.1068, 0.003) assert np.max(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.3) except ARTTestException as e: From 5c74f1eeedad17f7243942db8a10ff91b36dc489 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Tue, 7 May 2024 15:35:25 +0200 Subject: [PATCH 27/31] skip momentum iterative tests for tf framework (#2439) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_tensorflow_v2.py | 3 +++ tests/attacks/evasion/test_momentum_iterative_method.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 7fe2fddec8..71981f2223 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -343,6 +343,9 @@ def _compute_perturbation( # pylint: disable=W0221 # Compute gradient momentum if decay is not None and momentum is not None: + raise NotImplementedError( + "Momentum Iterative Attack currently not working with the Tensorflow framework. See issue #2439" + ) # Update momentum in-place (important). # The L1 normalization for accumulation is an arbitrary choice of the paper. grad_2d = tf.reshape(grad, (len(grad), -1)) diff --git a/tests/attacks/evasion/test_momentum_iterative_method.py b/tests/attacks/evasion/test_momentum_iterative_method.py index b27856e85c..64eefebda7 100644 --- a/tests/attacks/evasion/test_momentum_iterative_method.py +++ b/tests/attacks/evasion/test_momentum_iterative_method.py @@ -38,7 +38,7 @@ def fix_get_mnist_subset(get_mnist_dataset): yield x_train_mnist[:n_train], y_train_mnist[:n_train], x_test_mnist[:n_test], y_test_mnist[:n_test] -@pytest.mark.framework_agnostic +@pytest.mark.skip_framework("tensorflow") # See issue #2439 def test_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack): try: (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset @@ -55,7 +55,7 @@ def test_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack art_warning(e) -@pytest.mark.framework_agnostic +@pytest.mark.skip_framework("tensorflow") # See issue #2439 def test_images_targeted(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack): try: (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset From d5a3178bf8ef62ee7ccaa51e9a57e540e0a6bd7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Wed, 22 May 2024 08:51:19 +0200 Subject: [PATCH 28/31] rectified Momentum Iterative Method test values for test_images_targeted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- tests/attacks/evasion/test_momentum_iterative_method.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/attacks/evasion/test_momentum_iterative_method.py b/tests/attacks/evasion/test_momentum_iterative_method.py index 64eefebda7..4dd2dabcf3 100644 --- a/tests/attacks/evasion/test_momentum_iterative_method.py +++ b/tests/attacks/evasion/test_momentum_iterative_method.py @@ -64,7 +64,7 @@ def test_images_targeted(art_warning, fix_get_mnist_subset, image_dl_estimator_f attack = MomentumIterativeMethod(classifier, eps=0.3, eps_step=0.1, decay=1.0, max_iter=10) x_train_mnist_adv = attack.generate(x=x_train_mnist, y=y_train_mnist) - assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.1068, 0.003) + assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.1077, 0.01) assert np.max(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.3) except ARTTestException as e: From 
0ddafcd83feb70c5a4a91282a7cef4e64e380e91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Wed, 22 May 2024 09:10:01 +0200 Subject: [PATCH 29/31] disable unreachable pylint warning after temporary NotImplementedError safeguard MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../projected_gradient_descent_tensorflow_v2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 71981f2223..41538aacd1 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -343,12 +343,12 @@ def _compute_perturbation( # pylint: disable=W0221 # Compute gradient momentum if decay is not None and momentum is not None: - raise NotImplementedError( - "Momentum Iterative Attack currently not working with the Tensorflow framework. See issue #2439" + raise NotImplementedError( # Upon fixing #2439, remove pylint disable flag below. + "Momentum Iterative Attack currently disabled for Tensorflow framework. See issue #2439" ) # Update momentum in-place (important). # The L1 normalization for accumulation is an arbitrary choice of the paper. - grad_2d = tf.reshape(grad, (len(grad), -1)) + grad_2d = tf.reshape(grad, (len(grad), -1)) # pylint: disable=unreachable norm1 = tf.norm(grad_2d, ord=1, axis=1, keepdims=True) normalized_grad = tf.reshape((grad_2d * tf.where(norm1 == 0, 0.0, 1 / norm1)), grad.shape) momentum *= self.decay From e2666b490b980215e66a1d523a923367386ac1cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Tue, 28 May 2024 20:35:29 +0200 Subject: [PATCH 30/31] mxnet separate test values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- .../evasion/test_momentum_iterative_method.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/attacks/evasion/test_momentum_iterative_method.py b/tests/attacks/evasion/test_momentum_iterative_method.py index 4dd2dabcf3..284f07415d 100644 --- a/tests/attacks/evasion/test_momentum_iterative_method.py +++ b/tests/attacks/evasion/test_momentum_iterative_method.py @@ -39,7 +39,7 @@ def fix_get_mnist_subset(get_mnist_dataset): @pytest.mark.skip_framework("tensorflow") # See issue #2439 -def test_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack): +def test_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack, framework): try: (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset classifier = image_dl_estimator_for_attack(MomentumIterativeMethod) @@ -48,15 +48,19 @@ def test_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack x_train_mnist_adv = attack.generate(x=x_train_mnist) assert np.mean(x_train_mnist) == pytest.approx(0.12659499049186707, 0.01) - assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.1288, 0.003) assert np.max(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.3) + if framework == 'mxnet': # Big discrepancy for mxnet framework + expected_mean_diff = pytest.approx(0.1116, 0.01) + else: + expected_mean_diff = pytest.approx(0.1288, 0.003) + assert 
np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == expected_mean_diff except ARTTestException as e: art_warning(e) @pytest.mark.skip_framework("tensorflow") # See issue #2439 -def test_images_targeted(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack): +def test_images_targeted(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack, framework): try: (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset classifier = image_dl_estimator_for_attack(MomentumIterativeMethod) @@ -64,8 +68,12 @@ def test_images_targeted(art_warning, fix_get_mnist_subset, image_dl_estimator_f attack = MomentumIterativeMethod(classifier, eps=0.3, eps_step=0.1, decay=1.0, max_iter=10) x_train_mnist_adv = attack.generate(x=x_train_mnist, y=y_train_mnist) - assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.1077, 0.01) assert np.max(np.abs(x_train_mnist - x_train_mnist_adv)) == pytest.approx(0.3) + if framework == 'mxnet': # Big discrepancy for mxnet framework + expected_mean_diff = pytest.approx(0.0975, 0.01) + else: + expected_mean_diff = pytest.approx(0.1077, 0.01) + assert np.mean(np.abs(x_train_mnist - x_train_mnist_adv)) == expected_mean_diff except ARTTestException as e: art_warning(e) From 9e5e0ed4805a8f58081b20f1af0624113ef114ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89lie=20Goudout?= Date: Tue, 28 May 2024 20:36:57 +0200 Subject: [PATCH 31/31] Update AUTHORS (2382#issuecomment-2128144501) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Élie Goudout --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 12f000cc3d..b5a8b35ab0 100644 --- a/AUTHORS +++ b/AUTHORS @@ -19,3 +19,4 @@ - VMware Inc. - University of Michigan - University of Wisconsin-Madison +- Élie Goudout
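
Note: for the common case of a scalar `eps`, the `projection` logic that the patches above converge on reduces to the following minimal NumPy sketch. It is illustrative only, covering just the `suboptimal=True` rescaling path for finite p and the exact element-wise clipping for p = inf; `project_lp_ball` is a hypothetical standalone name, not part of the ART API.

import numpy as np

def project_lp_ball(values: np.ndarray, eps: float, p: float) -> np.ndarray:
    """Project each sample of a batch onto the L_p ball of radius eps."""
    flat = values.reshape(len(values), -1)  # (n_samples, d)
    if p == np.inf:
        # Exact projection: clip every coordinate into [-eps, eps].
        flat = np.sign(flat) * np.minimum(np.abs(flat), eps)
    else:
        # Rescale any sample whose L_p norm exceeds eps; exact for p = 2, suboptimal otherwise.
        norms = np.linalg.norm(flat, ord=p, axis=1, keepdims=True)
        with np.errstate(divide="ignore"):
            flat = flat * np.where(norms, np.minimum(1.0, eps / norms), 0.0)
    return flat.reshape(values.shape)

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3, 8, 8))
for p in (1.0, 2.0, np.inf):
    proj = project_lp_ball(x, eps=0.5, p=p)
    assert np.all(np.linalg.norm(proj.reshape(len(proj), -1), ord=p, axis=1) <= 0.5 + 1e-6)

Rescaling is only the true closest-point projection for p = 2, which is why `art.utils.projection` keeps the dedicated `projection_l1_1` / `projection_l1_2` routines for the exact L1 case and rejects other finite p when `suboptimal=False`.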
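Note: the norm-bound step shared by `_apply_norm` in fast_gradient.py and the `_compute_perturbation*` methods picks, per sample, the direction x maximizing <x, grad> subject to ||x||_p <= 1, via the conjugate exponent q = p / (p - 1). A self-contained sketch of that rule on a plain NumPy batch (the function name is illustrative, not the ART API):

import numpy as np

def steepest_ascent_direction(grad: np.ndarray, p: float) -> np.ndarray:
    """Row-wise argmax of <x, grad> subject to ||x||_p <= 1 on the flattened batch."""
    g = grad.reshape(len(grad), -1)
    if p == np.inf:
        mag = np.ones_like(g)  # every coordinate saturates at +-1
    elif p == 1:
        i_max = np.argmax(np.abs(g), axis=1)  # all budget on the largest coordinate
        mag = np.zeros_like(g)
        mag[range(len(g)), i_max] = 1.0
    else:
        q = p / (p - 1.0)  # conjugate exponent, 1/p + 1/q = 1
        q_norm = np.linalg.norm(g, ord=q, axis=1, keepdims=True)
        mag = (np.abs(g) / np.where(q_norm, q_norm, np.inf)) ** (q - 1.0)
    return (mag * np.sign(g)).reshape(grad.shape)

g = np.random.default_rng(1).normal(size=(2, 5))
d = steepest_ascent_direction(g, p=2.0)
assert np.allclose(d, g / np.linalg.norm(g, axis=1, keepdims=True))  # p = 2 is plain normalization

For 1 < p < inf the returned point has ||x||_p = 1 whenever the gradient row is non-zero, so no tolerance constant is needed, only the division guard for an all-zero row.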
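Note: after the "Fix momentum computation (wrong formula)" patch, one Momentum Iterative FGM iteration decays the running momentum, adds the L1-normalised gradient, and then steps along the momentum instead of the raw gradient. A rough sketch of a single L_inf iteration under those assumptions; `momentum_fgm_step` is a hypothetical name, and in the real code the direction also passes through the norm bound and the projection sketched above.

import numpy as np

def momentum_fgm_step(x_adv, momentum, grad, eps_step, decay):
    """One MI-FGM iteration for the L_inf norm."""
    g = grad.reshape(len(grad), -1)
    norm1 = np.linalg.norm(g, ord=1, axis=1, keepdims=True)
    momentum *= decay  # in-place, so the caller's buffer carries over to the next iteration
    momentum += (g / np.where(norm1, norm1, np.inf)).reshape(grad.shape)
    return x_adv + eps_step * np.sign(momentum)

rng = np.random.default_rng(2)
x = rng.normal(size=(2, 4))
m = np.zeros_like(x)
x = momentum_fgm_step(x, m, grad=rng.normal(size=x.shape), eps_step=0.1, decay=1.0)

The in-place `*=` / `+=` update mirrors the "Update momentum in-place (important)" comment in the patches: rebinding the local name instead would silently drop the accumulation between iterations.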