diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6e2b9a0
--- /dev/null
+++ b/README.md
@@ -0,0 +1,29 @@
+# Increasing Confidence in Adversarial Robustness Evaluations
+
+This is the official repository of the paper _Increasing Confidence in Adversarial Robustness Evaluations_
+by Zimmermann et al. (NeurIPS 2022).
+
+The reference implementation of our proposed active test is in
+[active_tests/decision_boundary_binarization.py](active_tests/decision_boundary_binarization.py),
+and the code to reproduce our experimental findings is in [case_studies](case_studies). Note that when evaluating
+the defenses of other authors, we always used their reference implementation and only performed the _minimal_
+modifications required to integrate our test into their respective code base.
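+
+As a minimal usage sketch (the hyperparameter values shown are illustrative; `model`, `test_loader`, and
+`my_attack_fn` are placeholders for a defense's classifier, data loader, and adversarial attack), the test
+can be invoked roughly as follows. Note that the classifier's forward pass must support the `features_only`
+keyword argument (and `features_and_logits` for adaptive logit rescaling); see the docstring of
+`interior_boundary_discrimination_attack` for all arguments.
+
+```python
+import argparse_utils as aut
+from active_tests.decision_boundary_binarization import (
+    format_result,
+    interior_boundary_discrimination_attack,
+)
+
+# `model`, `test_loader`, and `my_attack_fn` are placeholders for your own
+# classifier, data loader, and attack function, respectively.
+results = interior_boundary_discrimination_attack(
+    model,
+    test_loader,
+    attack_fn=my_attack_fn,
+    linearization_settings=aut.DecisionBoundaryBinarizationSettings(
+        epsilon=8 / 255,
+        norm="linf",
+        n_inner_points=999,
+        n_boundary_points=1,
+        adversarial_attack_settings=None,
+    ),
+    n_samples=512,
+    device="cuda",
+)
+print(format_result(results, n_samples=512))
+```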
+
+## Citing
+If you use this library, you can cite our [paper](https://openreview.net/forum?id=NkK4i91VWp).
+Here is an example BibTeX entry:
+
+```bibtex
+@inproceedings{zimmermann2022increasing,
+ title={Increasing Confidence in Adversarial Robustness Evaluations},
+ author={Roland S. Zimmermann and Wieland Brendel and Florian Tramer and Nicholas Carlini},
+ booktitle={Advances in Neural Information Processing Systems},
+ editor={Alice H. Oh and Alekh Agarwal and Danielle Belgrave and Kyunghyun Cho},
+ year={2022},
+ url={https://openreview.net/forum?id=NkK4i91VWp}
+}
+```
+
+_Disclaimer: This is not an official Google product._
\ No newline at end of file
diff --git a/active_tests/__init__.py b/active_tests/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/active_tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/active_tests/decision_boundary_binarization.py b/active_tests/decision_boundary_binarization.py
new file mode 100644
index 0000000..12b8c19
--- /dev/null
+++ b/active_tests/decision_boundary_binarization.py
@@ -0,0 +1,1402 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import traceback
+import sys
+import warnings
+from typing import Callable
+from typing import List
+
+from torch.utils.data import SequentialSampler
+from typing_extensions import Literal
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import numpy as np
+from sklearn.linear_model import LogisticRegression
+from sklearn.svm import LinearSVC, SVC
+import torch
+import torch.utils.data
+from torch.utils.data import DataLoader
+import tqdm
+
+import argparse_utils as aut
+import networks
+import utils as ut
+
+__all__ = ["interior_boundary_discrimination_attack"]
+
+
+LogitRescalingType = Optional[
+ Union[Literal["fixed"], Literal["adaptive"], Literal["tight"]]
+]
+SolutionGoodnessType = Union[Literal["perfect"], Literal["good"], None]
+OptimizerType = Union[
+ Literal["sklearn"], Literal["sklearn-svm"], Literal["sgd"], Literal["adam"]
+]
+
+
+class __KwargsSequential(torch.nn.Sequential):
+ """
+ Modification of a torch.nn.Sequential model that allows kwargs in the
+ forward pass. These will be passed to the first module of the network.
+ """
+
+ def forward(self, x, **kwargs):
+ for idx, module in enumerate(self):
+ if idx == 0:
+ x = module(x, **kwargs)
+ else:
+ x = module(x)
+ return x
+
+
+def _create_raw_data(
+ x: torch.Tensor,
+ y: torch.Tensor,
+ n_inner_points: int,
+ n_boundary_points: int,
+ n_boundary_adversarial_points: int,
+ n_far_off_boundary_points: int,
+ n_far_off_adversarial_points: int,
+ batch_size: int,
+ fill_batches_for_verification: bool,
+ verify_valid_inner_input_data_fn: Optional[Callable],
+ verify_valid_boundary_input_data_fn: Optional[Callable],
+ get_boundary_adversarials_fn: Optional[
+ Callable[[torch.Tensor, torch.Tensor, int, float], torch.Tensor]
+ ],
+ device: str,
+ epsilon: float,
+ norm: ut.NormType,
+ n_boundary_classes: int = 1,
+ eta: float = 0.95,
+ xi: float = 1.50,
+ include_original: bool = True,
+ rejection_resampling_max_repetitions: int = 10,
+ sample_boundary_from_corners: bool = False,
+) -> Tuple[DataLoader, DataLoader, DataLoader, int]:
+ """Creates the raw training data in image space. Label 0 corresponds to
+ inner points and label 1 to boundary points."""
+
+ def _sample_inner_points(n_samples):
+ # We want to keep the original data point -> only generate n-1 new points
+ x_inner = torch.repeat_interleave(torch.unsqueeze(x, 0), n_samples, 0)
+
+ if norm == "linf":
+ # Random noise in [-1, 1].
+ delta_inner = 2 * torch.rand_like(x_inner) - 1.0
+ # Random noise in [-eps*eta, eps*eta]
+ delta_inner = delta_inner * eta * epsilon
+ elif norm == "l2":
+ # sample uniformly in ball with max radius eta*epsilon
+ delta_inner = torch.randn_like(x_inner)
+ delta_inner /= torch.norm(delta_inner, p=2, dim=[1, 2, 3], keepdim=True)
+ delta_inner *= torch.pow(
+ torch.rand(
+ (len(delta_inner), 1, 1, 1),
+ dtype=delta_inner.dtype,
+ device=delta_inner.device,
+ ),
+ 1 / np.prod(x.shape[1:]),
+ )
+ delta_inner *= epsilon * eta
+ else:
+            raise ValueError(f"Unsupported norm: {norm}")
+
+ if norm != "linf":
+ _, delta_inner = ut.clipping_aware_rescaling(
+ x_inner,
+ delta_inner,
+ target_distance=epsilon * eta,
+ norm=norm,
+ shrinking=True,
+ return_delta=True,
+ )
+
+ x_inner = torch.clamp(x_inner + delta_inner, 0, 1)
+ y_inner = torch.zeros(len(x_inner), dtype=torch.long, device=device)
+
+ return x_inner, y_inner
+
+ def _sample_boundary_points(n_samples, distance=epsilon):
+ x_boundary = torch.unsqueeze(x, 0).repeat(
+ tuple([n_samples] + [1] * len(x.shape))
+ )
+
+ if norm == "linf":
+ if sample_boundary_from_corners:
+ delta_boundary = torch.randint(
+ 0,
+ 2,
+ size=x_boundary.shape,
+ device=x_boundary.device,
+ dtype=x_boundary.dtype,
+ )
+ delta_boundary = (delta_boundary * 2.0 - 1.0) * distance
+ else:
+ delta_boundary = (torch.rand_like(x_boundary) * 2.0 - 1.0) * distance
+ elif norm == "l2":
+ # sample uniformly on sphere with radius epsilon
+ delta_boundary = torch.randn_like(x_boundary)
+ delta_boundary /= torch.norm(
+ delta_boundary, p=2, dim=[1, 2, 3], keepdim=True
+ )
+ delta_boundary *= distance
+ else:
+            raise ValueError(f"Unsupported norm: {norm}")
+
+ if not sample_boundary_from_corners:
+ _, delta_boundary = ut.clipping_aware_rescaling(
+ x_boundary,
+ delta_boundary,
+ target_distance=distance,
+ norm=norm,
+ growing=True,
+ shrinking=True,
+ return_delta=True,
+ )
+
+ x_boundary = torch.clamp(x_boundary + delta_boundary, 0, 1)
+ y_boundary = torch.ones(len(x_boundary), dtype=torch.long, device=device)
+
+ return x_boundary, y_boundary
+
+ def _create_boundary_data():
+ # TODO(zimmerrol): Extend this logic for multiple boundary classes
+ n_random_boundary_samples = n_boundary_points - n_boundary_adversarial_points
+ if n_random_boundary_samples == 0:
+ x_random, y_random = None, None
+ else:
+ if verify_valid_boundary_input_data_fn is None:
+ x_random, y_random = _sample_boundary_points(n_random_boundary_samples)
+ else:
+ x_random, y_random = _rejection_resampling(
+ _sample_boundary_points,
+ n_random_boundary_samples,
+ verify_valid_boundary_input_data_fn,
+ n_repetitions=rejection_resampling_max_repetitions,
+ )
+
+ if n_random_boundary_samples == n_boundary_points:
+ # do not have to add any special adversarial points anymore
+ x_total, y_total = x_random, y_random
+ else:
+ x_adv = get_boundary_adversarials_fn(
+ x.clone(), y, n_boundary_adversarial_points, epsilon
+ )
+ y_adv = torch.ones(len(x_adv), dtype=y.dtype, device=y.device)
+ if x_random is not None:
+ x_total = torch.cat((x_random, x_adv))
+ y_total = torch.cat((y_random, y_adv))
+ else:
+ x_total, y_total = x_adv, y_adv
+
+ if n_boundary_classes > 1:
+ raise NotImplementedError("n_boundary_classes > 1 is not yet implemented.")
+
+ if n_far_off_boundary_points > 0:
+ # add examples that have magnitude larger than epsilon but can be used
+ # e.g., by logit matching attacks as a reference point
+ n_random_far_off_samples = (
+ n_far_off_boundary_points - n_far_off_adversarial_points
+ )
+            if n_random_far_off_samples == 0:
+ x_faroff_random, y_faroff_random = None, None
+ else:
+ if verify_valid_boundary_input_data_fn is None:
+ x_faroff_random, y_faroff_random = _sample_boundary_points(
+ n_random_far_off_samples * n_boundary_classes,
+ distance=xi * epsilon,
+ )
+ else:
+ x_faroff_random, y_faroff_random = _rejection_resampling(
+ functools.partial(
+ _sample_boundary_points, distance=xi * epsilon
+ ),
+ n_random_far_off_samples * n_boundary_classes,
+ verify_valid_boundary_input_data_fn,
+ n_repetitions=rejection_resampling_max_repetitions,
+ )
+ if n_boundary_classes > 1:
+ raise NotImplementedError(
+ "n_boundary_classes > 1 is not yet implemented."
+ )
+
+ if n_far_off_adversarial_points > 0:
+ x_faroff_adv = get_boundary_adversarials_fn(
+ x.clone(), y, n_far_off_adversarial_points, epsilon
+ )
+ y_faroff_adv = torch.ones(
+ len(x_faroff_adv), dtype=y.dtype, device=y.device
+ )
+ if x_faroff_random is not None:
+ x_faroff = torch.cat((x_faroff_random, x_faroff_adv))
+ y_faroff = torch.cat((y_faroff_random, y_faroff_adv))
+ else:
+ x_faroff, y_faroff = x_faroff_adv, y_faroff_adv
+ else:
+ x_faroff, y_faroff = x_faroff_random, y_faroff_random
+
+ x_total = torch.cat((x_total, x_faroff))
+ y_total = torch.cat((y_total, y_faroff))
+
+ return x_total, y_total
+
+ def _create_inner_data():
+ if include_original:
+ n_random_points = n_inner_points - 1
+ else:
+ n_random_points = n_inner_points
+
+ if n_random_points > 0:
+ if verify_valid_inner_input_data_fn is None:
+ x_random, y_random = _sample_inner_points(n_inner_points)
+ else:
+ x_random, y_random = _rejection_resampling(
+ _sample_inner_points,
+ n_inner_points,
+ verify_valid_inner_input_data_fn,
+ n_repetitions=rejection_resampling_max_repetitions,
+ )
+
+ if include_original:
+ x_total = torch.cat((torch.unsqueeze(x, 0), x_random))
+ y_total = torch.zeros(
+ len(y_random) + 1, dtype=y_random.dtype, device=y_random.device
+ )
+ else:
+ x_total, y_total = x_random, y_random
+ else:
+ x_total = torch.unsqueeze(x, 0)
+ y_total = torch.zeros(1, dtype=y_boundary.dtype, device=y_boundary.device)
+
+ return x_total, y_total
+
+ def _rejection_resampling(
+ sampling_fn, n_samples, verify_valid_input_data_fn, n_repetitions=10
+ ):
+ """Resample & replace until all samples returned by the sampling_fn are
+ valid according to verify_valid_input_data_fn."""
+        # do not waste time by running a non-full batch
+ if fill_batches_for_verification:
+ n_sampling_samples = max(n_samples, batch_size)
+ else:
+ n_sampling_samples = n_samples
+
+ x, y = sampling_fn(n_sampling_samples)
+ x_valid_mask = verify_valid_input_data_fn(x)
+ for i in range(n_repetitions + 1):
+ if np.sum(x_valid_mask) >= n_samples:
+ # found enough samples
+ # now restrict x to the valid samples
+ # and x and y such that their length matches n_samples
+ x = x[x_valid_mask]
+ x = x[:n_samples]
+ y = y[:n_samples]
+ return x, y
+
+ if i == n_repetitions:
+ raise RuntimeError(
+ f"Rejection resampling failed after {n_repetitions} " f"rounds."
+ )
+
+ # check how many samples to be replaced
+ n_x_invalid = len(x_valid_mask) - np.sum(x_valid_mask)
+ # generate new samples
+ c = sampling_fn(n_sampling_samples)[0]
+ # check how many of them are valid and are needed
+ c_valid_mask = verify_valid_input_data_fn(c)
+ c = c[c_valid_mask][:n_x_invalid]
+ c_valid_mask = c_valid_mask[c_valid_mask][:n_x_invalid]
+ n_x_invalid_c_valid = min(n_x_invalid, len(c))
+            # Replace invalid samples and update the mask. Note that chained
+            # advanced indexing (x[mask][:k] = ...) would only modify a copy,
+            # so we index by explicit positions here instead.
+            invalid_indices = np.where(~x_valid_mask)[0][:n_x_invalid_c_valid]
+            x[torch.as_tensor(invalid_indices, device=x.device)] = c[
+                :n_x_invalid_c_valid
+            ]
+            x_valid_mask[invalid_indices] = c_valid_mask[:n_x_invalid_c_valid]
+
+ if not n_inner_points > 0:
+ raise ValueError("n_inner_points must be > 0.")
+ if not n_boundary_points > 0:
+ raise ValueError("n_boundary_points must be > 0.")
+ if not n_boundary_classes == 1:
+ raise NotImplementedError("More than 1 boundary class is not yet supported.")
+ if not n_far_off_adversarial_points >= 0:
+ raise ValueError("n_far_off_adversarial_points must not be negative.")
+ if not n_far_off_boundary_points >= 0:
+ raise ValueError("n_far_off_boundary_points must not be negative.")
+ if not n_boundary_adversarial_points >= 0:
+ raise ValueError("n_boundary_adversarial_points must not be negative.")
+
+ x = x.to(device)
+ y = y.to(device)
+ (x_boundary, y_boundary) = _create_boundary_data()
+ (x_inner, y_inner) = _create_inner_data()
+
+ x = torch.cat((x_inner, x_boundary))
+ y = torch.cat((y_inner, y_boundary))
+
+ dataset = torch.utils.data.TensorDataset(x, y)
+ dataset_boundary = torch.utils.data.TensorDataset(x_boundary, y_boundary)
+ dataset_inner = torch.utils.data.TensorDataset(x_inner, y_inner)
+
+ dataloader = torch.utils.data.DataLoader(
+ dataset, shuffle=False, batch_size=batch_size
+ )
+ dataloader_boundary = torch.utils.data.DataLoader(
+ dataset_boundary, shuffle=False, batch_size=batch_size
+ )
+ dataloader_inner = torch.utils.data.DataLoader(
+ dataset_inner, shuffle=False, batch_size=batch_size
+ )
+
+ return dataloader, dataloader_boundary, dataloader_inner, len(x)
+
+
+def _get_data_features_and_maybe_logits(
+ classifier: Callable,
+ raw_data_loader: torch.utils.data.DataLoader,
+ get_logits: bool,
+ device: str,
+ include_raw_data: bool = False,
+ raw_data_loader_boundary: Optional[torch.utils.data.DataLoader] = None,
+ raw_data_loader_inner: Optional[torch.utils.data.DataLoader] = None,
+ n_repetitions_boundary: Optional[int] = None,
+ n_repetitions_inner: Optional[int] = None,
+) -> Tuple[torch.utils.data.DataLoader, torch.Tensor, int, int]:
+ """
+ Collects the intermediate features for a classifier and creates a new data
+ loader consisting only of these features.
+
+ Args:
+ classifier: Classifier to use as a feature extractor.
+ raw_data_loader: Data loader that contains images which
+ shall be mapped to intermediate features.
+        get_logits: Extract not only the features but also the logits.
+        device: torch device.
+        include_raw_data: Include raw images in the data loader.
+        raw_data_loader_boundary: (optional) Data loader containing only the
+            boundary samples.
+        raw_data_loader_inner: (optional) Data loader containing only the
+            inner samples.
+        n_repetitions_boundary: (optional) How often to repeat inference for
+            boundary samples for obtaining their features.
+        n_repetitions_inner: (optional) How often to repeat inference for
+            inner samples for obtaining their features.
+ Returns:
+ Data loader mapping intermediate features to class labels.
+ """
+ all_features = []
+ all_logits = [] if get_logits else None
+ all_labels = []
+ all_images = []
+
+ def _process_dataloader(dataloader: DataLoader):
+ with torch.no_grad():
+ for x, y in dataloader:
+ x_ = x.to(device)
+ if get_logits:
+ features, logits = classifier(x_, features_and_logits=True)
+ all_logits.append(logits)
+ else:
+ features = classifier(x_, features_only=True)
+
+ all_features.append(features.detach())
+ all_labels.append(y)
+ if include_raw_data:
+ all_images.append(x)
+
+ _process_dataloader(raw_data_loader)
+
+ if n_repetitions_boundary is not None:
+ raw_data_loader_boundary = torch.utils.data.DataLoader(
+ torch.utils.data.TensorDataset(
+ torch.repeat_interleave(
+ raw_data_loader_boundary.dataset.tensors[0],
+ n_repetitions_boundary,
+ 0,
+ ),
+ torch.repeat_interleave(
+ raw_data_loader_boundary.dataset.tensors[1],
+ n_repetitions_boundary,
+ 0,
+ ),
+ ),
+ batch_size=raw_data_loader_boundary.batch_size,
+ )
+ _process_dataloader(raw_data_loader_boundary)
+ if n_repetitions_inner is not None:
+ raw_data_loader_inner = torch.utils.data.DataLoader(
+ torch.utils.data.TensorDataset(
+ torch.repeat_interleave(
+ raw_data_loader_inner.dataset.tensors[0], n_repetitions_inner, 0
+ ),
+ torch.repeat_interleave(
+ raw_data_loader_inner.dataset.tensors[1], n_repetitions_inner, 0
+ ),
+ ),
+ batch_size=raw_data_loader_inner.batch_size,
+ )
+ _process_dataloader(raw_data_loader_inner)
+
+ all_features = torch.cat(all_features, 0)
+
+ if get_logits:
+ all_logits = torch.cat(all_logits, 0)
+ all_labels = torch.cat(all_labels, 0)
+ if include_raw_data:
+ all_images = torch.cat(all_images)
+
+ if len(all_features.shape) > 2:
+ warnings.warn(
+ f"Features are not vectors but higher dimensional "
+ f"({len(all_features.shape) - 1})"
+ )
+
+ if include_raw_data:
+ dataset = torch.utils.data.TensorDataset(all_features, all_labels, all_images)
+ else:
+ dataset = torch.utils.data.TensorDataset(all_features, all_labels)
+ dataloader = torch.utils.data.DataLoader(
+ dataset,
+ shuffle=not isinstance(raw_data_loader.sampler, SequentialSampler),
+ batch_size=raw_data_loader.batch_size,
+ )
+
+ return dataloader, all_logits, all_features.shape[-1], all_features.shape[0]
+
+
+def _train_logistic_regression_classifier(
+ n_features: int,
+ train_loader: DataLoader,
+ classifier_logits: Optional[torch.Tensor],
+ optimizer: OptimizerType,
+ lr: float,
+ device: str,
+ n_classes: int = 2,
+ rescale_logits: LogitRescalingType = "fixed",
+ decision_boundary_closeness: Optional[float] = None,
+ solution_goodness: SolutionGoodnessType = "perfect",
+ class_weight: Optional[Union[Literal["balanced"], dict]] = None,
+) -> torch.nn.Module:
+ """
+ Trains a logistic regression model.
+
+ Args:
+ n_features: Feature dimensionality.
+ train_loader: Data loader containing the data to fit model on.
+ classifier_logits: Logits of the underlying classifier; will be used for logit
+ rescaling.
+ optimizer: Type of optimizer to use.
+ lr: Learning rate (only applies to explicit gradient-descent optimizer).
+ device: torch device.
+ rescale_logits: Rescale weights of model such that the logits have
+ at most unit absolute magnitude.
+        decision_boundary_closeness: (optional) The larger this value, the closer
+            the decision boundary will be placed to the boundary sample(s).
+ Returns:
+ Logistic regression model.
+ """
+
+ if rescale_logits == "adaptive" and classifier_logits is None:
+ raise ValueError("classifier_logits must be supplied for adaptive rescaling")
+
+    def get_accuracy() -> Tuple[float, float, float]:
+        """Computes the accuracy of the binary classifier on the training data.
+
+        Returns:
+            (total accuracy, accuracy for inner samples, accuracy for boundary samples)
+        """
+ # calculate accuracy
+ n_correct = {0: 0, 1: 0}
+ n_total = {0: 0, 1: 0}
+ with torch.no_grad():
+ for x, y in train_loader:
+ x = x.to(device)
+ logits = binary_classifier(x)
+ for k in n_total.keys():
+ n_correct[k] += (
+ (logits.argmax(-1).cpu() == y.cpu())
+ .float()[y == k]
+ .sum()
+ .item()
+ )
+ n_total[k] += len(x[y == k])
+
+ accuracy = (n_correct[0] + n_correct[1]) / (n_total[0] + n_total[1])
+ accuracy_inner = n_correct[0] / n_total[0]
+ accuracy_outer = n_correct[1] / n_total[1]
+
+ return accuracy, accuracy_inner, accuracy_outer
+
+ if not n_classes == 2:
+ raise NotImplementedError("Currently only supports 1 boundary class")
+
+ if optimizer.startswith("sklearn"):
+ if optimizer == "sklearn":
+ # Use logistic regression of sklearn to speed up fitting.
+ regression = LogisticRegression(
+ penalty="none",
+ max_iter=max(1000, int(lr)),
+ multi_class="multinomial",
+ class_weight=class_weight,
+ )
+ elif optimizer == "sklearn-svm":
+            # Since the problem should be linearly separable, the value of C
+            # should not have any effect.
+ regression = LinearSVC(
+ penalty="l2", C=10e5, max_iter=max(1000, int(lr)), multi_class="ovr"
+ )
+ else:
+ raise ValueError("Invalid optimizer choice.")
+
+ regression.fit(
+ train_loader.dataset.tensors[0].cpu().numpy(),
+ train_loader.dataset.tensors[1].cpu().numpy(),
+ )
+
+ binary_classifier = torch.nn.Linear(n_features, n_classes)
+
+ binary_classifier.weight.data = torch.Tensor(
+ np.concatenate((-regression.coef_, regression.coef_), 0)
+ )
+ binary_classifier.bias.data = torch.Tensor(
+ np.concatenate((-regression.intercept_, regression.intercept_), 0)
+ )
+
+ binary_classifier = binary_classifier.to(device)
+
+ accuracy, accuracy_inner, accuracy_outer = get_accuracy()
+
+ if solution_goodness is not None and accuracy < 1.0:
+ raise_error = solution_goodness == "perfect"
+        raise_error |= solution_goodness == "good" and (
+            accuracy_inner == 0 or accuracy_outer == 0
+        )
+
+ message = (
+ f"sklearn solver failed to find perfect solution, "
+ f"Accuracy = {accuracy:.4f} instead of 1.0; "
+ f"{accuracy_inner:.4f} and {accuracy_outer:.4f} for "
+ f"inner and boundary points."
+ )
+
+ if raise_error:
+ raise RuntimeError(message)
+ else:
+ warnings.warn(message)
+ else:
+ binary_classifier = torch.nn.Linear(n_features, n_classes).to(device)
+ criterion = torch.nn.CrossEntropyLoss()
+ optimizer = {"sgd": torch.optim.SGD, "adam": torch.optim.Adam}[optimizer](
+ lr=lr, params=binary_classifier.parameters()
+ )
+ epoch = 0
+ while True:
+ epoch += 1
+ for x, y in train_loader:
+ optimizer.zero_grad()
+ x = x.to(device)
+ y = y.to(device)
+ logits = binary_classifier(x)
+ loss = criterion(logits, y)
+ loss.backward()
+ optimizer.step()
+
+            if epoch > 50000:
+                raise RuntimeError(
+                    f"Could not fit binary discriminator in 50k iterations "
+                    f"(Loss = {loss.item()}). "
+                    "Consider using different settings for the optimizer."
+                )
+
+            # stop training once perfect (total) accuracy is achieved
+            accuracy, _, _ = get_accuracy()
+            if accuracy == 1.0:
+                break
+
+ if rescale_logits is not None or decision_boundary_closeness is not None:
+ # Get value range of binarized logits.
+ with torch.no_grad():
+ logits = binary_classifier(
+ train_loader.dataset.tensors[0].to(device)
+ ).detach()
+
+ if decision_boundary_closeness is not None:
+ logit_differences = logits[:, 0] - logits[:, 1]
+ lowest_difference = np.min(logit_differences.cpu().numpy())
+ binary_classifier.bias.data[0] -= (
+ decision_boundary_closeness * lowest_difference / 2
+ )
+ binary_classifier.bias.data[1] += (
+ decision_boundary_closeness * lowest_difference / 2
+ )
+
+ if rescale_logits is not None:
+ binarized_logit_range = (
+ logits.detach().cpu().numpy().max()
+ - logits.detach().cpu().numpy().min()
+ )
+
+ if rescale_logits == "fixed":
+ target_logit_range = 1.0
+ logit_offset = 0.0
+ logit_rescaling_factor = binarized_logit_range / target_logit_range
+ elif rescale_logits == "adaptive":
+ # Rescale the binarized logits such that they have the same value range
+ # i.e., min and max value match.
+ target_logit_range = (
+ classifier_logits.detach().cpu().numpy().max()
+ - classifier_logits.detach().cpu().numpy().min()
+ )
+ logit_rescaling_factor = binarized_logit_range / target_logit_range
+ logit_offset = (
+ classifier_logits.detach().cpu().numpy().min()
+ - logits.detach().cpu().numpy().min() / logit_rescaling_factor
+ )
+ elif rescale_logits == "tight":
+ # Rescale/shift weights such that the distance between the decision
+ # boundary and the boundary data is small.
+
+ # Calculate distance of boundary points to decision boundary.
+ distances = binary_classifier(
+ train_loader.dataset.tensors[0].to(device)[
+ train_loader.dataset.tensors[1].to(device) != 0
+ ]
+ )[:, 1:].cpu()
+ min_distance = distances.min()
+ # Move decision boundary close to true boundary points
+ logit_rescaling_factor = 1.0
+ logit_offset = torch.tensor(
+ [+0.999 * min_distance.item(), -0.999 * min_distance.item()],
+ device=device,
+ )
+ else:
+ raise ValueError(f"Invalid value for rescale_logits: {rescale_logits}")
+
+ binary_classifier.bias.data /= logit_rescaling_factor
+ binary_classifier.bias.data += logit_offset
+ binary_classifier.weight.data /= logit_rescaling_factor
+
+ return binary_classifier
+
+
+def _get_interior_boundary_discriminator_and_dataloaders(
+ classifier: torch.nn.Module,
+ x: torch.Tensor,
+ y: torch.Tensor,
+ linearization_settings: aut.DecisionBoundaryBinarizationSettings,
+ device: str,
+ batch_size: int = 512,
+ rescale_logits: LogitRescalingType = "fixed",
+ n_samples_evaluation: int = 0,
+ n_samples_asr_evaluation: int = 0,
+ verify_valid_inner_training_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ verify_valid_boundary_training_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ verify_valid_inner_validation_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ verify_valid_boundary_validation_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ get_boundary_adversarials_fn: Optional[
+ Callable[[torch.Tensor, torch.Tensor, float], np.ndarray]
+ ] = None,
+ fill_batches_for_verification: bool = False,
+ far_off_distance: float = 1.25,
+ rejection_resampling_max_repetitions: int = 10,
+ train_classifier_fn: Callable[
+ [int, DataLoader, DataLoader, torch.Tensor, str, LogitRescalingType],
+ torch.nn.Module,
+ ] = None,
+ decision_boundary_closeness: Optional[float] = None,
+ n_inference_repetitions_boundary: Optional[int] = None,
+ n_inference_repetitions_inner: Optional[int] = None,
+ relative_inner_boundary_gap: Optional[float] = 0.05,
+ sample_training_data_from_corners: bool = False,
+) -> Tuple[
+ Tuple[torch.nn.Module, torch.nn.Module],
+ Tuple[DataLoader, DataLoader],
+ Tuple[float, float, float, float, bool, bool],
+]:
+ """
+ Creates a number of perturbed images, obtains the features for these images
+ and trains a linear, binary discriminator of these samples.
+
+ Args:
+ classifier: The classifier that will be used as a feature encoder.
+        x: The single clean image to apply the method on.
+ y: The ground-truth classification label of x.
+ linearization_settings: How to construct the binary classifier.
+ device: torch device
+ batch_size: Max batch size allowed to use
+ rescale_logits: Rescale weights of linear classifier such that logits
+ have a max scale of 1
+ n_samples_evaluation: Number of random samples to use for evaluation
+        verify_valid_inner_training_data_fn: Can be used, e.g., for detector-based defenses.
+            Checks whether all input points used for training/testing are actually valid
+            and won't get filtered out by the model/detector.
+ verify_valid_boundary_training_data_fn: See verify_valid_inner_training_data_fn but for boundary samples.
+ verify_valid_inner_validation_data_fn: See
+ verify_valid_inner_training_data_fn but for calculating the validation
+ scores, i.e. the random ASR.
+ verify_valid_boundary_validation_data_fn: See
+ verify_valid_boundary_training_data_fn but for calculating the validation
+ scores, i.e. the random ASR.
+ get_boundary_adversarials_fn: If given, use this function to
+ generate all but one of the boundary samples. This can be used for
+ evaluating detector-based evaluation functions.
+ fill_batches_for_verification: If computational cost of verification_fn
+ does not depend on the batch size, set this to True.
+ far_off_distance: Relative multiplier (in terms of epsilon) controlling
+ the distance between clean and far off training samples.
+        decision_boundary_closeness: (optional) The larger this value, the closer
+            the decision boundary will be placed to the boundary sample(s).
+ n_inference_repetitions_boundary: (optional) How often to repeat
+ inference for boundary samples for obtaining their features.
+ n_inference_repetitions_inner: (optional) How often to repeat
+ inference for inner samples for obtaining their features.
+ relative_inner_boundary_gap: (optional) Gap between interior and
+ boundary data relative to epsilon (i.e. a value of 0 means boundary points
+ can lie directly next to inner points)
+ sample_training_data_from_corners: Sample training data from the corners
+ of the epsilon ball; this setting is only possible when using linf norm.
+ Returns:
+ Tuple containing ((binary discriminator between interior and boundary points,
+ binary readout only),
+ (dataset of perturbed images, dataset of features of perturbed images),
+ (validation accuracies of inner/boundary/boundary surface/boundary corner points,
+ random attack success rate of surface/corner points))
+ """
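+    # For illustration only: a hypothetical verification function for a
+    # detector-based defense could look like the sketch below, where `detector`
+    # and `threshold` are assumptions and not part of this code base:
+    #
+    #   def verify_valid_inner_training_data_fn(x: torch.Tensor) -> np.ndarray:
+    #       # Boolean mask marking samples that the detector does not reject.
+    #       with torch.no_grad():
+    #           return (detector(x) < threshold).cpu().numpy()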
+
+ if sample_training_data_from_corners and linearization_settings.norm != "linf":
+ raise ValueError("Corners are only defined for linf norm.")
+
+ # Check if model is compatible with this check.
+ try:
+ with torch.no_grad():
+ if rescale_logits == "adaptive":
+ classifier(
+ torch.ones((1, 1, 1, 1), device=device), features_and_logits=True
+ )
+ else:
+ classifier(torch.ones((1, 1, 1, 1), device=device), features_only=True)
+ except TypeError as e:
+ message = str(e)
+ if "unexpected keyword argument 'features_only'" in message:
+ raise ValueError(
+ "model does not support `features_only` flag in forward pass."
+ )
+ if "unexpected keyword argument 'features_and_logits'" in message:
+ raise ValueError(
+ "model does not support `features_and_logits` flag in forward pass."
+ )
+ except Exception:
+ pass
+
+ (
+ raw_train_loader,
+ raw_train_loader_boundary,
+ raw_train_loader_inner,
+ n_raw_training_samples,
+ ) = _create_raw_data(
+ x,
+ y,
+ linearization_settings.n_inner_points,
+ linearization_settings.n_boundary_points,
+ linearization_settings.n_boundary_adversarial_points,
+ linearization_settings.n_far_off_boundary_points,
+ linearization_settings.n_far_off_adversarial_points,
+ batch_size=batch_size,
+ fill_batches_for_verification=fill_batches_for_verification,
+ verify_valid_inner_input_data_fn=verify_valid_inner_training_data_fn,
+ verify_valid_boundary_input_data_fn=verify_valid_boundary_training_data_fn,
+ get_boundary_adversarials_fn=get_boundary_adversarials_fn,
+ device=device,
+ epsilon=linearization_settings.epsilon,
+ norm=linearization_settings.norm,
+ n_boundary_classes=1,
+ include_original=True,
+ xi=far_off_distance,
+ eta=1.0 - relative_inner_boundary_gap,
+ rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
+ sample_boundary_from_corners=sample_training_data_from_corners,
+ )
+
+ # Get features to train binary classifier on.
+ (
+ train_loader,
+ logits,
+ n_features,
+ n_training_samples,
+ ) = _get_data_features_and_maybe_logits(
+ classifier,
+ raw_train_loader,
+ rescale_logits == "adaptive",
+ device,
+ include_raw_data=False,
+ n_repetitions_boundary=n_inference_repetitions_boundary,
+ n_repetitions_inner=n_inference_repetitions_inner,
+ raw_data_loader_boundary=raw_train_loader_boundary,
+ raw_data_loader_inner=raw_train_loader_inner,
+ )
+
+ if not (n_features > n_training_samples):
+        warnings.warn(
+            f"Feature dimension ({n_features}) should be larger than the "
+            f"number of training samples ({n_training_samples}).",
+            RuntimeWarning,
+        )
+
+ # finally train new binary classifier on features
+ if train_classifier_fn is None:
+ binary_classifier = _train_logistic_regression_classifier(
+ n_features,
+ train_loader,
+ logits,
+ linearization_settings.optimizer,
+ linearization_settings.lr,
+ device,
+ class_weight=linearization_settings.class_weight,
+ rescale_logits=rescale_logits,
+ decision_boundary_closeness=decision_boundary_closeness,
+ )
+ linearized_model = __KwargsSequential(
+ networks.Lambda(
+ lambda x, **kwargs: classifier(x, features_only=True, **kwargs)
+ ),
+ binary_classifier,
+ )
+
+ else:
+ binary_classifier = None
+ linearized_model = train_classifier_fn(
+ n_features,
+ train_loader,
+ raw_train_loader,
+ logits,
+ device,
+ rescale_logits=rescale_logits,
+ )
+
+ # evaluate on another set of random samples (we are only interested in the
+ # performance of points inside the epsilon ball)
+ if n_samples_evaluation > 0:
+ raw_validation_loader, _, _, _ = _create_raw_data(
+ x,
+ y,
+ n_samples_evaluation,
+ n_samples_evaluation,
+ 0,
+ 0,
+ 0,
+ batch_size=batch_size,
+ fill_batches_for_verification=fill_batches_for_verification,
+            # TODO(zimmerrol): check if this makes sense. The motivation for removing
+            # this here was that we moved the check down to where the accuracy is calculated.
+ verify_valid_boundary_input_data_fn=None,
+ verify_valid_inner_input_data_fn=None,
+ # verify_valid_input_data_fn=verify_valid_input_validation_data_fn,
+ get_boundary_adversarials_fn=get_boundary_adversarials_fn,
+ device=device,
+ epsilon=linearization_settings.epsilon,
+ norm=linearization_settings.norm,
+ n_boundary_classes=1,
+ include_original=False,
+ xi=far_off_distance,
+ rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
+ eta=1.0,
+ sample_boundary_from_corners=False,
+ )
+
+ _, raw_validation_loader_corners, _, _ = _create_raw_data(
+ x,
+ y,
+ 1,
+ n_samples_evaluation,
+ 0,
+ 0,
+ 0,
+ batch_size=batch_size,
+ fill_batches_for_verification=fill_batches_for_verification,
+ verify_valid_boundary_input_data_fn=None,
+ verify_valid_inner_input_data_fn=None,
+ get_boundary_adversarials_fn=get_boundary_adversarials_fn,
+ device=device,
+ epsilon=linearization_settings.epsilon,
+ norm=linearization_settings.norm,
+ n_boundary_classes=1,
+ include_original=False,
+ xi=far_off_distance,
+ rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
+ eta=1.0,
+ sample_boundary_from_corners=True,
+ )
+
+ raw_validation_loader = torch.utils.data.DataLoader(
+ torch.utils.data.TensorDataset(
+ torch.cat(
+ (
+ raw_validation_loader.dataset.tensors[0],
+ raw_validation_loader_corners.dataset.tensors[0],
+ ),
+ 0,
+ ),
+ torch.cat(
+ (
+ raw_validation_loader.dataset.tensors[1],
+ raw_validation_loader_corners.dataset.tensors[1],
+ ),
+ 0,
+ ),
+ ),
+ batch_size=raw_validation_loader.batch_size,
+ shuffle=False,
+ )
+
+ # Get features to test binary classifier on.
+ validation_loader, _, _, _ = _get_data_features_and_maybe_logits(
+ classifier, raw_validation_loader, False, device, include_raw_data=True
+ )
+
+ inner_correctly_classified = []
+ boundary_correctly_classified = []
+ for it, (x_features, y, x_images) in enumerate(validation_loader):
+ x_features = x_features.to(device)
+ x_images = x_images.to(device)
+
+ # If we use a custom train method use the raw images for validation as
+ # it is possible that the classifier has no simple linear readout.
+
+ # TODO(zimmerrol): also allow detector-like models here
+ # if the verify_valid_input_data_fn is used, this shouldn't be a
+ # concern anymore since all samples generated here have already passed
+ # the detector
+ with torch.no_grad():
+ if binary_classifier is not None:
+ y_pred = binary_classifier(x_features).argmax(-1).to("cpu")
+ else:
+ y_pred = linearized_model(x_images).argmax(-1).to("cpu")
+            # flag predictions for invalid data points such that they are not
+            # counted as correctly classified samples (indexing by explicit
+            # positions, since chained mask indexing would only modify a copy)
+            if verify_valid_inner_validation_data_fn is not None:
+                is_valid_input = verify_valid_inner_validation_data_fn(
+                    x_images[y == 0]
+                )
+                invalid = ~torch.as_tensor(is_valid_input, dtype=torch.bool)
+                y_pred[torch.where(y == 0)[0][invalid]] = -1
+            if verify_valid_boundary_validation_data_fn is not None:
+                is_valid_input = verify_valid_boundary_validation_data_fn(
+                    x_images[y == 1]
+                )
+                invalid = ~torch.as_tensor(is_valid_input, dtype=torch.bool)
+                y_pred[torch.where(y == 1)[0][invalid]] = -1
+
+ inner_correctly_classified += (y_pred[y == 0] == 0).numpy().tolist()
+ boundary_correctly_classified += (y_pred[y == 1] == 1).numpy().tolist()
+
+ inner_correctly_classified = np.array(inner_correctly_classified)
+ boundary_correctly_classified = np.array(boundary_correctly_classified)
+
+ validation_accuracy_inner = float(np.mean(inner_correctly_classified))
+ validation_accuracy_boundary = float(np.mean(boundary_correctly_classified))
+
+ validation_accuracy_boundary_surface = float(
+ np.mean(boundary_correctly_classified[:n_samples_evaluation])
+ )
+ validation_accuracy_boundary_corners = float(
+ np.mean(boundary_correctly_classified[n_samples_evaluation:])
+ )
+ random_attack_success_inner = (
+ np.mean(inner_correctly_classified[:n_samples_asr_evaluation]) < 1.0
+ )
+ random_attack_success_boundary_surface = (
+ np.mean(
+ boundary_correctly_classified[:n_samples_evaluation][
+ :n_samples_asr_evaluation
+ ]
+ )
+ > 0.0
+ )
+ random_attack_success_boundary_corners = (
+ np.mean(
+ boundary_correctly_classified[n_samples_evaluation:][
+ :n_samples_asr_evaluation
+ ]
+ )
+ > 0.0
+ )
+ random_attack_success_boundary_corners = np.logical_or(
+ random_attack_success_inner, random_attack_success_boundary_corners
+ )
+ random_attack_success_boundary_surface = np.logical_or(
+ random_attack_success_inner, random_attack_success_boundary_surface
+ )
+ validation_accuracies_and_asr = (
+ validation_accuracy_inner,
+ validation_accuracy_boundary,
+ validation_accuracy_boundary_surface,
+ validation_accuracy_boundary_corners,
+ random_attack_success_boundary_surface,
+ random_attack_success_boundary_corners,
+ )
+ else:
+ validation_accuracies_and_asr = None
+
+ return (
+ (linearized_model, binary_classifier),
+ (raw_train_loader, train_loader),
+ validation_accuracies_and_asr,
+ )
+
+
+def __wrap_assert_get_boundary_adversarials_fn(
+ fn: Callable[[torch.Tensor, torch.Tensor, int, float], np.ndarray],
+ norm: ut.NormType,
+) -> Callable[[torch.Tensor, torch.Tensor, int, float], np.ndarray]:
+ """Make sure adversarial examples really lie on the epsilon ball boundary
+ (or are within a relative distance of 1%)."""
+
+ def inner(x: torch.Tensor, y: torch.Tensor, n: int, epsilon: float):
+ x_ = fn(x, y, n, epsilon)
+ delta = (x_ - x).cpu()
+ if norm == "linf":
+ distance = torch.abs(delta).flatten(1).max(1)[0].numpy()
+ elif norm in ("l2", "l1"):
+ distance = torch.norm(
+ delta, p=1 if norm == "l1" else 2, keepdim=False, dim=[1, 2, 3]
+ ).numpy()
+ else:
+ raise ValueError(f"Unknown norm: {norm}")
+ # TODO(zimmerrol): Verify whether 1% tolerance is sensible.
+        assert np.all(np.isclose(distance, epsilon, atol=0.01 * epsilon)), (
+            f"Magnitude of boundary adversarial examples ({distance}) "
+            f"does not match target distance ({epsilon})."
+        )
+ return x_
+
+ return inner
+
+
+def interior_boundary_discrimination_attack(
+ classifier: torch.nn.Module,
+ test_loader: torch.utils.data.DataLoader,
+ attack_fn: Callable[
+ [torch.nn.Module, torch.utils.data.DataLoader, dict],
+ Tuple[np.ndarray, Tuple[torch.Tensor, torch.Tensor]],
+ ],
+ linearization_settings: aut.DecisionBoundaryBinarizationSettings,
+ n_samples: int,
+ device: str,
+ batch_size: int = 512,
+ rescale_logits: LogitRescalingType = "fixed",
+ n_samples_evaluation: int = 0,
+ n_samples_asr_evaluation: int = 0,
+ verify_valid_inner_training_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ verify_valid_boundary_training_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ verify_valid_inner_validation_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ verify_valid_boundary_validation_data_fn: Optional[
+ Callable[[torch.Tensor], np.ndarray]
+ ] = None,
+ get_boundary_adversarials_fn: Optional[
+ Callable[[torch.Tensor, torch.Tensor, int, float], np.ndarray]
+ ] = None,
+ fill_batches_for_verification: bool = True,
+ far_off_distance: float = 1.50,
+ rejection_resampling_max_repetitions: int = 10,
+ train_classifier_fn: Callable[
+ [int, DataLoader, torch.Tensor, str, LogitRescalingType], torch.nn.Module
+ ] = None,
+ fail_on_exception: bool = False,
+ decision_boundary_closeness: Optional[float] = None,
+ n_inference_repetitions_boundary: Optional[int] = None,
+ n_inference_repetitions_inner: Optional[int] = None,
+ relative_inner_boundary_gap: Optional[float] = 0.05,
+ sample_training_data_from_corners: bool = False,
+) -> List[Tuple[bool, float, float, Tuple[float, float, float, float, bool, bool]]]:
+
+ """
+    Performs the binarization test. This means replacing the last linear layer
+    of the classifier with a binary classifier that distinguishes between images
+    of different perturbation magnitudes.
+
+ Args:
+ classifier: The classifier that will be used as a feature encoder.
+ test_loader: Data loader of the data to run the test on.
+ attack_fn: Function performing an adversarial attack on a classifier and
+ dataset passed as arguments.
+ linearization_settings: How to construct the binarized classifier.
+ n_samples: Number of samples to perform this test on.
+ device: torch device
+ batch_size: Max batch size allowed to use
+ rescale_logits: Rescale weights of linear classifier such that logits
+ have a max scale of 1
+ n_samples_evaluation: Number of random samples to use for evaluation
+ n_samples_asr_evaluation: Number of random samples used to calculate
+ the ASR of a random attacker
+        verify_valid_inner_training_data_fn: Can be used, e.g., for
+            detector-based defenses.
+            Checks whether all input points used for training/testing are actually valid
+            and won't get filtered out by the model/detector.
+ verify_valid_boundary_training_data_fn: See
+ verify_valid_inner_training_data_fn but for boundary samples.
+ verify_valid_inner_validation_data_fn: See
+ verify_valid_inner_training_data_fn but for calculating the validation
+ scores, i.e. the random ASR.
+ verify_valid_boundary_validation_data_fn: See
+ verify_valid_boundary_training_data_fn but for calculating the validation
+ scores, i.e. the random ASR.
+ get_boundary_adversarials_fn: If given, use this function to
+ generate all but one of the boundary samples. This can be used for
+ evaluating detector-based evaluation functions.
+ fill_batches_for_verification: If computational cost of verification_fn
+ does not depend on the batch size, set this to True.
+ far_off_distance: Relative multiplier (in terms of epsilon) controlling
+ the distance between clean and far off training samples.
+ rejection_resampling_max_repetitions: How often to resample to satisfy
+ constraints on training samples.
+ train_classifier_fn: Callback that trains a readout classifier on a set of
+ features.
+        fail_on_exception: Raise an exception if a single sample fails, or keep
+            running and report the failures later.
+ decision_boundary_closeness: (optional) The larger this value, the closer
+ the decision boundary will be placed to the boundary sample(s).
+ n_inference_repetitions_boundary: (optional) How often to repeat
+ inference for boundary samples for obtaining their features.
+ n_inference_repetitions_inner: (optional) How often to repeat
+ inference for inner samples for obtaining their features.
+ relative_inner_boundary_gap: (optional) Gap between interior and
+ boundary data (in pixel space) relative to epsilon (i.e. a value of 0 means
+ boundary points can lie directly next to inner points)
+        sample_training_data_from_corners: Sample training data from the corners
+            of the epsilon ball; this setting is only possible when using the
+            linf norm.
+    Returns:
+        List containing tuples of (attack successful, logit diff of the result of
+        attack_fn, logit diff of the best training sample, (validation accuracy
+        inner, validation accuracy boundary, validation accuracy boundary surface,
+        validation accuracy boundary corners, random ASR surface, random ASR corners)).
+    """
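+    # Illustrative sketch (an assumption, not part of this code base) of the
+    # expected attack_fn interface; `run_my_attack` is a placeholder:
+    #
+    #   def attack_fn(model, data_loader, attack_kwargs):
+    #       x, y = next(iter(data_loader))
+    #       x_adv = run_my_attack(model, x, y, **attack_kwargs)
+    #       logits_adv = model(x_adv)
+    #       success = (logits_adv.argmax(-1) != y).cpu().numpy()
+    #       return success, (x_adv, logits_adv)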
+
+ if get_boundary_adversarials_fn is not None and (
+ linearization_settings.n_boundary_adversarial_points == 0
+ and linearization_settings.n_far_off_adversarial_points == 0
+ ):
+ warnings.warn(
+ "get_boundary_adversarials_fn is set but number of boundary "
+ "and far-off adversarial examples is set to 0",
+ UserWarning,
+ )
+
+ results = []
+ data_iterator = iter(test_loader)
+ current_batch_x = None
+ current_batch_y = None
+ current_batch_index = 0
+
+ if get_boundary_adversarials_fn is not None:
+ # Make sure this function really returns boundary adversarials.
+ get_boundary_adversarials_fn = __wrap_assert_get_boundary_adversarials_fn(
+ get_boundary_adversarials_fn, linearization_settings.norm
+ )
+
+ # Show warnings only once
+ warnings_shown_for_messages = []
+ for i in tqdm.tqdm(range(n_samples)):
+ if current_batch_x is None or current_batch_index == len(current_batch_x) - 1:
+ try:
+ # Only use input and label.
+ current_batch_x, current_batch_y = next(data_iterator)
+ except StopIteration:
+ warnings.warn(
+ f"Could only gather {i} and not the "
+ f"{n_samples} requested samples."
+ )
+ break
+ current_batch_index = 0
+ else:
+ current_batch_index += 1
+
+ # Get current item/input data.
+ x = current_batch_x[current_batch_index]
+ y = current_batch_y[current_batch_index]
+
+ setup_successful = False
+ with warnings.catch_warnings(record=True) as ws:
+ try:
+ (
+ (binary_discriminator, binary_linear_layer),
+ (image_loader, feature_loader),
+ validation_accuracies,
+ ) = _get_interior_boundary_discriminator_and_dataloaders(
+ classifier,
+ x,
+ y,
+ linearization_settings,
+ device,
+ rescale_logits=rescale_logits,
+ n_samples_evaluation=n_samples_evaluation,
+ n_samples_asr_evaluation=n_samples_asr_evaluation,
+ batch_size=batch_size,
+ verify_valid_inner_training_data_fn=verify_valid_inner_training_data_fn,
+ verify_valid_boundary_training_data_fn=verify_valid_boundary_training_data_fn,
+ verify_valid_inner_validation_data_fn=verify_valid_inner_validation_data_fn,
+ verify_valid_boundary_validation_data_fn=verify_valid_boundary_validation_data_fn,
+ get_boundary_adversarials_fn=get_boundary_adversarials_fn,
+ fill_batches_for_verification=fill_batches_for_verification,
+ far_off_distance=far_off_distance,
+ rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
+ train_classifier_fn=train_classifier_fn,
+ decision_boundary_closeness=decision_boundary_closeness,
+ n_inference_repetitions_boundary=n_inference_repetitions_boundary,
+ n_inference_repetitions_inner=n_inference_repetitions_inner,
+ relative_inner_boundary_gap=relative_inner_boundary_gap,
+ sample_training_data_from_corners=sample_training_data_from_corners,
+ )
+ setup_successful = True
+ except RuntimeError as ex:
+ exc_type, exc_obj, exc_tb = sys.exc_info()
+ fname, lineno, fnname, code = traceback.extract_tb(exc_tb)[-1]
+ if fail_on_exception:
+ raise ex
+ else:
+ warnings.warn(f"Exception caught: {fname}:{lineno}({fnname}): {ex}")
+
+ for w in ws:
+ if str(w.message) not in warnings_shown_for_messages:
+ warnings_shown_for_messages.append(str(w.message))
+ warnings.warn(str(w.message), w.category)
+
+ if not setup_successful:
+ continue
+
+ attack_loader = torch.utils.data.DataLoader(
+ torch.utils.data.TensorDataset(
+ torch.unsqueeze(x, 0), torch.zeros(1, dtype=torch.long)
+ ),
+ shuffle=False,
+ batch_size=1,
+ )
+
+ if linearization_settings.n_far_off_boundary_points == 0:
+ attack_kwargs = {}
+ else:
+ attack_kwargs = dict(
+ reference_points_x=image_loader.dataset.tensors[0][
+ -linearization_settings.n_far_off_boundary_points * 1 :
+ ],
+ reference_points_y=image_loader.dataset.tensors[1][
+ -linearization_settings.n_far_off_boundary_points * 1 :
+ ],
+ )
+
+ with warnings.catch_warnings(record=True) as ws:
+ attack_successful, (x_adv, logits_adv) = attack_fn(
+ binary_discriminator, attack_loader, attack_kwargs
+ )
+ for w in ws:
+ if str(w.message) not in warnings_shown_for_messages:
+ warnings_shown_for_messages.append(str(w.message))
+ warnings.warn(f"{w.filename}:{w.lineno}:{w.message}", w.category)
+
+ logit_diff_adv = (logits_adv[:, 1] - logits_adv[:, 0]).item()
+
+ # Now compare the result of the attack (x_adv) with the training samples
+ # in terms of their confidence.
+ # For this, first get logits of binary discriminator for data it was
+ # trained on, but only do this for samples of the adversarial class (y = 1).
+ logits_training = []
+ if train_classifier_fn is None:
+ for x, y in feature_loader:
+ with torch.no_grad():
+ x = x[y == 1]
+ if len(x) == 0:
+ continue
+ logits_training.append(binary_linear_layer(x.to(device)).cpu())
+ else:
+ for x, y in image_loader:
+ with torch.no_grad():
+ x = x[y == 1]
+ if len(x) == 0:
+ continue
+ logits_training.append(binary_discriminator(x.to(device)).cpu())
+ logits_training = torch.cat(logits_training, 0)
+
+        # Now get the training sample with max confidence (alternatively, we could
+        # also just compute the distance to the linear boundary for all samples and
+        # pick the one with max distance).
+ logit_diff_training = torch.max(
+ logits_training[:, 1] - logits_training[:, 0]
+ ).item()
+
+ result = (
+ attack_successful,
+ logit_diff_adv,
+ logit_diff_training,
+ validation_accuracies,
+ )
+
+ results.append(result)
+
+ return results
+
+
+def format_result(
+ scores_logit_differences_and_validation_accuracies,
+ n_samples,
+ indent=0,
+ title="interior-vs-boundary discrimination",
+):
+ """Formats the result of the interior-vs-boundary discriminator test"""
+ if len(scores_logit_differences_and_validation_accuracies) == 0:
+ test_result = (np.nan, np.nan, np.nan, np.nan)
+ else:
+ scores = [it[0] for it in scores_logit_differences_and_validation_accuracies]
+ validation_scores = [
+ it[3] for it in scores_logit_differences_and_validation_accuracies
+ ]
+ if validation_scores[0] is None:
+            validation_scores = (np.nan,) * 6
+ else:
+ validation_scores = np.array(validation_scores)
+ validation_scores = tuple(np.mean(validation_scores, 0))
+ logit_differences = [
+ (it[1], it[2]) for it in scores_logit_differences_and_validation_accuracies
+ ]
+ logit_differences = np.array(logit_differences)
+ relative_performance = (logit_differences[:, 0] - logit_differences[:, 1]) / (
+ logit_differences[:, 1] + 1e-12
+ )
+
+ test_result = (
+ np.mean(scores),
+ np.mean(relative_performance),
+ np.std(relative_performance),
+ validation_scores,
+ )
+
+ indent = "\t" * indent
+
+    return (
+        "{0}{1}, ASR: {2}\n"
+ "{0}\tNormalized Logit-Difference-Improvement: {3} +- {4}\n"
+ "{0}\tValidation Accuracy (I, B, BS, BC, R. ASR S, R. ASR C): {5}\n"
+ "{0}\tSetup failed for {6}/{7} samples".format(
+ indent,
+ title,
+ *test_result,
+ n_samples - len(scores_logit_differences_and_validation_accuracies),
+ n_samples,
+ )
+ )
diff --git a/argparse_utils.py b/argparse_utils.py
new file mode 100644
index 0000000..7668519
--- /dev/null
+++ b/argparse_utils.py
@@ -0,0 +1,164 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import importlib
+import re
+from dataclasses import dataclass
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+from typing_extensions import Literal
+
+import utils as ut
+
+
+@dataclass
+class AdversarialAttackSettings:
+ epsilon: float
+ norm: ut.NormType
+ step_size: float
+ n_steps: int = 20
+ n_averages: int = 1
+    attack: Literal["pgd", "kwta"] = "pgd"
+ random_start: bool = True
+
+ def __repr__(self):
+ return (
+ f"{self.attack}_{self.norm}_{self.epsilon}_{self.step_size}_"
+ f"{self.n_steps}_{self.n_averages}_{self.random_start}"
+ )
+
+
+@dataclass
+class DecisionBoundaryBinarizationSettings:
+ epsilon: float
+ norm: ut.NormType
+ n_inner_points: int
+ n_boundary_points: int
+ adversarial_attack_settings: Optional[AdversarialAttackSettings]
+ n_boundary_adversarial_points: int = 0
+ n_far_off_boundary_points: int = 0
+ n_far_off_adversarial_points: int = 0
+ optimizer: str = "adam"
+ lr: float = 5e-2
+ class_weight: Optional[Union[Literal["balanced"], dict]] = None
+
+ def __repr__(self):
+ return (
+ f"{self.norm}_{self.epsilon}_{self.n_inner_points}_"
+ f"{self.n_boundary_points}_{self.n_far_off_boundary_points}_"
+ f"{self.adversarial_attack_settings}_{self.optimizer}_{self.lr}"
+ )
+
+
+def __parse_structure_argument(
+ value,
+ argument_type: Union[Callable[[str], Any], type],
+ known_flags: Dict[str, Tuple[str, bool]],
+ argument_types: Dict[str, Callable],
+):
+ """
+ Recursively parses structured arguments encoded as a string.
+
+    Args:
+        value: String to parse.
+        argument_type: Class to store values in.
+        known_flags: Map between a flag's name and the (argument name, value)
+            pair to set when the flag is present.
+        argument_types: Map between argument names and argument constructors
+            for variables.
+
+ Returns:
+ Object created based on string.
+ """
+ arguments = re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', value)
+ kwargs = {}
+ for argument in arguments:
+ parts = argument.split("=")
+ if len(parts) > 2:
+ parts = [parts[0], "=".join(parts[1:])]
+ if len(parts) != 2:
+ # argument is a flag
+ if argument not in known_flags:
+ raise argparse.ArgumentTypeError(
+ "invalid argument/unknown flag:", argument
+ )
+ else:
+ kwargs[known_flags[argument][0]] = known_flags[argument][1]
+ else:
+ key, value = parts
+ value = value.replace(r"\"", '"')
+ if value.startswith('"') and value.endswith('"'):
+ value = value[1:-1]
+ if key in argument_types:
+ kwargs[key] = argument_types[key](value)
+ else:
+ raise argparse.ArgumentTypeError(
+ f"invalid argument `{argument}` for type `{argument_type}`"
+ )
+
+ try:
+ return argument_type(**kwargs)
+ except Exception as ex:
+ raise argparse.ArgumentTypeError("Could not create type:", ex)
+
+
+def parse_adversarial_attack_argument(value):
+    """Parse a string defining an AdversarialAttackSettings object."""
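+    # For illustration only (the keys follow the mapping below; the concrete
+    # values are made-up assumptions): a settings string could look like
+    #   "norm=linf epsilon=0.0314 step_size=0.003 n_steps=100 attack=pgd"
+    # and is parsed into an AdversarialAttackSettings instance.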
+ return __parse_structure_argument(
+ value,
+ AdversarialAttackSettings,
+ {},
+ {
+ "norm": str,
+ "n_steps": int,
+ "epsilon": float,
+ "step_size": float,
+ "attack": str,
+ "n_averages": int,
+ "random_start": lambda x: x.lower() == "true",
+ },
+ )
+
+
+def parse_classifier_argument(value):
+ """Parse a string describing a classifier object."""
+ class_name = value.split(".")[-1]
+ module_path = ".".join(value.split(".")[:-1])
+ module = importlib.import_module(module_path)
+ return getattr(module, class_name)
+
+
+def parse_decision_boundary_binarization_argument(value):
+ """Parse a string defining a DecisionBoundaryBinarizationSettings object."""
+ return __parse_structure_argument(
+ value,
+ DecisionBoundaryBinarizationSettings,
+ {},
+ {
+ "norm": str,
+ "epsilon": float,
+ "n_boundary_points": int,
+ "n_inner_points": int,
+ "adversarial_attack_settings": lambda x: parse_adversarial_attack_argument(
+ x
+ ),
+ "optimizer": str,
+ "lr": float,
+ "class_weight": str,
+ },
+ )
diff --git a/attacks/__init__.py b/attacks/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/attacks/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/attacks/adaptive_kwta_attack.py b/attacks/adaptive_kwta_attack.py
new file mode 100644
index 0000000..a909a7b
--- /dev/null
+++ b/attacks/adaptive_kwta_attack.py
@@ -0,0 +1,156 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Adapted and heavily modified from
+https://github.com/wielandbrendel/adaptive_attacks_paper/blob/master/01_kwta/kwta_attack.ipynb
+"""
+
+from typing import Callable
+from typing import Optional
+
+import numpy as np
+import torch
+import torch.nn.functional
+from tqdm import tqdm
+
+import utils as ut
+
+
+def __best_other_classes(logits: torch.Tensor,
+ exclude: torch.Tensor) -> torch.Tensor:
+ other_logits = logits - torch.nn.functional.one_hot(exclude,
+ num_classes=logits.shape[
+ -1]) * np.inf
+
+ return other_logits.argmax(axis=-1)
+
+
+def __logit_diff_loss_fn(model: Callable, x: torch.Tensor,
+ classes: torch.Tensor,
+ targeted: bool):
+ with torch.no_grad():
+ logits = model(x)
+
+ if targeted:
+ c_minimize = classes
+ c_maximize = __best_other_classes(logits, classes)
+ else:
+ c_minimize = __best_other_classes(logits, classes)
+ c_maximize = classes
+
+ N = len(x)
+ rows = range(N)
+
+ logits_diffs = logits[rows, c_minimize] - logits[rows, c_maximize]
+ assert logits_diffs.shape == (N,)
+
+ return logits_diffs
+
+
+def __es_gradient_estimator(loss_fn: Callable, x: torch.Tensor, y: torch.Tensor,
+ n_samples: int, sigma: float, clip=False, bounds=(0, 1)):
+ assert len(x) == len(y)
+ assert n_samples > 0
+
+ gradient = torch.zeros_like(x)
+ with torch.no_grad():
+ for k in range(n_samples // 2):
+ noise = torch.randn_like(x)
+
+ pos_theta = x + sigma * noise
+ neg_theta = x - sigma * noise
+
+ if clip:
+ pos_theta = pos_theta.clip(*bounds)
+ neg_theta = neg_theta.clip(*bounds)
+
+ pos_loss = loss_fn(pos_theta, y)
+ neg_loss = loss_fn(neg_theta, y)
+
+ gradient += (pos_loss - neg_loss)[:, None, None, None] * noise
+
+ gradient /= 2 * sigma * 2 * n_samples
+
+ return gradient
+
+
+def gradient_estimator_pgd(model: Callable,
+ x: torch.Tensor, y: torch.Tensor,
+ n_steps: int,
+ step_size: float, epsilon: float, norm: ut.NormType,
+ loss_fn: Optional[Callable] = None,
+ random_start: bool = True,
+ early_stopping: bool = False, targeted: bool = False):
+ if loss_fn is None:
+ loss_fn = lambda x, y: __logit_diff_loss_fn(model, x, y, targeted)
+
+ assert len(x) == len(y)
+
+ if random_start:
+ delta = torch.rand_like(x)
+ delta = ut.normalize(delta, norm)
+ x_adv, delta = ut.clipping_aware_rescaling(x, delta, epsilon, norm=norm,
+ growing=False, return_delta=True)
+ else:
+ x_adv = x
+ delta = torch.zeros_like(x)
+
+ if targeted:
+ is_adversarial_fn = lambda x: model(x).argmax(-1) == y
+ else:
+ is_adversarial_fn = lambda x: model(x).argmax(-1) != y
+
+ mask = ~is_adversarial_fn(x_adv)
+ if not early_stopping:
+ mask = torch.ones_like(mask)
+ else:
+ if mask.sum() == 0:
+ return x_adv.detach(), ~mask.detach()
+
+ if len(x) > 1:
+ iterator = tqdm(range(n_steps))
+ else:
+ iterator = range(n_steps)
+ for it in iterator:
+ if it < 0.6 * n_steps:
+ n_samples = 100
+ elif it < 0.8 * n_steps:
+ n_samples = 1000
+ elif it >= 0.8 * n_steps:
+ n_samples = 20000
+
+ pert_x = (x + delta).clip(0, 1)
+ grad_x = __es_gradient_estimator(loss_fn, pert_x[mask], y[mask], n_samples,
+ epsilon)
+
+ grad_x = ut.normalize(grad_x, norm)
+
+ # update only subportion of deltas
+ delta[mask] = delta[mask] - step_size * grad_x
+
+ # project back to feasible set
+ x_adv, delta = ut.clipping_aware_rescaling(x, delta, epsilon, norm=norm,
+ growing=False, return_delta=True)
+
+ mask = ~is_adversarial_fn(x_adv)
+ # new_logit_diffs = loss_fn(x_adv, y)
+ # mask = new_logit_diffs >= 0
+ if not early_stopping:
+ mask = torch.ones_like(mask)
+
+ if early_stopping and mask.sum() == 0:
+ break
+
+ return x_adv.detach(), ~mask.detach()
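+
+
+# Example usage (a sketch, not part of the original attack code): `model` is
+# assumed to be a callable mapping images in [0, 1] to logits; the step size
+# and epsilon below are illustrative values only.
+#
+#   x_adv, is_adv = gradient_estimator_pgd(
+#       model, x, y, n_steps=50, step_size=0.005, epsilon=8 / 255,
+#       norm="linf", early_stopping=True)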
diff --git a/attacks/autopgd.py b/attacks/autopgd.py
new file mode 100644
index 0000000..0cb14c5
--- /dev/null
+++ b/attacks/autopgd.py
@@ -0,0 +1,131 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing_extensions import Literal
+from typing import Union
+
+import torch
+from typing import Callable
+import utils as ut
+from autoattack import autopgd_base
+
+
+class __PatchedAPGDAttack(autopgd_base.APGDAttack):
+ def dlr_loss(self, x, y):
+ """Patched DLR loss that works with less than 3 classes. Taken and modified
+ from: https://github.com/fra31/auto-attack/blob/master/autoattack/
+ autopgd_base.py#L567"""
+
+ x_sorted, ind_sorted = x.sort(dim=1)
+ ind = (ind_sorted[:, -1] == y).float()
+ u = torch.arange(x.shape[0])
+
+ if x_sorted.shape[-1] > 2:
+ # normal dlr loss
+ return -(x[u, y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (
+ 1. - ind)) / (x_sorted[:, -1] - x_sorted[:, -3] + 1e-12)
+ else:
+ # modified dlr loss (w/o the normalizer)
+ return -(x[u, y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (
+ 1. - ind))
+
+
+class __PatchedAPGDAttack_targeted(autopgd_base.APGDAttack_targeted):
+ def dlr_loss_targeted(self, x, y):
+ """Patched DLR loss that works with less than 3 classes. Taken and modified
+ from: https://github.com/fra31/auto-attack/blob/master/autoattack/
+ autopgd_base.py#L606"""
+
+ x_sorted, ind_sorted = x.sort(dim=1)
+ u = torch.arange(x.shape[0])
+
+ if x_sorted.shape[-1] > 2:
+ # normal dlr loss
+ return -(x[u, y] - x[u, self.y_target]) / (x_sorted[:, -1] - .5 * (
+ x_sorted[:, -3] + x_sorted[:, -4]) + 1e-12)
+ else:
+ # modified dlr loss (w/o the normalizer)
+ return -(x[u, y] - x[u, self.y_target])
+
+
+def auto_pgd(model: Callable, x: torch.Tensor, y: torch.Tensor,
+ n_steps: int,
+ epsilon: float, norm: ut.NormType,
+ loss: Union[Literal["ce"], Literal["logit-diff"]] = "ce",
+ targeted: bool = False,
+ n_restarts: int = 1,
+ n_averaging_steps: int = 1,
+ n_classes: int = 10):
+ """Performs a standard projected gradient descent (PGD) with a cross-entropy
+ objective.
+
+ :param model: Inference function of the model yielding logits.
+ :param x: Input images.
+ :param y: Ground-truth labels.
+ :param n_steps: Number of steps.
+ :param epsilon: Maximum size of the perturbation measured by the norm.
+ :param norm: Norm to use for measuring the size of the perturbation.
+ examples have been found.
+ :param targeted: Perform a targeted adversarial attack.
+ :param n_restarts: How often to restart attack.
+ :param n_averaging_steps: Number over repetitions for every gradient
+ calculation.
+ :return: (Adversarial examples, attack success for each sample,
+ target labels (optional))
+ """
+ assert norm in ("linf", "l2", "l1")
+
+ norm = {"linf": "Linf", "l2": "L2", "l1": "L1"}[norm]
+
+ attack_cls = __PatchedAPGDAttack_targeted if targeted \
+ else __PatchedAPGDAttack
+
+ n_restarts += 1
+
+ optional_kwargs = {}
+ if targeted:
+ optional_kwargs["n_target_classes"] = n_classes - 1
+ attack = attack_cls(predict=model, n_iter=n_steps, norm=norm,
+ n_restarts=n_restarts, eps=epsilon,
+ eot_iter=n_averaging_steps, device=x.device,
+ seed=None, **optional_kwargs)
+ attack.loss = "ce" if loss == "ce" else "dlr"
+ if targeted:
+ attack.loss += "-targeted"
+
+ x_adv = attack.perturb(x, y)
+ y_pred = model(x_adv).argmax(-1)
+
+ if targeted:
+ is_adv = y_pred == y
+ else:
+ is_adv = y_pred != y
+
+ if targeted:
+ return x_adv.detach(), is_adv.detach(), attack.y_target.detach()
+ else:
+ return x_adv.detach(), is_adv.detach()
+
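+# Example usage of `auto_pgd` (a sketch, not part of the original file); the
+# epsilon value is illustrative only:
+#
+#   x_adv, is_adv = auto_pgd(model, x, y, n_steps=100, epsilon=8 / 255,
+#                            norm="linf", loss="logit-diff")
+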
+
+def fix_autoattack(attack):
+ attack.apgd_targeted = __PatchedAPGDAttack_targeted(
+ attack.model, n_restarts=attack.apgd_targeted.n_restarts, n_iter=attack.apgd_targeted.n_iter,
+ verbose=attack.apgd_targeted.verbose, eps=attack.apgd_targeted.eps, norm=attack.apgd_targeted.norm,
+ eot_iter=attack.apgd_targeted.eot_iter, rho=attack.apgd_targeted.thr_decr, seed=attack.apgd_targeted.seed,
+ device=attack.apgd_targeted.device, is_tf_model=attack.apgd_targeted.is_tf_model,
+ logger=attack.apgd_targeted.logger)
+ attack.apgd = __PatchedAPGDAttack(
+ attack.model, n_restarts=attack.apgd.n_restarts, n_iter=attack.apgd.n_iter, verbose=attack.apgd.verbose,
+ eps=attack.apgd.eps, norm=attack.apgd.norm, eot_iter=attack.apgd.eot_iter, rho=attack.apgd.thr_decr,
+ seed=attack.apgd.seed, device=attack.apgd.device, is_tf_model=attack.apgd.is_tf_model, logger=attack.apgd.logger)
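+
+
+# Example usage of `fix_autoattack` (a sketch, not part of the original file),
+# assuming the `autoattack` package is installed:
+#
+#   from autoattack import AutoAttack
+#   attack = AutoAttack(model, norm="Linf", eps=8 / 255)
+#   fix_autoattack(attack)  # swap in the patched APGD variants
+#   x_adv = attack.run_standard_evaluation(x, y)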
diff --git a/attacks/fab.py b/attacks/fab.py
new file mode 100644
index 0000000..fc04f7a
--- /dev/null
+++ b/attacks/fab.py
@@ -0,0 +1,66 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+from typing import Callable
+import utils as ut
+from autoattack import fab_pt
+
+
+def fab(model: Callable, x: torch.Tensor, y: torch.Tensor,
+ n_steps: int,
+ epsilon: float, norm: ut.NormType,
+ targeted: bool = False,
+ n_restarts: int = 1,
+ n_classes: int = 10):
+ """Runs the Fast Adaptive Boundary Attack (Linf, L2, L1).
+
+ :param model: Inference function of the model yielding logits.
+ :param x: Input images.
+ :param y: Ground-truth labels.
+ :param n_steps: Number of steps.
+ :param epsilon: Maximum size of the perturbation measured by the norm.
+ :param norm: Norm to use for measuring the size of the perturbation.
+ examples have been found.
+ :param targeted: Perform a targeted adversarial attack.
+ :param n_restarts: How often to restart attack.
+ :return: (Adversarial examples, attack success for each sample,
+ target labels (optional))
+ """
+ assert norm in ("linf", "l2", "l1")
+
+ norm = {"linf": "Linf", "l2": "L2", "l1": "L1"}[norm]
+
+ n_restarts += 1
+
+ optional_kwargs = {}
+ if targeted:
+ optional_kwargs["n_target_classes"] = n_classes - 1
+ attack = fab_pt.FABAttack_PT(
+ predict=model, n_iter=n_steps, norm=norm,
+ n_restarts=n_restarts, eps=epsilon,
+ device=x.device, targeted=targeted,
+ **optional_kwargs)
+ x_adv = attack.perturb(x, y)
+ y_pred = model(x_adv).argmax(-1)
+
+ if targeted:
+ is_adv = y_pred == y
+ else:
+ is_adv = y_pred != y
+
+ if targeted:
+ return x_adv.detach(), is_adv.detach(), attack.y_target.detach()
+ else:
+ return x_adv.detach(), is_adv.detach()
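+
+
+# Example usage of `fab` (a sketch, not part of the original file); the epsilon
+# value is illustrative only:
+#
+#   x_adv, is_adv = fab(model, x, y, n_steps=100, epsilon=8 / 255, norm="linf")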
diff --git a/attacks/pgd.py b/attacks/pgd.py
new file mode 100644
index 0000000..b50f80c
--- /dev/null
+++ b/attacks/pgd.py
@@ -0,0 +1,124 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable
+
+import torch
+
+import utils as ut
+
+
+def general_pgd(loss_fn: Callable, is_adversarial_fn: Callable, x: torch.Tensor,
+ y: torch.Tensor, n_steps: int,
+ step_size: float, epsilon: float, norm: ut.NormType,
+ random_start: bool = True,
+ early_stopping: bool = False,
+ n_averaging_steps: int = 1):
+ """Performs a projected gradient descent (PGD) for an arbitrary loss function
+ and success criterion.
+
+ :param loss_fn: Loss function to minimize.
+ :param is_adversarial_fn: Check if examples are adversarial
+ :param x: Input images.
+ :param y: Ground-truth labels.
+ :param n_steps: Number of steps.
+ :param step_size: Size of the steps/learning rate.
+ :param epsilon: Maximum size of the perturbation measured by the norm.
+ :param norm: Norm to use for measuring the size of the perturbation.
+ :param random_start: Randomly start within the epsilon ball.
+ :param early_stopping: Stop once adversarial perturbations for all
+ examples have been found.
+ :param n_averaging_steps: Number of repetitions for every gradient
+ calculation.
+ :return: (Adversarial examples, attack success for each sample)
+ """
+ assert norm in ("linf", "l2", "l1")
+
+ x_orig = x
+ x = x.clone()
+
+ if random_start:
+ delta = torch.rand_like(x)
+ delta = ut.normalize(delta, norm)
+ x = ut.clipping_aware_rescaling(x_orig, delta, epsilon, norm=norm,
+ growing=False)
+
+ for step in range(n_steps):
+ x = x.requires_grad_()
+
+ # check early stopping
+ with torch.no_grad():
+ is_adv = is_adversarial_fn(x, y)
+ if early_stopping and torch.all(is_adv):
+ return x.detach(), is_adv.detach()
+
+ grad_x = torch.zeros_like(x)
+ for _ in range(n_averaging_steps):
+ # get gradient of cross-entropy wrt to input
+ loss = loss_fn(x, y)
+ grad_x += torch.autograd.grad(loss, x)[0].detach() / n_averaging_steps
+
+ # normalize gradient
+ grad_x = ut.normalize(grad_x, norm)
+
+ # perform step
+ delta = (x - x_orig).detach() - step_size * grad_x.detach()
+
+ # project back to feasible set
+ x = ut.clipping_aware_rescaling(x_orig, delta, epsilon, norm=norm,
+ growing=False)
+ del loss, grad_x
+
+ with torch.no_grad():
+ is_adv = is_adversarial_fn(x, y)
+
+ return x.detach(), is_adv.detach()
+
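+# Example usage of `general_pgd` with a custom margin loss (a sketch, not part
+# of the original file); `model` is assumed to return logits, and step size and
+# epsilon are illustrative values only:
+#
+#   def margin_loss(x, y):
+#       logits = model(x)
+#       correct = logits.gather(1, y[:, None]).squeeze(1)
+#       # mask out the true class before taking the maximum over the others
+#       other = logits.scatter(1, y[:, None], float("-inf")).max(-1).values
+#       return (correct - other).sum()
+#
+#   x_adv, is_adv = general_pgd(
+#       loss_fn=margin_loss,
+#       is_adversarial_fn=lambda x, y: model(x).argmax(-1) != y,
+#       x=x, y=y, n_steps=50, step_size=0.01, epsilon=8 / 255, norm="linf")
+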
+
+def pgd(model: Callable, x: torch.Tensor, y: torch.Tensor, n_steps: int,
+ step_size: float, epsilon: float, norm: ut.NormType,
+ random_start: bool = True,
+ early_stopping: bool = False,
+ targeted: bool = False,
+ n_averaging_steps: int = 1):
+ """Performs a standard projected gradient descent (PGD) with a cross-entropy
+ objective.
+
+ :param model: Inference function of the model yielding logits.
+ :param x: Input images.
+ :param y: Ground-truth labels.
+ :param n_steps: Number of steps.
+ :param step_size: Size of the steps/learning rate.
+ :param epsilon: Maximum size of the perturbation measured by the norm.
+ :param norm: Norm to use for measuring the size of the perturbation.
+ :param random_start: Randomly start within the epsilon ball.
+ :param early_stopping: Stop once adversarial perturbations for all
+ examples have been found.
+ :param targeted: Perform a targeted adversarial attack.
+ :param n_averaging_steps: Number of repetitions for every gradient
+ calculation.
+ :return: (Adversarial examples, attack success for each sample)
+ """
+ assert norm in ("linf", "l2", "l1")
+
+ criterion = torch.nn.CrossEntropyLoss()
+
+ sign = 1 if targeted else -1
+
+ return general_pgd(loss_fn=lambda x, y: sign * criterion(model(x), y),
+ is_adversarial_fn=lambda x, y: model(x).argmax(
+ -1) == y if targeted else model(x).argmax(-1) != y,
+ x=x, y=y, n_steps=n_steps, step_size=step_size,
+ epsilon=epsilon, norm=norm, random_start=random_start,
+ n_averaging_steps=n_averaging_steps,
+ early_stopping=early_stopping)
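+
+
+# Example usage of `pgd` (a sketch, not part of the original file); step size
+# and epsilon are illustrative values only:
+#
+#   x_adv, is_adv = pgd(model, x, y, n_steps=50, step_size=0.01,
+#                       epsilon=8 / 255, norm="linf", early_stopping=True)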
\ No newline at end of file
diff --git a/attacks/thermometer_ls_pgd.py b/attacks/thermometer_ls_pgd.py
new file mode 100644
index 0000000..422e3de
--- /dev/null
+++ b/attacks/thermometer_ls_pgd.py
@@ -0,0 +1,196 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable
+
+import numpy as np
+import torch
+
+import utils as ut
+
+
+def general_thermometer_ls_pgd(
+ loss_fn: Callable, is_adversarial_fn: Callable,
+ x: torch.Tensor,
+ y: torch.Tensor, n_steps: int,
+ step_size: float, epsilon: float, norm: ut.NormType,
+ l: int, temperature: float = 1.0, annealing_factor=1.0 / 1.2,
+ random_start: bool = True,
+ n_restarts=0,
+ early_stopping: bool = False,
+ n_averaging_steps: int = 1):
+ """Performs a logit-space projected gradient descent (PGD) for an arbitrary loss function
+ and success criterion for a thermometer-encoded model.
+
+ :param loss_fn: Loss function to minimize.
+ :param is_adversarial_fn: Check if examples are adversarial
+ :param x: Input images.
+ :param y: Ground-truth labels.
+ :param n_steps: Number of steps.
+ :param step_size: Size of the steps/learning rate.
+ :param epsilon: Maximum size of the perturbation measured by the norm.
+ :param norm: Norm to use for measuring the size of the perturbation.
+ :param l: Number of levels of the thermometer encoding.
+ :param temperature: Initial temperature of the softmax relaxation of the
+ discrete encoding.
+ :param annealing_factor: Factor the temperature is multiplied by after
+ every step.
+ :param random_start: Randomly start within the epsilon ball.
+ :param n_restarts: Number of additional random restarts.
+ :param early_stopping: Stop once adversarial perturbations for all
+ examples have been found.
+ :param n_averaging_steps: Number of repetitions for every gradient
+ calculation.
+ :return: (Adversarial examples, attack success for each sample)
+ """
+ assert norm in ("linf",), "LS-PGD only supports linf norm"
+ assert random_start, "LS-PGD only works with random starts"
+ #assert epsilon < 1.0 / l, f"Epsilon ({epsilon}) must be smaller " \
+ # f"than 1.0/l ({1.0 / l})"
+
+ def one_hot(y):
+ L = torch.arange(l, dtype=x.dtype, device=x.device)
+ L = L.view((1, 1, l, 1, 1)) / l
+ y = torch.unsqueeze(y, 2)
+ y = torch.logical_and(
+ y >= L,
+ y <= L + 1 / l).float()
+ return y
+
+ def init_mask(x):
+ # Compute the mask over the bits that we are allowed to attack
+ mask = torch.zeros((len(x), 3, l, x.shape[-2], x.shape[-1]), dtype=x.dtype,
+ device=x.device)
+ for alpha in np.linspace(0, 1, l):
+ mask += one_hot(torch.maximum(torch.zeros_like(x), x - alpha * epsilon))
+ mask += one_hot(torch.minimum(torch.ones_like(x), x + alpha * epsilon))
+ mask = (mask > 0).float()
+ return mask
+
+ def get_final_x(u):
+ x = torch.argmax(u, 2) / l
+
+ # now move x as close as possible to x_orig without changing
+ # the argmax of the logits
+ delta = x - x_orig
+ delta[delta > 0] = torch.floor(delta[delta > 0] * l) / l
+ delta[delta < 0] = torch.ceil(delta[delta < 0] * l) / l
+
+ # only relevant for debugging:
+ # assert torch.all(torch.abs(delta) <= 1.0/l)
+
+ delta = torch.minimum(torch.ones_like(delta) * epsilon, delta)
+ delta = torch.maximum(-torch.ones_like(delta) * epsilon, delta)
+ x = x_orig + delta
+
+ # only relevant for debugging:
+ # project back to feasible set (if everything was correct before,
+ # this shouldn't change anything)
+ # x2 = ut.clipping_aware_rescaling(x_orig, delta, epsilon, norm=norm,
+ # growing=False)
+ # assert torch.all(torch.abs(x - x2) < 1e-8)
+
+ return x
+
+ x_final = x.clone()
+ x_orig = x
+ mask = init_mask(x_orig)
+
+ for _ in range(n_restarts + 1):
+ x_logits = torch.randn_like(mask)
+ for step in range(n_steps):
+ # mask u so that x(u) is within the epsilon ball
+ x_logits = x_logits * mask - (1.0 - mask) * 1e12
+ # check early stopping
+ x = get_final_x(x_logits)
+ is_adv = is_adversarial_fn(x, y)
+ # print(is_adv[:32].long())
+ if early_stopping and torch.all(is_adv):
+ return x.detach(), is_adv.detach()
+
+ x_logits = x_logits.requires_grad_()
+ x_thermometer = torch.softmax(x_logits / temperature, 2)
+ # convert something like [0, 0, 1, 0, .., 0] to [1, 1, 1, 0, ..., 0]
+ x_thermometer = torch.flip(
+ torch.cumsum(torch.flip(x_thermometer, (2,)), 2), (2,))
+ x_thermometer = x_thermometer.view((
+ x_thermometer.shape[0], -1, x_thermometer.shape[-2],
+ x_thermometer.shape[-1]))
+
+ grad_x_logits = torch.zeros_like(x_logits)
+ for _ in range(n_averaging_steps):
+ # get gradient of cross-entropy wrt to the thermometer encoded input
+ loss = loss_fn(x_thermometer, y)
+ # print(step, loss.item(), is_adv.sum().item())
+ grad_x_logits += torch.autograd.grad(loss, x_logits)[0] / n_averaging_steps
+
+ # perform step
+ x_logits = (x_logits - step_size * torch.sign(grad_x_logits)).detach()
+
+ temperature *= annealing_factor
+
+ x = get_final_x(x_logits)
+ is_adv = is_adversarial_fn(x, y)
+
+ x_final[is_adv] = x[is_adv]
+
+ is_adv = is_adversarial_fn(x, y)
+
+ return x.detach(), is_adv.detach()
+
+
+def thermometer_ls_pgd(model: Callable, x: torch.Tensor, y: torch.Tensor,
+ n_steps: int,
+ step_size: float, epsilon: float, norm: ut.NormType,
+ l: int,
+ temperature: float = 1.0,
+ annealing_factor=1.0 / 1.2,
+ random_start: bool = True,
+ early_stopping: bool = False,
+ targeted: bool = False,
+ n_averaging_steps: int = 1):
+ """Performs a logit-space projected gradient descent (PGD) with a cross-entropy
+ objective for a thermometer-encoded model.
+
+ :param model: Inference function of the model yielding logits.
+ :param x: Input images.
+ :param y: Ground-truth labels.
+ :param n_steps: Number of steps.
+ :param step_size: Size of the steps/learning rate.
+ :param epsilon: Maximum size of the perturbation measured by the norm.
+ :param norm: Norm to use for measuring the size of the perturbation.
+ :param l: Number of levels of the thermometer encoding.
+ :param temperature: Initial temperature of the softmax relaxation of the
+ discrete encoding.
+ :param annealing_factor: Factor the temperature is multiplied by after
+ every step.
+ :param random_start: Randomly start within the epsilon ball.
+ :param early_stopping: Stop once adversarial perturbations for all
+ examples have been found.
+ :param targeted: Perform a targeted adversarial attack.
+ :param n_averaging_steps: Number of repetitions for every gradient
+ calculation.
+ :return: (Adversarial examples, attack success for each sample)
+ """
+ assert norm in ("linf",), "LS-PGD only supports linf norm"
+
+ criterion = torch.nn.CrossEntropyLoss()
+
+ sign = 1 if targeted else -1
+
+ return general_thermometer_ls_pgd(
+ loss_fn=lambda x, y: sign * criterion(model(x), y),
+ is_adversarial_fn=lambda x, y: model(x).argmax(
+ -1) == y if targeted else model(x).argmax(-1) != y,
+ x=x, y=y, n_steps=n_steps, step_size=step_size,
+ epsilon=epsilon, norm=norm,
+ l=l, temperature=temperature,
+ annealing_factor=annealing_factor,
+ random_start=random_start,
+ n_averaging_steps=n_averaging_steps,
+ early_stopping=early_stopping)
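+
+
+# Example usage of `thermometer_ls_pgd` (a sketch, not part of the original
+# file); step size, epsilon and the number of thermometer levels l are
+# illustrative values only:
+#
+#   x_adv, is_adv = thermometer_ls_pgd(model, x, y, n_steps=50, step_size=1.0,
+#                                      epsilon=8 / 255, norm="linf", l=16)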
diff --git a/case_studies/__init__.py b/case_studies/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/bat/README.md b/case_studies/bat/README.md
new file mode 100644
index 0000000..01e5d87
--- /dev/null
+++ b/case_studies/bat/README.md
@@ -0,0 +1,34 @@
+# Bilateral Adversarial Training: Towards Fast Training of More Robust Models Against Adversarial Attacks
+
+Code and models for the ICCV 2019 [paper](https://arxiv.org/abs/1811.10716).
+
+The code is based on this [repo](https://github.com/MadryLab/cifar10_challenge) from MadryLab.
+
+We use one-step adversarial training with a targeted attack and random start, which significantly improves both speed and accuracy.
+
+# CIFAR10
+Training takes about 12 hours on one V100 GPU (200 epochs, CIFAR10, WRN28-10). The models can be downloaded [here](https://drive.google.com/file/d/11uUH1iV6xENARYWnzFnEZkKLb2y_8vHJ/view?usp=sharing).
+
+In the table below, the rows are various robust models (which differ in how the target label of the targeted attack is chosen and in the perturbation budget used during training), and the columns are commonly used attacks for evaluation. Specifically, PGD100-2-8 means 100 iterations of PGD with step size 2 and perturbation budget 8.
+
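+For example, under this notation the attack performed by `case_studies/bat/adversarial_evaluation.sh` in this repository (`--num_steps=100 --step_size=2.0 --epsilon=8.0`) is a PGD100-2-8 attack.
+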
+Note that the two robust models R-MOSA-LA and R-RAND-LA are not included in the original paper due to space limits and to avoid distracting from the main theme. R-MOSA-LA means Most One-Step Adversarial, and R-RAND-LA means a random target chosen among the non-ground-truth classes. I may upload a report with more details in the future; for now, you can check the corresponding [code block](https://github.com/wjyouch/bilateral-adversarial-training/blob/master/train.py#L157). The takeaway is that using a proper targeted attack in one-step adversarial training makes a huge difference.
+
+In addition, we evaluate against PGD100-2-8 with 100 random starts (the last column of the table below): if any of the 100 trials causes the model to fail, we count the attack as successful. Our models perform reasonably well against this very strong and time-consuming (100 iterations + 100 random starts) attack.
+
+|Accuracy (%)| Clean | PGD100-2-8 | CW100-2-8 | PGD1000-2-8 | CW1000-2-8|100xPGD100-2-8|
+|--------|--------|--------|--------|--------|--------|----------|
+|R-MC-LA (eps=8)|91.2|55.3|53.9|54.8|53.4|52.5|
+|R-MC-LA (eps=4)|92.8|62.4|60.5|61.4|59.3|58.6|
+|R-MOSA-LA (eps=8)|90.7|61.3|58.3|NA|NA|57.3|
+|R-MOSA-LA (eps=4)|**92.8**|**71.0**|**67.9**|NA|NA|**66.9**|
+|R-RAND-LA (eps=8)|89.7|59.3|56.8|NA|NA|NA|
+|R-RAND-LA (eps=4)|92.8|65.7|62.9|NA|NA|NA|
+|Our implementation of [Madry's method](https://arxiv.org/abs/1706.06083)|88.0|47.2|48.1|NA|NA|NA|
+
+
+FYI: CIFAR100 dataset can be downloaded [here](https://drive.google.com/file/d/1Lo32gut3G9Sg4pz-ACFdsVzsxMRmCe-1/view?usp=sharing) and SVHN dataset can be downloaded [here](https://drive.google.com/file/d/1gd3-p2_9NN6k9UshmER0fyRG3UL_D2ug/view?usp=sharing).
+
+# Environment
+Python 3.6.7
+
+TensorFlow 1.12.0
diff --git a/case_studies/bat/adversarial_evaluation.py b/case_studies/bat/adversarial_evaluation.py
new file mode 100644
index 0000000..46987e5
--- /dev/null
+++ b/case_studies/bat/adversarial_evaluation.py
@@ -0,0 +1,249 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+
+import time
+import sys
+
+import numpy as np
+
+from bat_utils import *
+from wide_resnet import Model
+
+
+def load_data(FLAGS):
+ # load data
+ if FLAGS.dataset == 'SVHN':
+ raise ValueError("not supported")
+ elif FLAGS.dataset == 'CIFAR':
+ if FLAGS.num_classes == 10:
+ dataset = load_cifar10_data('data/cifar-10-batches-py/')
+ elif FLAGS.num_classes == 20:
+ dataset = load_cifar100_data('cifar100_data', is_fine=False)
+ elif FLAGS.num_classes == 100:
+ dataset = load_cifar100_data('cifar100_data', is_fine=True)
+ else:
+ raise ValueError('Number of classes not valid!')
+ train_images = dataset['train_images']
+ train_labels = dataset['train_labels']
+ test_images = dataset['test_images']
+ test_labels = dataset['test_labels']
+ else:
+ raise ValueError('Dataset not valid!')
+
+ return train_images, train_labels, test_images, test_labels
+
+
+def load_model(FLAGS):
+ x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
+ y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
+ is_train = tf.placeholder(tf.bool, name='is_train')
+
+ model = Model(is_train)
+ x_transformed = x_pl * 2.0 - 1.0
+ logits, _ = model.build_model(images=x_transformed, num_classes=FLAGS.num_classes)
+
+ prob = tf.nn.softmax(logits)
+ correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
+ accuracy = tf.reduce_mean(correct)
+
+ saver = tf.train.Saver(max_to_keep=100)
+
+ config = tf.ConfigProto()
+ config.gpu_options.allow_growth = True
+ sess = tf.Session(config=config)
+
+ saver.restore(sess, FLAGS.ckpt_path)
+ print('restored checkpoint from %s' % FLAGS.ckpt_path)
+
+ return sess, (x_pl, y_pl, is_train, logits, accuracy)
+
+
+def setup_attack(logits, x_pl, y_pl, FLAGS):
+ xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
+ labels=y_pl)
+ # loss for adversarial attack
+ if FLAGS.loss_type == 'xent':
+ if FLAGS.targeted:
+ loss_att = tf.reduce_sum(xent)
+ else:
+ loss_att = -tf.reduce_sum(xent)
+ elif FLAGS.loss_type == 'CW':
+ y_loss = tf.one_hot(y_pl, FLAGS.num_classes)
+ self = tf.reduce_sum(y_loss * logits, axis=1)
+ other = tf.reduce_max((1 - y_loss) * logits - y_loss * 1e4, axis=1)
+ if FLAGS.targeted:
+ raise ValueError("not supported")
+ else:
+ loss_att = tf.reduce_sum(tf.maximum(self - other + FLAGS.margin, 0))
+ else:
+ raise ValueError('loss type not supported!')
+
+ grad, = tf.gradients(loss_att, x_pl)
+
+ return grad
+
+
+def adv_attack(sess, grad, x_pl, y_pl, is_train, x_batch, y_batch, FLAGS):
+ epsilon = FLAGS.epsilon / 255.0
+ step_size = FLAGS.step_size / 255.0
+
+ if not FLAGS.targeted:
+ y_att = np.copy(y_batch)
+ else:
+ raise ValueError("targeted mode not supported")
+
+ # randomly perturb the original images
+ if FLAGS.random_start:
+ x = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
+ else:
+ x = np.copy(x_batch)
+
+ for i in range(FLAGS.num_steps):
+ grad_val = sess.run(grad,
+ feed_dict={
+ x_pl: x,
+ y_pl: y_att,
+ is_train: False
+ })
+
+ x = x - step_size * np.sign(grad_val)
+
+ x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
+
+ x = np.clip(x, 0, 1.0)
+
+ return x
+
+
+def run_eval(sess, grad, x_pl, y_pl, is_train, logits, FLAGS,
+ test_images, test_labels, num_classes=10):
+ test_size = test_images.shape[0]
+ epoch_steps = np.ceil(test_size / FLAGS.batch_size).astype(np.int32)
+ nat_total = 0.0
+ adv_total = 0.0
+ class_nat_total = np.zeros([num_classes])
+ class_adv_total = np.zeros([num_classes])
+ nat_cnt_list = np.zeros([test_size])
+ adv_cnt_list = np.zeros([test_size])
+ idx = np.random.permutation(test_size)
+ for step_idx in range(epoch_steps):
+ start = step_idx * FLAGS.batch_size
+ end = np.minimum((step_idx + 1) * FLAGS.batch_size,
+ test_size).astype(np.int32)
+ x_batch = test_images[idx[start:end]]
+ y_batch = test_labels[idx[start:end]]
+
+ start_time = time.time()
+
+ nat_logits = sess.run(logits,
+ feed_dict={
+ x_pl: x_batch,
+ is_train: False
+ })
+ nat_cnt = nat_logits.argmax(-1) == y_batch
+
+ x_batch_adv = adv_attack(sess, grad, x_pl, y_pl, is_train, x_batch, y_batch, FLAGS)
+
+ adv_logits = sess.run(logits,
+ feed_dict={
+ x_pl: x_batch_adv,
+ y_pl: y_batch,
+ is_train: False
+ })
+ adv_cnt = adv_logits.argmax(-1) == y_batch
+
+ nat_cnt_list[start:end] = nat_cnt
+ adv_cnt_list[start:end] = adv_cnt
+
+ for ii in range(FLAGS.num_classes):
+ class_nat_total[ii] += np.sum(nat_cnt[y_batch == ii])
+ class_adv_total[ii] += np.sum(adv_cnt[y_batch == ii])
+
+ nat_total += np.sum(nat_cnt)
+ adv_total += np.sum(adv_cnt)
+
+ duration = time.time() - start_time
+ print('finished batch %d/%d, duration %.2f, nat acc %.2f, adv acc %.2f' %
+ (step_idx, epoch_steps, duration, 100 * np.mean(nat_cnt),
+ 100 * np.mean(adv_cnt)))
+ sys.stdout.flush()
+
+ nat_acc = nat_total / test_size
+ adv_acc = adv_total / test_size
+ class_nat_total /= (test_size / FLAGS.num_classes)
+ class_adv_total /= (test_size / FLAGS.num_classes)
+ print('clean accuracy: %.2f, adv accuracy: %.2f' %
+ (100 * nat_acc, 100 * adv_acc))
+ for ii in range(FLAGS.num_classes):
+ print('class %d, clean %.2f, adv %.2f' %
+ (ii, 100 * class_nat_total[ii], 100 * class_adv_total[ii]))
+
+
+def parse_args():
+ tf.flags.DEFINE_string('ckpt_path', '', '')
+ tf.flags.DEFINE_string('dataset', 'CIFAR', '')
+ tf.flags.DEFINE_integer('num_classes', 10, '')
+ tf.flags.DEFINE_integer('batch_size', 100, '')
+ tf.flags.DEFINE_string('loss_type', 'xent', '')
+ tf.flags.DEFINE_float('margin', 50.0, '')
+ tf.flags.DEFINE_float('epsilon', 8.0, '')
+ tf.flags.DEFINE_integer('num_steps', 10, '')
+ tf.flags.DEFINE_float('step_size', 2.0, '')
+ tf.flags.DEFINE_boolean('random_start', False, '')
+ tf.flags.DEFINE_boolean('targeted', False, '')
+ tf.flags.DEFINE_integer('n_samples', 2048, '')
+
+ FLAGS = tf.flags.FLAGS
+
+ print(FLAGS.flag_values_dict())
+
+ return FLAGS
+
+
+def main():
+ FLAGS = parse_args()
+ _, _, test_images, test_labels = load_data(FLAGS)
+
+ # normalize from the [-1, 1] to the [0, 1] value range since normalization
+ # now happens inside the model
+ test_images = (test_images + 1.0) / 2.0
+
+ # subsample test data
+ if FLAGS.n_samples == -1:
+ FLAGS.n_samples = len(test_images)
+ idxs = np.arange(len(test_images))
+ np.random.shuffle(idxs)
+ idxs = idxs[:FLAGS.n_samples]
+
+ test_images, test_labels = test_images[idxs], test_labels[idxs]
+
+ sess, (x_pl, y_pl, is_train, logits, accuracy) = load_model(FLAGS)
+ attack_grad = setup_attack(logits, x_pl, y_pl, FLAGS)
+ run_eval(sess, attack_grad, x_pl, y_pl, is_train, logits, FLAGS,
+ test_images, test_labels)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/bat/adversarial_evaluation.sh b/case_studies/bat/adversarial_evaluation.sh
new file mode 100644
index 0000000..7ad65ed
--- /dev/null
+++ b/case_studies/bat/adversarial_evaluation.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+TF_CPP_MIN_LOG_LEVEL=3 ./venv3.8tf/bin/python case_studies/bat/adversarial_evaluation.py \
+ --num_classes=10 \
+ --ckpt_path=checkpoints/bat/mosa_eps4/checkpoint-200 \
+ --dataset=CIFAR \
+ --loss_type=xent \
+ --margin=50.0 \
+ --epsilon=8.0 \
+ --num_steps=100 \
+ --step_size=2.0 \
+ --random_start=True \
+ --batch_size=256 \
+ --n_samples=2048
diff --git a/case_studies/bat/bat_utils.py b/case_studies/bat/bat_utils.py
new file mode 100644
index 0000000..78d0cee
--- /dev/null
+++ b/case_studies/bat/bat_utils.py
@@ -0,0 +1,222 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gzip
+import os
+
+import numpy as np
+
+
+def extract_images(file_path):
+ '''Extract the images into a 4D uint8 numpy array [index, y, x, depth].'''
+ def _read32(bytestream):
+ dt = np.dtype(np.uint32).newbyteorder('>')
+ return np.frombuffer(bytestream.read(4), dtype=dt)[0]
+
+ f = open(file_path, 'rb')
+ with gzip.GzipFile(fileobj=f) as bytestream:
+ magic = _read32(bytestream)
+ if magic != 2051:
+ raise ValueError(
+ 'Invalid magic number %d in MNIST image file: %s' %
+ (magic, f.name))
+ num_images = _read32(bytestream)
+ rows = _read32(bytestream)
+ cols = _read32(bytestream)
+ buf = bytestream.read(rows * cols * num_images)
+ data = np.frombuffer(buf, dtype=np.uint8)
+ data = data.reshape(num_images, rows, cols, 1)
+ return data
+
+
+def extract_labels(file_path):
+ '''Extract the labels into a 1D uint8 numpy array [index].'''
+ def _read32(bytestream):
+ dt = np.dtype(np.uint32).newbyteorder('>')
+ return np.frombuffer(bytestream.read(4), dtype=dt)[0]
+
+ f = open(file_path, 'rb')
+ with gzip.GzipFile(fileobj=f) as bytestream:
+ magic = _read32(bytestream)
+ if magic != 2049:
+ raise ValueError(
+ 'Invalid magic number %d in MNIST label file: %s' %
+ (magic, f.name))
+ num_items = _read32(bytestream)
+ buf = bytestream.read(num_items)
+ labels = np.frombuffer(buf, dtype=np.uint8)
+ return labels
+
+
+def load_mnist_data(data_path, is_uint8=False):
+ TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
+ TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
+ TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
+ TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
+
+ train_images = extract_images(os.path.join(data_path, TRAIN_IMAGES))
+ if not is_uint8:
+ train_images = 2 * (train_images / 255.0 - 0.5)
+ train_labels = extract_labels(os.path.join(data_path, TRAIN_LABELS))
+ test_images = extract_images(os.path.join(data_path, TEST_IMAGES))
+ if not is_uint8:
+ test_images = 2 * (test_images / 255.0 - 0.5)
+ test_labels = extract_labels(os.path.join(data_path, TEST_LABELS))
+
+ train_data = order_data(train_images, train_labels, 10)
+ test_data = order_data(test_images, test_labels, 10)
+
+ return dict(train_images=train_data['images'],
+ train_labels=train_data['labels'],
+ train_count=train_data['count'],
+ test_images=test_data['images'],
+ test_labels=test_data['labels'],
+ test_count=test_data['count'])
+
+
+# python2
+#def unpickle(file):
+# import cPickle
+# fo = open(file, 'rb')
+# dict = cPickle.load(fo)
+# fo.close()
+# return dict
+
+
+# python3
+def unpickle(file):
+ import pickle
+ fo = open(file, 'rb')
+ dict = pickle.load(fo, encoding='bytes')
+ fo.close()
+ return dict
+
+
+def load_cifar100_data(data_path, is_fine=True, is_uint8=False):
+ # train
+ train_set = unpickle(os.path.join(data_path, 'train'))
+ train_images = train_set[b'data']
+ train_images = np.dstack([
+ train_images[:, :1024], train_images[:, 1024:2048],
+ train_images[:, 2048:]
+ ])
+ train_images = train_images.reshape([train_images.shape[0], 32, 32, 3])
+ if not is_uint8:
+ train_images = train_images / 255.0
+ train_images = 2.0 * (train_images - 0.5)
+ if is_fine:
+ train_labels = np.array(train_set[b'fine_labels'])
+ else:
+ train_labels = np.array(train_set[b'coarse_labels'])
+
+ # test
+ test_set = unpickle(os.path.join(data_path, 'test'))
+ test_images = test_set[b'data']
+ test_images = np.dstack([
+ test_images[:, :1024], test_images[:, 1024:2048], test_images[:, 2048:]
+ ])
+ test_images = test_images.reshape([test_images.shape[0], 32, 32, 3])
+ if not is_uint8:
+ test_images = test_images / 255.0
+ test_images = 2.0 * (test_images - 0.5)
+ if is_fine:
+ test_labels = np.array(test_set[b'fine_labels'])
+ else:
+ test_labels = np.array(test_set[b'coarse_labels'])
+
+ return dict(train_images=train_images,
+ train_labels=train_labels,
+ test_images=test_images,
+ test_labels=test_labels)
+
+
+def load_cifar10_data(data_path, is_uint8=False):
+ # train
+ train_names = [
+ 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
+ 'data_batch_5'
+ ]
+ all_images = []
+ all_labels = []
+ for filename in train_names:
+ train_set = unpickle(os.path.join(data_path, filename))
+ all_images.append(train_set[b'data'])
+ all_labels.append(train_set[b'labels'])
+ train_images = np.concatenate(all_images, axis=0)
+ train_images = np.dstack([
+ train_images[:, :1024], train_images[:, 1024:2048],
+ train_images[:, 2048:]
+ ])
+ train_images = train_images.reshape([train_images.shape[0], 32, 32, 3])
+ if not is_uint8:
+ train_images = train_images / 255.0
+ train_images = 2.0 * (train_images - 0.5)
+ train_labels = np.concatenate(all_labels, axis=0)
+
+ # test
+ test_set = unpickle(os.path.join(data_path, 'test_batch'))
+ test_images = test_set[b'data']
+ test_images = np.dstack([
+ test_images[:, :1024], test_images[:, 1024:2048], test_images[:, 2048:]
+ ])
+ test_images = test_images.reshape([test_images.shape[0], 32, 32, 3])
+ if not is_uint8:
+ test_images = test_images / 255.0
+ test_images = 2.0 * (test_images - 0.5)
+ test_labels = np.array(test_set[b'labels'])
+
+ return dict(train_images=train_images,
+ train_labels=train_labels,
+ test_images=test_images,
+ test_labels=test_labels)
+
+
+def preprocess_py(images, pad_size, target_size):
+ '''Preprocess images in python.
+ Args:
+ images: 4-D numpy array.
+ pad_size: number of pixels padded on each side before the random crop.
+ target_size: height/width of the randomly cropped output images.
+ Returns:
+ preprocessed images, 4-D numpy array.
+ '''
+ assert images.shape[1] == images.shape[2], 'can only handle square images!'
+ image_number = images.shape[0]
+ image_size = images.shape[1]
+ # padding, with equal pad size on both sides.
+ padded_images = np.pad(images, [(0, 0), (pad_size, pad_size),
+ (pad_size, pad_size), (0, 0)],
+ mode='constant',
+ constant_values=0)
+ # random crop
+ idx = np.random.random_integers(low=0,
+ high=2 * pad_size,
+ size=[image_number, 2])
+ cropped_images = np.zeros([image_number, target_size, target_size, 3])
+ for i in np.arange(image_number):
+ cropped_images[i] = padded_images[i, idx[i, 0]:idx[i, 0] +
+ target_size, idx[i, 1]:idx[i, 1] +
+ target_size]
+ # random flip
+ if np.random.rand() > 0.5:
+ cropped_images = cropped_images[:, :, ::-1]
+ return cropped_images
+
+
+def one_hot(y, dim):
+ y_dense = np.zeros([len(y), dim])
+ y_dense[np.arange(len(y)), y] = 1.0
+ return y_dense
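+
+
+# Example usage (a sketch, not part of the original file); the data path, pad
+# size and target size are illustrative values only:
+#
+#   dataset = load_cifar10_data('data/cifar-10-batches-py/')
+#   batch = preprocess_py(dataset['train_images'][:128], pad_size=4,
+#                         target_size=32)
+#   labels = one_hot(dataset['train_labels'][:128], dim=10)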
diff --git a/case_studies/bat/binarization_test.py b/case_studies/bat/binarization_test.py
new file mode 100644
index 0000000..98101bc
--- /dev/null
+++ b/case_studies/bat/binarization_test.py
@@ -0,0 +1,294 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+from functools import partial
+from typing import Tuple
+
+import torch
+
+from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import time
+import sys
+
+import numpy as np
+
+from bat_utils import *
+from wide_resnet import Model
+
+from utils import build_dataloader_from_arrays
+from active_tests import decision_boundary_binarization as dbb
+from argparse_utils import DecisionBoundaryBinarizationSettings
+
+
+def load_data(FLAGS):
+ # load data
+ if FLAGS.dataset == 'SVHN':
+ raise ValueError("not supported")
+ elif FLAGS.dataset == 'CIFAR':
+ if FLAGS.num_classes == 10:
+ dataset = load_cifar10_data('data/cifar-10-batches-py/')
+ elif FLAGS.num_classes == 20:
+ dataset = load_cifar100_data('cifar100_data', is_fine=False)
+ elif FLAGS.num_classes == 100:
+ dataset = load_cifar100_data('cifar100_data', is_fine=True)
+ else:
+ raise ValueError('Number of classes not valid!')
+ train_images = dataset['train_images']
+ train_labels = dataset['train_labels']
+ test_images = dataset['test_images']
+ test_labels = dataset['test_labels']
+ else:
+ raise ValueError('Dataset not valid!')
+
+ return train_images, train_labels, test_images, test_labels
+
+
+def load_model(FLAGS):
+ x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
+ y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
+ is_train = tf.placeholder(tf.bool, name='is_train')
+
+ model = Model(is_train)
+ x_transformed = x_pl * 2.0 - 1.0
+ fe_logits, features = model.build_model(images=x_transformed,
+ num_classes=FLAGS.num_classes)
+
+ saver = tf.train.Saver(max_to_keep=100)
+
+ config = tf.ConfigProto()
+ config.gpu_options.allow_growth = True
+ sess = tf.Session(config=config)
+
+ saver.restore(sess, FLAGS.ckpt_path)
+ print('restored checkpoint from %s' % FLAGS.ckpt_path)
+
+ # create binary classifier
+ bro_w = tf.get_variable(
+ 'DW', [features.shape[-1], 2],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ bro_b = tf.get_variable('biases', [2], initializer=tf.constant_initializer())
+ bro_w_pl = tf.placeholder(tf.float32, shape=[features.shape[-1], 2])
+ bro_b_pl = tf.placeholder(tf.float32, shape=[2])
+ bro_w_set_weight = bro_w.assign(bro_w_pl)
+ bro_b_set_weight = bro_b.assign(bro_b_pl)
+ logits = tf.nn.xw_plus_b(features, bro_w, bro_b)
+
+ prob = tf.nn.softmax(logits)
+ correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
+ accuracy = tf.reduce_mean(correct)
+
+ return sess, (x_pl, y_pl, is_train, logits, fe_logits, features, accuracy), \
+ (bro_w_pl, bro_b_pl, bro_w_set_weight, bro_b_set_weight)
+
+
+def setup_attack(logits, x_pl, y_pl, FLAGS):
+ xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
+ labels=y_pl)
+ # loss for adversarial attack
+ if FLAGS.loss_type == 'xent':
+ if FLAGS.targeted:
+ loss_att = tf.reduce_sum(xent)
+ else:
+ loss_att = -tf.reduce_sum(xent)
+ elif FLAGS.loss_type == 'CW':
+ y_loss = tf.one_hot(y_pl, FLAGS.num_classes)
+ self = tf.reduce_sum(y_loss * logits, axis=1)
+ other = tf.reduce_max((1 - y_loss) * logits - y_loss * 1e4, axis=1)
+ if FLAGS.targeted:
+ raise ValueError("not supported")
+ else:
+ loss_att = tf.reduce_sum(tf.maximum(self - other + FLAGS.margin, 0))
+ else:
+ raise ValueError('loss type not supported!')
+
+ grad, = tf.gradients(loss_att, x_pl)
+
+ return grad
+
+
+def adv_attack(sess, grad, x_pl, y_pl, is_train, x_batch, y_batch, FLAGS):
+ epsilon = FLAGS.epsilon / 255.0
+ step_size = FLAGS.step_size / 255.0
+
+ if not FLAGS.targeted:
+ y_att = np.copy(y_batch)
+ else:
+ raise ValueError("targeted mode not supported")
+
+ # randomly perturb the original images
+ if FLAGS.random_start:
+ x = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
+ else:
+ x = np.copy(x_batch)
+
+ for i in range(FLAGS.num_steps):
+ grad_val = sess.run(grad,
+ feed_dict={
+ x_pl: x,
+ y_pl: y_att,
+ is_train: False
+ })
+
+ x = x - step_size * np.sign(grad_val)
+ x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
+ x = np.clip(x, 0, 1.0)
+
+ return x
+
+
+def parse_args():
+ tf.flags.DEFINE_string('ckpt_path', '', '')
+ tf.flags.DEFINE_string('dataset', 'CIFAR', '')
+ tf.flags.DEFINE_integer('num_classes', 10, '')
+ tf.flags.DEFINE_integer('batch_size', 100, '')
+ tf.flags.DEFINE_string('loss_type', 'xent', '')
+ tf.flags.DEFINE_float('margin', 50.0, '')
+ tf.flags.DEFINE_float('epsilon', 8.0, '')
+ tf.flags.DEFINE_integer('num_steps', 10, '')
+ tf.flags.DEFINE_float('step_size', 2.0, '')
+ tf.flags.DEFINE_boolean('random_start', False, '')
+ tf.flags.DEFINE_boolean('targeted', False, '')
+ tf.flags.DEFINE_integer('n_samples', 2048, '')
+
+ tf.flags.DEFINE_integer('n_boundary_points', 1, '')
+ tf.flags.DEFINE_integer('n_inner_points', 999, '')
+ tf.flags.DEFINE_boolean('sample_from_corners', False, '')
+
+ FLAGS = tf.flags.FLAGS
+
+ # print(FLAGS.flag_values_dict())
+
+ return FLAGS
+
+
+def run_attack(m, l, sess, logits, x_pl, is_train, bro_w_pl, bro_b_pl,
+ bro_w_assign, bro_b_assign, attack_fn):
+ linear_layer = m[-1]
+ del m
+
+ sess.run(bro_w_assign, {bro_w_pl: linear_layer.weight.data.numpy().T})
+ sess.run(bro_b_assign, {bro_b_pl: linear_layer.bias.data.numpy()})
+
+ for x, y in l:
+ x, y = x.numpy(), y.numpy()
+ x = x.transpose((0, 2, 3, 1))
+ x_batch_adv = attack_fn(x, y)
+
+ adv_logits: np.ndarray = sess.run(logits,
+ feed_dict={
+ x_pl: x_batch_adv,
+ is_train: False
+ })
+ is_adv: np.ndarray = adv_logits.argmax(-1) != y
+
+ return is_adv, (torch.tensor(x_batch_adv.transpose((0, 3, 1, 2))),
+ torch.tensor(adv_logits))
+
+
+def main():
+ FLAGS = parse_args()
+ _, _, test_images, test_labels = load_data(FLAGS)
+
+ print(FLAGS.flag_values_dict())
+
+ # normalize from the [-1, 1] to the [0, 1] value range since normalization
+ # now happens inside the model
+ test_images = (test_images + 1.0) / 2.0
+
+ # subsample test data
+ if FLAGS.n_samples == -1:
+ FLAGS.n_samples = len(test_images)
+ idxs = np.arange(len(test_images))
+ np.random.shuffle(idxs)
+ test_images, test_labels = test_images[idxs], test_labels[idxs]
+ test_images = test_images.transpose((0, 3, 1, 2))
+
+ test_loader = build_dataloader_from_arrays(test_images, test_labels,
+ FLAGS.batch_size)
+
+ sess, (x_pl, y_pl, is_train, logits, fe_logits, features, accuracy), \
+ (bro_w_pl, bro_b_pl, bro_w_set_weight, bro_b_set_weight) = load_model(FLAGS)
+ attack_grad = setup_attack(logits, x_pl, y_pl, FLAGS)
+ attack_fn = lambda x, y: adv_attack(sess, attack_grad, x_pl, y_pl, is_train,
+ x, y, FLAGS)
+
+ def feature_extractor_forward_pass(x, features_and_logits: bool = False,
+ features_only: bool = False):
+ if features_and_logits:
+ assert not features_only, "Only one of the flags must be set."
+ if features_and_logits:
+ f, l = sess.run(
+ (features, fe_logits),
+ feed_dict={x_pl: x.transpose(0, 2, 3, 1), is_train: False})
+ return f, l
+ elif features_only:
+ return sess.run(
+ features,
+ feed_dict={x_pl: x.transpose(0, 2, 3, 1), is_train: False})
+ else:
+ return sess.run(
+ fe_logits,
+ feed_dict={x_pl: x.transpose(0, 2, 3, 1), is_train: False})
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=feature_extractor_forward_pass,
+ logit_forward_and_backward_pass=None,
+ )
+
+ scores_logit_differences_and_validation_accuracies = \
+ dbb.interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ # m, l, sess, logits, x_pl, is_train, bro_w_pl, bro_b_pl,
+ # bro_w_assign, bro_b_assign, attack_fn)
+ attack_fn=lambda m, l, kwargs: partial(run_attack,
+ sess=sess, logits=logits, x_pl=x_pl, is_train=is_train,
+ bro_w_pl=bro_w_pl, bro_b_pl=bro_b_pl, bro_w_assign=bro_w_set_weight,
+ bro_b_assign=bro_b_set_weight,
+ attack_fn=attack_fn)(m, l),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=FLAGS.epsilon / 255.0,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=FLAGS.n_boundary_points,
+ n_inner_points=FLAGS.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=FLAGS.n_samples,
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ # rescale_logits="adaptive",
+ sample_training_data_from_corners=FLAGS.sample_from_corners,
+ decision_boundary_closeness=0.9999,
+ # args.num_samples_test * 10
+ )
+
+ print(dbb.format_result(scores_logit_differences_and_validation_accuracies,
+ FLAGS.n_samples))
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/bat/binarization_test.sh b/case_studies/bat/binarization_test.sh
new file mode 100644
index 0000000..5134706
--- /dev/null
+++ b/case_studies/bat/binarization_test.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+nsamples=${1:-512}
+
+echo "#samples: $nsamples"
+
+kwargs=""
+# kwargs="--sample_from_corners=True"
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/bat/binarization_test.py \
+ --num_classes=10 \
+ --ckpt_path=checkpoints/bat/mosa_eps4/checkpoint-200 \
+ --dataset=CIFAR \
+ --loss_type=xent \
+ --margin=50.0 \
+ --epsilon=8.0 \
+ --num_steps=100 \
+ --step_size=2.0 \
+ --random_start=True \
+ --batch_size=256 \
+ --n_samples=$nsamples \
+ --n_inner_points=999 \
+ $kwargs
diff --git a/case_studies/bat/eval.py b/case_studies/bat/eval.py
new file mode 100644
index 0000000..8494736
--- /dev/null
+++ b/case_studies/bat/eval.py
@@ -0,0 +1,209 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+import os
+import sys
+
+import numpy as np
+import tensorflow as tf
+
+from utils import *
+from wide_resnet import Model
+
+tf.flags.DEFINE_string('ckpt_path', '', '')
+tf.flags.DEFINE_string('dataset', 'CIFAR', '')
+tf.flags.DEFINE_integer('num_classes', 10, '')
+tf.flags.DEFINE_integer('batch_size', 100, '')
+tf.flags.DEFINE_string('loss_type', 'xent', '')
+tf.flags.DEFINE_float('margin', 50.0, '')
+tf.flags.DEFINE_float('epsilon', 8.0, '')
+tf.flags.DEFINE_integer('num_steps', 10, '')
+tf.flags.DEFINE_float('step_size', 2.0, '')
+tf.flags.DEFINE_boolean('random_start', False, '')
+tf.flags.DEFINE_boolean('targeted', False, '')
+
+tf.flags.DEFINE_string('cuda_device', '3', '')
+
+FLAGS = tf.flags.FLAGS
+
+os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cuda_device
+
+print(FLAGS.flag_values_dict())
+
+# load data
+if FLAGS.dataset == 'SVHN':
+ train_data = np.load('svhn_data/train_32x32.npz')
+ train_images = train_data['arr_0']
+ train_images = 2.0 * (train_images / 255.0 - 0.5)
+ train_labels = train_data['arr_1']
+ test_data = np.load('svhn_data/test_32x32.npz')
+ test_images = test_data['arr_0']
+ test_images = 2 * (test_images / 255.0 - 0.5)
+ test_labels = test_data['arr_1']
+elif FLAGS.dataset == 'CIFAR':
+ if FLAGS.num_classes == 10:
+ dataset = load_cifar10_data('cifar10_data')
+ elif FLAGS.num_classes == 20:
+ dataset = load_cifar100_data('cifar100_data', is_fine=False)
+ elif FLAGS.num_classes == 100:
+ dataset = load_cifar100_data('cifar100_data', is_fine=True)
+ else:
+ raise ValueError('Number of classes not valid!')
+ train_images = dataset['train_images']
+ train_labels = dataset['train_labels']
+ test_images = dataset['test_images']
+ test_labels = dataset['test_labels']
+else:
+ raise ValueError('Dataset not valid!')
+
+x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
+y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
+is_train = tf.placeholder(tf.bool, name='is_train')
+
+model = Model(is_train)
+logits, _ = model.build_model(images=x_pl, num_classes=FLAGS.num_classes)
+prob = tf.nn.softmax(logits)
+xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
+ labels=y_pl)
+
+correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
+accuracy = tf.reduce_mean(correct)
+
+# loss for adversarial attack
+if FLAGS.loss_type == 'xent':
+ if FLAGS.targeted:
+ loss_att = tf.reduce_sum(xent)
+ else:
+ loss_att = -tf.reduce_sum(xent)
+elif FLAGS.loss_type == 'CW':
+ y_loss = tf.one_hot(y_pl, FLAGS.num_classes)
+ self = tf.reduce_sum(y_loss * logits, axis=1)
+ other = tf.reduce_max((1 - y_loss) * logits - y_loss * 1e4, axis=1)
+ if FLAGS.targeted:
+ loss_att = tf.reduce_sum(tf.maximum(other - self + FLAGS.margin, 0))
+ else:
+ loss_att = tf.reduce_sum(tf.maximum(self - other + FLAGS.margin, 0))
+else:
+ raise ValueError('loss type not supported!')
+
+grad, = tf.gradients(loss_att, x_pl)
+
+saver = tf.train.Saver(max_to_keep=100)
+
+config = tf.ConfigProto()
+config.gpu_options.allow_growth = True
+sess = tf.Session(config=config)
+
+saver.restore(sess, FLAGS.ckpt_path)
+print('restored checkpoint from %s' % FLAGS.ckpt_path)
+
+
+def adv_attack(nat_prob, x_batch, y_batch):
+ epsilon = FLAGS.epsilon / 255.0 * 2
+ step_size = FLAGS.step_size / 255.0 * 2
+
+ if not FLAGS.targeted:
+ y_att = np.copy(y_batch)
+ else:
+ # most confusing targeted attack
+ nat_prob[np.arange(y_batch.shape[0]), y_batch] = 0.0
+ y_att = np.argmax(nat_prob, axis=1)
+
+ # randomly perturb the original images
+ if FLAGS.random_start:
+ x = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
+ else:
+ x = np.copy(x_batch)
+
+ for i in range(FLAGS.num_steps):
+ grad_val = sess.run(grad,
+ feed_dict={
+ x_pl: x,
+ y_pl: y_att,
+ is_train: False
+ })
+
+ x = x - step_size * np.sign(grad_val)
+
+ x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
+
+ x = np.clip(x, -1.0, 1.0)
+
+ return x
+
+
+test_size = test_images.shape[0]
+epoch_steps = np.ceil(test_size / FLAGS.batch_size).astype(np.int32)
+nat_total = 0.0
+adv_total = 0.0
+class_nat_total = np.zeros([FLAGS.num_classes])
+class_adv_total = np.zeros([FLAGS.num_classes])
+nat_cnt_list = np.zeros([test_size])
+adv_cnt_list = np.zeros([test_size])
+idx = np.random.permutation(test_size)
+for step_idx in range(epoch_steps):
+ start = step_idx * FLAGS.batch_size
+ end = np.minimum((step_idx + 1) * FLAGS.batch_size,
+ test_size).astype(np.int32)
+ x_batch = test_images[idx[start:end]]
+ y_batch = test_labels[idx[start:end]]
+
+ start_time = time.time()
+
+ nat_cnt, nat_prob = sess.run([correct, prob],
+ feed_dict={
+ x_pl: x_batch,
+ y_pl: y_batch,
+ is_train: False
+ })
+
+ x_batch_adv = adv_attack(nat_prob, x_batch, y_batch)
+
+ adv_cnt = sess.run(correct,
+ feed_dict={
+ x_pl: x_batch_adv,
+ y_pl: y_batch,
+ is_train: False
+ })
+
+ nat_cnt_list[start:end] = nat_cnt
+ adv_cnt_list[start:end] = adv_cnt
+
+ for ii in range(FLAGS.num_classes):
+ class_nat_total[ii] += np.sum(nat_cnt[y_batch == ii])
+ class_adv_total[ii] += np.sum(adv_cnt[y_batch == ii])
+
+ nat_total += np.sum(nat_cnt)
+ adv_total += np.sum(adv_cnt)
+
+ duration = time.time() - start_time
+ print('finished batch %d/%d, duration %.2f, nat acc %.2f, adv acc %.2f' %
+ (step_idx, epoch_steps, duration, 100 * np.mean(nat_cnt),
+ 100 * np.mean(adv_cnt)))
+ sys.stdout.flush()
+
+nat_acc = nat_total / test_size
+adv_acc = adv_total / test_size
+class_nat_total /= (test_size / FLAGS.num_classes)
+class_adv_total /= (test_size / FLAGS.num_classes)
+print('clean accuracy: %.2f, adv accuracy: %.2f' %
+ (100 * nat_acc, 100 * adv_acc))
+for ii in range(FLAGS.num_classes):
+ print('class %d, clean %.2f, adv %.2f' %
+ (ii, 100 * class_nat_total[ii], 100 * class_adv_total[ii]))
diff --git a/case_studies/bat/eval.sh b/case_studies/bat/eval.sh
new file mode 100644
index 0000000..e9145b7
--- /dev/null
+++ b/case_studies/bat/eval.sh
@@ -0,0 +1,14 @@
+#! /bin/bash
+
+python eval.py --cuda_device=3 \
+--num_classes=10 \
+--ckpt_path=models/mc_eps4/checkpoint-200 \
+--dataset=CIFAR \
+--loss_type=xent \
+--margin=50.0 \
+--epsilon=8.0 \
+--num_steps=100 \
+--step_size=2.0 \
+--random_start=True \
+--targeted=False \
+--batch_size=100
diff --git a/case_studies/bat/train.py b/case_studies/bat/train.py
new file mode 100644
index 0000000..5a50363
--- /dev/null
+++ b/case_studies/bat/train.py
@@ -0,0 +1,356 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+import os
+import sys
+
+import numpy as np
+import tensorflow as tf
+
+from utils import *
+from wide_resnet import Model
+
+tf.flags.DEFINE_string('model_dir', '/tmp/adv_train/', '')
+tf.flags.DEFINE_string('dataset', '', '')
+tf.flags.DEFINE_integer('num_classes', 10, '')
+
+tf.flags.DEFINE_string('restore_ckpt_path', '', '')
+tf.flags.DEFINE_integer('start_epoch', 0, '')
+tf.flags.DEFINE_integer('max_epoch', 201, '')
+tf.flags.DEFINE_integer('decay_epoch1', 100, '')
+tf.flags.DEFINE_integer('decay_epoch2', 150, '')
+tf.flags.DEFINE_float('decay_rate', 0.1, '')
+tf.flags.DEFINE_float('learning_rate', 0.1, '')
+tf.flags.DEFINE_float('momentum', 0.9, '')
+tf.flags.DEFINE_integer('batch_size', 128, '')
+tf.flags.DEFINE_float('weight_decay', 2e-4, '')
+
+tf.flags.DEFINE_float('margin', 50.0, '')
+tf.flags.DEFINE_string('loss_type', 'xent', '')
+tf.flags.DEFINE_float('epsilon', 8.0, '')
+tf.flags.DEFINE_integer('num_steps', 7, '')
+tf.flags.DEFINE_float('step_size', 2.0, '')
+tf.flags.DEFINE_boolean('random_start', True, '')
+tf.flags.DEFINE_boolean('targeted', True, '')
+tf.flags.DEFINE_string('target_type', 'MC', '')
+
+tf.flags.DEFINE_boolean('label_adversary', True, '')
+tf.flags.DEFINE_float('multi', 9, '')
+
+tf.flags.DEFINE_integer('log_steps', 10, '')
+tf.flags.DEFINE_integer('save_epochs', 20, '')
+tf.flags.DEFINE_integer('eval_epochs', 10, '')
+
+tf.flags.DEFINE_string('cuda_device', '3', '')
+
+FLAGS = tf.flags.FLAGS
+
+os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cuda_device
+
+print(FLAGS.flag_values_dict())
+
+# load data
+if FLAGS.dataset == 'SVHN':
+ train_data = np.load('svhn_data/train_32x32.npz')
+ train_images = train_data['arr_0']
+ train_images = 2.0 * (train_images / 255.0 - 0.5)
+ train_labels = train_data['arr_1']
+ test_data = np.load('svhn_data/test_32x32.npz')
+ test_images = test_data['arr_0']
+ test_images = 2 * (test_images / 255.0 - 0.5)
+ test_labels = test_data['arr_1']
+elif FLAGS.dataset == 'CIFAR':
+ if FLAGS.num_classes == 10:
+ dataset = load_cifar10_data('cifar10_data')
+ elif FLAGS.num_classes == 20:
+ dataset = load_cifar100_data('cifar100_data', is_fine=False)
+ elif FLAGS.num_classes == 100:
+ dataset = load_cifar100_data('cifar100_data', is_fine=True)
+ else:
+ raise ValueError('Number of classes not valid!')
+ train_images = dataset['train_images']
+ train_labels = dataset['train_labels']
+ test_images = dataset['test_images']
+ test_labels = dataset['test_labels']
+else:
+ raise ValueError('Dataset not valid!')
+
+x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
+y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
+y_loss = tf.placeholder(tf.float32,
+ shape=[None, FLAGS.num_classes],
+ name='y_loss')
+lr = tf.placeholder(tf.float32, name='lr')
+is_train = tf.placeholder(tf.bool, name='is_train')
+global_step = tf.Variable(0, trainable=False, name='global_step')
+
+model = Model(is_train)
+logits, _ = model.build_model(images=x_pl, num_classes=FLAGS.num_classes)
+prob = tf.nn.softmax(logits)
+xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_loss)
+mean_xent = tf.reduce_mean(xent)
+total_loss = mean_xent + FLAGS.weight_decay * model.weight_decay_loss
+
+correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
+accuracy = tf.reduce_mean(correct)
+
+# loss for adversarial attack
+if FLAGS.loss_type == 'xent':
+ if FLAGS.targeted:
+ loss_att = tf.reduce_sum(xent)
+ else:
+ loss_att = -tf.reduce_sum(xent)
+elif FLAGS.loss_type == 'CW':
+ self = tf.reduce_sum(y_loss * logits, axis=1)
+ other = tf.reduce_max((1 - y_loss) * logits - y_loss * 1000.0, axis=1)
+ if FLAGS.targeted:
+ loss_att = tf.reduce_sum(tf.maximum(other - self + FLAGS.margin, 0))
+ else:
+ loss_att = tf.reduce_sum(tf.maximum(self - other + FLAGS.margin, 0))
+else:
+ raise ValueError('loss type not supported!')
+
+grad, = tf.gradients(loss_att, x_pl)
+
+update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+opt = tf.train.MomentumOptimizer(lr, FLAGS.momentum)
+grads_and_vars = opt.compute_gradients(total_loss, tf.trainable_variables())
+with tf.control_dependencies(update_ops):
+ train_step = opt.apply_gradients(grads_and_vars, global_step=global_step)
+
+saver = tf.train.Saver(max_to_keep=100)
+
+init_op = tf.global_variables_initializer()
+
+config = tf.ConfigProto()
+config.gpu_options.allow_growth = True
+sess = tf.Session(config=config)
+sess.run(init_op)
+
+if FLAGS.restore_ckpt_path:
+ saver.restore(sess, os.path.abspath(FLAGS.restore_ckpt_path))
+ print('Restored checkpoints from %s' % FLAGS.restore_ckpt_path)
+
+
+def adv_attack(nat_logits,
+ x_batch,
+ y_batch,
+ epsilon=FLAGS.epsilon,
+ step_size=FLAGS.step_size,
+ num_steps=FLAGS.num_steps):
+ epsilon = epsilon / 255.0 * 2.0
+ step_size = step_size / 255.0 * 2.0
+ y_batch_dense = one_hot(y_batch, FLAGS.num_classes)
+ if not FLAGS.targeted: # non-targeted
+ y_att = np.copy(y_batch)
+ elif FLAGS.target_type == 'MC': # most confusing target label
+ nat_logits[np.arange(y_batch.shape[0]), y_batch] = -1e4
+ y_att = np.argmax(nat_logits, axis=1)
+ elif FLAGS.target_type == 'RAND': # random target label
+ y_att = np.zeros_like(y_batch)
+ for ii in np.arange(y_batch.shape[0]):
+ tmp = np.ones([FLAGS.num_classes]) / (FLAGS.num_classes - 1)
+ tmp[y_batch[ii]] = 0.0
+ y_att[ii] = np.random.choice(FLAGS.num_classes, p=tmp)
+ elif FLAGS.target_type == 'MOSA': # most one-step-adversarial (MOSA) target label
+ weight = sess.run(tf.get_default_graph().get_tensor_by_name(
+ 'logit/DW:0')).T # num_classes * num_features
+ dist = euclidean_distances(
+ weight[y_batch],
+ weight) + y_batch_dense # batch_size * num_classes
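+    # adding the one-hot labels gives the true class a distance of 1 instead of 0,
+    # which avoids a division by zero in the ratio below (the true class is excluded
+    # from the argmin anyway via the 1e4 placeholder in `truncated`)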
+ gt_logits = np.sum(nat_logits * y_batch_dense, axis=1)
+ diff_logits = np.reshape(gt_logits, [-1, 1]) - nat_logits
+ truncated = np.where(diff_logits > 1e-4, diff_logits, 1e4)
+ y_att = np.argmin(truncated / dist, axis=1)
+ elif FLAGS.target_type == 'MIX': # mix of MC and MOSA
+ weight = sess.run(tf.get_default_graph().get_tensor_by_name(
+ 'logit/DW:0')).T # num_classes * num_features
+ dist = euclidean_distances(
+ weight[y_batch],
+ weight) + y_batch_dense # batch_size * num_classes
+ gt_logits = np.sum(nat_logits * y_batch_dense, axis=1)
+ diff_logits = np.reshape(gt_logits, [-1, 1]) - nat_logits
+ truncated = np.where(diff_logits > 1e-4, diff_logits, 1e4)
+ y_att_MOSA = np.argmin(truncated / dist, axis=1)
+ y_att_MC = np.argmax((1.0 - y_batch_dense) * nat_logits, axis=1)
+ y_att = np.where(
+ np.argmax(nat_logits, axis=1) == y_batch, y_att_MOSA, y_att_MC)
+ else:
+ raise ValueError('Target type not valid!')
+ y_att_dense = one_hot(y_att, FLAGS.num_classes)
+
+ # randomly perturb as initialization
+ if FLAGS.random_start:
+ noise = np.random.uniform(-epsilon, epsilon, x_batch.shape)
+ x = x_batch + noise
+ else:
+ x = np.copy(x_batch)
+
+ for i in range(num_steps):
+ grad_val = sess.run(grad,
+ feed_dict={
+ x_pl: x,
+ y_loss: y_att_dense,
+ is_train: False
+ })
+
+ x = x - step_size * np.sign(grad_val)
+
+ x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
+
+ x = np.clip(x, -1.0, 1.0)
+
+ return x
+
+
+def adv_labels(nat_prob, y_batch, gamma=0.01):
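+  # Builds soft "adversarial" target labels from the clean predictions: the true class
+  # keeps probability 1 - delta, and the remaining mass delta is spread over the other
+  # classes in proportion to their negative log-likelihood L (less likely classes get
+  # more mass). FLAGS.multi sets the ratio between the true-class probability and the
+  # largest wrong-class probability; gamma keeps every wrong-class weight strictly positive.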
+  L = -np.log(nat_prob + 1e-8)  # negative log-likelihood
+ LL = np.copy(L)
+ LL[np.arange(y_batch.shape[0]), y_batch] = 1e4
+ minval = np.min(LL, axis=1)
+ LL[np.arange(y_batch.shape[0]), y_batch] = -1e4
+ maxval = np.max(LL, axis=1)
+
+ denom = np.sum(L, axis=1) - L[np.arange(y_batch.shape[0]), y_batch] - (
+ FLAGS.num_classes - 1) * (minval - gamma)
+ delta = 1 / (1 + FLAGS.multi * (maxval - minval + gamma) / denom)
+ alpha = delta / denom
+
+ y_batch_adv = np.reshape(
+ alpha, [-1, 1]) * (L - np.reshape(minval, [-1, 1]) + gamma)
+ y_batch_adv[np.arange(y_batch.shape[0]), y_batch] = 1.0 - delta
+
+ return y_batch_adv
+
+
+# training loop
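+# Each training step perturbs both the images (adv_attack; train.sh uses a targeted
+# single-step attack with MC targets) and the labels (adv_labels), and then trains on
+# the perturbed image / label pairs.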
+train_size = train_images.shape[0]
+epoch_steps = np.ceil(train_size / FLAGS.batch_size).astype(np.int32)
+for epoch_idx in np.arange(FLAGS.start_epoch, FLAGS.max_epoch):
+ if epoch_idx < FLAGS.decay_epoch1:
+ lr_val = FLAGS.learning_rate
+ elif epoch_idx < FLAGS.decay_epoch2:
+ lr_val = FLAGS.learning_rate * FLAGS.decay_rate
+ else:
+ lr_val = FLAGS.learning_rate * FLAGS.decay_rate * FLAGS.decay_rate
+
+ # each epoch random shuffle of training images
+ idx = np.random.permutation(train_size)
+ for step_idx in np.arange(epoch_steps):
+ start = step_idx * FLAGS.batch_size
+ end = np.minimum((step_idx + 1) * FLAGS.batch_size,
+ train_size).astype(np.int32)
+ x_batch = preprocess_py(train_images[idx[start:end]], 4, 32)
+ y_batch = train_labels[idx[start:end]]
+ y_batch_dense = one_hot(y_batch, FLAGS.num_classes)
+
+ start_time = time.time()
+
+ nat_prob, nat_logits = sess.run([prob, logits],
+ feed_dict={
+ x_pl: x_batch,
+ is_train: False
+ })
+
+ # generate adversarial images
+ x_batch_adv = adv_attack(nat_logits, x_batch, y_batch)
+
+ # generate adversarial labels
+ y_batch_adv = adv_labels(nat_prob, y_batch)
+
+ # eval accuracy
+ if step_idx % FLAGS.log_steps == 0:
+ nat_acc = sess.run(accuracy,
+ feed_dict={
+ x_pl: x_batch,
+ y_pl: y_batch,
+ is_train: False
+ })
+ adv_acc = sess.run(accuracy,
+ feed_dict={
+ x_pl: x_batch_adv,
+ y_pl: y_batch,
+ is_train: False
+ })
+
+ # training step
+ if FLAGS.label_adversary:
+ _, loss_val = sess.run([train_step, total_loss],
+ feed_dict={
+ x_pl: x_batch_adv,
+ y_loss: y_batch_adv,
+ is_train: True,
+ lr: lr_val
+ })
+ else:
+ _, loss_val = sess.run(
+ [train_step, total_loss],
+ feed_dict={
+ x_pl: x_batch_adv,
+ y_loss: y_batch_dense,
+ is_train: True,
+ lr: lr_val
+ })
+
+ duration = time.time() - start_time
+
+ # print to stdout
+ if step_idx % FLAGS.log_steps == 0:
+ print(
+ "epoch %d, step %d, lr %.4f, duration %.2f, training nat acc %.2f, training adv acc %.2f, training adv loss %.4f"
+ % (epoch_idx, step_idx, lr_val, duration, 100 * nat_acc,
+ 100 * adv_acc, loss_val))
+ sys.stdout.flush()
+
+ # save checkpoint
+ if epoch_idx % FLAGS.save_epochs == 0:
+ saver.save(sess, os.path.join(FLAGS.model_dir, 'checkpoint'),
+ epoch_idx)
+
+ # evaluate
+ def eval_once():
+ eval_size = test_images.shape[0]
+ epoch_steps = np.ceil(eval_size / FLAGS.batch_size).astype(np.int32)
+ # random shuffle of test images does not affect the result
+ idx = np.random.permutation(eval_size)
+ count = 0.0
+ for step_idx in np.arange(epoch_steps):
+ start = step_idx * FLAGS.batch_size
+ end = np.minimum((step_idx + 1) * FLAGS.batch_size,
+ eval_size).astype(np.int32)
+ x_batch = test_images[idx[start:end]]
+ y_batch = test_labels[idx[start:end]]
+ nat_logits = sess.run(logits,
+ feed_dict={
+ x_pl: x_batch,
+ is_train: False
+ })
+ x_batch_adv = adv_attack(nat_logits, x_batch, y_batch)
+ count += np.sum(
+ sess.run(correct,
+ feed_dict={
+ x_pl: x_batch_adv,
+ y_pl: y_batch,
+ is_train: False
+ }))
+ acc = count / eval_size
+ return acc
+
+ if epoch_idx % FLAGS.eval_epochs == 0:
+ print('epoch %d, adv acc %.2f' % (epoch_idx, 100 * eval_once()))
diff --git a/case_studies/bat/train.sh b/case_studies/bat/train.sh
new file mode 100644
index 0000000..fd60ef3
--- /dev/null
+++ b/case_studies/bat/train.sh
@@ -0,0 +1,22 @@
+#! /bin/bash
+
+python train.py --cuda_device=0 \
+--model_dir=/tmp \
+--dataset=CIFAR \
+--num_classes=10 \
+--max_epoch=201 \
+--decay_epoch1=100 \
+--decay_epoch2=150 \
+--start_epoch=0 \
+--restore_ckpt_path='' \
+--loss_type=xent \
+--margin=50.0 \
+--epsilon=8.0 \
+--num_steps=1 \
+--step_size=8.0 \
+--random_start=True \
+--targeted=True \
+--target_type=MC \
+--save_epochs=20 \
+--eval_epochs=10 \
+--multi=9.0
diff --git a/case_studies/bat/wide_resnet.py b/case_studies/bat/wide_resnet.py
new file mode 100644
index 0000000..6459a2f
--- /dev/null
+++ b/case_studies/bat/wide_resnet.py
@@ -0,0 +1,185 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# based on https://github.com/tensorflow/models/tree/master/resnet
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+
+class Model(object):
+ """ResNet model."""
+ def __init__(self, is_train):
+ """ResNet constructor."""
+ self.is_train = is_train
+
+ def add_internal_summaries(self):
+ pass
+
+ def build_model(self, images, num_classes):
+ """Build the core model within the graph."""
+ with tf.variable_scope('input'):
+ x = self._conv('init_conv', images, 3, 3, 16, self._stride_arr(1))
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+ # Width configurations for the wide residual network (WRN-28-10 is active below;
+ # uncomment one of the alternatives to change the width). Wide networks are more
+ # memory efficient than very deep residual networks and perform comparably well.
+ # https://arxiv.org/pdf/1605.07146v1.pdf
+ #filters = [16, 16, 32, 64] # WRN-28-1
+ #filters = [16, 80, 160, 320] # WRN-28-5
+ filters = [16, 160, 320, 640] # WRN-28-10
+ #filters = [16, 256, 512, 1024] # WRN-28-16
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1],
+ self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1),
+ False)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2],
+ self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1),
+ False)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3],
+ self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1),
+ False)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, num_classes)
+
+ self.weight_decay_loss = self._decay()
+
+ return self.pre_softmax, x
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(
+ inputs=x,
+ decay=.9,
+ center=True,
+ scale=True,
+ activation_fn=None,
+ updates_collections=tf.GraphKeys.UPDATE_OPS,
+ is_training=self.is_train)
+
+ def _residual(self,
+ x,
+ in_filter,
+ out_filter,
+ stride,
+ activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter - in_filter) // 2,
+ (out_filter - in_filter) // 2]])
+
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32,
+ initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 /
+ n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
diff --git a/case_studies/curriculum_at/PGD_attack.py b/case_studies/curriculum_at/PGD_attack.py
new file mode 100644
index 0000000..ea39bd2
--- /dev/null
+++ b/case_studies/curriculum_at/PGD_attack.py
@@ -0,0 +1,173 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Implementation of attack methods. Running this file as a program will
+apply the attack to the model specified by the config file and store
+the examples in an .npy file.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import os
+import tensorflow as tf
+import numpy as np
+
+import cifar10_input
+
+import config_attack
+
+class LinfPGDAttack:
+ def __init__(self, model, epsilon, num_steps, step_size, random_start, loss_func, dataset='cifar10'):
+ """Attack parameter initialization. The attack performs k steps of
+ size a, while always staying within epsilon from the initial
+ point."""
+ self.model = model
+ self.epsilon = epsilon
+ self.num_steps = num_steps
+ self.step_size = step_size
+ self.rand = random_start
+ self.dataset = dataset
+
+ if loss_func == 'xent':
+ loss = model.xent
+ elif loss_func == "logit-diff":
+ loss = model.top2_logit_diff_loss
+ elif loss_func == 'target_task_xent':
+ raise ValueError("Not implemented")
+ loss = model.target_task_mean_xent
+ correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1)
+ wrong_logit = tf.reduce_max((1-label_mask) * model.pre_softmax - 1e4*label_mask, axis=1)
+ loss = -tf.nn.relu(correct_logit - wrong_logit + 50)
+ else:
+ print('Unknown loss function. Defaulting to cross-entropy')
+ loss = model.xent
+
+ self.grad = tf.gradients(loss, model.x_input)[0]
+ self.loss = loss
+ # self.logit = tf.placeholder(tf.float32, shape=[None, 100])
+ # self.grad2 = tf.gradients(loss + tf.reduce_mean(tf.reduce_sum(tf.pow(tf.subtract(self.logit, model.pre_softmax), 2.0), keepdims=True)), model.x_input)[0]
+
+ def perturb(self, x_nat, y, sess, feed_dict={}):
+ """Given a set of examples (x_nat, y), returns a set of adversarial
+ examples within epsilon of x_nat in l_infinity norm."""
+ if self.rand:
+ x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
+ x = np.clip(x, 0, 255) # ensure valid pixel range
+ else:
+ x = np.copy(x_nat)
+
+ for i in range(self.num_steps):
+ loss, grad = sess.run((self.loss, self.grad), feed_dict={self.model.x_input: x,
+ self.model.y_input: y,
+ **feed_dict})
+
+ x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
+
+ x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
+ x = np.clip(x, 0, 255) # ensure valid pixel range
+
+ return x
+
+ def perturb_l2(self, x_nat, y, sess, feed_dict={}):
+ """Given a set of examples (x_nat, y), returns a set of adversarial
+ examples within epsilon of x_nat in l_2 norm."""
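+    # Note: the norms/projections below are computed over the entire batch tensor
+    # rather than per example.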
+ if self.rand:
+ pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
+ pert_norm = np.linalg.norm(pert)
+ pert = pert / max(1, pert_norm)
+ else:
+ pert = np.zeros(x_nat.shape)
+
+ for i in range(self.num_steps):
+ x = x_nat + pert
+ # x = np.clip(x, 0, 255)
+ grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
+ self.model.y_input: y,
+ **feed_dict})
+
+ normalized_grad = grad / np.linalg.norm(grad)
+ pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
+
+ # project pert to norm ball
+ pert_norm = np.linalg.norm(pert)
+ rescale_factor = pert_norm / self.epsilon
+ pert = pert / max(1, rescale_factor)
+
+ x = x_nat + pert
+ x = np.clip(x, 0, 255)
+
+ return x
+
+ # def perturb_TRADES(self, x_nat, y, sess):
+ # """Given a set of examples (x_nat, y), returns a set of adversarial
+ # examples within epsilon of x_nat in l_2 norm of TRADES Loss."""
+ # if self.rand:
+ # pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
+ # pert_norm = np.linalg.norm(pert)
+ # pert = pert / max(1, pert_norm)
+ # else:
+ # pert = np.zeros(x_nat.shape)
+
+ # nat_logit = sess.run(model.pre_softmax, feed_dict={self.model.x_input: x_nat,
+ # self.model.y_input: y})
+ # for i in range(self.num_steps):
+ # x = x_nat + pert
+ # grad = sess.run(self.grad2, feed_dict={self.model.x_input: x,
+ # self.model.y_input: y, self.logit: nat_logit})
+ # normalized_grad = grad / np.linalg.norm(grad)
+ # pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
+ # pert_norm = np.linalg.norm(pert)
+ # rescale_factor = pert_norm / self.epsilon
+ # pert = pert / max(1, rescale_factor)
+
+ # #x = x_nat + pert
+ # x = np.clip(x, 0, 255)
+
+ # return x
+
+
+ def modified_perturb_l2(self, x_nat, y, feed_dict={}):
+ if self.rand:
+ pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
+ pert_norm = np.linalg.norm(pert)
+ pert = pert / max(1, pert_norm)
+ else:
+ pert = np.zeros(x_nat.shape)
+
+ for i in range(self.num_steps):
+ x = x_nat + pert
+ # x = np.clip(x, 0, 255)
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
+ self.model.y_input: y,
+ **feed_dict})
+
+ normalized_grad = grad / np.linalg.norm(grad)
+ pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
+
+ # project pert to norm ball
+ pert_norm = np.linalg.norm(pert)
+ rescale_factor = pert_norm / self.epsilon
+ pert = pert / max(1, rescale_factor)
+
+ x = x_nat + pert
+ x = np.clip(x, 0, 255)
+
+ return (x - x_nat)
+
+
diff --git a/case_studies/curriculum_at/README.md b/case_studies/curriculum_at/README.md
new file mode 100644
index 0000000..de23be6
--- /dev/null
+++ b/case_studies/curriculum_at/README.md
@@ -0,0 +1,37 @@
+# Get Fooled for the Right Reason
+Official repository for the NeurIPS 2021 paper _Get Fooled for the Right Reason: Improving Adversarial Robustness through a Teacher-guided Curriculum Learning Approach_.
+
+## Dependencies
+1. Tensorflow 1.14.0
+2. Python 3.7
+
+## Datasets
+CIFAR10: https://www.cs.toronto.edu/~kriz/cifar.html
+
+## Models
+`modelGTP_cifar10`: https://www.dropbox.com/sh/29n2lt08ypjdw67/AABSZlD8nTM08E-bcZv1mdkOa?dl=0
+
+## Usage
+1. Install dependencies with `pip install -r requirements.txt`. Preferably, create an Anaconda environment first.
+2. Download and save datasets in `datasets/` folder.
+3. Download and save model in `models/` folder.
+4. Run `python eval_attack.py`.
+5. The evaluation results will be stored in the `attack_log` directory.
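+
+For example, assuming the dataset and model archives have been placed in `datasets/` and `models/` as described above, the evaluation boils down to:
+
+```bash
+pip install -r requirements.txt   # install dependencies
+python eval_attack.py             # runs FGSM, PGD-5, PGD-10, PGD-20 and logs to attack_log/
+```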
+
+### Note
+Using a GPU is highly recommended.
+
+
+## Code overview
+- `model_new.py`: contains code for IGAM model architectures.
+- `cifar10_input.py`: provides utility functions and classes for loading the CIFAR10 dataset.
+- `PGD_attack.py`: generates adversarial examples and saves them in `attacks/`.
+- `run_attack.py`: evaluates model on adversarial examples from `attacks/`.
+- `config_attack.py`: parameters for adversarial example evaluation.
+- `eval_attack.py`: runs **FGSM, PGD-5, PGD-10, PGD-20** attacks and logs the results in `attack_log` directory. However, you can get results for any attack by modifying the `num_steps` flag in the code.
+
+## Acknowledgements
+
+Useful code bases we used in our work:
+- https://github.com/MadryLab/cifar10_challenge (for adversarial example generation and evaluation)
+- https://github.com/ashafahi/free_adv_train (for model code)
diff --git a/case_studies/curriculum_at/adversarial_evaluation.py b/case_studies/curriculum_at/adversarial_evaluation.py
new file mode 100644
index 0000000..09cde63
--- /dev/null
+++ b/case_studies/curriculum_at/adversarial_evaluation.py
@@ -0,0 +1,237 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import tensorflow as tf
+import numpy as np
+
+import cifar10_input
+
+import config_attack
+
+import sys
+import math
+from tqdm import tqdm
+
+from case_studies.curriculum_at.PGD_attack import LinfPGDAttack
+
+if __name__ == '__main__':
+ config = vars(config_attack.get_args())
+
+ tf.set_random_seed(config['tf_seed'])
+ np.random.seed(config['np_seed'])
+
+ model_file = tf.train.latest_checkpoint(config['model_dir'])
+ # print("config['model_dir']: ", config['model_dir'])
+ if model_file is None:
+ print('No model found')
+ sys.exit()
+
+ if 'GTP' in config['model_dir']:
+ from model_new import Model, ModelTinyImagnet
+
+ if config['dataset'] == 'cifar10' or config['dataset'] == 'cifar100':
+ # TODO: verify this with the authors
+ # ATTENTION: mode was "train" before
+ model = Model(mode=config["inference_mode"], dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True)
+ else:
+ model = ModelTinyImagnet(mode='train', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True)
+
+ elif 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
+ 'model_dir']:
+ print("finetuned tinyimagenet MODEL")
+ from model_new import ModelTinyImagenetSourceExtendedLogits
+
+ full_source_model_x_input = tf.placeholder(tf.float32,
+ shape=[None, 32, 32, 3])
+ upresized_full_source_model_x_input = tf.image.resize_images(
+ full_source_model_x_input, size=[64, 64])
+ if config['dataset'] == 'cifar10':
+ model = ModelTinyImagenetSourceExtendedLogits(mode='train',
+ dataset='tinyimagenet',
+ target_task_class_num=10,
+ train_batch_size=config[
+ 'eval_batch_size'],
+ input_tensor=upresized_full_source_model_x_input)
+ elif config['dataset'] == 'cifar100':
+ model = ModelTinyImagenetSourceExtendedLogits(mode='train',
+ dataset='tinyimagenet',
+ target_task_class_num=100,
+ train_batch_size=config[
+ 'eval_batch_size'],
+ input_tensor=upresized_full_source_model_x_input)
+
+ model.x_input = full_source_model_x_input
+
+ t_vars = tf.trainable_variables()
+ source_model_vars = [var for var in t_vars if (
+ 'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
+ source_model_target_logit_vars = [var for var in t_vars if
+ 'target_task_logit' in var.name]
+ source_model_saver = tf.train.Saver(var_list=source_model_vars)
+ finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
+ finetuned_source_model_saver = tf.train.Saver(
+ var_list=finetuned_source_model_vars)
+ elif 'finetuned_on_cifar100' in config['model_dir']:
+ raise NotImplementedError
+ print("finetuned MODEL")
+ from model_original_cifar_challenge import ModelExtendedLogits
+
+ model = ModelExtendedLogits(mode='train', target_task_class_num=100,
+ train_batch_size=config['eval_batch_size'])
+
+ t_vars = tf.trainable_variables()
+ source_model_vars = [var for var in t_vars if (
+ 'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
+ source_model_target_logit_vars = [var for var in t_vars if
+ 'target_task_logit' in var.name]
+ source_model_saver = tf.train.Saver(var_list=source_model_vars)
+ finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
+ finetuned_source_model_saver = tf.train.Saver(
+ var_list=finetuned_source_model_vars)
+ elif ('adv_trained' in config['model_dir'] or 'naturally_trained' in config[
+ 'model_dir'] or 'a_very_robust_model' in config['model_dir']):
+ raise NotImplementedError
+ print("original challenge MODEL")
+ from free_model_original import Model
+
+ model = Model(mode='eval', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'])
+ elif 'IGAM' in config['model_dir']:
+ print("IGAM MODEL")
+ from model_new import Model
+
+ model = Model(mode='train', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True)
+ else:
+ raise NotImplementedError
+ print("other MODEL")
+ from free_model import Model
+
+ model = Model(mode='eval', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'])
+
+ attack = LinfPGDAttack(model,
+ config['epsilon'],
+ config['num_steps'],
+ config['step_size'],
+ config['random_start'],
+ config['loss_func'],
+ dataset=config['dataset'])
+ saver = tf.train.Saver()
+
+ data_path = config['data_path']
+ # print(data_path)
+ # x = input()
+
+ if config['dataset'] == 'cifar10':
+ # print("load cifar10 dataset")
+ cifar = cifar10_input.CIFAR10Data(data_path)
+ elif config['dataset'] == 'cifar100':
+ raise NotImplementedError
+ print("load cifar100 dataset")
+ cifar = cifar100_input.CIFAR100Data(data_path)
+ else:
+ raise NotImplementedError
+ print("load tinyimagenet dataset")
+ cifar = tinyimagenet_input.TinyImagenetData()
+
+ with tf.Session() as sess:
+ # Restore the checkpoint
+ if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
+ 'model_dir']:
+ sess.run(tf.global_variables_initializer())
+ source_model_file = tf.train.latest_checkpoint(
+ "models/model_AdvTrain-igamsource-IGAM-tinyimagenet_b16")
+ source_model_saver.restore(sess, source_model_file)
+ finetuned_source_model_file = tf.train.latest_checkpoint(
+ config['model_dir'])
+ finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ elif 'finetuned_on_cifar100' in config['model_dir']:
+ sess.run(tf.global_variables_initializer())
+ source_model_file = tf.train.latest_checkpoint("models/adv_trained")
+ source_model_saver.restore(sess, source_model_file)
+ finetuned_source_model_file = tf.train.latest_checkpoint(
+ config['model_dir'])
+ finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ else:
+ saver.restore(sess, model_file)
+
+ # Iterate over the samples batch-by-batch
+ num_eval_examples = config['num_eval_examples']
+ eval_batch_size = config['eval_batch_size']
+ num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
+
+ x_adv = [] # adv accumulator
+ x = []
+ y = []
+ y_p = []
+ y_adv = []
+ is_correct = []
+ # print('Iterating over {} batches'.format(num_batches))
+
+ for ibatch in tqdm(range(num_batches)):
+ bstart = ibatch * eval_batch_size
+ bend = min(bstart + eval_batch_size, num_eval_examples)
+
+ x_batch = cifar.eval_data.xs[bstart:bend, :]
+ y_batch = cifar.eval_data.ys[bstart:bend]
+
+ if config['attack_norm'] == 'inf':
+ x_batch_adv = attack.perturb(x_batch, y_batch, sess)
+ elif config['attack_norm'] == '2':
+ x_batch_adv = attack.perturb_l2(x_batch, y_batch, sess)
+ elif config['attack_norm'] == 'TRADES':
+ x_batch_adv = attack.perturb_TRADES(x_batch, y_batch, sess)
+ elif config['attack_norm'] == "":
+ x_batch_adv = x_batch
+
+ y_pred = sess.run(model.predictions, feed_dict={model.x_input: x_batch_adv})
+
+ y_pred_clean = sess.run(model.predictions, feed_dict={model.x_input: x_batch})
+
+ x_adv.append(x_batch_adv)
+ x.append(x_batch)
+ y.append(y_batch)
+ y_p.append(y_pred_clean)
+ y_adv.append(y_pred)
+
+ is_correct.append(y_pred == y_batch)
+ is_correct = np.concatenate(is_correct)
+ x_adv = np.concatenate(x_adv)
+ x = np.concatenate(x)
+ y = np.concatenate(y)
+ y_p = np.concatenate(y_p)
+ y_adv = np.concatenate(y_adv)
+ if config["save_data_path"] is not None:
+ x = x.astype(int)
+ x_adv = x_adv.astype(int)
+ np.savez(config["save_data_path"], x_a=x, x_b=x_adv, y_a=y_p, y_b=y_adv)
+ print(f"Robust accuracy: {np.mean(is_correct)}")
+
+
diff --git a/case_studies/curriculum_at/adversarial_evaluation.sh b/case_studies/curriculum_at/adversarial_evaluation.sh
new file mode 100644
index 0000000..707d10c
--- /dev/null
+++ b/case_studies/curriculum_at/adversarial_evaluation.sh
@@ -0,0 +1,51 @@
+nsamples=10000
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using their attack parameters and train mode (default)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --step_size=2 \
+ --num_steps=20 \
+ --loss_func=xent \
+ --inference_mode=train \
+ --num_eval_examples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using adapted attack parameters and train mode (default)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --random_start \
+ --step_size=0.5 \
+ --num_steps=75 \
+ --loss_func=logit-diff \
+ --inference_mode=train \
+ --num_eval_examples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using their attack parameters and eval mode (modified)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --step_size=2 \
+ --num_steps=20 \
+ --loss_func=xent \
+ --inference_mode=eval \
+ --num_eval_examples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using adapted attack parameters and eval mode (modified)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --random_start \
+ --step_size=0.5 \
+ --num_steps=75 \
+ --loss_func=logit-diff \
+ --inference_mode=eval \
+ --num_eval_examples=$nsamples
\ No newline at end of file
diff --git a/case_studies/curriculum_at/adversarial_evaluation_0.5.sh b/case_studies/curriculum_at/adversarial_evaluation_0.5.sh
new file mode 100644
index 0000000..960420e
--- /dev/null
+++ b/case_studies/curriculum_at/adversarial_evaluation_0.5.sh
@@ -0,0 +1,57 @@
+nsamples=10000
+
+echo "Running attacks with epsilon=128/255"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using their attack parameters and train mode (default)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --step_size=2 \
+ --num_steps=20 \
+ --loss_func=xent \
+ --inference_mode=train \
+ --epsilon=128 \
+ --num_eval_examples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using adapted attack parameters and train mode (default)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --random_start \
+ --step_size=0.5 \
+ --num_steps=75 \
+ --loss_func=logit-diff \
+ --inference_mode=train \
+ --epsilon=128 \
+ --num_eval_examples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using their attack parameters and eval mode (modified)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --step_size=2 \
+ --num_steps=20 \
+ --loss_func=xent \
+ --inference_mode=eval \
+ --epsilon=128 \
+ --num_eval_examples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using adapted attack parameters and eval mode (modified)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/adversarial_evaluation.py \
+ --random_start \
+ --step_size=0.5 \
+ --num_steps=75 \
+ --loss_func=logit-diff \
+ --inference_mode=eval \
+ --epsilon=128 \
+ --num_eval_examples=$nsamples
\ No newline at end of file
diff --git a/case_studies/curriculum_at/binarization_test.py b/case_studies/curriculum_at/binarization_test.py
new file mode 100644
index 0000000..66fbab9
--- /dev/null
+++ b/case_studies/curriculum_at/binarization_test.py
@@ -0,0 +1,356 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+
+import torch
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import tensorflow as tf
+import numpy as np
+
+import cifar10_input
+
+import config_attack
+
+import sys
+import math
+from tqdm import tqdm
+import tqdm_utils
+
+from PGD_attack import LinfPGDAttack
+from active_tests.decision_boundary_binarization import interior_boundary_discrimination_attack
+
+if __name__ == '__main__':
+ config = vars(config_attack.get_args())
+
+ tf.set_random_seed(config['tf_seed'])
+ np.random.seed(config['np_seed'])
+
+ model_file = tf.train.latest_checkpoint(config['model_dir'])
+ # print("config['model_dir']: ", config['model_dir'])
+ if model_file is None:
+ print('No model found')
+ sys.exit()
+
+ if 'GTP' in config['model_dir']:
+ from model_new import Model, ModelTinyImagnet
+
+ if config['dataset'] == 'cifar10' or config['dataset'] == 'cifar100':
+ # TODO: verify this with the authors
+ # ATTENTION: mode was "train" before
+ model = Model(mode=config["inference_mode"], dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True)
+ else:
+ model = ModelTinyImagnet(mode='train', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True)
+
+ elif 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
+ 'model_dir']:
+ print("finetuned tinyimagenet MODEL")
+ from model_new import ModelTinyImagenetSourceExtendedLogits
+
+ full_source_model_x_input = tf.placeholder(tf.float32,
+ shape=[None, 32, 32, 3])
+ upresized_full_source_model_x_input = tf.image.resize_images(
+ full_source_model_x_input, size=[64, 64])
+ if config['dataset'] == 'cifar10':
+ model = ModelTinyImagenetSourceExtendedLogits(mode='train',
+ dataset='tinyimagenet',
+ target_task_class_num=10,
+ train_batch_size=config[
+ 'eval_batch_size'],
+ input_tensor=upresized_full_source_model_x_input)
+ elif config['dataset'] == 'cifar100':
+ model = ModelTinyImagenetSourceExtendedLogits(mode='train',
+ dataset='tinyimagenet',
+ target_task_class_num=100,
+ train_batch_size=config[
+ 'eval_batch_size'],
+ input_tensor=upresized_full_source_model_x_input)
+
+ model.x_input = full_source_model_x_input
+
+ t_vars = tf.trainable_variables()
+ source_model_vars = [var for var in t_vars if (
+ 'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
+ source_model_target_logit_vars = [var for var in t_vars if
+ 'target_task_logit' in var.name]
+ source_model_saver = tf.train.Saver(var_list=source_model_vars)
+ finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
+ finetuned_source_model_saver = tf.train.Saver(
+ var_list=finetuned_source_model_vars)
+ elif 'finetuned_on_cifar100' in config['model_dir']:
+ raise NotImplementedError
+ print("finetuned MODEL")
+ from model_original_cifar_challenge import ModelExtendedLogits
+
+ model = ModelExtendedLogits(mode='train', target_task_class_num=100,
+ train_batch_size=config['eval_batch_size'])
+
+ t_vars = tf.trainable_variables()
+ source_model_vars = [var for var in t_vars if (
+ 'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
+ source_model_target_logit_vars = [var for var in t_vars if
+ 'target_task_logit' in var.name]
+ source_model_saver = tf.train.Saver(var_list=source_model_vars)
+ finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
+ finetuned_source_model_saver = tf.train.Saver(
+ var_list=finetuned_source_model_vars)
+ elif ('adv_trained' in config['model_dir'] or 'naturally_trained' in config[
+ 'model_dir'] or 'a_very_robust_model' in config['model_dir']):
+ raise NotImplementedError
+ print("original challenge MODEL")
+ from free_model_original import Model
+
+ model = Model(mode='eval', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'])
+ elif 'IGAM' in config['model_dir']:
+ print("IGAM MODEL")
+ from model_new import Model
+
+ model = Model(mode='train', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True)
+ else:
+ raise NotImplementedError
+ print("other MODEL")
+ from free_model import Model
+
+ model = Model(mode='eval', dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'])
+
+ saver = tf.train.Saver()
+
+ data_path = config['data_path']
+ # print(data_path)
+ # x = input()
+
+ if config['dataset'] == 'cifar10':
+ # print("load cifar10 dataset")
+ cifar = cifar10_input.CIFAR10Data(data_path)
+ elif config['dataset'] == 'cifar100':
+ raise NotImplementedError
+ print("load cifar100 dataset")
+ cifar = cifar100_input.CIFAR100Data(data_path)
+ else:
+ raise NotImplementedError
+ print("load tinyimagenet dataset")
+ cifar = tinyimagenet_input.TinyImagenetData()
+
+ with tf.Session() as sess:
+ # Restore the checkpoint
+ if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
+ 'model_dir']:
+ sess.run(tf.global_variables_initializer())
+ source_model_file = tf.train.latest_checkpoint(
+ "models/model_AdvTrain-igamsource-IGAM-tinyimagenet_b16")
+ source_model_saver.restore(sess, source_model_file)
+ finetuned_source_model_file = tf.train.latest_checkpoint(
+ config['model_dir'])
+ finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ elif 'finetuned_on_cifar100' in config['model_dir']:
+ sess.run(tf.global_variables_initializer())
+ source_model_file = tf.train.latest_checkpoint("models/adv_trained")
+ source_model_saver.restore(sess, source_model_file)
+ finetuned_source_model_file = tf.train.latest_checkpoint(
+ config['model_dir'])
+ finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ else:
+ saver.restore(sess, model_file)
+
+ # Iterate over the samples batch-by-batch
+ num_eval_examples = config['num_eval_examples']
+ eval_batch_size = config['eval_batch_size']
+ num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
+
+ class ModelWrapper:
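+    # Wraps the defended network for the binarization test: the penultimate features
+    # (model.neck) are fed through a fresh binary linear readout whose weight and bias
+    # come in via placeholders, so that the PGD attack from PGD_attack.py can be run
+    # against the binarized classifier.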
+ def __init__(self, model, weight_shape, bias_shape, num_classes=2):
+ self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
+ self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
+
+ y = model.neck
+
+ # TODO: check whether we need a separate placeholder for the binary label
+ self.y_input = model.y_input
+ self.x_input = model.x_input
+
+ self.logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
+ self.predictions = tf.argmax(self.logits, 1)
+
+ self.pre_softmax = self.logits
+
+ # define losses
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+
+ # for top-2 logit diff loss
+ self.label_mask = tf.one_hot(self.y_input,
+ num_classes,
+ on_value=1.0,
+ off_value=0.0,
+ dtype=tf.float32)
+ self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
+ self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
+ # TODO: why the plus 50?
+ # self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
+ self.top2_logit_diff_loss = -self.correct_logit + self.wrong_logit
+
+
+ wrapped_model = ModelWrapper(model, (2, 640), (2,))
+
+ attack = LinfPGDAttack(wrapped_model,
+ config['epsilon'],
+ config['num_steps'],
+ config['step_size'],
+ config['random_start'],
+ config['loss_func'],
+ dataset=config['dataset'])
+
+ def run_attack(m, l):
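+      # m is the binarized model produced by the test (frozen feature extractor plus a
+      # freshly trained binary linear layer); only the linear layer's parameters are
+      # needed here, since the TF graph already contains the feature extractor.
+      # l is a data loader containing the single batch of points to attack.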
+ linear_layer = m[-1]
+ del m
+
+ # initialize an attack (it's a white box attack, and it's allowed to look
+ # at the internals of the model in any way it wants)
+ # attack = BPDA(sess, model, epsilon=model.threat_model.epsilon, debug=args.debug)
+ # m = PyTorchToTensorFlow1Wrapper(m, "cpu")
+
+ weights_feed_dict = {
+ wrapped_model.weight: linear_layer.weight.data.numpy(),
+ wrapped_model.bias: linear_layer.bias.data.numpy()
+ }
+
+ assert len(l) == 1, len(l)
+ for x, y in l:
+ x_batch = x.numpy().transpose((0, 2, 3, 1)) * 255.0
+ y_batch = y.numpy()
+
+ if config['attack_norm'] == 'inf':
+ x_batch_adv = attack.perturb(x_batch, y_batch, sess, weights_feed_dict)
+ elif config['attack_norm'] == '2':
+ x_batch_adv = attack.perturb_l2(x_batch, y_batch, sess, weights_feed_dict)
+ elif config['attack_norm'] == 'TRADES':
+ x_batch_adv = attack.perturb_TRADES(x_batch, y_batch, sess, weights_feed_dict)
+
+ logits, y_pred = sess.run((wrapped_model.logits, wrapped_model.predictions),
+ feed_dict={model.x_input: x_batch_adv,
+ **weights_feed_dict})
+ is_adv = y_pred != y_batch
+
+ return is_adv, (torch.Tensor(x_batch_adv) / 255.0, torch.Tensor(logits))
+
+ random_indices = list(range(len(cifar.eval_data.xs)))
+ np.random.shuffle(random_indices)
+
+ x_batch = []
+ y_batch = []
+ for j in range(config['num_eval_examples']):
+ x_ = cifar.eval_data.xs[random_indices[j]]
+ y_ = cifar.eval_data.ys[random_indices[j]]
+ x_batch.append(x_)
+ y_batch.append(y_)
+ x_batch = np.array(x_batch).transpose((0, 3, 1, 2)) / 255.0
+ y_batch = np.array(y_batch)
+
+ from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper, PyTorchToTensorFlow1Wrapper
+ from utils import build_dataloader_from_arrays
+
+ test_loader = build_dataloader_from_arrays(x_batch, y_batch, batch_size=32)
+
+ def _model_forward_pass(x, features_and_logits: bool = False, features_only: bool = False):
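+      # x arrives as a PyTorch-style NCHW array in [0, 1]; the TF graph expects NHWC
+      # inputs in [0, 255], hence the transpose and rescaling below.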
+ if features_and_logits:
+ assert not features_only, "Only one of the flags must be set."
+ if features_and_logits:
+ return sess.run(
+ (model.neck, model.pre_softmax),
+ feed_dict={model.x_input: x.transpose(0, 2, 3, 1) * 255.0})
+ elif features_only:
+ return sess.run(
+ model.neck,
+ feed_dict={model.x_input: x.transpose(0, 2, 3, 1) * 255.0})
+ else:
+ raise ValueError
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=_model_forward_pass,
+ logit_forward_and_backward_pass=lambda x, **kwargs: sess.run(
+ model.feature_grad,
+ feed_dict={model.x_input: x.transpose(0, 2, 3, 1) * 255.0}) / 255.0
+ )
+
+ assert config["n_boundary_points"] is not None
+ assert config["n_inner_points"] is not None
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+
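+    # Run the decision-boundary binarization test: around each test sample a binary
+    # linear readout is fit on the frozen features such that, by construction, an
+    # adversarial example exists inside the epsilon ball; the attack defined above is
+    # then asked to find it. A low ASR on this deliberately easy surrogate indicates a
+    # weak attack/evaluation rather than a strong defense.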
+ with tqdm_utils.tqdm_print():
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, kwargs: run_attack(m, l),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=config["epsilon"]/255.0,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=config["n_boundary_points"],
+ n_inner_points=config["n_inner_points"],
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=config['num_eval_examples'],
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ rescale_logits="adaptive",
+ sample_training_data_from_corners=config["sample_from_corners"],
+ decision_boundary_closeness=0.99999
+ #args.num_samples_test * 10
+ )
+
+ scores = [it[0] for it in scores_logit_differences_and_validation_accuracies]
+ validation_scores = [it[3] for it in scores_logit_differences_and_validation_accuracies]
+ if validation_scores[0] is None:
+ validation_scores = (np.nan, np.nan)
+ else:
+ validation_scores = np.array(validation_scores)
+ validation_scores = tuple(np.mean(validation_scores, 0))
+ logit_differences = [(it[1], it[2]) for it in
+ scores_logit_differences_and_validation_accuracies]
+ logit_differences = np.array(logit_differences)
+    relative_performance = (logit_differences[:, 0] - logit_differences[:, 1]) / logit_differences[:, 1]
+
+ test_result = (np.mean(scores), np.mean(relative_performance),
+ np.std(relative_performance), validation_scores)
+
+ print("\tinterior-vs-boundary discrimination (ce loss), ASR: {0}\n”, "
+ "\t\tNormalized Logit-Difference-Improvement: {1} +- {2}\n"
+ "\t\tValidation Accuracy (inner, boundary): {3}".format(
+ *test_result))
+
+
diff --git a/case_studies/curriculum_at/binarization_test.sh b/case_studies/curriculum_at/binarization_test.sh
new file mode 100644
index 0000000..6feac6e
--- /dev/null
+++ b/case_studies/curriculum_at/binarization_test.sh
@@ -0,0 +1,42 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+mode=${3:-train}
+
+kwargs=""
+kwargs="--sample-from-corners"
+
+echo "Attacking $nsamples with epsilon = $epsilon and model inference = $mode"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (their attack parameters)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/binarization_test.py \
+ --step_size=2 \
+ --num_steps=20 \
+ --loss_func=xent \
+ --n_boundary_points=1 \
+ --n_inner_points=999 \
+ --epsilon=$epsilon \
+ --inference_mode=$mode \
+ --num_eval_examples=$nsamples \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (modified attack parameters)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ case_studies/curriculum_at/binarization_test.py \
+ --random_start \
+ --step_size=0.5 \
+ --num_steps=50 \
+ --loss_func=logit-diff \
+ --n_boundary_points=1 \
+ --n_inner_points=999 \
+ --epsilon=$epsilon \
+ --inference_mode=$mode \
+ --num_eval_examples=$nsamples\
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/curriculum_at/cifar10_input.py b/case_studies/curriculum_at/cifar10_input.py
new file mode 100644
index 0000000..74505cf
--- /dev/null
+++ b/case_studies/curriculum_at/cifar10_input.py
@@ -0,0 +1,204 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for importing the CIFAR10 dataset.
+Each image in the dataset is a numpy array of shape (32, 32, 3), with the values
+being unsigned integers (i.e., in the range 0,1,...,255).
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import pickle
+import sys
+import tensorflow as tf
+import numpy as np
+import re
+
+version = sys.version_info
+
+
+class CIFAR10Data(object):
+ """
+ Unpickles the CIFAR10 dataset from a specified folder containing a pickled
+ version following the format of Krizhevsky which can be found
+ [here](https://www.cs.toronto.edu/~kriz/cifar.html).
+ Inputs to constructor
+ =====================
+ - path: path to the pickled dataset. The training data must be pickled
+ into five files named data_batch_i for i = 1, ..., 5, containing 10,000
+ examples each, the test data
+ must be pickled into a single file called test_batch containing 10,000
+ examples, and the 10 class names must be
+ pickled into a file called batches.meta. The pickled examples should
+ be stored as a tuple of two objects: an array of 10,000 32x32x3-shaped
+ arrays, and an array of their 10,000 true labels.
+ """
+
+ def __init__(self, path, batch_start = 0, init_shuffle=True, train_size_ratio=1):
+ num_classes = 10
+ path = CIFAR10Data.rec_search(path)
+ train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]
+ eval_filename = 'test_batch'
+ metadata_filename = 'batches.meta'
+
+ train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')
+ train_labels = np.zeros(50000, dtype='int32')
+ for ii, fname in enumerate(train_filenames):
+ cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))
+ train_images[ii * 10000: (ii + 1) * 10000, ...] = cur_images
+ train_labels[ii * 10000: (ii + 1) * 10000, ...] = cur_labels
+ eval_images, eval_labels = self._load_datafile(
+ os.path.join(path, eval_filename))
+
+ with open(os.path.join(path, metadata_filename), 'rb') as fo:
+ if version.major == 3:
+ data_dict = pickle.load(fo, encoding='bytes')
+ else:
+ data_dict = pickle.load(fo)
+
+ self.label_names = data_dict[b'label_names']
+ for ii in range(len(self.label_names)):
+ self.label_names[ii] = self.label_names[ii].decode('utf-8')
+
+ if train_size_ratio < 1:
+ new_train_images = []
+ new_train_labels = []
+ for class_ind in range(num_classes):
+ current_class_train_images = train_images[train_labels == class_ind]
+ num_train_per_class = int(current_class_train_images.shape[0] * train_size_ratio)
+ new_train_images.append(current_class_train_images[:num_train_per_class])
+ new_train_labels.append(np.full(num_train_per_class, class_ind, dtype='int32'))
+ train_images = np.concatenate(new_train_images, axis=0)
+ train_labels = np.concatenate(new_train_labels)
+
+ self.train_data = DataSubset(train_images, train_labels, batch_start = batch_start, init_shuffle=init_shuffle)
+ self.eval_data = DataSubset(eval_images, eval_labels, batch_start = batch_start, init_shuffle=init_shuffle)
+
+ @staticmethod
+ def rec_search(original_path):
+ rx = re.compile(r'data_batch_[0-9]+')
+ r = []
+ for path, _, file_names in os.walk(original_path):
+ r.extend([os.path.join(path, x) for x in file_names if rx.search(x)])
+      if len(r) == 0:
+ return original_path
+ return os.path.dirname(r[0])
+
+ @staticmethod
+ def _load_datafile(filename):
+ with open(filename, 'rb') as fo:
+ if version.major == 3:
+ data_dict = pickle.load(fo, encoding='bytes')
+ else:
+ data_dict = pickle.load(fo)
+
+ assert data_dict[b'data'].dtype == np.uint8
+ image_data = data_dict[b'data']
+ image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
+ return image_data, np.array(data_dict[b'labels'])
+
+
+class AugmentedCIFAR10Data(object):
+ """
+ Data augmentation wrapper over a loaded dataset.
+ Inputs to constructor
+ =====================
+ - raw_cifar10data: the loaded CIFAR10 dataset, via the CIFAR10Data class
+ - sess: current tensorflow session
+ - model: current model (needed for input tensor)
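+
+  Illustrative usage (a sketch; assumes an active tf.Session `sess`, a
+  CIFAR10Data instance `raw_cifar10`, and a `model` as described above):
+
+      augmented = AugmentedCIFAR10Data(raw_cifar10, sess, model)
+      xs, ys = augmented.train_data.get_next_batch(128, multiple_passes=True)
+      # xs are randomly padded/cropped and flipped float32 images.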
+ """
+
+ def __init__(self, raw_cifar10data, sess, model):
+ assert isinstance(raw_cifar10data, CIFAR10Data)
+ self.image_size = 32
+
+ # create augmentation computational graph
+ self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
+ padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
+ img, self.image_size + 4, self.image_size + 4),
+ self.x_input_placeholder)
+ cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,
+ self.image_size,
+ 3]), padded)
+ flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
+ self.augmented = flipped
+
+ self.train_data = AugmentedDataSubset(raw_cifar10data.train_data, sess,
+ self.x_input_placeholder,
+ self.augmented)
+ self.eval_data = AugmentedDataSubset(raw_cifar10data.eval_data, sess,
+ self.x_input_placeholder,
+ self.augmented)
+ self.label_names = raw_cifar10data.label_names
+
+
+class DataSubset(object):
+ def __init__(self, xs, ys, batch_start = 0, init_shuffle=True):
+ self.xs = xs
+ self.n = xs.shape[0]
+ self.ys = ys
+ self.batch_start = batch_start
+ if init_shuffle:
+ self.cur_order = np.random.permutation(self.n)
+ else:
+ self.cur_order = np.arange(self.n)
+
+ def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
+ if self.n < batch_size:
+ raise ValueError('Batch size can be at most the dataset size')
+ if not multiple_passes:
+ actual_batch_size = min(batch_size, self.n - self.batch_start)
+ if actual_batch_size <= 0:
+ raise ValueError('Pass through the dataset is complete.')
+ batch_end = self.batch_start + actual_batch_size
+ batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
+ batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
+ self.batch_start += actual_batch_size
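+      # Pad a final partial batch with zeros so that callers always receive
+      # arrays with exactly `batch_size` entries.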
+ if actual_batch_size < batch_size:
+ print('actual_batch_size < batch_size, padding with zeros')
+ batch_xs_pad = np.zeros(shape=(batch_size - actual_batch_size, batch_xs.shape[1], batch_xs.shape[2], batch_xs.shape[3]), dtype=batch_xs.dtype)
+ batch_ys_pad = np.zeros(batch_size - actual_batch_size, dtype=batch_ys.dtype)
+ batch_xs = np.concatenate([batch_xs, batch_xs_pad], axis=0)
+ batch_ys = np.concatenate([batch_ys, batch_ys_pad], axis=0)
+ return batch_xs, batch_ys
+ actual_batch_size = min(batch_size, self.n - self.batch_start)
+ if actual_batch_size < batch_size:
+ if reshuffle_after_pass:
+ self.cur_order = np.random.permutation(self.n)
+ self.batch_start = 0
+ batch_end = self.batch_start + batch_size
+ batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
+ batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
+ self.batch_start += actual_batch_size
+ return batch_xs, batch_ys
+
+
+class AugmentedDataSubset(object):
+ def __init__(self, raw_datasubset, sess, x_input_placeholder,
+ augmented):
+ self.sess = sess
+ self.raw_datasubset = raw_datasubset
+ self.x_input_placeholder = x_input_placeholder
+ self.augmented = augmented
+
+ def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
+ raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,
+ reshuffle_after_pass)
+    images = raw_batch[0].astype(np.float32)
+    return self.sess.run(self.augmented,
+                         feed_dict={self.x_input_placeholder: images}), raw_batch[1]
diff --git a/case_studies/curriculum_at/config_attack.py b/case_studies/curriculum_at/config_attack.py
new file mode 100644
index 0000000..2c87665
--- /dev/null
+++ b/case_studies/curriculum_at/config_attack.py
@@ -0,0 +1,68 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import configargparse
+import pdb
+
+def pair(arg):
+ return [float(x) for x in arg.split(',')]
+
+def get_args():
+ parser = configargparse.ArgParser(default_config_files=[])
+ parser.add("--model_dir", type=str, default="checkpoints/tf_curriculum_at/modelGTP_cifar10/", help="Path to save/load the checkpoints, default=models/model")
+ parser.add("--data_path", type=str, default="data/cifar-10-batches-py/", help="Path to dataset, default=datasets/cifar10")
+ parser.add("--tf_seed", type=int, default=451760341, help="Random seed for initializing tensor-flow variables to rule out the effect of randomness in experiments, default=45160341")
+ parser.add("--np_seed", type=int, default=216105420, help="Random seed for initializing numpy variables to rule out the effect of randomness in experiments, default=216105420")
+ parser.add("--num_eval_examples", type=int, default=10000, help="Number of eval samples, default=10000")
+ parser.add("--eval_batch_size", type=int, default=512, help="Eval batch size, default=100")
+ parser.add("--epsilon", "-e", type=float, default=8.0, help="Epsilon (Lp Norm distance from the original image) for generating adversarial examples, default=8.0")
+ parser.add("--num_steps", type=int, default=10, help="Number of steps to PGD attack, default=10")
+ parser.add("--ckpt", type=int, default = 0, help = "Checkpoint number for midway evaluation, default = 0")
+ parser.add("--step_size", "-s", type=float, default=2.0, help="Step size in PGD attack for generating adversarial examples in each step, default=2.0")
+ parser.add("--random_start", dest="random_start", action="store_true", help="Random start for PGD attack default=True")
+ parser.add("--no-random_start", dest="random_start", action="store_false", help="No random start for PGD attack default=True")
+ parser.set_defaults(random_start=True)
+ parser.add("--loss_func", "-f", type=str, default="xent", choices=["logit-diff", "xent", "target_task_xent"], help="Loss function for the model, choices are [xent, target_task_xent], default=xent")
+ parser.add("--attack_norm", type=str, default="inf", choices=["", "inf", "2", "TRADES"], help="Lp norm type for attacks, choices are [inf, 2], default=inf")
+ parser.add("--dataset", "-d", type=str, default="cifar10", choices=["cifar10", "cifar100", "tinyimagenet"], help="Path to load dataset, default=cifar10")
+ parser.add("--store_adv_path", type=str, default=None, help="Path to save adversarial examples, default=None")
+ parser.add("--attack_name", type=str, default=None, help="Path to save adversarial examples, default=''")
+ parser.add("--save_eval_log", dest="save_eval_log", action="store_true", help="Save txt file for attack eval")
+ parser.add("--no-save_eval_log", dest="save_eval_log", action="store_false", help="Save txt file for attack eval")
+ parser.set_defaults(save_eval_log=False)
+
+ parser.add("--xfer_attack", dest="xfer_attack", action="store_true", help="Adversarial transfer attack")
+ parser.add("--no-xfer_attack", dest="xfer_attack", action="store_false", help="not adversarial transfer attack")
+ parser.set_defaults(xfer_attack=False)
+ parser.add("--custom_output_model_name", type=str, default=None, help="Custom model name, default=None")
+
+ # for binarization test
+ parser.add("--n_boundary_points", default=None, type=int)
+ parser.add("--n_inner_points", default=None, type=int)
+ parser.add("--sample-from-corners", action="store_true")
+
+ parser.add("--save_data_path", default=None, type=str)
+
+ parser.add("--inference_mode", default="train", choices=("train", "eval"), type=str)
+
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == "__main__":
+ print(get_args())
+ pdb.set_trace()
+
+# TODO Default for model_dir
+# TODO Need to update the helps
diff --git a/case_studies/curriculum_at/eval_attack.py b/case_studies/curriculum_at/eval_attack.py
new file mode 100644
index 0000000..343ff01
--- /dev/null
+++ b/case_studies/curriculum_at/eval_attack.py
@@ -0,0 +1,25 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import subprocess
+
+directory = "models/modelGTP_cifar10"
+subprocess.run("python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name fgsm --save_eval_log --num_steps 1 --no-random_start --step_size 8 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name fgsm --save_eval_log --model_dir {} ; python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {} ; python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds10 --save_eval_log --num_steps 10 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds10 --save_eval_log --num_steps 10 --model_dir {} ; python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds20 --save_eval_log --num_steps 20 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds20 --save_eval_log --num_steps 20 --model_dir {}".format(directory, directory, directory, directory, directory, directory, directory, directory, directory, directory), shell=True)
+print("{}: Ended evaluation on fgsm and pgd attacks".format(datetime.now()))
+
diff --git a/case_studies/curriculum_at/model_new.py b/case_studies/curriculum_at/model_new.py
new file mode 100644
index 0000000..c98fd24
--- /dev/null
+++ b/case_studies/curriculum_at/model_new.py
@@ -0,0 +1,1427 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# based on https://github.com/tensorflow/models/tree/master/resnet
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+import json
+from collections import OrderedDict
+
+
+class Model(object):
+ """ResNet model."""
+
+ def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=True, use_pert=False):
+ """ResNet constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
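+      dataset: 'cifar10', 'cifar100' or 'GTSRB'; any other value is treated
+        as tiny-imagenet (200 classes).
+      train_batch_size: batch size used to build the per-sample gather ops
+        (also needed when mode == 'eval').
+
+    Illustrative usage (a sketch):
+      model = Model('eval', 'cifar10', train_batch_size=batch_size)
+      # model.x_input / model.y_input are the input placeholders,
+      # model.pre_softmax the logits, model.xent the summed cross-entropy.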
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.dataset = dataset
+ self.pert = True if (mode == 'train' and use_pert) else False
+ if dataset == "cifar10":
+ self.num_classes = 10
+ elif dataset == "cifar100":
+ self.num_classes = 100
+ elif dataset == "GTSRB":
+ self.num_classes = 43
+ else:
+ self.num_classes = 200
+ self.train_batch_size = train_batch_size
+ self.activations = []
+ self.normalize_zero_mean = normalize_zero_mean
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ assert self.mode == 'train' or self.mode == 'eval'
+ """Build the core model within the graph."""
+ with tf.variable_scope('classifier'):
+ with tf.variable_scope('input'):
+
+ if self.dataset == 'cifar10' or self.dataset == 'cifar100' or self.dataset == 'GTSRB':
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, 32, 32, 3])
+ else:
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, 64, 64, 3])
+
+ self.y_input = tf.placeholder(tf.int64, shape=None)
+
+ if self.pert:
+ self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, 32, 32, 3], dtype=tf.float32,
+ trainable=True)
+ self.final_input = self.x_input + self.pert
+ self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
+ else:
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,32,32,3])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
+ self.activations.append(x)
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+      # Filter widths of a w28-10 wide residual network; more memory efficient
+      # than a very deep residual network with comparably good performance.
+      # https://arxiv.org/pdf/1605.07146v1.pdf
+ # filters = [16, 16, 32, 64] # for debugging
+ filters = [16, 160, 320, 640]
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+ self.neck = x
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+ self.activations.append(self.pre_softmax)
+ self.softmax = tf.nn.softmax(self.pre_softmax)
+
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
+          # the target logit is independent of the other class logits, whereas the target softmax value depends on them
+ self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ self.feature_grad = tf.gradients(self.neck, self.x_input)[0]
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.y_xent_dbp = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent_dbp = tf.reduce_sum(self.y_xent_dbp, name='y_xent_dbp')
+ self.mean_xent_dbp = tf.reduce_mean(self.y_xent_dbp)
+ self.weight_decay_loss = self._decay()
+ self.temploss = tf.reduce_sum(-tf.multiply(tf.one_hot(self.y_input, self.num_classes), tf.log(tf.clip_by_value(self.softmax, 1e-10, 1.0))), axis = 1)
+
+ # for top-2 logit diff loss
+ self.label_mask = tf.one_hot(self.y_input,
+ self.num_classes,
+ on_value=1.0,
+ off_value=0.0,
+ dtype=tf.float32)
+ self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
+ self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
+ self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
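+          # (CW-style margin: the loss is negative while the best wrong-class
+          # logit is less than the correct-class logit plus 50 and saturates
+          # at zero once it exceeds that margin.)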
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+
+class ModelTinyImagnet(object):
+ """ResNet model."""
+
+ def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=False, use_pert=False):
+ """ResNet constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.pert = True if (mode == 'train' and use_pert) else False
+ if dataset == 'tinyimagenet':
+ self.num_classes = 200
+ self.input_size = 64
+ elif dataset == 'cifar100':
+ self.num_classes = 100
+ self.input_size = 32
+ else:
+ self.num_classes = 10
+ self.input_size = 32
+ self.train_batch_size = train_batch_size
+ self.activations = []
+ self.normalize_zero_mean = normalize_zero_mean
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ assert self.mode == 'train' or self.mode == 'eval'
+ """Build the core model within the graph."""
+ with tf.variable_scope('classifier'):
+ with tf.variable_scope('input'):
+
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.input_size, self.input_size, 3])
+
+ self.y_input = tf.placeholder(tf.int64, shape=None)
+
+ if self.pert:
+ self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.input_size, self.input_size, 3], dtype=tf.float32,
+ trainable=True)
+ self.final_input = self.x_input + self.pert
+ self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
+ else:
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,self.input_size,self.input_size,3])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
+ self.activations.append(x)
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+      # Filter widths of a w28-10 wide residual network; more memory efficient
+      # than a very deep residual network with comparably good performance.
+      # https://arxiv.org/pdf/1605.07146v1.pdf
+ # filters = [16, 16, 32, 64] # for debugging
+ filters = [16, 160, 320, 640]
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+ self.neck = x
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+ self.activations.append(self.pre_softmax)
+ self.softmax = tf.nn.softmax(self.pre_softmax)
+ # y_one_hot = tf.one_hot(self.y_input, self.num_classes)
+ # self.target_softmax = self.softmax * y_one_hot
+
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
+          # the target logit is independent of the other class logits, whereas the target softmax value depends on them
+ self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+ self.temploss = tf.reduce_sum(-tf.multiply(tf.one_hot(self.y_input, self.num_classes), tf.log(tf.clip_by_value(self.softmax, 1e-10, 1.0))), axis = 1)
+
+ # for top-2 logit diff loss
+ self.label_mask = tf.one_hot(self.y_input,
+ self.num_classes,
+ on_value=1.0,
+ off_value=0.0,
+ dtype=tf.float32)
+ self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
+ self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
+ self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+class ModelTinyImagenetSource(object):
+ """ResNet model."""
+
+ def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=False, use_pert=False):
+ """ResNet constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.pert = True if (mode == 'train' and use_pert) else False
+ if dataset == 'tinyimagenet':
+ self.num_classes = 200
+ self.input_size = 64
+ elif dataset == 'cifar100':
+ self.num_classes = 100
+ self.input_size = 32
+ else:
+ self.num_classes = 10
+ self.input_size = 32
+ self.train_batch_size = train_batch_size
+ self.activations = []
+ self.normalize_zero_mean = normalize_zero_mean
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ assert self.mode == 'train' or self.mode == 'eval'
+ """Build the core model within the graph."""
+ with tf.variable_scope('input'):
+
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.input_size, self.input_size, 3])
+
+ self.y_input = tf.placeholder(tf.int64, shape=None)
+
+ if self.pert:
+ self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.input_size, self.input_size, 3], dtype=tf.float32,
+ trainable=True)
+ self.final_input = self.x_input + self.pert
+ self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
+ else:
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,self.input_size,self.input_size,3])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
+ self.activations.append(x)
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+      # Filter widths of a w28-10 wide residual network; more memory efficient
+      # than a very deep residual network with comparably good performance.
+      # https://arxiv.org/pdf/1605.07146v1.pdf
+ # filters = [16, 16, 32, 64] # for debugging
+ filters = [16, 160, 320, 640]
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+ self.neck = x
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+ self.activations.append(self.pre_softmax)
+ self.softmax = tf.nn.softmax(self.pre_softmax)
+
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
+        # the target logit is independent of the other class logits, whereas the target softmax value depends on them
+ self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+
+ # for top-2 logit diff loss
+ self.label_mask = tf.one_hot(self.y_input,
+ self.num_classes,
+ on_value=1.0,
+ off_value=0.0,
+ dtype=tf.float32)
+ self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
+ self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
+ self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+class ModelTinyImagenetSourceExtendedLogits(object):
+ """ResNet model."""
+
+ def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=False, use_pert=False, target_task_class_num=10, input_tensor=None):
+ """ResNet constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.pert = True if (mode == 'train' and use_pert) else False
+ if dataset == 'tinyimagenet':
+ self.num_classes = 200
+ self.input_size = 64
+ elif dataset == 'cifar100':
+ self.num_classes = 100
+ self.input_size = 32
+ else:
+ self.num_classes = 10
+ self.input_size = 32
+ self.train_batch_size = train_batch_size
+ self.activations = []
+ self.normalize_zero_mean = normalize_zero_mean
+ self.input_tensor = input_tensor
+ self.target_task_class_num = target_task_class_num
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ assert self.mode == 'train' or self.mode == 'eval'
+ """Build the core model within the graph."""
+ with tf.variable_scope('input'):
+      if self.input_tensor is None:
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.input_size, self.input_size, 3])
+ else:
+ self.x_input = self.input_tensor
+
+ self.y_input = tf.placeholder(tf.int64, shape=None)
+
+ if self.pert:
+ self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.input_size, self.input_size, 3], dtype=tf.float32,
+ trainable=True)
+ self.final_input = self.x_input + self.pert
+ self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
+ else:
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,self.input_size,self.input_size,3])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
+ self.activations.append(x)
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+      # Filter widths of a w28-10 wide residual network; more memory efficient
+      # than a very deep residual network with comparably good performance.
+      # https://arxiv.org/pdf/1605.07146v1.pdf
+ # filters = [16, 16, 32, 64] # for debugging
+ filters = [16, 160, 320, 640]
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+ self.neck = x
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+ self.activations.append(self.pre_softmax)
+ self.softmax = tf.nn.softmax(self.pre_softmax)
+
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
+      # the target logit is independent of the other class logits, whereas the target softmax value depends on them
+ self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+
+ # for top-2 logit diff loss
+ self.label_mask = tf.one_hot(self.y_input,
+ self.num_classes,
+ on_value=1.0,
+ off_value=0.0,
+ dtype=tf.float32)
+ self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
+ self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
+ self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
+
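+    # Additional fully connected head on the shared features that produces
+    # logits for the target task (target_task_class_num classes); its
+    # cross-entropy is defined under 'target_task_costs' below.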
+ with tf.variable_scope('target_task_logit'):
+ self.target_task_pre_softmax = self._fully_connected(x, self.target_task_class_num)
+
+ self.target_task_softmax = tf.nn.softmax(self.target_task_pre_softmax)
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.target_task_softmax, self.gather_indices, name="targetsoftmax")
+
+ self.target_task_predictions = tf.argmax(self.target_task_pre_softmax, 1)
+ self.target_task_correct_prediction = tf.equal(self.target_task_predictions, self.y_input)
+ self.target_task_num_correct = tf.reduce_sum(
+ tf.cast(self.target_task_correct_prediction, tf.int64))
+ self.target_task_accuracy = tf.reduce_mean(
+ tf.cast(self.target_task_correct_prediction, tf.float32))
+
+ with tf.variable_scope('target_task_costs'):
+ self.target_task_y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.target_task_pre_softmax, labels=self.y_input)
+ self.target_task_xent = tf.reduce_sum(self.target_task_y_xent, name='target_task_y_xent')
+ self.target_task_mean_xent = tf.reduce_mean(self.target_task_y_xent)
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+class ModelExtendedLogitsC2I(object):
+ """ResNet model."""
+ def __init__(self, mode, target_task_class_num=200, train_batch_size=None, input_tensor=None, source_task="cifar10"):
+ """ResNet constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
+ """
+ self.mode = mode
+ self.activations = []
+ self.target_task_class_num = target_task_class_num
+ self.train_batch_size = train_batch_size
+ self.input_tensor = input_tensor
+ self.source_task = source_task
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ assert self.mode == 'train' or self.mode == 'eval'
+ """Build the core model within the graph."""
+ with tf.variable_scope('input'):
+
+      if self.input_tensor is None:
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, 32, 32, 3])
+ else:
+ self.x_input = self.input_tensor
+
+ self.y_input = tf.placeholder(tf.int64, shape=None)
+
+
+ input_standardized = tf.map_fn(lambda img: tf.image.per_image_standardization(img),
+ self.x_input)
+ x = self._conv('init_conv', input_standardized, 3, 3, 16, self._stride_arr(1))
+ self.activations.append(x)
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+    # Filter widths of a w28-10 wide residual network; more memory efficient
+    # than a very deep residual network with comparably good performance.
+    # https://arxiv.org/pdf/1605.07146v1.pdf
+ filters = [16, 160, 320, 640]
+
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+
+ with tf.variable_scope('logit'):
+ if self.source_task == "cifar10":
+ self.pre_softmax = self._fully_connected(x, 10)
+ elif self.source_task == "cifar100":
+ self.pre_softmax = self._fully_connected(x, 100)
+ self.activations.append(self.pre_softmax)
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(
+ tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(
+ tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+
+ with tf.variable_scope('target_task_logit'):
+ self.target_task_pre_softmax = self._fully_connected(x, self.target_task_class_num)
+
+ self.target_task_softmax = tf.nn.softmax(self.target_task_pre_softmax)
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.target_task_softmax, self.gather_indices, name="targetsoftmax")
+
+ # self.target_task_pre_softmax = self._named_fully_connected('target_task_logit', x, self.target_task_class_num)
+
+ self.target_task_predictions = tf.argmax(self.target_task_pre_softmax, 1)
+ self.target_task_correct_prediction = tf.equal(self.target_task_predictions, self.y_input)
+ self.target_task_num_correct = tf.reduce_sum(
+ tf.cast(self.target_task_correct_prediction, tf.int64))
+ self.target_task_accuracy = tf.reduce_mean(
+ tf.cast(self.target_task_correct_prediction, tf.float32))
+
+ with tf.variable_scope('target_task_costs'):
+ self.target_task_y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.target_task_pre_softmax, labels=self.y_input)
+ self.target_task_xent = tf.reduce_sum(self.target_task_y_xent, name='target_task_y_xent')
+ self.target_task_mean_xent = tf.reduce_mean(self.target_task_y_xent)
+ # self.weight_decay_loss = self._decay()
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(
+ inputs=x,
+ decay=.9,
+ center=True,
+ scale=True,
+ activation_fn=None,
+ updates_collections=None,
+ is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride,
+ activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter-in_filter)//2, (out_filter-in_filter)//2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0/n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _named_fully_connected(self, name, x, out_dim):
+ """FullyConnected layer for final output."""
+ with tf.variable_scope(name):
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+
+
+class IgamConvDiscriminatorModel(object):
+ """Simple conv discriminator model."""
+ # based on https://github.com/tensorflow/models/blob/d361076952b73706c5c7ddf9c940bf42c27a3213/research/slim/nets/dcgan.py#L41
+
+  def __init__(self, mode, dataset, train_batch_size=None, num_conv_layers=5,
+               base_num_channels=16, x_modelgrad_input_tensor=None,
+               y_modelgrad_input_tensor=None, x_source_modelgrad_input_tensor=None,
+               y_source_modelgrad_input_tensor=None, normalize_zero_mean=False,
+               only_fully_connected=False, num_fc_layers=3, image_size=32,
+               cropped_input_size=None, crop_pad_x_tensor=None,
+               crop_pad_y_tensor=None, avg_pool_hw=False):
+ """conv disc constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.pert = False
+ self.num_classes = 2 # grad from model or rand init grad
+ self.train_batch_size = train_batch_size
+ self.num_conv_layers = num_conv_layers
+ self.num_fc_layers = num_fc_layers
+ self.base_num_channels = base_num_channels
+ self.x_modelgrad_input_tensor = x_modelgrad_input_tensor
+ self.y_modelgrad_input_tensor = y_modelgrad_input_tensor
+ self.x_source_modelgrad_input_tensor = x_source_modelgrad_input_tensor
+ self.y_source_modelgrad_input_tensor = y_source_modelgrad_input_tensor
+ self.normalize_zero_mean = normalize_zero_mean
+ self.only_fully_connected = only_fully_connected
+ self.image_size = image_size
+ self.cropped_input_size = cropped_input_size
+ self.crop_pad_x_tensor = crop_pad_x_tensor
+ self.crop_pad_y_tensor = crop_pad_y_tensor
+ self.avg_pool_hw = avg_pool_hw
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+    """Build the core model within the graph."""
+    assert self.mode == 'train' or self.mode == 'eval'
+ with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
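+      # The two input streams (x_modelgrad_input and x_source_modelgrad_input) are
+      # concatenated along the batch axis; the discriminator is trained to separate
+      # them into the two classes declared in the constructor (num_classes = 2).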
+ with tf.variable_scope('input'):
+
+ if self.x_modelgrad_input_tensor == None:
+ # for assign to work
+ self.x_modelgrad_input = tf.get_variable(name='x_modelgrad_input', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.image_size, self.image_size, 3], dtype=tf.float32)
+
+ self.x_source_modelgrad_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.image_size, self.image_size, 3])
+ else:
+ self.x_modelgrad_input = self.x_modelgrad_input_tensor
+ self.x_source_modelgrad_input = self.x_source_modelgrad_input_tensor
+
+ if self.cropped_input_size != None:
+ if self.crop_pad_x_tensor == None:
+ crop_pad = (self.image_size - self.cropped_input_size) // 2
+ cropped_x_modelgrad_input = tf.slice(self.x_modelgrad_input, [0, crop_pad, crop_pad, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
+ cropped_x_source_modelgrad_input = tf.slice(self.x_source_modelgrad_input, [0, crop_pad, crop_pad, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
+
+ self.x_input = tf.concat([cropped_x_modelgrad_input, cropped_x_source_modelgrad_input], axis=0)
+ else:
+ cropped_x_modelgrad_input = tf.slice(self.x_modelgrad_input, [0, self.crop_pad_x_tensor, self.crop_pad_y_tensor, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
+ cropped_x_source_modelgrad_input = tf.slice(self.x_source_modelgrad_input, [0, self.crop_pad_x_tensor, self.crop_pad_y_tensor, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
+
+ self.x_input = tf.concat([cropped_x_modelgrad_input, cropped_x_source_modelgrad_input], axis=0)
+ else:
+ self.x_input = tf.concat([self.x_modelgrad_input, self.x_source_modelgrad_input], axis=0)
+ self.cropped_input_size = self.image_size
+
+ if self.y_modelgrad_input_tensor == None:
+ # for assign to work
+ self.y_modelgrad_input = tf.get_variable(name='y_modelgrad_input', initializer=tf.zeros_initializer,
+ shape=self.train_batch_size, dtype=tf.int64)
+
+ self.y_source_modelgrad_input = tf.placeholder(tf.int64, shape=None)
+ else:
+ self.y_modelgrad_input = self.y_modelgrad_input_tensor
+ self.y_source_modelgrad_input = self.y_source_modelgrad_input_tensor
+
+ self.y_input = tf.concat([self.y_modelgrad_input, self.y_source_modelgrad_input], axis=0)
+
+
+ if self.pert:
+ self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.cropped_input_size, self.cropped_input_size, 3], dtype=tf.float32,
+ trainable=True)
+ self.final_input = self.x_input + self.pert
+ self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
+ else:
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1, self.cropped_input_size, self.cropped_input_size,3])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self.input_standardized
+ base_num_channels = self.base_num_channels
+ if self.only_fully_connected == False:
+ for i in range(self.num_conv_layers):
+ output_num_channels = base_num_channels * 2**i
+ if i == 0:
+ x = self._conv('conv{}'.format(i), x, 4, 3, output_num_channels, self._stride_arr(2), bias=True)
+ x = self._batch_norm('bn{}'.format(i), x)
+ x = self._relu(x, 0.1)
+ else:
+ x = self._conv('conv{}'.format(i), x, 4, output_num_channels // 2, output_num_channels, self._stride_arr(2), bias=True)
+ x = self._batch_norm('bn{}'.format(i), x)
+ x = self._relu(x, 0.1)
+ else:
+ for i in range(self.num_fc_layers):
+ if i == self.num_fc_layers -1:
+ x = self._fully_connected(x, base_num_channels//2, name='fc{}'.format(i))
+ else:
+ x = self._fully_connected(x, base_num_channels, name='fc{}'.format(i))
+ x = self._batch_norm('bn{}'.format(i), x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('logit'):
+ if self.avg_pool_hw:
+ x = self._global_avg_pool(x)
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides, bias=False, padding='SAME'):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ if bias == True:
+ b = tf.get_variable('biases', [out_filters],
+ initializer=tf.constant_initializer())
+ conv_out = tf.nn.conv2d(x, kernel, strides, padding=padding)
+ conv_out_b = tf.nn.bias_add(conv_out, b)
+ return conv_out_b
+ else:
+ return tf.nn.conv2d(x, kernel, strides, padding=padding)
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim, name=None):
+ """FullyConnected layer for final output."""
+ if name == None:
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+ else:
+ with tf.variable_scope(name):
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+
diff --git a/case_studies/curriculum_at/requirements.txt b/case_studies/curriculum_at/requirements.txt
new file mode 100644
index 0000000..41a4d0b
--- /dev/null
+++ b/case_studies/curriculum_at/requirements.txt
@@ -0,0 +1,6 @@
+ConfigArgParse==0.14.0
+tqdm==4.31.1
+tensorflow-gpu==1.14.0
+numba>=0.43.1
+matplotlib>=3.0.3
+Pillow==5.4.1
\ No newline at end of file
diff --git a/case_studies/curriculum_at/run_attack.py b/case_studies/curriculum_at/run_attack.py
new file mode 100644
index 0000000..ebf24e3
--- /dev/null
+++ b/case_studies/curriculum_at/run_attack.py
@@ -0,0 +1,463 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Evaluates a model against examples from a .npy file as specified
+
+ in attack_config.json"""
+
+from __future__ import absolute_import
+
+from __future__ import division
+
+from __future__ import print_function
+
+
+
+from datetime import datetime
+
+import json
+
+import math
+
+import os
+
+import sys
+
+import time
+
+
+
+import tensorflow as tf
+
+import numpy as np
+
+from tqdm import tqdm
+
+
+
+import cifar10_input
+
+import config_attack
+
+
+
+
+
+config = vars(config_attack.get_args())
+
+
+
+data_path = config['data_path']
+
+
+
+def run_attack(checkpoint, x_adv, epsilon):
+
+  if config['dataset'] == 'cifar10':
+    cifar = cifar10_input.CIFAR10Data(data_path)
+  elif config['dataset'] == 'cifar100':
+    # cifar100_input is assumed to ship with this case study alongside cifar10_input
+    import cifar100_input
+    cifar = cifar100_input.CIFAR100Data(data_path)
+  else:
+    # tinyimagenet_input is assumed to ship with this case study alongside cifar10_input
+    import tinyimagenet_input
+    cifar = tinyimagenet_input.TinyImagenetData()
+
+
+
+ if 'GTP' in config['model_dir']:
+
+ print("GTP MODEL")
+
+ from model_new import Model, ModelTinyImagnet
+ if config['dataset'] == 'cifar10' or config['dataset'] == 'cifar100':
+ model = Model(mode='train', dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
+ else:
+ model = ModelTinyImagnet(mode='train', dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
+
+ elif 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+
+ print("finetuned tinyimagenet MODEL")
+
+ from model_new import ModelTinyImagenetSourceExtendedLogits
+
+ full_source_model_x_input = tf.placeholder(tf.float32, shape = [None, 32, 32, 3])
+
+ upresized_full_source_model_x_input = tf.image.resize_images(full_source_model_x_input, size=[64, 64])
+
+ if config['dataset'] == 'cifar10':
+
+ model = ModelTinyImagenetSourceExtendedLogits(mode='train', dataset='tinyimagenet', target_task_class_num=10, train_batch_size=config['eval_batch_size'], input_tensor=upresized_full_source_model_x_input)
+
+ elif config['dataset'] == 'cifar100':
+
+ model = ModelTinyImagenetSourceExtendedLogits(mode='train', dataset='tinyimagenet', target_task_class_num=100, train_batch_size=config['eval_batch_size'], input_tensor=upresized_full_source_model_x_input)
+
+
+
+ model.x_input = full_source_model_x_input
+
+
+
+ t_vars = tf.trainable_variables()
+
+ source_model_vars = [var for var in t_vars if ('discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
+
+ source_model_target_logit_vars = [var for var in t_vars if 'target_task_logit' in var.name]
+
+ source_model_saver = tf.train.Saver(var_list=source_model_vars)
+
+ finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
+
+ finetuned_source_model_saver = tf.train.Saver(var_list=finetuned_source_model_vars)
+
+ elif 'finetuned_on_cifar100' in config['model_dir']:
+
+ print("finetuned MODEL")
+
+ from model_original_cifar_challenge import ModelExtendedLogits
+
+ model = ModelExtendedLogits(mode='train', target_task_class_num=100, train_batch_size=config['eval_batch_size'])
+
+
+
+ t_vars = tf.trainable_variables()
+
+ source_model_vars = [var for var in t_vars if ('discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
+
+ source_model_target_logit_vars = [var for var in t_vars if 'target_task_logit' in var.name]
+
+ source_model_saver = tf.train.Saver(var_list=source_model_vars)
+
+ finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
+
+ finetuned_source_model_saver = tf.train.Saver(var_list=finetuned_source_model_vars)
+
+ elif ('adv_trained' in config['model_dir'] or 'naturally_trained' in config['model_dir'] or 'a_very_robust_model' in config['model_dir']):
+
+ print("original challenge MODEL")
+
+ from free_model_original import Model
+
+ model = Model(mode='eval', dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
+
+ elif 'IGAM' in config['model_dir']:
+
+ print("IGAM MODEL")
+
+ from model_new import Model
+
+ model = Model(mode='train', dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
+
+ else:
+
+ print("other MODEL")
+
+ from free_model import Model
+
+ model = Model(mode='eval', dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
+
+
+
+ saver = tf.train.Saver()
+
+
+
+ num_eval_examples = 10000
+
+ eval_batch_size = 100
+
+
+
+ num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
+
+ total_corr = 0
+
+
+
+ x_nat = cifar.eval_data.xs
+
+ l_inf = np.amax(np.abs(x_nat - x_adv))
+
+
+
+ if l_inf > epsilon + 0.0001:
+
+ print('maximum perturbation found: {}'.format(l_inf))
+
+ print('maximum perturbation allowed: {}'.format(epsilon))
+
+ return
+
+
+
+ y_pred = [] # label accumulator
+
+
+
+ with tf.Session() as sess:
+
+ # Restore the checkpoint
+
+ if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+
+ sess.run(tf.global_variables_initializer())
+
+ source_model_file = tf.train.latest_checkpoint("models/model_AdvTrain-igamsource-IGAM-tinyimagenet_b16")
+
+ source_model_saver.restore(sess, source_model_file)
+
+ finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
+
+ finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+
+ elif 'finetuned_on_cifar100' in config['model_dir']:
+
+ sess.run(tf.global_variables_initializer())
+
+ source_model_file = tf.train.latest_checkpoint("models/adv_trained")
+
+ source_model_saver.restore(sess, source_model_file)
+
+ finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
+
+ finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+
+ else:
+
+ saver.restore(sess, checkpoint)
+
+
+
+ # Iterate over the samples batch-by-batch
+
+ for ibatch in range(num_batches):
+
+ bstart = ibatch * eval_batch_size
+
+ bend = min(bstart + eval_batch_size, num_eval_examples)
+
+
+
+ x_batch = x_adv[bstart:bend, :]
+
+ y_batch = cifar.eval_data.ys[bstart:bend]
+
+
+
+      dict_adv = {model.x_input: x_batch,
+                  model.y_input: y_batch}
+
+
+
+      if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+        cur_corr, y_pred_batch = sess.run([model.target_task_num_correct, model.target_task_predictions],
+                                          feed_dict=dict_adv)
+      else:
+        cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
+                                          feed_dict=dict_adv)
+
+
+
+ total_corr += cur_corr
+
+ y_pred.append(y_pred_batch)
+
+
+
+ accuracy = total_corr / num_eval_examples
+
+
+
+ print('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
+
+ y_pred = np.concatenate(y_pred, axis=0)
+
+
+
+ store_adv_pred_path = "preds/" + adv_examples_path.split("/")[-1]
+
+ if not os.path.exists("preds/"):
+
+ os.makedirs("preds/")
+
+ np.save(store_adv_pred_path, y_pred)
+
+ print('Output saved at ', store_adv_pred_path)
+
+
+
+ if config['save_eval_log']:
+
+ date_str = datetime.now().strftime("%d_%b")
+
+ log_dir = "attack_log/" + date_str
+
+ if not os.path.exists(log_dir):
+
+ os.makedirs(log_dir)
+
+ log_filename = adv_examples_path.split("/")[-1].replace('.npy', '.txt')
+
+ model_name = config['model_dir'].split('/')[1]
+
+ log_file_path = os.path.join(log_dir, log_filename)
+
+ with open(log_file_path, "w") as f:
+
+ f.write('Model checkpoint: {} \n'.format(checkpoint))
+
+ f.write('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
+
+ print('Results saved at ', log_file_path)
+
+
+
+ # full test evaluation
+
+ if config['dataset'] == 'cifar10':
+ raw_data = cifar10_input.CIFAR10Data(data_path)
+ elif config['dataset'] == 'cifar100':
+ raw_data = cifar100_input.CIFAR100Data(data_path)
+ else:
+ raw_data = tinyimagenet_input.TinyImagenetData()
+
+ data_size = raw_data.eval_data.n
+
+ if data_size % config['eval_batch_size'] == 0:
+
+ eval_steps = data_size // config['eval_batch_size']
+
+ else:
+
+ eval_steps = data_size // config['eval_batch_size'] + 1
+
+ total_num_correct = 0
+
+ for ii in tqdm(range(eval_steps)):
+
+ x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch(config['eval_batch_size'], multiple_passes=False)
+
+ eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
+
+ if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+
+ num_correct = sess.run(model.target_task_num_correct, feed_dict=eval_dict)
+
+ else:
+
+ num_correct = sess.run(model.num_correct, feed_dict=eval_dict)
+
+ total_num_correct += num_correct
+
+ eval_acc = total_num_correct / data_size
+
+ with open(log_file_path, "a+") as f:
+
+ f.write('\nClean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
+
+ print('Clean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
+
+ print('Results saved at ', log_file_path)
+
+
+
+if __name__ == '__main__':
+
+ import json
+
+
+
+ # with open('attack_config.json') as config_file:
+
+ # config = json.load(config_file)
+
+
+
+ model_dir = config['model_dir']
+
+
+
+ checkpoint = tf.train.latest_checkpoint(model_dir)
+
+
+ adv_examples_path = config['store_adv_path']
+
+ if adv_examples_path == None:
+
+ model_name = config['model_dir'].split('/')[1]
+
+ if config['attack_name'] == None:
+ if config['dataset'] == 'cifar10':
+ adv_examples_path = "attacks/{}_attack.npy".format(model_name)
+ elif config['dataset'] == 'cifar100':
+ adv_examples_path = "attacks/{}_c100attack.npy".format(model_name)
+ else:
+ adv_examples_path = "attacks/{}_tinyattack.npy".format(model_name)
+
+ else:
+ if config['dataset'] == 'cifar10':
+ adv_examples_path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
+ elif config['dataset'] == 'cifar100':
+ adv_examples_path = "attacks/{}_{}_c100attack.npy".format(model_name, config['attack_name'])
+ else:
+ adv_examples_path = "attacks/{}_{}_tinyattack.npy".format(model_name, config['attack_name'])
+
+
+
+ if config['attack_norm'] == '2':
+
+ adv_examples_path = adv_examples_path.replace("attack.npy", "l2attack.npy")
+
+
+
+ x_adv = np.load(adv_examples_path)
+
+
+
+ tf.set_random_seed(config['tf_seed'])
+
+ np.random.seed(config['np_seed'])
+
+
+
+ if checkpoint is None:
+
+ print('No checkpoint found')
+
+ elif x_adv.shape != (10000, 32, 32, 3):
+
+ print('Invalid shape: expected (10000, 32, 32, 3), found {}'.format(x_adv.shape))
+
+ elif np.amax(x_adv) > 255.0001 or np.amin(x_adv) < -0.0001:
+
+    print('Invalid pixel range. Expected [0, 255], found [{}, {}]'.format(
+        np.amin(x_adv), np.amax(x_adv)))
+
+ else:
+
+ print("adv_examples_path: ", adv_examples_path)
+
+ run_attack(checkpoint, x_adv, config['epsilon'])
+
+
diff --git a/case_studies/diffpure/LICENSE b/case_studies/diffpure/LICENSE
new file mode 100644
index 0000000..bda414b
--- /dev/null
+++ b/case_studies/diffpure/LICENSE
@@ -0,0 +1,64 @@
+NVIDIA Source Code License for DiffPure
+
+1. Definitions
+
+“Licensor” means any person or entity that distributes its Work.
+
+“Software” means the original work of authorship made available under this License.
+
+“Work” means the Software and any additions to or derivative works of the Software that are made available under
+this License.
+
+The terms “reproduce,” “reproduction,” “derivative works,” and “distribution” have the meaning as provided under
+U.S. copyright law; provided, however, that for the purposes of this License, derivative works shall not include
+works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work.
+
+Works, including the Software, are “made available” under this License by including in or with the Work either
+(a) a copyright notice referencing the applicability of this License to the Work, or (b) a copy of this License.
+
+2. License Grant
+
+2.1 Copyright Grant. Subject to the terms and conditions of this License, each Licensor grants to you a perpetual,
+worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare derivative works of, publicly
+display, publicly perform, sublicense and distribute its Work and any resulting derivative works in any form.
+
+3. Limitations
+
+3.1 Redistribution. You may reproduce or distribute the Work only if (a) you do so under this License, (b) you
+include a complete copy of this License with your distribution, and (c) you retain without modification any
+copyright, patent, trademark, or attribution notices that are present in the Work.
+
+3.2 Derivative Works. You may specify that additional or different terms apply to the use, reproduction, and
+distribution of your derivative works of the Work (“Your Terms”) only if (a) Your Terms provide that the use
+limitation in Section 3.3 applies to your derivative works, and (b) you identify the specific derivative works
+that are subject to Your Terms. Notwithstanding Your Terms, this License (including the redistribution
+requirements in Section 3.1) will continue to apply to the Work itself.
+
+3.3 Use Limitation. The Work and any derivative works thereof only may be used or intended for use
+non-commercially. Notwithstanding the foregoing, NVIDIA and its affiliates may use the Work and any derivative
+works commercially. As used herein, “non-commercially” means for research or evaluation purposes only.
+
+3.4 Patent Claims. If you bring or threaten to bring a patent claim against any Licensor (including any claim,
+cross-claim or counterclaim in a lawsuit) to enforce any patents that you allege are infringed by any Work, then
+your rights under this License from such Licensor (including the grant in Section 2.1) will terminate immediately.
+
+3.5 Trademarks. This License does not grant any rights to use any Licensor’s or its affiliates’ names, logos,
+or trademarks, except as necessary to reproduce the notices described in this License.
+
+3.6 Termination. If you violate any term of this License, then your rights under this License (including the
+grant in Section 2.1) will terminate immediately.
+
+4. Disclaimer of Warranty.
+
+THE WORK IS PROVIDED “AS IS” WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+WARRANTIES OR CONDITIONS OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. YOU
+BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE.
+
+5. Limitation of Liability.
+
+EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING
+NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
+INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE, THE USE OR
+INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR
+DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
\ No newline at end of file
diff --git a/case_studies/diffpure/README.md b/case_studies/diffpure/README.md
new file mode 100644
index 0000000..3102365
--- /dev/null
+++ b/case_studies/diffpure/README.md
@@ -0,0 +1,213 @@
+# Diffusion Models for Adversarial Purification
+
+
+
+
+
+Official PyTorch implementation of the ICML 2022 paper:
+**[Diffusion Models for Adversarial Purification](https://arxiv.org/abs/2205.07460)**
+
+Weili Nie, Brandon Guo, Yujia Huang, Chaowei Xiao, Arash Vahdat, Anima Anandkumar
+https://diffpure.github.io
+
+Abstract: *Adversarial purification refers to a class of defense methods that remove adversarial perturbations using a
+generative model. These methods do not make assumptions on the form of attack and the classification model, and thus can
+defend pre-existing classifiers against unseen threats. However, their performance currently falls behind adversarial
+training methods. In this work, we propose DiffPure that uses diffusion models for adversarial purification:
+Given an adversarial example, we first diffuse it with a small amount of noise following a forward diffusion process,
+and then recover the clean image through a reverse generative process. To evaluate our method against strong adaptive
+attacks in an efficient and scalable way, we propose to use the adjoint method to compute full gradients of the reverse
+generative process. Extensive experiments on three image datasets including CIFAR-10, ImageNet and CelebA-HQ with three
+classifier architectures including ResNet, WideResNet and ViT demonstrate that our method achieves the state-of-the-art
+results, outperforming current adversarial training and adversarial purification methods, often by a large margin.*
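+
+At a high level, purification adds a small amount of noise to the (possibly adversarial) input via the forward
+diffusion process and then runs the reverse generative process back to time zero. The following is a minimal sketch of
+this idea (not the implementation used in this repository); `alpha_bar` and `reverse_denoise` are placeholders for a
+pretrained diffusion model's noise schedule and reverse process:
+
+```python
+import torch
+
+def diffpure_purify(x_adv, alpha_bar, reverse_denoise, t_star=0.1):
+    # Forward (VP) diffusion in closed form: lightly noise the input up to a small time t*.
+    a = torch.as_tensor(alpha_bar(t_star), dtype=x_adv.dtype, device=x_adv.device)
+    x_t = a.sqrt() * x_adv + (1.0 - a).sqrt() * torch.randn_like(x_adv)
+    # Reverse generative process back to t = 0 removes the added noise together
+    # with (most of) the adversarial perturbation.
+    return reverse_denoise(x_t, t_star)
+```
+
+For adaptive attacks, the paper computes gradients through this purification step with the adjoint method rather than
+by backpropagating through an unrolled sampler.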
+
+## Requirements
+
+- 1-4 high-end NVIDIA GPUs with 32 GB of memory.
+- 64-bit Python 3.8.
+- CUDA=11.0 and docker must be installed first.
+- Installation of the required library dependencies with Docker:
+ ```bash
+ docker build -f diffpure.Dockerfile --tag=diffpure:0.0.1 .
+ docker run -it -d --gpus 0 --name diffpure --shm-size 8G -v $(pwd):/workspace -p 5001:6006 diffpure:0.0.1
+ docker exec -it diffpure bash
+ ```
+
+## Data and pre-trained models
+
+Before running our code on ImageNet and CelebA-HQ, you have to first download these two datasets. For example, you can
+follow [the instructions to download CelebA-HQ](https://github.com/suvojit-0x55aa/celebA-HQ-dataset-download). Note that
+we use the LMDB format for ImageNet, so you may need
+to [convert the ImageNet dataset to LMDB](https://github.com/Lyken17/Efficient-PyTorch/tree/master/tools). There is no
+need to download CIFAR-10 separately.
+
+Note that you have to put all the datasets in the `datasest` directory.
+
+For the pre-trained diffusion models, you need to first download them from the following links:
+
+- [Score SDE](https://github.com/yang-song/score_sde_pytorch) for
+ CIFAR-10: (`vp/cifar10_ddpmpp_deep_continuous`: [download link](https://drive.google.com/file/d/16_-Ahc6ImZV5ClUc0vM5Iivf8OJ1VSif/view?usp=sharing))
+- [Guided Diffusion](https://github.com/openai/guided-diffusion) for
+ ImageNet: (`256x256 diffusion unconditional`: [download link](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/256x256_diffusion_uncond.pt))
+- [DDPM](https://github.com/ermongroup/SDEdit) for CelebA-HQ: (`CelebA-HQ`: [download link](https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/celeba_hq.ckpt))
+
+For the pre-trained classifiers, most of them do not need to be downloaded separately, except for
+
+- `attribute classifiers` from [gan-ensembling](https://github.com/chail/gan-ensembling) on
+ CelebA-HQ: [download link](http://latent-composition.csail.mit.edu/other_projects/gan_ensembling/zips/pretrained_classifiers.zip)
+- `wideresnet-70-16` on CIFAR-10: TBD
+- `resnet-50` on CIFAR-10: TBD
+- `wrn-70-16-dropout` on CIFAR-10: TBD
+
+Note that you have to put all the pretrained models in the `pretrained` directory.
+
+## Run experiments on CIFAR-10
+
+### AutoAttack Linf
+
+- To get results of defending against AutoAttack Linf (the `Rand` version):
+
+```bash
+cd run_scripts/cifar10
+bash run_cifar_rand_inf.sh [seed_id] [data_id] # WideResNet-28-10
+bash run_cifar_rand_inf_70-16-dp.sh [seed_id] [data_id] # WideResNet-70-16
+bash run_cifar_rand_inf_rn50.sh [seed_id] [data_id] # ResNet-50
+```
+
+- To get results of defending against AutoAttack Linf (the `Standard` version):
+
+```bash
+cd run_scripts/cifar10
+bash run_cifar_stand_inf.sh [seed_id] [data_id] # WideResNet-28-10
+bash run_cifar_stand_inf_70-16-dp.sh [seed_id] [data_id] # WideResNet-70-16
+bash run_cifar_stand_inf_rn50.sh [seed_id] [data_id] # ResNet-50
+```
+
+Note that `[seed_id]` is used for getting error bars, and `[data_id]` is used for sampling a fixed set of images.
+
+To reproduce the numbers in the paper, we recommend using three seeds (e.g., 121..123) for `[seed_id]` and eight seeds
+(e.g., 0..7) for `[data_id]`, and averaging all the results across `[seed_id]` and `[data_id]`, accordingly.
+To measure the worst-case defense performance of our method, the reported robust accuracy is the minimum robust accuracy
+of these two versions: `Rand` and `Standard`.
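+
+A sketch of how the per-run numbers could be aggregated (the run scripts only print per-run accuracies; the array
+shapes and values below are placeholders):
+
+```python
+import numpy as np
+
+# robust accuracy per (seed_id, data_id): 3 seeds (121..123) x 8 data ids (0..7)
+rand_acc = np.zeros((3, 8))      # fill in from the `Rand` runs
+standard_acc = np.zeros((3, 8))  # fill in from the `Standard` runs
+
+rand_mean = rand_acc.mean()                 # average over seeds and data ids
+standard_mean = standard_acc.mean()
+robust_acc = min(rand_mean, standard_mean)  # report the worst case of the two versions
+seed_spread = rand_acc.mean(axis=1).std()   # spread across seeds, used for error bars
+```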
+
+### AutoAttack L2
+
+- To get results of defending against AutoAttack L2 (the `Rand` version):
+
+```bash
+cd run_scripts/cifar10
+bash run_cifar_rand_L2.sh [seed_id] [data_id] # WideResNet-28-10
+bash run_cifar_rand_L2_70-16-dp.sh [seed_id] [data_id] # WideResNet-70-16
+bash run_cifar_rand_L2_rn50.sh [seed_id] [data_id] # ResNet-50
+```
+
+- To get results of defending against AutoAttack L2 (the `Standard` version):
+
+```bash
+cd run_scripts/cifar10
+bash run_cifar_stand_L2.sh [seed_id] [data_id] # WideResNet-28-10
+bash run_cifar_stand_L2_70-16-dp.sh [seed_id] [data_id] # WideResNet-70-16
+bash run_cifar_stand_L2_rn50.sh [seed_id] [data_id] # ResNet-50
+```
+
+Note that `[seed_id]` is used for getting error bars, and `[data_id]` is used for sampling a fixed set of images.
+
+To reproduce the numbers in the paper, we recommend using three seeds (e.g., 121..123) for `[seed_id]` and eight seeds
+(e.g., 0..7) for `[data_id]`, and averaging all the results across `[seed_id]` and `[data_id]`, accordingly.
+To measure the worst-case defense performance of our method, the reported robust accuracy is the minimum robust accuracy
+of these two versions: `Rand` and `Standard`.
+
+### StAdv
+
+- To get results of defending against StAdv:
+
+```bash
+cd run_scripts/cifar10
+bash run_cifar_stadv_rn50.sh [seed_id] [data_id] # ResNet-50
+```
+
+Note that `[seed_id]` is used for getting error bars, and `[data_id]` is used for sampling a fixed set of images.
+
+To reproduce the numbers in the paper, we recommend using three seeds (e.g., 121..123) for `[seed_id]` and eight seeds
+(e.g., 0..7) for `[data_id]`, and averaging all the results across `[seed_id]` and `[data_id]`, accordingly.
+
+
+### BPDA+EOT
+
+- To get results of defending against BPDA+EOT:
+
+```bash
+cd run_scripts/cifar10
+bash run_cifar_bpda_eot.sh [seed_id] [data_id] # WideResNet-28-10
+```
+
+Note that `[seed_id]` is used for getting error bars, and `[data_id]` is used for sampling a fixed set of images.
+
+To reproduce the numbers in the paper, we recommend using three seeds (e.g., 121..123) for `[seed_id]` and five seeds
+(e.g., 0..4) for `[data_id]`, and averaging all the results across `[seed_id]` and `[data_id]`, accordingly.
+
+## Run experiments on ImageNet
+
+### AutoAttack Linf
+
+- To get results of defending against AutoAttack Linf (the `Rand` version):
+
+```bash
+cd run_scripts/imagenet
+bash run_in_rand_inf.sh [seed_id] [data_id] # ResNet-50
+bash run_in_rand_inf_50-2.sh [seed_id] [data_id] # WideResNet-50-2
+bash run_in_rand_inf_deits.sh [seed_id] [data_id] # DeiT-S
+```
+
+- To get results of defending against AutoAttack Linf (the `Standard` version):
+
+```bash
+cd run_scripts/imagenet
+bash run_in_stand_inf.sh [seed_id] [data_id] # ResNet-50
+bash run_in_stand_inf_50-2.sh [seed_id] [data_id] # WideResNet-50-2
+bash run_in_stand_inf_deits.sh [seed_id] [data_id] # DeiT-S
+```
+
+Note that `[seed_id]` is used for getting error bars, and `[data_id]` is used for sampling a fixed set of images.
+
+To reproduce the numbers in the paper, we recommend using three seeds (e.g., 121..123) for `[seed_id]` and 16 seeds
+(e.g., 0..15) for `[data_id]`, and averaging all the results across `[seed_id]` and `[data_id]`, accordingly.
+To measure the worst-case defense performance of our method, the reported robust accuracy is the minimum robust accuracy
+of these two versions: `Rand` and `Standard`.
+
+## Run experiments on CelebA-HQ
+
+### BPDA+EOT
+
+- To get results of defending against BPDA+EOT:
+
+```bash
+cd run_scripts/celebahq
+bash run_celebahq_bpda_glasses.sh [seed_id] [data_id] # the glasses attribute
+bash run_celebahq_bpda_smiling.sh [seed_id] [data_id] # the smiling attribute
+```
+
+Note that `[seed_id]` is used for getting error bars, and `[data_id]` is used for sampling a fixed set of images.
+
+To reproduce the numbers in the paper, we recommend using three seeds (e.g., 121..123) for `[seed_id]` and 64 seeds
+(e.g., 0..63) for `[data_id]`, and averaging all the results across `[seed_id]` and `[data_id]`, accordingly.
+
+## License
+
+Please check the [LICENSE](LICENSE) file. This work may be used non-commercially, meaning for research or evaluation
+purposes only. For business inquiries, please contact
+[researchinquiries@nvidia.com](mailto:researchinquiries@nvidia.com).
+
+## Citation
+
+Please cite our paper if you use this codebase:
+
+```
+@inproceedings{nie2022DiffPure,
+ title={Diffusion Models for Adversarial Purification},
+ author={Nie, Weili and Guo, Brandon and Huang, Yujia and Xiao, Chaowei and Vahdat, Arash and Anandkumar, Anima},
+ booktitle = {International Conference on Machine Learning (ICML)},
+ year={2022}
+}
+```
+
diff --git a/case_studies/diffpure/assets/teaser_v7.jpeg b/case_studies/diffpure/assets/teaser_v7.jpeg
new file mode 100644
index 0000000..cd42f32
Binary files /dev/null and b/case_studies/diffpure/assets/teaser_v7.jpeg differ
diff --git a/case_studies/diffpure/bpda_eot/LICENSE_BPDA b/case_studies/diffpure/bpda_eot/LICENSE_BPDA
new file mode 100644
index 0000000..1d44383
--- /dev/null
+++ b/case_studies/diffpure/bpda_eot/LICENSE_BPDA
@@ -0,0 +1,73 @@
+MIT License
+
+Copyright (c) 2020 Mitch Hill and Jonathan Mitchell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Code for train_ebm.py is derived from: https://github.com/point0bar1/ebm-anatomy
+Copyright (c) Mitch Hill and Erik Nijkamp under MIT License.
+
+MIT License
+
+Copyright (c) 2019 Mitch Hill and Erik Nijkamp
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Code for WideResNet class in nets.py is derived from: https://github.com/meliketoy/wide-resnet.pytorch
+Copyright (c) Bumsoo Kim under MIT License.
+
+MIT License
+
+Copyright (c) 2018 Bumsoo Kim
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/case_studies/diffpure/bpda_eot/bpda_eot_attack.py b/case_studies/diffpure/bpda_eot/bpda_eot_attack.py
new file mode 100644
index 0000000..bd79de6
--- /dev/null
+++ b/case_studies/diffpure/bpda_eot/bpda_eot_attack.py
@@ -0,0 +1,185 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from ebm-defense.
+#
+# Source:
+# https://github.com/point0bar1/ebm-defense/blob/master/bpda_eot_attack.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_BPDA).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+import torch
+import torch.nn.functional as F
+
+criterion = torch.nn.CrossEntropyLoss()
+
+
+class BPDA_EOT_Attack():
+ def __init__(self, model, adv_eps=8.0/255, eot_defense_reps=150, eot_attack_reps=15):
+ self.model = model
+
+ self.config = {
+ 'eot_defense_ave': 'logits',
+ 'eot_attack_ave': 'logits',
+ 'eot_defense_reps': eot_defense_reps,
+ 'eot_attack_reps': eot_attack_reps,
+ 'adv_steps': 50,
+ 'adv_norm': 'l_inf',
+ 'adv_eps': adv_eps,
+ 'adv_eta': 2.0 / 255,
+ 'log_freq': 10
+ }
+
+ print(f'BPDA_EOT config: {self.config}')
+
+ def purify(self, x):
+ return self.model(x, mode='purify')
+
+    def eot_defense_prediction(self, logits, reps=1, eot_defense_ave=None):
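+        # EOT prediction: the logits of the `reps` purified copies of each input are
+        # reshaped to [reps, batch, classes] and averaged (in logit, softmax, or
+        # log-softmax space) before taking the argmax.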
+ if eot_defense_ave == 'logits':
+ logits_pred = logits.view([reps, int(logits.shape[0]/reps), logits.shape[1]]).mean(0)
+ elif eot_defense_ave == 'softmax':
+ logits_pred = F.softmax(logits, dim=1).view([reps, int(logits.shape[0]/reps), logits.shape[1]]).mean(0)
+ elif eot_defense_ave == 'logsoftmax':
+ logits_pred = F.log_softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
+ elif reps == 1:
+ logits_pred = logits
+ else:
+ raise RuntimeError('Invalid ave_method_pred (use "logits" or "softmax" or "logsoftmax")')
+ _, y_pred = torch.max(logits_pred, 1)
+ return y_pred
+
+ def eot_attack_loss(self, logits, y, reps=1, eot_attack_ave='loss'):
+ if eot_attack_ave == 'logits':
+ logits_loss = logits.view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
+ y_loss = y
+ elif eot_attack_ave == 'softmax':
+ logits_loss = torch.log(F.softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0))
+ y_loss = y
+ elif eot_attack_ave == 'logsoftmax':
+ logits_loss = F.log_softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
+ y_loss = y
+ elif eot_attack_ave == 'loss':
+ logits_loss = logits
+ y_loss = y.repeat(reps)
+ else:
+ raise RuntimeError('Invalid ave_method_eot ("logits", "softmax", "logsoftmax", "loss")')
+ loss = criterion(logits_loss, y_loss)
+ return loss
+
+ def predict(self, X, y, requires_grad=True, reps=1, eot_defense_ave=None, eot_attack_ave='loss'):
+ if requires_grad:
+ logits = self.model(X, mode='classify')
+ else:
+ with torch.no_grad():
+ logits = self.model(X.data, mode='classify')
+
+ y_pred = self.eot_defense_prediction(logits.detach(), reps, eot_defense_ave)
+ correct = torch.eq(y_pred, y)
+ loss = self.eot_attack_loss(logits, y, reps, eot_attack_ave)
+
+ return correct.detach(), loss
+
+ def pgd_update(self, X_adv, grad, X, adv_norm, adv_eps, adv_eta, eps=1e-10):
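+        # One PGD step on the BPDA/EOT gradient: take a step of size adv_eta, project back
+        # onto the adv_eps ball around the clean images X, and clamp to the valid range [0, 1].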
+ if adv_norm == 'l_inf':
+ X_adv.data += adv_eta * torch.sign(grad)
+ X_adv = torch.clamp(torch.min(X + adv_eps, torch.max(X - adv_eps, X_adv)), min=0, max=1)
+ elif adv_norm == 'l_2':
+ X_adv.data += adv_eta * grad / grad.view(X.shape[0], -1).norm(p=2, dim=1).view(X.shape[0], 1, 1, 1)
+ dists = (X_adv - X).view(X.shape[0], -1).norm(dim=1, p=2).view(X.shape[0], 1, 1, 1)
+ X_adv = torch.clamp(X + torch.min(dists, adv_eps*torch.ones_like(dists))*(X_adv-X)/(dists+eps), min=0, max=1)
+ else:
+            raise RuntimeError('Invalid adv_norm ("l_inf" or "l_2")')
+ return X_adv
+
+ def purify_and_predict(self, X, y, purify_reps=1, requires_grad=True):
+ X_repeat = X.repeat([purify_reps, 1, 1, 1])
+ X_repeat_purified = self.purify(X_repeat).detach().clone()
+ X_repeat_purified.requires_grad_()
+ correct, loss = self.predict(X_repeat_purified, y, requires_grad, purify_reps,
+ self.config['eot_defense_ave'], self.config['eot_attack_ave'])
+ if requires_grad:
+ X_grads = torch.autograd.grad(loss, [X_repeat_purified])[0]
+ # average gradients over parallel samples for EOT attack
+ attack_grad = X_grads.view([purify_reps]+list(X.shape)).mean(dim=0)
+ return correct, attack_grad
+ else:
+ return correct, None
+
+ def eot_defense_verification(self, X_adv, y, correct, defended):
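+        # Inputs that look successfully attacked under the cheaper attack-time EOT estimate
+        # are re-checked with `eot_defense_reps` purification runs before being marked as broken.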
+ for verify_ind in range(correct.nelement()):
+ if correct[verify_ind] == 0 and defended[verify_ind] == 1:
+ defended[verify_ind] = self.purify_and_predict(X_adv[verify_ind].unsqueeze(0), y[verify_ind].view([1]),
+ self.config['eot_defense_reps'], requires_grad=False)[0]
+ return defended
+
+ def eval_and_bpda_eot_grad(self, X_adv, y, defended, requires_grad=True):
+ correct, attack_grad = self.purify_and_predict(X_adv, y, self.config['eot_attack_reps'], requires_grad)
+ if self.config['eot_defense_reps'] > 0:
+ defended = self.eot_defense_verification(X_adv, y, correct, defended)
+ else:
+ defended *= correct
+ return defended, attack_grad
+
+ def attack_batch(self, X, y):
+ # get baseline accuracy for natural images
+ defended = self.eval_and_bpda_eot_grad(X, y, torch.ones_like(y).bool(), False)[0]
+ print('Baseline: {} of {}'.format(defended.sum(), len(defended)))
+
+ class_batch = torch.zeros([self.config['adv_steps'] + 2, X.shape[0]]).bool()
+ class_batch[0] = defended.cpu()
+ ims_adv_batch = torch.zeros(X.shape)
+ for ind in range(defended.nelement()):
+ if defended[ind] == 0:
+ ims_adv_batch[ind] = X[ind].cpu()
+
+ X_adv = X.clone()
+
+ # adversarial attacks on a single batch of images
+ for step in range(self.config['adv_steps'] + 1):
+ defended, attack_grad = self.eval_and_bpda_eot_grad(X_adv, y, defended)
+
+ class_batch[step+1] = defended.cpu()
+ for ind in range(defended.nelement()):
+ if class_batch[step, ind] == 1 and defended[ind] == 0:
+ ims_adv_batch[ind] = X_adv[ind].cpu()
+
+ # update adversarial images (except on final iteration so final adv images match final eval)
+ if step < self.config['adv_steps']:
+ X_adv = self.pgd_update(X_adv, attack_grad, X, self.config['adv_norm'], self.config['adv_eps'], self.config['adv_eta'])
+ X_adv = X_adv.detach().clone()
+
+ if step == 1 or step % self.config['log_freq'] == 0 or step == self.config['adv_steps']:
+ print('Attack {} of {} Batch defended: {} of {}'.
+ format(step, self.config['adv_steps'], int(torch.sum(defended).cpu().numpy()), X_adv.shape[0]))
+
+ if int(torch.sum(defended).cpu().numpy()) == 0:
+                print('Attack succeeded on the whole batch!')
+ break
+
+ for ind in range(defended.nelement()):
+ if defended[ind] == 1:
+ ims_adv_batch[ind] = X_adv[ind].cpu()
+
+ return class_batch, ims_adv_batch
+
+ def attack_all(self, X, y, batch_size):
+ class_path = torch.zeros([self.config['adv_steps'] + 2, 0]).bool()
+ ims_adv = torch.zeros(0)
+
+ n_batches = X.shape[0] // batch_size
+ if n_batches == 0 and X.shape[0] > 0:
+ n_batches = 1
+ for counter in range(n_batches):
+ X_batch = X[counter * batch_size:min((counter + 1) * batch_size, X.shape[0])].clone().to(X.device)
+ y_batch = y[counter * batch_size:min((counter + 1) * batch_size, X.shape[0])].clone().to(X.device)
+
+ class_batch, ims_adv_batch = self.attack_batch(X_batch.contiguous(), y_batch.contiguous())
+ class_path = torch.cat((class_path, class_batch), dim=1)
+ ims_adv = torch.cat((ims_adv, ims_adv_batch), dim=0)
+ print(f'finished {counter}-th batch in attack_all')
+
+ return class_path, ims_adv
diff --git a/case_studies/diffpure/classifiers/attribute_classifier.py b/case_studies/diffpure/classifiers/attribute_classifier.py
new file mode 100644
index 0000000..9ff9634
--- /dev/null
+++ b/case_studies/diffpure/classifiers/attribute_classifier.py
@@ -0,0 +1,65 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import torch
+import os
+from . import attribute_net
+
+softmax = torch.nn.Softmax(dim=1)
+
+
+def downsample(images, size=256):
+ # Downsample to 256x256. The attribute classifiers were built for 256x256.
+ # follows https://github.com/NVlabs/stylegan/blob/master/metrics/linear_separability.py#L127
+ if images.shape[2] > size:
+ factor = images.shape[2] // size
+ assert (factor * size == images.shape[2])
+ images = images.view(
+ [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
+ images = images.mean(dim=[3, 5])
+ return images
+ else:
+ assert (images.shape[-1] == 256)
+ return images
+
+
+def get_logit(net, im):
+ im_256 = downsample(im)
+ logit = net(im_256)
+ return logit
+
+
+def get_softmaxed(net, im):
+ logit = get_logit(net, im)
+ logits = torch.cat([logit, -logit], dim=1)
+    softmaxed = softmax(logits)[:, 1]
+ return logits, softmaxed
+
+
+def load_attribute_classifier(attribute, ckpt_path=None):
+ if ckpt_path is None:
+ base_path = 'checkpoints/diffpure/celebahq'
+ attribute_pkl = os.path.join(base_path, attribute, 'net_best.pth')
+ ckpt = torch.load(attribute_pkl)
+ else:
+ ckpt = torch.load(ckpt_path)
+ print("Using classifier at epoch: %d" % ckpt['epoch'])
+ if 'valacc' in ckpt.keys():
+ print("Validation acc on raw images: %0.5f" % ckpt['valacc'])
+ detector = attribute_net.from_state_dict(
+ ckpt['state_dict'], fixed_size=True, use_mbstd=False).cuda().eval()
+ return detector
+
+
+class ClassifierWrapper(torch.nn.Module):
+ def __init__(self, classifier_name, ckpt_path=None, device='cuda'):
+ super(ClassifierWrapper, self).__init__()
+ self.net = load_attribute_classifier(classifier_name, ckpt_path).eval().to(device)
+
+ def forward(self, ims):
+ out = (ims - 0.5) / 0.5
+ return get_softmaxed(self.net, out)[0]
diff --git a/case_studies/diffpure/classifiers/attribute_net.py b/case_studies/diffpure/classifiers/attribute_net.py
new file mode 100644
index 0000000..c162be5
--- /dev/null
+++ b/case_studies/diffpure/classifiers/attribute_net.py
@@ -0,0 +1,227 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import numpy as np
+
+
+def lerp_clip(a, b, t):
+ return a + (b - a) * torch.clamp(t, 0.0, 1.0)
+
+
+class WScaleLayer(nn.Module):
+ def __init__(self, size, fan_in, gain=np.sqrt(2), bias=True):
+ super(WScaleLayer, self).__init__()
+ self.scale = gain / np.sqrt(fan_in) # No longer a parameter
+ if bias:
+ self.b = nn.Parameter(torch.randn(size))
+ else:
+ self.b = 0
+ self.size = size
+
+ def forward(self, x):
+ x_size = x.size()
+ x = x * self.scale
+ # modified to remove warning
+ if type(self.b) == nn.Parameter and len(x_size) == 4:
+ x = x + self.b.view(1, -1, 1, 1).expand(
+ x_size[0], self.size, x_size[2], x_size[3])
+ if type(self.b) == nn.Parameter and len(x_size) == 2:
+ x = x + self.b.view(1, -1).expand(
+ x_size[0], self.size)
+ return x
+
+
+class WScaleConv2d(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size, padding=0,
+ bias=True, gain=np.sqrt(2)):
+ super().__init__()
+ self.conv = nn.Conv2d(in_channels, out_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ bias=False)
+ fan_in = in_channels * kernel_size * kernel_size
+ self.wscale = WScaleLayer(out_channels, fan_in, gain=gain, bias=bias)
+
+ def forward(self, x):
+ return self.wscale(self.conv(x))
+
+
+class WScaleLinear(nn.Module):
+ def __init__(self, in_channels, out_channels, bias=True, gain=np.sqrt(2)):
+ super().__init__()
+ self.linear = nn.Linear(in_channels, out_channels, bias=False)
+ self.wscale = WScaleLayer(out_channels, in_channels, gain=gain,
+ bias=bias)
+
+ def forward(self, x):
+ return self.wscale(self.linear(x))
+
+
+class FromRGB(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size,
+ act=nn.LeakyReLU(0.2), bias=True):
+ super().__init__()
+ self.conv = WScaleConv2d(in_channels, out_channels, kernel_size,
+ padding=0, bias=bias)
+ self.act = act
+
+ def forward(self, x):
+ return self.act(self.conv(x))
+
+
+class Downscale2d(nn.Module):
+ def __init__(self, factor=2):
+ super().__init__()
+ self.downsample = nn.AvgPool2d(kernel_size=factor, stride=factor)
+
+ def forward(self, x):
+ return self.downsample(x)
+
+
+class DownscaleConvBlock(nn.Module):
+ def __init__(self, in_channels, conv0_channels, conv1_channels,
+ kernel_size, padding, bias=True, act=nn.LeakyReLU(0.2)):
+ super().__init__()
+ self.downscale = Downscale2d()
+ self.conv0 = WScaleConv2d(in_channels, conv0_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ bias=bias)
+ self.conv1 = WScaleConv2d(conv0_channels, conv1_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ bias=bias)
+ self.act = act
+
+ def forward(self, x):
+ x = self.act(self.conv0(x))
+ # conv2d_downscale2d applies downscaling before activation
+ # the order matters here! has to be conv -> bias -> downscale -> act
+ x = self.conv1(x)
+ x = self.downscale(x)
+ x = self.act(x)
+ return x
+
+
+class MinibatchStdLayer(nn.Module):
+ def __init__(self, group_size=4):
+ super().__init__()
+ self.group_size = group_size
+
+ def forward(self, x):
+ group_size = min(self.group_size, x.shape[0])
+ s = x.shape
+ y = x.view([group_size, -1, s[1], s[2], s[3]])
+ y = y.float()
+ y = y - torch.mean(y, dim=0, keepdim=True)
+ y = torch.mean(y * y, dim=0)
+ y = torch.sqrt(y + 1e-8)
+ y = torch.mean(torch.mean(torch.mean(y, dim=3, keepdim=True),
+ dim=2, keepdim=True), dim=1, keepdim=True)
+ y = y.type(x.type())
+ y = y.repeat(group_size, 1, s[2], s[3])
+ return torch.cat([x, y], dim=1)
+
+
+class PredictionBlock(nn.Module):
+ def __init__(self, in_channels, dense0_feat, dense1_feat, out_feat,
+ pool_size=2, act=nn.LeakyReLU(0.2), use_mbstd=True):
+ super().__init__()
+ self.use_mbstd = use_mbstd # attribute classifiers don't have this
+ if self.use_mbstd:
+ self.mbstd_layer = MinibatchStdLayer()
+ # MinibatchStdLayer adds an additional feature dimension
+ self.conv = WScaleConv2d(in_channels + int(self.use_mbstd),
+ dense0_feat, kernel_size=3, padding=1)
+ self.dense0 = WScaleLinear(dense0_feat * pool_size * pool_size, dense1_feat)
+ self.dense1 = WScaleLinear(dense1_feat, out_feat, gain=1)
+ self.act = act
+
+ def forward(self, x):
+ if self.use_mbstd:
+ x = self.mbstd_layer(x)
+ x = self.act(self.conv(x))
+ x = x.view([x.shape[0], -1])
+ x = self.act(self.dense0(x))
+ x = self.dense1(x)
+ return x
+
+
+class D(nn.Module):
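+    """Convolutional discriminator-style network used as a CelebA-HQ attribute classifier
+    (loaded via from_state_dict in attribute_classifier.py)."""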
+
+ def __init__(
+ self,
+ num_channels=3, # Number of input color channels. Overridden based on dataset.
+ resolution=128, # Input resolution. Overridden based on dataset.
+ fmap_base=8192, # Overall multiplier for the number of feature maps.
+ fmap_decay=1.0, # log2 feature map reduction when doubling the resolution.
+ fmap_max=512, # Maximum number of feature maps in any layer.
+ fixed_size=False, # True = load fromrgb_lod0 weights only
+ use_mbstd=True, # False = no mbstd layer in PredictionBlock
+ **kwargs): # Ignore unrecognized keyword args.
+ super().__init__()
+
+ self.resolution_log2 = resolution_log2 = int(np.log2(resolution))
+ assert resolution == 2 ** resolution_log2 and resolution >= 4
+
+ def nf(stage):
+ return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
+
+ self.register_buffer('lod_in', torch.from_numpy(np.array(0.0)))
+
+ res = resolution_log2
+
+ setattr(self, 'fromrgb_lod0', FromRGB(num_channels, nf(res - 1), 1))
+
+ for i, res in enumerate(range(resolution_log2, 2, -1), 1):
+ lod = resolution_log2 - res
+ block = DownscaleConvBlock(nf(res - 1), nf(res - 1), nf(res - 2),
+ kernel_size=3, padding=1)
+ setattr(self, '%dx%d' % (2 ** res, 2 ** res), block)
+ fromrgb = FromRGB(3, nf(res - 2), 1)
+ if not fixed_size:
+ setattr(self, 'fromrgb_lod%d' % i, fromrgb)
+
+ res = 2
+ pool_size = 2 ** res
+ block = PredictionBlock(nf(res + 1 - 2), nf(res - 1), nf(res - 2), 1,
+ pool_size, use_mbstd=use_mbstd)
+ setattr(self, '%dx%d' % (pool_size, pool_size), block)
+ self.downscale = Downscale2d()
+ self.fixed_size = fixed_size
+
+ def forward(self, img):
+ x = self.fromrgb_lod0(img)
+ for i, res in enumerate(range(self.resolution_log2, 2, -1), 1):
+ lod = self.resolution_log2 - res
+ x = getattr(self, '%dx%d' % (2 ** res, 2 ** res))(x)
+ if not self.fixed_size:
+ img = self.downscale(img)
+ y = getattr(self, 'fromrgb_lod%d' % i)(img)
+ x = lerp_clip(x, y, self.lod_in - lod)
+ res = 2
+ pool_size = 2 ** res
+ out = getattr(self, '%dx%d' % (pool_size, pool_size))(x)
+ return out
+
+
+def max_res_from_state_dict(state_dict):
+ for i in range(3, 12):
+ if '%dx%d.conv0.conv.weight' % (2 ** i, 2 ** i) not in state_dict:
+ break
+ return 2 ** (i - 1)
+
+
+def from_state_dict(state_dict, fixed_size=False, use_mbstd=True):
+ res = max_res_from_state_dict(state_dict)
+ print(f'res: {res}')
+ d = D(num_channels=3, resolution=res, fixed_size=fixed_size,
+ use_mbstd=use_mbstd)
+ d.load_state_dict(state_dict)
+ return d
diff --git a/case_studies/diffpure/classifiers/cifar10_resnet.py b/case_studies/diffpure/classifiers/cifar10_resnet.py
new file mode 100644
index 0000000..a409b6a
--- /dev/null
+++ b/case_studies/diffpure/classifiers/cifar10_resnet.py
@@ -0,0 +1,199 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import math
+
+import torch
+import torch.nn.functional as F
+import torch.nn as nn
+
+
+# ---------------------------- ResNet ----------------------------
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, in_planes, planes, stride=1):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(self.expansion * planes)
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = F.relu(self.bn2(self.conv2(out)))
+ out = self.bn3(self.conv3(out))
+ out += self.shortcut(x)
+ out = F.relu(out)
+ return out
+
+
+class ResNet(nn.Module):
+ def __init__(self, block, num_blocks, num_classes=10):
+ super(ResNet, self).__init__()
+ self.in_planes = 64
+
+ num_input_channels = 3
+ mean = (0.4914, 0.4822, 0.4465)
+ std = (0.2471, 0.2435, 0.2616)
+ self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
+ self.std = torch.tensor(std).view(num_input_channels, 1, 1)
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
+ self.linear = nn.Linear(512 * block.expansion, num_classes)
+
+ def _make_layer(self, block, planes, num_blocks, stride):
+ strides = [stride] + [1] * (num_blocks - 1)
+ layers = []
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, stride))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ out = (x - self.mean.to(x.device)) / self.std.to(x.device)
+ out = F.relu(self.bn1(self.conv1(out)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ out = self.linear(out)
+ return out
+
+
+def ResNet50():
+ return ResNet(Bottleneck, [3, 4, 6, 3])
+
+
+# ---------------------------- ResNet ----------------------------
+
+
+# ---------------------------- WideResNet ----------------------------
+
+class BasicBlock(nn.Module):
+ def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
+ super(BasicBlock, self).__init__()
+ self.bn1 = nn.BatchNorm2d(in_planes)
+ self.relu1 = nn.ReLU(inplace=True)
+ self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(out_planes)
+ self.relu2 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
+ padding=1, bias=False)
+ self.droprate = dropRate
+ self.equalInOut = (in_planes == out_planes)
+ self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
+ padding=0, bias=False) or None
+
+ def forward(self, x):
+ if not self.equalInOut:
+ x = self.relu1(self.bn1(x))
+ else:
+ out = self.relu1(self.bn1(x))
+ out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
+ if self.droprate > 0:
+ out = F.dropout(out, p=self.droprate, training=self.training)
+ out = self.conv2(out)
+ return torch.add(x if self.equalInOut else self.convShortcut(x), out)
+
+
+class NetworkBlock(nn.Module):
+ def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
+ super(NetworkBlock, self).__init__()
+ self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
+
+ def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
+ layers = []
+ for i in range(int(nb_layers)):
+ layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ return self.layer(x)
+
+
+class WideResNet(nn.Module):
+ """ Based on code from https://github.com/yaodongyu/TRADES """
+
+ def __init__(self, depth=28, num_classes=10, widen_factor=10, sub_block1=False, dropRate=0.0, bias_last=True):
+ super(WideResNet, self).__init__()
+
+ num_input_channels = 3
+ mean = (0.4914, 0.4822, 0.4465)
+ std = (0.2471, 0.2435, 0.2616)
+ self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
+ self.std = torch.tensor(std).view(num_input_channels, 1, 1)
+
+ nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
+ assert ((depth - 4) % 6 == 0)
+ n = (depth - 4) / 6
+ block = BasicBlock
+ # 1st conv before any network block
+ self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
+ padding=1, bias=False)
+ # 1st block
+ self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
+ if sub_block1:
+ # 1st sub-block
+ self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
+ # 2nd block
+ self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
+ # 3rd block
+ self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
+ # global average pooling and classifier
+ self.bn1 = nn.BatchNorm2d(nChannels[3])
+ self.relu = nn.ReLU(inplace=True)
+ self.fc = nn.Linear(nChannels[3], num_classes, bias=bias_last)
+ self.nChannels = nChannels[3]
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+            elif isinstance(m, nn.Linear) and m.bias is not None:
+ m.bias.data.zero_()
+
+ def forward(self, x):
+ out = (x - self.mean.to(x.device)) / self.std.to(x.device)
+ out = self.conv1(out)
+ out = self.block1(out)
+ out = self.block2(out)
+ out = self.block3(out)
+ out = self.relu(self.bn1(out))
+ out = F.avg_pool2d(out, 8)
+ out = out.view(-1, self.nChannels)
+ return self.fc(out)
+
+
+def WideResNet_70_16():
+ return WideResNet(depth=70, widen_factor=16, dropRate=0.0)
+
+
+def WideResNet_70_16_dropout():
+ return WideResNet(depth=70, widen_factor=16, dropRate=0.3)
+# ---------------------------- WideResNet ----------------------------
diff --git a/case_studies/diffpure/configs/celeba.yml b/case_studies/diffpure/configs/celeba.yml
new file mode 100644
index 0000000..4d0c4d0
--- /dev/null
+++ b/case_studies/diffpure/configs/celeba.yml
@@ -0,0 +1,35 @@
+data:
+ dataset: "CelebA_HQ"
+ category: "celeba"
+ image_size: 256
+ channels: 3
+ logit_transform: false
+ uniform_dequantization: false
+ gaussian_dequantization: false
+ random_flip: true
+ rescaled: true
+ num_workers: 32
+
+model:
+ type: "simple"
+ in_channels: 3
+ out_ch: 3
+ ch: 128
+ ch_mult: [1, 1, 2, 2, 4, 4]
+ num_res_blocks: 2
+ attn_resolutions: [16, ]
+ dropout: 0.0
+ var_type: fixedsmall
+ ema_rate: 0.999
+ ema: True
+ resamp_with_conv: True
+
+diffusion:
+ beta_schedule: linear
+ beta_start: 0.0001
+ beta_end: 0.02
+ num_diffusion_timesteps: 1000
+
+sampling:
+ batch_size: 8
+ last_only: True
\ No newline at end of file
diff --git a/case_studies/diffpure/configs/cifar10.yml b/case_studies/diffpure/configs/cifar10.yml
new file mode 100644
index 0000000..9c9e108
--- /dev/null
+++ b/case_studies/diffpure/configs/cifar10.yml
@@ -0,0 +1,65 @@
+data:
+ dataset: "CIFAR10"
+ category: "cifar10"
+ image_size: 32
+ num_channels: 3
+ random_flip: True
+ centered: True
+ uniform_dequantization: False
+
+model:
+ sigma_min: 0.01
+ sigma_max: 50
+ num_scales: 1000
+ beta_min: 0.1
+ beta_max: 20.
+ dropout: 0.1
+
+ name: 'ncsnpp'
+ scale_by_sigma: False
+ ema_rate: 0.9999
+ normalization: 'GroupNorm'
+ nonlinearity: 'swish'
+ nf: 128
+ ch_mult: [1, 2, 2, 2] # (1, 2, 2, 2)
+ num_res_blocks: 8
+ attn_resolutions: [16] # (16,)
+ resamp_with_conv: True
+ conditional: True
+ fir: False
+ fir_kernel: [1, 3, 3, 1]
+ skip_rescale: True
+ resblock_type: 'biggan'
+ progressive: 'none'
+ progressive_input: 'none'
+ progressive_combine: 'sum'
+ attention_type: 'ddpm'
+ init_scale: 0.
+ embedding_type: 'positional'
+ fourier_scale: 16
+ conv_size: 3
+
+training:
+ sde: 'vpsde'
+ continuous: True
+ reduce_mean: True
+ n_iters: 950001
+
+optim:
+ weight_decay: 0
+ optimizer: 'Adam'
+ lr: 0.0002 # 2e-4
+ beta1: 0.9
+ eps: 0.00000001 # 1e-8
+ warmup: 5000
+ grad_clip: 1.
+
+sampling:
+ n_steps_each: 1
+ noise_removal: True
+ probability_flow: False
+ snr: 0.16
+
+ method: 'pc'
+ predictor: 'euler_maruyama'
+ corrector: 'none'
\ No newline at end of file
diff --git a/case_studies/diffpure/configs/imagenet.yml b/case_studies/diffpure/configs/imagenet.yml
new file mode 100644
index 0000000..022579e
--- /dev/null
+++ b/case_studies/diffpure/configs/imagenet.yml
@@ -0,0 +1,23 @@
+data:
+ dataset: "ImageNet"
+ category: "imagenet"
+
+model:
+ attention_resolutions: '32,16,8'
+ class_cond: False
+ diffusion_steps: 1000
+ rescale_timesteps: True
+ timestep_respacing: '1000' # Modify this value to decrease the number of timesteps.
+ image_size: 256
+ learn_sigma: True
+ noise_schedule: 'linear'
+ num_channels: 256
+ num_head_channels: 64
+ num_res_blocks: 2
+ resblock_updown: True
+ use_fp16: True
+ use_scale_shift_norm: True
+
+sampling:
+ batch_size: 8
+ last_only: True
\ No newline at end of file
diff --git a/case_studies/diffpure/data/__init__.py b/case_studies/diffpure/data/__init__.py
new file mode 100644
index 0000000..0fd671b
--- /dev/null
+++ b/case_studies/diffpure/data/__init__.py
@@ -0,0 +1,25 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+from .datasets import imagenet_lmdb_dataset, imagenet_lmdb_dataset_sub, cifar10_dataset_sub
+
+def get_transform(dataset_name, transform_type, base_size=256):
+ from . import datasets
+ if dataset_name == 'celebahq':
+ return datasets.get_transform(dataset_name, transform_type, base_size)
+ elif 'imagenet' in dataset_name:
+ return datasets.get_transform(dataset_name, transform_type, base_size)
+ else:
+ raise NotImplementedError
+
+
+def get_dataset(dataset_name, partition, *args, **kwargs):
+ from . import datasets
+ if dataset_name == 'celebahq':
+ return datasets.CelebAHQDataset(partition, *args, **kwargs)
+ else:
+ raise NotImplementedError
\ No newline at end of file
diff --git a/case_studies/diffpure/data/datasets.py b/case_studies/diffpure/data/datasets.py
new file mode 100644
index 0000000..93cfa91
--- /dev/null
+++ b/case_studies/diffpure/data/datasets.py
@@ -0,0 +1,335 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import os, sys
+
+import io
+import lmdb
+
+import pandas as pd
+import numpy as np
+from PIL import Image
+
+import torch
+import torchvision
+from torch.utils.data import Dataset, Subset
+
+import torchvision.transforms as transforms
+from torchvision.datasets.vision import VisionDataset
+from torchvision.datasets import folder, ImageFolder
+
+
+# ---------------------------------------------------------------------------------------------------
+
+def remove_prefix(s, prefix):
+ if s.startswith(prefix):
+ s = s[len(prefix):]
+ return s
+
+
+class ImageDataset(VisionDataset):
+ """
+    Modified from: https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html#ImageFolder
+    Uses a cached directory listing if available rather than walking the directory.
+ Attributes:
+ classes (list): List of the class names.
+ class_to_idx (dict): Dict with items (class_name, class_index).
+ samples (list): List of (sample path, class_index) tuples
+ targets (list): The class_index value for each image in the dataset
+ """
+
+ def __init__(self, root, loader=folder.default_loader,
+ extensions=folder.IMG_EXTENSIONS, transform=None,
+ target_transform=None, is_valid_file=None, return_path=False):
+ super(ImageDataset, self).__init__(root, transform=transform,
+ target_transform=target_transform)
+ classes, class_to_idx = self._find_classes(self.root)
+ cache = self.root.rstrip('/') + '.txt'
+ if os.path.isfile(cache):
+ print("Using directory list at: %s" % cache)
+ with open(cache) as f:
+ samples = []
+ for line in f:
+ (path, idx) = line.strip().split(';')
+ samples.append((os.path.join(self.root, path), int(idx)))
+ else:
+ print("Walking directory: %s" % self.root)
+ samples = folder.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
+ with open(cache, 'w') as f:
+ for line in samples:
+ path, label = line
+ f.write('%s;%d\n' % (remove_prefix(path, self.root).lstrip('/'), label))
+
+ if len(samples) == 0:
+ raise (RuntimeError(
+ "Found 0 files in subfolders of: " + self.root + "\nSupported extensions are: " + ",".join(extensions)))
+
+ self.loader = loader
+ self.classes = classes
+ self.class_to_idx = class_to_idx
+ self.samples = samples
+ self.return_path = return_path
+
+ def _find_classes(self, dir):
+ """
+ Finds the class folders in a dataset.
+ Ensures:
+ No class is a subdirectory of another.
+ """
+ if sys.version_info >= (3, 5):
+ # Faster and available in Python 3.5 and above
+ classes = [d.name for d in os.scandir(dir) if d.is_dir()]
+ else:
+ classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
+ classes.sort()
+ class_to_idx = {classes[i]: i for i in range(len(classes))}
+ return classes, class_to_idx
+
+ def __getitem__(self, index):
+ path, target = self.samples[index]
+ sample = self.loader(path)
+ if self.transform is not None:
+ sample = self.transform(sample)
+ if self.target_transform is not None:
+ target = self.target_transform(target)
+ if self.return_path:
+ return sample, target, path
+ return sample, target
+
+ def __len__(self):
+ return len(self.samples)
+
+
+# ---------------------------------------------------------------------------------------------------
+
+# get the attributes from celebahq subset
+def make_table(root):
+ filenames = sorted(os.listdir(f'{root}/images'))
+    # rename .png files to .jpg so the names match the entries in list_attr_celeba.txt
+ celebahq = [os.path.basename(f).replace('png', 'jpg')
+ if f.endswith('png') else os.path.basename(f) for f in filenames]
+ attr_gt = pd.read_csv(f'{root}/list_attr_celeba.txt',
+ skiprows=1, delim_whitespace=True, index_col=0)
+ attr_celebahq = attr_gt.reindex(index=celebahq).replace(-1, 0)
+
+ # get the train/test/val partitions
+ partitions = {}
+ with open(f'{root}/list_eval_partition.txt') as f:
+ for line in f:
+ filename, part = line.strip().split(' ')
+ partitions[filename] = int(part)
+ partitions_list = [partitions[fname] for fname in attr_celebahq.index]
+
+ attr_celebahq['partition'] = partitions_list
+ return attr_celebahq
+
+
+###### dataset functions ######
+
+class CelebAHQDataset(Dataset):
+ def __init__(self, partition, attribute, root=None, fraction=None, data_seed=1,
+ chunk_length=None, chunk_idx=-1, **kwargs):
+ if root is None:
+ root = './dataset/celebahq'
+ self.fraction = fraction
+ self.dset = ImageDataset(root, **kwargs)
+
+ # make table
+ attr_celebahq = make_table(root)
+
+ # convert from train/val/test to partition numbers
+ part_to_int = dict(train=0, val=1, test=2)
+
+ def get_partition_indices(part):
+ return np.where(attr_celebahq['partition'] == part_to_int[part])[0]
+
+ partition_idx = get_partition_indices(partition)
+
+ # if we want to further subsample the dataset, just subsample
+ # partition_idx and Subset() once
+ if fraction is not None:
+ print("Using a fraction of the original dataset")
+ print("The original dataset has length %d" % len(partition_idx))
+ new_length = int(fraction / 100 * len(partition_idx))
+ rng = np.random.RandomState(data_seed)
+ new_indices = rng.choice(partition_idx, new_length, replace=False)
+ partition_idx = new_indices
+ print("The subsetted dataset has length %d" % len(partition_idx))
+
+ elif chunk_length is not None and chunk_idx > 0:
+ print(f"Using a fraction of the original dataset with chunk_length: {chunk_length}, chunk_idx: {chunk_idx}")
+ print("The original dataset has length %d" % len(partition_idx))
+ new_indices = partition_idx[chunk_length * chunk_idx: chunk_length * (chunk_idx + 1)]
+ partition_idx = new_indices
+ print("The subsetted dataset has length %d" % len(partition_idx))
+
+ self.dset = Subset(self.dset, partition_idx)
+ attr_subset = attr_celebahq.iloc[partition_idx]
+ self.attr_subset = attr_subset[attribute]
+ print('attribute freq: %0.4f (%d / %d)' % (self.attr_subset.mean(),
+ self.attr_subset.sum(),
+ len(self.attr_subset)))
+
+ def __len__(self):
+ return len(self.dset)
+
+ def __getitem__(self, idx):
+ data = self.dset[idx]
+ # first element is the class, replace it
+ label = self.attr_subset[idx]
+ return (data[0], label, *data[2:])
+
+
+###### transformation functions ######
+
+def get_transform(dataset, transform_type, base_size=256):
+ if dataset.lower() == "celebahq":
+ assert base_size == 256, base_size
+
+ if transform_type == 'imtrain':
+ return transforms.Compose([
+ transforms.Resize(base_size),
+ transforms.RandomHorizontalFlip(p=0.5),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+ elif transform_type == 'imval':
+ return transforms.Compose([
+ transforms.Resize(base_size),
+ # no horizontal flip for standard validation
+ transforms.ToTensor(),
+ # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+ elif transform_type == 'imcolor':
+ return transforms.Compose([
+ transforms.Resize(base_size),
+ transforms.RandomHorizontalFlip(p=0.5),
+ transforms.ColorJitter(brightness=.05, contrast=.05,
+ saturation=.05, hue=.05),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+ ])
+ elif transform_type == 'imcrop':
+ return transforms.Compose([
+ # 1024 + 32, or 256 + 8
+ transforms.Resize(int(1.03125 * base_size)),
+ transforms.RandomCrop(base_size),
+ transforms.RandomHorizontalFlip(p=0.5),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+ ])
+ elif transform_type == 'tensorbase':
+ # dummy transform for compatibility with other datasets
+ return transforms.Lambda(lambda x: x)
+ else:
+ raise NotImplementedError
+
+ elif "imagenet" in dataset.lower():
+ assert base_size == 224, base_size
+
+ if transform_type == 'imtrain':
+ return transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(base_size),
+ transforms.RandomHorizontalFlip(p=0.5),
+ transforms.ToTensor(),
+ # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+ ])
+ elif transform_type == 'imval':
+ return transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(base_size),
+ # no horizontal flip for standard validation
+ transforms.ToTensor(),
+ # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+ ])
+ else:
+ raise NotImplementedError
+
+ else:
+ raise NotImplementedError
+
+
+################################################################################
+# ImageNet - LMDB
+###############################################################################
+
+def lmdb_loader(path, lmdb_data):
+ # In-memory binary streams
+ with lmdb_data.begin(write=False, buffers=True) as txn:
+ bytedata = txn.get(path.encode('ascii'))
+ img = Image.open(io.BytesIO(bytedata))
+ return img.convert('RGB')
+
+
+def imagenet_lmdb_dataset(
+ root, transform=None, target_transform=None,
+ loader=lmdb_loader):
+ """
+ You can create this dataloader using:
+ train_data = imagenet_lmdb_dataset(traindir, transform=train_transform)
+ valid_data = imagenet_lmdb_dataset(validdir, transform=val_transform)
+ """
+
+ if root.endswith('/'):
+ root = root[:-1]
+ pt_path = os.path.join(
+ root + '_faster_imagefolder.lmdb.pt')
+ lmdb_path = os.path.join(
+ root + '_faster_imagefolder.lmdb')
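+    # Reuse the cached ImageFolder index (.pt) and LMDB database if they
+    # already exist; otherwise build both from the raw image folder once.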
+ if os.path.isfile(pt_path) and os.path.isdir(lmdb_path):
+ print('Loading pt {} and lmdb {}'.format(pt_path, lmdb_path))
+ data_set = torch.load(pt_path)
+ else:
+ data_set = ImageFolder(
+ root, None, None, None)
+ torch.save(data_set, pt_path, pickle_protocol=4)
+ print('Saving pt to {}'.format(pt_path))
+ print('Building lmdb to {}'.format(lmdb_path))
+ env = lmdb.open(lmdb_path, map_size=1e12)
+ with env.begin(write=True) as txn:
+ for path, class_index in data_set.imgs:
+ with open(path, 'rb') as f:
+ data = f.read()
+ txn.put(path.encode('ascii'), data)
+ data_set.lmdb_data = lmdb.open(
+ lmdb_path, readonly=True, max_readers=1, lock=False, readahead=False,
+ meminit=False)
+ # reset transform and target_transform
+ data_set.samples = data_set.imgs
+ data_set.transform = transform
+ data_set.target_transform = target_transform
+ data_set.loader = lambda path: loader(path, data_set.lmdb_data)
+
+ return data_set
+
+
+def imagenet_lmdb_dataset_sub(
+ root, transform=None, target_transform=None,
+ loader=lmdb_loader, num_sub=-1, data_seed=0):
+ data_set = imagenet_lmdb_dataset(
+ root, transform=transform, target_transform=target_transform,
+ loader=loader)
+
+ if num_sub > 0:
+ partition_idx = np.random.RandomState(data_seed).choice(len(data_set), num_sub, replace=False)
+ data_set = Subset(data_set, partition_idx)
+
+ return data_set
+
+
+################################################################################
+# CIFAR-10
+###############################################################################
+
+def cifar10_dataset_sub(root, transform=None, num_sub=-1, data_seed=0):
+ val_data = torchvision.datasets.CIFAR10(root=root, transform=transform, download=True, train=False)
+ if num_sub > 0:
+ partition_idx = np.random.RandomState(data_seed).choice(len(val_data), min(len(val_data), num_sub),
+ replace=False)
+ val_data = Subset(val_data, partition_idx)
+ return val_data
diff --git a/case_studies/diffpure/ddpm/LICENSE_UNET_DDPM b/case_studies/diffpure/ddpm/LICENSE_UNET_DDPM
new file mode 100644
index 0000000..87e6566
--- /dev/null
+++ b/case_studies/diffpure/ddpm/LICENSE_UNET_DDPM
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Ermon Group
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/case_studies/diffpure/ddpm/unet_ddpm.py b/case_studies/diffpure/ddpm/unet_ddpm.py
new file mode 100644
index 0000000..5c18b2c
--- /dev/null
+++ b/case_studies/diffpure/ddpm/unet_ddpm.py
@@ -0,0 +1,345 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/ermongroup/SDEdit/blob/main/models/diffusion.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_UNET_DDPM).
+# ---------------------------------------------------------------
+
+import math
+import torch
+import torch.nn as nn
+
+
+def get_timestep_embedding(timesteps, embedding_dim):
+ """
+    Build sinusoidal timestep embeddings, as used in Denoising Diffusion
+    Probabilistic Models (adapted from Fairseq). This matches the
+    implementation in tensor2tensor, but differs slightly from the
+    description in Section 3.5 of "Attention Is All You Need".
+ """
+ assert len(timesteps.shape) == 1
+
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
+ emb = emb.to(device=timesteps.device)
+ emb = timesteps.float()[:, None] * emb[None, :]
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+ if embedding_dim % 2 == 1: # zero pad
+ emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
+ return emb
+
+
+def nonlinearity(x):
+ # swish
+ return x * torch.sigmoid(x)
+
+
+def Normalize(in_channels):
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+
+
+class Upsample(nn.Module):
+ def __init__(self, in_channels, with_conv):
+ super().__init__()
+ self.with_conv = with_conv
+ if self.with_conv:
+ self.conv = torch.nn.Conv2d(in_channels,
+ in_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+
+ def forward(self, x):
+ x = torch.nn.functional.interpolate(
+ x, scale_factor=2.0, mode="nearest")
+ if self.with_conv:
+ x = self.conv(x)
+ return x
+
+
+class Downsample(nn.Module):
+ def __init__(self, in_channels, with_conv):
+ super().__init__()
+ self.with_conv = with_conv
+ if self.with_conv:
+ # no asymmetric padding in torch conv, must do it ourselves
+ self.conv = torch.nn.Conv2d(in_channels,
+ in_channels,
+ kernel_size=3,
+ stride=2,
+ padding=0)
+
+ def forward(self, x):
+ if self.with_conv:
+ pad = (0, 1, 0, 1)
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
+ x = self.conv(x)
+ else:
+ x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
+ return x
+
+
+class ResnetBlock(nn.Module):
+ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
+ dropout, temb_channels=512):
+ super().__init__()
+ self.in_channels = in_channels
+ out_channels = in_channels if out_channels is None else out_channels
+ self.out_channels = out_channels
+ self.use_conv_shortcut = conv_shortcut
+
+ self.norm1 = Normalize(in_channels)
+ self.conv1 = torch.nn.Conv2d(in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+ self.temb_proj = torch.nn.Linear(temb_channels,
+ out_channels)
+ self.norm2 = Normalize(out_channels)
+ self.dropout = torch.nn.Dropout(dropout)
+ self.conv2 = torch.nn.Conv2d(out_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+ if self.in_channels != self.out_channels:
+ if self.use_conv_shortcut:
+ self.conv_shortcut = torch.nn.Conv2d(in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+ else:
+ self.nin_shortcut = torch.nn.Conv2d(in_channels,
+ out_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+
+ def forward(self, x, temb):
+ h = x
+ h = self.norm1(h)
+ h = nonlinearity(h)
+ h = self.conv1(h)
+
+ h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
+
+ h = self.norm2(h)
+ h = nonlinearity(h)
+ h = self.dropout(h)
+ h = self.conv2(h)
+
+ if self.in_channels != self.out_channels:
+ if self.use_conv_shortcut:
+ x = self.conv_shortcut(x)
+ else:
+ x = self.nin_shortcut(x)
+
+ return x + h
+
+
+class AttnBlock(nn.Module):
+ def __init__(self, in_channels):
+ super().__init__()
+ self.in_channels = in_channels
+
+ self.norm = Normalize(in_channels)
+ self.q = torch.nn.Conv2d(in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.k = torch.nn.Conv2d(in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.v = torch.nn.Conv2d(in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.proj_out = torch.nn.Conv2d(in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+
+ def forward(self, x):
+ h_ = x
+ h_ = self.norm(h_)
+ q = self.q(h_)
+ k = self.k(h_)
+ v = self.v(h_)
+
+ # compute attention
+ b, c, h, w = q.shape
+ q = q.reshape(b, c, h * w)
+ q = q.permute(0, 2, 1) # b,hw,c
+ k = k.reshape(b, c, h * w) # b,c,hw
+ w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
+ w_ = w_ * (int(c) ** (-0.5))
+ w_ = torch.nn.functional.softmax(w_, dim=2)
+
+ # attend to values
+ v = v.reshape(b, c, h * w)
+ w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
+ # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
+ h_ = torch.bmm(v, w_)
+ h_ = h_.reshape(b, c, h, w)
+
+ h_ = self.proj_out(h_)
+
+ return x + h_
+
+
+class Model(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ ch, out_ch, ch_mult = config.model.ch, config.model.out_ch, tuple(config.model.ch_mult)
+ num_res_blocks = config.model.num_res_blocks
+ attn_resolutions = config.model.attn_resolutions
+ dropout = config.model.dropout
+ in_channels = config.model.in_channels
+ resolution = config.data.image_size
+ resamp_with_conv = config.model.resamp_with_conv
+
+ self.ch = ch
+ self.temb_ch = self.ch * 4
+ self.num_resolutions = len(ch_mult)
+ self.num_res_blocks = num_res_blocks
+ self.resolution = resolution
+ self.in_channels = in_channels
+
+ # timestep embedding
+ self.temb = nn.Module()
+ self.temb.dense = nn.ModuleList([
+ torch.nn.Linear(self.ch,
+ self.temb_ch),
+ torch.nn.Linear(self.temb_ch,
+ self.temb_ch),
+ ])
+
+ # downsampling
+ self.conv_in = torch.nn.Conv2d(in_channels,
+ self.ch,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+
+ curr_res = resolution
+ in_ch_mult = (1,) + ch_mult
+ self.down = nn.ModuleList()
+ block_in = None
+ for i_level in range(self.num_resolutions):
+ block = nn.ModuleList()
+ attn = nn.ModuleList()
+ block_in = ch * in_ch_mult[i_level]
+ block_out = ch * ch_mult[i_level]
+ for i_block in range(self.num_res_blocks):
+ block.append(ResnetBlock(in_channels=block_in,
+ out_channels=block_out,
+ temb_channels=self.temb_ch,
+ dropout=dropout))
+ block_in = block_out
+ if curr_res in attn_resolutions:
+ attn.append(AttnBlock(block_in))
+ down = nn.Module()
+ down.block = block
+ down.attn = attn
+ if i_level != self.num_resolutions - 1:
+ down.downsample = Downsample(block_in, resamp_with_conv)
+ curr_res = curr_res // 2
+ self.down.append(down)
+
+ # middle
+ self.mid = nn.Module()
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
+ out_channels=block_in,
+ temb_channels=self.temb_ch,
+ dropout=dropout)
+ self.mid.attn_1 = AttnBlock(block_in)
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
+ out_channels=block_in,
+ temb_channels=self.temb_ch,
+ dropout=dropout)
+
+ # upsampling
+ self.up = nn.ModuleList()
+ for i_level in reversed(range(self.num_resolutions)):
+ block = nn.ModuleList()
+ attn = nn.ModuleList()
+ block_out = ch * ch_mult[i_level]
+ skip_in = ch * ch_mult[i_level]
+ for i_block in range(self.num_res_blocks + 1):
+ if i_block == self.num_res_blocks:
+ skip_in = ch * in_ch_mult[i_level]
+ block.append(ResnetBlock(in_channels=block_in + skip_in,
+ out_channels=block_out,
+ temb_channels=self.temb_ch,
+ dropout=dropout))
+ block_in = block_out
+ if curr_res in attn_resolutions:
+ attn.append(AttnBlock(block_in))
+ up = nn.Module()
+ up.block = block
+ up.attn = attn
+ if i_level != 0:
+ up.upsample = Upsample(block_in, resamp_with_conv)
+ curr_res = curr_res * 2
+ self.up.insert(0, up) # prepend to get consistent order
+
+ # end
+ self.norm_out = Normalize(block_in)
+ self.conv_out = torch.nn.Conv2d(block_in,
+ out_ch,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+
+ def forward(self, x, t):
+ assert x.shape[2] == x.shape[3] == self.resolution
+
+ # timestep embedding
+ temb = get_timestep_embedding(t, self.ch)
+ temb = self.temb.dense[0](temb)
+ temb = nonlinearity(temb)
+ temb = self.temb.dense[1](temb)
+
+ # downsampling
+ hs = [self.conv_in(x)]
+ for i_level in range(self.num_resolutions):
+ for i_block in range(self.num_res_blocks):
+ h = self.down[i_level].block[i_block](hs[-1], temb)
+ if len(self.down[i_level].attn) > 0:
+ h = self.down[i_level].attn[i_block](h)
+ hs.append(h)
+ if i_level != self.num_resolutions - 1:
+ hs.append(self.down[i_level].downsample(hs[-1]))
+
+ # middle
+ h = hs[-1]
+ h = self.mid.block_1(h, temb)
+ h = self.mid.attn_1(h)
+ h = self.mid.block_2(h, temb)
+
+ # upsampling
+ for i_level in reversed(range(self.num_resolutions)):
+ for i_block in range(self.num_res_blocks + 1):
+ h = self.up[i_level].block[i_block](
+ torch.cat([h, hs.pop()], dim=1), temb)
+ if len(self.up[i_level].attn) > 0:
+ h = self.up[i_level].attn[i_block](h)
+ if i_level != 0:
+ h = self.up[i_level].upsample(h)
+
+ # end
+ h = self.norm_out(h)
+ h = nonlinearity(h)
+ h = self.conv_out(h)
+ return h
diff --git a/case_studies/diffpure/diffpure.Dockerfile b/case_studies/diffpure/diffpure.Dockerfile
new file mode 100644
index 0000000..6c3aa50
--- /dev/null
+++ b/case_studies/diffpure/diffpure.Dockerfile
@@ -0,0 +1,97 @@
+FROM nvidia/cuda:11.0.3-devel-ubuntu20.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Set the time zone correctly
+ENV TZ=Europe/Berlin
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+ENV SHELL /bin/bash
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cmake \
+ git \
+ curl \
+ vim \
+ ca-certificates \
+ libjpeg-dev \
+ tmux \
+ nano \
+ xterm \
+ rsync \
+ zip \
+ zsh \
+ htop \
+ screen \
+ zlib1g-dev \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ libpng-dev \
+ libglib2.0-0 \
+ openssh-server \
+ sudo \
+ build-essential \
+ autoconf \
+ automake \
+ libtool \
+ pkg-config \
+ ca-certificates \
+ wget \
+ git \
+ curl \
+ ca-certificates \
+ libjpeg-dev \
+ libpng-dev \
+ python \
+ python3-dev \
+ python3-pip \
+ python3-setuptools \
+ zlib1g-dev \
+ swig \
+ cmake \
+ vim \
+ locales \
+ locales-all \
+ screen \
+ zip \
+ unzip
+RUN apt-get clean
+RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+ENV LC_ALL en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US.UTF-8
+
+RUN cd /usr/local/bin && \
+ ln -s /usr/bin/python3 python && \
+ ln -s /usr/bin/pip3 pip && \
+ pip install --upgrade pip setuptools
+
+RUN pip install numpy==1.19.4 \
+ pyyaml==5.3.1 \
+ wheel==0.34.2 \
+ scipy==1.5.2 \
+ torch==1.7.1 \
+ torchvision==0.8.2 \
+ pillow==7.2.0 \
+ matplotlib==3.3.0 \
+ tqdm==4.56.1 \
+ tensorboardX==2.0 \
+ seaborn==0.10.1 \
+ pandas==1.2.0 \
+ requests==2.25.0 \
+ xvfbwrapper==0.2.9 \
+ torchdiffeq==0.2.1 \
+ timm==0.5.4 \
+ lmdb \
+ Ninja \
+ foolbox \
+ torchsde \
+ git+https://github.com/RobustBench/robustbench.git \
+ numpy \
+ sklearn \
+ git+https://github.com/fra31/auto-attack \
+ tensorflow
\ No newline at end of file
diff --git a/case_studies/diffpure/dp_utils.py b/case_studies/diffpure/dp_utils.py
new file mode 100644
index 0000000..a4e717e
--- /dev/null
+++ b/case_studies/diffpure/dp_utils.py
@@ -0,0 +1,301 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import sys
+import argparse
+from typing import Any
+
+import torch
+import torch.nn as nn
+import torchvision.models as models
+from torch.utils.data import DataLoader
+import torchvision.transforms as transforms
+
+from robustbench import load_model
+import data
+
+
+def compute_n_params(model, return_str=True):
+ tot = 0
+ for p in model.parameters():
+ w = 1
+ for x in p.shape:
+ w *= x
+ tot += w
+ if return_str:
+ if tot >= 1e6:
+ return '{:.1f}M'.format(tot / 1e6)
+ else:
+ return '{:.1f}K'.format(tot / 1e3)
+ else:
+ return tot
+
+
+class Logger(object):
+ """
+ Redirect stderr to stdout, optionally print stdout to a file,
+ and optionally force flushing on both stdout and the file.
+ """
+
+ def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
+ self.file = None
+
+ if file_name is not None:
+ self.file = open(file_name, file_mode)
+
+ self.should_flush = should_flush
+ self.stdout = sys.stdout
+ self.stderr = sys.stderr
+
+ sys.stdout = self
+ sys.stderr = self
+
+ def __enter__(self) -> "Logger":
+ return self
+
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+ self.close()
+
+ def write(self, text: str) -> None:
+ """Write text to stdout (and a file) and optionally flush."""
+ if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
+ return
+
+ if self.file is not None:
+ self.file.write(text)
+
+ self.stdout.write(text)
+
+ if self.should_flush:
+ self.flush()
+
+ def flush(self) -> None:
+ """Flush written text to both stdout and a file, if open."""
+ if self.file is not None:
+ self.file.flush()
+
+ self.stdout.flush()
+
+ def close(self) -> None:
+ """Flush, close possible files, and remove stdout/stderr mirroring."""
+ self.flush()
+
+ # if using multiple loggers, prevent closing in wrong order
+ if sys.stdout is self:
+ sys.stdout = self.stdout
+ if sys.stderr is self:
+ sys.stderr = self.stderr
+
+ if self.file is not None:
+ self.file.close()
+
+
+def dict2namespace(config):
+ namespace = argparse.Namespace()
+ for key, value in config.items():
+ if isinstance(value, dict):
+ new_value = dict2namespace(value)
+ else:
+ new_value = value
+ setattr(namespace, key, new_value)
+ return namespace
+
+
+def str2bool(v):
+ if isinstance(v, bool):
+ return v
+ if v.lower() in ('yes', 'true', 't', 'y', '1'):
+ return True
+ elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+ return False
+ else:
+ raise argparse.ArgumentTypeError('Boolean value expected.')
+
+
+def update_state_dict(state_dict, idx_start=9):
+
+ from collections import OrderedDict
+ new_state_dict = OrderedDict()
+ for k, v in state_dict.items():
+ name = k[idx_start:] # remove 'module.0.' of dataparallel
+ new_state_dict[name]=v
+
+ return new_state_dict
+
+
+# ------------------------------------------------------------------------
+def get_accuracy(model, x_orig, y_orig, bs=64, device=torch.device('cuda:0')):
+ n_batches = x_orig.shape[0] // bs
+ acc = 0.
+ for counter in range(n_batches):
+ x = x_orig[counter * bs:min((counter + 1) * bs, x_orig.shape[0])].clone().to(device)
+ y = y_orig[counter * bs:min((counter + 1) * bs, x_orig.shape[0])].clone().to(device)
+ output = model(x)
+ acc += (output.max(1)[1] == y).float().sum()
+
+ if isinstance(acc, torch.Tensor):
+ acc = acc.item()
+
+ return acc / x_orig.shape[0]
+
+
+def get_image_classifier(classifier_name):
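+    # Torchvision ImageNet models expect normalized inputs, so they get wrapped
+    # with the standard ImageNet mean/std below; the CIFAR-10 models are used
+    # as-is since they already take pixels in [0, 1].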
+ class _Wrapper_ResNet(nn.Module):
+ def __init__(self, resnet):
+ super().__init__()
+ self.resnet = resnet
+ self.mu = torch.Tensor([0.485, 0.456, 0.406]).float().view(3, 1, 1)
+ self.sigma = torch.Tensor([0.229, 0.224, 0.225]).float().view(3, 1, 1)
+
+ def forward(self, x):
+ x = (x - self.mu.to(x.device)) / self.sigma.to(x.device)
+ return self.resnet(x)
+
+ if 'imagenet' in classifier_name:
+ if 'resnet18' in classifier_name:
+ print('using imagenet resnet18...')
+ model = models.resnet18(pretrained=True).eval()
+ elif 'resnet50' in classifier_name:
+ print('using imagenet resnet50...')
+ model = models.resnet50(pretrained=True).eval()
+ elif 'resnet101' in classifier_name:
+ print('using imagenet resnet101...')
+ model = models.resnet101(pretrained=True).eval()
+ elif 'wideresnet-50-2' in classifier_name:
+ print('using imagenet wideresnet-50-2...')
+ model = models.wide_resnet50_2(pretrained=True).eval()
+ elif 'deit-s' in classifier_name:
+ print('using imagenet deit-s...')
+ model = torch.hub.load('facebookresearch/deit:main', 'deit_small_patch16_224', pretrained=True).eval()
+ else:
+ raise NotImplementedError(f'unknown {classifier_name}')
+
+ wrapper_resnet = _Wrapper_ResNet(model)
+
+ elif 'cifar10' in classifier_name:
+ if 'wideresnet-28-10' in classifier_name:
+ print('using cifar10 wideresnet-28-10...')
+ model = load_model(model_name='Standard', dataset='cifar10', threat_model='Linf') # pixel in [0, 1]
+
+ elif 'wrn-28-10-at0' in classifier_name:
+ print('using cifar10 wrn-28-10-at0...')
+ model = load_model(model_name='Gowal2021Improving_28_10_ddpm_100m', dataset='cifar10',
+ threat_model='Linf') # pixel in [0, 1]
+
+ elif 'wrn-28-10-at1' in classifier_name:
+ print('using cifar10 wrn-28-10-at1...')
+ model = load_model(model_name='Gowal2020Uncovering_28_10_extra', dataset='cifar10',
+ threat_model='Linf') # pixel in [0, 1]
+
+ elif 'wrn-70-16-at0' in classifier_name:
+ print('using cifar10 wrn-70-16-at0...')
+ model = load_model(model_name='Gowal2021Improving_70_16_ddpm_100m', dataset='cifar10',
+ threat_model='Linf') # pixel in [0, 1]
+
+ elif 'wrn-70-16-at1' in classifier_name:
+ print('using cifar10 wrn-70-16-at1...')
+ model = load_model(model_name='Rebuffi2021Fixing_70_16_cutmix_extra', dataset='cifar10',
+ threat_model='Linf') # pixel in [0, 1]
+
+ elif 'wrn-70-16-L2-at1' in classifier_name:
+ print('using cifar10 wrn-70-16-L2-at1...')
+ model = load_model(model_name='Rebuffi2021Fixing_70_16_cutmix_extra', dataset='cifar10',
+ threat_model='L2') # pixel in [0, 1]
+
+ elif 'wideresnet-70-16' in classifier_name:
+ print('using cifar10 wideresnet-70-16 (dm_wrn-70-16)...')
+ from robustbench.model_zoo.architectures.dm_wide_resnet import DMWideResNet, Swish
+ model = DMWideResNet(num_classes=10, depth=70, width=16, activation_fn=Swish) # pixel in [0, 1]
+
+ model_path = 'checkpoints/diffpure/cifar10/wresnet-76-10/weights-best.pt'
+ print(f"=> loading wideresnet-70-16 checkpoint '{model_path}'")
+ model.load_state_dict(update_state_dict(torch.load(model_path)['model_state_dict']))
+ model.eval()
+ print(f"=> loaded wideresnet-70-16 checkpoint")
+
+ elif 'resnet-50' in classifier_name:
+ print('using cifar10 resnet-50...')
+ from classifiers.cifar10_resnet import ResNet50
+ model = ResNet50() # pixel in [0, 1]
+
+ model_path = 'checkpoints/diffpure/cifar10/resnet-50/weights.pt'
+ print(f"=> loading resnet-50 checkpoint '{model_path}'")
+ model.load_state_dict(update_state_dict(torch.load(model_path), idx_start=7))
+ model.eval()
+ print(f"=> loaded resnet-50 checkpoint")
+
+ elif 'wrn-70-16-dropout' in classifier_name:
+ print('using cifar10 wrn-70-16-dropout (standard wrn-70-16-dropout)...')
+ from classifiers.cifar10_resnet import WideResNet_70_16_dropout
+ model = WideResNet_70_16_dropout() # pixel in [0, 1]
+
+ model_path = 'checkpoints/diffpure/cifar10/wrn-70-16-dropout/weights.pt'
+ print(f"=> loading wrn-70-16-dropout checkpoint '{model_path}'")
+ model.load_state_dict(update_state_dict(torch.load(model_path), idx_start=7))
+ model.eval()
+ print(f"=> loaded wrn-70-16-dropout checkpoint")
+
+ else:
+ raise NotImplementedError(f'unknown {classifier_name}')
+
+ wrapper_resnet = model
+
+ elif 'celebahq' in classifier_name:
+ attribute = classifier_name.split('__')[-1] # `celebahq__Smiling`
+ ckpt_path = f'checkpoints/diffpure/celebahq/{attribute}/net_best.pth'
+ from classifiers.attribute_classifier import ClassifierWrapper
+ model = ClassifierWrapper(attribute, ckpt_path=ckpt_path)
+ wrapper_resnet = model
+ else:
+ raise NotImplementedError(f'unknown {classifier_name}')
+
+ return wrapper_resnet
+
+
+def load_data(args, adv_batch_size, binarization_test=False):
+ if 'imagenet' in args.domain:
+ val_dir = './dataset/imagenet_lmdb/val' # using imagenet lmdb data
+ val_transform = data.get_transform(args.domain, 'imval', base_size=224)
+ val_data = data.imagenet_lmdb_dataset_sub(val_dir, transform=val_transform,
+ num_sub=args.num_sub, data_seed=args.data_seed)
+ n_samples = len(val_data)
+ val_loader = DataLoader(val_data, batch_size=n_samples, shuffle=False, pin_memory=True, num_workers=4)
+ x_val, y_val = next(iter(val_loader))
+ elif 'cifar10' in args.domain:
+ data_dir = './dataset'
+ transform = transforms.Compose([transforms.ToTensor()])
+
+ if binarization_test:
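+            # For the binarization test, always load the same 10k-sample
+            # validation subset and slice out the requested index range.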
+ val_data = data.cifar10_dataset_sub(
+ data_dir, transform=transform, num_sub=10000, data_seed=args.data_seed)
+ val_loader = DataLoader(val_data, batch_size=args.test_samples_idx_end, shuffle=False)
+ x_val, y_val = next(iter(val_loader))
+ x_val = x_val[args.test_samples_idx_start:args.test_samples_idx_end]
+ y_val = y_val[args.test_samples_idx_start:args.test_samples_idx_end]
+ else:
+ val_data = data.cifar10_dataset_sub(data_dir, transform=transform,
+ num_sub=args.num_sub, data_seed=args.data_seed)
+ n_samples = len(val_data)
+ val_loader = DataLoader(val_data, batch_size=n_samples, shuffle=False, pin_memory=True, num_workers=4)
+ x_val, y_val = next(iter(val_loader))
+ elif 'celebahq' in args.domain:
+ data_dir = './dataset/celebahq'
+ attribute = args.classifier_name.split('__')[-1] # `celebahq__Smiling`
+ val_transform = data.get_transform('celebahq', 'imval')
+ clean_dset = data.get_dataset('celebahq', 'val', attribute, root=data_dir, transform=val_transform,
+ fraction=2, data_seed=args.data_seed) # data_seed randomizes here
+ loader = DataLoader(clean_dset, batch_size=adv_batch_size, shuffle=False,
+ pin_memory=True, num_workers=4)
+ x_val, y_val = next(iter(loader)) # [0, 1], 256x256
+ else:
+ raise NotImplementedError(f'Unknown domain: {args.domain}!')
+
+ print(f'x_val shape: {x_val.shape}')
+ x_val, y_val = x_val.contiguous().requires_grad_(True), y_val.contiguous()
+ print(f'x (min, max): ({x_val.min()}, {x_val.max()})')
+
+ return x_val, y_val
diff --git a/case_studies/diffpure/eval_sde_adv.py b/case_studies/diffpure/eval_sde_adv.py
new file mode 100644
index 0000000..02d1a48
--- /dev/null
+++ b/case_studies/diffpure/eval_sde_adv.py
@@ -0,0 +1,570 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import argparse
+import logging
+import yaml
+import os
+import time
+
+import random
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from autoattack import AutoAttack
+from attacks.autopgd import fix_autoattack as fix_autoattack_autopgd
+
+from active_tests import decision_boundary_binarization as dbb
+from argparse_utils import DecisionBoundaryBinarizationSettings
+from stadv_eot.attacks import StAdvAttack
+
+import dp_utils
+import utils
+from dp_utils import str2bool, get_accuracy, get_image_classifier, load_data
+
+from runners.diffpure_ddpm import Diffusion
+from runners.diffpure_guided import GuidedDiffusion
+from runners.diffpure_sde import RevGuidedDiffusion
+from runners.diffpure_ode import OdeGuidedDiffusion
+from runners.diffpure_ldsde import LDGuidedDiffusion
+
+
+def patch_robustbench_models():
+ import robustbench.model_zoo.architectures as arch
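+
+  # The binarization test needs access to each classifier's penultimate
+  # features, so the forward passes below add optional features_only /
+  # features_and_logits flags before being monkey-patched into RobustBench.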
+ def wide_resnet_forward(self, x, features_only=False, features_and_logits=False):
+ out = self.conv1(x)
+ out = self.block1(out)
+ out = self.block2(out)
+ out = self.block3(out)
+ out = self.relu(self.bn1(out))
+ out = F.avg_pool2d(out, 8)
+ out = out.view(-1, self.nChannels)
+ if features_only:
+ return out
+ l = self.fc(out)
+ if features_and_logits:
+ return out, l
+ return l
+
+ def robust_wide_resnet_forward(self, x, features_only=False, features_and_logits=False):
+ out = self.stem_conv(x)
+ for i, block in enumerate(self.blocks):
+ out = block(out)
+ out = self.relu(self.bn1(out))
+ out = self.global_pooling(out)
+ out = out.view(-1, self.fc_size)
+ if features_only:
+ return out
+ l = self.fc(out)
+ if features_and_logits:
+ return out, l
+    return l
+
+ def cifar_resnext_forward(self, x, features_only=False, features_and_logits=False):
+ x = self.conv_1_3x3(x)
+ x = F.relu(self.bn_1(x), inplace=True)
+ x = self.stage_1(x)
+ x = self.stage_2(x)
+ x = self.stage_3(x)
+ x = self.avgpool(x)
+ x = x.view(x.size(0), -1)
+ if features_only:
+ return x
+ l = self.classifier(x)
+ if features_and_logits:
+ return x, l
+ return l
+
+  def preact_resnet_forward(self, x, features_only=False, features_and_logits=False):
+ out = self.conv1(x)
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ if self.bn_before_fc:
+ out = F.relu(self.bn(out))
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ if features_only:
+ return out
+ l = self.linear(out)
+ if features_and_logits:
+ return out, l
+ return l
+
+ def resnet_forward(self, x, features_only=False, features_and_logits=False):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ if features_only:
+ return out
+ l = self.linear(out)
+ if features_and_logits:
+      return out, l
+ return l
+
+ def dm_preact_resnet_forward(self, x, features_only=False, features_and_logits=False):
+ if self.padding > 0:
+ x = F.pad(x, (self.padding,) * 4)
+ out = (x - self.mean) / self.std
+ out = self.conv_2d(out)
+ out = self.layer_0(out)
+ out = self.layer_1(out)
+ out = self.layer_2(out)
+ out = self.layer_3(out)
+ out = self.relu(self.batchnorm(out))
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ if features_only:
+ return out
+ l = self.logits(out)
+ if features_and_logits:
+ return out, l
+ return l
+
+ def dm_resnet_forward(self, x, features_only=False, features_and_logits=False):
+ if self.padding > 0:
+ x = F.pad(x, (self.padding,) * 4)
+ out = (x - self.mean) / self.std
+ out = self.init_conv(out)
+ out = self.layer(out)
+ out = self.relu(self.batchnorm(out))
+ out = F.avg_pool2d(out, 8)
+ out = out.view(-1, self.num_channels)
+ if features_only:
+ return out
+ l = self.logits(out)
+ if features_and_logits:
+ return out, l
+ return l
+
+ arch.wide_resnet.WideResNet.forward = wide_resnet_forward
+ arch.robust_wide_resnet.RobustWideResNet.forward = robust_wide_resnet_forward
+ arch.resnext.CifarResNeXt.forward = cifar_resnext_forward
+  arch.resnet.PreActResNet.forward = preact_resnet_forward
+ arch.resnet.ResNet.forward = resnet_forward
+ arch.dm_wide_resnet.DMPreActResNet.forward = dm_preact_resnet_forward
+ arch.dm_wide_resnet.DMWideResNet.forward = dm_resnet_forward
+
+ print("Patched RobustBench classifiers.")
+
+
+class SDE_Adv_Model(nn.Module):
+ def __init__(self, args, config):
+ super().__init__()
+ self.args = args
+
+ # image classifier
+ self.classifier = get_image_classifier(args.classifier_name).to(config.device)
+
+ # diffusion model
+ print(f'diffusion_type: {args.diffusion_type}')
+ if args.diffusion_type == 'ddpm':
+ self.runner = GuidedDiffusion(args, config, device=config.device)
+ elif args.diffusion_type == 'sde':
+ self.runner = RevGuidedDiffusion(args, config, device=config.device)
+ elif args.diffusion_type == 'ode':
+ self.runner = OdeGuidedDiffusion(args, config, device=config.device)
+ elif args.diffusion_type == 'ldsde':
+ self.runner = LDGuidedDiffusion(args, config, device=config.device)
+ elif args.diffusion_type == 'celebahq-ddpm':
+ self.runner = Diffusion(args, config, device=config.device)
+ else:
+ raise NotImplementedError('unknown diffusion type')
+
+ self.register_buffer('counter', torch.zeros(1, device=config.device))
+ self.tag = None
+
+ def reset_counter(self):
+        self.counter = torch.zeros(1, dtype=torch.int, device=self.counter.device)
+
+ def set_tag(self, tag=None):
+ self.tag = tag
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ counter = self.counter.item()
+ if counter % 10 == 0:
+ print(f'diffusion times: {counter}')
+
+ # imagenet [3, 224, 224] -> [3, 256, 256] -> [3, 224, 224]
+ if 'imagenet' in self.args.domain:
+ x = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
+
+ start_time = time.time()
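+        # The diffusion purifier operates on inputs in [-1, 1]; rescale before
+        # purification and map the purified result back to [0, 1] for the
+        # classifier.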
+ x_re = self.runner.image_editing_sample((x - 0.5) * 2, bs_id=counter, tag=self.tag)
+ minutes, seconds = divmod(time.time() - start_time, 60)
+
+ if 'imagenet' in self.args.domain:
+ x_re = F.interpolate(x_re, size=(224, 224), mode='bilinear', align_corners=False)
+
+ if counter % 10 == 0:
+ print(f'x shape (before diffusion models): {x.shape}')
+ print(f'x shape (before classifier): {x_re.shape}')
+ print("Sampling time per batch: {:0>2}:{:05.2f}".format(int(minutes), seconds))
+
+ out = self.classifier((x_re + 1) * 0.5, features_only=features_only, features_and_logits=features_and_logits)
+
+ self.counter += 1
+
+ return out
+
+
+def eval_autoattack(args, config, model, x_val, y_val, adv_batch_size, log_dir):
+ ngpus = torch.cuda.device_count()
+ model_ = model
+ if ngpus > 1:
+ model_ = model.module
+
+ attack_version = args.attack_version # ['standard', 'rand', 'custom']
+ if attack_version == 'standard':
+ attack_list = ['apgd-ce', 'apgd-t', 'fab-t', 'square']
+ elif attack_version == 'rand':
+ attack_list = ['apgd-ce', 'apgd-dlr']
+ elif attack_version == 'custom':
+ attack_list = args.attack_type.split(',')
+ else:
+ raise NotImplementedError(f'Unknown attack version: {attack_version}!')
+ print(f'attack_version: {attack_version}, attack_list: {attack_list}') # ['apgd-ce', 'apgd-t', 'fab-t', 'square']
+
+ # ---------------- apply the attack to classifier ----------------
+ print(f'apply the attack to classifier [{args.lp_norm}]...')
+ classifier = get_image_classifier(args.classifier_name).to(config.device)
+ adversary_resnet = AutoAttack(classifier, norm=args.lp_norm, eps=args.adv_eps,
+ version=attack_version, attacks_to_run=attack_list,
+ log_path=f'{log_dir}/log_resnet.txt', device=config.device)
+ if attack_version == 'custom':
+ adversary_resnet.apgd.n_restarts = 1
+ adversary_resnet.fab.n_restarts = 1
+ adversary_resnet.apgd_targeted.n_restarts = 1
+ adversary_resnet.fab.n_target_classes = 9
+ adversary_resnet.apgd_targeted.n_target_classes = 9
+ adversary_resnet.square.n_queries = 5000
+ if attack_version == 'rand':
+ adversary_resnet.apgd.eot_iter = args.eot_iter
+ print(f'[classifier] rand version with eot_iter: {adversary_resnet.apgd.eot_iter}')
+ print(f'{args.lp_norm}, epsilon: {args.adv_eps}')
+
+ x_adv_resnet = adversary_resnet.run_standard_evaluation(x_val, y_val, bs=adv_batch_size)
+ print(f'x_adv_resnet shape: {x_adv_resnet.shape}')
+ torch.save([x_adv_resnet, y_val], f'{log_dir}/x_adv_resnet_sd{args.seed}.pt')
+
+ # ---------------- apply the attack to sde_adv ----------------
+ print(f'apply the attack to sde_adv [{args.lp_norm}]...')
+ model_.reset_counter()
+ adversary_sde = AutoAttack(model, norm=args.lp_norm, eps=args.adv_eps,
+ version=attack_version, attacks_to_run=attack_list,
+ log_path=f'{log_dir}/log_sde_adv.txt', device=config.device)
+ if attack_version == 'custom':
+ adversary_sde.apgd.n_restarts = 1
+ adversary_sde.fab.n_restarts = 1
+ adversary_sde.apgd_targeted.n_restarts = 1
+ adversary_sde.fab.n_target_classes = 9
+ adversary_sde.apgd_targeted.n_target_classes = 9
+ adversary_sde.square.n_queries = 5000
+ if attack_version == 'rand':
+ adversary_sde.apgd.eot_iter = args.eot_iter
+ print(f'[adv_sde] rand version with eot_iter: {adversary_sde.apgd.eot_iter}')
+ print(f'{args.lp_norm}, epsilon: {args.adv_eps}')
+
+ x_adv_sde = adversary_sde.run_standard_evaluation(x_val, y_val, bs=adv_batch_size)
+ print(f'x_adv_sde shape: {x_adv_sde.shape}')
+ torch.save([x_adv_sde, y_val], f'{log_dir}/x_adv_sde_sd{args.seed}.pt')
+
+
+def eval_stadv(args, config, model, x_val, y_val, adv_batch_size, log_dir):
+ ngpus = torch.cuda.device_count()
+ model_ = model
+ if ngpus > 1:
+ model_ = model.module
+
+ x_val, y_val = x_val.to(config.device), y_val.to(config.device)
+ print(f'bound: {args.adv_eps}')
+
+ # apply the attack to resnet
+ print(f'apply the stadv attack to resnet...')
+ resnet = get_image_classifier(args.classifier_name).to(config.device)
+
+ start_time = time.time()
+ init_acc = get_accuracy(resnet, x_val, y_val, bs=adv_batch_size)
+ print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
+
+ adversary_resnet = StAdvAttack(resnet, bound=args.adv_eps, num_iterations=100, eot_iter=args.eot_iter)
+
+ start_time = time.time()
+ x_adv_resnet = adversary_resnet(x_val, y_val)
+
+ robust_acc = get_accuracy(resnet, x_adv_resnet, y_val, bs=adv_batch_size)
+ print('robust accuracy: {:.2%}, time elapsed: {:.2f}s'.format(robust_acc, time.time() - start_time))
+
+ print(f'x_adv_resnet shape: {x_adv_resnet.shape}')
+ torch.save([x_adv_resnet, y_val], f'{log_dir}/x_adv_resnet_sd{args.seed}.pt')
+
+ # apply the attack to sde_adv
+ print(f'apply the stadv attack to sde_adv...')
+
+ start_time = time.time()
+ model_.reset_counter()
+ model_.set_tag('no_adv')
+ init_acc = get_accuracy(model, x_val, y_val, bs=adv_batch_size)
+ print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
+
+ adversary_sde = StAdvAttack(model, bound=args.adv_eps, num_iterations=100, eot_iter=args.eot_iter)
+
+ start_time = time.time()
+ model_.reset_counter()
+ model_.set_tag()
+ x_adv_sde = adversary_sde(x_val, y_val)
+
+ model_.reset_counter()
+ model_.set_tag('sde_adv')
+ robust_acc = get_accuracy(model, x_adv_sde, y_val, bs=adv_batch_size)
+ print('robust accuracy: {:.2%}, time elapsed: {:.2f}s'.format(robust_acc, time.time() - start_time))
+
+ print(f'x_adv_sde shape: {x_adv_sde.shape}')
+ torch.save([x_adv_sde, y_val], f'{log_dir}/x_adv_sde_sd{args.seed}.pt')
+
+
+def binarization_eval(args, config):
+ print("Running binarization test.")
+
+    if args.attack_version in ['stadv', 'standard', 'rand']:
+        middle_name = '_'.join([args.diffusion_type, args.attack_version])
+    else:
+        middle_name = '_'.join([args.diffusion_type, args.attack_version, args.attack_type])
+ log_dir = os.path.join(args.image_folder, args.classifier_name, middle_name,
+ 'seed' + str(args.seed), 'data' + str(args.data_seed) + "_" +
+ str(args.test_samples_idx_start) + "_" + str(args.test_samples_idx_end))
+ os.makedirs(log_dir, exist_ok=True)
+ args.log_dir = log_dir
+
+ ngpus = torch.cuda.device_count()
+ adv_batch_size = args.adv_batch_size * ngpus
+
+ # load model
+ print('starting the model and loader...')
+ model = SDE_Adv_Model(args, config)
+ if ngpus > 1:
+ model = torch.nn.DataParallel(model)
+ model = model.eval().to(config.device)
+
+ # load data
+ x_val, y_val = load_data(args, adv_batch_size, binarization_test=True)
+ testloader = utils.build_dataloader_from_arrays(x_val.detach().cpu().numpy(), y_val.detach().cpu().numpy())
+ adv_batch_size = args.adv_batch_size * ngpus
+
+ print('adv_batch_size', adv_batch_size)
+ print('x_val', x_val.shape)
+
+ if args.attack_version in ['standard', 'rand', 'custom']:
+ attack_version = args.attack_version # ['standard', 'rand', 'custom']
+ if attack_version == 'standard':
+ attack_list = ['apgd-ce', 'apgd-t', 'fab-t', 'square']
+ elif attack_version == 'rand':
+ attack_list = ['apgd-ce', 'apgd-dlr']
+ elif attack_version == 'custom':
+ attack_list = args.attack_type.split(',')
+ else:
+ raise NotImplementedError(f'Unknown attack version: {attack_version}!')
+ print(
+ f'attack_version: {attack_version}, attack_list: {attack_list}') # ['apgd-ce', 'apgd-t', 'fab-t', 'square']
+ print(f'{args.lp_norm}, epsilon: {args.adv_eps}')
+ elif args.attack_version == 'stadv':
+ print("Using StAdv attack.")
+ else:
+ raise NotImplementedError(f'unknown attack_version: {args.attack_version}')
+
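+    # Attack wrapper handed to the binarization test below: it runs the configured
+    # attack (AutoAttack or StAdv, matching the robustness evaluation above) against
+    # the binarized model `m` and returns its robust accuracy together with the
+    # adversarial examples and their logits.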
+ def eval(m, x, y):
+        # With DataParallel, the purification counter lives on the wrapped module.
+        (model.module if isinstance(model, torch.nn.DataParallel) else model).reset_counter()
+
+ if args.attack_version in ['standard', 'rand', 'custom']:
+ adversary_sde = AutoAttack(m, norm=args.lp_norm, eps=args.adv_eps,
+ version=attack_version, attacks_to_run=attack_list,
+ device=config.device)
+ # Fix loss functions of APGD such that they are properly defined for binary classification problems.
+ fix_autoattack_autopgd(adversary_sde)
+ if attack_version == 'custom':
+ adversary_sde.apgd.n_restarts = 1
+ adversary_sde.fab.n_restarts = 1
+ adversary_sde.apgd_targeted.n_restarts = 1
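+            # The binarized problem has only two classes, so a single target class
+            # remains for the targeted attacks (compared to 9 on the original model).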
+ adversary_sde.fab.n_target_classes = 1
+ adversary_sde.apgd_targeted.n_target_classes = 1
+ adversary_sde.square.n_queries = 5000
+ if attack_version == 'rand':
+ adversary_sde.apgd.eot_iter = args.eot_iter
+ x_adv_sde = adversary_sde.run_standard_evaluation(x, y, bs=len(x))
+ elif args.attack_version == 'stadv':
+ adversary_sde = StAdvAttack(m, bound=args.adv_eps, num_iterations=100, eot_iter=args.eot_iter)
+ x_adv_sde = adversary_sde(x, y)
+ else:
+ raise NotImplementedError(f'unknown attack_version: {args.attack_version}')
+
+ x_adv_logits = m(x_adv_sde)
+ robust_acc = get_accuracy(m, x_adv_sde, y, bs=adv_batch_size)
+
+ return robust_acc, (x_adv_sde, x_adv_logits)
+
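+    # Run the decision-boundary binarization test: for every test sample, the
+    # defended model's readout is replaced by a freshly trained binary head with a
+    # known adversarial region, and the attack wrapper above is asked to find it.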
+ scores_logit_differences_and_validation_accuracies = dbb.interior_boundary_discrimination_attack(
+ model,
+ testloader,
+ attack_fn=lambda m, l, kwargs: eval(m, l.dataset.tensors[0].to(config.device),
+ l.dataset.tensors[1].to(config.device)),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=args.adv_eps,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=len(x_val),
+ device=config.device,
+ n_samples_evaluation=200, # args.num_samples_test * 10,
+ n_samples_asr_evaluation=200,
+ batch_size=args.batch_size * ngpus,
+ rescale_logits="adaptive",
+ decision_boundary_closeness=0.999,
+ sample_training_data_from_corners=args.sample_from_corners
+ )
+
+ print(dbb.format_result(scores_logit_differences_and_validation_accuracies, len(x_val)))
+
+
+def robustness_eval(args, config):
+ middle_name = '_'.join([args.diffusion_type, args.attack_version]) if args.attack_version in ['stadv', 'standard', 'rand'] \
+ else '_'.join([args.diffusion_type, args.attack_version, args.attack_type])
+ log_dir = os.path.join(args.image_folder, args.classifier_name, middle_name,
+ 'seed' + str(args.seed), 'data' + str(args.data_seed))
+ os.makedirs(log_dir, exist_ok=True)
+ args.log_dir = log_dir
+ logger = dp_utils.Logger(file_name=f'{log_dir}/log.txt', file_mode="w+", should_flush=True)
+
+ ngpus = torch.cuda.device_count()
+ adv_batch_size = args.adv_batch_size * ngpus
+ print(f'ngpus: {ngpus}, adv_batch_size: {adv_batch_size}')
+
+ # load model
+ print('starting the model and loader...')
+ model = SDE_Adv_Model(args, config)
+ if ngpus > 1:
+ model = torch.nn.DataParallel(model)
+ model = model.eval().to(config.device)
+
+ # load data
+ x_val, y_val = load_data(args, adv_batch_size)
+
+ # eval classifier and sde_adv against attacks
+ if args.attack_version in ['standard', 'rand', 'custom']:
+ eval_autoattack(args, config, model, x_val, y_val, adv_batch_size, log_dir)
+ elif args.attack_version == 'stadv':
+ eval_stadv(args, config, model, x_val, y_val, adv_batch_size, log_dir)
+ else:
+ raise NotImplementedError(f'unknown attack_version: {args.attack_version}')
+
+ logger.close()
+
+
+def parse_args_and_config():
+ parser = argparse.ArgumentParser(description=globals()['__doc__'])
+ # diffusion models
+ parser.add_argument('--config', type=str, required=True, help='Path to the config file')
+ parser.add_argument('--data_seed', type=int, default=0, help='Random seed')
+ parser.add_argument('--seed', type=int, default=1234, help='Random seed')
+ parser.add_argument('--exp', type=str, default='exp', help='Path for saving running related data.')
+ parser.add_argument('--verbose', type=str, default='info', help='Verbose level: info | debug | warning | critical')
+ parser.add_argument('-i', '--image_folder', type=str, default='images', help="The folder name of samples")
+ parser.add_argument('--ni', action='store_true', help="No interaction. Suitable for Slurm Job launcher")
+ parser.add_argument('--sample_step', type=int, default=1, help='Total sampling steps')
+ parser.add_argument('--t', type=int, default=400, help='Sampling noise scale')
+ parser.add_argument('--t_delta', type=int, default=15, help='Perturbation range of sampling noise scale')
+ parser.add_argument('--rand_t', type=str2bool, default=False, help='Decide if randomize sampling noise scale')
+ parser.add_argument('--diffusion_type', type=str, default='ddpm', help='[ddpm, sde]')
+ parser.add_argument('--score_type', type=str, default='guided_diffusion', help='[guided_diffusion, score_sde]')
+ parser.add_argument('--eot_iter', type=int, default=20, help='only for rand version of autoattack')
+ parser.add_argument('--use_bm', action='store_true', help='whether to use brownian motion')
+
+ # LDSDE
+ parser.add_argument('--sigma2', type=float, default=1e-3, help='LDSDE sigma2')
+ parser.add_argument('--lambda_ld', type=float, default=1e-2, help='lambda_ld')
+ parser.add_argument('--eta', type=float, default=5., help='LDSDE eta')
+ parser.add_argument('--step_size', type=float, default=1e-2, help='step size for ODE Euler method')
+
+ # adv
+ parser.add_argument('--domain', type=str, default='celebahq', help='which domain: celebahq, cat, car, imagenet')
+ parser.add_argument('--classifier_name', type=str, default='Eyeglasses', help='which classifier to use')
+ parser.add_argument('--partition', type=str, default='val')
+ parser.add_argument('--adv_batch_size', type=int, default=64)
+ parser.add_argument('--attack_type', type=str, default='square')
+ parser.add_argument('--lp_norm', type=str, default='Linf', choices=['Linf', 'L2'])
+ parser.add_argument('--attack_version', type=str, default='custom')
+
+ parser.add_argument('--num_sub', type=int, default=1000, help='imagenet subset')
+ parser.add_argument('--adv_eps', type=float, default=0.07)
+
+ # Binarization Test
+ parser.add_argument("--binarization-test", action="store_true")
+ parser.add_argument("--batch-size", default=64, type=int)
+ parser.add_argument("--n_inner_points", default=999, type=int)
+ parser.add_argument("--n_boundary_points", default=1, type=int)
+ parser.add_argument("--sample-from-corners", action="store_true")
+ parser.add_argument("--test-samples-idx-start", default=0, type=int)
+ parser.add_argument("--test-samples-idx-end", default=64, type=int)
+ # parser.add_argument('--gpu_ids', type=str, default='0')
+
+ args = parser.parse_args()
+
+ # parse config file
+ with open(args.config, 'r') as f:
+ config = yaml.safe_load(f)
+ new_config = dp_utils.dict2namespace(config)
+
+ level = getattr(logging, args.verbose.upper(), None)
+ if not isinstance(level, int):
+ raise ValueError('level {} not supported'.format(args.verbose))
+
+ handler1 = logging.StreamHandler()
+ formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
+ handler1.setFormatter(formatter)
+ logger = logging.getLogger()
+ logger.addHandler(handler1)
+ logger.setLevel(level)
+
+ args.image_folder = os.path.join(args.exp, args.image_folder)
+ os.makedirs(args.image_folder, exist_ok=True)
+
+ # add device
+ device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+ logging.info("Using device: {}".format(device))
+ new_config.device = device
+
+ # set random seed
+ torch.manual_seed(args.seed)
+ random.seed(args.seed)
+ np.random.seed(args.seed)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(args.seed)
+
+ torch.backends.cudnn.benchmark = True
+
+ return args, new_config
+
+
+if __name__ == '__main__':
+ patch_robustbench_models()
+ args, config = parse_args_and_config()
+ # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
+
+ if args.binarization_test:
+ binarization_eval(args, config)
+ else:
+ robustness_eval(args, config)
diff --git a/case_studies/diffpure/eval_sde_adv_bpda.py b/case_studies/diffpure/eval_sde_adv_bpda.py
new file mode 100644
index 0000000..7cb4996
--- /dev/null
+++ b/case_studies/diffpure/eval_sde_adv_bpda.py
@@ -0,0 +1,278 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import argparse
+import logging
+import yaml
+import os
+import time
+
+import random
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from bpda_eot.bpda_eot_attack import BPDA_EOT_Attack
+
+import utils
+from utils import str2bool, get_accuracy, get_image_classifier, load_data
+
+from runners.diffpure_ddpm import Diffusion
+from runners.diffpure_guided import GuidedDiffusion
+from runners.diffpure_sde import RevGuidedDiffusion
+
+
+class ResNet_Adv_Model(nn.Module):
+ def __init__(self, args, config):
+ super().__init__()
+ # image classifier
+ self.resnet = get_image_classifier(args.classifier_name).to(config.device)
+
+ def purify(self, x):
+ return x
+
+ def forward(self, x, mode='purify_and_classify'):
+ if mode == 'purify':
+ out = self.purify(x)
+ elif mode == 'classify':
+ out = self.resnet(x) # x in [0, 1]
+ elif mode == 'purify_and_classify':
+ x = self.purify(x)
+ out = self.resnet(x) # x in [0, 1]
+ else:
+ raise NotImplementedError(f'unknown mode: {mode}')
+ return out
+
+
+class SDE_Adv_Model(nn.Module):
+ def __init__(self, args, config):
+ super().__init__()
+ self.args = args
+
+ # image classifier
+ self.resnet = get_image_classifier(args.classifier_name).to(config.device)
+
+ # diffusion model
+ print(f'diffusion_type: {args.diffusion_type}')
+ if args.diffusion_type == 'ddpm':
+ self.runner = GuidedDiffusion(args, config, device=config.device)
+ elif args.diffusion_type == 'sde':
+ self.runner = RevGuidedDiffusion(args, config, device=config.device)
+ elif args.diffusion_type == 'celebahq-ddpm':
+ self.runner = Diffusion(args, config, device=config.device)
+ else:
+ raise NotImplementedError('unknown diffusion type')
+
+ self.register_buffer('counter', torch.zeros(1, device=config.device))
+ self.tag = None
+
+    def reset_counter(self):
+        # Use the buffer's own device; `config` is not in scope inside this method.
+        self.counter = torch.zeros(1, dtype=torch.int, device=self.counter.device)
+
+ def set_tag(self, tag=None):
+ self.tag = tag
+
+ def purify(self, x):
+ counter = self.counter.item()
+ if counter % 5 == 0:
+ print(f'diffusion times: {counter}')
+
+ # imagenet [3, 224, 224] -> [3, 256, 256] -> [3, 224, 224]
+ if 'imagenet' in self.args.domain:
+ x = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
+
+ start_time = time.time()
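+        # The diffusion runner operates on inputs in [-1, 1], so rescale from [0, 1]
+        # here and map the purified result back before returning.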
+ x_re = self.runner.image_editing_sample((x - 0.5) * 2, bs_id=counter, tag=self.tag)
+ minutes, seconds = divmod(time.time() - start_time, 60)
+
+ if 'imagenet' in self.args.domain:
+ x_re = F.interpolate(x_re, size=(224, 224), mode='bilinear', align_corners=False)
+
+ if counter % 5 == 0:
+ print(f'x shape (before diffusion models): {x.shape}')
+ print(f'x shape (before resnet): {x_re.shape}')
+ print("Sampling time per batch: {:0>2}:{:05.2f}".format(int(minutes), seconds))
+
+ self.counter += 1
+
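+        # Rescale the purified samples back to [0, 1] for the downstream classifier.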
+ return (x_re + 1) * 0.5
+
+ def forward(self, x, mode='purify_and_classify'):
+ if mode == 'purify':
+ out = self.purify(x)
+ elif mode == 'classify':
+ out = self.resnet(x) # x in [0, 1]
+ elif mode == 'purify_and_classify':
+ x = self.purify(x)
+ out = self.resnet(x) # x in [0, 1]
+ else:
+ raise NotImplementedError(f'unknown mode: {mode}')
+ return out
+
+
+def eval_bpda(args, config, model, x_val, y_val, adv_batch_size, log_dir):
+ ngpus = torch.cuda.device_count()
+ model_ = model
+ if ngpus > 1:
+ model_ = model.module
+
+ x_val, y_val = x_val.to(config.device), y_val.to(config.device)
+
+ # ------------------ apply the attack to resnet ------------------
+ print(f'apply the bpda attack to resnet...')
+ resnet_bpda = ResNet_Adv_Model(args, config)
+ if ngpus > 1:
+ resnet_bpda = torch.nn.DataParallel(resnet_bpda)
+
+ start_time = time.time()
+ init_acc = get_accuracy(resnet_bpda, x_val, y_val, bs=adv_batch_size)
+ print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
+
+ adversary_resnet = BPDA_EOT_Attack(resnet_bpda, adv_eps=args.adv_eps, eot_defense_reps=args.eot_defense_reps,
+ eot_attack_reps=args.eot_attack_reps)
+
+ start_time = time.time()
+ class_batch, ims_adv_batch = adversary_resnet.attack_all(x_val, y_val, batch_size=adv_batch_size)
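+    # class_batch stacks per-step correctness flags: row 0 holds the clean
+    # predictions, the last row the predictions after the final BPDA+EOT step.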
+ init_acc = float(class_batch[0, :].sum()) / class_batch.shape[1]
+ robust_acc = float(class_batch[-1, :].sum()) / class_batch.shape[1]
+
+ print('init acc: {:.2%}, robust acc: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, robust_acc, time.time() - start_time))
+
+ print(f'x_adv_resnet shape: {ims_adv_batch.shape}')
+ torch.save([ims_adv_batch, y_val], f'{log_dir}/x_adv_resnet_sd{args.seed}.pt')
+
+ # ------------------ apply the attack to sde_adv ------------------
+ print(f'apply the bpda attack to sde_adv...')
+
+ start_time = time.time()
+ model_.reset_counter()
+ model_.set_tag('no_adv')
+ init_acc = get_accuracy(model, x_val, y_val, bs=adv_batch_size)
+ print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
+
+ adversary_sde = BPDA_EOT_Attack(model, adv_eps=args.adv_eps, eot_defense_reps=args.eot_defense_reps,
+ eot_attack_reps=args.eot_attack_reps)
+
+ start_time = time.time()
+ model_.reset_counter()
+ model_.set_tag()
+ class_batch, ims_adv_batch = adversary_sde.attack_all(x_val, y_val, batch_size=adv_batch_size)
+ init_acc = float(class_batch[0, :].sum()) / class_batch.shape[1]
+ robust_acc = float(class_batch[-1, :].sum()) / class_batch.shape[1]
+
+ print('init acc: {:.2%}, robust acc: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, robust_acc, time.time() - start_time))
+
+ print(f'x_adv_sde shape: {ims_adv_batch.shape}')
+ torch.save([ims_adv_batch, y_val], f'{log_dir}/x_adv_sde_sd{args.seed}.pt')
+
+
+def robustness_eval(args, config):
+ middle_name = '_'.join([args.diffusion_type, 'bpda'])
+ log_dir = os.path.join(args.image_folder, args.classifier_name, middle_name,
+ 'seed' + str(args.seed), 'data' + str(args.data_seed))
+ os.makedirs(log_dir, exist_ok=True)
+ args.log_dir = log_dir
+ logger = utils.Logger(file_name=f'{log_dir}/log.txt', file_mode="w+", should_flush=True)
+
+ ngpus = torch.cuda.device_count()
+ adv_batch_size = args.adv_batch_size * ngpus
+ print(f'ngpus: {ngpus}, adv_batch_size: {adv_batch_size}')
+
+ # load model
+ print('starting the model and loader...')
+ model = SDE_Adv_Model(args, config)
+ if ngpus > 1:
+ model = torch.nn.DataParallel(model)
+ model = model.eval().to(config.device)
+
+ # load data
+ x_val, y_val = load_data(args, adv_batch_size)
+
+ # eval classifier and sde_adv against bpda attack
+ eval_bpda(args, config, model, x_val, y_val, adv_batch_size, log_dir)
+
+ logger.close()
+
+
+def parse_args_and_config():
+ parser = argparse.ArgumentParser(description=globals()['__doc__'])
+ # diffusion models
+ parser.add_argument('--config', type=str, required=True, help='Path to the config file')
+ parser.add_argument('--data_seed', type=int, default=0, help='Random seed')
+ parser.add_argument('--seed', type=int, default=1234, help='Random seed')
+ parser.add_argument('--exp', type=str, default='exp', help='Path for saving running related data.')
+ parser.add_argument('--verbose', type=str, default='info', help='Verbose level: info | debug | warning | critical')
+ parser.add_argument('-i', '--image_folder', type=str, default='images', help="The folder name of samples")
+ parser.add_argument('--ni', action='store_true', help="No interaction. Suitable for Slurm Job launcher")
+ parser.add_argument('--sample_step', type=int, default=1, help='Total sampling steps')
+ parser.add_argument('--t', type=int, default=400, help='Sampling noise scale')
+ parser.add_argument('--t_delta', type=int, default=15, help='Perturbation range of sampling noise scale')
+ parser.add_argument('--rand_t', type=str2bool, default=False, help='Decide if randomize sampling noise scale')
+ parser.add_argument('--diffusion_type', type=str, default='ddpm', help='[ddpm, sde, celebahq-ddpm]')
+ parser.add_argument('--score_type', type=str, default='guided_diffusion', help='[guided_diffusion, score_sde]')
+ parser.add_argument('--eot_iter', type=int, default=20, help='only for rand version of autoattack')
+ parser.add_argument('--use_bm', action='store_true', help='whether to use brownian motion')
+
+ parser.add_argument('--eot_defense_reps', type=int, default=150)
+ parser.add_argument('--eot_attack_reps', type=int, default=15)
+
+ # adv
+ parser.add_argument('--domain', type=str, default='celebahq', help='which domain: celebahq, cat, car, imagenet')
+ parser.add_argument('--classifier_name', type=str, default='Eyeglasses', help='which classifier to use')
+ parser.add_argument('--partition', type=str, default='val')
+ parser.add_argument('--adv_batch_size', type=int, default=64)
+
+ parser.add_argument('--num_sub', type=int, default=1000, help='imagenet subset')
+ parser.add_argument('--adv_eps', type=float, default=0.07)
+ parser.add_argument('--gpu_ids', type=str, default='0')
+
+ args = parser.parse_args()
+
+ # parse config file
+ with open(os.path.join('configs', args.config), 'r') as f:
+ config = yaml.safe_load(f)
+ new_config = utils.dict2namespace(config)
+
+ level = getattr(logging, args.verbose.upper(), None)
+ if not isinstance(level, int):
+ raise ValueError('level {} not supported'.format(args.verbose))
+
+ handler1 = logging.StreamHandler()
+ formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
+ handler1.setFormatter(formatter)
+ logger = logging.getLogger()
+ logger.addHandler(handler1)
+ logger.setLevel(level)
+
+ args.image_folder = os.path.join(args.exp, args.image_folder)
+ os.makedirs(args.image_folder, exist_ok=True)
+
+ # add device
+ device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+ logging.info("Using device: {}".format(device))
+ new_config.device = device
+
+ # set random seed
+ torch.manual_seed(args.seed)
+ random.seed(args.seed)
+ np.random.seed(args.seed)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(args.seed)
+
+ torch.backends.cudnn.benchmark = True
+
+ return args, new_config
+
+
+if __name__ == '__main__':
+ args, config = parse_args_and_config()
+ os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
+ robustness_eval(args, config)
+
+
diff --git a/case_studies/diffpure/guided_diffusion/LICENSE_GUIDED_DIFFUSION b/case_studies/diffpure/guided_diffusion/LICENSE_GUIDED_DIFFUSION
new file mode 100644
index 0000000..9e84fcb
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/LICENSE_GUIDED_DIFFUSION
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 OpenAI
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/case_studies/diffpure/guided_diffusion/__init__.py b/case_studies/diffpure/guided_diffusion/__init__.py
new file mode 100644
index 0000000..2edcdcf
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/__init__.py
@@ -0,0 +1,11 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/__init__.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+Codebase for "Improved Denoising Diffusion Probabilistic Models".
+"""
diff --git a/case_studies/diffpure/guided_diffusion/dist_util.py b/case_studies/diffpure/guided_diffusion/dist_util.py
new file mode 100644
index 0000000..76406a6
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/dist_util.py
@@ -0,0 +1,101 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/dist_util.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+Helpers for distributed training.
+"""
+
+import io
+import os
+import socket
+
+import blobfile as bf
+from mpi4py import MPI
+import torch as th
+import torch.distributed as dist
+
+# Change this to reflect your cluster layout.
+# The GPU for a given rank is (rank % GPUS_PER_NODE).
+GPUS_PER_NODE = 8
+
+SETUP_RETRY_COUNT = 3
+
+
+def setup_dist():
+ """
+ Setup a distributed process group.
+ """
+ if dist.is_initialized():
+ return
+ os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
+
+ comm = MPI.COMM_WORLD
+ backend = "gloo" if not th.cuda.is_available() else "nccl"
+
+ if backend == "gloo":
+ hostname = "localhost"
+ else:
+ hostname = socket.gethostbyname(socket.getfqdn())
+ os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
+ os.environ["RANK"] = str(comm.rank)
+ os.environ["WORLD_SIZE"] = str(comm.size)
+
+ port = comm.bcast(_find_free_port(), root=0)
+ os.environ["MASTER_PORT"] = str(port)
+ dist.init_process_group(backend=backend, init_method="env://")
+
+
+def dev():
+ """
+ Get the device to use for torch.distributed.
+ """
+ if th.cuda.is_available():
+ return th.device(f"cuda")
+ return th.device("cpu")
+
+
+def load_state_dict(path, **kwargs):
+ """
+ Load a PyTorch file without redundant fetches across MPI ranks.
+ """
+ chunk_size = 2 ** 30 # MPI has a relatively small size limit
+ if MPI.COMM_WORLD.Get_rank() == 0:
+ with bf.BlobFile(path, "rb") as f:
+ data = f.read()
+ num_chunks = len(data) // chunk_size
+ if len(data) % chunk_size:
+ num_chunks += 1
+ MPI.COMM_WORLD.bcast(num_chunks)
+ for i in range(0, len(data), chunk_size):
+ MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
+ else:
+ num_chunks = MPI.COMM_WORLD.bcast(None)
+ data = bytes()
+ for _ in range(num_chunks):
+ data += MPI.COMM_WORLD.bcast(None)
+
+ return th.load(io.BytesIO(data), **kwargs)
+
+
+def sync_params(params):
+ """
+ Synchronize a sequence of Tensors across ranks from rank 0.
+ """
+ for p in params:
+ with th.no_grad():
+ dist.broadcast(p, 0)
+
+
+def _find_free_port():
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind(("", 0))
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ return s.getsockname()[1]
+ finally:
+ s.close()
diff --git a/case_studies/diffpure/guided_diffusion/fp16_util.py b/case_studies/diffpure/guided_diffusion/fp16_util.py
new file mode 100644
index 0000000..7560d12
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/fp16_util.py
@@ -0,0 +1,244 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/fp16_util.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+Helpers to train with 16-bit precision.
+"""
+
+import numpy as np
+import torch as th
+import torch.nn as nn
+from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
+
+from . import logger
+
+INITIAL_LOG_LOSS_SCALE = 20.0
+
+
+def convert_module_to_f16(l):
+ """
+ Convert primitive modules to float16.
+ """
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
+ l.weight.data = l.weight.data.half()
+ if l.bias is not None:
+ l.bias.data = l.bias.data.half()
+
+
+def convert_module_to_f32(l):
+ """
+ Convert primitive modules to float32, undoing convert_module_to_f16().
+ """
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
+ l.weight.data = l.weight.data.float()
+ if l.bias is not None:
+ l.bias.data = l.bias.data.float()
+
+
+def make_master_params(param_groups_and_shapes):
+ """
+ Copy model parameters into a (differently-shaped) list of full-precision
+ parameters.
+ """
+ master_params = []
+ for param_group, shape in param_groups_and_shapes:
+ master_param = nn.Parameter(
+ _flatten_dense_tensors(
+ [param.detach().float() for (_, param) in param_group]
+ ).view(shape)
+ )
+ master_param.requires_grad = True
+ master_params.append(master_param)
+ return master_params
+
+
+def model_grads_to_master_grads(param_groups_and_shapes, master_params):
+ """
+ Copy the gradients from the model parameters into the master parameters
+ from make_master_params().
+ """
+ for master_param, (param_group, shape) in zip(
+ master_params, param_groups_and_shapes
+ ):
+ master_param.grad = _flatten_dense_tensors(
+ [param_grad_or_zeros(param) for (_, param) in param_group]
+ ).view(shape)
+
+
+def master_params_to_model_params(param_groups_and_shapes, master_params):
+ """
+ Copy the master parameter data back into the model parameters.
+ """
+ # Without copying to a list, if a generator is passed, this will
+ # silently not copy any parameters.
+ for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
+ for (_, param), unflat_master_param in zip(
+ param_group, unflatten_master_params(param_group, master_param.view(-1))
+ ):
+ param.detach().copy_(unflat_master_param)
+
+
+def unflatten_master_params(param_group, master_param):
+ return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
+
+
+def get_param_groups_and_shapes(named_model_params):
+ named_model_params = list(named_model_params)
+ scalar_vector_named_params = (
+ [(n, p) for (n, p) in named_model_params if p.ndim <= 1],
+ (-1),
+ )
+ matrix_named_params = (
+ [(n, p) for (n, p) in named_model_params if p.ndim > 1],
+ (1, -1),
+ )
+ return [scalar_vector_named_params, matrix_named_params]
+
+
+def master_params_to_state_dict(
+ model, param_groups_and_shapes, master_params, use_fp16
+):
+ if use_fp16:
+ state_dict = model.state_dict()
+ for master_param, (param_group, _) in zip(
+ master_params, param_groups_and_shapes
+ ):
+ for (name, _), unflat_master_param in zip(
+ param_group, unflatten_master_params(param_group, master_param.view(-1))
+ ):
+ assert name in state_dict
+ state_dict[name] = unflat_master_param
+ else:
+ state_dict = model.state_dict()
+ for i, (name, _value) in enumerate(model.named_parameters()):
+ assert name in state_dict
+ state_dict[name] = master_params[i]
+ return state_dict
+
+
+def state_dict_to_master_params(model, state_dict, use_fp16):
+ if use_fp16:
+ named_model_params = [
+ (name, state_dict[name]) for name, _ in model.named_parameters()
+ ]
+ param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
+ master_params = make_master_params(param_groups_and_shapes)
+ else:
+ master_params = [state_dict[name] for name, _ in model.named_parameters()]
+ return master_params
+
+
+def zero_master_grads(master_params):
+ for param in master_params:
+ param.grad = None
+
+
+def zero_grad(model_params):
+ for param in model_params:
+ # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
+ if param.grad is not None:
+ param.grad.detach_()
+ param.grad.zero_()
+
+
+def param_grad_or_zeros(param):
+ if param.grad is not None:
+ return param.grad.data.detach()
+ else:
+ return th.zeros_like(param)
+
+
+class MixedPrecisionTrainer:
+ def __init__(
+ self,
+ *,
+ model,
+ use_fp16=False,
+ fp16_scale_growth=1e-3,
+ initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
+ ):
+ self.model = model
+ self.use_fp16 = use_fp16
+ self.fp16_scale_growth = fp16_scale_growth
+
+ self.model_params = list(self.model.parameters())
+ self.master_params = self.model_params
+ self.param_groups_and_shapes = None
+ self.lg_loss_scale = initial_lg_loss_scale
+
+ if self.use_fp16:
+ self.param_groups_and_shapes = get_param_groups_and_shapes(
+ self.model.named_parameters()
+ )
+ self.master_params = make_master_params(self.param_groups_and_shapes)
+ self.model.convert_to_fp16()
+
+ def zero_grad(self):
+ zero_grad(self.model_params)
+
+ def backward(self, loss: th.Tensor):
+ if self.use_fp16:
+ loss_scale = 2 ** self.lg_loss_scale
+ (loss * loss_scale).backward()
+ else:
+ loss.backward()
+
+ def optimize(self, opt: th.optim.Optimizer):
+ if self.use_fp16:
+ return self._optimize_fp16(opt)
+ else:
+ return self._optimize_normal(opt)
+
+ def _optimize_fp16(self, opt: th.optim.Optimizer):
+ logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
+ model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
+ grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
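+        # Dynamic loss scaling: on overflow, lower the (log2) loss scale and skip
+        # this step; otherwise unscale the gradients, step, and slowly grow the scale.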
+ if check_overflow(grad_norm):
+ self.lg_loss_scale -= 1
+ logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
+ zero_master_grads(self.master_params)
+ return False
+
+ logger.logkv_mean("grad_norm", grad_norm)
+ logger.logkv_mean("param_norm", param_norm)
+
+ self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
+ opt.step()
+ zero_master_grads(self.master_params)
+ master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
+ self.lg_loss_scale += self.fp16_scale_growth
+ return True
+
+ def _optimize_normal(self, opt: th.optim.Optimizer):
+ grad_norm, param_norm = self._compute_norms()
+ logger.logkv_mean("grad_norm", grad_norm)
+ logger.logkv_mean("param_norm", param_norm)
+ opt.step()
+ return True
+
+ def _compute_norms(self, grad_scale=1.0):
+ grad_norm = 0.0
+ param_norm = 0.0
+ for p in self.master_params:
+ with th.no_grad():
+ param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
+ if p.grad is not None:
+ grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
+ return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
+
+ def master_params_to_state_dict(self, master_params):
+ return master_params_to_state_dict(
+ self.model, self.param_groups_and_shapes, master_params, self.use_fp16
+ )
+
+ def state_dict_to_master_params(self, state_dict):
+ return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
+
+
+def check_overflow(value):
+ return (value == float("inf")) or (value == -float("inf")) or (value != value)
diff --git a/case_studies/diffpure/guided_diffusion/gaussian_diffusion.py b/case_studies/diffpure/guided_diffusion/gaussian_diffusion.py
new file mode 100644
index 0000000..efbb3c6
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/gaussian_diffusion.py
@@ -0,0 +1,916 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+This code started out as a PyTorch port of Ho et al.'s diffusion models:
+https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
+
+Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
+"""
+
+import enum
+import math
+
+import numpy as np
+import torch as th
+
+from .nn import mean_flat
+from .losses import normal_kl, discretized_gaussian_log_likelihood
+
+
+def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
+ """
+ Get a pre-defined beta schedule for the given name.
+
+ The beta schedule library consists of beta schedules which remain similar
+ in the limit of num_diffusion_timesteps.
+ Beta schedules may be added, but should not be removed or changed once
+ they are committed to maintain backwards compatibility.
+ """
+ if schedule_name == "linear":
+ # Linear schedule from Ho et al, extended to work for any number of
+ # diffusion steps.
+ scale = 1000 / num_diffusion_timesteps
+ beta_start = scale * 0.0001
+ beta_end = scale * 0.02
+ return np.linspace(
+ beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
+ )
+ elif schedule_name == "cosine":
+ return betas_for_alpha_bar(
+ num_diffusion_timesteps,
+ lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
+ )
+ else:
+ raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
+
+
+def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+ """
+ Create a beta schedule that discretizes the given alpha_t_bar function,
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
+
+ :param num_diffusion_timesteps: the number of betas to produce.
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
+ produces the cumulative product of (1-beta) up to that
+ part of the diffusion process.
+ :param max_beta: the maximum beta to use; use values lower than 1 to
+ prevent singularities.
+ """
+ betas = []
+ for i in range(num_diffusion_timesteps):
+ t1 = i / num_diffusion_timesteps
+ t2 = (i + 1) / num_diffusion_timesteps
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
+ return np.array(betas)
+
+
+class ModelMeanType(enum.Enum):
+ """
+ Which type of output the model predicts.
+ """
+
+ PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
+ START_X = enum.auto() # the model predicts x_0
+ EPSILON = enum.auto() # the model predicts epsilon
+
+
+class ModelVarType(enum.Enum):
+ """
+ What is used as the model's output variance.
+
+ The LEARNED_RANGE option has been added to allow the model to predict
+ values between FIXED_SMALL and FIXED_LARGE, making its job easier.
+ """
+
+ LEARNED = enum.auto()
+ FIXED_SMALL = enum.auto()
+ FIXED_LARGE = enum.auto()
+ LEARNED_RANGE = enum.auto()
+
+
+class LossType(enum.Enum):
+ MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
+ RESCALED_MSE = (
+ enum.auto()
+ ) # use raw MSE loss (with RESCALED_KL when learning variances)
+ KL = enum.auto() # use the variational lower-bound
+ RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
+
+ def is_vb(self):
+ return self == LossType.KL or self == LossType.RESCALED_KL
+
+
+class GaussianDiffusion:
+ """
+ Utilities for training and sampling diffusion models.
+
+ Ported directly from here, and then adapted over time to further experimentation.
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
+
+ :param betas: a 1-D numpy array of betas for each diffusion timestep,
+ starting at T and going to 1.
+ :param model_mean_type: a ModelMeanType determining what the model outputs.
+ :param model_var_type: a ModelVarType determining how variance is output.
+ :param loss_type: a LossType determining the loss function to use.
+ :param rescale_timesteps: if True, pass floating point timesteps into the
+ model so that they are always scaled like in the
+ original paper (0 to 1000).
+ """
+
+ def __init__(
+ self,
+ *,
+ betas,
+ model_mean_type,
+ model_var_type,
+ loss_type,
+ rescale_timesteps=False,
+ ):
+ self.model_mean_type = model_mean_type
+ self.model_var_type = model_var_type
+ self.loss_type = loss_type
+ self.rescale_timesteps = rescale_timesteps
+
+ # Use float64 for accuracy.
+ betas = np.array(betas, dtype=np.float64)
+ self.betas = betas
+ assert len(betas.shape) == 1, "betas must be 1-D"
+ assert (betas > 0).all() and (betas <= 1).all()
+
+ self.num_timesteps = int(betas.shape[0])
+
+ alphas = 1.0 - betas
+ self.alphas_cumprod = np.cumprod(alphas, axis=0)
+ self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
+ self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
+ assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
+
+ # calculations for diffusion q(x_t | x_{t-1}) and others
+ self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
+ self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
+ self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
+ self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
+ self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
+
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
+ self.posterior_variance = (
+ betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
+ )
+ # log calculation clipped because the posterior variance is 0 at the
+ # beginning of the diffusion chain.
+ self.posterior_log_variance_clipped = np.log(
+ np.append(self.posterior_variance[1], self.posterior_variance[1:])
+ )
+ self.posterior_mean_coef1 = (
+ betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
+ )
+ self.posterior_mean_coef2 = (
+ (1.0 - self.alphas_cumprod_prev)
+ * np.sqrt(alphas)
+ / (1.0 - self.alphas_cumprod)
+ )
+
+ def q_mean_variance(self, x_start, t):
+ """
+ Get the distribution q(x_t | x_0).
+
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
+ """
+ mean = (
+ _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ )
+ variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
+ log_variance = _extract_into_tensor(
+ self.log_one_minus_alphas_cumprod, t, x_start.shape
+ )
+ return mean, variance, log_variance
+
+ def q_sample(self, x_start, t, noise=None):
+ """
+ Diffuse the data for a given number of diffusion steps.
+
+ In other words, sample from q(x_t | x_0).
+
+ :param x_start: the initial data batch.
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+ :param noise: if specified, the split-out normal noise.
+ :return: A noisy version of x_start.
+ """
+ if noise is None:
+ noise = th.randn_like(x_start)
+ assert noise.shape == x_start.shape
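+        # x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise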
+ return (
+ _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
+ * noise
+ )
+
+ def q_posterior_mean_variance(self, x_start, x_t, t):
+ """
+ Compute the mean and variance of the diffusion posterior:
+
+ q(x_{t-1} | x_t, x_0)
+
+ """
+ assert x_start.shape == x_t.shape
+ posterior_mean = (
+ _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
+ )
+ posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
+ posterior_log_variance_clipped = _extract_into_tensor(
+ self.posterior_log_variance_clipped, t, x_t.shape
+ )
+ assert (
+ posterior_mean.shape[0]
+ == posterior_variance.shape[0]
+ == posterior_log_variance_clipped.shape[0]
+ == x_start.shape[0]
+ )
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+ def p_mean_variance(
+ self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
+ ):
+ """
+ Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
+ the initial x, x_0.
+
+ :param model: the model, which takes a signal and a batch of timesteps
+ as input.
+ :param x: the [N x C x ...] tensor at time t.
+ :param t: a 1-D Tensor of timesteps.
+ :param clip_denoised: if True, clip the denoised signal into [-1, 1].
+ :param denoised_fn: if not None, a function which applies to the
+ x_start prediction before it is used to sample. Applies before
+ clip_denoised.
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
+ pass to the model. This can be used for conditioning.
+ :return: a dict with the following keys:
+ - 'mean': the model mean output.
+ - 'variance': the model variance output.
+ - 'log_variance': the log of 'variance'.
+ - 'pred_xstart': the prediction for x_0.
+ """
+ if model_kwargs is None:
+ model_kwargs = {}
+
+ B, C = x.shape[:2]
+ assert t.shape == (B,)
+ model_output = model(x, self._scale_timesteps(t), **model_kwargs)
+
+ if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
+ assert model_output.shape == (B, C * 2, *x.shape[2:])
+ model_output, model_var_values = th.split(model_output, C, dim=1)
+ if self.model_var_type == ModelVarType.LEARNED:
+ model_log_variance = model_var_values
+ model_variance = th.exp(model_log_variance)
+ else:
+ min_log = _extract_into_tensor(
+ self.posterior_log_variance_clipped, t, x.shape
+ )
+ max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
+ # The model_var_values is [-1, 1] for [min_var, max_var].
+ frac = (model_var_values + 1) / 2
+ model_log_variance = frac * max_log + (1 - frac) * min_log
+ model_variance = th.exp(model_log_variance)
+ else:
+ model_variance, model_log_variance = {
+ # for fixedlarge, we set the initial (log-)variance like so
+ # to get a better decoder log likelihood.
+ ModelVarType.FIXED_LARGE: (
+ np.append(self.posterior_variance[1], self.betas[1:]),
+ np.log(np.append(self.posterior_variance[1], self.betas[1:])),
+ ),
+ ModelVarType.FIXED_SMALL: (
+ self.posterior_variance,
+ self.posterior_log_variance_clipped,
+ ),
+ }[self.model_var_type]
+ model_variance = _extract_into_tensor(model_variance, t, x.shape)
+ model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
+
+ def process_xstart(x):
+ if denoised_fn is not None:
+ x = denoised_fn(x)
+ if clip_denoised:
+ return x.clamp(-1, 1)
+ return x
+
+ if self.model_mean_type == ModelMeanType.PREVIOUS_X:
+ pred_xstart = process_xstart(
+ self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
+ )
+ model_mean = model_output
+ elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
+ if self.model_mean_type == ModelMeanType.START_X:
+ pred_xstart = process_xstart(model_output)
+ else:
+ pred_xstart = process_xstart(
+ self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
+ )
+ model_mean, _, _ = self.q_posterior_mean_variance(
+ x_start=pred_xstart, x_t=x, t=t
+ )
+ else:
+ raise NotImplementedError(self.model_mean_type)
+
+ assert (
+ model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
+ )
+ return {
+ "mean": model_mean,
+ "variance": model_variance,
+ "log_variance": model_log_variance,
+ "pred_xstart": pred_xstart,
+ }
+
+ def _predict_xstart_from_eps(self, x_t, t, eps):
+ assert x_t.shape == eps.shape
+ return (
+ _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
+ - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
+ )
+
+ def _predict_xstart_from_xprev(self, x_t, t, xprev):
+ assert x_t.shape == xprev.shape
+ return ( # (xprev - coef2*x_t) / coef1
+ _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
+ - _extract_into_tensor(
+ self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
+ )
+ * x_t
+ )
+
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
+ return (
+ _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
+ - pred_xstart
+ ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
+
+ def _scale_timesteps(self, t):
+ if self.rescale_timesteps:
+ return t.float() * (1000.0 / self.num_timesteps)
+ return t
+
+ def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
+ """
+ Compute the mean for the previous step, given a function cond_fn that
+ computes the gradient of a conditional log probability with respect to
+ x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
+ condition on y.
+
+ This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
+ """
+ gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
+ new_mean = (
+ p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
+ )
+ return new_mean
+
+ def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
+ """
+ Compute what the p_mean_variance output would have been, should the
+ model's score function be conditioned by cond_fn.
+
+ See condition_mean() for details on cond_fn.
+
+ Unlike condition_mean(), this instead uses the conditioning strategy
+ from Song et al (2020).
+ """
+ alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
+
+ eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
+ eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
+ x, self._scale_timesteps(t), **model_kwargs
+ )
+
+ out = p_mean_var.copy()
+ out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
+ out["mean"], _, _ = self.q_posterior_mean_variance(
+ x_start=out["pred_xstart"], x_t=x, t=t
+ )
+ return out
+
+ def p_sample(
+ self,
+ model,
+ x,
+ t,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None,
+ ):
+ """
+ Sample x_{t-1} from the model at the given timestep.
+
+ :param model: the model to sample from.
+ :param x: the current tensor at x_{t-1}.
+ :param t: the value of t, starting at 0 for the first diffusion step.
+ :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
+ :param denoised_fn: if not None, a function which applies to the
+ x_start prediction before it is used to sample.
+ :param cond_fn: if not None, this is a gradient function that acts
+ similarly to the model.
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
+ pass to the model. This can be used for conditioning.
+ :return: a dict containing the following keys:
+ - 'sample': a random sample from the model.
+ - 'pred_xstart': a prediction of x_0.
+ """
+ out = self.p_mean_variance(
+ model,
+ x,
+ t,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ model_kwargs=model_kwargs,
+ )
+ noise = th.randn_like(x)
+ nonzero_mask = (
+ (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
+ ) # no noise when t == 0
+ if cond_fn is not None:
+ out["mean"] = self.condition_mean(
+ cond_fn, out, x, t, model_kwargs=model_kwargs
+ )
+ sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
+ return {"sample": sample, "pred_xstart": out["pred_xstart"]}
+
+ def p_sample_loop(
+ self,
+ model,
+ shape,
+ noise=None,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None,
+ device=None,
+ progress=False,
+ ):
+ """
+ Generate samples from the model.
+
+ :param model: the model module.
+ :param shape: the shape of the samples, (N, C, H, W).
+ :param noise: if specified, the noise from the encoder to sample.
+ Should be of the same shape as `shape`.
+ :param clip_denoised: if True, clip x_start predictions to [-1, 1].
+ :param denoised_fn: if not None, a function which applies to the
+ x_start prediction before it is used to sample.
+ :param cond_fn: if not None, this is a gradient function that acts
+ similarly to the model.
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
+ pass to the model. This can be used for conditioning.
+ :param device: if specified, the device to create the samples on.
+ If not specified, use a model parameter's device.
+ :param progress: if True, show a tqdm progress bar.
+ :return: a non-differentiable batch of samples.
+ """
+ final = None
+ for sample in self.p_sample_loop_progressive(
+ model,
+ shape,
+ noise=noise,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ cond_fn=cond_fn,
+ model_kwargs=model_kwargs,
+ device=device,
+ progress=progress,
+ ):
+ final = sample
+ return final["sample"]
+
+ def p_sample_loop_progressive(
+ self,
+ model,
+ shape,
+ noise=None,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None,
+ device=None,
+ progress=False,
+ ):
+ """
+ Generate samples from the model and yield intermediate samples from
+ each timestep of diffusion.
+
+ Arguments are the same as p_sample_loop().
+ Returns a generator over dicts, where each dict is the return value of
+ p_sample().
+ """
+ if device is None:
+ device = next(model.parameters()).device
+ assert isinstance(shape, (tuple, list))
+ if noise is not None:
+ img = noise
+ else:
+ img = th.randn(*shape, device=device)
+ indices = list(range(self.num_timesteps))[::-1]
+
+ if progress:
+ # Lazy import so that we don't depend on tqdm.
+ from tqdm.auto import tqdm
+
+ indices = tqdm(indices)
+
+ for i in indices:
+ t = th.tensor([i] * shape[0], device=device)
+ with th.no_grad():
+ out = self.p_sample(
+ model,
+ img,
+ t,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ cond_fn=cond_fn,
+ model_kwargs=model_kwargs,
+ )
+ yield out
+ img = out["sample"]
+
+ def ddim_sample(
+ self,
+ model,
+ x,
+ t,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None,
+ eta=0.0,
+ ):
+ """
+ Sample x_{t-1} from the model using DDIM.
+
+ Same usage as p_sample().
+ """
+ out = self.p_mean_variance(
+ model,
+ x,
+ t,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ model_kwargs=model_kwargs,
+ )
+ if cond_fn is not None:
+ out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
+
+ # Usually our model outputs epsilon, but we re-derive it
+ # in case we used x_start or x_prev prediction.
+ eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
+
+ alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
+ alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
+ sigma = (
+ eta
+ * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
+ * th.sqrt(1 - alpha_bar / alpha_bar_prev)
+ )
+ # Equation 12.
+ noise = th.randn_like(x)
+ mean_pred = (
+ out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
+ )
+ nonzero_mask = (
+ (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
+ ) # no noise when t == 0
+ sample = mean_pred + nonzero_mask * sigma * noise
+ return {"sample": sample, "pred_xstart": out["pred_xstart"]}
+
+ def ddim_reverse_sample(
+ self,
+ model,
+ x,
+ t,
+ clip_denoised=True,
+ denoised_fn=None,
+ model_kwargs=None,
+ eta=0.0,
+ ):
+ """
+ Sample x_{t+1} from the model using DDIM reverse ODE.
+ """
+ assert eta == 0.0, "Reverse ODE only for deterministic path"
+ out = self.p_mean_variance(
+ model,
+ x,
+ t,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ model_kwargs=model_kwargs,
+ )
+ # Usually our model outputs epsilon, but we re-derive it
+ # in case we used x_start or x_prev prediction.
+ eps = (
+ _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
+ - out["pred_xstart"]
+ ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
+ alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
+
+ # Equation 12. reversed
+ mean_pred = (
+ out["pred_xstart"] * th.sqrt(alpha_bar_next)
+ + th.sqrt(1 - alpha_bar_next) * eps
+ )
+
+ return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
+
+ def ddim_sample_loop(
+ self,
+ model,
+ shape,
+ noise=None,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None,
+ device=None,
+ progress=False,
+ eta=0.0,
+ ):
+ """
+ Generate samples from the model using DDIM.
+
+ Same usage as p_sample_loop().
+ """
+ final = None
+ for sample in self.ddim_sample_loop_progressive(
+ model,
+ shape,
+ noise=noise,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ cond_fn=cond_fn,
+ model_kwargs=model_kwargs,
+ device=device,
+ progress=progress,
+ eta=eta,
+ ):
+ final = sample
+ return final["sample"]
+
+ def ddim_sample_loop_progressive(
+ self,
+ model,
+ shape,
+ noise=None,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None,
+ device=None,
+ progress=False,
+ eta=0.0,
+ ):
+ """
+ Use DDIM to sample from the model and yield intermediate samples from
+ each timestep of DDIM.
+
+ Same usage as p_sample_loop_progressive().
+ """
+ if device is None:
+ device = next(model.parameters()).device
+ assert isinstance(shape, (tuple, list))
+ if noise is not None:
+ img = noise
+ else:
+ img = th.randn(*shape, device=device)
+ indices = list(range(self.num_timesteps))[::-1]
+
+ if progress:
+ # Lazy import so that we don't depend on tqdm.
+ from tqdm.auto import tqdm
+
+ indices = tqdm(indices)
+
+ for i in indices:
+ t = th.tensor([i] * shape[0], device=device)
+ with th.no_grad():
+ out = self.ddim_sample(
+ model,
+ img,
+ t,
+ clip_denoised=clip_denoised,
+ denoised_fn=denoised_fn,
+ cond_fn=cond_fn,
+ model_kwargs=model_kwargs,
+ eta=eta,
+ )
+ yield out
+ img = out["sample"]
+
+ def _vb_terms_bpd(
+ self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
+ ):
+ """
+ Get a term for the variational lower-bound.
+
+ The resulting units are bits (rather than nats, as one might expect).
+ This allows for comparison to other papers.
+
+ :return: a dict with the following keys:
+ - 'output': a shape [N] tensor of NLLs or KLs.
+ - 'pred_xstart': the x_0 predictions.
+ """
+ true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
+ x_start=x_start, x_t=x_t, t=t
+ )
+ out = self.p_mean_variance(
+ model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
+ )
+ kl = normal_kl(
+ true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
+ )
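+        # Average over non-batch dimensions and convert from nats to bits.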
+ kl = mean_flat(kl) / np.log(2.0)
+
+ decoder_nll = -discretized_gaussian_log_likelihood(
+ x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
+ )
+ assert decoder_nll.shape == x_start.shape
+ decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
+
+ # At the first timestep return the decoder NLL,
+ # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
+ output = th.where((t == 0), decoder_nll, kl)
+ return {"output": output, "pred_xstart": out["pred_xstart"]}
+
+ def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
+ """
+ Compute training losses for a single timestep.
+
+ :param model: the model to evaluate loss on.
+ :param x_start: the [N x C x ...] tensor of inputs.
+ :param t: a batch of timestep indices.
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
+ pass to the model. This can be used for conditioning.
+ :param noise: if specified, the specific Gaussian noise to try to remove.
+ :return: a dict with the key "loss" containing a tensor of shape [N].
+ Some mean or variance settings may also have other keys.
+ """
+ if model_kwargs is None:
+ model_kwargs = {}
+ if noise is None:
+ noise = th.randn_like(x_start)
+ x_t = self.q_sample(x_start, t, noise=noise)
+
+ terms = {}
+
+ if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
+ terms["loss"] = self._vb_terms_bpd(
+ model=model,
+ x_start=x_start,
+ x_t=x_t,
+ t=t,
+ clip_denoised=False,
+ model_kwargs=model_kwargs,
+ )["output"]
+ if self.loss_type == LossType.RESCALED_KL:
+ terms["loss"] *= self.num_timesteps
+ elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
+ model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
+
+ if self.model_var_type in [
+ ModelVarType.LEARNED,
+ ModelVarType.LEARNED_RANGE,
+ ]:
+ B, C = x_t.shape[:2]
+ assert model_output.shape == (B, C * 2, *x_t.shape[2:])
+ model_output, model_var_values = th.split(model_output, C, dim=1)
+ # Learn the variance using the variational bound, but don't let
+ # it affect our mean prediction.
+ frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
+ terms["vb"] = self._vb_terms_bpd(
+ model=lambda *args, r=frozen_out: r,
+ x_start=x_start,
+ x_t=x_t,
+ t=t,
+ clip_denoised=False,
+ )["output"]
+ if self.loss_type == LossType.RESCALED_MSE:
+ # Divide by 1000 for equivalence with initial implementation.
+ # Without a factor of 1/1000, the VB term hurts the MSE term.
+ terms["vb"] *= self.num_timesteps / 1000.0
+
+ target = {
+ ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
+ x_start=x_start, x_t=x_t, t=t
+ )[0],
+ ModelMeanType.START_X: x_start,
+ ModelMeanType.EPSILON: noise,
+ }[self.model_mean_type]
+ assert model_output.shape == target.shape == x_start.shape
+ terms["mse"] = mean_flat((target - model_output) ** 2)
+ if "vb" in terms:
+ terms["loss"] = terms["mse"] + terms["vb"]
+ else:
+ terms["loss"] = terms["mse"]
+ else:
+ raise NotImplementedError(self.loss_type)
+
+ return terms
+
+ def _prior_bpd(self, x_start):
+ """
+ Get the prior KL term for the variational lower-bound, measured in
+ bits-per-dim.
+
+ This term can't be optimized, as it only depends on the encoder.
+
+ :param x_start: the [N x C x ...] tensor of inputs.
+ :return: a batch of [N] KL values (in bits), one per batch element.
+ """
+ batch_size = x_start.shape[0]
+ t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
+ kl_prior = normal_kl(
+ mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
+ )
+ return mean_flat(kl_prior) / np.log(2.0)
+
+ def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
+ """
+ Compute the entire variational lower-bound, measured in bits-per-dim,
+ as well as other related quantities.
+
+ :param model: the model to evaluate loss on.
+ :param x_start: the [N x C x ...] tensor of inputs.
+ :param clip_denoised: if True, clip denoised samples.
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
+ pass to the model. This can be used for conditioning.
+
+ :return: a dict containing the following keys:
+ - total_bpd: the total variational lower-bound, per batch element.
+ - prior_bpd: the prior term in the lower-bound.
+ - vb: an [N x T] tensor of terms in the lower-bound.
+ - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
+ - mse: an [N x T] tensor of epsilon MSEs for each timestep.
+ """
+ device = x_start.device
+ batch_size = x_start.shape[0]
+
+ vb = []
+ xstart_mse = []
+ mse = []
+ for t in list(range(self.num_timesteps))[::-1]:
+ t_batch = th.tensor([t] * batch_size, device=device)
+ noise = th.randn_like(x_start)
+ x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
+ # Calculate VLB term at the current timestep
+ with th.no_grad():
+ out = self._vb_terms_bpd(
+ model,
+ x_start=x_start,
+ x_t=x_t,
+ t=t_batch,
+ clip_denoised=clip_denoised,
+ model_kwargs=model_kwargs,
+ )
+ vb.append(out["output"])
+ xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
+ eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
+ mse.append(mean_flat((eps - noise) ** 2))
+
+ vb = th.stack(vb, dim=1)
+ xstart_mse = th.stack(xstart_mse, dim=1)
+ mse = th.stack(mse, dim=1)
+
+ prior_bpd = self._prior_bpd(x_start)
+ total_bpd = vb.sum(dim=1) + prior_bpd
+ return {
+ "total_bpd": total_bpd,
+ "prior_bpd": prior_bpd,
+ "vb": vb,
+ "xstart_mse": xstart_mse,
+ "mse": mse,
+ }
+
+
+def _extract_into_tensor(arr, timesteps, broadcast_shape):
+ """
+ Extract values from a 1-D numpy array for a batch of indices.
+
+ :param arr: the 1-D numpy array.
+ :param timesteps: a tensor of indices into the array to extract.
+ :param broadcast_shape: a larger shape of K dimensions with the batch
+ dimension equal to the length of timesteps.
+ :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
+ """
+ res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
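+    # Append trailing singleton dims until res can broadcast against broadcast_shape.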
+ while len(res.shape) < len(broadcast_shape):
+ res = res[..., None]
+ return res.expand(broadcast_shape)
diff --git a/case_studies/diffpure/guided_diffusion/image_datasets.py b/case_studies/diffpure/guided_diffusion/image_datasets.py
new file mode 100644
index 0000000..cac5515
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/image_datasets.py
@@ -0,0 +1,175 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/image_datasets.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+import math
+import random
+
+from PIL import Image
+import blobfile as bf
+from mpi4py import MPI
+import numpy as np
+from torch.utils.data import DataLoader, Dataset
+
+
+def load_data(
+ *,
+ data_dir,
+ batch_size,
+ image_size,
+ class_cond=False,
+ deterministic=False,
+ random_crop=False,
+ random_flip=True,
+):
+ """
+ For a dataset, create a generator over (images, kwargs) pairs.
+
+    Each batch of images is an NCHW float tensor, and the kwargs dict contains
+    zero or more keys, each of which maps to a batched Tensor of its own.
+ The kwargs dict can be used for class labels, in which case the key is "y"
+ and the values are integer tensors of class labels.
+
+ :param data_dir: a dataset directory.
+ :param batch_size: the batch size of each returned pair.
+ :param image_size: the size to which images are resized.
+ :param class_cond: if True, include a "y" key in returned dicts for class
+ label. If classes are not available and this is true, an
+ exception will be raised.
+ :param deterministic: if True, yield results in a deterministic order.
+ :param random_crop: if True, randomly crop the images for augmentation.
+ :param random_flip: if True, randomly flip the images for augmentation.
+ """
+ if not data_dir:
+ raise ValueError("unspecified data directory")
+ all_files = _list_image_files_recursively(data_dir)
+ classes = None
+ if class_cond:
+ # Assume classes are the first part of the filename,
+ # before an underscore.
+ class_names = [bf.basename(path).split("_")[0] for path in all_files]
+ sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
+ classes = [sorted_classes[x] for x in class_names]
+ dataset = ImageDataset(
+ image_size,
+ all_files,
+ classes=classes,
+ shard=MPI.COMM_WORLD.Get_rank(),
+ num_shards=MPI.COMM_WORLD.Get_size(),
+ random_crop=random_crop,
+ random_flip=random_flip,
+ )
+ if deterministic:
+ loader = DataLoader(
+ dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
+ )
+ else:
+ loader = DataLoader(
+ dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
+ )
+ while True:
+ yield from loader
+
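+# Illustrative sketch (hypothetical paths and sizes) of how the generator
+# returned by load_data() is typically consumed during training:
+#
+#   data = load_data(data_dir="/path/to/images", batch_size=16,
+#                    image_size=64, class_cond=True)
+#   images, extra = next(data)   # images: [16, 3, 64, 64] floats in [-1, 1]
+#   labels = extra["y"]          # only present when class_cond=True and the
+#                                # filenames encode the class before "_"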
+
+def _list_image_files_recursively(data_dir):
+ results = []
+ for entry in sorted(bf.listdir(data_dir)):
+ full_path = bf.join(data_dir, entry)
+ ext = entry.split(".")[-1]
+ if "." in entry and ext.lower() in ["jpg", "jpeg", "png", "gif"]:
+ results.append(full_path)
+ elif bf.isdir(full_path):
+ results.extend(_list_image_files_recursively(full_path))
+ return results
+
+
+class ImageDataset(Dataset):
+ def __init__(
+ self,
+ resolution,
+ image_paths,
+ classes=None,
+ shard=0,
+ num_shards=1,
+ random_crop=False,
+ random_flip=True,
+ ):
+ super().__init__()
+ self.resolution = resolution
+ self.local_images = image_paths[shard:][::num_shards]
+ self.local_classes = None if classes is None else classes[shard:][::num_shards]
+ self.random_crop = random_crop
+ self.random_flip = random_flip
+
+ def __len__(self):
+ return len(self.local_images)
+
+ def __getitem__(self, idx):
+ path = self.local_images[idx]
+ with bf.BlobFile(path, "rb") as f:
+ pil_image = Image.open(f)
+ pil_image.load()
+ pil_image = pil_image.convert("RGB")
+
+ if self.random_crop:
+ arr = random_crop_arr(pil_image, self.resolution)
+ else:
+ arr = center_crop_arr(pil_image, self.resolution)
+
+ if self.random_flip and random.random() < 0.5:
+ arr = arr[:, ::-1]
+
+ arr = arr.astype(np.float32) / 127.5 - 1
+
+ out_dict = {}
+ if self.local_classes is not None:
+ out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
+ return np.transpose(arr, [2, 0, 1]), out_dict
+
+
+def center_crop_arr(pil_image, image_size):
+ # We are not on a new enough PIL to support the `reducing_gap`
+ # argument, which uses BOX downsampling at powers of two first.
+ # Thus, we do it by hand to improve downsample quality.
+ while min(*pil_image.size) >= 2 * image_size:
+ pil_image = pil_image.resize(
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
+ )
+
+ scale = image_size / min(*pil_image.size)
+ pil_image = pil_image.resize(
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
+ )
+
+ arr = np.array(pil_image)
+ crop_y = (arr.shape[0] - image_size) // 2
+ crop_x = (arr.shape[1] - image_size) // 2
+ return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
+
+
+def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
+ min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
+ max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
+ smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
+
+ # We are not on a new enough PIL to support the `reducing_gap`
+ # argument, which uses BOX downsampling at powers of two first.
+ # Thus, we do it by hand to improve downsample quality.
+ while min(*pil_image.size) >= 2 * smaller_dim_size:
+ pil_image = pil_image.resize(
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
+ )
+
+ scale = smaller_dim_size / min(*pil_image.size)
+ pil_image = pil_image.resize(
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
+ )
+
+ arr = np.array(pil_image)
+ crop_y = random.randrange(arr.shape[0] - image_size + 1)
+ crop_x = random.randrange(arr.shape[1] - image_size + 1)
+ return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
diff --git a/case_studies/diffpure/guided_diffusion/logger.py b/case_studies/diffpure/guided_diffusion/logger.py
new file mode 100644
index 0000000..cdd8176
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/logger.py
@@ -0,0 +1,503 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/logger.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
+https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
+"""
+
+import os
+import sys
+import shutil
+import os.path as osp
+import json
+import time
+import datetime
+import tempfile
+import warnings
+from collections import defaultdict
+from contextlib import contextmanager
+
+DEBUG = 10
+INFO = 20
+WARN = 30
+ERROR = 40
+
+DISABLED = 50
+
+
+class KVWriter(object):
+ def writekvs(self, kvs):
+ raise NotImplementedError
+
+
+class SeqWriter(object):
+ def writeseq(self, seq):
+ raise NotImplementedError
+
+
+class HumanOutputFormat(KVWriter, SeqWriter):
+ def __init__(self, filename_or_file):
+ if isinstance(filename_or_file, str):
+ self.file = open(filename_or_file, "wt")
+ self.own_file = True
+ else:
+ assert hasattr(filename_or_file, "read"), (
+ "expected file or str, got %s" % filename_or_file
+ )
+ self.file = filename_or_file
+ self.own_file = False
+
+ def writekvs(self, kvs):
+ # Create strings for printing
+ key2str = {}
+ for (key, val) in sorted(kvs.items()):
+ if hasattr(val, "__float__"):
+ valstr = "%-8.3g" % val
+ else:
+ valstr = str(val)
+ key2str[self._truncate(key)] = self._truncate(valstr)
+
+ # Find max widths
+ if len(key2str) == 0:
+ print("WARNING: tried to write empty key-value dict")
+ return
+ else:
+ keywidth = max(map(len, key2str.keys()))
+ valwidth = max(map(len, key2str.values()))
+
+ # Write out the data
+ dashes = "-" * (keywidth + valwidth + 7)
+ lines = [dashes]
+ for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
+ lines.append(
+ "| %s%s | %s%s |"
+ % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
+ )
+ lines.append(dashes)
+ self.file.write("\n".join(lines) + "\n")
+
+ # Flush the output to the file
+ self.file.flush()
+
+ def _truncate(self, s):
+ maxlen = 30
+ return s[: maxlen - 3] + "..." if len(s) > maxlen else s
+
+ def writeseq(self, seq):
+ seq = list(seq)
+ for (i, elem) in enumerate(seq):
+ self.file.write(elem)
+ if i < len(seq) - 1: # add space unless this is the last one
+ self.file.write(" ")
+ self.file.write("\n")
+ self.file.flush()
+
+ def close(self):
+ if self.own_file:
+ self.file.close()
+
+
+class JSONOutputFormat(KVWriter):
+ def __init__(self, filename):
+ self.file = open(filename, "wt")
+
+ def writekvs(self, kvs):
+ for k, v in sorted(kvs.items()):
+ if hasattr(v, "dtype"):
+ kvs[k] = float(v)
+ self.file.write(json.dumps(kvs) + "\n")
+ self.file.flush()
+
+ def close(self):
+ self.file.close()
+
+
+class CSVOutputFormat(KVWriter):
+ def __init__(self, filename):
+ self.file = open(filename, "w+t")
+ self.keys = []
+ self.sep = ","
+
+ def writekvs(self, kvs):
+ # Add our current row to the history
+ extra_keys = list(kvs.keys() - self.keys)
+ extra_keys.sort()
+ if extra_keys:
+ self.keys.extend(extra_keys)
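+            # New keys appeared: rewrite the header row and pad every
+            # previously written row with empty columns so the CSV stays
+            # rectangular.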
+ self.file.seek(0)
+ lines = self.file.readlines()
+ self.file.seek(0)
+ for (i, k) in enumerate(self.keys):
+ if i > 0:
+ self.file.write(",")
+ self.file.write(k)
+ self.file.write("\n")
+ for line in lines[1:]:
+ self.file.write(line[:-1])
+ self.file.write(self.sep * len(extra_keys))
+ self.file.write("\n")
+ for (i, k) in enumerate(self.keys):
+ if i > 0:
+ self.file.write(",")
+ v = kvs.get(k)
+ if v is not None:
+ self.file.write(str(v))
+ self.file.write("\n")
+ self.file.flush()
+
+ def close(self):
+ self.file.close()
+
+
+class TensorBoardOutputFormat(KVWriter):
+ """
+ Dumps key/value pairs into TensorBoard's numeric format.
+ """
+
+ def __init__(self, dir):
+ os.makedirs(dir, exist_ok=True)
+ self.dir = dir
+ self.step = 1
+ prefix = "events"
+ path = osp.join(osp.abspath(dir), prefix)
+ import tensorflow as tf
+ from tensorflow.python import pywrap_tensorflow
+ from tensorflow.core.util import event_pb2
+ from tensorflow.python.util import compat
+
+ self.tf = tf
+ self.event_pb2 = event_pb2
+ self.pywrap_tensorflow = pywrap_tensorflow
+ self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
+
+ def writekvs(self, kvs):
+ def summary_val(k, v):
+ kwargs = {"tag": k, "simple_value": float(v)}
+ return self.tf.Summary.Value(**kwargs)
+
+ summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
+ event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
+ event.step = (
+ self.step
+ ) # is there any reason why you'd want to specify the step?
+ self.writer.WriteEvent(event)
+ self.writer.Flush()
+ self.step += 1
+
+ def close(self):
+ if self.writer:
+ self.writer.Close()
+ self.writer = None
+
+
+def make_output_format(format, ev_dir, log_suffix=""):
+ os.makedirs(ev_dir, exist_ok=True)
+ if format == "stdout":
+ return HumanOutputFormat(sys.stdout)
+ elif format == "log":
+ return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
+ elif format == "json":
+ return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
+ elif format == "csv":
+ return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
+ elif format == "tensorboard":
+ return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
+ else:
+ raise ValueError("Unknown format specified: %s" % (format,))
+
+
+# ================================================================
+# API
+# ================================================================
+
+
+def logkv(key, val):
+ """
+ Log a value of some diagnostic
+ Call this once for each diagnostic quantity, each iteration
+ If called many times, last value will be used.
+ """
+ get_current().logkv(key, val)
+
+
+def logkv_mean(key, val):
+ """
+ The same as logkv(), but if called many times, values averaged.
+ """
+ get_current().logkv_mean(key, val)
+
+
+def logkvs(d):
+ """
+ Log a dictionary of key-value pairs
+ """
+ for (k, v) in d.items():
+ logkv(k, v)
+
+
+def dumpkvs():
+ """
+ Write all of the diagnostics from the current iteration
+ """
+ return get_current().dumpkvs()
+
+
+def getkvs():
+ return get_current().name2val
+
+
+def log(*args, level=INFO):
+ """
+    Write the sequence of args, separated by spaces, to the console and output files (if an output file has been configured).
+ """
+ get_current().log(*args, level=level)
+
+
+def debug(*args):
+ log(*args, level=DEBUG)
+
+
+def info(*args):
+ log(*args, level=INFO)
+
+
+def warn(*args):
+ log(*args, level=WARN)
+
+
+def error(*args):
+ log(*args, level=ERROR)
+
+
+def set_level(level):
+ """
+ Set logging threshold on current logger.
+ """
+ get_current().set_level(level)
+
+
+def set_comm(comm):
+ get_current().set_comm(comm)
+
+
+def get_dir():
+ """
+ Get directory that log files are being written to.
+    Will be None if there is no output directory (i.e., if configure() was never called).
+ """
+ return get_current().get_dir()
+
+
+record_tabular = logkv
+dump_tabular = dumpkvs
+
+
+@contextmanager
+def profile_kv(scopename):
+ logkey = "wait_" + scopename
+ tstart = time.time()
+ try:
+ yield
+ finally:
+ get_current().name2val[logkey] += time.time() - tstart
+
+
+def profile(n):
+ """
+ Usage:
+ @profile("my_func")
+ def my_func(): code
+ """
+
+ def decorator_with_name(func):
+ def func_wrapper(*args, **kwargs):
+ with profile_kv(n):
+ return func(*args, **kwargs)
+
+ return func_wrapper
+
+ return decorator_with_name
+
+
+# ================================================================
+# Backend
+# ================================================================
+
+
+def get_current():
+ if Logger.CURRENT is None:
+ _configure_default_logger()
+
+ return Logger.CURRENT
+
+
+class Logger(object):
+ DEFAULT = None # A logger with no output files. (See right below class definition)
+ # So that you can still log to the terminal without setting up any output files
+ CURRENT = None # Current logger being used by the free functions above
+
+ def __init__(self, dir, output_formats, comm=None):
+ self.name2val = defaultdict(float) # values this iteration
+ self.name2cnt = defaultdict(int)
+ self.level = INFO
+ self.dir = dir
+ self.output_formats = output_formats
+ self.comm = comm
+
+ # Logging API, forwarded
+ # ----------------------------------------
+ def logkv(self, key, val):
+ self.name2val[key] = val
+
+ def logkv_mean(self, key, val):
+ oldval, cnt = self.name2val[key], self.name2cnt[key]
+ self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
+ self.name2cnt[key] = cnt + 1
+
+ def dumpkvs(self):
+ if self.comm is None:
+ d = self.name2val
+ else:
+ d = mpi_weighted_mean(
+ self.comm,
+ {
+ name: (val, self.name2cnt.get(name, 1))
+ for (name, val) in self.name2val.items()
+ },
+ )
+ if self.comm.rank != 0:
+ d["dummy"] = 1 # so we don't get a warning about empty dict
+ out = d.copy() # Return the dict for unit testing purposes
+ for fmt in self.output_formats:
+ if isinstance(fmt, KVWriter):
+ fmt.writekvs(d)
+ self.name2val.clear()
+ self.name2cnt.clear()
+ return out
+
+ def log(self, *args, level=INFO):
+ if self.level <= level:
+ self._do_log(args)
+
+ # Configuration
+ # ----------------------------------------
+ def set_level(self, level):
+ self.level = level
+
+ def set_comm(self, comm):
+ self.comm = comm
+
+ def get_dir(self):
+ return self.dir
+
+ def close(self):
+ for fmt in self.output_formats:
+ fmt.close()
+
+ # Misc
+ # ----------------------------------------
+ def _do_log(self, args):
+ for fmt in self.output_formats:
+ if isinstance(fmt, SeqWriter):
+ fmt.writeseq(map(str, args))
+
+
+def get_rank_without_mpi_import():
+ # check environment variables here instead of importing mpi4py
+ # to avoid calling MPI_Init() when this module is imported
+ for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
+ if varname in os.environ:
+ return int(os.environ[varname])
+ return 0
+
+
+def mpi_weighted_mean(comm, local_name2valcount):
+ """
+ Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
+ Perform a weighted average over dicts that are each on a different node
+ Input: local_name2valcount: dict mapping key -> (value, count)
+ Returns: key -> mean
+ """
+ all_name2valcount = comm.gather(local_name2valcount)
+ if comm.rank == 0:
+ name2sum = defaultdict(float)
+ name2count = defaultdict(float)
+ for n2vc in all_name2valcount:
+ for (name, (val, count)) in n2vc.items():
+ try:
+ val = float(val)
+ except ValueError:
+ if comm.rank == 0:
+ warnings.warn(
+ "WARNING: tried to compute mean on non-float {}={}".format(
+ name, val
+ )
+ )
+ else:
+ name2sum[name] += val * count
+ name2count[name] += count
+ return {name: name2sum[name] / name2count[name] for name in name2sum}
+ else:
+ return {}
+
+
+def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
+ """
+ If comm is provided, average all numerical stats across that comm
+ """
+ if dir is None:
+ dir = os.getenv("OPENAI_LOGDIR")
+ if dir is None:
+ dir = osp.join(
+ tempfile.gettempdir(),
+ datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
+ )
+ assert isinstance(dir, str)
+ dir = os.path.expanduser(dir)
+ os.makedirs(os.path.expanduser(dir), exist_ok=True)
+
+ rank = get_rank_without_mpi_import()
+ if rank > 0:
+ log_suffix = log_suffix + "-rank%03i" % rank
+
+ if format_strs is None:
+ if rank == 0:
+ format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
+ else:
+ format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
+ format_strs = filter(None, format_strs)
+ output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
+
+ Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
+ if output_formats:
+ log("Logging to %s" % dir)
+
+
+def _configure_default_logger():
+ configure()
+ Logger.DEFAULT = Logger.CURRENT
+
+
+def reset():
+ if Logger.CURRENT is not Logger.DEFAULT:
+ Logger.CURRENT.close()
+ Logger.CURRENT = Logger.DEFAULT
+ log("Reset logger")
+
+
+@contextmanager
+def scoped_configure(dir=None, format_strs=None, comm=None):
+ prevlogger = Logger.CURRENT
+ configure(dir=dir, format_strs=format_strs, comm=comm)
+ try:
+ yield
+ finally:
+ Logger.CURRENT.close()
+ Logger.CURRENT = prevlogger
+
diff --git a/case_studies/diffpure/guided_diffusion/losses.py b/case_studies/diffpure/guided_diffusion/losses.py
new file mode 100644
index 0000000..a66e7a5
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/losses.py
@@ -0,0 +1,85 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/losses.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+Helpers for various likelihood-based losses. These are ported from the original
+Ho et al. diffusion models codebase:
+https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
+"""
+
+import numpy as np
+
+import torch as th
+
+
+def normal_kl(mean1, logvar1, mean2, logvar2):
+ """
+ Compute the KL divergence between two gaussians.
+
+ Shapes are automatically broadcasted, so batches can be compared to
+ scalars, among other use cases.
+ """
+ tensor = None
+ for obj in (mean1, logvar1, mean2, logvar2):
+ if isinstance(obj, th.Tensor):
+ tensor = obj
+ break
+ assert tensor is not None, "at least one argument must be a Tensor"
+
+ # Force variances to be Tensors. Broadcasting helps convert scalars to
+ # Tensors, but it does not work for th.exp().
+ logvar1, logvar2 = [
+ x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
+ for x in (logvar1, logvar2)
+ ]
+
+ return 0.5 * (
+ -1.0
+ + logvar2
+ - logvar1
+ + th.exp(logvar1 - logvar2)
+ + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
+ )
+
+
+def approx_standard_normal_cdf(x):
+ """
+ A fast approximation of the cumulative distribution function of the
+ standard normal.
+ """
+ return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
+
+
+def discretized_gaussian_log_likelihood(x, *, means, log_scales):
+ """
+ Compute the log-likelihood of a Gaussian distribution discretizing to a
+ given image.
+
+ :param x: the target images. It is assumed that this was uint8 values,
+ rescaled to the range [-1, 1].
+ :param means: the Gaussian mean Tensor.
+ :param log_scales: the Gaussian log stddev Tensor.
+ :return: a tensor like x of log probabilities (in nats).
+ """
+ assert x.shape == means.shape == log_scales.shape
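+    # Each pixel corresponds to a bin of width 2/255 in [-1, 1]; the likelihood
+    # is the Gaussian probability mass of that bin, with the outermost bins
+    # extended to -inf and +inf respectively.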
+ centered_x = x - means
+ inv_stdv = th.exp(-log_scales)
+ plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
+ cdf_plus = approx_standard_normal_cdf(plus_in)
+ min_in = inv_stdv * (centered_x - 1.0 / 255.0)
+ cdf_min = approx_standard_normal_cdf(min_in)
+ log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
+ log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
+ cdf_delta = cdf_plus - cdf_min
+ log_probs = th.where(
+ x < -0.999,
+ log_cdf_plus,
+ th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
+ )
+ assert log_probs.shape == x.shape
+ return log_probs
diff --git a/case_studies/diffpure/guided_diffusion/nn.py b/case_studies/diffpure/guided_diffusion/nn.py
new file mode 100644
index 0000000..0655447
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/nn.py
@@ -0,0 +1,178 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+"""
+Various utilities for neural networks.
+"""
+
+import math
+
+import torch as th
+import torch.nn as nn
+
+
+# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
+class SiLU(nn.Module):
+ def forward(self, x):
+ return x * th.sigmoid(x)
+
+
+class GroupNorm32(nn.GroupNorm):
+ def forward(self, x):
+ return super().forward(x.float()).type(x.dtype)
+
+
+def conv_nd(dims, *args, **kwargs):
+ """
+ Create a 1D, 2D, or 3D convolution module.
+ """
+ if dims == 1:
+ return nn.Conv1d(*args, **kwargs)
+ elif dims == 2:
+ return nn.Conv2d(*args, **kwargs)
+ elif dims == 3:
+ return nn.Conv3d(*args, **kwargs)
+ raise ValueError(f"unsupported dimensions: {dims}")
+
+
+def linear(*args, **kwargs):
+ """
+ Create a linear module.
+ """
+ return nn.Linear(*args, **kwargs)
+
+
+def avg_pool_nd(dims, *args, **kwargs):
+ """
+ Create a 1D, 2D, or 3D average pooling module.
+ """
+ if dims == 1:
+ return nn.AvgPool1d(*args, **kwargs)
+ elif dims == 2:
+ return nn.AvgPool2d(*args, **kwargs)
+ elif dims == 3:
+ return nn.AvgPool3d(*args, **kwargs)
+ raise ValueError(f"unsupported dimensions: {dims}")
+
+
+def update_ema(target_params, source_params, rate=0.99):
+ """
+ Update target parameters to be closer to those of source parameters using
+ an exponential moving average.
+
+ :param target_params: the target parameter sequence.
+ :param source_params: the source parameter sequence.
+ :param rate: the EMA rate (closer to 1 means slower).
+ """
+ for targ, src in zip(target_params, source_params):
+ targ.detach().mul_(rate).add_(src, alpha=1 - rate)
+
+
+def zero_module(module):
+ """
+ Zero out the parameters of a module and return it.
+ """
+ for p in module.parameters():
+ p.detach().zero_()
+ return module
+
+
+def scale_module(module, scale):
+ """
+ Scale the parameters of a module and return it.
+ """
+ for p in module.parameters():
+ p.detach().mul_(scale)
+ return module
+
+
+def mean_flat(tensor):
+ """
+ Take the mean over all non-batch dimensions.
+ """
+ return tensor.mean(dim=list(range(1, len(tensor.shape))))
+
+
+def normalization(channels):
+ """
+ Make a standard normalization layer.
+
+ :param channels: number of input channels.
+ :return: an nn.Module for normalization.
+ """
+ return GroupNorm32(32, channels)
+
+
+def timestep_embedding(timesteps, dim, max_period=10000):
+ """
+ Create sinusoidal timestep embeddings.
+
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ :param dim: the dimension of the output.
+ :param max_period: controls the minimum frequency of the embeddings.
+ :return: an [N x dim] Tensor of positional embeddings.
+ """
+ half = dim // 2
+ freqs = th.exp(
+ -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
+ ).to(device=timesteps.device)
+ args = timesteps[:, None].float() * freqs[None]
+ embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
+ if dim % 2:
+ embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
+ return embedding
+
+
+def checkpoint(func, inputs, params, flag):
+ """
+ Evaluate a function without caching intermediate activations, allowing for
+ reduced memory at the expense of extra compute in the backward pass.
+
+ :param func: the function to evaluate.
+ :param inputs: the argument sequence to pass to `func`.
+ :param params: a sequence of parameters `func` depends on but does not
+ explicitly take as arguments.
+ :param flag: if False, disable gradient checkpointing.
+ """
+ if flag:
+ args = tuple(inputs) + tuple(params)
+ return CheckpointFunction.apply(func, len(inputs), *args)
+ else:
+ return func(*inputs)
+
+
+class CheckpointFunction(th.autograd.Function):
+ @staticmethod
+ def forward(ctx, run_function, length, *args):
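+        # Store the function and its inputs so the activations can be
+        # recomputed in backward() instead of being cached here.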
+ ctx.run_function = run_function
+ ctx.input_tensors = list(args[:length])
+ ctx.input_params = list(args[length:])
+ with th.no_grad():
+ output_tensors = ctx.run_function(*ctx.input_tensors)
+ return output_tensors
+
+ @staticmethod
+ def backward(ctx, *output_grads):
+ ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
+ with th.enable_grad():
+ # Fixes a bug where the first op in run_function modifies the
+ # Tensor storage in place, which is not allowed for detach()'d
+ # Tensors.
+ shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
+ output_tensors = ctx.run_function(*shallow_copies)
+ input_grads = th.autograd.grad(
+ output_tensors,
+ ctx.input_tensors + ctx.input_params,
+ output_grads,
+ allow_unused=True,
+ )
+ del ctx.input_tensors
+ del ctx.input_params
+ del output_tensors
+ return (None, None) + input_grads
diff --git a/case_studies/diffpure/guided_diffusion/resample.py b/case_studies/diffpure/guided_diffusion/resample.py
new file mode 100644
index 0000000..7afa05e
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/resample.py
@@ -0,0 +1,162 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/resample.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+from abc import ABC, abstractmethod
+
+import numpy as np
+import torch as th
+import torch.distributed as dist
+
+
+def create_named_schedule_sampler(name, diffusion):
+ """
+ Create a ScheduleSampler from a library of pre-defined samplers.
+
+ :param name: the name of the sampler.
+ :param diffusion: the diffusion object to sample for.
+ """
+ if name == "uniform":
+ return UniformSampler(diffusion)
+ elif name == "loss-second-moment":
+ return LossSecondMomentResampler(diffusion)
+ else:
+ raise NotImplementedError(f"unknown schedule sampler: {name}")
+
+
+class ScheduleSampler(ABC):
+ """
+ A distribution over timesteps in the diffusion process, intended to reduce
+ variance of the objective.
+
+ By default, samplers perform unbiased importance sampling, in which the
+ objective's mean is unchanged.
+ However, subclasses may override sample() to change how the resampled
+ terms are reweighted, allowing for actual changes in the objective.
+ """
+
+ @abstractmethod
+ def weights(self):
+ """
+ Get a numpy array of weights, one per diffusion step.
+
+ The weights needn't be normalized, but must be positive.
+ """
+
+ def sample(self, batch_size, device):
+ """
+ Importance-sample timesteps for a batch.
+
+ :param batch_size: the number of timesteps.
+ :param device: the torch device to save to.
+ :return: a tuple (timesteps, weights):
+ - timesteps: a tensor of timestep indices.
+ - weights: a tensor of weights to scale the resulting losses.
+ """
+ w = self.weights()
+ p = w / np.sum(w)
+ indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
+ indices = th.from_numpy(indices_np).long().to(device)
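+        # Importance weights that keep the weighted loss an unbiased estimate
+        # of the uniform-timestep objective.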
+ weights_np = 1 / (len(p) * p[indices_np])
+ weights = th.from_numpy(weights_np).float().to(device)
+ return indices, weights
+
+
+class UniformSampler(ScheduleSampler):
+ def __init__(self, diffusion):
+ self.diffusion = diffusion
+ self._weights = np.ones([diffusion.num_timesteps])
+
+ def weights(self):
+ return self._weights
+
+
+class LossAwareSampler(ScheduleSampler):
+ def update_with_local_losses(self, local_ts, local_losses):
+ """
+ Update the reweighting using losses from a model.
+
+ Call this method from each rank with a batch of timesteps and the
+ corresponding losses for each of those timesteps.
+ This method will perform synchronization to make sure all of the ranks
+ maintain the exact same reweighting.
+
+ :param local_ts: an integer Tensor of timesteps.
+ :param local_losses: a 1D Tensor of losses.
+ """
+ batch_sizes = [
+ th.tensor([0], dtype=th.int32, device=local_ts.device)
+ for _ in range(dist.get_world_size())
+ ]
+ dist.all_gather(
+ batch_sizes,
+ th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
+ )
+
+ # Pad all_gather batches to be the maximum batch size.
+ batch_sizes = [x.item() for x in batch_sizes]
+ max_bs = max(batch_sizes)
+
+ timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
+ loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
+ dist.all_gather(timestep_batches, local_ts)
+ dist.all_gather(loss_batches, local_losses)
+ timesteps = [
+ x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
+ ]
+ losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
+ self.update_with_all_losses(timesteps, losses)
+
+ @abstractmethod
+ def update_with_all_losses(self, ts, losses):
+ """
+ Update the reweighting using losses from a model.
+
+ Sub-classes should override this method to update the reweighting
+ using losses from the model.
+
+ This method directly updates the reweighting without synchronizing
+ between workers. It is called by update_with_local_losses from all
+ ranks with identical arguments. Thus, it should have deterministic
+ behavior to maintain state across workers.
+
+ :param ts: a list of int timesteps.
+ :param losses: a list of float losses, one per timestep.
+ """
+
+
+class LossSecondMomentResampler(LossAwareSampler):
+ def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
+ self.diffusion = diffusion
+ self.history_per_term = history_per_term
+ self.uniform_prob = uniform_prob
+ self._loss_history = np.zeros(
+ [diffusion.num_timesteps, history_per_term], dtype=np.float64
+ )
+        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
+
+ def weights(self):
+ if not self._warmed_up():
+ return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
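+        # Weight each timestep by the RMS of its recent losses, then mix in a
+        # small uniform component so that no timestep is sampled with
+        # vanishing probability.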
+ weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
+ weights /= np.sum(weights)
+ weights *= 1 - self.uniform_prob
+ weights += self.uniform_prob / len(weights)
+ return weights
+
+ def update_with_all_losses(self, ts, losses):
+ for t, loss in zip(ts, losses):
+ if self._loss_counts[t] == self.history_per_term:
+ # Shift out the oldest loss term.
+ self._loss_history[t, :-1] = self._loss_history[t, 1:]
+ self._loss_history[t, -1] = loss
+ else:
+ self._loss_history[t, self._loss_counts[t]] = loss
+ self._loss_counts[t] += 1
+
+ def _warmed_up(self):
+ return (self._loss_counts == self.history_per_term).all()
diff --git a/case_studies/diffpure/guided_diffusion/respace.py b/case_studies/diffpure/guided_diffusion/respace.py
new file mode 100644
index 0000000..5d41637
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/respace.py
@@ -0,0 +1,136 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/respace.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+import numpy as np
+import torch as th
+
+from .gaussian_diffusion import GaussianDiffusion
+
+
+def space_timesteps(num_timesteps, section_counts):
+ """
+ Create a list of timesteps to use from an original diffusion process,
+ given the number of timesteps we want to take from equally-sized portions
+ of the original process.
+
+    For example, if there are 300 timesteps and the section counts are [10,15,20],
+ then the first 100 timesteps are strided to be 10 timesteps, the second 100
+ are strided to be 15 timesteps, and the final 100 are strided to be 20.
+
+ If the stride is a string starting with "ddim", then the fixed striding
+ from the DDIM paper is used, and only one section is allowed.
+
+ :param num_timesteps: the number of diffusion steps in the original
+ process to divide up.
+ :param section_counts: either a list of numbers, or a string containing
+ comma-separated numbers, indicating the step count
+ per section. As a special case, use "ddimN" where N
+ is a number of steps to use the striding from the
+ DDIM paper.
+ :return: a set of diffusion steps from the original process to use.
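+
+    For example, space_timesteps(1000, "ddim50") returns the 50 evenly strided
+    steps {0, 20, ..., 980}, while space_timesteps(100, [10, 10]) picks 10
+    roughly evenly spaced steps from each half of the process.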
+ """
+ if isinstance(section_counts, str):
+ if section_counts.startswith("ddim"):
+ desired_count = int(section_counts[len("ddim") :])
+ for i in range(1, num_timesteps):
+ if len(range(0, num_timesteps, i)) == desired_count:
+ return set(range(0, num_timesteps, i))
+ raise ValueError(
+                f"cannot create exactly {desired_count} steps with an integer stride"
+ )
+ section_counts = [int(x) for x in section_counts.split(",")]
+ size_per = num_timesteps // len(section_counts)
+ extra = num_timesteps % len(section_counts)
+ start_idx = 0
+ all_steps = []
+ for i, section_count in enumerate(section_counts):
+ size = size_per + (1 if i < extra else 0)
+ if size < section_count:
+ raise ValueError(
+ f"cannot divide section of {size} steps into {section_count}"
+ )
+ if section_count <= 1:
+ frac_stride = 1
+ else:
+ frac_stride = (size - 1) / (section_count - 1)
+ cur_idx = 0.0
+ taken_steps = []
+ for _ in range(section_count):
+ taken_steps.append(start_idx + round(cur_idx))
+ cur_idx += frac_stride
+ all_steps += taken_steps
+ start_idx += size
+ return set(all_steps)
+
+
+class SpacedDiffusion(GaussianDiffusion):
+ """
+ A diffusion process which can skip steps in a base diffusion process.
+
+ :param use_timesteps: a collection (sequence or set) of timesteps from the
+ original diffusion process to retain.
+ :param kwargs: the kwargs to create the base diffusion process.
+ """
+
+ def __init__(self, use_timesteps, **kwargs):
+ self.use_timesteps = set(use_timesteps)
+ self.timestep_map = []
+ self.original_num_steps = len(kwargs["betas"])
+
+ base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
+ last_alpha_cumprod = 1.0
+ new_betas = []
+ for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
+ if i in self.use_timesteps:
+ new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
+ last_alpha_cumprod = alpha_cumprod
+ self.timestep_map.append(i)
+ kwargs["betas"] = np.array(new_betas)
+ super().__init__(**kwargs)
+
+ def p_mean_variance(
+ self, model, *args, **kwargs
+ ): # pylint: disable=signature-differs
+ return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
+
+ def training_losses(
+ self, model, *args, **kwargs
+ ): # pylint: disable=signature-differs
+ return super().training_losses(self._wrap_model(model), *args, **kwargs)
+
+ def condition_mean(self, cond_fn, *args, **kwargs):
+ return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
+
+ def condition_score(self, cond_fn, *args, **kwargs):
+ return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
+
+ def _wrap_model(self, model):
+ if isinstance(model, _WrappedModel):
+ return model
+ return _WrappedModel(
+ model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
+ )
+
+ def _scale_timesteps(self, t):
+ # Scaling is done by the wrapped model.
+ return t
+
+
+class _WrappedModel:
+ def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
+ self.model = model
+ self.timestep_map = timestep_map
+ self.rescale_timesteps = rescale_timesteps
+ self.original_num_steps = original_num_steps
+
+ def __call__(self, x, ts, **kwargs):
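+        # Map indices of the respaced process back to the timesteps of the
+        # original diffusion process before calling the wrapped model.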
+ map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
+ new_ts = map_tensor[ts]
+ if self.rescale_timesteps:
+ new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
+ return self.model(x, new_ts, **kwargs)
diff --git a/case_studies/diffpure/guided_diffusion/script_util.py b/case_studies/diffpure/guided_diffusion/script_util.py
new file mode 100644
index 0000000..7605693
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/script_util.py
@@ -0,0 +1,460 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/script_util.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+import argparse
+import inspect
+
+from . import gaussian_diffusion as gd
+from .respace import SpacedDiffusion, space_timesteps
+from .unet import SuperResModel, UNetModel, EncoderUNetModel
+
+NUM_CLASSES = 1000
+
+
+def diffusion_defaults():
+ """
+ Defaults for image and classifier training.
+ """
+ return dict(
+ learn_sigma=False,
+ diffusion_steps=1000,
+ noise_schedule="linear",
+ timestep_respacing="",
+ use_kl=False,
+ predict_xstart=False,
+ rescale_timesteps=False,
+ rescale_learned_sigmas=False,
+ )
+
+
+def classifier_defaults():
+ """
+ Defaults for classifier models.
+ """
+ return dict(
+ image_size=64,
+ classifier_use_fp16=False,
+ classifier_width=128,
+ classifier_depth=2,
+ classifier_attention_resolutions="32,16,8", # 16
+ classifier_use_scale_shift_norm=True, # False
+ classifier_resblock_updown=True, # False
+ classifier_pool="attention",
+ )
+
+
+def model_and_diffusion_defaults():
+ """
+ Defaults for image training.
+ """
+ res = dict(
+ image_size=64,
+ num_channels=128,
+ num_res_blocks=2,
+ num_heads=4,
+ num_heads_upsample=-1,
+ num_head_channels=-1,
+ attention_resolutions="16,8",
+ channel_mult="",
+ dropout=0.0,
+ class_cond=False,
+ use_checkpoint=False,
+ use_scale_shift_norm=True,
+ resblock_updown=False,
+ use_fp16=False,
+ use_new_attention_order=False,
+ )
+ res.update(diffusion_defaults())
+ return res
+
+
+def classifier_and_diffusion_defaults():
+ res = classifier_defaults()
+ res.update(diffusion_defaults())
+ return res
+
+
+def create_model_and_diffusion(
+ image_size,
+ class_cond,
+ learn_sigma,
+ num_channels,
+ num_res_blocks,
+ channel_mult,
+ num_heads,
+ num_head_channels,
+ num_heads_upsample,
+ attention_resolutions,
+ dropout,
+ diffusion_steps,
+ noise_schedule,
+ timestep_respacing,
+ use_kl,
+ predict_xstart,
+ rescale_timesteps,
+ rescale_learned_sigmas,
+ use_checkpoint,
+ use_scale_shift_norm,
+ resblock_updown,
+ use_fp16,
+ use_new_attention_order,
+):
+ model = create_model(
+ image_size,
+ num_channels,
+ num_res_blocks,
+ channel_mult=channel_mult,
+ learn_sigma=learn_sigma,
+ class_cond=class_cond,
+ use_checkpoint=use_checkpoint,
+ attention_resolutions=attention_resolutions,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ num_heads_upsample=num_heads_upsample,
+ use_scale_shift_norm=use_scale_shift_norm,
+ dropout=dropout,
+ resblock_updown=resblock_updown,
+ use_fp16=use_fp16,
+ use_new_attention_order=use_new_attention_order,
+ )
+ diffusion = create_gaussian_diffusion(
+ steps=diffusion_steps,
+ learn_sigma=learn_sigma,
+ noise_schedule=noise_schedule,
+ use_kl=use_kl,
+ predict_xstart=predict_xstart,
+ rescale_timesteps=rescale_timesteps,
+ rescale_learned_sigmas=rescale_learned_sigmas,
+ timestep_respacing=timestep_respacing,
+ )
+ return model, diffusion
+
+
+def create_model(
+ image_size,
+ num_channels,
+ num_res_blocks,
+ channel_mult="",
+ learn_sigma=False,
+ class_cond=False,
+ use_checkpoint=False,
+ attention_resolutions="16",
+ num_heads=1,
+ num_head_channels=-1,
+ num_heads_upsample=-1,
+ use_scale_shift_norm=False,
+ dropout=0,
+ resblock_updown=False,
+ use_fp16=False,
+ use_new_attention_order=False,
+):
+ if channel_mult == "":
+ if image_size == 512:
+ channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
+ elif image_size == 256:
+ channel_mult = (1, 1, 2, 2, 4, 4)
+ elif image_size == 128:
+ channel_mult = (1, 1, 2, 3, 4)
+ elif image_size == 64:
+ channel_mult = (1, 2, 3, 4)
+ else:
+ raise ValueError(f"unsupported image size: {image_size}")
+ else:
+ channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
+
+ attention_ds = []
+ for res in attention_resolutions.split(","):
+ attention_ds.append(image_size // int(res))
+
+ return UNetModel(
+ image_size=image_size,
+ in_channels=3,
+ model_channels=num_channels,
+ out_channels=(3 if not learn_sigma else 6),
+ num_res_blocks=num_res_blocks,
+ attention_resolutions=tuple(attention_ds),
+ dropout=dropout,
+ channel_mult=channel_mult,
+ num_classes=(NUM_CLASSES if class_cond else None),
+ use_checkpoint=use_checkpoint,
+ use_fp16=use_fp16,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ num_heads_upsample=num_heads_upsample,
+ use_scale_shift_norm=use_scale_shift_norm,
+ resblock_updown=resblock_updown,
+ use_new_attention_order=use_new_attention_order,
+ )
+
+
+def create_classifier_and_diffusion(
+ image_size,
+ classifier_use_fp16,
+ classifier_width,
+ classifier_depth,
+ classifier_attention_resolutions,
+ classifier_use_scale_shift_norm,
+ classifier_resblock_updown,
+ classifier_pool,
+ learn_sigma,
+ diffusion_steps,
+ noise_schedule,
+ timestep_respacing,
+ use_kl,
+ predict_xstart,
+ rescale_timesteps,
+ rescale_learned_sigmas,
+):
+ classifier = create_classifier(
+ image_size,
+ classifier_use_fp16,
+ classifier_width,
+ classifier_depth,
+ classifier_attention_resolutions,
+ classifier_use_scale_shift_norm,
+ classifier_resblock_updown,
+ classifier_pool,
+ )
+ diffusion = create_gaussian_diffusion(
+ steps=diffusion_steps,
+ learn_sigma=learn_sigma,
+ noise_schedule=noise_schedule,
+ use_kl=use_kl,
+ predict_xstart=predict_xstart,
+ rescale_timesteps=rescale_timesteps,
+ rescale_learned_sigmas=rescale_learned_sigmas,
+ timestep_respacing=timestep_respacing,
+ )
+ return classifier, diffusion
+
+
+def create_classifier(
+ image_size,
+ classifier_use_fp16,
+ classifier_width,
+ classifier_depth,
+ classifier_attention_resolutions,
+ classifier_use_scale_shift_norm,
+ classifier_resblock_updown,
+ classifier_pool,
+):
+ if image_size == 512:
+ channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
+ elif image_size == 256:
+ channel_mult = (1, 1, 2, 2, 4, 4)
+ elif image_size == 128:
+ channel_mult = (1, 1, 2, 3, 4)
+ elif image_size == 64:
+ channel_mult = (1, 2, 3, 4)
+ else:
+ raise ValueError(f"unsupported image size: {image_size}")
+
+ attention_ds = []
+ for res in classifier_attention_resolutions.split(","):
+ attention_ds.append(image_size // int(res))
+
+ return EncoderUNetModel(
+ image_size=image_size,
+ in_channels=3,
+ model_channels=classifier_width,
+ out_channels=1000,
+ num_res_blocks=classifier_depth,
+ attention_resolutions=tuple(attention_ds),
+ channel_mult=channel_mult,
+ use_fp16=classifier_use_fp16,
+ num_head_channels=64,
+ use_scale_shift_norm=classifier_use_scale_shift_norm,
+ resblock_updown=classifier_resblock_updown,
+ pool=classifier_pool,
+ )
+
+
+def sr_model_and_diffusion_defaults():
+ res = model_and_diffusion_defaults()
+ res["large_size"] = 256
+ res["small_size"] = 64
+ arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
+ for k in res.copy().keys():
+ if k not in arg_names:
+ del res[k]
+ return res
+
+
+def sr_create_model_and_diffusion(
+ large_size,
+ small_size,
+ class_cond,
+ learn_sigma,
+ num_channels,
+ num_res_blocks,
+ num_heads,
+ num_head_channels,
+ num_heads_upsample,
+ attention_resolutions,
+ dropout,
+ diffusion_steps,
+ noise_schedule,
+ timestep_respacing,
+ use_kl,
+ predict_xstart,
+ rescale_timesteps,
+ rescale_learned_sigmas,
+ use_checkpoint,
+ use_scale_shift_norm,
+ resblock_updown,
+ use_fp16,
+):
+ model = sr_create_model(
+ large_size,
+ small_size,
+ num_channels,
+ num_res_blocks,
+ learn_sigma=learn_sigma,
+ class_cond=class_cond,
+ use_checkpoint=use_checkpoint,
+ attention_resolutions=attention_resolutions,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ num_heads_upsample=num_heads_upsample,
+ use_scale_shift_norm=use_scale_shift_norm,
+ dropout=dropout,
+ resblock_updown=resblock_updown,
+ use_fp16=use_fp16,
+ )
+ diffusion = create_gaussian_diffusion(
+ steps=diffusion_steps,
+ learn_sigma=learn_sigma,
+ noise_schedule=noise_schedule,
+ use_kl=use_kl,
+ predict_xstart=predict_xstart,
+ rescale_timesteps=rescale_timesteps,
+ rescale_learned_sigmas=rescale_learned_sigmas,
+ timestep_respacing=timestep_respacing,
+ )
+ return model, diffusion
+
+
+def sr_create_model(
+ large_size,
+ small_size,
+ num_channels,
+ num_res_blocks,
+ learn_sigma,
+ class_cond,
+ use_checkpoint,
+ attention_resolutions,
+ num_heads,
+ num_head_channels,
+ num_heads_upsample,
+ use_scale_shift_norm,
+ dropout,
+ resblock_updown,
+ use_fp16,
+):
+    _ = small_size  # hack to silence the unused-variable warning
+
+ if large_size == 512:
+ channel_mult = (1, 1, 2, 2, 4, 4)
+ elif large_size == 256:
+ channel_mult = (1, 1, 2, 2, 4, 4)
+ elif large_size == 64:
+ channel_mult = (1, 2, 3, 4)
+ else:
+ raise ValueError(f"unsupported large size: {large_size}")
+
+ attention_ds = []
+ for res in attention_resolutions.split(","):
+ attention_ds.append(large_size // int(res))
+
+ return SuperResModel(
+ image_size=large_size,
+ in_channels=3,
+ model_channels=num_channels,
+ out_channels=(3 if not learn_sigma else 6),
+ num_res_blocks=num_res_blocks,
+ attention_resolutions=tuple(attention_ds),
+ dropout=dropout,
+ channel_mult=channel_mult,
+ num_classes=(NUM_CLASSES if class_cond else None),
+ use_checkpoint=use_checkpoint,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ num_heads_upsample=num_heads_upsample,
+ use_scale_shift_norm=use_scale_shift_norm,
+ resblock_updown=resblock_updown,
+ use_fp16=use_fp16,
+ )
+
+
+def create_gaussian_diffusion(
+ *,
+ steps=1000,
+ learn_sigma=False,
+ sigma_small=False,
+ noise_schedule="linear",
+ use_kl=False,
+ predict_xstart=False,
+ rescale_timesteps=False,
+ rescale_learned_sigmas=False,
+ timestep_respacing="",
+):
+ betas = gd.get_named_beta_schedule(noise_schedule, steps)
+ if use_kl:
+ loss_type = gd.LossType.RESCALED_KL
+ elif rescale_learned_sigmas:
+ loss_type = gd.LossType.RESCALED_MSE
+ else:
+ loss_type = gd.LossType.MSE
+ if not timestep_respacing:
+ timestep_respacing = [steps]
+ return SpacedDiffusion(
+ use_timesteps=space_timesteps(steps, timestep_respacing),
+ betas=betas,
+ model_mean_type=(
+ gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
+ ),
+ model_var_type=(
+ (
+ gd.ModelVarType.FIXED_LARGE
+ if not sigma_small
+ else gd.ModelVarType.FIXED_SMALL
+ )
+ if not learn_sigma
+ else gd.ModelVarType.LEARNED_RANGE
+ ),
+ loss_type=loss_type,
+ rescale_timesteps=rescale_timesteps,
+ )
+
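+# Illustrative sketch (hypothetical settings): a 1000-step diffusion evaluated
+# with 100 respaced DDIM steps and learned variances could be constructed as
+#   diffusion = create_gaussian_diffusion(
+#       steps=1000, learn_sigma=True, timestep_respacing="ddim100")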
+
+def add_dict_to_argparser(parser, default_dict):
+ for k, v in default_dict.items():
+ v_type = type(v)
+ if v is None:
+ v_type = str
+ elif isinstance(v, bool):
+ v_type = str2bool
+ parser.add_argument(f"--{k}", default=v, type=v_type)
+
+
+def args_to_dict(args, keys):
+ return {k: getattr(args, k) for k in keys}
+
+
+def str2bool(v):
+ """
+ https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
+ """
+ if isinstance(v, bool):
+ return v
+ if v.lower() in ("yes", "true", "t", "y", "1"):
+ return True
+ elif v.lower() in ("no", "false", "f", "n", "0"):
+ return False
+ else:
+ raise argparse.ArgumentTypeError("boolean value expected")
diff --git a/case_studies/diffpure/guided_diffusion/train_util.py b/case_studies/diffpure/guided_diffusion/train_util.py
new file mode 100644
index 0000000..1942f18
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/train_util.py
@@ -0,0 +1,309 @@
+# ---------------------------------------------------------------
+# Taken as is from the following link:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/train_util.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+import copy
+import functools
+import os
+
+import blobfile as bf
+import torch as th
+import torch.distributed as dist
+from torch.nn.parallel.distributed import DistributedDataParallel as DDP
+from torch.optim import AdamW
+
+from . import dist_util, logger
+from .fp16_util import MixedPrecisionTrainer
+from .nn import update_ema
+from .resample import LossAwareSampler, UniformSampler
+
+# For ImageNet experiments, this was a good default value.
+# We found that the lg_loss_scale quickly climbed to
+# 20-21 within the first ~1K steps of training.
+INITIAL_LOG_LOSS_SCALE = 20.0
+
+
+class TrainLoop:
+ def __init__(
+ self,
+ *,
+ model,
+ diffusion,
+ data,
+ batch_size,
+ microbatch,
+ lr,
+ ema_rate,
+ log_interval,
+ save_interval,
+ resume_checkpoint,
+ use_fp16=False,
+ fp16_scale_growth=1e-3,
+ schedule_sampler=None,
+ weight_decay=0.0,
+ lr_anneal_steps=0,
+ ):
+ self.model = model
+ self.diffusion = diffusion
+ self.data = data
+ self.batch_size = batch_size
+ self.microbatch = microbatch if microbatch > 0 else batch_size
+ self.lr = lr
+ self.ema_rate = (
+ [ema_rate]
+ if isinstance(ema_rate, float)
+ else [float(x) for x in ema_rate.split(",")]
+ )
+ self.log_interval = log_interval
+ self.save_interval = save_interval
+ self.resume_checkpoint = resume_checkpoint
+ self.use_fp16 = use_fp16
+ self.fp16_scale_growth = fp16_scale_growth
+ self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
+ self.weight_decay = weight_decay
+ self.lr_anneal_steps = lr_anneal_steps
+
+ self.step = 0
+ self.resume_step = 0
+ self.global_batch = self.batch_size * dist.get_world_size()
+
+ self.sync_cuda = th.cuda.is_available()
+
+ self._load_and_sync_parameters()
+ self.mp_trainer = MixedPrecisionTrainer(
+ model=self.model,
+ use_fp16=self.use_fp16,
+ fp16_scale_growth=fp16_scale_growth,
+ )
+
+ self.opt = AdamW(
+ self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
+ )
+ if self.resume_step:
+ self._load_optimizer_state()
+ # Model was resumed, either due to a restart or a checkpoint
+ # being specified at the command line.
+ self.ema_params = [
+ self._load_ema_parameters(rate) for rate in self.ema_rate
+ ]
+ else:
+ self.ema_params = [
+ copy.deepcopy(self.mp_trainer.master_params)
+ for _ in range(len(self.ema_rate))
+ ]
+
+ if th.cuda.is_available():
+ self.use_ddp = True
+ self.ddp_model = DDP(
+ self.model,
+ device_ids=[dist_util.dev()],
+ output_device=dist_util.dev(),
+ broadcast_buffers=False,
+ bucket_cap_mb=128,
+ find_unused_parameters=False,
+ )
+ else:
+ if dist.get_world_size() > 1:
+ logger.warn(
+ "Distributed training requires CUDA. "
+ "Gradients will not be synchronized properly!"
+ )
+ self.use_ddp = False
+ self.ddp_model = self.model
+
+ def _load_and_sync_parameters(self):
+ resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
+
+ if resume_checkpoint:
+ self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
+ if dist.get_rank() == 0:
+ logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
+ self.model.load_state_dict(
+ dist_util.load_state_dict(
+ resume_checkpoint, map_location=dist_util.dev()
+ )
+ )
+
+ dist_util.sync_params(self.model.parameters())
+
+ def _load_ema_parameters(self, rate):
+ ema_params = copy.deepcopy(self.mp_trainer.master_params)
+
+ main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
+ ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
+ if ema_checkpoint:
+ if dist.get_rank() == 0:
+ logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
+ state_dict = dist_util.load_state_dict(
+ ema_checkpoint, map_location=dist_util.dev()
+ )
+ ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
+
+ dist_util.sync_params(ema_params)
+ return ema_params
+
+ def _load_optimizer_state(self):
+ main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
+ opt_checkpoint = bf.join(
+ bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
+ )
+ if bf.exists(opt_checkpoint):
+ logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
+ state_dict = dist_util.load_state_dict(
+ opt_checkpoint, map_location=dist_util.dev()
+ )
+ self.opt.load_state_dict(state_dict)
+
+ def run_loop(self):
+ while (
+ not self.lr_anneal_steps
+ or self.step + self.resume_step < self.lr_anneal_steps
+ ):
+ batch, cond = next(self.data)
+ self.run_step(batch, cond)
+ if self.step % self.log_interval == 0:
+ logger.dumpkvs()
+ if self.step % self.save_interval == 0:
+ self.save()
+ # Run for a finite amount of time in integration tests.
+ if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
+ return
+ self.step += 1
+ # Save the last checkpoint if it wasn't already saved.
+ if (self.step - 1) % self.save_interval != 0:
+ self.save()
+
+ def run_step(self, batch, cond):
+ self.forward_backward(batch, cond)
+ took_step = self.mp_trainer.optimize(self.opt)
+ if took_step:
+ self._update_ema()
+ self._anneal_lr()
+ self.log_step()
+
+ def forward_backward(self, batch, cond):
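+ # Splits the batch into microbatches, accumulates gradients across them,
+ # and skips DDP gradient synchronization (no_sync) for all but the last
+ # microbatch.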
+ self.mp_trainer.zero_grad()
+ for i in range(0, batch.shape[0], self.microbatch):
+ micro = batch[i : i + self.microbatch].to(dist_util.dev())
+ micro_cond = {
+ k: v[i : i + self.microbatch].to(dist_util.dev())
+ for k, v in cond.items()
+ }
+ last_batch = (i + self.microbatch) >= batch.shape[0]
+ t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
+
+ compute_losses = functools.partial(
+ self.diffusion.training_losses,
+ self.ddp_model,
+ micro,
+ t,
+ model_kwargs=micro_cond,
+ )
+
+ if last_batch or not self.use_ddp:
+ losses = compute_losses()
+ else:
+ with self.ddp_model.no_sync():
+ losses = compute_losses()
+
+ if isinstance(self.schedule_sampler, LossAwareSampler):
+ self.schedule_sampler.update_with_local_losses(
+ t, losses["loss"].detach()
+ )
+
+ loss = (losses["loss"] * weights).mean()
+ log_loss_dict(
+ self.diffusion, t, {k: v * weights for k, v in losses.items()}
+ )
+ self.mp_trainer.backward(loss)
+
+ def _update_ema(self):
+ for rate, params in zip(self.ema_rate, self.ema_params):
+ update_ema(params, self.mp_trainer.master_params, rate=rate)
+
+ def _anneal_lr(self):
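+ # Linearly decays the learning rate to zero over lr_anneal_steps
+ # (a no-op when lr_anneal_steps is 0).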
+ if not self.lr_anneal_steps:
+ return
+ frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
+ lr = self.lr * (1 - frac_done)
+ for param_group in self.opt.param_groups:
+ param_group["lr"] = lr
+
+ def log_step(self):
+ logger.logkv("step", self.step + self.resume_step)
+ logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
+
+ def save(self):
+ def save_checkpoint(rate, params):
+ state_dict = self.mp_trainer.master_params_to_state_dict(params)
+ if dist.get_rank() == 0:
+ logger.log(f"saving model {rate}...")
+ if not rate:
+ filename = f"model{(self.step+self.resume_step):06d}.pt"
+ else:
+ filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
+ with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
+ th.save(state_dict, f)
+
+ save_checkpoint(0, self.mp_trainer.master_params)
+ for rate, params in zip(self.ema_rate, self.ema_params):
+ save_checkpoint(rate, params)
+
+ if dist.get_rank() == 0:
+ with bf.BlobFile(
+ bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
+ "wb",
+ ) as f:
+ th.save(self.opt.state_dict(), f)
+
+ dist.barrier()
+
+
+def parse_resume_step_from_filename(filename):
+ """
+ Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
+ checkpoint's number of steps.
+ """
+ split = filename.split("model")
+ if len(split) < 2:
+ return 0
+ split1 = split[-1].split(".")[0]
+ try:
+ return int(split1)
+ except ValueError:
+ return 0
+
+
+def get_blob_logdir():
+ # You can change this to be a separate path to save checkpoints to
+ # a blobstore or some external drive.
+ return logger.get_dir()
+
+
+def find_resume_checkpoint():
+ # On your infrastructure, you may want to override this to automatically
+ # discover the latest checkpoint on your blob storage, etc.
+ return None
+
+
+def find_ema_checkpoint(main_checkpoint, step, rate):
+ if main_checkpoint is None:
+ return None
+ filename = f"ema_{rate}_{(step):06d}.pt"
+ path = bf.join(bf.dirname(main_checkpoint), filename)
+ if bf.exists(path):
+ return path
+ return None
+
+
+def log_loss_dict(diffusion, ts, losses):
+ for key, values in losses.items():
+ logger.logkv_mean(key, values.mean().item())
+ # Log the quantiles (four quartiles, in particular).
+ for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
+ quartile = int(4 * sub_t / diffusion.num_timesteps)
+ logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
diff --git a/case_studies/diffpure/guided_diffusion/unet.py b/case_studies/diffpure/guided_diffusion/unet.py
new file mode 100644
index 0000000..f25e8a7
--- /dev/null
+++ b/case_studies/diffpure/guided_diffusion/unet.py
@@ -0,0 +1,902 @@
+# ---------------------------------------------------------------
+# Taken from the following link as is from:
+# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/unet.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_GUIDED_DIFFUSION).
+# ---------------------------------------------------------------
+
+from abc import abstractmethod
+
+import math
+
+import numpy as np
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .fp16_util import convert_module_to_f16, convert_module_to_f32
+from .nn import (
+ checkpoint,
+ conv_nd,
+ linear,
+ avg_pool_nd,
+ zero_module,
+ normalization,
+ timestep_embedding,
+)
+
+
+class AttentionPool2d(nn.Module):
+ """
+ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
+ """
+
+ def __init__(
+ self,
+ spacial_dim: int,
+ embed_dim: int,
+ num_heads_channels: int,
+ output_dim: int = None,
+ ):
+ super().__init__()
+ self.positional_embedding = nn.Parameter(
+ th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
+ )
+ self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
+ self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
+ self.num_heads = embed_dim // num_heads_channels
+ self.attention = QKVAttention(self.num_heads)
+
+ def forward(self, x):
+ b, c, *_spatial = x.shape
+ x = x.reshape(b, c, -1) # NC(HW)
+ x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
+ x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
+ x = self.qkv_proj(x)
+ x = self.attention(x)
+ x = self.c_proj(x)
+ return x[:, :, 0]
+
+
+class TimestepBlock(nn.Module):
+ """
+ Any module where forward() takes timestep embeddings as a second argument.
+ """
+
+ @abstractmethod
+ def forward(self, x, emb):
+ """
+ Apply the module to `x` given `emb` timestep embeddings.
+ """
+
+
+class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
+ """
+ A sequential module that passes timestep embeddings to the children that
+ support it as an extra input.
+ """
+
+ def forward(self, x, emb):
+ for layer in self:
+ if isinstance(layer, TimestepBlock):
+ x = layer(x, emb)
+ else:
+ x = layer(x)
+ return x
+
+
+class Upsample(nn.Module):
+ """
+ An upsampling layer with an optional convolution.
+
+ :param channels: channels in the inputs and outputs.
+ :param use_conv: a bool determining if a convolution is applied.
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
+ upsampling occurs in the inner-two dimensions.
+ """
+
+ def __init__(self, channels, use_conv, dims=2, out_channels=None):
+ super().__init__()
+ self.channels = channels
+ self.out_channels = out_channels or channels
+ self.use_conv = use_conv
+ self.dims = dims
+ if use_conv:
+ self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
+
+ def forward(self, x):
+ assert x.shape[1] == self.channels
+ if self.dims == 3:
+ x = F.interpolate(
+ x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
+ )
+ else:
+ x = F.interpolate(x, scale_factor=2, mode="nearest")
+ if self.use_conv:
+ x = self.conv(x)
+ return x
+
+
+class Downsample(nn.Module):
+ """
+ A downsampling layer with an optional convolution.
+
+ :param channels: channels in the inputs and outputs.
+ :param use_conv: a bool determining if a convolution is applied.
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
+ downsampling occurs in the inner-two dimensions.
+ """
+
+ def __init__(self, channels, use_conv, dims=2, out_channels=None):
+ super().__init__()
+ self.channels = channels
+ self.out_channels = out_channels or channels
+ self.use_conv = use_conv
+ self.dims = dims
+ stride = 2 if dims != 3 else (1, 2, 2)
+ if use_conv:
+ self.op = conv_nd(
+ dims, self.channels, self.out_channels, 3, stride=stride, padding=1
+ )
+ else:
+ assert self.channels == self.out_channels
+ self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
+
+ def forward(self, x):
+ assert x.shape[1] == self.channels
+ return self.op(x)
+
+
+class ResBlock(TimestepBlock):
+ """
+ A residual block that can optionally change the number of channels.
+
+ :param channels: the number of input channels.
+ :param emb_channels: the number of timestep embedding channels.
+ :param dropout: the rate of dropout.
+ :param out_channels: if specified, the number of out channels.
+ :param use_conv: if True and out_channels is specified, use a spatial
+ convolution instead of a smaller 1x1 convolution to change the
+ channels in the skip connection.
+ :param dims: determines if the signal is 1D, 2D, or 3D.
+ :param use_checkpoint: if True, use gradient checkpointing on this module.
+ :param up: if True, use this block for upsampling.
+ :param down: if True, use this block for downsampling.
+ """
+
+ def __init__(
+ self,
+ channels,
+ emb_channels,
+ dropout,
+ out_channels=None,
+ use_conv=False,
+ use_scale_shift_norm=False,
+ dims=2,
+ use_checkpoint=False,
+ up=False,
+ down=False,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.emb_channels = emb_channels
+ self.dropout = dropout
+ self.out_channels = out_channels or channels
+ self.use_conv = use_conv
+ self.use_checkpoint = use_checkpoint
+ self.use_scale_shift_norm = use_scale_shift_norm
+
+ self.in_layers = nn.Sequential(
+ normalization(channels),
+ nn.SiLU(),
+ conv_nd(dims, channels, self.out_channels, 3, padding=1),
+ )
+
+ self.updown = up or down
+
+ if up:
+ self.h_upd = Upsample(channels, False, dims)
+ self.x_upd = Upsample(channels, False, dims)
+ elif down:
+ self.h_upd = Downsample(channels, False, dims)
+ self.x_upd = Downsample(channels, False, dims)
+ else:
+ self.h_upd = self.x_upd = nn.Identity()
+
+ self.emb_layers = nn.Sequential(
+ nn.SiLU(),
+ linear(
+ emb_channels,
+ 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
+ ),
+ )
+ self.out_layers = nn.Sequential(
+ normalization(self.out_channels),
+ nn.SiLU(),
+ nn.Dropout(p=dropout),
+ zero_module(
+ conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
+ ),
+ )
+
+ if self.out_channels == channels:
+ self.skip_connection = nn.Identity()
+ elif use_conv:
+ self.skip_connection = conv_nd(
+ dims, channels, self.out_channels, 3, padding=1
+ )
+ else:
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
+
+ def forward(self, x, emb):
+ """
+ Apply the block to a Tensor, conditioned on a timestep embedding.
+
+ :param x: an [N x C x ...] Tensor of features.
+ :param emb: an [N x emb_channels] Tensor of timestep embeddings.
+ :return: an [N x C x ...] Tensor of outputs.
+ """
+ return checkpoint(
+ self._forward, (x, emb), self.parameters(), self.use_checkpoint
+ )
+
+ def _forward(self, x, emb):
+ if self.updown:
+ in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
+ h = in_rest(x)
+ h = self.h_upd(h)
+ x = self.x_upd(x)
+ h = in_conv(h)
+ else:
+ h = self.in_layers(x)
+ emb_out = self.emb_layers(emb).type(h.dtype)
+ while len(emb_out.shape) < len(h.shape):
+ emb_out = emb_out[..., None]
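+ # FiLM-style conditioning: with scale-shift norm the embedding yields a
+ # per-channel (scale, shift) applied after normalization; otherwise the
+ # embedding is simply added to the feature map.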
+ if self.use_scale_shift_norm:
+ out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
+ scale, shift = th.chunk(emb_out, 2, dim=1)
+ h = out_norm(h) * (1 + scale) + shift
+ h = out_rest(h)
+ else:
+ h = h + emb_out
+ h = self.out_layers(h)
+ return self.skip_connection(x) + h
+
+
+class AttentionBlock(nn.Module):
+ """
+ An attention block that allows spatial positions to attend to each other.
+
+ Originally ported from here, but adapted to the N-d case.
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
+ """
+
+ def __init__(
+ self,
+ channels,
+ num_heads=1,
+ num_head_channels=-1,
+ use_checkpoint=False,
+ use_new_attention_order=False,
+ ):
+ super().__init__()
+ self.channels = channels
+ if num_head_channels == -1:
+ self.num_heads = num_heads
+ else:
+ assert (
+ channels % num_head_channels == 0
+ ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
+ self.num_heads = channels // num_head_channels
+ self.use_checkpoint = use_checkpoint
+ self.norm = normalization(channels)
+ self.qkv = conv_nd(1, channels, channels * 3, 1)
+ if use_new_attention_order:
+ # split qkv before split heads
+ self.attention = QKVAttention(self.num_heads)
+ else:
+ # split heads before split qkv
+ self.attention = QKVAttentionLegacy(self.num_heads)
+
+ self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
+
+ def forward(self, x):
+ return checkpoint(self._forward, (x,), self.parameters(), True)
+
+ def _forward(self, x):
+ b, c, *spatial = x.shape
+ x = x.reshape(b, c, -1)
+ qkv = self.qkv(self.norm(x))
+ h = self.attention(qkv)
+ h = self.proj_out(h)
+ return (x + h).reshape(b, c, *spatial)
+
+
+def count_flops_attn(model, _x, y):
+ """
+ A counter for the `thop` package to count the operations in an
+ attention operation.
+ Meant to be used like:
+ macs, params = thop.profile(
+ model,
+ inputs=(inputs, timesteps),
+ custom_ops={QKVAttention: QKVAttention.count_flops},
+ )
+ """
+ b, c, *spatial = y[0].shape
+ num_spatial = int(np.prod(spatial))
+ # We perform two matmuls with the same number of ops.
+ # The first computes the weight matrix, the second computes
+ # the combination of the value vectors.
+ matmul_ops = 2 * b * (num_spatial ** 2) * c
+ model.total_ops += th.DoubleTensor([matmul_ops])
+
+
+class QKVAttentionLegacy(nn.Module):
+ """
+ A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
+ """
+
+ def __init__(self, n_heads):
+ super().__init__()
+ self.n_heads = n_heads
+
+ def forward(self, qkv):
+ """
+ Apply QKV attention.
+
+ :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
+ :return: an [N x (H * C) x T] tensor after attention.
+ """
+ bs, width, length = qkv.shape
+ assert width % (3 * self.n_heads) == 0
+ ch = width // (3 * self.n_heads)
+ q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
+ scale = 1 / math.sqrt(math.sqrt(ch))
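+ # scaling q and k each by ch**-0.25 is equivalent to the usual
+ # softmax(q k^T / sqrt(ch)) scaling, but keeps intermediates smaller in fp16.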
+ weight = th.einsum(
+ "bct,bcs->bts", q * scale, k * scale
+ ) # More stable with f16 than dividing afterwards
+ weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
+ a = th.einsum("bts,bcs->bct", weight, v)
+ return a.reshape(bs, -1, length)
+
+ @staticmethod
+ def count_flops(model, _x, y):
+ return count_flops_attn(model, _x, y)
+
+
+class QKVAttention(nn.Module):
+ """
+ A module which performs QKV attention and splits in a different order.
+ """
+
+ def __init__(self, n_heads):
+ super().__init__()
+ self.n_heads = n_heads
+
+ def forward(self, qkv):
+ """
+ Apply QKV attention.
+
+ :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
+ :return: an [N x (H * C) x T] tensor after attention.
+ """
+ bs, width, length = qkv.shape
+ assert width % (3 * self.n_heads) == 0
+ ch = width // (3 * self.n_heads)
+ q, k, v = qkv.chunk(3, dim=1)
+ scale = 1 / math.sqrt(math.sqrt(ch))
+ weight = th.einsum(
+ "bct,bcs->bts",
+ (q * scale).view(bs * self.n_heads, ch, length),
+ (k * scale).view(bs * self.n_heads, ch, length),
+ ) # More stable with f16 than dividing afterwards
+ weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
+ a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
+ return a.reshape(bs, -1, length)
+
+ @staticmethod
+ def count_flops(model, _x, y):
+ return count_flops_attn(model, _x, y)
+
+
+class UNetModel(nn.Module):
+ """
+ The full UNet model with attention and timestep embedding.
+
+ :param in_channels: channels in the input Tensor.
+ :param model_channels: base channel count for the model.
+ :param out_channels: channels in the output Tensor.
+ :param num_res_blocks: number of residual blocks per downsample.
+ :param attention_resolutions: a collection of downsample rates at which
+ attention will take place. May be a set, list, or tuple.
+ For example, if this contains 4, then at 4x downsampling, attention
+ will be used.
+ :param dropout: the dropout probability.
+ :param channel_mult: channel multiplier for each level of the UNet.
+ :param conv_resample: if True, use learned convolutions for upsampling and
+ downsampling.
+ :param dims: determines if the signal is 1D, 2D, or 3D.
+ :param num_classes: if specified (as an int), then this model will be
+ class-conditional with `num_classes` classes.
+ :param use_checkpoint: use gradient checkpointing to reduce memory usage.
+ :param num_heads: the number of attention heads in each attention layer.
+ :param num_head_channels: if specified, ignore num_heads and instead use
+ a fixed channel width per attention head.
+ :param num_heads_upsample: works with num_heads to set a different number
+ of heads for upsampling. Deprecated.
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
+ :param resblock_updown: use residual blocks for up/downsampling.
+ :param use_new_attention_order: use a different attention pattern for potentially
+ increased efficiency.
+ """
+
+ def __init__(
+ self,
+ image_size,
+ in_channels,
+ model_channels,
+ out_channels,
+ num_res_blocks,
+ attention_resolutions,
+ dropout=0,
+ channel_mult=(1, 2, 4, 8),
+ conv_resample=True,
+ dims=2,
+ num_classes=None,
+ use_checkpoint=False,
+ use_fp16=False,
+ num_heads=1,
+ num_head_channels=-1,
+ num_heads_upsample=-1,
+ use_scale_shift_norm=False,
+ resblock_updown=False,
+ use_new_attention_order=False,
+ ):
+ super().__init__()
+
+ if num_heads_upsample == -1:
+ num_heads_upsample = num_heads
+
+ self.image_size = image_size
+ self.in_channels = in_channels
+ self.model_channels = model_channels
+ self.out_channels = out_channels
+ self.num_res_blocks = num_res_blocks
+ self.attention_resolutions = attention_resolutions
+ self.dropout = dropout
+ self.channel_mult = channel_mult
+ self.conv_resample = conv_resample
+ self.num_classes = num_classes
+ self.use_checkpoint = use_checkpoint
+ self.dtype = th.float16 if use_fp16 else th.float32
+ self.num_heads = num_heads
+ self.num_head_channels = num_head_channels
+ self.num_heads_upsample = num_heads_upsample
+
+ time_embed_dim = model_channels * 4
+ self.time_embed = nn.Sequential(
+ linear(model_channels, time_embed_dim),
+ nn.SiLU(),
+ linear(time_embed_dim, time_embed_dim),
+ )
+
+ if self.num_classes is not None:
+ self.label_emb = nn.Embedding(num_classes, time_embed_dim)
+
+ ch = input_ch = int(channel_mult[0] * model_channels)
+ self.input_blocks = nn.ModuleList(
+ [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
+ )
+ self._feature_size = ch
+ input_block_chans = [ch]
+ ds = 1
+ for level, mult in enumerate(channel_mult):
+ for _ in range(num_res_blocks):
+ layers = [
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=int(mult * model_channels),
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ )
+ ]
+ ch = int(mult * model_channels)
+ if ds in attention_resolutions:
+ layers.append(
+ AttentionBlock(
+ ch,
+ use_checkpoint=use_checkpoint,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ use_new_attention_order=use_new_attention_order,
+ )
+ )
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
+ self._feature_size += ch
+ input_block_chans.append(ch)
+ if level != len(channel_mult) - 1:
+ out_ch = ch
+ self.input_blocks.append(
+ TimestepEmbedSequential(
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=out_ch,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ down=True,
+ )
+ if resblock_updown
+ else Downsample(
+ ch, conv_resample, dims=dims, out_channels=out_ch
+ )
+ )
+ )
+ ch = out_ch
+ input_block_chans.append(ch)
+ ds *= 2
+ self._feature_size += ch
+
+ self.middle_block = TimestepEmbedSequential(
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ ),
+ AttentionBlock(
+ ch,
+ use_checkpoint=use_checkpoint,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ use_new_attention_order=use_new_attention_order,
+ ),
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ ),
+ )
+ self._feature_size += ch
+
+ self.output_blocks = nn.ModuleList([])
+ for level, mult in list(enumerate(channel_mult))[::-1]:
+ for i in range(num_res_blocks + 1):
+ ich = input_block_chans.pop()
+ layers = [
+ ResBlock(
+ ch + ich,
+ time_embed_dim,
+ dropout,
+ out_channels=int(model_channels * mult),
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ )
+ ]
+ ch = int(model_channels * mult)
+ if ds in attention_resolutions:
+ layers.append(
+ AttentionBlock(
+ ch,
+ use_checkpoint=use_checkpoint,
+ num_heads=num_heads_upsample,
+ num_head_channels=num_head_channels,
+ use_new_attention_order=use_new_attention_order,
+ )
+ )
+ if level and i == num_res_blocks:
+ out_ch = ch
+ layers.append(
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=out_ch,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ up=True,
+ )
+ if resblock_updown
+ else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
+ )
+ ds //= 2
+ self.output_blocks.append(TimestepEmbedSequential(*layers))
+ self._feature_size += ch
+
+ self.out = nn.Sequential(
+ normalization(ch),
+ nn.SiLU(),
+ zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
+ )
+
+ def convert_to_fp16(self):
+ """
+ Convert the torso of the model to float16.
+ """
+ self.input_blocks.apply(convert_module_to_f16)
+ self.middle_block.apply(convert_module_to_f16)
+ self.output_blocks.apply(convert_module_to_f16)
+
+ def convert_to_fp32(self):
+ """
+ Convert the torso of the model to float32.
+ """
+ self.input_blocks.apply(convert_module_to_f32)
+ self.middle_block.apply(convert_module_to_f32)
+ self.output_blocks.apply(convert_module_to_f32)
+
+ def forward(self, x, timesteps, y=None):
+ """
+ Apply the model to an input batch.
+
+ :param x: an [N x C x ...] Tensor of inputs.
+ :param timesteps: a 1-D batch of timesteps.
+ :param y: an [N] Tensor of labels, if class-conditional.
+ :return: an [N x C x ...] Tensor of outputs.
+ """
+ assert (y is not None) == (
+ self.num_classes is not None
+ ), "must specify y if and only if the model is class-conditional"
+
+ hs = []
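+ # encoder activations are stashed here so the decoder can concatenate the
+ # matching skip connection at each resolution.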
+ emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+
+ if self.num_classes is not None:
+ assert y.shape == (x.shape[0],)
+ emb = emb + self.label_emb(y)
+
+ h = x.type(self.dtype)
+ for module in self.input_blocks:
+ h = module(h, emb)
+ hs.append(h)
+ h = self.middle_block(h, emb)
+ for module in self.output_blocks:
+ h = th.cat([h, hs.pop()], dim=1)
+ h = module(h, emb)
+ h = h.type(x.dtype)
+ return self.out(h)
+
+
+class SuperResModel(UNetModel):
+ """
+ A UNetModel that performs super-resolution.
+
+ Expects an extra kwarg `low_res` to condition on a low-resolution image.
+ """
+
+ def __init__(self, image_size, in_channels, *args, **kwargs):
+ super().__init__(image_size, in_channels * 2, *args, **kwargs)
+
+ def forward(self, x, timesteps, low_res=None, **kwargs):
+ _, _, new_height, new_width = x.shape
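+ # condition on the low-resolution image: upsample it to the target size and
+ # concatenate it along the channel axis (hence in_channels * 2 in __init__).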
+ upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
+ x = th.cat([x, upsampled], dim=1)
+ return super().forward(x, timesteps, **kwargs)
+
+
+class EncoderUNetModel(nn.Module):
+ """
+ The half UNet model with attention and timestep embedding.
+
+ For usage, see UNetModel.
+ """
+
+ def __init__(
+ self,
+ image_size,
+ in_channels,
+ model_channels,
+ out_channels,
+ num_res_blocks,
+ attention_resolutions,
+ dropout=0,
+ channel_mult=(1, 2, 4, 8),
+ conv_resample=True,
+ dims=2,
+ use_checkpoint=False,
+ use_fp16=False,
+ num_heads=1,
+ num_head_channels=-1,
+ num_heads_upsample=-1,
+ use_scale_shift_norm=False,
+ resblock_updown=False,
+ use_new_attention_order=False,
+ pool="adaptive",
+ ):
+ super().__init__()
+
+ if num_heads_upsample == -1:
+ num_heads_upsample = num_heads
+
+ self.in_channels = in_channels
+ self.model_channels = model_channels
+ self.out_channels = out_channels
+ self.num_res_blocks = num_res_blocks
+ self.attention_resolutions = attention_resolutions
+ self.dropout = dropout
+ self.channel_mult = channel_mult
+ self.conv_resample = conv_resample
+ self.use_checkpoint = use_checkpoint
+ self.dtype = th.float16 if use_fp16 else th.float32
+ self.num_heads = num_heads
+ self.num_head_channels = num_head_channels
+ self.num_heads_upsample = num_heads_upsample
+
+ time_embed_dim = model_channels * 4
+ self.time_embed = nn.Sequential(
+ linear(model_channels, time_embed_dim),
+ nn.SiLU(),
+ linear(time_embed_dim, time_embed_dim),
+ )
+
+ ch = int(channel_mult[0] * model_channels)
+ self.input_blocks = nn.ModuleList(
+ [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
+ )
+ self._feature_size = ch
+ input_block_chans = [ch]
+ ds = 1
+ for level, mult in enumerate(channel_mult):
+ for _ in range(num_res_blocks):
+ layers = [
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=int(mult * model_channels),
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ )
+ ]
+ ch = int(mult * model_channels)
+ if ds in attention_resolutions:
+ layers.append(
+ AttentionBlock(
+ ch,
+ use_checkpoint=use_checkpoint,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ use_new_attention_order=use_new_attention_order,
+ )
+ )
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
+ self._feature_size += ch
+ input_block_chans.append(ch)
+ if level != len(channel_mult) - 1:
+ out_ch = ch
+ self.input_blocks.append(
+ TimestepEmbedSequential(
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=out_ch,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ down=True,
+ )
+ if resblock_updown
+ else Downsample(
+ ch, conv_resample, dims=dims, out_channels=out_ch
+ )
+ )
+ )
+ ch = out_ch
+ input_block_chans.append(ch)
+ ds *= 2
+ self._feature_size += ch
+
+ self.middle_block = TimestepEmbedSequential(
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ ),
+ AttentionBlock(
+ ch,
+ use_checkpoint=use_checkpoint,
+ num_heads=num_heads,
+ num_head_channels=num_head_channels,
+ use_new_attention_order=use_new_attention_order,
+ ),
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ ),
+ )
+ self._feature_size += ch
+ self.pool = pool
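+ # the classifier head depends on the pooling mode: "adaptive" global-average
+ # pools the final feature map, "attention" uses AttentionPool2d, and the
+ # "spatial" variants concatenate per-resolution mean features before an MLP.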
+ if pool == "adaptive":
+ self.out = nn.Sequential(
+ normalization(ch),
+ nn.SiLU(),
+ nn.AdaptiveAvgPool2d((1, 1)),
+ zero_module(conv_nd(dims, ch, out_channels, 1)),
+ nn.Flatten(),
+ )
+ elif pool == "attention":
+ assert num_head_channels != -1
+ self.out = nn.Sequential(
+ normalization(ch),
+ nn.SiLU(),
+ AttentionPool2d(
+ (image_size // ds), ch, num_head_channels, out_channels
+ ),
+ )
+ elif pool == "spatial":
+ self.out = nn.Sequential(
+ nn.Linear(self._feature_size, 2048),
+ nn.ReLU(),
+ nn.Linear(2048, self.out_channels),
+ )
+ elif pool == "spatial_v2":
+ self.out = nn.Sequential(
+ nn.Linear(self._feature_size, 2048),
+ normalization(2048),
+ nn.SiLU(),
+ nn.Linear(2048, self.out_channels),
+ )
+ else:
+ raise NotImplementedError(f"Unexpected {pool} pooling")
+
+ def convert_to_fp16(self):
+ """
+ Convert the torso of the model to float16.
+ """
+ self.input_blocks.apply(convert_module_to_f16)
+ self.middle_block.apply(convert_module_to_f16)
+
+ def convert_to_fp32(self):
+ """
+ Convert the torso of the model to float32.
+ """
+ self.input_blocks.apply(convert_module_to_f32)
+ self.middle_block.apply(convert_module_to_f32)
+
+ def forward(self, x, timesteps):
+ """
+ Apply the model to an input batch.
+
+ :param x: an [N x C x ...] Tensor of inputs.
+ :param timesteps: a 1-D batch of timesteps.
+ :return: an [N x K] Tensor of outputs.
+ """
+ emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+
+ results = []
+ h = x.type(self.dtype)
+ for module in self.input_blocks:
+ h = module(h, emb)
+ if self.pool.startswith("spatial"):
+ results.append(h.type(x.dtype).mean(dim=(2, 3)))
+ h = self.middle_block(h, emb)
+ if self.pool.startswith("spatial"):
+ results.append(h.type(x.dtype).mean(dim=(2, 3)))
+ h = th.cat(results, axis=-1)
+ return self.out(h)
+ else:
+ h = h.type(x.dtype)
+ return self.out(h)
diff --git a/case_studies/diffpure/run_scripts/celebahq/run_celebahq_bpda_glasses.sh b/case_studies/diffpure/run_scripts/celebahq/run_celebahq_bpda_glasses.sh
new file mode 100644
index 0000000..ff5c80b
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/celebahq/run_celebahq_bpda_glasses.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
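+# adv_eps 0.062745098 corresponds to 16/255 for the L_inf threat model.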
+for classifier_name in celebahq__Eyeglasses; do
+ for t in 500; do
+ for adv_eps in 0.062745098; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv_bpda.py --exp ./exp_results --config celeba.yml \
+ -i celebahq-adv-$t-eps$adv_eps-2x4-disc-bpda-rev \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 2 \
+ --domain celebahq \
+ --classifier_name $classifier_name \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type celebahq-ddpm \
+ --eot_defense_reps 20 \
+ --eot_attack_reps 15 \
+
+ done
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/celebahq/run_celebahq_bpda_smiling.sh b/case_studies/diffpure/run_scripts/celebahq/run_celebahq_bpda_smiling.sh
new file mode 100644
index 0000000..147ab07
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/celebahq/run_celebahq_bpda_smiling.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for classifier_name in celebahq__Smiling; do
+ for t in 500; do
+ for adv_eps in 0.062745098; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv_bpda.py --exp ./exp_results --config celeba.yml \
+ -i celebahq-adv-$t-eps$adv_eps-2x4-disc-bpda-rev \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 2 \
+ --domain celebahq \
+ --classifier_name $classifier_name \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type celebahq-ddpm \
+ --eot_defense_reps 20 \
+ --eot_attack_reps 15 \
+
+ done
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_bpda_eot.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_bpda_eot.sh
new file mode 100644
index 0000000..c656bb4
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_bpda_eot.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+SEED1=$1
+SEED2=$2
+
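+# adv_eps 0.031373 corresponds to 8/255 for the L_inf threat model.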
+for t in 100; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ PYTHONPATH=$(pwd) python case_studies/diffpure/eval_sde_adv_bpda.py --exp ./case_studies/diffpure/exp_results \
+ --config case_studies/diffpure/configs/cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-200x1-bm0-t0-end1e-5-cont-bpda \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 10 \
+ --num_sub 200 \
+ --domain cifar10 \
+ --classifier_name cifar10-wideresnet-28-10 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2.sh
new file mode 100644
index 0000000..16b3ea1
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
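+# adv_eps 0.5 is the L2 perturbation budget (see --lp_norm L2 below).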
+for t in 75; do
+ for adv_eps in 0.5; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-L2-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wideresnet-28-10 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20 \
+ --lp_norm L2 \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2_70-16-dp.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2_70-16-dp.sh
new file mode 100644
index 0000000..75da1fe
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2_70-16-dp.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 75; do
+ for adv_eps in 0.5; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-wres70-16-L2-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wrn-70-16-dropout \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20 \
+ --lp_norm L2 \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2_rn50_eps1.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2_rn50_eps1.sh
new file mode 100644
index 0000000..7393851
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_L2_rn50_eps1.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 125; do
+ for adv_eps in 1; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-L2-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-resnet-50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20 \
+ --lp_norm L2 \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf.sh
new file mode 100644
index 0000000..5392050
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+SEED1=$1
+SEED2=$2
+
+for t in 100; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ PYTHONPATH=$(pwd) python case_studies/diffpure/eval_sde_adv.py --exp ./case_studies/diffpure/exp_results \
+ --config ./case_studies/diffpure/configs/cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 5 \
+ --domain cifar10 \
+ --classifier_name cifar10-wideresnet-28-10 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20 \
+ --batch-size 512 \
+ --n_boundary_points=1 \
+ --n_inner_points=999
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_70-16-dp.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_70-16-dp.sh
new file mode 100644
index 0000000..c10b52f
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_70-16-dp.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+SEED1=$1
+SEED2=$2
+
+for t in 100; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ PYTHONPATH=$(pwd) python case_studies/diffpure/eval_sde_adv.py --exp ./case_studies/diffpure/exp_results \
+ --config case_studies/diffpure/configs/cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-wres70-16-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wrn-70-16-dropout \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_binarization_test.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_binarization_test.sh
new file mode 100644
index 0000000..4567569
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_binarization_test.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+seed=$1
+data_seed=$2
+sample_idx_start=$3
+sample_idx_end=$4
+
+t=100
+adv_eps=0.031373
+
+PYTHONPATH=$(pwd) python -u case_studies/diffpure/eval_sde_adv.py --exp ./case_studies/diffpure/exp_results \
+ --config ./case_studies/diffpure/configs/cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 5 \
+ --domain cifar10 \
+ --classifier_name cifar10-wideresnet-28-10 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20 \
+ --batch-size 512 \
+ --test-samples-idx-start=$sample_idx_start \
+ --test-samples-idx-end=$sample_idx_end \
+ --binarization-test
\ No newline at end of file
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_rn50.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_rn50.sh
new file mode 100644
index 0000000..40c2d71
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_rn50.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+SEED1=$1
+SEED2=$2
+
+for t in 125; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ PYTHONPATH=$(pwd) python case_studies/diffpure/eval_sde_adv.py --exp ./case_studies/diffpure/exp_results \
+ --config case_studies/diffpure/configs/cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-resnet-50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version rand \
+ --eot_iter 20
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stadv_rn50.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stadv_rn50.sh
new file mode 100644
index 0000000..b40917d
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stadv_rn50.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 125; do
+ for adv_eps in 0.05; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-stadv-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-resnet-50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version stadv \
+ --eot_iter 20
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2.sh
new file mode 100644
index 0000000..181deba
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 75; do
+ for adv_eps in 0.5; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-L2 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wideresnet-28-10 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version standard \
+ --lp_norm L2 \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2_70-16-dp.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2_70-16-dp.sh
new file mode 100644
index 0000000..35338b8
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2_70-16-dp.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 75; do
+ for adv_eps in 0.5; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-L2-wres70-16 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wrn-70-16-dropout \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version standard \
+ --lp_norm L2 \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2_rn50_eps1.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2_rn50_eps1.sh
new file mode 100644
index 0000000..f2f8d94
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_L2_rn50_eps1.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 125; do
+ for adv_eps in 1; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-L2 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-resnet-50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version standard \
+ --lp_norm L2 \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf.sh
new file mode 100644
index 0000000..c6ebfc0
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 100; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wideresnet-28-10 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version standard \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf_70-16-dp.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf_70-16-dp.sh
new file mode 100644
index 0000000..59e4a70
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf_70-16-dp.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 100; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont-wres70-16 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-wrn-70-16-dropout \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version standard \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf_rn50.sh b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf_rn50.sh
new file mode 100644
index 0000000..cae45d0
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/cifar10/run_cifar_stand_inf_rn50.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 125; do
+ for adv_eps in 0.031373; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0 python eval_sde_adv.py --exp ./exp_results --config cifar10.yml \
+ -i cifar10-robust_adv-$t-eps$adv_eps-64x1-bm0-t0-end1e-5-cont \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 64 \
+ --num_sub 64 \
+ --domain cifar10 \
+ --classifier_name cifar10-resnet-50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --score_type score_sde \
+ --attack_version standard \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf.sh b/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf.sh
new file mode 100644
index 0000000..eace779
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
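+# adv_eps 0.0157 corresponds to 4/255 for the L_inf threat model on ImageNet.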
+for t in 150; do
+ for adv_eps in 0.0157; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv.py --exp ./exp_results --config imagenet.yml \
+ -i imagenet-robust_adv-$t-eps$adv_eps-4x4-bm0-t0-end1e-5-cont-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 4 \
+ --num_sub 16 \
+ --domain imagenet \
+ --classifier_name imagenet-resnet50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --attack_version rand \
+ --eot_iter 20
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf_50-2.sh b/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf_50-2.sh
new file mode 100644
index 0000000..6aab2d8
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf_50-2.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 150; do
+ for adv_eps in 0.0157; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv.py --exp ./exp_results --config imagenet.yml \
+ -i imagenet-robust_adv-$t-eps$adv_eps-4x4-bm0-t0-end1e-5-cont-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 4 \
+ --num_sub 16 \
+ --domain imagenet \
+ --classifier_name imagenet-wideresnet-50-2 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --attack_version rand \
+ --eot_iter 20
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf_deits.sh b/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf_deits.sh
new file mode 100644
index 0000000..e46ec4d
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/imagenet/run_in_rand_inf_deits.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 150; do
+ for adv_eps in 0.0157; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv.py --exp ./exp_results --config imagenet.yml \
+ -i imagenet-robust_adv-$t-eps$adv_eps-4x4-bm0-t0-end1e-5-cont-eot20 \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 4 \
+ --num_sub 16 \
+ --domain imagenet \
+ --classifier_name imagenet-deit-s \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --attack_version rand \
+ --eot_iter 20
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf.sh b/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf.sh
new file mode 100644
index 0000000..fe42784
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 150; do
+ for adv_eps in 0.0157; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv.py --exp ./exp_results --config imagenet.yml \
+ -i imagenet-robust_adv-$t-eps$adv_eps-4x4-bm0-t0-end1e-5-cont \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 4 \
+ --num_sub 16 \
+ --domain imagenet \
+ --classifier_name imagenet-resnet50 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --attack_version standard \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf_50-2.sh b/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf_50-2.sh
new file mode 100644
index 0000000..b17c1de
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf_50-2.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 150; do
+ for adv_eps in 0.0157; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv.py --exp ./exp_results --config imagenet.yml \
+ -i imagenet-robust_adv-$t-eps$adv_eps-4x4-bm0-t0-end1e-5-cont \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 4 \
+ --num_sub 16 \
+ --domain imagenet \
+ --classifier_name imagenet-wideresnet-50-2 \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --attack_version standard \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf_deits.sh b/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf_deits.sh
new file mode 100644
index 0000000..17337fd
--- /dev/null
+++ b/case_studies/diffpure/run_scripts/imagenet/run_in_stand_inf_deits.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+cd ../..
+
+SEED1=$1
+SEED2=$2
+
+for t in 150; do
+ for adv_eps in 0.0157; do
+ for seed in $SEED1; do
+ for data_seed in $SEED2; do
+
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python eval_sde_adv.py --exp ./exp_results --config imagenet.yml \
+ -i imagenet-robust_adv-$t-eps$adv_eps-4x4-bm0-t0-end1e-5-cont \
+ --t $t \
+ --adv_eps $adv_eps \
+ --adv_batch_size 4 \
+ --num_sub 16 \
+ --domain imagenet \
+ --classifier_name imagenet-deit-s \
+ --seed $seed \
+ --data_seed $data_seed \
+ --diffusion_type sde \
+ --attack_version standard \
+
+ done
+ done
+ done
+done
diff --git a/case_studies/diffpure/runners/diffpure_ddpm.py b/case_studies/diffpure/runners/diffpure_ddpm.py
new file mode 100644
index 0000000..4b88a44
--- /dev/null
+++ b/case_studies/diffpure/runners/diffpure_ddpm.py
@@ -0,0 +1,142 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import os
+import random
+
+import numpy as np
+
+import torch
+import torchvision.utils as tvu
+
+from ddpm.unet_ddpm import Model
+
+
+def get_beta_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
+ betas = np.linspace(beta_start, beta_end,
+ num_diffusion_timesteps, dtype=np.float64)
+ assert betas.shape == (num_diffusion_timesteps,)
+ return betas
+
+
+def extract(a, t, x_shape):
+ """Extract coefficients from a based on t and reshape to make it
+ broadcastable with x_shape."""
+ bs, = t.shape
+ assert x_shape[0] == bs
+ out = torch.gather(torch.tensor(a, dtype=torch.float, device=t.device), 0, t.long())
+ assert out.shape == (bs,)
+ out = out.reshape((bs,) + (1,) * (len(x_shape) - 1))
+ return out
+
+
+def image_editing_denoising_step_flexible_mask(x, t, *, model, logvar, betas):
+ """
+ Sample from p(x_{t-1} | x_t)
+ """
+ alphas = 1.0 - betas
+ alphas_cumprod = alphas.cumprod(dim=0)
+
+ model_output = model(x, t)
+ weighted_score = betas / torch.sqrt(1 - alphas_cumprod)
+ mean = extract(1 / torch.sqrt(alphas), t, x.shape) * (x - extract(weighted_score, t, x.shape) * model_output)
+
+ logvar = extract(logvar, t, x.shape)
+ noise = torch.randn_like(x)
+ mask = 1 - (t == 0).float()
+ mask = mask.reshape((x.shape[0],) + (1,) * (len(x.shape) - 1))
+ sample = mean + mask * torch.exp(0.5 * logvar) * noise
+ sample = sample.float()
+ return sample
+
+
+class Diffusion(torch.nn.Module):
+ def __init__(self, args, config, device=None):
+ super().__init__()
+ self.args = args
+ self.config = config
+ if device is None:
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ self.device = device
+
+ print("Loading model")
+ if self.config.data.dataset == "CelebA_HQ":
+ url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/celeba_hq.ckpt"
+ else:
+            raise ValueError(f"Unknown dataset {self.config.data.dataset}!")
+
+ model = Model(self.config)
+ ckpt = torch.hub.load_state_dict_from_url(url, map_location='cpu')
+ model.load_state_dict(ckpt)
+ model.eval()
+
+ self.model = model
+
+ self.model_var_type = config.model.var_type
+ betas = get_beta_schedule(
+ beta_start=config.diffusion.beta_start,
+ beta_end=config.diffusion.beta_end,
+ num_diffusion_timesteps=config.diffusion.num_diffusion_timesteps
+ )
+ self.betas = torch.from_numpy(betas).float()
+ self.num_timesteps = betas.shape[0]
+
+ alphas = 1.0 - betas
+ alphas_cumprod = np.cumprod(alphas, axis=0)
+ alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
+ posterior_variance = betas * \
+ (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
+ if self.model_var_type == "fixedlarge":
+ self.logvar = np.log(np.append(posterior_variance[1], betas[1:]))
+
+ elif self.model_var_type == 'fixedsmall':
+ self.logvar = np.log(np.maximum(posterior_variance, 1e-20))
+
+ def image_editing_sample(self, img=None, bs_id=0, tag=None):
+ assert isinstance(img, torch.Tensor)
+ batch_size = img.shape[0]
+
+ with torch.no_grad():
+ if tag is None:
+ tag = 'rnd' + str(random.randint(0, 10000))
+ out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
+
+ assert img.ndim == 4, img.ndim
+ x0 = img
+
+ if bs_id < 2:
+ os.makedirs(out_dir, exist_ok=True)
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
+
+ xs = []
+ for it in range(self.args.sample_step):
+ e = torch.randn_like(x0)
+ total_noise_levels = self.args.t
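+                # diffuse the input to noise level t in closed form:
+                #   x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * eps,  eps ~ N(0, I)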
+ a = (1 - self.betas).cumprod(dim=0).to(x0.device)
+ x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
+
+ if bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
+
+ for i in reversed(range(total_noise_levels)):
+ t = torch.tensor([i] * batch_size, device=img.device)
+ x = image_editing_denoising_step_flexible_mask(x, t=t, model=self.model,
+ logvar=self.logvar,
+ betas=self.betas.to(img.device))
+ # added intermediate step vis
+ if (i - 49) % 50 == 0 and bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'noise_t_{i}_{it}.png'))
+
+ x0 = x
+
+ if bs_id < 2:
+ torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
+
+ xs.append(x0)
+
+ return torch.cat(xs, dim=0)
diff --git a/case_studies/diffpure/runners/diffpure_guided.py b/case_studies/diffpure/runners/diffpure_guided.py
new file mode 100644
index 0000000..eb0eaf4
--- /dev/null
+++ b/case_studies/diffpure/runners/diffpure_guided.py
@@ -0,0 +1,89 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import os
+import random
+
+import torch
+import torchvision.utils as tvu
+
+from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
+
+
+class GuidedDiffusion(torch.nn.Module):
+ def __init__(self, args, config, device=None, model_dir='checkpoints/diffpure/guided_diffusion'):
+ super().__init__()
+ self.args = args
+ self.config = config
+ if device is None:
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ self.device = device
+
+ # load model
+ model_config = model_and_diffusion_defaults()
+ model_config.update(vars(self.config.model))
+ print(f'model_config: {model_config}')
+ model, diffusion = create_model_and_diffusion(**model_config)
+ model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
+ model.requires_grad_(False).eval().to(self.device)
+
+ if model_config['use_fp16']:
+ model.convert_to_fp16()
+
+ self.model = model
+ self.diffusion = diffusion
+ self.betas = torch.from_numpy(diffusion.betas).float().to(self.device)
+
+ def image_editing_sample(self, img, bs_id=0, tag=None):
+ with torch.no_grad():
+ assert isinstance(img, torch.Tensor)
+ batch_size = img.shape[0]
+
+ if tag is None:
+ tag = 'rnd' + str(random.randint(0, 10000))
+ out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
+
+ assert img.ndim == 4, img.ndim
+ img = img.to(self.device)
+ x0 = img
+
+ if bs_id < 2:
+ os.makedirs(out_dir, exist_ok=True)
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
+
+ xs = []
+ for it in range(self.args.sample_step):
+ e = torch.randn_like(x0)
+ total_noise_levels = self.args.t
+ a = (1 - self.betas).cumprod(dim=0)
+ x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
+
+ if bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
+
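+            # reverse process: one ancestral sampling step p(x_{i-1} | x_i) per noise level,
+            # delegated to guided_diffusion's p_sample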
+ for i in reversed(range(total_noise_levels)):
+ t = torch.tensor([i] * batch_size, device=self.device)
+
+ x = self.diffusion.p_sample(self.model, x, t,
+ clip_denoised=True,
+ denoised_fn=None,
+ cond_fn=None,
+ model_kwargs=None)["sample"]
+
+ # added intermediate step vis
+ if (i - 99) % 100 == 0 and bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'noise_t_{i}_{it}.png'))
+
+ x0 = x
+
+ if bs_id < 2:
+ torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
+
+ xs.append(x0)
+
+ return torch.cat(xs, dim=0)
diff --git a/case_studies/diffpure/runners/diffpure_ldsde.py b/case_studies/diffpure/runners/diffpure_ldsde.py
new file mode 100644
index 0000000..49c3882
--- /dev/null
+++ b/case_studies/diffpure/runners/diffpure_ldsde.py
@@ -0,0 +1,252 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import os
+import random
+import numpy as np
+
+import torch
+import torchvision.utils as tvu
+import torchsde
+
+from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
+from score_sde.losses import get_optimizer
+from score_sde.models import utils as mutils
+from score_sde.models.ema import ExponentialMovingAverage
+from score_sde import sde_lib
+
+
+def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
+ """
+ Extract values from a 1-D numpy array for a batch of indices.
+
+    :param arr_or_func: the 1-D numpy array or a func.
+ :param timesteps: a tensor of indices into the array to extract.
+ :param broadcast_shape: a larger shape of K dimensions with the batch
+ dimension equal to the length of timesteps.
+ :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
+ """
+ if callable(arr_or_func):
+ res = arr_or_func(timesteps).float()
+ else:
+ res = arr_or_func.to(device=timesteps.device)[timesteps].float()
+ while len(res.shape) < len(broadcast_shape):
+ res = res[..., None]
+ return res.expand(broadcast_shape)
+
+
+def restore_checkpoint(ckpt_dir, state, device):
+ loaded_state = torch.load(ckpt_dir, map_location=device)
+ state['optimizer'].load_state_dict(loaded_state['optimizer'])
+ state['model'].load_state_dict(loaded_state['model'], strict=False)
+ state['ema'].load_state_dict(loaded_state['ema'])
+ state['step'] = loaded_state['step']
+
+
+class LDSDE(torch.nn.Module):
+ def __init__(self, model, x_init, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
+ img_shape=(3, 256, 256), sigma2=0.001, lambda_ld=0.01, eta=5, model_kwargs=None):
+ """Construct a Variance Preserving SDE.
+
+ Args:
+ model: diffusion model
+ score_type: [guided_diffusion, score_sde, ddpm]
+ beta_min: value of beta(0)
+ beta_max: value of beta(1)
+ """
+ super().__init__()
+ self.model = model
+ self.x_init = x_init
+ self.sigma2 = sigma2
+ self.eta = eta
+ self.lambda_ld = lambda_ld # damping coefficient
+ self.score_type = score_type
+ self.model_kwargs = model_kwargs
+ self.img_shape = img_shape
+
+ self.beta_0 = beta_min
+ self.beta_1 = beta_max
+ self.N = N
+ self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
+ self.alphas = 1. - self.discrete_betas
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+ self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
+ self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
+
+ self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
+ self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
+
+ self.noise_type = "diagonal"
+ self.sde_type = "ito"
+
+ print(f'sigma2: {self.sigma2}, lambda_ld: {self.lambda_ld}, eta: {self.eta}')
+
+ def _scale_timesteps(self, t):
+        assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but got {t} with shape {t.shape}'
+ return (t.float() * self.N).long()
+
+ def ldsde_fn(self, t, x, return_type='drift'):
+ """Create the drift and diffusion functions for the reverse SDE"""
+ t = torch.zeros_like(t, dtype=torch.float, device=t.device) + 1e-2
+
+ if return_type == 'drift':
+
+ assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
+ x_img = x.view(-1, *self.img_shape)
+
+ if self.score_type == 'guided_diffusion':
+ # model output is epsilon
+ if self.model_kwargs is None:
+ self.model_kwargs = {}
+
+ disc_steps = self._scale_timesteps(t) # (batch_size, ), from float in [0,1] to int in [0, 1000]
+ model_output = self.model(x_img, disc_steps, **self.model_kwargs)
+                # with learned sigma the model outputs (epsilon, variance values); keep only the epsilon part
+ model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
+ assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
+ model_output = model_output.view(x.shape[0], -1)
+ score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
+
+ elif self.score_type == 'score_sde':
+ # model output is epsilon
+ sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
+ score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
+ score = score_fn(x_img, t)
+ assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
+ score = score.view(x.shape[0], -1)
+
+ else:
+                raise NotImplementedError(f'Unknown score type in LDSDE: {self.score_type}!')
+
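+            # Langevin-dynamics drift: 0.5 * lambda_ld * (score - (x - x_init) / sigma2), i.e. the model
+            # score plus a Gaussian term that pulls the sample back toward the initial input x_init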
+ drift = -0.5 * (-score + (x - self.x_init) / self.sigma2) * self.lambda_ld # TODO
+ return drift
+
+ else:
+ diffusion_coef = np.sqrt(self.lambda_ld) * self.eta
+ return torch.tensor([diffusion_coef], dtype=torch.float).expand(x.shape[0]).to(x.device)
+
+ def f(self, t, x):
+ """Create the drift function f(x, t)
+        sdeint only supports a 2D tensor (batch_size, c*h*w)
+ """
+ t = t.expand(x.shape[0]) # (batch_size, )
+ drift = self.ldsde_fn(t, x, return_type='drift')
+ assert drift.shape == x.shape
+ return drift
+
+ def g(self, t, x):
+ """Create the diffusion function g(t)
+        sdeint only supports a 2D tensor (batch_size, c*h*w)
+ """
+ t = t.expand(x.shape[0]) # (batch_size, )
+ diffusion = self.ldsde_fn(t, x, return_type='diffusion')
+ assert diffusion.shape == (x.shape[0], )
+ return diffusion[:, None].expand(x.shape)
+
+
+class LDGuidedDiffusion(torch.nn.Module):
+ def __init__(self, args, config, device=None):
+ super().__init__()
+ self.args = args
+ self.config = config
+ if device is None:
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ self.device = device
+
+ # load model
+ if config.data.dataset == 'ImageNet':
+ img_shape = (3, 256, 256)
+ model_dir = 'checkpoints/diffpure/guided_diffusion'
+ model_config = model_and_diffusion_defaults()
+ model_config.update(vars(self.config.model))
+ print(f'model_config: {model_config}')
+ model, _ = create_model_and_diffusion(**model_config)
+ model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
+
+ if model_config['use_fp16']:
+ model.convert_to_fp16()
+
+ elif config.data.dataset == 'CIFAR10':
+ img_shape = (3, 32, 32)
+ model_dir = 'checkpoints/diffpure/score_sde'
+ print(f'model_config: {config}')
+ model = mutils.create_model(config)
+
+ optimizer = get_optimizer(config, model.parameters())
+ ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
+ state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
+ restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
+ ema.copy_to(model.parameters())
+
+ else:
+ raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
+
+ model.eval().to(self.device)
+
+ self.model = model
+ self.img_shape = img_shape
+
+ print(f'use_bm: {args.use_bm}')
+
+ self.args_dict = {
+ 'method': 'euler', # ["srk", "euler", None]
+ 'adaptive': False,
+ 'dt': 1e-2,
+ }
+
+ print(f'args_dict: {self.args_dict}')
+
+ def image_editing_sample(self, img, bs_id=0, tag=None):
+ assert isinstance(img, torch.Tensor)
+ batch_size = img.shape[0]
+ state_size = int(np.prod(img.shape[1:])) # c*h*w
+
+ if tag is None:
+ tag = 'rnd' + str(random.randint(0, 10000))
+ out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
+
+ assert img.ndim == 4, img.ndim
+ img = img.to(self.device)
+ x0 = img
+
+ x0_ = x0.view(batch_size, -1) # (batch_size, state_size)
+ self.ldsde = LDSDE(model=self.model, x_init=x0_, score_type=self.args.score_type, img_shape=self.img_shape,
+ sigma2=self.args.sigma2, lambda_ld=self.args.lambda_ld, eta=self.args.eta,
+ model_kwargs=None).to(self.device)
+ self.betas = self.ldsde.discrete_betas.float().to(self.device)
+
+ if bs_id < 2:
+ os.makedirs(out_dir, exist_ok=True)
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
+
+ xs = []
+ for it in range(self.args.sample_step):
+ x = x0
+
+ if bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
+
+ epsilon_dt0, epsilon_dt1 = 0, 1e-5
+ t0, t1 = 1 - self.args.t * 1. / 1000 + epsilon_dt0, 1 - epsilon_dt1
+ t_size = 2
+ ts = torch.linspace(t0, t1, t_size).to(self.device)
+
+ x_ = x.view(batch_size, -1) # (batch_size, state_size)
+ if self.args.use_bm:
+ bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(batch_size, state_size), device=self.device)
+ xs_ = torchsde.sdeint_adjoint(self.ldsde, x_, ts, bm=bm, **self.args_dict)
+ else:
+ xs_ = torchsde.sdeint_adjoint(self.ldsde, x_, ts, **self.args_dict)
+ x0 = xs_[-1].view(x.shape) # (batch_size, c, h, w)
+
+ if bs_id < 2:
+ torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
+
+ xs.append(x0)
+
+ return torch.cat(xs, dim=0)
diff --git a/case_studies/diffpure/runners/diffpure_ode.py b/case_studies/diffpure/runners/diffpure_ode.py
new file mode 100644
index 0000000..9f0217b
--- /dev/null
+++ b/case_studies/diffpure/runners/diffpure_ode.py
@@ -0,0 +1,249 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import os
+import random
+import numpy as np
+
+import torch
+import torchvision.utils as tvu
+
+from torchdiffeq import odeint_adjoint
+
+from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
+from score_sde.losses import get_optimizer
+from score_sde.models import utils as mutils
+from score_sde.models.ema import ExponentialMovingAverage
+from score_sde import sde_lib
+
+
+def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
+ """
+ Extract values from a 1-D numpy array for a batch of indices.
+
+    :param arr_or_func: the 1-D numpy array or a func.
+ :param timesteps: a tensor of indices into the array to extract.
+ :param broadcast_shape: a larger shape of K dimensions with the batch
+ dimension equal to the length of timesteps.
+ :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
+ """
+ if callable(arr_or_func):
+ res = arr_or_func(timesteps).float()
+ else:
+ res = arr_or_func.to(device=timesteps.device)[timesteps].float()
+ while len(res.shape) < len(broadcast_shape):
+ res = res[..., None]
+ return res.expand(broadcast_shape)
+
+
+def restore_checkpoint(ckpt_dir, state, device):
+ loaded_state = torch.load(ckpt_dir, map_location=device)
+ state['optimizer'].load_state_dict(loaded_state['optimizer'])
+ state['model'].load_state_dict(loaded_state['model'], strict=False)
+ state['ema'].load_state_dict(loaded_state['ema'])
+ state['step'] = loaded_state['step']
+
+
+class VPODE(torch.nn.Module):
+ def __init__(self, model, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
+ img_shape=(3, 256, 256), model_kwargs=None):
+ """Construct a Variance Preserving SDE.
+
+ Args:
+ model: diffusion model
+ score_type: [guided_diffusion, score_sde, ddpm]
+ beta_min: value of beta(0)
+ beta_max: value of beta(1)
+ """
+ super().__init__()
+ self.model = model
+ self.score_type = score_type
+ self.model_kwargs = model_kwargs
+ self.img_shape = img_shape
+
+ self.beta_0 = beta_min
+ self.beta_1 = beta_max
+ self.N = N
+ self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
+ self.alphas = 1. - self.discrete_betas
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+ self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
+ self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
+
+ self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
+ self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
+
+ def _scale_timesteps(self, t):
+        assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but got {t} with shape {t.shape}'
+ return (t.float() * self.N).long()
+
+ def vpsde_fn(self, t, x):
+ beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
+ drift = -0.5 * beta_t[:, None] * x
+ diffusion = torch.sqrt(beta_t)
+ return drift, diffusion
+
+ def ode_fn(self, t, x):
+ """Create the drift and diffusion functions for the reverse SDE"""
+ drift, diffusion = self.vpsde_fn(t, x)
+
+ assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
+ x_img = x.view(-1, *self.img_shape)
+
+ if self.score_type == 'guided_diffusion':
+ # model output is epsilon
+ if self.model_kwargs is None:
+ self.model_kwargs = {}
+
+ disc_steps = self._scale_timesteps(t) # (batch_size, ), from float in [0,1] to int in [0, 1000]
+ model_output = self.model(x_img, disc_steps, **self.model_kwargs)
+            # with learned sigma the model outputs (epsilon, variance values); keep only the epsilon part
+ model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
+ assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
+ model_output = model_output.view(x.shape[0], -1)
+ score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
+
+ elif self.score_type == 'score_sde':
+ # model output is epsilon
+ sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
+ score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
+ score = score_fn(x_img, t)
+ assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
+ score = score.view(x.shape[0], -1)
+
+ else:
+            raise NotImplementedError(f'Unknown score type in VPODE: {self.score_type}!')
+
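+        # probability-flow ODE: dx/dt = f(x, t) - 0.5 * g(t)^2 * score(x, t), the deterministic
+        # counterpart of the VP SDE with the same marginal distributions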
+ ode_coef = drift - 0.5 * diffusion[:, None] ** 2 * score
+ return ode_coef
+
+ def forward(self, t, states):
+ x = states[0]
+
+ t = t.expand(x.shape[0]) # (batch_size, )
+ dx_dt = self.ode_fn(t, x)
+ assert dx_dt.shape == x.shape
+
+ return dx_dt,
+
+
+class OdeGuidedDiffusion(torch.nn.Module):
+ def __init__(self, args, config, device=None):
+ super().__init__()
+ self.args = args
+ self.config = config
+ if device is None:
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ self.device = device
+
+ # load model
+ if config.data.dataset == 'ImageNet':
+ img_shape = (3, 256, 256)
+ model_dir = 'checkpoints/diffpure/guided_diffusion'
+ model_config = model_and_diffusion_defaults()
+ model_config.update(vars(self.config.model))
+ print(f'model_config: {model_config}')
+ model, _ = create_model_and_diffusion(**model_config)
+ model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
+
+ if model_config['use_fp16']:
+ model.convert_to_fp16()
+
+ elif config.data.dataset == 'CIFAR10':
+ img_shape = (3, 32, 32)
+ model_dir = 'checkpoints/diffpure/score_sde'
+ print(f'model_config: {config}')
+ model = mutils.create_model(config)
+
+ optimizer = get_optimizer(config, model.parameters())
+ ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
+ state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
+ restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
+ ema.copy_to(model.parameters())
+
+ else:
+ raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
+
+ model.eval().to(self.device)
+
+ self.model = model
+ self.vpode = VPODE(model=model, score_type=args.score_type, img_shape=img_shape,
+ model_kwargs=None).to(self.device)
+ self.betas = self.vpode.discrete_betas.float().to(self.device)
+
+ self.atol, self.rtol = 1e-3, 1e-3
+ self.method = 'euler'
+
+ print(f'method: {self.method}, atol: {self.atol}, rtol: {self.rtol}, step_size: {self.args.step_size}')
+
+ def image_editing_sample(self, img, bs_id=0, tag=None):
+ assert isinstance(img, torch.Tensor)
+ batch_size = img.shape[0]
+
+ if tag is None:
+ tag = 'rnd' + str(random.randint(0, 10000))
+ out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
+
+ assert img.ndim == 4, img.ndim
+ img = img.to(self.device)
+ x0 = img
+
+ if bs_id < 2:
+ os.makedirs(out_dir, exist_ok=True)
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
+
+ xs = []
+ for it in range(self.args.sample_step):
+
+ if self.args.fix_rand:
+ # fix initial randomness
+ noise_fixed = torch.FloatTensor(1, *x0.shape[1:]).\
+ normal_(0, 1, generator=torch.manual_seed(self.args.seed)).to(self.device)
+ print(f'noise_fixed: {noise_fixed[0, 0, 0, :3]}')
+ e = noise_fixed.repeat(x0.shape[0], 1, 1, 1)
+ else:
+ e = torch.randn_like(x0).to(self.device)
+
+ assert e.shape == x0.shape
+
+ total_noise_levels = self.args.t
+ a = (1 - self.betas).cumprod(dim=0).to(self.device)
+ x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
+
+ if bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
+
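+            # integrate the probability-flow ODE backwards from the diffused time t/1000 to (almost) 0;
+            # the small epsilon keeps the solver from evaluating exactly at t = 0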
+ epsilon_dt0, epsilon_dt1 = 0, 1e-5
+ t0, t1 = self.args.t * 1. / 1000 - epsilon_dt0, epsilon_dt1
+ t_size = 2
+ ts = torch.linspace(t0, t1, t_size).to(self.device)
+
+ x_ = x.view(batch_size, -1) # (batch_size, state_size)
+ states = (x_, )
+
+ # ODE solver
+ odeint = odeint_adjoint
+ state_t = odeint(
+ self.vpode,
+ states,
+ ts,
+ atol=self.atol,
+ rtol=self.rtol,
+ method=self.method,
+                options=None if self.method != 'euler' else dict(step_size=self.args.step_size)  # step_size is only used by fixed-step solvers such as 'euler'
+ ) # 'euler', 'dopri5'
+
+ x0_ = state_t[0][-1]
+ x0 = x0_.view(x.shape) # (batch_size, c, h, w)
+
+ if bs_id < 2:
+ torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
+
+ xs.append(x0)
+
+ return torch.cat(xs, dim=0)
diff --git a/case_studies/diffpure/runners/diffpure_sde.py b/case_studies/diffpure/runners/diffpure_sde.py
new file mode 100644
index 0000000..882aef0
--- /dev/null
+++ b/case_studies/diffpure/runners/diffpure_sde.py
@@ -0,0 +1,247 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import os
+import random
+import numpy as np
+
+import torch
+import torchvision.utils as tvu
+import torchsde
+
+from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
+from score_sde.losses import get_optimizer
+from score_sde.models import utils as mutils
+from score_sde.models.ema import ExponentialMovingAverage
+from score_sde import sde_lib
+
+
+def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
+ """
+ Extract values from a 1-D numpy array for a batch of indices.
+
+    :param arr_or_func: the 1-D numpy array or a func.
+ :param timesteps: a tensor of indices into the array to extract.
+ :param broadcast_shape: a larger shape of K dimensions with the batch
+ dimension equal to the length of timesteps.
+ :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
+ """
+ if callable(arr_or_func):
+ res = arr_or_func(timesteps).float()
+ else:
+ res = arr_or_func.to(device=timesteps.device)[timesteps].float()
+ while len(res.shape) < len(broadcast_shape):
+ res = res[..., None]
+ return res.expand(broadcast_shape)
+
+
+def restore_checkpoint(ckpt_dir, state, device):
+ loaded_state = torch.load(ckpt_dir, map_location=device)
+ state['optimizer'].load_state_dict(loaded_state['optimizer'])
+ state['model'].load_state_dict(loaded_state['model'], strict=False)
+ state['ema'].load_state_dict(loaded_state['ema'])
+ state['step'] = loaded_state['step']
+
+
+class RevVPSDE(torch.nn.Module):
+ def __init__(self, model, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
+ img_shape=(3, 256, 256), model_kwargs=None):
+ """Construct a Variance Preserving SDE.
+
+ Args:
+ model: diffusion model
+ score_type: [guided_diffusion, score_sde, ddpm]
+ beta_min: value of beta(0)
+ beta_max: value of beta(1)
+ """
+ super().__init__()
+ self.model = model
+ self.score_type = score_type
+ self.model_kwargs = model_kwargs
+ self.img_shape = img_shape
+
+ self.beta_0 = beta_min
+ self.beta_1 = beta_max
+ self.N = N
+ self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
+ self.alphas = 1. - self.discrete_betas
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+ self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
+ self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
+
+ self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
+ self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
+
+ self.noise_type = "diagonal"
+ self.sde_type = "ito"
+
+ def _scale_timesteps(self, t):
+        assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but got {t} with shape {t.shape}'
+ return (t.float() * self.N).long()
+
+ def vpsde_fn(self, t, x):
+ beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
+ drift = -0.5 * beta_t[:, None] * x
+ diffusion = torch.sqrt(beta_t)
+ return drift, diffusion
+
+ def rvpsde_fn(self, t, x, return_type='drift'):
+ """Create the drift and diffusion functions for the reverse SDE"""
+ drift, diffusion = self.vpsde_fn(t, x)
+
+ if return_type == 'drift':
+
+ assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
+ x_img = x.view(-1, *self.img_shape)
+
+ if self.score_type == 'guided_diffusion':
+ # model output is epsilon
+ if self.model_kwargs is None:
+ self.model_kwargs = {}
+
+ disc_steps = self._scale_timesteps(t) # (batch_size, ), from float in [0,1] to int in [0, 1000]
+ model_output = self.model(x_img, disc_steps, **self.model_kwargs)
+                # with learned sigma the model outputs (epsilon, variance values); keep only the epsilon part
+ model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
+ assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
+ model_output = model_output.view(x.shape[0], -1)
+ score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
+
+ elif self.score_type == 'score_sde':
+ # model output is epsilon
+ sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
+ score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
+ score = score_fn(x_img, t)
+ assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
+ score = score.view(x.shape[0], -1)
+
+ else:
+ raise NotImplementedError(f'Unknown score type in RevVPSDE: {self.score_type}!')
+
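+            # reverse-time VP SDE drift: f(x, t) - g(t)^2 * score(x, t)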
+ drift = drift - diffusion[:, None] ** 2 * score
+ return drift
+
+ else:
+ return diffusion
+
+ def f(self, t, x):
+ """Create the drift function -f(x, 1-t) (by t' = 1 - t)
+        sdeint only supports a 2D tensor (batch_size, c*h*w)
+ """
+ t = t.expand(x.shape[0]) # (batch_size, )
+ drift = self.rvpsde_fn(1 - t, x, return_type='drift')
+ assert drift.shape == x.shape
+ return -drift
+
+ def g(self, t, x):
+ """Create the diffusion function g(1-t) (by t' = 1 - t)
+        sdeint only supports a 2D tensor (batch_size, c*h*w)
+ """
+ t = t.expand(x.shape[0]) # (batch_size, )
+ diffusion = self.rvpsde_fn(1 - t, x, return_type='diffusion')
+ assert diffusion.shape == (x.shape[0], )
+ return diffusion[:, None].expand(x.shape)
+
+
+class RevGuidedDiffusion(torch.nn.Module):
+ def __init__(self, args, config, device=None):
+ super().__init__()
+ self.args = args
+ self.config = config
+ if device is None:
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ self.device = device
+
+ # load model
+ if config.data.dataset == 'ImageNet':
+ img_shape = (3, 256, 256)
+ model_dir = 'checkpoints/diffpure/guided_diffusion'
+ model_config = model_and_diffusion_defaults()
+ model_config.update(vars(self.config.model))
+ print(f'model_config: {model_config}')
+ model, _ = create_model_and_diffusion(**model_config)
+ model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
+
+ if model_config['use_fp16']:
+ model.convert_to_fp16()
+
+ elif config.data.dataset == 'CIFAR10':
+ img_shape = (3, 32, 32)
+ model_dir = 'checkpoints/diffpure/score_sde'
+ print(f'model_config: {config}')
+ model = mutils.create_model(config)
+
+ optimizer = get_optimizer(config, model.parameters())
+ ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
+ state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
+ restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
+ ema.copy_to(model.parameters())
+
+ else:
+ raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
+
+ model.eval().to(self.device)
+
+ self.model = model
+ self.rev_vpsde = RevVPSDE(model=model, score_type=args.score_type, img_shape=img_shape,
+ model_kwargs=None).to(self.device)
+ self.betas = self.rev_vpsde.discrete_betas.float().to(self.device)
+
+ print(f't: {args.t}, rand_t: {args.rand_t}, t_delta: {args.t_delta}')
+ print(f'use_bm: {args.use_bm}')
+
+ def image_editing_sample(self, img, bs_id=0, tag=None):
+ assert isinstance(img, torch.Tensor)
+ batch_size = img.shape[0]
+ state_size = int(np.prod(img.shape[1:])) # c*h*w
+
+ if tag is None:
+ tag = 'rnd' + str(random.randint(0, 10000))
+ out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
+
+ assert img.ndim == 4, img.ndim
+ img = img.to(self.device)
+ x0 = img
+
+ if bs_id < 2:
+ os.makedirs(out_dir, exist_ok=True)
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
+
+ xs = []
+ for it in range(self.args.sample_step):
+
+ e = torch.randn_like(x0).to(self.device)
+ total_noise_levels = self.args.t
+ if self.args.rand_t:
+ total_noise_levels = self.args.t + np.random.randint(-self.args.t_delta, self.args.t_delta)
+ print(f'total_noise_levels: {total_noise_levels}')
+ a = (1 - self.betas).cumprod(dim=0).to(self.device)
+ x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
+
+ if bs_id < 2:
+ tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
+
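+            # RevVPSDE.f/g are written in the reparametrized time t' = 1 - t, so integrating from
+            # t0 = 1 - t/1000 to t1 ~ 1 denoises from diffusion time t/1000 back to (almost) 0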
+ epsilon_dt0, epsilon_dt1 = 0, 1e-5
+ t0, t1 = 1 - self.args.t * 1. / 1000 + epsilon_dt0, 1 - epsilon_dt1
+ t_size = 2
+ ts = torch.linspace(t0, t1, t_size).to(self.device)
+
+ x_ = x.view(batch_size, -1) # (batch_size, state_size)
+ if self.args.use_bm:
+ bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(batch_size, state_size), device=self.device)
+ xs_ = torchsde.sdeint_adjoint(self.rev_vpsde, x_, ts, method='euler', bm=bm)
+ else:
+ xs_ = torchsde.sdeint_adjoint(self.rev_vpsde, x_, ts, method='euler')
+ x0 = xs_[-1].view(x.shape) # (batch_size, c, h, w)
+
+ if bs_id < 2:
+ torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
+ tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
+
+ xs.append(x0)
+
+ return torch.cat(xs, dim=0)
diff --git a/case_studies/diffpure/score_sde/LICENSE_SCORE_SDE b/case_studies/diffpure/score_sde/LICENSE_SCORE_SDE
new file mode 100644
index 0000000..f49a4e1
--- /dev/null
+++ b/case_studies/diffpure/score_sde/LICENSE_SCORE_SDE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/case_studies/diffpure/score_sde/losses.py b/case_studies/diffpure/score_sde/losses.py
new file mode 100644
index 0000000..9b3931b
--- /dev/null
+++ b/case_studies/diffpure/score_sde/losses.py
@@ -0,0 +1,210 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""All functions related to loss computation and optimization.
+"""
+
+import torch
+import torch.optim as optim
+import numpy as np
+from .models import utils as mutils
+from .sde_lib import VESDE, VPSDE
+
+
+def get_optimizer(config, params):
+ """Returns a flax optimizer object based on `config`."""
+ if config.optim.optimizer == 'Adam':
+ optimizer = optim.Adam(params, lr=config.optim.lr, betas=(config.optim.beta1, 0.999), eps=config.optim.eps,
+ weight_decay=config.optim.weight_decay)
+ else:
+ raise NotImplementedError(
+ f'Optimizer {config.optim.optimizer} not supported yet!')
+
+ return optimizer
+
+
+def optimization_manager(config):
+ """Returns an optimize_fn based on `config`."""
+
+ def optimize_fn(optimizer, params, step, lr=config.optim.lr,
+ warmup=config.optim.warmup,
+ grad_clip=config.optim.grad_clip):
+ """Optimizes with warmup and gradient clipping (disabled if negative)."""
+ if warmup > 0:
+ for g in optimizer.param_groups:
+ g['lr'] = lr * np.minimum(step / warmup, 1.0)
+ if grad_clip >= 0:
+ torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)
+ optimizer.step()
+
+ return optimize_fn
+
+
+def get_sde_loss_fn(sde, train, reduce_mean=True, continuous=True, likelihood_weighting=True, eps=1e-5):
+ """Create a loss function for training with arbirary SDEs.
+
+ Args:
+ sde: An `sde_lib.SDE` object that represents the forward SDE.
+ train: `True` for training loss and `False` for evaluation loss.
+ reduce_mean: If `True`, average the loss across data dimensions. Otherwise sum the loss across data dimensions.
+ continuous: `True` indicates that the model is defined to take continuous time steps. Otherwise it requires
+ ad-hoc interpolation to take continuous time steps.
+ likelihood_weighting: If `True`, weight the mixture of score matching losses
+ according to https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended in our paper.
+ eps: A `float` number. The smallest time step to sample from.
+
+ Returns:
+ A loss function.
+ """
+ reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
+
+ def loss_fn(model, batch):
+ """Compute the loss function.
+
+ Args:
+ model: A score model.
+ batch: A mini-batch of training data.
+
+ Returns:
+ loss: A scalar that represents the average loss value across the mini-batch.
+ """
+ score_fn = mutils.get_score_fn(sde, model, train=train, continuous=continuous)
+ t = torch.rand(batch.shape[0], device=batch.device) * (sde.T - eps) + eps
+ z = torch.randn_like(batch)
+ mean, std = sde.marginal_prob(batch, t)
+ perturbed_data = mean + std[:, None, None, None] * z
+ score = score_fn(perturbed_data, t)
+
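+    # denoising score matching: without likelihood weighting the per-example loss is
+    # ||sigma_t * s_theta(x_t, t) + z||^2 (i.e. lambda(t) = sigma_t^2); with it, the weight is g(t)^2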
+ if not likelihood_weighting:
+ losses = torch.square(score * std[:, None, None, None] + z)
+ losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)
+ else:
+ g2 = sde.sde(torch.zeros_like(batch), t)[1] ** 2
+ losses = torch.square(score + z / std[:, None, None, None])
+ losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1) * g2
+
+ loss = torch.mean(losses)
+ return loss
+
+ return loss_fn
+
+
+def get_smld_loss_fn(vesde, train, reduce_mean=False):
+ """Legacy code to reproduce previous results on SMLD(NCSN). Not recommended for new work."""
+ assert isinstance(vesde, VESDE), "SMLD training only works for VESDEs."
+
+ # Previous SMLD models assume descending sigmas
+ smld_sigma_array = torch.flip(vesde.discrete_sigmas, dims=(0,))
+ reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
+
+ def loss_fn(model, batch):
+ model_fn = mutils.get_model_fn(model, train=train)
+ labels = torch.randint(0, vesde.N, (batch.shape[0],), device=batch.device)
+ sigmas = smld_sigma_array.to(batch.device)[labels]
+ noise = torch.randn_like(batch) * sigmas[:, None, None, None]
+ perturbed_data = noise + batch
+ score = model_fn(perturbed_data, labels)
+ target = -noise / (sigmas ** 2)[:, None, None, None]
+ losses = torch.square(score - target)
+ losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1) * sigmas ** 2
+ loss = torch.mean(losses)
+ return loss
+
+ return loss_fn
+
+
+def get_ddpm_loss_fn(vpsde, train, reduce_mean=True):
+ """Legacy code to reproduce previous results on DDPM. Not recommended for new work."""
+ assert isinstance(vpsde, VPSDE), "DDPM training only works for VPSDEs."
+
+ reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
+
+ def loss_fn(model, batch):
+ model_fn = mutils.get_model_fn(model, train=train)
+ labels = torch.randint(0, vpsde.N, (batch.shape[0],), device=batch.device)
+ sqrt_alphas_cumprod = vpsde.sqrt_alphas_cumprod.to(batch.device)
+ sqrt_1m_alphas_cumprod = vpsde.sqrt_1m_alphas_cumprod.to(batch.device)
+ noise = torch.randn_like(batch)
+ perturbed_data = sqrt_alphas_cumprod[labels, None, None, None] * batch + \
+ sqrt_1m_alphas_cumprod[labels, None, None, None] * noise
+ score = model_fn(perturbed_data, labels)
+ losses = torch.square(score - noise)
+ losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)
+ loss = torch.mean(losses)
+ return loss
+
+ return loss_fn
+
+
+def get_step_fn(sde, train, optimize_fn=None, reduce_mean=False, continuous=True, likelihood_weighting=False):
+ """Create a one-step training/evaluation function.
+
+ Args:
+ sde: An `sde_lib.SDE` object that represents the forward SDE.
+ optimize_fn: An optimization function.
+ reduce_mean: If `True`, average the loss across data dimensions. Otherwise sum the loss across data dimensions.
+ continuous: `True` indicates that the model is defined to take continuous time steps.
+ likelihood_weighting: If `True`, weight the mixture of score matching losses according to
+ https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended by our paper.
+
+ Returns:
+ A one-step function for training or evaluation.
+ """
+ if continuous:
+ loss_fn = get_sde_loss_fn(sde, train, reduce_mean=reduce_mean,
+ continuous=True, likelihood_weighting=likelihood_weighting)
+ else:
+ assert not likelihood_weighting, "Likelihood weighting is not supported for original SMLD/DDPM training."
+ if isinstance(sde, VESDE):
+ loss_fn = get_smld_loss_fn(sde, train, reduce_mean=reduce_mean)
+ elif isinstance(sde, VPSDE):
+ loss_fn = get_ddpm_loss_fn(sde, train, reduce_mean=reduce_mean)
+ else:
+ raise ValueError(f"Discrete training for {sde.__class__.__name__} is not recommended.")
+
+ def step_fn(state, batch):
+ """Running one step of training or evaluation.
+
+ This function will undergo `jax.lax.scan` so that multiple steps can be pmapped and jit-compiled together
+ for faster execution.
+
+ Args:
+ state: A dictionary of training information, containing the score model, optimizer,
+ EMA status, and number of optimization steps.
+ batch: A mini-batch of training/evaluation data.
+
+ Returns:
+ loss: The average loss value of this state.
+ """
+ model = state['model']
+ if train:
+ optimizer = state['optimizer']
+ optimizer.zero_grad()
+ loss = loss_fn(model, batch)
+ loss.backward()
+ optimize_fn(optimizer, model.parameters(), step=state['step'])
+ state['step'] += 1
+ state['ema'].update(model.parameters())
+ else:
+ with torch.no_grad():
+ ema = state['ema']
+ ema.store(model.parameters())
+ ema.copy_to(model.parameters())
+ loss = loss_fn(model, batch)
+ ema.restore(model.parameters())
+
+ return loss
+
+ return step_fn
diff --git a/case_studies/diffpure/score_sde/models/__init__.py b/case_studies/diffpure/score_sde/models/__init__.py
new file mode 100644
index 0000000..4b0b4f2
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/__init__.py
@@ -0,0 +1,15 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from . import ncsnpp
diff --git a/case_studies/diffpure/score_sde/models/ddpm.py b/case_studies/diffpure/score_sde/models/ddpm.py
new file mode 100644
index 0000000..2c2a394
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/ddpm.py
@@ -0,0 +1,181 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+"""DDPM model.
+
+This code is the pytorch equivalent of:
+https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py
+"""
+import torch
+import torch.nn as nn
+import functools
+
+from . import utils, layers, normalization
+
+RefineBlock = layers.RefineBlock
+ResidualBlock = layers.ResidualBlock
+ResnetBlockDDPM = layers.ResnetBlockDDPM
+Upsample = layers.Upsample
+Downsample = layers.Downsample
+conv3x3 = layers.ddpm_conv3x3
+get_act = layers.get_act
+get_normalization = normalization.get_normalization
+default_initializer = layers.default_init
+
+
+@utils.register_model(name='ddpm')
+class DDPM(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.act = act = get_act(config)
+ self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))
+
+ self.nf = nf = config.model.nf
+ ch_mult = config.model.ch_mult
+ self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
+ self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
+ dropout = config.model.dropout
+ resamp_with_conv = config.model.resamp_with_conv
+ self.num_resolutions = num_resolutions = len(ch_mult)
+ self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]
+
+ AttnBlock = functools.partial(layers.AttnBlock)
+ self.conditional = conditional = config.model.conditional
+ ResnetBlock = functools.partial(ResnetBlockDDPM, act=act, temb_dim=4 * nf, dropout=dropout)
+    if conditional:
+      # Condition on noise levels.
+      modules = [nn.Linear(nf, nf * 4)]
+      modules[0].weight.data = default_initializer()(modules[0].weight.data.shape)
+      nn.init.zeros_(modules[0].bias)
+      modules.append(nn.Linear(nf * 4, nf * 4))
+      modules[1].weight.data = default_initializer()(modules[1].weight.data.shape)
+      nn.init.zeros_(modules[1].bias)
+    else:
+      # without conditioning, `modules` would otherwise be undefined when the blocks below are appended
+      modules = []
+
+ self.centered = config.data.centered
+ channels = config.data.num_channels
+
+ # Downsampling block
+ modules.append(conv3x3(channels, nf))
+ hs_c = [nf]
+ in_ch = nf
+ for i_level in range(num_resolutions):
+ # Residual blocks for this resolution
+ for i_block in range(num_res_blocks):
+ out_ch = nf * ch_mult[i_level]
+ modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
+ in_ch = out_ch
+ if all_resolutions[i_level] in attn_resolutions:
+ modules.append(AttnBlock(channels=in_ch))
+ hs_c.append(in_ch)
+ if i_level != num_resolutions - 1:
+ modules.append(Downsample(channels=in_ch, with_conv=resamp_with_conv))
+ hs_c.append(in_ch)
+
+ in_ch = hs_c[-1]
+ modules.append(ResnetBlock(in_ch=in_ch))
+ modules.append(AttnBlock(channels=in_ch))
+ modules.append(ResnetBlock(in_ch=in_ch))
+
+ # Upsampling block
+ for i_level in reversed(range(num_resolutions)):
+ for i_block in range(num_res_blocks + 1):
+ out_ch = nf * ch_mult[i_level]
+ modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch))
+ in_ch = out_ch
+ if all_resolutions[i_level] in attn_resolutions:
+ modules.append(AttnBlock(channels=in_ch))
+ if i_level != 0:
+ modules.append(Upsample(channels=in_ch, with_conv=resamp_with_conv))
+
+ assert not hs_c
+ modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=32, eps=1e-6))
+ modules.append(conv3x3(in_ch, channels, init_scale=0.))
+ self.all_modules = nn.ModuleList(modules)
+
+ self.scale_by_sigma = config.model.scale_by_sigma
+
+ def forward(self, x, labels):
+ modules = self.all_modules
+ m_idx = 0
+ if self.conditional:
+ # timestep/scale embedding
+ timesteps = labels
+ temb = layers.get_timestep_embedding(timesteps, self.nf)
+ temb = modules[m_idx](temb)
+ m_idx += 1
+ temb = modules[m_idx](self.act(temb))
+ m_idx += 1
+ else:
+ temb = None
+
+ if self.centered:
+ # Input is in [-1, 1]
+ h = x
+ else:
+ # Input is in [0, 1]
+ h = 2 * x - 1.
+
+ # Downsampling block
+ hs = [modules[m_idx](h)]
+ m_idx += 1
+ for i_level in range(self.num_resolutions):
+ # Residual blocks for this resolution
+ for i_block in range(self.num_res_blocks):
+ h = modules[m_idx](hs[-1], temb)
+ m_idx += 1
+ if h.shape[-1] in self.attn_resolutions:
+ h = modules[m_idx](h)
+ m_idx += 1
+ hs.append(h)
+ if i_level != self.num_resolutions - 1:
+ hs.append(modules[m_idx](hs[-1]))
+ m_idx += 1
+
+ h = hs[-1]
+ h = modules[m_idx](h, temb)
+ m_idx += 1
+ h = modules[m_idx](h)
+ m_idx += 1
+ h = modules[m_idx](h, temb)
+ m_idx += 1
+
+ # Upsampling block
+ for i_level in reversed(range(self.num_resolutions)):
+ for i_block in range(self.num_res_blocks + 1):
+ h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
+ m_idx += 1
+ if h.shape[-1] in self.attn_resolutions:
+ h = modules[m_idx](h)
+ m_idx += 1
+ if i_level != 0:
+ h = modules[m_idx](h)
+ m_idx += 1
+
+ assert not hs
+ h = self.act(modules[m_idx](h))
+ m_idx += 1
+ h = modules[m_idx](h)
+ m_idx += 1
+ assert m_idx == len(modules)
+
+ if self.scale_by_sigma:
+ # Divide the output by sigmas. Useful for training with the NCSN loss.
+ # The DDPM loss scales the network output by sigma in the loss function,
+      # so there is no need to do it here.
+ used_sigmas = self.sigmas[labels, None, None, None]
+ h = h / used_sigmas
+
+ return h
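
The DDPM/NCSN++ forward passes in this diff traverse a flat `nn.ModuleList` with a hand-maintained index `m_idx` rather than named attributes; the trailing `assert m_idx == len(modules)` checks that construction order and traversal order stay in lockstep. The sketch below is not part of the diff (`TinyIndexedNet` is a made-up example) and only isolates that pattern.

```python
import torch
import torch.nn as nn


class TinyIndexedNet(nn.Module):
    """Minimal illustration of the `m_idx` traversal used by DDPM/NCSNpp above."""

    def __init__(self):
        super().__init__()
        # Build the network as a flat, ordered list of modules.
        self.all_modules = nn.ModuleList([nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4)])

    def forward(self, x):
        modules = self.all_modules
        m_idx = 0
        for _ in range(len(modules)):
            x = modules[m_idx](x)
            m_idx += 1
        # As in DDPM.forward: every module must be consumed exactly once.
        assert m_idx == len(modules)
        return x


print(TinyIndexedNet()(torch.randn(2, 8)).shape)  # torch.Size([2, 4])
```
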
diff --git a/case_studies/diffpure/score_sde/models/ema.py b/case_studies/diffpure/score_sde/models/ema.py
new file mode 100644
index 0000000..84c5932
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/ema.py
@@ -0,0 +1,106 @@
+# ---------------------------------------------------------------
+# Taken as-is from:
+# https://github.com/yang-song/score_sde_pytorch/blob/main/models/ema.py
+#
+# The license for the original version of this file can be
+# found in the `score_sde` directory (LICENSE_SCORE_SDE).
+# ---------------------------------------------------------------
+
+# Modified from https://raw.githubusercontent.com/fadel/pytorch_ema/master/torch_ema/ema.py
+
+from __future__ import division
+from __future__ import unicode_literals
+
+import torch
+
+
+# Partially based on: https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
+class ExponentialMovingAverage:
+ """
+ Maintains (exponential) moving average of a set of parameters.
+ """
+
+ def __init__(self, parameters, decay, use_num_updates=True):
+ """
+ Args:
+ parameters: Iterable of `torch.nn.Parameter`; usually the result of
+ `model.parameters()`.
+ decay: The exponential decay.
+ use_num_updates: Whether to use number of updates when computing
+ averages.
+ """
+ if decay < 0.0 or decay > 1.0:
+ raise ValueError('Decay must be between 0 and 1')
+ self.decay = decay
+ self.num_updates = 0 if use_num_updates else None
+ self.shadow_params = [p.clone().detach()
+ for p in parameters if p.requires_grad]
+ self.collected_params = []
+
+ def update(self, parameters):
+ """
+ Update currently maintained parameters.
+
+    Call this every time the parameters are updated, for example after each
+    `optimizer.step()` call.
+
+ Args:
+ parameters: Iterable of `torch.nn.Parameter`; usually the same set of
+ parameters used to initialize this object.
+ """
+ decay = self.decay
+ if self.num_updates is not None:
+ self.num_updates += 1
+ decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
+ one_minus_decay = 1.0 - decay
+ with torch.no_grad():
+ parameters = [p for p in parameters if p.requires_grad]
+ for s_param, param in zip(self.shadow_params, parameters):
+ s_param.sub_(one_minus_decay * (s_param - param))
+
+ def copy_to(self, parameters):
+ """
+ Copy current parameters into given collection of parameters.
+
+ Args:
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
+ updated with the stored moving averages.
+ """
+ parameters = [p for p in parameters if p.requires_grad]
+ for s_param, param in zip(self.shadow_params, parameters):
+ if param.requires_grad:
+ param.data.copy_(s_param.data)
+
+ def store(self, parameters):
+ """
+ Save the current parameters for restoring later.
+
+ Args:
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
+ temporarily stored.
+ """
+ self.collected_params = [param.clone() for param in parameters]
+
+ def restore(self, parameters):
+ """
+ Restore the parameters stored with the `store` method.
+ Useful to validate the model with EMA parameters without affecting the
+ original optimization process. Store the parameters before the
+ `copy_to` method. After validation (or model saving), use this to
+ restore the former parameters.
+
+ Args:
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
+ updated with the stored parameters.
+ """
+ for c_param, param in zip(self.collected_params, parameters):
+ param.data.copy_(c_param.data)
+
+ def state_dict(self):
+ return dict(decay=self.decay, num_updates=self.num_updates,
+ shadow_params=self.shadow_params)
+
+ def load_state_dict(self, state_dict):
+ self.decay = state_dict['decay']
+ self.num_updates = state_dict['num_updates']
+ self.shadow_params = state_dict['shadow_params']
\ No newline at end of file
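
A usage sketch for the EMA helper above (not part of the diff; the linear model and SGD optimizer are placeholders): `update` is called after every optimizer step, and the `store`/`copy_to`/`restore` trio temporarily swaps the shadow weights in for evaluation.

```python
import torch
import torch.nn as nn

# Path as added by this diff; assumes the repository root is on PYTHONPATH.
from case_studies.diffpure.score_sde.models.ema import ExponentialMovingAverage

model = nn.Linear(4, 2)                       # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for _ in range(10):                           # toy training loop
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model.parameters())            # keep the shadow weights in sync

# Evaluate with EMA weights, then put the raw training weights back.
ema.store(model.parameters())
ema.copy_to(model.parameters())
# ... run validation here ...
ema.restore(model.parameters())
```
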
diff --git a/case_studies/diffpure/score_sde/models/layers.py b/case_studies/diffpure/score_sde/models/layers.py
new file mode 100644
index 0000000..296a2a0
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/layers.py
@@ -0,0 +1,662 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+"""Common layers for defining score networks.
+"""
+import math
+import string
+from functools import partial
+import torch.nn as nn
+import torch
+import torch.nn.functional as F
+import numpy as np
+from .normalization import ConditionalInstanceNorm2dPlus
+
+
+def get_act(config):
+ """Get activation functions from the config file."""
+
+ if config.model.nonlinearity.lower() == 'elu':
+ return nn.ELU()
+ elif config.model.nonlinearity.lower() == 'relu':
+ return nn.ReLU()
+ elif config.model.nonlinearity.lower() == 'lrelu':
+ return nn.LeakyReLU(negative_slope=0.2)
+ elif config.model.nonlinearity.lower() == 'swish':
+ return nn.SiLU()
+ else:
+ raise NotImplementedError('activation function does not exist!')
+
+
+def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=0):
+ """1x1 convolution. Same as NCSNv1/v2."""
+ conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias, dilation=dilation,
+ padding=padding)
+ init_scale = 1e-10 if init_scale == 0 else init_scale
+ conv.weight.data *= init_scale
+  if conv.bias is not None:  # bias=False leaves conv.bias as None
+    conv.bias.data *= init_scale
+ return conv
+
+
+def variance_scaling(scale, mode, distribution,
+ in_axis=1, out_axis=0,
+ dtype=torch.float32,
+ device='cpu'):
+ """Ported from JAX. """
+
+ def _compute_fans(shape, in_axis=1, out_axis=0):
+ receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis]
+ fan_in = shape[in_axis] * receptive_field_size
+ fan_out = shape[out_axis] * receptive_field_size
+ return fan_in, fan_out
+
+ def init(shape, dtype=dtype, device=device):
+ fan_in, fan_out = _compute_fans(shape, in_axis, out_axis)
+ if mode == "fan_in":
+ denominator = fan_in
+ elif mode == "fan_out":
+ denominator = fan_out
+ elif mode == "fan_avg":
+ denominator = (fan_in + fan_out) / 2
+ else:
+ raise ValueError(
+ "invalid mode for variance scaling initializer: {}".format(mode))
+ variance = scale / denominator
+ if distribution == "normal":
+ return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance)
+ elif distribution == "uniform":
+ return (torch.rand(*shape, dtype=dtype, device=device) * 2. - 1.) * np.sqrt(3 * variance)
+ else:
+ raise ValueError("invalid distribution for variance scaling initializer")
+
+ return init
+
+
+def default_init(scale=1.):
+ """The same initialization used in DDPM."""
+ scale = 1e-10 if scale == 0 else scale
+ return variance_scaling(scale, 'fan_avg', 'uniform')
+
+
+class Dense(nn.Module):
+ """Linear layer with `default_init`."""
+ def __init__(self):
+ super().__init__()
+
+
+def ddpm_conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1., padding=0):
+ """1x1 convolution with DDPM initialization."""
+ conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias)
+ conv.weight.data = default_init(init_scale)(conv.weight.data.shape)
+ nn.init.zeros_(conv.bias)
+ return conv
+
+
+def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1):
+ """3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2."""
+ init_scale = 1e-10 if init_scale == 0 else init_scale
+ conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias,
+ dilation=dilation, padding=padding, kernel_size=3)
+ conv.weight.data *= init_scale
+  if conv.bias is not None:  # bias=False (as used by CRPBlock/RCUBlock) leaves conv.bias as None
+    conv.bias.data *= init_scale
+ return conv
+
+
+def ddpm_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1):
+ """3x3 convolution with DDPM initialization."""
+ conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding,
+ dilation=dilation, bias=bias)
+ conv.weight.data = default_init(init_scale)(conv.weight.data.shape)
+ nn.init.zeros_(conv.bias)
+ return conv
+
+###########################################################################
+# Functions below are ported over from the NCSNv1/NCSNv2 codebase:
+# https://github.com/ermongroup/ncsn
+# https://github.com/ermongroup/ncsnv2
+###########################################################################
+
+
+class CRPBlock(nn.Module):
+ def __init__(self, features, n_stages, act=nn.ReLU(), maxpool=True):
+ super().__init__()
+ self.convs = nn.ModuleList()
+ for i in range(n_stages):
+ self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False))
+ self.n_stages = n_stages
+ if maxpool:
+ self.pool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
+ else:
+ self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2)
+
+ self.act = act
+
+ def forward(self, x):
+ x = self.act(x)
+ path = x
+ for i in range(self.n_stages):
+ path = self.pool(path)
+ path = self.convs[i](path)
+ x = path + x
+ return x
+
+
+class CondCRPBlock(nn.Module):
+ def __init__(self, features, n_stages, num_classes, normalizer, act=nn.ReLU()):
+ super().__init__()
+ self.convs = nn.ModuleList()
+ self.norms = nn.ModuleList()
+ self.normalizer = normalizer
+ for i in range(n_stages):
+ self.norms.append(normalizer(features, num_classes, bias=True))
+ self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False))
+
+ self.n_stages = n_stages
+ self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2)
+ self.act = act
+
+ def forward(self, x, y):
+ x = self.act(x)
+ path = x
+ for i in range(self.n_stages):
+ path = self.norms[i](path, y)
+ path = self.pool(path)
+ path = self.convs[i](path)
+
+ x = path + x
+ return x
+
+
+class RCUBlock(nn.Module):
+ def __init__(self, features, n_blocks, n_stages, act=nn.ReLU()):
+ super().__init__()
+
+ for i in range(n_blocks):
+ for j in range(n_stages):
+ setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False))
+
+ self.stride = 1
+ self.n_blocks = n_blocks
+ self.n_stages = n_stages
+ self.act = act
+
+ def forward(self, x):
+ for i in range(self.n_blocks):
+ residual = x
+ for j in range(self.n_stages):
+ x = self.act(x)
+ x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x)
+
+ x += residual
+ return x
+
+
+class CondRCUBlock(nn.Module):
+ def __init__(self, features, n_blocks, n_stages, num_classes, normalizer, act=nn.ReLU()):
+ super().__init__()
+
+ for i in range(n_blocks):
+ for j in range(n_stages):
+ setattr(self, '{}_{}_norm'.format(i + 1, j + 1), normalizer(features, num_classes, bias=True))
+ setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False))
+
+ self.stride = 1
+ self.n_blocks = n_blocks
+ self.n_stages = n_stages
+ self.act = act
+ self.normalizer = normalizer
+
+ def forward(self, x, y):
+ for i in range(self.n_blocks):
+ residual = x
+ for j in range(self.n_stages):
+ x = getattr(self, '{}_{}_norm'.format(i + 1, j + 1))(x, y)
+ x = self.act(x)
+ x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x)
+
+ x += residual
+ return x
+
+
+class MSFBlock(nn.Module):
+ def __init__(self, in_planes, features):
+ super().__init__()
+ assert isinstance(in_planes, list) or isinstance(in_planes, tuple)
+ self.convs = nn.ModuleList()
+ self.features = features
+
+ for i in range(len(in_planes)):
+ self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True))
+
+ def forward(self, xs, shape):
+ sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device)
+ for i in range(len(self.convs)):
+ h = self.convs[i](xs[i])
+ h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True)
+ sums += h
+ return sums
+
+
+class CondMSFBlock(nn.Module):
+ def __init__(self, in_planes, features, num_classes, normalizer):
+ super().__init__()
+ assert isinstance(in_planes, list) or isinstance(in_planes, tuple)
+
+ self.convs = nn.ModuleList()
+ self.norms = nn.ModuleList()
+ self.features = features
+ self.normalizer = normalizer
+
+ for i in range(len(in_planes)):
+ self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True))
+ self.norms.append(normalizer(in_planes[i], num_classes, bias=True))
+
+ def forward(self, xs, y, shape):
+ sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device)
+ for i in range(len(self.convs)):
+ h = self.norms[i](xs[i], y)
+ h = self.convs[i](h)
+ h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True)
+ sums += h
+ return sums
+
+
+class RefineBlock(nn.Module):
+ def __init__(self, in_planes, features, act=nn.ReLU(), start=False, end=False, maxpool=True):
+ super().__init__()
+
+ assert isinstance(in_planes, tuple) or isinstance(in_planes, list)
+ self.n_blocks = n_blocks = len(in_planes)
+
+ self.adapt_convs = nn.ModuleList()
+ for i in range(n_blocks):
+ self.adapt_convs.append(RCUBlock(in_planes[i], 2, 2, act))
+
+ self.output_convs = RCUBlock(features, 3 if end else 1, 2, act)
+
+ if not start:
+ self.msf = MSFBlock(in_planes, features)
+
+ self.crp = CRPBlock(features, 2, act, maxpool=maxpool)
+
+ def forward(self, xs, output_shape):
+ assert isinstance(xs, tuple) or isinstance(xs, list)
+ hs = []
+ for i in range(len(xs)):
+ h = self.adapt_convs[i](xs[i])
+ hs.append(h)
+
+ if self.n_blocks > 1:
+ h = self.msf(hs, output_shape)
+ else:
+ h = hs[0]
+
+ h = self.crp(h)
+ h = self.output_convs(h)
+
+ return h
+
+
+class CondRefineBlock(nn.Module):
+ def __init__(self, in_planes, features, num_classes, normalizer, act=nn.ReLU(), start=False, end=False):
+ super().__init__()
+
+ assert isinstance(in_planes, tuple) or isinstance(in_planes, list)
+ self.n_blocks = n_blocks = len(in_planes)
+
+ self.adapt_convs = nn.ModuleList()
+ for i in range(n_blocks):
+ self.adapt_convs.append(
+ CondRCUBlock(in_planes[i], 2, 2, num_classes, normalizer, act)
+ )
+
+ self.output_convs = CondRCUBlock(features, 3 if end else 1, 2, num_classes, normalizer, act)
+
+ if not start:
+ self.msf = CondMSFBlock(in_planes, features, num_classes, normalizer)
+
+ self.crp = CondCRPBlock(features, 2, num_classes, normalizer, act)
+
+ def forward(self, xs, y, output_shape):
+ assert isinstance(xs, tuple) or isinstance(xs, list)
+ hs = []
+ for i in range(len(xs)):
+ h = self.adapt_convs[i](xs[i], y)
+ hs.append(h)
+
+ if self.n_blocks > 1:
+ h = self.msf(hs, y, output_shape)
+ else:
+ h = hs[0]
+
+ h = self.crp(h, y)
+ h = self.output_convs(h, y)
+
+ return h
+
+
+class ConvMeanPool(nn.Module):
+ def __init__(self, input_dim, output_dim, kernel_size=3, biases=True, adjust_padding=False):
+ super().__init__()
+ if not adjust_padding:
+ conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
+ self.conv = conv
+ else:
+ conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
+
+ self.conv = nn.Sequential(
+ nn.ZeroPad2d((1, 0, 1, 0)),
+ conv
+ )
+
+ def forward(self, inputs):
+ output = self.conv(inputs)
+ output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
+ output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.
+ return output
+
+
+class MeanPoolConv(nn.Module):
+ def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
+ super().__init__()
+ self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
+
+ def forward(self, inputs):
+ output = inputs
+ output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
+ output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.
+ return self.conv(output)
+
+
+class UpsampleConv(nn.Module):
+ def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
+ super().__init__()
+ self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
+ self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
+
+ def forward(self, inputs):
+ output = inputs
+ output = torch.cat([output, output, output, output], dim=1)
+ output = self.pixelshuffle(output)
+ return self.conv(output)
+
+
+class ConditionalResidualBlock(nn.Module):
+  def __init__(self, input_dim, output_dim, num_classes, resample=None, act=nn.ELU(),
+               normalization=ConditionalInstanceNorm2dPlus, adjust_padding=False, dilation=1):
+    # Defaults aligned with ResidualBlock below: resample=1 / dilation=None hit the
+    # 'invalid resample value' branch and broke the `dilation > 1` checks when left unset.
+ super().__init__()
+ self.non_linearity = act
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+ self.resample = resample
+ self.normalization = normalization
+ if resample == 'down':
+ if dilation > 1:
+ self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation)
+ self.normalize2 = normalization(input_dim, num_classes)
+ self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
+ conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
+ else:
+ self.conv1 = ncsn_conv3x3(input_dim, input_dim)
+ self.normalize2 = normalization(input_dim, num_classes)
+ self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding)
+ conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding)
+
+ elif resample is None:
+ if dilation > 1:
+ conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
+ self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
+ self.normalize2 = normalization(output_dim, num_classes)
+ self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation)
+ else:
+        conv_shortcut = partial(ncsn_conv1x1)  # a bare nn.Conv2d lacks kernel_size; mirror ResidualBlock's 1x1 shortcut
+ self.conv1 = ncsn_conv3x3(input_dim, output_dim)
+ self.normalize2 = normalization(output_dim, num_classes)
+ self.conv2 = ncsn_conv3x3(output_dim, output_dim)
+ else:
+ raise Exception('invalid resample value')
+
+ if output_dim != input_dim or resample is not None:
+ self.shortcut = conv_shortcut(input_dim, output_dim)
+
+ self.normalize1 = normalization(input_dim, num_classes)
+
+ def forward(self, x, y):
+ output = self.normalize1(x, y)
+ output = self.non_linearity(output)
+ output = self.conv1(output)
+ output = self.normalize2(output, y)
+ output = self.non_linearity(output)
+ output = self.conv2(output)
+
+ if self.output_dim == self.input_dim and self.resample is None:
+ shortcut = x
+ else:
+ shortcut = self.shortcut(x)
+
+ return shortcut + output
+
+
+class ResidualBlock(nn.Module):
+ def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(),
+ normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1):
+ super().__init__()
+ self.non_linearity = act
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+ self.resample = resample
+ self.normalization = normalization
+ if resample == 'down':
+ if dilation > 1:
+ self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation)
+ self.normalize2 = normalization(input_dim)
+ self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
+ conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
+ else:
+ self.conv1 = ncsn_conv3x3(input_dim, input_dim)
+ self.normalize2 = normalization(input_dim)
+ self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding)
+ conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding)
+
+ elif resample is None:
+ if dilation > 1:
+ conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
+ self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
+ self.normalize2 = normalization(output_dim)
+ self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation)
+ else:
+        # A bare nn.Conv2d would be missing its kernel_size here; use a 1x1 NCSN conv
+        # for the shortcut instead.
+ conv_shortcut = partial(ncsn_conv1x1)
+ self.conv1 = ncsn_conv3x3(input_dim, output_dim)
+ self.normalize2 = normalization(output_dim)
+ self.conv2 = ncsn_conv3x3(output_dim, output_dim)
+ else:
+ raise Exception('invalid resample value')
+
+ if output_dim != input_dim or resample is not None:
+ self.shortcut = conv_shortcut(input_dim, output_dim)
+
+ self.normalize1 = normalization(input_dim)
+
+ def forward(self, x):
+ output = self.normalize1(x)
+ output = self.non_linearity(output)
+ output = self.conv1(output)
+ output = self.normalize2(output)
+ output = self.non_linearity(output)
+ output = self.conv2(output)
+
+ if self.output_dim == self.input_dim and self.resample is None:
+ shortcut = x
+ else:
+ shortcut = self.shortcut(x)
+
+ return shortcut + output
+
+
+###########################################################################
+# Functions below are ported over from the DDPM codebase:
+# https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py
+###########################################################################
+
+def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
+ assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32
+ half_dim = embedding_dim // 2
+ # magic number 10000 is from transformers
+ emb = math.log(max_positions) / (half_dim - 1)
+ # emb = math.log(2.) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb)
+ # emb = tf.range(num_embeddings, dtype=jnp.float32)[:, None] * emb[None, :]
+ # emb = tf.cast(timesteps, dtype=jnp.float32)[:, None] * emb[None, :]
+ emb = timesteps.float()[:, None] * emb[None, :]
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+ if embedding_dim % 2 == 1: # zero pad
+ emb = F.pad(emb, (0, 1), mode='constant')
+ assert emb.shape == (timesteps.shape[0], embedding_dim)
+ return emb
+
+
+def _einsum(a, b, c, x, y):
+ einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c))
+ return torch.einsum(einsum_str, x, y)
+
+
+def contract_inner(x, y):
+ """tensordot(x, y, 1)."""
+ x_chars = list(string.ascii_lowercase[:len(x.shape)])
+ y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x.shape)])
+ y_chars[0] = x_chars[-1] # first axis of y and last of x get summed
+ out_chars = x_chars[:-1] + y_chars[1:]
+ return _einsum(x_chars, y_chars, out_chars, x, y)
+
+
+class NIN(nn.Module):
+ def __init__(self, in_dim, num_units, init_scale=0.1):
+ super().__init__()
+ self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True)
+ self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True)
+
+ def forward(self, x):
+ x = x.permute(0, 2, 3, 1)
+ y = contract_inner(x, self.W) + self.b
+ return y.permute(0, 3, 1, 2)
+
+
+class AttnBlock(nn.Module):
+ """Channel-wise self-attention block."""
+ def __init__(self, channels):
+ super().__init__()
+ self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6)
+ self.NIN_0 = NIN(channels, channels)
+ self.NIN_1 = NIN(channels, channels)
+ self.NIN_2 = NIN(channels, channels)
+ self.NIN_3 = NIN(channels, channels, init_scale=0.)
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ h = self.GroupNorm_0(x)
+ q = self.NIN_0(h)
+ k = self.NIN_1(h)
+ v = self.NIN_2(h)
+
+ w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5))
+ w = torch.reshape(w, (B, H, W, H * W))
+ w = F.softmax(w, dim=-1)
+ w = torch.reshape(w, (B, H, W, H, W))
+ h = torch.einsum('bhwij,bcij->bchw', w, v)
+ h = self.NIN_3(h)
+ return x + h
+
+
+class Upsample(nn.Module):
+ def __init__(self, channels, with_conv=False):
+ super().__init__()
+ if with_conv:
+ self.Conv_0 = ddpm_conv3x3(channels, channels)
+ self.with_conv = with_conv
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ h = F.interpolate(x, (H * 2, W * 2), mode='nearest')
+ if self.with_conv:
+ h = self.Conv_0(h)
+ return h
+
+
+class Downsample(nn.Module):
+ def __init__(self, channels, with_conv=False):
+ super().__init__()
+ if with_conv:
+ self.Conv_0 = ddpm_conv3x3(channels, channels, stride=2, padding=0)
+ self.with_conv = with_conv
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ # Emulate 'SAME' padding
+ if self.with_conv:
+ x = F.pad(x, (0, 1, 0, 1))
+ x = self.Conv_0(x)
+ else:
+ x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0)
+
+ assert x.shape == (B, C, H // 2, W // 2)
+ return x
+
+
+class ResnetBlockDDPM(nn.Module):
+ """The ResNet Blocks used in DDPM."""
+ def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, dropout=0.1):
+ super().__init__()
+ if out_ch is None:
+ out_ch = in_ch
+ self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=in_ch, eps=1e-6)
+ self.act = act
+ self.Conv_0 = ddpm_conv3x3(in_ch, out_ch)
+ if temb_dim is not None:
+ self.Dense_0 = nn.Linear(temb_dim, out_ch)
+ self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape)
+ nn.init.zeros_(self.Dense_0.bias)
+
+ self.GroupNorm_1 = nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-6)
+ self.Dropout_0 = nn.Dropout(dropout)
+ self.Conv_1 = ddpm_conv3x3(out_ch, out_ch, init_scale=0.)
+ if in_ch != out_ch:
+ if conv_shortcut:
+ self.Conv_2 = ddpm_conv3x3(in_ch, out_ch)
+ else:
+ self.NIN_0 = NIN(in_ch, out_ch)
+ self.out_ch = out_ch
+ self.in_ch = in_ch
+ self.conv_shortcut = conv_shortcut
+
+ def forward(self, x, temb=None):
+ B, C, H, W = x.shape
+ assert C == self.in_ch
+ out_ch = self.out_ch if self.out_ch else self.in_ch
+ h = self.act(self.GroupNorm_0(x))
+ h = self.Conv_0(h)
+ # Add bias to each feature map conditioned on the time embedding
+ if temb is not None:
+ h += self.Dense_0(self.act(temb))[:, :, None, None]
+ h = self.act(self.GroupNorm_1(h))
+ h = self.Dropout_0(h)
+ h = self.Conv_1(h)
+ if C != out_ch:
+ if self.conv_shortcut:
+ x = self.Conv_2(x)
+ else:
+ x = self.NIN_0(x)
+ return x + h
\ No newline at end of file
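
A quick shape check (not part of the diff) for the sinusoidal timestep embedding defined above, which `DDPM.forward` feeds with the integer `labels`; it assumes the repository root is on PYTHONPATH so the package-relative imports resolve.

```python
import torch

from case_studies.diffpure.score_sde.models.layers import get_timestep_embedding

t = torch.arange(16)                               # integer timesteps, shape (B,)
emb = get_timestep_embedding(t, embedding_dim=128)
print(emb.shape)                                   # torch.Size([16, 128]): sin half then cos half
```
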
diff --git a/case_studies/diffpure/score_sde/models/layerspp.py b/case_studies/diffpure/score_sde/models/layerspp.py
new file mode 100644
index 0000000..948b068
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/layerspp.py
@@ -0,0 +1,274 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+"""Layers for defining NCSN++.
+"""
+from . import layers
+from . import up_or_down_sampling
+import torch.nn as nn
+import torch
+import torch.nn.functional as F
+import numpy as np
+
+conv1x1 = layers.ddpm_conv1x1
+conv3x3 = layers.ddpm_conv3x3
+NIN = layers.NIN
+default_init = layers.default_init
+
+
+class GaussianFourierProjection(nn.Module):
+ """Gaussian Fourier embeddings for noise levels."""
+
+ def __init__(self, embedding_size=256, scale=1.0):
+ super().__init__()
+ self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
+
+ def forward(self, x):
+ x_proj = x[:, None] * self.W[None, :] * 2 * np.pi
+ return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
+
+
+class Combine(nn.Module):
+ """Combine information from skip connections."""
+
+ def __init__(self, dim1, dim2, method='cat'):
+ super().__init__()
+ self.Conv_0 = conv1x1(dim1, dim2)
+ self.method = method
+
+ def forward(self, x, y):
+ h = self.Conv_0(x)
+ if self.method == 'cat':
+ return torch.cat([h, y], dim=1)
+ elif self.method == 'sum':
+ return h + y
+ else:
+ raise ValueError(f'Method {self.method} not recognized.')
+
+
+class AttnBlockpp(nn.Module):
+ """Channel-wise self-attention block. Modified from DDPM."""
+
+ def __init__(self, channels, skip_rescale=False, init_scale=0.):
+ super().__init__()
+ self.GroupNorm_0 = nn.GroupNorm(num_groups=min(channels // 4, 32), num_channels=channels,
+ eps=1e-6)
+ self.NIN_0 = NIN(channels, channels)
+ self.NIN_1 = NIN(channels, channels)
+ self.NIN_2 = NIN(channels, channels)
+ self.NIN_3 = NIN(channels, channels, init_scale=init_scale)
+ self.skip_rescale = skip_rescale
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ h = self.GroupNorm_0(x)
+ q = self.NIN_0(h)
+ k = self.NIN_1(h)
+ v = self.NIN_2(h)
+
+ w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5))
+ w = torch.reshape(w, (B, H, W, H * W))
+ w = F.softmax(w, dim=-1)
+ w = torch.reshape(w, (B, H, W, H, W))
+ h = torch.einsum('bhwij,bcij->bchw', w, v)
+ h = self.NIN_3(h)
+ if not self.skip_rescale:
+ return x + h
+ else:
+ return (x + h) / np.sqrt(2.)
+
+
+class Upsample(nn.Module):
+ def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False,
+ fir_kernel=(1, 3, 3, 1)):
+ super().__init__()
+ out_ch = out_ch if out_ch else in_ch
+ if not fir:
+ if with_conv:
+ self.Conv_0 = conv3x3(in_ch, out_ch)
+ else:
+ if with_conv:
+ self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch,
+ kernel=3, up=True,
+ resample_kernel=fir_kernel,
+ use_bias=True,
+ kernel_init=default_init())
+ self.fir = fir
+ self.with_conv = with_conv
+ self.fir_kernel = fir_kernel
+ self.out_ch = out_ch
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ if not self.fir:
+      h = F.interpolate(x, (H * 2, W * 2), mode='nearest')
+ if self.with_conv:
+ h = self.Conv_0(h)
+ else:
+ if not self.with_conv:
+ h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
+ else:
+ h = self.Conv2d_0(x)
+
+ return h
+
+
+class Downsample(nn.Module):
+ def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False,
+ fir_kernel=(1, 3, 3, 1)):
+ super().__init__()
+ out_ch = out_ch if out_ch else in_ch
+ if not fir:
+ if with_conv:
+ self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, padding=0)
+ else:
+ if with_conv:
+ self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch,
+ kernel=3, down=True,
+ resample_kernel=fir_kernel,
+ use_bias=True,
+ kernel_init=default_init())
+ self.fir = fir
+ self.fir_kernel = fir_kernel
+ self.with_conv = with_conv
+ self.out_ch = out_ch
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ if not self.fir:
+ if self.with_conv:
+ x = F.pad(x, (0, 1, 0, 1))
+ x = self.Conv_0(x)
+ else:
+ x = F.avg_pool2d(x, 2, stride=2)
+ else:
+ if not self.with_conv:
+ x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
+ else:
+ x = self.Conv2d_0(x)
+
+ return x
+
+
+class ResnetBlockDDPMpp(nn.Module):
+ """ResBlock adapted from DDPM."""
+
+ def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False,
+ dropout=0.1, skip_rescale=False, init_scale=0.):
+ super().__init__()
+ out_ch = out_ch if out_ch else in_ch
+ self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)
+ self.Conv_0 = conv3x3(in_ch, out_ch)
+ if temb_dim is not None:
+ self.Dense_0 = nn.Linear(temb_dim, out_ch)
+ self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape)
+ nn.init.zeros_(self.Dense_0.bias)
+ self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6)
+ self.Dropout_0 = nn.Dropout(dropout)
+ self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale)
+ if in_ch != out_ch:
+ if conv_shortcut:
+ self.Conv_2 = conv3x3(in_ch, out_ch)
+ else:
+ self.NIN_0 = NIN(in_ch, out_ch)
+
+ self.skip_rescale = skip_rescale
+ self.act = act
+ self.out_ch = out_ch
+ self.conv_shortcut = conv_shortcut
+
+ def forward(self, x, temb=None):
+ h = self.act(self.GroupNorm_0(x))
+ h = self.Conv_0(h)
+ if temb is not None:
+ h += self.Dense_0(self.act(temb))[:, :, None, None]
+ h = self.act(self.GroupNorm_1(h))
+ h = self.Dropout_0(h)
+ h = self.Conv_1(h)
+ if x.shape[1] != self.out_ch:
+ if self.conv_shortcut:
+ x = self.Conv_2(x)
+ else:
+ x = self.NIN_0(x)
+ if not self.skip_rescale:
+ return x + h
+ else:
+ return (x + h) / np.sqrt(2.)
+
+
+class ResnetBlockBigGANpp(nn.Module):
+ def __init__(self, act, in_ch, out_ch=None, temb_dim=None, up=False, down=False,
+ dropout=0.1, fir=False, fir_kernel=(1, 3, 3, 1),
+ skip_rescale=True, init_scale=0.):
+ super().__init__()
+
+ out_ch = out_ch if out_ch else in_ch
+ self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)
+ self.up = up
+ self.down = down
+ self.fir = fir
+ self.fir_kernel = fir_kernel
+
+ self.Conv_0 = conv3x3(in_ch, out_ch)
+ if temb_dim is not None:
+ self.Dense_0 = nn.Linear(temb_dim, out_ch)
+ self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape)
+ nn.init.zeros_(self.Dense_0.bias)
+
+ self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6)
+ self.Dropout_0 = nn.Dropout(dropout)
+ self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale)
+ if in_ch != out_ch or up or down:
+ self.Conv_2 = conv1x1(in_ch, out_ch)
+
+ self.skip_rescale = skip_rescale
+ self.act = act
+ self.in_ch = in_ch
+ self.out_ch = out_ch
+
+ def forward(self, x, temb=None):
+ h = self.act(self.GroupNorm_0(x))
+
+ if self.up:
+ if self.fir:
+ h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2)
+ x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
+ else:
+ h = up_or_down_sampling.naive_upsample_2d(h, factor=2)
+ x = up_or_down_sampling.naive_upsample_2d(x, factor=2)
+ elif self.down:
+ if self.fir:
+ h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2)
+ x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
+ else:
+ h = up_or_down_sampling.naive_downsample_2d(h, factor=2)
+ x = up_or_down_sampling.naive_downsample_2d(x, factor=2)
+
+ h = self.Conv_0(h)
+ # Add bias to each feature map conditioned on the time embedding
+ if temb is not None:
+ h += self.Dense_0(self.act(temb))[:, :, None, None]
+ h = self.act(self.GroupNorm_1(h))
+ h = self.Dropout_0(h)
+ h = self.Conv_1(h)
+
+ if self.in_ch != self.out_ch or self.up or self.down:
+ x = self.Conv_2(x)
+
+ if not self.skip_rescale:
+ return x + h
+ else:
+ return (x + h) / np.sqrt(2.)
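
A small sketch (not part of the diff) of the Gaussian Fourier projection above: it maps a batch of (log) noise levels to `2 * embedding_size` random Fourier features, which NCSN++ then passes through two `nn.Linear` layers when `conditional` is set. The `scale` value here is a placeholder for `config.model.fourier_scale`.

```python
import torch

from case_studies.diffpure.score_sde.models.layerspp import GaussianFourierProjection

proj = GaussianFourierProjection(embedding_size=128, scale=16.0)  # scale: placeholder hyperparameter
sigmas = torch.rand(8) + 0.01                    # placeholder noise levels
temb = proj(torch.log(sigmas))
print(temb.shape)                                # torch.Size([8, 256]) == (batch, 2 * embedding_size)
```
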
diff --git a/case_studies/diffpure/score_sde/models/ncsnpp.py b/case_studies/diffpure/score_sde/models/ncsnpp.py
new file mode 100644
index 0000000..0557eb0
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/ncsnpp.py
@@ -0,0 +1,381 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+
+from . import utils, layers, layerspp, normalization
+import torch.nn as nn
+import functools
+import torch
+import numpy as np
+
+ResnetBlockDDPM = layerspp.ResnetBlockDDPMpp
+ResnetBlockBigGAN = layerspp.ResnetBlockBigGANpp
+Combine = layerspp.Combine
+conv3x3 = layerspp.conv3x3
+conv1x1 = layerspp.conv1x1
+get_act = layers.get_act
+get_normalization = normalization.get_normalization
+default_initializer = layers.default_init
+
+
+@utils.register_model(name='ncsnpp')
+class NCSNpp(nn.Module):
+ """NCSN++ model"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.act = act = get_act(config)
+ self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))
+
+ self.nf = nf = config.model.nf
+ ch_mult = config.model.ch_mult
+ self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
+ self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
+ dropout = config.model.dropout
+ resamp_with_conv = config.model.resamp_with_conv
+ self.num_resolutions = num_resolutions = len(ch_mult)
+ self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]
+
+ self.conditional = conditional = config.model.conditional # noise-conditional
+ fir = config.model.fir
+ fir_kernel = config.model.fir_kernel
+ self.skip_rescale = skip_rescale = config.model.skip_rescale
+ self.resblock_type = resblock_type = config.model.resblock_type.lower()
+ self.progressive = progressive = config.model.progressive.lower()
+ self.progressive_input = progressive_input = config.model.progressive_input.lower()
+ self.embedding_type = embedding_type = config.model.embedding_type.lower()
+ init_scale = config.model.init_scale
+ assert progressive in ['none', 'output_skip', 'residual']
+ assert progressive_input in ['none', 'input_skip', 'residual']
+ assert embedding_type in ['fourier', 'positional']
+ combine_method = config.model.progressive_combine.lower()
+ combiner = functools.partial(Combine, method=combine_method)
+
+ modules = []
+ # timestep/noise_level embedding; only for continuous training
+ if embedding_type == 'fourier':
+ # Gaussian Fourier features embeddings.
+ assert config.training.continuous, "Fourier features are only used for continuous training."
+
+ modules.append(layerspp.GaussianFourierProjection(
+ embedding_size=nf, scale=config.model.fourier_scale
+ ))
+ embed_dim = 2 * nf
+
+ elif embedding_type == 'positional':
+ embed_dim = nf
+
+ else:
+ raise ValueError(f'embedding type {embedding_type} unknown.')
+
+ if conditional:
+ modules.append(nn.Linear(embed_dim, nf * 4))
+ modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
+ nn.init.zeros_(modules[-1].bias)
+ modules.append(nn.Linear(nf * 4, nf * 4))
+ modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
+ nn.init.zeros_(modules[-1].bias)
+
+ AttnBlock = functools.partial(layerspp.AttnBlockpp,
+ init_scale=init_scale,
+ skip_rescale=skip_rescale)
+
+ Upsample = functools.partial(layerspp.Upsample,
+ with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)
+
+ if progressive == 'output_skip':
+ self.pyramid_upsample = layerspp.Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
+ elif progressive == 'residual':
+ pyramid_upsample = functools.partial(layerspp.Upsample,
+ fir=fir, fir_kernel=fir_kernel, with_conv=True)
+
+ Downsample = functools.partial(layerspp.Downsample,
+ with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)
+
+ if progressive_input == 'input_skip':
+ self.pyramid_downsample = layerspp.Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
+ elif progressive_input == 'residual':
+ pyramid_downsample = functools.partial(layerspp.Downsample,
+ fir=fir, fir_kernel=fir_kernel, with_conv=True)
+
+ if resblock_type == 'ddpm':
+ ResnetBlock = functools.partial(ResnetBlockDDPM,
+ act=act,
+ dropout=dropout,
+ init_scale=init_scale,
+ skip_rescale=skip_rescale,
+ temb_dim=nf * 4)
+
+ elif resblock_type == 'biggan':
+ ResnetBlock = functools.partial(ResnetBlockBigGAN,
+ act=act,
+ dropout=dropout,
+ fir=fir,
+ fir_kernel=fir_kernel,
+ init_scale=init_scale,
+ skip_rescale=skip_rescale,
+ temb_dim=nf * 4)
+
+ else:
+ raise ValueError(f'resblock type {resblock_type} unrecognized.')
+
+ # Downsampling block
+
+ channels = config.data.num_channels
+ if progressive_input != 'none':
+ input_pyramid_ch = channels
+
+ modules.append(conv3x3(channels, nf))
+ hs_c = [nf]
+
+ in_ch = nf
+ for i_level in range(num_resolutions):
+ # Residual blocks for this resolution
+ for i_block in range(num_res_blocks):
+ out_ch = nf * ch_mult[i_level]
+ modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
+ in_ch = out_ch
+
+ if all_resolutions[i_level] in attn_resolutions:
+ modules.append(AttnBlock(channels=in_ch))
+ hs_c.append(in_ch)
+
+ if i_level != num_resolutions - 1:
+ if resblock_type == 'ddpm':
+ modules.append(Downsample(in_ch=in_ch))
+ else:
+ modules.append(ResnetBlock(down=True, in_ch=in_ch))
+
+ if progressive_input == 'input_skip':
+ modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch))
+ if combine_method == 'cat':
+ in_ch *= 2
+
+ elif progressive_input == 'residual':
+ modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch))
+ input_pyramid_ch = in_ch
+
+ hs_c.append(in_ch)
+
+ in_ch = hs_c[-1]
+ modules.append(ResnetBlock(in_ch=in_ch))
+ modules.append(AttnBlock(channels=in_ch))
+ modules.append(ResnetBlock(in_ch=in_ch))
+
+ pyramid_ch = 0
+ # Upsampling block
+ for i_level in reversed(range(num_resolutions)):
+ for i_block in range(num_res_blocks + 1):
+ out_ch = nf * ch_mult[i_level]
+ modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(),
+ out_ch=out_ch))
+ in_ch = out_ch
+
+ if all_resolutions[i_level] in attn_resolutions:
+ modules.append(AttnBlock(channels=in_ch))
+
+ if progressive != 'none':
+ if i_level == num_resolutions - 1:
+ if progressive == 'output_skip':
+ modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
+ num_channels=in_ch, eps=1e-6))
+ modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
+ pyramid_ch = channels
+ elif progressive == 'residual':
+ modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
+ num_channels=in_ch, eps=1e-6))
+ modules.append(conv3x3(in_ch, in_ch, bias=True))
+ pyramid_ch = in_ch
+ else:
+ raise ValueError(f'{progressive} is not a valid name.')
+ else:
+ if progressive == 'output_skip':
+ modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
+ num_channels=in_ch, eps=1e-6))
+ modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale))
+ pyramid_ch = channels
+ elif progressive == 'residual':
+ modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch))
+ pyramid_ch = in_ch
+ else:
+ raise ValueError(f'{progressive} is not a valid name')
+
+ if i_level != 0:
+ if resblock_type == 'ddpm':
+ modules.append(Upsample(in_ch=in_ch))
+ else:
+ modules.append(ResnetBlock(in_ch=in_ch, up=True))
+
+ assert not hs_c
+
+ if progressive != 'output_skip':
+ modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
+ num_channels=in_ch, eps=1e-6))
+ modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
+
+ self.all_modules = nn.ModuleList(modules)
+
+ def forward(self, x, time_cond):
+ # timestep/noise_level embedding; only for continuous training
+ modules = self.all_modules
+ m_idx = 0
+ if self.embedding_type == 'fourier':
+ # Gaussian Fourier features embeddings.
+ used_sigmas = time_cond
+ temb = modules[m_idx](torch.log(used_sigmas))
+ m_idx += 1
+
+ elif self.embedding_type == 'positional':
+ # Sinusoidal positional embeddings.
+ timesteps = time_cond
+ used_sigmas = self.sigmas[time_cond.long()]
+ temb = layers.get_timestep_embedding(timesteps, self.nf)
+
+ else:
+ raise ValueError(f'embedding type {self.embedding_type} unknown.')
+
+ if self.conditional:
+ temb = modules[m_idx](temb)
+ m_idx += 1
+ temb = modules[m_idx](self.act(temb))
+ m_idx += 1
+ else:
+ temb = None
+
+ if not self.config.data.centered:
+ # If input data is in [0, 1]
+ x = 2 * x - 1.
+
+ # Downsampling block
+ input_pyramid = None
+ if self.progressive_input != 'none':
+ input_pyramid = x
+
+ hs = [modules[m_idx](x)]
+ m_idx += 1
+ for i_level in range(self.num_resolutions):
+ # Residual blocks for this resolution
+ for i_block in range(self.num_res_blocks):
+ h = modules[m_idx](hs[-1], temb)
+ m_idx += 1
+ if h.shape[-1] in self.attn_resolutions:
+ h = modules[m_idx](h)
+ m_idx += 1
+
+ hs.append(h)
+
+ if i_level != self.num_resolutions - 1:
+ if self.resblock_type == 'ddpm':
+ h = modules[m_idx](hs[-1])
+ m_idx += 1
+ else:
+ h = modules[m_idx](hs[-1], temb)
+ m_idx += 1
+
+ if self.progressive_input == 'input_skip':
+ input_pyramid = self.pyramid_downsample(input_pyramid)
+ h = modules[m_idx](input_pyramid, h)
+ m_idx += 1
+
+ elif self.progressive_input == 'residual':
+ input_pyramid = modules[m_idx](input_pyramid)
+ m_idx += 1
+ if self.skip_rescale:
+ input_pyramid = (input_pyramid + h) / np.sqrt(2.)
+ else:
+ input_pyramid = input_pyramid + h
+ h = input_pyramid
+
+ hs.append(h)
+
+ h = hs[-1]
+ h = modules[m_idx](h, temb)
+ m_idx += 1
+ h = modules[m_idx](h)
+ m_idx += 1
+ h = modules[m_idx](h, temb)
+ m_idx += 1
+
+ pyramid = None
+
+ # Upsampling block
+ for i_level in reversed(range(self.num_resolutions)):
+ for i_block in range(self.num_res_blocks + 1):
+ h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
+ m_idx += 1
+
+ if h.shape[-1] in self.attn_resolutions:
+ h = modules[m_idx](h)
+ m_idx += 1
+
+ if self.progressive != 'none':
+ if i_level == self.num_resolutions - 1:
+ if self.progressive == 'output_skip':
+ pyramid = self.act(modules[m_idx](h))
+ m_idx += 1
+ pyramid = modules[m_idx](pyramid)
+ m_idx += 1
+ elif self.progressive == 'residual':
+ pyramid = self.act(modules[m_idx](h))
+ m_idx += 1
+ pyramid = modules[m_idx](pyramid)
+ m_idx += 1
+ else:
+ raise ValueError(f'{self.progressive} is not a valid name.')
+ else:
+ if self.progressive == 'output_skip':
+ pyramid = self.pyramid_upsample(pyramid)
+ pyramid_h = self.act(modules[m_idx](h))
+ m_idx += 1
+ pyramid_h = modules[m_idx](pyramid_h)
+ m_idx += 1
+ pyramid = pyramid + pyramid_h
+ elif self.progressive == 'residual':
+ pyramid = modules[m_idx](pyramid)
+ m_idx += 1
+ if self.skip_rescale:
+ pyramid = (pyramid + h) / np.sqrt(2.)
+ else:
+ pyramid = pyramid + h
+ h = pyramid
+ else:
+ raise ValueError(f'{self.progressive} is not a valid name')
+
+ if i_level != 0:
+ if self.resblock_type == 'ddpm':
+ h = modules[m_idx](h)
+ m_idx += 1
+ else:
+ h = modules[m_idx](h, temb)
+ m_idx += 1
+
+ assert not hs
+
+ if self.progressive == 'output_skip':
+ h = pyramid
+ else:
+ h = self.act(modules[m_idx](h))
+ m_idx += 1
+ h = modules[m_idx](h)
+ m_idx += 1
+
+ assert m_idx == len(modules)
+ if self.config.model.scale_by_sigma:
+ used_sigmas = used_sigmas.reshape((x.shape[0], *([1] * len(x.shape[1:]))))
+ h = h / used_sigmas
+
+ return h
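
The `@utils.register_model(name=...)` decorators in this diff register each architecture under a string key so it can later be built from a config. The sketch below only illustrates that pattern; it is not the repository's actual `score_sde/models/utils.py`, and `ToyModel`/`get_model` are made up here.

```python
_MODELS = {}


def register_model(name):
    """Register a model class under a string key (illustrative only)."""
    def _register(cls):
        if name in _MODELS:
            raise ValueError(f"model {name!r} already registered")
        _MODELS[name] = cls
        return cls
    return _register


def get_model(name):
    return _MODELS[name]


@register_model(name="toy")
class ToyModel:
    def __init__(self, config):
        self.config = config


model_cls = get_model("toy")           # e.g. looked up from config.model.name
model = model_cls(config={"nf": 128})
```
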
diff --git a/case_studies/diffpure/score_sde/models/ncsnv2.py b/case_studies/diffpure/score_sde/models/ncsnv2.py
new file mode 100644
index 0000000..5302cc9
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/ncsnv2.py
@@ -0,0 +1,416 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+"""The NCSNv2 model."""
+import torch
+import torch.nn as nn
+import functools
+
+from .utils import get_sigmas, register_model
+from .layers import (CondRefineBlock, RefineBlock, ResidualBlock, ncsn_conv3x3,
+ ConditionalResidualBlock, get_act)
+from .normalization import get_normalization
+
+CondResidualBlock = ConditionalResidualBlock
+conv3x3 = ncsn_conv3x3
+
+
+def get_network(config):
+ if config.data.image_size < 96:
+ return functools.partial(NCSNv2, config=config)
+ elif 96 <= config.data.image_size <= 128:
+ return functools.partial(NCSNv2_128, config=config)
+ elif 128 < config.data.image_size <= 256:
+ return functools.partial(NCSNv2_256, config=config)
+ else:
+ raise NotImplementedError(
+ f'No network suitable for {config.data.image_size}px implemented yet.')
+
+
+@register_model(name='ncsnv2_64')
+class NCSNv2(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.centered = config.data.centered
+ self.norm = get_normalization(config)
+ self.nf = nf = config.model.nf
+
+ self.act = act = get_act(config)
+ self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
+ self.config = config
+
+ self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
+
+ self.normalizer = self.norm(nf, config.model.num_scales)
+ self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
+
+ self.res1 = nn.ModuleList([
+ ResidualBlock(self.nf, self.nf, resample=None, act=act,
+ normalization=self.norm),
+ ResidualBlock(self.nf, self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res2 = nn.ModuleList([
+ ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res3 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm, dilation=2),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=2)]
+ )
+
+ if config.data.image_size == 28:
+ self.res4 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm, adjust_padding=True, dilation=4),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=4)]
+ )
+ else:
+ self.res4 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm, adjust_padding=False, dilation=4),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=4)]
+ )
+
+ self.refine1 = RefineBlock([2 * self.nf], 2 * self.nf, act=act, start=True)
+ self.refine2 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
+ self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
+ self.refine4 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
+
+ def _compute_cond_module(self, module, x):
+ for m in module:
+ x = m(x)
+ return x
+
+ def forward(self, x, y):
+ if not self.centered:
+ h = 2 * x - 1.
+ else:
+ h = x
+
+ output = self.begin_conv(h)
+
+ layer1 = self._compute_cond_module(self.res1, output)
+ layer2 = self._compute_cond_module(self.res2, layer1)
+ layer3 = self._compute_cond_module(self.res3, layer2)
+ layer4 = self._compute_cond_module(self.res4, layer3)
+
+ ref1 = self.refine1([layer4], layer4.shape[2:])
+ ref2 = self.refine2([layer3, ref1], layer3.shape[2:])
+ ref3 = self.refine3([layer2, ref2], layer2.shape[2:])
+ output = self.refine4([layer1, ref3], layer1.shape[2:])
+
+ output = self.normalizer(output)
+ output = self.act(output)
+ output = self.end_conv(output)
+
+ used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
+
+ output = output / used_sigmas
+
+ return output
+
+
+@register_model(name='ncsn')
+class NCSN(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.centered = config.data.centered
+ self.norm = get_normalization(config)
+ self.nf = nf = config.model.nf
+ self.act = act = get_act(config)
+ self.config = config
+
+ self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
+
+ self.normalizer = self.norm(nf, config.model.num_scales)
+ self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
+
+ self.res1 = nn.ModuleList([
+ ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
+ normalization=self.norm),
+ ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res2 = nn.ModuleList([
+ ConditionalResidualBlock(self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
+ normalization=self.norm),
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res3 = nn.ModuleList([
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
+ normalization=self.norm, dilation=2),
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
+ normalization=self.norm, dilation=2)]
+ )
+
+ if config.data.image_size == 28:
+ self.res4 = nn.ModuleList([
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
+ normalization=self.norm, adjust_padding=True, dilation=4),
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
+ normalization=self.norm, dilation=4)]
+ )
+ else:
+ self.res4 = nn.ModuleList([
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
+ normalization=self.norm, adjust_padding=False, dilation=4),
+ ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
+ normalization=self.norm, dilation=4)]
+ )
+
+ self.refine1 = CondRefineBlock([2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act, start=True)
+ self.refine2 = CondRefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act)
+ self.refine3 = CondRefineBlock([2 * self.nf, 2 * self.nf], self.nf, config.model.num_scales, self.norm, act=act)
+ self.refine4 = CondRefineBlock([self.nf, self.nf], self.nf, config.model.num_scales, self.norm, act=act, end=True)
+
+ def _compute_cond_module(self, module, x, y):
+ for m in module:
+ x = m(x, y)
+ return x
+
+ def forward(self, x, y):
+ if not self.centered:
+ h = 2 * x - 1.
+ else:
+ h = x
+
+ output = self.begin_conv(h)
+
+ layer1 = self._compute_cond_module(self.res1, output, y)
+ layer2 = self._compute_cond_module(self.res2, layer1, y)
+ layer3 = self._compute_cond_module(self.res3, layer2, y)
+ layer4 = self._compute_cond_module(self.res4, layer3, y)
+
+ ref1 = self.refine1([layer4], y, layer4.shape[2:])
+ ref2 = self.refine2([layer3, ref1], y, layer3.shape[2:])
+ ref3 = self.refine3([layer2, ref2], y, layer2.shape[2:])
+ output = self.refine4([layer1, ref3], y, layer1.shape[2:])
+
+ output = self.normalizer(output, y)
+ output = self.act(output)
+ output = self.end_conv(output)
+
+ return output
+
+
+@register_model(name='ncsnv2_128')
+class NCSNv2_128(nn.Module):
+ """NCSNv2 model architecture for 128px images."""
+ def __init__(self, config):
+ super().__init__()
+ self.centered = config.data.centered
+ self.norm = get_normalization(config)
+ self.nf = nf = config.model.nf
+ self.act = act = get_act(config)
+ self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
+ self.config = config
+
+ self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
+ self.normalizer = self.norm(nf, config.model.num_scales)
+
+ self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
+
+ self.res1 = nn.ModuleList([
+ ResidualBlock(self.nf, self.nf, resample=None, act=act,
+ normalization=self.norm),
+ ResidualBlock(self.nf, self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res2 = nn.ModuleList([
+ ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res3 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res4 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
+ normalization=self.norm, dilation=2),
+ ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=2)]
+ )
+
+ self.res5 = nn.ModuleList([
+ ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
+ normalization=self.norm, dilation=4),
+ ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=4)]
+ )
+
+ self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
+ self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
+ self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
+ self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
+ self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
+
+ def _compute_cond_module(self, module, x):
+ for m in module:
+ x = m(x)
+ return x
+
+ def forward(self, x, y):
+ if not self.centered:
+ h = 2 * x - 1.
+ else:
+ h = x
+
+ output = self.begin_conv(h)
+
+ layer1 = self._compute_cond_module(self.res1, output)
+ layer2 = self._compute_cond_module(self.res2, layer1)
+ layer3 = self._compute_cond_module(self.res3, layer2)
+ layer4 = self._compute_cond_module(self.res4, layer3)
+ layer5 = self._compute_cond_module(self.res5, layer4)
+
+ ref1 = self.refine1([layer5], layer5.shape[2:])
+ ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
+ ref3 = self.refine3([layer3, ref2], layer3.shape[2:])
+ ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
+ output = self.refine5([layer1, ref4], layer1.shape[2:])
+
+ output = self.normalizer(output)
+ output = self.act(output)
+ output = self.end_conv(output)
+
+ used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
+
+ output = output / used_sigmas
+
+ return output
+
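+# Sketch: dividing by `used_sigmas` implements the NCSNv2 parameterization
+# s_theta(x, sigma_i) = s_theta(x) / sigma_i, so here `y` indexes the noise level whose
+# sigma rescales the otherwise unconditional network output.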
+
+@register_model(name='ncsnv2_256')
+class NCSNv2_256(nn.Module):
+ """NCSNv2 model architecture for 256px images."""
+ def __init__(self, config):
+ super().__init__()
+ self.centered = config.data.centered
+ self.norm = get_normalization(config)
+ self.nf = nf = config.model.nf
+ self.act = act = get_act(config)
+ self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
+ self.config = config
+
+ self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
+ self.normalizer = self.norm(nf, config.model.num_scales)
+
+ self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
+
+ self.res1 = nn.ModuleList([
+ ResidualBlock(self.nf, self.nf, resample=None, act=act,
+ normalization=self.norm),
+ ResidualBlock(self.nf, self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res2 = nn.ModuleList([
+ ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res3 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res31 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
+ normalization=self.norm),
+ ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
+ normalization=self.norm)]
+ )
+
+ self.res4 = nn.ModuleList([
+ ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
+ normalization=self.norm, dilation=2),
+ ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=2)]
+ )
+
+ self.res5 = nn.ModuleList([
+ ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
+ normalization=self.norm, dilation=4),
+ ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
+ normalization=self.norm, dilation=4)]
+ )
+
+ self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
+ self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
+ self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
+ self.refine31 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
+ self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
+ self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
+
+ def _compute_cond_module(self, module, x):
+ for m in module:
+ x = m(x)
+ return x
+
+ def forward(self, x, y):
+ if not self.centered:
+ h = 2 * x - 1.
+ else:
+ h = x
+
+ output = self.begin_conv(h)
+
+ layer1 = self._compute_cond_module(self.res1, output)
+ layer2 = self._compute_cond_module(self.res2, layer1)
+ layer3 = self._compute_cond_module(self.res3, layer2)
+ layer31 = self._compute_cond_module(self.res31, layer3)
+ layer4 = self._compute_cond_module(self.res4, layer31)
+ layer5 = self._compute_cond_module(self.res5, layer4)
+
+ ref1 = self.refine1([layer5], layer5.shape[2:])
+ ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
+ ref31 = self.refine31([layer31, ref2], layer31.shape[2:])
+ ref3 = self.refine3([layer3, ref31], layer3.shape[2:])
+ ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
+ output = self.refine5([layer1, ref4], layer1.shape[2:])
+
+ output = self.normalizer(output)
+ output = self.act(output)
+ output = self.end_conv(output)
+
+ used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
+
+ output = output / used_sigmas
+
+ return output
\ No newline at end of file
diff --git a/case_studies/diffpure/score_sde/models/normalization.py b/case_studies/diffpure/score_sde/models/normalization.py
new file mode 100644
index 0000000..9a23204
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/normalization.py
@@ -0,0 +1,215 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Normalization layers."""
+import torch.nn as nn
+import torch
+import functools
+
+
+def get_normalization(config, conditional=False):
+ """Obtain normalization modules from the config file."""
+ norm = config.model.normalization
+ if conditional:
+ if norm == 'InstanceNorm++':
+ return functools.partial(ConditionalInstanceNorm2dPlus, num_classes=config.model.num_classes)
+ else:
+ raise NotImplementedError(f'{norm} not implemented yet.')
+ else:
+ if norm == 'InstanceNorm':
+ return nn.InstanceNorm2d
+ elif norm == 'InstanceNorm++':
+ return InstanceNorm2dPlus
+ elif norm == 'VarianceNorm':
+ return VarianceNorm2d
+ elif norm == 'GroupNorm':
+ return nn.GroupNorm
+ else:
+ raise ValueError('Unknown normalization: %s' % norm)
+
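+# Usage sketch: NCSNv2-style configs typically set config.model.normalization to
+# 'InstanceNorm++', in which case this returns InstanceNorm2dPlus (or, with
+# conditional=True, ConditionalInstanceNorm2dPlus parameterized by config.model.num_classes).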
+
+class ConditionalBatchNorm2d(nn.Module):
+ def __init__(self, num_features, num_classes, bias=True):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ self.bn = nn.BatchNorm2d(num_features, affine=False)
+ if self.bias:
+ self.embed = nn.Embedding(num_classes, num_features * 2)
+      self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
+ self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
+ else:
+ self.embed = nn.Embedding(num_classes, num_features)
+ self.embed.weight.data.uniform_()
+
+ def forward(self, x, y):
+ out = self.bn(x)
+ if self.bias:
+ gamma, beta = self.embed(y).chunk(2, dim=1)
+ out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
+ else:
+ gamma = self.embed(y)
+ out = gamma.view(-1, self.num_features, 1, 1) * out
+ return out
+
+
+class ConditionalInstanceNorm2d(nn.Module):
+ def __init__(self, num_features, num_classes, bias=True):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
+ if bias:
+ self.embed = nn.Embedding(num_classes, num_features * 2)
+      self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
+ self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
+ else:
+ self.embed = nn.Embedding(num_classes, num_features)
+ self.embed.weight.data.uniform_()
+
+ def forward(self, x, y):
+ h = self.instance_norm(x)
+ if self.bias:
+ gamma, beta = self.embed(y).chunk(2, dim=-1)
+ out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
+ else:
+ gamma = self.embed(y)
+ out = gamma.view(-1, self.num_features, 1, 1) * h
+ return out
+
+
+class ConditionalVarianceNorm2d(nn.Module):
+ def __init__(self, num_features, num_classes, bias=False):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ self.embed = nn.Embedding(num_classes, num_features)
+ self.embed.weight.data.normal_(1, 0.02)
+
+ def forward(self, x, y):
+ vars = torch.var(x, dim=(2, 3), keepdim=True)
+ h = x / torch.sqrt(vars + 1e-5)
+
+ gamma = self.embed(y)
+ out = gamma.view(-1, self.num_features, 1, 1) * h
+ return out
+
+
+class VarianceNorm2d(nn.Module):
+ def __init__(self, num_features, bias=False):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ self.alpha = nn.Parameter(torch.zeros(num_features))
+ self.alpha.data.normal_(1, 0.02)
+
+ def forward(self, x):
+ vars = torch.var(x, dim=(2, 3), keepdim=True)
+ h = x / torch.sqrt(vars + 1e-5)
+
+ out = self.alpha.view(-1, self.num_features, 1, 1) * h
+ return out
+
+
+class ConditionalNoneNorm2d(nn.Module):
+ def __init__(self, num_features, num_classes, bias=True):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ if bias:
+ self.embed = nn.Embedding(num_classes, num_features * 2)
+      self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
+ self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
+ else:
+ self.embed = nn.Embedding(num_classes, num_features)
+ self.embed.weight.data.uniform_()
+
+ def forward(self, x, y):
+ if self.bias:
+ gamma, beta = self.embed(y).chunk(2, dim=-1)
+ out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view(-1, self.num_features, 1, 1)
+ else:
+ gamma = self.embed(y)
+ out = gamma.view(-1, self.num_features, 1, 1) * x
+ return out
+
+
+class NoneNorm2d(nn.Module):
+ def __init__(self, num_features, bias=True):
+ super().__init__()
+
+ def forward(self, x):
+ return x
+
+
+class InstanceNorm2dPlus(nn.Module):
+ def __init__(self, num_features, bias=True):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
+ self.alpha = nn.Parameter(torch.zeros(num_features))
+ self.gamma = nn.Parameter(torch.zeros(num_features))
+ self.alpha.data.normal_(1, 0.02)
+ self.gamma.data.normal_(1, 0.02)
+ if bias:
+ self.beta = nn.Parameter(torch.zeros(num_features))
+
+ def forward(self, x):
+ means = torch.mean(x, dim=(2, 3))
+ m = torch.mean(means, dim=-1, keepdim=True)
+ v = torch.var(means, dim=-1, keepdim=True)
+ means = (means - m) / (torch.sqrt(v + 1e-5))
+ h = self.instance_norm(x)
+
+ if self.bias:
+ h = h + means[..., None, None] * self.alpha[..., None, None]
+ out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1)
+ else:
+ h = h + means[..., None, None] * self.alpha[..., None, None]
+ out = self.gamma.view(-1, self.num_features, 1, 1) * h
+ return out
+
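+# Sketch: InstanceNorm++ computes out = gamma * (IN(x) + alpha * m_hat) + beta, where
+# m_hat is the per-channel spatial mean standardized across channels; re-injecting the
+# means preserves information that plain instance norm would otherwise discard.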
+
+class ConditionalInstanceNorm2dPlus(nn.Module):
+ def __init__(self, num_features, num_classes, bias=True):
+ super().__init__()
+ self.num_features = num_features
+ self.bias = bias
+ self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
+ if bias:
+ self.embed = nn.Embedding(num_classes, num_features * 3)
+ self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
+ self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0
+ else:
+ self.embed = nn.Embedding(num_classes, 2 * num_features)
+ self.embed.weight.data.normal_(1, 0.02)
+
+ def forward(self, x, y):
+ means = torch.mean(x, dim=(2, 3))
+ m = torch.mean(means, dim=-1, keepdim=True)
+ v = torch.var(means, dim=-1, keepdim=True)
+ means = (means - m) / (torch.sqrt(v + 1e-5))
+ h = self.instance_norm(x)
+
+ if self.bias:
+ gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
+ h = h + means[..., None, None] * alpha[..., None, None]
+ out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
+ else:
+ gamma, alpha = self.embed(y).chunk(2, dim=-1)
+ h = h + means[..., None, None] * alpha[..., None, None]
+ out = gamma.view(-1, self.num_features, 1, 1) * h
+ return out
diff --git a/case_studies/diffpure/score_sde/models/up_or_down_sampling.py b/case_studies/diffpure/score_sde/models/up_or_down_sampling.py
new file mode 100644
index 0000000..e5a88d5
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/up_or_down_sampling.py
@@ -0,0 +1,265 @@
+# ---------------------------------------------------------------
+# Taken from the following link as is from:
+# https://github.com/yang-song/score_sde_pytorch/blob/main/models/up_or_down_sampling.py
+#
+# The license for the original version of this file can be
+# found in the `score_sde` directory (LICENSE_SCORE_SDE).
+# ---------------------------------------------------------------
+
+"""Layers used for up-sampling or down-sampling images.
+
+Many functions are ported from https://github.com/NVlabs/stylegan2.
+"""
+
+import torch.nn as nn
+import torch
+import torch.nn.functional as F
+import numpy as np
+from ..op import upfirdn2d
+
+
+# Function ported from StyleGAN2
+def get_weight(module,
+ shape,
+ weight_var='weight',
+ kernel_init=None):
+ """Get/create weight tensor for a convolution or fully-connected layer."""
+
+ return module.param(weight_var, kernel_init, shape)
+
+
+class Conv2d(nn.Module):
+ """Conv2d layer with optimal upsampling and downsampling (StyleGAN2)."""
+
+ def __init__(self, in_ch, out_ch, kernel, up=False, down=False,
+ resample_kernel=(1, 3, 3, 1),
+ use_bias=True,
+ kernel_init=None):
+ super().__init__()
+ assert not (up and down)
+ assert kernel >= 1 and kernel % 2 == 1
+ self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel))
+ if kernel_init is not None:
+ self.weight.data = kernel_init(self.weight.data.shape)
+ if use_bias:
+ self.bias = nn.Parameter(torch.zeros(out_ch))
+
+ self.up = up
+ self.down = down
+ self.resample_kernel = resample_kernel
+ self.kernel = kernel
+ self.use_bias = use_bias
+
+ def forward(self, x):
+ if self.up:
+ x = upsample_conv_2d(x, self.weight, k=self.resample_kernel)
+ elif self.down:
+ x = conv_downsample_2d(x, self.weight, k=self.resample_kernel)
+ else:
+ x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2)
+
+ if self.use_bias:
+ x = x + self.bias.reshape(1, -1, 1, 1)
+
+ return x
+
+
+def naive_upsample_2d(x, factor=2):
+ _N, C, H, W = x.shape
+ x = torch.reshape(x, (-1, C, H, 1, W, 1))
+ x = x.repeat(1, 1, 1, factor, 1, factor)
+ return torch.reshape(x, (-1, C, H * factor, W * factor))
+
+
+def naive_downsample_2d(x, factor=2):
+ _N, C, H, W = x.shape
+ x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor))
+ return torch.mean(x, dim=(3, 5))
+
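+# Sketch: naive_upsample_2d repeats each pixel into a factor x factor block
+# (nearest-neighbour), and naive_downsample_2d averages non-overlapping
+# factor x factor blocks, mapping (H, W) to (H * factor, W * factor) and
+# (H // factor, W // factor) respectively.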
+
+def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
+ """Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
+
+  Padding is performed only once at the beginning, not between the operations.
+  The fused op is considerably more efficient than performing the same
+  calculation using standard TensorFlow ops. It supports gradients of
+  arbitrary order.
+ Args:
+ x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
+ C]`.
+ w: Weight tensor of the shape `[filterH, filterW, inChannels,
+ outChannels]`. Grouped convolution can be performed by `inChannels =
+ x.shape[0] // numGroups`.
+ k: FIR filter of the shape `[firH, firW]` or `[firN]`
+ (separable). The default is `[1] * factor`, which corresponds to
+ nearest-neighbor upsampling.
+ factor: Integer upsampling factor (default: 2).
+ gain: Scaling factor for signal magnitude (default: 1.0).
+
+ Returns:
+ Tensor of the shape `[N, C, H * factor, W * factor]` or
+ `[N, H * factor, W * factor, C]`, and same datatype as `x`.
+ """
+
+ assert isinstance(factor, int) and factor >= 1
+
+ # Check weight shape.
+ assert len(w.shape) == 4
+ convH = w.shape[2]
+ convW = w.shape[3]
+ inC = w.shape[1]
+ outC = w.shape[0]
+
+ assert convW == convH
+
+ # Setup filter kernel.
+ if k is None:
+ k = [1] * factor
+ k = _setup_kernel(k) * (gain * (factor ** 2))
+ p = (k.shape[0] - factor) - (convW - 1)
+
+ stride = (factor, factor)
+
+ # Determine data dimensions.
+ stride = [1, 1, factor, factor]
+ output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW)
+ output_padding = (output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH,
+ output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW)
+ assert output_padding[0] >= 0 and output_padding[1] >= 0
+ num_groups = _shape(x, 1) // inC
+
+ # Transpose weights.
+ w = torch.reshape(w, (num_groups, -1, inC, convH, convW))
+ w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)
+ w = torch.reshape(w, (num_groups * inC, -1, convH, convW))
+
+ x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0)
+ ## Original TF code.
+ # x = tf.nn.conv2d_transpose(
+ # x,
+ # w,
+ # output_shape=output_shape,
+ # strides=stride,
+ # padding='VALID',
+ # data_format=data_format)
+ ## JAX equivalent
+
+ return upfirdn2d(x, torch.tensor(k, device=x.device),
+ pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
+
+
+def conv_downsample_2d(x, w, k=None, factor=2, gain=1):
+ """Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
+
+ Padding is performed only once at the beginning, not between the operations.
+  The fused op is considerably more efficient than performing the same
+  calculation using standard TensorFlow ops. It supports gradients of
+  arbitrary order.
+ Args:
+ x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
+ C]`.
+ w: Weight tensor of the shape `[filterH, filterW, inChannels,
+ outChannels]`. Grouped convolution can be performed by `inChannels =
+ x.shape[0] // numGroups`.
+ k: FIR filter of the shape `[firH, firW]` or `[firN]`
+ (separable). The default is `[1] * factor`, which corresponds to
+ average pooling.
+ factor: Integer downsampling factor (default: 2).
+ gain: Scaling factor for signal magnitude (default: 1.0).
+
+ Returns:
+ Tensor of the shape `[N, C, H // factor, W // factor]` or
+ `[N, H // factor, W // factor, C]`, and same datatype as `x`.
+ """
+
+ assert isinstance(factor, int) and factor >= 1
+ _outC, _inC, convH, convW = w.shape
+ assert convW == convH
+ if k is None:
+ k = [1] * factor
+ k = _setup_kernel(k) * gain
+ p = (k.shape[0] - factor) + (convW - 1)
+ s = [factor, factor]
+ x = upfirdn2d(x, torch.tensor(k, device=x.device),
+ pad=((p + 1) // 2, p // 2))
+ return F.conv2d(x, w, stride=s, padding=0)
+
+
+def _setup_kernel(k):
+ k = np.asarray(k, dtype=np.float32)
+ if k.ndim == 1:
+ k = np.outer(k, k)
+ k /= np.sum(k)
+ assert k.ndim == 2
+ assert k.shape[0] == k.shape[1]
+ return k
+
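+# Worked example (illustrative): _setup_kernel([1, 3, 3, 1]) builds the separable
+# outer-product filter [[1, 3, 3, 1], [3, 9, 9, 3], [3, 9, 9, 3], [1, 3, 3, 1]] / 64,
+# i.e. the default `resample_kernel` normalized to sum to one.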
+
+def _shape(x, dim):
+ return x.shape[dim]
+
+
+def upsample_2d(x, k=None, factor=2, gain=1):
+ r"""Upsample a batch of 2D images with the given filter.
+
+ Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
+  and upsamples each image with the given filter. The filter is normalized so
+  that if the input pixels are constant, they will be scaled by the specified
+  `gain`. Pixels outside the image are assumed to be zero, and the filter is
+  padded with zeros so that its shape is a multiple of the upsampling factor.
+ Args:
+ x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
+ C]`.
+ k: FIR filter of the shape `[firH, firW]` or `[firN]`
+ (separable). The default is `[1] * factor`, which corresponds to
+ nearest-neighbor upsampling.
+ factor: Integer upsampling factor (default: 2).
+ gain: Scaling factor for signal magnitude (default: 1.0).
+
+ Returns:
+ Tensor of the shape `[N, C, H * factor, W * factor]`
+ """
+ assert isinstance(factor, int) and factor >= 1
+ if k is None:
+ k = [1] * factor
+ k = _setup_kernel(k) * (gain * (factor ** 2))
+ p = k.shape[0] - factor
+ return upfirdn2d(x, torch.tensor(k, device=x.device),
+ up=factor, pad=((p + 1) // 2 + factor - 1, p // 2))
+
+
+def downsample_2d(x, k=None, factor=2, gain=1):
+ r"""Downsample a batch of 2D images with the given filter.
+
+ Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
+  and downsamples each image with the given filter. The filter is normalized
+  so that if the input pixels are constant, they will be scaled by the
+  specified `gain`. Pixels outside the image are assumed to be zero, and the
+  filter is padded with zeros so that its shape is a multiple of the
+  downsampling factor.
+ Args:
+ x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
+ C]`.
+ k: FIR filter of the shape `[firH, firW]` or `[firN]`
+ (separable). The default is `[1] * factor`, which corresponds to
+ average pooling.
+ factor: Integer downsampling factor (default: 2).
+ gain: Scaling factor for signal magnitude (default: 1.0).
+
+ Returns:
+ Tensor of the shape `[N, C, H // factor, W // factor]`
+ """
+
+ assert isinstance(factor, int) and factor >= 1
+ if k is None:
+ k = [1] * factor
+ k = _setup_kernel(k) * gain
+ p = k.shape[0] - factor
+ return upfirdn2d(x, torch.tensor(k, device=x.device),
+ down=factor, pad=((p + 1) // 2, p // 2))
diff --git a/case_studies/diffpure/score_sde/models/utils.py b/case_studies/diffpure/score_sde/models/utils.py
new file mode 100644
index 0000000..b6ee60a
--- /dev/null
+++ b/case_studies/diffpure/score_sde/models/utils.py
@@ -0,0 +1,187 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""All functions and modules related to model definition.
+"""
+
+import torch
+from score_sde import sde_lib
+import numpy as np
+
+_MODELS = {}
+
+
+def register_model(cls=None, *, name=None):
+ """A decorator for registering model classes."""
+
+ def _register(cls):
+ if name is None:
+ local_name = cls.__name__
+ else:
+ local_name = name
+ if local_name in _MODELS:
+ raise ValueError(f'Already registered model with name: {local_name}')
+ _MODELS[local_name] = cls
+ return cls
+
+ if cls is None:
+ return _register
+ else:
+ return _register(cls)
+
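+# Usage sketch (hypothetical name): decorating a class with @register_model(name='my_ncsn')
+# registers it under that key, a bare @register_model uses the class name, and
+# get_model('my_ncsn') retrieves it; re-registering an existing name raises ValueError.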
+
+def get_model(name):
+ return _MODELS[name]
+
+
+def get_sigmas(config):
+ """Get sigmas --- the set of noise levels for SMLD from config files.
+ Args:
+ config: A ConfigDict object parsed from the config file
+ Returns:
+    sigmas: a numpy array of noise levels
+ """
+ sigmas = np.exp(
+ np.linspace(np.log(config.model.sigma_max), np.log(config.model.sigma_min), config.model.num_scales))
+
+ return sigmas
+
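+# Worked example (illustrative values): sigma_max=50, sigma_min=0.01 and num_scales=3
+# give the log-uniform sequence [50.0, ~0.707, 0.01], i.e. noise levels decreasing
+# geometrically from sigma_max to sigma_min.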
+
+def get_ddpm_params(config):
+ """Get betas and alphas --- parameters used in the original DDPM paper."""
+ num_diffusion_timesteps = 1000
+ # parameters need to be adapted if number of time steps differs from 1000
+ beta_start = config.model.beta_min / config.model.num_scales
+ beta_end = config.model.beta_max / config.model.num_scales
+ betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
+
+ alphas = 1. - betas
+ alphas_cumprod = np.cumprod(alphas, axis=0)
+ sqrt_alphas_cumprod = np.sqrt(alphas_cumprod)
+ sqrt_1m_alphas_cumprod = np.sqrt(1. - alphas_cumprod)
+
+ return {
+ 'betas': betas,
+ 'alphas': alphas,
+ 'alphas_cumprod': alphas_cumprod,
+ 'sqrt_alphas_cumprod': sqrt_alphas_cumprod,
+ 'sqrt_1m_alphas_cumprod': sqrt_1m_alphas_cumprod,
+ 'beta_min': beta_start * (num_diffusion_timesteps - 1),
+ 'beta_max': beta_end * (num_diffusion_timesteps - 1),
+ 'num_diffusion_timesteps': num_diffusion_timesteps
+ }
+
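+# Sketch: with num_scales == 1000 this reproduces the linear DDPM schedule; e.g.
+# beta_min=0.1 and beta_max=20 (common defaults, shown for illustration) yield betas
+# increasing linearly from 1e-4 to 0.02 over the 1000 diffusion steps.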
+
+def create_model(config):
+ """Create the score model."""
+ model_name = config.model.name
+ score_model = get_model(model_name)(config)
+ # score_model = score_model.to(config.device)
+ # score_model = torch.nn.DataParallel(score_model)
+ return score_model
+
+
+def get_model_fn(model, train=False):
+ """Create a function to give the output of the score-based model.
+
+ Args:
+ model: The score model.
+ train: `True` for training and `False` for evaluation.
+
+ Returns:
+ A model function.
+ """
+
+ def model_fn(x, labels):
+ """Compute the output of the score-based model.
+
+ Args:
+ x: A mini-batch of input data.
+ labels: A mini-batch of conditioning variables for time steps. Should be interpreted differently
+ for different models.
+
+ Returns:
+      The output of the score-based model.
+ """
+ if not train:
+ model.eval()
+ return model(x, labels)
+ else:
+ model.train()
+ return model(x, labels)
+
+ return model_fn
+
+
+def get_score_fn(sde, model, train=False, continuous=False):
+ """Wraps `score_fn` so that the model output corresponds to a real time-dependent score function.
+
+ Args:
+ sde: An `sde_lib.SDE` object that represents the forward SDE.
+ model: A score model.
+ train: `True` for training and `False` for evaluation.
+ continuous: If `True`, the score-based model is expected to directly take continuous time steps.
+
+ Returns:
+ A score function.
+ """
+ model_fn = get_model_fn(model, train=train)
+
+ if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
+ def score_fn(x, t):
+ # Scale neural network output by standard deviation and flip sign
+ if continuous or isinstance(sde, sde_lib.subVPSDE):
+ # For VP-trained models, t=0 corresponds to the lowest noise level
+        # The maximum value of the time embedding is assumed to be 999 for
+ # continuously-trained models.
+ labels = t * 999
+ score = model_fn(x, labels)
+ std = sde.marginal_prob(torch.zeros_like(x), t)[1]
+ else:
+ # For VP-trained models, t=0 corresponds to the lowest noise level
+ labels = t * (sde.N - 1)
+ score = model_fn(x, labels)
+ std = sde.sqrt_1m_alphas_cumprod.to(labels.device)[labels.long()]
+
+ score = -score / std[:, None, None, None]
+ return score
+
+ elif isinstance(sde, sde_lib.VESDE):
+ def score_fn(x, t):
+ if continuous:
+ labels = sde.marginal_prob(torch.zeros_like(x), t)[1]
+ else:
+ # For VE-trained models, t=0 corresponds to the highest noise level
+ labels = sde.T - t
+ labels *= sde.N - 1
+ labels = torch.round(labels).long()
+
+ score = model_fn(x, labels)
+ return score
+
+ else:
+ raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
+
+ return score_fn
+
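+# Sketch of the VP rescaling above: epsilon-prediction (DDPM-style) models output the
+# noise eps with x_t = mean + std * eps, and the Gaussian perturbation kernel has score
+# grad_x log p(x_t | x_0) = -eps / std, which is why the output is divided by std and
+# negated; VE-trained models predict the score directly, so no rescaling is applied.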
+
+def to_flattened_numpy(x):
+ """Flatten a torch tensor `x` and convert it to numpy."""
+ return x.detach().cpu().numpy().reshape((-1,))
+
+
+def from_flattened_numpy(x, shape):
+ """Form a torch tensor with the given `shape` from a flattened numpy array `x`."""
+ return torch.from_numpy(x.reshape(shape))
diff --git a/case_studies/diffpure/score_sde/op/__init__.py b/case_studies/diffpure/score_sde/op/__init__.py
new file mode 100644
index 0000000..aa3a30e
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/__init__.py
@@ -0,0 +1,10 @@
+# ---------------------------------------------------------------
+# Taken from the following link as is from:
+# https://github.com/yang-song/score_sde_pytorch/blob/main/op/__init__.py
+#
+# The license for the original version of this file can be
+# found in the `score_sde` directory (LICENSE_SCORE_SDE).
+# ---------------------------------------------------------------
+
+from .fused_act import FusedLeakyReLU, fused_leaky_relu
+from .upfirdn2d import upfirdn2d
diff --git a/case_studies/diffpure/score_sde/op/fused_act.py b/case_studies/diffpure/score_sde/op/fused_act.py
new file mode 100644
index 0000000..10c90fc
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/fused_act.py
@@ -0,0 +1,105 @@
+# ---------------------------------------------------------------
+# Taken from the following link as is from:
+# https://github.com/yang-song/score_sde_pytorch/blob/main/op/fused_act.py
+#
+# The license for the original version of this file can be
+# found in the `score_sde` directory (LICENSE_SCORE_SDE).
+# ---------------------------------------------------------------
+
+import os
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.autograd import Function
+from torch.utils.cpp_extension import load
+
+
+module_path = os.path.dirname(__file__)
+fused = load(
+ "fused",
+ sources=[
+ os.path.join(module_path, "fused_bias_act.cpp"),
+ os.path.join(module_path, "fused_bias_act_kernel.cu"),
+ ],
+)
+
+
+class FusedLeakyReLUFunctionBackward(Function):
+ @staticmethod
+ def forward(ctx, grad_output, out, negative_slope, scale):
+ ctx.save_for_backward(out)
+ ctx.negative_slope = negative_slope
+ ctx.scale = scale
+
+ empty = grad_output.new_empty(0)
+
+ grad_input = fused.fused_bias_act(
+ grad_output, empty, out, 3, 1, negative_slope, scale
+ )
+
+ dim = [0]
+
+ if grad_input.ndim > 2:
+ dim += list(range(2, grad_input.ndim))
+
+ grad_bias = grad_input.sum(dim).detach()
+
+ return grad_input, grad_bias
+
+ @staticmethod
+ def backward(ctx, gradgrad_input, gradgrad_bias):
+ out, = ctx.saved_tensors
+ gradgrad_out = fused.fused_bias_act(
+ gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
+ )
+
+ return gradgrad_out, None, None, None
+
+
+class FusedLeakyReLUFunction(Function):
+ @staticmethod
+ def forward(ctx, input, bias, negative_slope, scale):
+ empty = input.new_empty(0)
+ out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
+ ctx.save_for_backward(out)
+ ctx.negative_slope = negative_slope
+ ctx.scale = scale
+
+ return out
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ out, = ctx.saved_tensors
+
+ grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
+ grad_output, out, ctx.negative_slope, ctx.scale
+ )
+
+ return grad_input, grad_bias, None, None
+
+
+class FusedLeakyReLU(nn.Module):
+ def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
+ super().__init__()
+
+ self.bias = nn.Parameter(torch.zeros(channel))
+ self.negative_slope = negative_slope
+ self.scale = scale
+
+ def forward(self, input):
+ return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
+
+
+def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
+ if input.device.type == "cpu":
+ rest_dim = [1] * (input.ndim - bias.ndim - 1)
+ return (
+ F.leaky_relu(
+ input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2
+ )
+ * scale
+ )
+
+ else:
+ return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
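+# Sketch: on CPU this falls back to F.leaky_relu(x + bias) * scale (note the CPU branch
+# hard-codes negative_slope=0.2), while on GPU the compiled `fused` extension applies
+# the bias, the LeakyReLU and the default sqrt(2) gain in a single kernel.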
diff --git a/case_studies/diffpure/score_sde/op/fused_bias_act.cpp b/case_studies/diffpure/score_sde/op/fused_bias_act.cpp
new file mode 100644
index 0000000..d7f951e
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/fused_bias_act.cpp
@@ -0,0 +1,29 @@
+// ---------------------------------------------------------------
+// Taken from the following link as is from:
+// https://github.com/yang-song/score_sde_pytorch/blob/main/op/fused_bias_act.cpp
+//
+// The license for the original version of this file can be
+// found in the `score_sde` directory (LICENSE_SCORE_SDE).
+// ---------------------------------------------------------------
+
+#include <torch/extension.h>
+
+
+torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+ int act, int grad, float alpha, float scale);
+
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+ int act, int grad, float alpha, float scale) {
+ CHECK_CUDA(input);
+ CHECK_CUDA(bias);
+
+ return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
+}
\ No newline at end of file
diff --git a/case_studies/diffpure/score_sde/op/fused_bias_act_kernel.cu b/case_studies/diffpure/score_sde/op/fused_bias_act_kernel.cu
new file mode 100644
index 0000000..c9fa56f
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/fused_bias_act_kernel.cu
@@ -0,0 +1,99 @@
+// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+//
+// This work is made available under the Nvidia Source Code License-NC.
+// To view a copy of this license, visit
+// https://nvlabs.github.io/stylegan2/license.html
+
+#include <torch/types.h>
+
+#include <ATen/ATen.h>
+#include <ATen/AccumulateType.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+
+template <typename scalar_t>
+static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
+ int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
+ int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
+
+ scalar_t zero = 0.0;
+
+ for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
+ scalar_t x = p_x[xi];
+
+ if (use_bias) {
+ x += p_b[(xi / step_b) % size_b];
+ }
+
+ scalar_t ref = use_ref ? p_ref[xi] : zero;
+
+ scalar_t y;
+
+ switch (act * 10 + grad) {
+ default:
+ case 10: y = x; break;
+ case 11: y = x; break;
+ case 12: y = 0.0; break;
+
+ case 30: y = (x > 0.0) ? x : x * alpha; break;
+ case 31: y = (ref > 0.0) ? x : x * alpha; break;
+ case 32: y = 0.0; break;
+ }
+
+ out[xi] = y * scale;
+ }
+}
+
+
+torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+ int act, int grad, float alpha, float scale) {
+ int curDevice = -1;
+ cudaGetDevice(&curDevice);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
+
+ auto x = input.contiguous();
+ auto b = bias.contiguous();
+ auto ref = refer.contiguous();
+
+ int use_bias = b.numel() ? 1 : 0;
+ int use_ref = ref.numel() ? 1 : 0;
+
+ int size_x = x.numel();
+ int size_b = b.numel();
+ int step_b = 1;
+
+ for (int i = 1 + 1; i < x.dim(); i++) {
+ step_b *= x.size(i);
+ }
+
+ int loop_x = 4;
+ int block_size = 4 * 32;
+ int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
+
+ auto y = torch::empty_like(x);
+
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
+    fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
+      y.data_ptr<scalar_t>(),
+      x.data_ptr<scalar_t>(),
+      b.data_ptr<scalar_t>(),
+      ref.data_ptr<scalar_t>(),
+ act,
+ grad,
+ alpha,
+ scale,
+ loop_x,
+ size_x,
+ step_b,
+ size_b,
+ use_bias,
+ use_ref
+ );
+ });
+
+ return y;
+}
\ No newline at end of file
diff --git a/case_studies/diffpure/score_sde/op/upfirdn2d.cpp b/case_studies/diffpure/score_sde/op/upfirdn2d.cpp
new file mode 100644
index 0000000..294cda5
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/upfirdn2d.cpp
@@ -0,0 +1,31 @@
+// ---------------------------------------------------------------
+// Taken from the following link as is from:
+// https://github.com/yang-song/score_sde_pytorch/blob/main/op/upfirdn2d.cpp
+//
+// The license for the original version of this file can be
+// found in the `score_sde` directory (LICENSE_SCORE_SDE).
+// ---------------------------------------------------------------
+
+#include <torch/extension.h>
+
+
+torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
+ int up_x, int up_y, int down_x, int down_y,
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1);
+
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
+ int up_x, int up_y, int down_x, int down_y,
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
+ CHECK_CUDA(input);
+ CHECK_CUDA(kernel);
+
+ return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
+}
\ No newline at end of file
diff --git a/case_studies/diffpure/score_sde/op/upfirdn2d.py b/case_studies/diffpure/score_sde/op/upfirdn2d.py
new file mode 100644
index 0000000..bc53c65
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/upfirdn2d.py
@@ -0,0 +1,208 @@
+# ---------------------------------------------------------------
+# Taken from the following link as is from:
+# https://github.com/yang-song/score_sde_pytorch/blob/main/op/upfirdn2d.py
+#
+# The license for the original version of this file can be
+# found in the `score_sde` directory (LICENSE_SCORE_SDE).
+# ---------------------------------------------------------------
+
+import os
+
+import torch
+from torch.nn import functional as F
+from torch.autograd import Function
+from torch.utils.cpp_extension import load
+
+
+module_path = os.path.dirname(__file__)
+upfirdn2d_op = load(
+ "upfirdn2d",
+ sources=[
+ os.path.join(module_path, "upfirdn2d.cpp"),
+ os.path.join(module_path, "upfirdn2d_kernel.cu"),
+ ],
+)
+
+
+class UpFirDn2dBackward(Function):
+ @staticmethod
+ def forward(
+ ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
+ ):
+
+ up_x, up_y = up
+ down_x, down_y = down
+ g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
+
+ grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
+
+ grad_input = upfirdn2d_op.upfirdn2d(
+ grad_output,
+ grad_kernel,
+ down_x,
+ down_y,
+ up_x,
+ up_y,
+ g_pad_x0,
+ g_pad_x1,
+ g_pad_y0,
+ g_pad_y1,
+ )
+ grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
+
+ ctx.save_for_backward(kernel)
+
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
+
+ ctx.up_x = up_x
+ ctx.up_y = up_y
+ ctx.down_x = down_x
+ ctx.down_y = down_y
+ ctx.pad_x0 = pad_x0
+ ctx.pad_x1 = pad_x1
+ ctx.pad_y0 = pad_y0
+ ctx.pad_y1 = pad_y1
+ ctx.in_size = in_size
+ ctx.out_size = out_size
+
+ return grad_input
+
+ @staticmethod
+ def backward(ctx, gradgrad_input):
+ kernel, = ctx.saved_tensors
+
+ gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
+
+ gradgrad_out = upfirdn2d_op.upfirdn2d(
+ gradgrad_input,
+ kernel,
+ ctx.up_x,
+ ctx.up_y,
+ ctx.down_x,
+ ctx.down_y,
+ ctx.pad_x0,
+ ctx.pad_x1,
+ ctx.pad_y0,
+ ctx.pad_y1,
+ )
+ # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
+ gradgrad_out = gradgrad_out.view(
+ ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
+ )
+
+ return gradgrad_out, None, None, None, None, None, None, None, None
+
+
+class UpFirDn2d(Function):
+ @staticmethod
+ def forward(ctx, input, kernel, up, down, pad):
+ up_x, up_y = up
+ down_x, down_y = down
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
+
+ kernel_h, kernel_w = kernel.shape
+ batch, channel, in_h, in_w = input.shape
+ ctx.in_size = input.shape
+
+ input = input.reshape(-1, in_h, in_w, 1)
+
+ ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
+
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
+ ctx.out_size = (out_h, out_w)
+
+ ctx.up = (up_x, up_y)
+ ctx.down = (down_x, down_y)
+ ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
+
+ g_pad_x0 = kernel_w - pad_x0 - 1
+ g_pad_y0 = kernel_h - pad_y0 - 1
+ g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
+ g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
+
+ ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
+
+ out = upfirdn2d_op.upfirdn2d(
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
+ )
+ # out = out.view(major, out_h, out_w, minor)
+ out = out.view(-1, channel, out_h, out_w)
+
+ return out
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ kernel, grad_kernel = ctx.saved_tensors
+
+ grad_input = UpFirDn2dBackward.apply(
+ grad_output,
+ kernel,
+ grad_kernel,
+ ctx.up,
+ ctx.down,
+ ctx.pad,
+ ctx.g_pad,
+ ctx.in_size,
+ ctx.out_size,
+ )
+
+ return grad_input, None, None, None, None
+
+
+def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
+ if input.device.type == "cpu":
+ out = upfirdn2d_native(
+ input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
+ )
+
+ else:
+ out = UpFirDn2d.apply(
+ input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
+ )
+
+ return out
+
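+# Sketch: upfirdn2d upsamples by zero-insertion (`up`), pads by `pad`, filters with the
+# FIR `kernel`, then decimates by `down`; each spatial size becomes
+# (H * up + pad[0] + pad[1] - kernel_h) // down + 1, matching out_h/out_w above.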
+
+def upfirdn2d_native(
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
+):
+ _, channel, in_h, in_w = input.shape
+ input = input.reshape(-1, in_h, in_w, 1)
+
+ _, in_h, in_w, minor = input.shape
+ kernel_h, kernel_w = kernel.shape
+
+ out = input.view(-1, in_h, 1, in_w, 1, minor)
+ out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
+ out = out.view(-1, in_h * up_y, in_w * up_x, minor)
+
+ out = F.pad(
+ out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
+ )
+ out = out[
+ :,
+ max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
+ max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
+ :,
+ ]
+
+ out = out.permute(0, 3, 1, 2)
+ out = out.reshape(
+ [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
+ )
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
+ out = F.conv2d(out, w)
+ out = out.reshape(
+ -1,
+ minor,
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
+ )
+ out = out.permute(0, 2, 3, 1)
+ out = out[:, ::down_y, ::down_x, :]
+
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
+
+ return out.view(-1, channel, out_h, out_w)
diff --git a/case_studies/diffpure/score_sde/op/upfirdn2d_kernel.cu b/case_studies/diffpure/score_sde/op/upfirdn2d_kernel.cu
new file mode 100644
index 0000000..a88bc77
--- /dev/null
+++ b/case_studies/diffpure/score_sde/op/upfirdn2d_kernel.cu
@@ -0,0 +1,369 @@
+// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+//
+// This work is made available under the Nvidia Source Code License-NC.
+// To view a copy of this license, visit
+// https://nvlabs.github.io/stylegan2/license.html
+
+#include <torch/types.h>
+
+#include <ATen/ATen.h>
+#include <ATen/AccumulateType.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
+ int c = a / b;
+
+ if (c * b > a) {
+ c--;
+ }
+
+ return c;
+}
+
+struct UpFirDn2DKernelParams {
+ int up_x;
+ int up_y;
+ int down_x;
+ int down_y;
+ int pad_x0;
+ int pad_x1;
+ int pad_y0;
+ int pad_y1;
+
+ int major_dim;
+ int in_h;
+ int in_w;
+ int minor_dim;
+ int kernel_h;
+ int kernel_w;
+ int out_h;
+ int out_w;
+ int loop_major;
+ int loop_x;
+};
+
+template <typename scalar_t>
+__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input,
+ const scalar_t *kernel,
+ const UpFirDn2DKernelParams p) {
+ int minor_idx = blockIdx.x * blockDim.x + threadIdx.x;
+ int out_y = minor_idx / p.minor_dim;
+ minor_idx -= out_y * p.minor_dim;
+ int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y;
+ int major_idx_base = blockIdx.z * p.loop_major;
+
+ if (out_x_base >= p.out_w || out_y >= p.out_h ||
+ major_idx_base >= p.major_dim) {
+ return;
+ }
+
+ int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0;
+ int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h);
+ int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y;
+ int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y;
+
+ for (int loop_major = 0, major_idx = major_idx_base;
+ loop_major < p.loop_major && major_idx < p.major_dim;
+ loop_major++, major_idx++) {
+ for (int loop_x = 0, out_x = out_x_base;
+ loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) {
+ int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0;
+ int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w);
+ int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x;
+ int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x;
+
+ const scalar_t *x_p =
+ &input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim +
+ minor_idx];
+ const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x];
+ int x_px = p.minor_dim;
+ int k_px = -p.up_x;
+ int x_py = p.in_w * p.minor_dim;
+ int k_py = -p.up_y * p.kernel_w;
+
+ scalar_t v = 0.0f;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+          v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p);
+ x_p += x_px;
+ k_p += k_px;
+ }
+
+ x_p += x_py - w * x_px;
+ k_p += k_py - w * k_px;
+ }
+
+ out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
+ minor_idx] = v;
+ }
+ }
+}
+
+template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
+          int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
+__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input,
+ const scalar_t *kernel,
+ const UpFirDn2DKernelParams p) {
+ const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
+ const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
+
+ __shared__ volatile float sk[kernel_h][kernel_w];
+ __shared__ volatile float sx[tile_in_h][tile_in_w];
+
+ int minor_idx = blockIdx.x;
+ int tile_out_y = minor_idx / p.minor_dim;
+ minor_idx -= tile_out_y * p.minor_dim;
+ tile_out_y *= tile_out_h;
+ int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
+ int major_idx_base = blockIdx.z * p.loop_major;
+
+ if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
+ major_idx_base >= p.major_dim) {
+ return;
+ }
+
+ for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
+ tap_idx += blockDim.x) {
+ int ky = tap_idx / kernel_w;
+ int kx = tap_idx - ky * kernel_w;
+ scalar_t v = 0.0;
+
+ if (kx < p.kernel_w & ky < p.kernel_h) {
+ v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
+ }
+
+ sk[ky][kx] = v;
+ }
+
+ for (int loop_major = 0, major_idx = major_idx_base;
+ loop_major < p.loop_major & major_idx < p.major_dim;
+ loop_major++, major_idx++) {
+ for (int loop_x = 0, tile_out_x = tile_out_x_base;
+ loop_x < p.loop_x & tile_out_x < p.out_w;
+ loop_x++, tile_out_x += tile_out_w) {
+ int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
+ int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
+ int tile_in_x = floor_div(tile_mid_x, up_x);
+ int tile_in_y = floor_div(tile_mid_y, up_y);
+
+ __syncthreads();
+
+ for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
+ in_idx += blockDim.x) {
+ int rel_in_y = in_idx / tile_in_w;
+ int rel_in_x = in_idx - rel_in_y * tile_in_w;
+ int in_x = rel_in_x + tile_in_x;
+ int in_y = rel_in_y + tile_in_y;
+
+ scalar_t v = 0.0;
+
+ if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
+ v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) *
+ p.minor_dim +
+ minor_idx];
+ }
+
+ sx[rel_in_y][rel_in_x] = v;
+ }
+
+ __syncthreads();
+ for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
+ out_idx += blockDim.x) {
+ int rel_out_y = out_idx / tile_out_w;
+ int rel_out_x = out_idx - rel_out_y * tile_out_w;
+ int out_x = rel_out_x + tile_out_x;
+ int out_y = rel_out_y + tile_out_y;
+
+ int mid_x = tile_mid_x + rel_out_x * down_x;
+ int mid_y = tile_mid_y + rel_out_y * down_y;
+ int in_x = floor_div(mid_x, up_x);
+ int in_y = floor_div(mid_y, up_y);
+ int rel_in_x = in_x - tile_in_x;
+ int rel_in_y = in_y - tile_in_y;
+ int kernel_x = (in_x + 1) * up_x - mid_x - 1;
+ int kernel_y = (in_y + 1) * up_y - mid_y - 1;
+
+ scalar_t v = 0.0;
+
+#pragma unroll
+ for (int y = 0; y < kernel_h / up_y; y++)
+#pragma unroll
+ for (int x = 0; x < kernel_w / up_x; x++)
+ v += sx[rel_in_y + y][rel_in_x + x] *
+ sk[kernel_y + y * up_y][kernel_x + x * up_x];
+
+ if (out_x < p.out_w & out_y < p.out_h) {
+ out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
+ minor_idx] = v;
+ }
+ }
+ }
+ }
+}
+
+torch::Tensor upfirdn2d_op(const torch::Tensor &input,
+ const torch::Tensor &kernel, int up_x, int up_y,
+ int down_x, int down_y, int pad_x0, int pad_x1,
+ int pad_y0, int pad_y1) {
+ int curDevice = -1;
+ cudaGetDevice(&curDevice);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
+
+ UpFirDn2DKernelParams p;
+
+ auto x = input.contiguous();
+ auto k = kernel.contiguous();
+
+ p.major_dim = x.size(0);
+ p.in_h = x.size(1);
+ p.in_w = x.size(2);
+ p.minor_dim = x.size(3);
+ p.kernel_h = k.size(0);
+ p.kernel_w = k.size(1);
+ p.up_x = up_x;
+ p.up_y = up_y;
+ p.down_x = down_x;
+ p.down_y = down_y;
+ p.pad_x0 = pad_x0;
+ p.pad_x1 = pad_x1;
+ p.pad_y0 = pad_y0;
+ p.pad_y1 = pad_y1;
+
+ p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) /
+ p.down_y;
+ p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) /
+ p.down_x;
+
+ auto out =
+ at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
+
+ int mode = -1;
+
+ int tile_out_h = -1;
+ int tile_out_w = -1;
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
+ p.kernel_h <= 4 && p.kernel_w <= 4) {
+ mode = 1;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
+ p.kernel_h <= 3 && p.kernel_w <= 3) {
+ mode = 2;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
+ p.kernel_h <= 4 && p.kernel_w <= 4) {
+ mode = 3;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
+ p.kernel_h <= 2 && p.kernel_w <= 2) {
+ mode = 4;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
+ p.kernel_h <= 4 && p.kernel_w <= 4) {
+ mode = 5;
+ tile_out_h = 8;
+ tile_out_w = 32;
+ }
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
+ p.kernel_h <= 2 && p.kernel_w <= 2) {
+ mode = 6;
+ tile_out_h = 8;
+ tile_out_w = 32;
+ }
+
+ dim3 block_size;
+ dim3 grid_size;
+
+ if (tile_out_h > 0 && tile_out_w > 0) {
+ p.loop_major = (p.major_dim - 1) / 16384 + 1;
+ p.loop_x = 1;
+ block_size = dim3(32 * 8, 1, 1);
+ grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
+ (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
+ (p.major_dim - 1) / p.loop_major + 1);
+ } else {
+ p.loop_major = (p.major_dim - 1) / 16384 + 1;
+ p.loop_x = 4;
+ block_size = dim3(4, 32, 1);
+ grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1,
+ (p.out_w - 1) / (p.loop_x * block_size.y) + 1,
+ (p.major_dim - 1) / p.loop_major + 1);
+ }
+
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
+ switch (mode) {
+ case 1:
+        upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 2:
+        upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 3:
+        upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 4:
+        upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 5:
+        upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 6:
+        upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 2, 2, 8, 32>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      default:
+        upfirdn2d_kernel_large<scalar_t><<<grid_size, block_size, 0, stream>>>(
+            out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(),
+            k.data_ptr<scalar_t>(), p);
+ }
+ });
+
+ return out;
+}
\ No newline at end of file
diff --git a/case_studies/diffpure/score_sde/sampling.py b/case_studies/diffpure/score_sde/sampling.py
new file mode 100644
index 0000000..8454dc7
--- /dev/null
+++ b/case_studies/diffpure/score_sde/sampling.py
@@ -0,0 +1,485 @@
+# coding=utf-8
+# Copyright 2020 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+# pytype: skip-file
+"""Various sampling methods."""
+import functools
+
+import torch
+import numpy as np
+import abc
+
+from .models.utils import from_flattened_numpy, to_flattened_numpy, get_score_fn
+from scipy import integrate
+from . import sde_lib
+from .models import utils as mutils
+
+_CORRECTORS = {}
+_PREDICTORS = {}
+
+
+def register_predictor(cls=None, *, name=None):
+ """A decorator for registering predictor classes."""
+
+ def _register(cls):
+ if name is None:
+ local_name = cls.__name__
+ else:
+ local_name = name
+ if local_name in _PREDICTORS:
+ raise ValueError(f'Already registered model with name: {local_name}')
+ _PREDICTORS[local_name] = cls
+ return cls
+
+ if cls is None:
+ return _register
+ else:
+ return _register(cls)
+
+
+def register_corrector(cls=None, *, name=None):
+ """A decorator for registering corrector classes."""
+
+ def _register(cls):
+ if name is None:
+ local_name = cls.__name__
+ else:
+ local_name = name
+ if local_name in _CORRECTORS:
+ raise ValueError(f'Already registered model with name: {local_name}')
+ _CORRECTORS[local_name] = cls
+ return cls
+
+ if cls is None:
+ return _register
+ else:
+ return _register(cls)
+
+
+def get_predictor(name):
+ return _PREDICTORS[name]
+
+
+def get_corrector(name):
+ return _CORRECTORS[name]
+
+
+def get_sampling_fn(config, sde, shape, inverse_scaler, eps):
+ """Create a sampling function.
+
+ Args:
+ config: A `ml_collections.ConfigDict` object that contains all configuration information.
+ sde: A `sde_lib.SDE` object that represents the forward SDE.
+ shape: A sequence of integers representing the expected shape of a single sample.
+ inverse_scaler: The inverse data normalizer function.
+ eps: A `float` number. The reverse-time SDE is only integrated to `eps` for numerical stability.
+
+ Returns:
+ A function that takes random states and a replicated training state and outputs samples with the
+ trailing dimensions matching `shape`.
+ """
+
+ sampler_name = config.sampling.method
+ # Probability flow ODE sampling with black-box ODE solvers
+ if sampler_name.lower() == 'ode':
+ sampling_fn = get_ode_sampler(sde=sde,
+ shape=shape,
+ inverse_scaler=inverse_scaler,
+ denoise=config.sampling.noise_removal,
+ eps=eps,
+ device=config.device)
+ # Predictor-Corrector sampling. Predictor-only and Corrector-only samplers are special cases.
+ elif sampler_name.lower() == 'pc':
+ predictor = get_predictor(config.sampling.predictor.lower())
+ corrector = get_corrector(config.sampling.corrector.lower())
+ sampling_fn = get_pc_sampler(sde=sde,
+ shape=shape,
+ predictor=predictor,
+ corrector=corrector,
+ inverse_scaler=inverse_scaler,
+ snr=config.sampling.snr,
+ n_steps=config.sampling.n_steps_each,
+ probability_flow=config.sampling.probability_flow,
+ continuous=config.training.continuous,
+ denoise=config.sampling.noise_removal,
+ eps=eps,
+ device=config.device)
+ else:
+ raise ValueError(f"Sampler name {sampler_name} unknown.")
+
+ return sampling_fn
+
+
+class Predictor(abc.ABC):
+ """The abstract class for a predictor algorithm."""
+
+ def __init__(self, sde, score_fn, probability_flow=False):
+ super().__init__()
+ self.sde = sde
+ # Compute the reverse SDE/ODE
+ self.rsde = sde.reverse(score_fn, probability_flow)
+ self.score_fn = score_fn
+
+ @abc.abstractmethod
+ def update_fn(self, x, t):
+ """One update of the predictor.
+
+ Args:
+ x: A PyTorch tensor representing the current state
+ t: A PyTorch tensor representing the current time step.
+
+ Returns:
+ x: A PyTorch tensor of the next state.
+ x_mean: A PyTorch tensor. The next state without random noise. Useful for denoising.
+ """
+ pass
+
+
+class Corrector(abc.ABC):
+ """The abstract class for a corrector algorithm."""
+
+ def __init__(self, sde, score_fn, snr, n_steps):
+ super().__init__()
+ self.sde = sde
+ self.score_fn = score_fn
+ self.snr = snr
+ self.n_steps = n_steps
+
+ @abc.abstractmethod
+ def update_fn(self, x, t):
+ """One update of the corrector.
+
+ Args:
+ x: A PyTorch tensor representing the current state
+ t: A PyTorch tensor representing the current time step.
+
+ Returns:
+ x: A PyTorch tensor of the next state.
+ x_mean: A PyTorch tensor. The next state without random noise. Useful for denoising.
+ """
+ pass
+
+
+@register_predictor(name='euler_maruyama')
+class EulerMaruyamaPredictor(Predictor):
+ def __init__(self, sde, score_fn, probability_flow=False):
+ super().__init__(sde, score_fn, probability_flow)
+
+ def update_fn(self, x, t):
+ dt = -1. / self.rsde.N
+ z = torch.randn_like(x)
+ drift, diffusion = self.rsde.sde(x, t)
+ x_mean = x + drift * dt
+ x = x_mean + diffusion[:, None, None, None] * np.sqrt(-dt) * z
+ return x, x_mean
+
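+# Note (added for clarity, not part of the upstream code): the update above is one
+# Euler-Maruyama step of the reverse-time SDE with dt = -1/N (time runs from T down
+# to eps), i.e. x <- x + f_rev(x, t) * dt + g(t) * sqrt(|dt|) * z with z ~ N(0, I).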
+
+@register_predictor(name='reverse_diffusion')
+class ReverseDiffusionPredictor(Predictor):
+ def __init__(self, sde, score_fn, probability_flow=False):
+ super().__init__(sde, score_fn, probability_flow)
+
+ def update_fn(self, x, t):
+ f, G = self.rsde.discretize(x, t)
+ z = torch.randn_like(x)
+ x_mean = x - f
+ x = x_mean + G[:, None, None, None] * z
+ return x, x_mean
+
+
+@register_predictor(name='ancestral_sampling')
+class AncestralSamplingPredictor(Predictor):
+ """The ancestral sampling predictor. Currently only supports VE/VP SDEs."""
+
+ def __init__(self, sde, score_fn, probability_flow=False):
+ super().__init__(sde, score_fn, probability_flow)
+ if not isinstance(sde, sde_lib.VPSDE) and not isinstance(sde, sde_lib.VESDE):
+ raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
+ assert not probability_flow, "Probability flow not supported by ancestral sampling"
+
+ def vesde_update_fn(self, x, t):
+ sde = self.sde
+ timestep = (t * (sde.N - 1) / sde.T).long()
+ sigma = sde.discrete_sigmas[timestep]
+ adjacent_sigma = torch.where(timestep == 0, torch.zeros_like(t), sde.discrete_sigmas.to(t.device)[timestep - 1])
+ score = self.score_fn(x, t)
+ x_mean = x + score * (sigma ** 2 - adjacent_sigma ** 2)[:, None, None, None]
+ std = torch.sqrt((adjacent_sigma ** 2 * (sigma ** 2 - adjacent_sigma ** 2)) / (sigma ** 2))
+ noise = torch.randn_like(x)
+ x = x_mean + std[:, None, None, None] * noise
+ return x, x_mean
+
+ def vpsde_update_fn(self, x, t):
+ sde = self.sde
+ timestep = (t * (sde.N - 1) / sde.T).long()
+ beta = sde.discrete_betas.to(t.device)[timestep]
+ score = self.score_fn(x, t)
+ x_mean = (x + beta[:, None, None, None] * score) / torch.sqrt(1. - beta)[:, None, None, None]
+ noise = torch.randn_like(x)
+ x = x_mean + torch.sqrt(beta)[:, None, None, None] * noise
+ return x, x_mean
+
+ def update_fn(self, x, t):
+ if isinstance(self.sde, sde_lib.VESDE):
+ return self.vesde_update_fn(x, t)
+ elif isinstance(self.sde, sde_lib.VPSDE):
+ return self.vpsde_update_fn(x, t)
+
+
+@register_predictor(name='none')
+class NonePredictor(Predictor):
+ """An empty predictor that does nothing."""
+
+ def __init__(self, sde, score_fn, probability_flow=False):
+ pass
+
+ def update_fn(self, x, t):
+ return x, x
+
+
+@register_corrector(name='langevin')
+class LangevinCorrector(Corrector):
+ def __init__(self, sde, score_fn, snr, n_steps):
+ super().__init__(sde, score_fn, snr, n_steps)
+ if not isinstance(sde, sde_lib.VPSDE) \
+ and not isinstance(sde, sde_lib.VESDE) \
+ and not isinstance(sde, sde_lib.subVPSDE):
+ raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
+
+ def update_fn(self, x, t):
+ sde = self.sde
+ score_fn = self.score_fn
+ n_steps = self.n_steps
+ target_snr = self.snr
+ if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
+ timestep = (t * (sde.N - 1) / sde.T).long()
+ alpha = sde.alphas.to(t.device)[timestep]
+ else:
+ alpha = torch.ones_like(t)
+
+ for i in range(n_steps):
+ grad = score_fn(x, t)
+ noise = torch.randn_like(x)
+ grad_norm = torch.norm(grad.reshape(grad.shape[0], -1), dim=-1).mean()
+ noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
+ step_size = (target_snr * noise_norm / grad_norm) ** 2 * 2 * alpha
+ x_mean = x + step_size[:, None, None, None] * grad
+ x = x_mean + torch.sqrt(step_size * 2)[:, None, None, None] * noise
+
+ return x, x_mean
+
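+# Note (added for clarity, not part of the upstream code): the corrector above chooses
+# step_size = 2 * alpha * (snr * ||z|| / ||score||)^2 so that each Langevin step
+# x <- x + step_size * score + sqrt(2 * step_size) * z matches the target `snr`.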
+
+@register_corrector(name='ald')
+class AnnealedLangevinDynamics(Corrector):
+ """The original annealed Langevin dynamics predictor in NCSN/NCSNv2.
+
+ We include this corrector only for completeness. It was not directly used in our paper.
+ """
+
+ def __init__(self, sde, score_fn, snr, n_steps):
+ super().__init__(sde, score_fn, snr, n_steps)
+ if not isinstance(sde, sde_lib.VPSDE) \
+ and not isinstance(sde, sde_lib.VESDE) \
+ and not isinstance(sde, sde_lib.subVPSDE):
+ raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
+
+ def update_fn(self, x, t):
+ sde = self.sde
+ score_fn = self.score_fn
+ n_steps = self.n_steps
+ target_snr = self.snr
+ if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
+ timestep = (t * (sde.N - 1) / sde.T).long()
+ alpha = sde.alphas.to(t.device)[timestep]
+ else:
+ alpha = torch.ones_like(t)
+
+ std = self.sde.marginal_prob(x, t)[1]
+
+ for i in range(n_steps):
+ grad = score_fn(x, t)
+ noise = torch.randn_like(x)
+ step_size = (target_snr * std) ** 2 * 2 * alpha
+ x_mean = x + step_size[:, None, None, None] * grad
+ x = x_mean + noise * torch.sqrt(step_size * 2)[:, None, None, None]
+
+ return x, x_mean
+
+
+@register_corrector(name='none')
+class NoneCorrector(Corrector):
+ """An empty corrector that does nothing."""
+
+ def __init__(self, sde, score_fn, snr, n_steps):
+ pass
+
+ def update_fn(self, x, t):
+ return x, x
+
+
+def shared_predictor_update_fn(x, t, sde, model, predictor, probability_flow, continuous):
+ """A wrapper that configures and returns the update function of predictors."""
+ score_fn = mutils.get_score_fn(sde, model, train=False, continuous=continuous)
+ if predictor is None:
+ # Corrector-only sampler
+ predictor_obj = NonePredictor(sde, score_fn, probability_flow)
+ else:
+ predictor_obj = predictor(sde, score_fn, probability_flow)
+ return predictor_obj.update_fn(x, t)
+
+
+def shared_corrector_update_fn(x, t, sde, model, corrector, continuous, snr, n_steps):
+ """A wrapper that configures and returns the update function of correctors."""
+ score_fn = mutils.get_score_fn(sde, model, train=False, continuous=continuous)
+ if corrector is None:
+ # Predictor-only sampler
+ corrector_obj = NoneCorrector(sde, score_fn, snr, n_steps)
+ else:
+ corrector_obj = corrector(sde, score_fn, snr, n_steps)
+ return corrector_obj.update_fn(x, t)
+
+
+def get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,
+ n_steps=1, probability_flow=False, continuous=False,
+ denoise=True, eps=1e-3, device='cuda'):
+ """Create a Predictor-Corrector (PC) sampler.
+
+ Args:
+ sde: An `sde_lib.SDE` object representing the forward SDE.
+ shape: A sequence of integers. The expected shape of a single sample.
+ predictor: A subclass of `sampling.Predictor` representing the predictor algorithm.
+ corrector: A subclass of `sampling.Corrector` representing the corrector algorithm.
+ inverse_scaler: The inverse data normalizer.
+ snr: A `float` number. The signal-to-noise ratio for configuring correctors.
+ n_steps: An integer. The number of corrector steps per predictor update.
+ probability_flow: If `True`, solve the reverse-time probability flow ODE when running the predictor.
+ continuous: `True` indicates that the score model was continuously trained.
+ denoise: If `True`, add one-step denoising to the final samples.
+ eps: A `float` number. The reverse-time SDE and ODE are integrated to `eps` to avoid numerical issues.
+ device: PyTorch device.
+
+ Returns:
+ A sampling function that returns samples and the number of function evaluations during sampling.
+ """
+ # Create predictor & corrector update functions
+ predictor_update_fn = functools.partial(shared_predictor_update_fn,
+ sde=sde,
+ predictor=predictor,
+ probability_flow=probability_flow,
+ continuous=continuous)
+ corrector_update_fn = functools.partial(shared_corrector_update_fn,
+ sde=sde,
+ corrector=corrector,
+ continuous=continuous,
+ snr=snr,
+ n_steps=n_steps)
+
+ def pc_sampler(model):
+ """ The PC sampler function.
+
+ Args:
+ model: A score model.
+ Returns:
+ Samples, number of function evaluations.
+ """
+ with torch.no_grad():
+ # Initial sample
+ x = sde.prior_sampling(shape).to(device)
+ timesteps = torch.linspace(sde.T, eps, sde.N, device=device)
+
+ for i in range(sde.N):
+ t = timesteps[i]
+ vec_t = torch.ones(shape[0], device=t.device) * t
+ x, x_mean = corrector_update_fn(x, vec_t, model=model)
+ x, x_mean = predictor_update_fn(x, vec_t, model=model)
+
+ return inverse_scaler(x_mean if denoise else x), sde.N * (n_steps + 1)
+
+ return pc_sampler
+
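+# Illustrative usage sketch (added for clarity, not part of the upstream code; `sde`,
+# `score_model` and `inverse_scaler` are assumed to be set up by the caller):
+#
+#   sampler = get_pc_sampler(sde, shape=(16, 3, 32, 32),
+#                            predictor=get_predictor('reverse_diffusion'),
+#                            corrector=get_corrector('langevin'),
+#                            inverse_scaler=inverse_scaler, snr=0.16)
+#   samples, n_evals = sampler(score_model)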
+
+def get_ode_sampler(sde, shape, inverse_scaler,
+ denoise=False, rtol=1e-5, atol=1e-5,
+ method='RK45', eps=1e-3, device='cuda'):
+ """Probability flow ODE sampler with the black-box ODE solver.
+
+ Args:
+ sde: An `sde_lib.SDE` object that represents the forward SDE.
+ shape: A sequence of integers. The expected shape of a single sample.
+ inverse_scaler: The inverse data normalizer.
+ denoise: If `True`, add one-step denoising to final samples.
+ rtol: A `float` number. The relative tolerance level of the ODE solver.
+ atol: A `float` number. The absolute tolerance level of the ODE solver.
+ method: A `str`. The algorithm used for the black-box ODE solver.
+ See the documentation of `scipy.integrate.solve_ivp`.
+ eps: A `float` number. The reverse-time SDE/ODE will be integrated to `eps` for numerical stability.
+ device: PyTorch device.
+
+ Returns:
+ A sampling function that returns samples and the number of function evaluations during sampling.
+ """
+
+ def denoise_update_fn(model, x):
+ score_fn = get_score_fn(sde, model, train=False, continuous=True)
+ # Reverse diffusion predictor for denoising
+ predictor_obj = ReverseDiffusionPredictor(sde, score_fn, probability_flow=False)
+ vec_eps = torch.ones(x.shape[0], device=x.device) * eps
+ _, x = predictor_obj.update_fn(x, vec_eps)
+ return x
+
+ def drift_fn(model, x, t):
+ """Get the drift function of the reverse-time SDE."""
+ score_fn = get_score_fn(sde, model, train=False, continuous=True)
+ rsde = sde.reverse(score_fn, probability_flow=True)
+ return rsde.sde(x, t)[0]
+
+ def ode_sampler(model, z=None):
+ """The probability flow ODE sampler with black-box ODE solver.
+
+ Args:
+ model: A score model.
+ z: If present, generate samples from latent code `z`.
+ Returns:
+ samples, number of function evaluations.
+ """
+ with torch.no_grad():
+ # Initial sample
+ if z is None:
+ # If not given, sample the latent code from the prior distribution of the SDE.
+ x = sde.prior_sampling(shape).to(device)
+ else:
+ x = z
+
+ def ode_func(t, x):
+ x = from_flattened_numpy(x, shape).to(device).type(torch.float32)
+ vec_t = torch.ones(shape[0], device=x.device) * t
+ drift = drift_fn(model, x, vec_t)
+ return to_flattened_numpy(drift)
+
+ # Black-box ODE solver for the probability flow ODE
+ solution = integrate.solve_ivp(ode_func, (sde.T, eps), to_flattened_numpy(x),
+ rtol=rtol, atol=atol, method=method)
+ nfe = solution.nfev
+ x = torch.tensor(solution.y[:, -1]).reshape(shape).to(device).type(torch.float32)
+
+ # Denoising is equivalent to running one predictor step without adding noise
+ if denoise:
+ x = denoise_update_fn(model, x)
+
+ x = inverse_scaler(x)
+ return x, nfe
+
+ return ode_sampler
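+
+# Illustrative usage sketch (added for clarity, not part of the upstream code; `sde`,
+# `score_model` and `inverse_scaler` are assumed to be defined by the caller):
+#
+#   sampler = get_ode_sampler(sde, shape=(16, 3, 32, 32), inverse_scaler=inverse_scaler)
+#   samples, nfe = sampler(score_model)   # nfe = number of right-hand-side evaluations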
diff --git a/case_studies/diffpure/score_sde/sde_lib.py b/case_studies/diffpure/score_sde/sde_lib.py
new file mode 100644
index 0000000..1062602
--- /dev/null
+++ b/case_studies/diffpure/score_sde/sde_lib.py
@@ -0,0 +1,262 @@
+# ---------------------------------------------------------------
+# Taken from the following link as is from:
+# https://github.com/yang-song/score_sde_pytorch/blob/main/sde_lib.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_SCORE_SDE).
+# ---------------------------------------------------------------
+
+"""Abstract SDE classes, Reverse SDE, and VE/VP SDEs."""
+import abc
+import torch
+import numpy as np
+
+
+class SDE(abc.ABC):
+ """SDE abstract class. Functions are designed for a mini-batch of inputs."""
+
+ def __init__(self, N):
+ """Construct an SDE.
+
+ Args:
+ N: number of discretization time steps.
+ """
+ super().__init__()
+ self.N = N
+
+ @property
+ @abc.abstractmethod
+ def T(self):
+ """End time of the SDE."""
+ pass
+
+ @abc.abstractmethod
+ def sde(self, x, t):
+ pass
+
+ @abc.abstractmethod
+ def marginal_prob(self, x, t):
+ """Parameters to determine the marginal distribution of the SDE, $p_t(x)$."""
+ pass
+
+ @abc.abstractmethod
+ def prior_sampling(self, shape):
+ """Generate one sample from the prior distribution, $p_T(x)$."""
+ pass
+
+ @abc.abstractmethod
+ def prior_logp(self, z):
+ """Compute log-density of the prior distribution.
+
+ Useful for computing the log-likelihood via probability flow ODE.
+
+ Args:
+ z: latent code
+ Returns:
+ log probability density
+ """
+ pass
+
+ def discretize(self, x, t):
+ """Discretize the SDE in the form: x_{i+1} = x_i + f_i(x_i) + G_i z_i.
+
+ Useful for reverse diffusion sampling and probability flow sampling.
+ Defaults to Euler-Maruyama discretization.
+
+ Args:
+ x: a torch tensor
+ t: a torch float representing the time step (from 0 to `self.T`)
+
+ Returns:
+ f, G
+ """
+ dt = 1 / self.N
+ drift, diffusion = self.sde(x, t)
+ f = drift * dt
+ G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))
+ return f, G
+
+ def reverse(self, score_fn, probability_flow=False):
+ """Create the reverse-time SDE/ODE.
+
+ Args:
+ score_fn: A time-dependent score-based model that takes x and t and returns the score.
+ probability_flow: If `True`, create the reverse-time ODE used for probability flow sampling.
+ """
+ N = self.N
+ T = self.T
+ sde_fn = self.sde
+ discretize_fn = self.discretize
+
+ # Build the class for reverse-time SDE.
+ class RSDE(self.__class__):
+ def __init__(self):
+ self.N = N
+ self.probability_flow = probability_flow
+
+ @property
+ def T(self):
+ return T
+
+ def sde(self, x, t):
+ """Create the drift and diffusion functions for the reverse SDE/ODE."""
+ drift, diffusion = sde_fn(x, t)
+ score = score_fn(x, t)
+ drift = drift - diffusion[:, None, None, None] ** 2 * score * (0.5 if self.probability_flow else 1.)
+ # Set the diffusion function to zero for ODEs.
+ diffusion = 0. if self.probability_flow else diffusion
+ return drift, diffusion
+
+ def discretize(self, x, t):
+ """Create discretized iteration rules for the reverse diffusion sampler."""
+ f, G = discretize_fn(x, t)
+ rev_f = f - G[:, None, None, None] ** 2 * score_fn(x, t) * (0.5 if self.probability_flow else 1.)
+ rev_G = torch.zeros_like(G) if self.probability_flow else G
+ return rev_f, rev_G
+
+ return RSDE()
+
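+# Note (added for clarity, not part of the upstream code): the reverse-time SDE built
+# above is dx = [f(x, t) - g(t)^2 * score(x, t)] dt + g(t) dw_bar; with
+# probability_flow=True the noise term is dropped and the drift uses a factor of 0.5,
+# giving the deterministic probability flow ODE with the same marginals.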
+
+class VPSDE(SDE):
+ def __init__(self, beta_min=0.1, beta_max=20, N=1000):
+ """Construct a Variance Preserving SDE.
+
+ Args:
+ beta_min: value of beta(0)
+ beta_max: value of beta(1)
+ N: number of discretization steps
+ """
+ super().__init__(N)
+ self.beta_0 = beta_min
+ self.beta_1 = beta_max
+ self.N = N
+ self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
+ self.alphas = 1. - self.discrete_betas
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+ self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
+ self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
+
+ @property
+ def T(self):
+ return 1
+
+ def sde(self, x, t):
+ beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
+ drift = -0.5 * beta_t[:, None, None, None] * x
+ diffusion = torch.sqrt(beta_t)
+ return drift, diffusion
+
+ def marginal_prob(self, x, t):
+ log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
+ mean = torch.exp(log_mean_coeff[:, None, None, None]) * x
+ std = torch.sqrt(1. - torch.exp(2. * log_mean_coeff))
+ return mean, std
+
+ def prior_sampling(self, shape):
+ return torch.randn(*shape)
+
+ def prior_logp(self, z):
+ shape = z.shape
+ N = np.prod(shape[1:])
+ logps = -N / 2. * np.log(2 * np.pi) - torch.sum(z ** 2, dim=(1, 2, 3)) / 2.
+ return logps
+
+ def discretize(self, x, t):
+ """DDPM discretization."""
+ timestep = (t * (self.N - 1) / self.T).long()
+ beta = self.discrete_betas.to(x.device)[timestep]
+ alpha = self.alphas.to(x.device)[timestep]
+ sqrt_beta = torch.sqrt(beta)
+ f = torch.sqrt(alpha)[:, None, None, None] * x - x
+ G = sqrt_beta
+ return f, G
+
+
+class subVPSDE(SDE):
+ def __init__(self, beta_min=0.1, beta_max=20, N=1000):
+ """Construct the sub-VP SDE that excels at likelihoods.
+
+ Args:
+ beta_min: value of beta(0)
+ beta_max: value of beta(1)
+ N: number of discretization steps
+ """
+ super().__init__(N)
+ self.beta_0 = beta_min
+ self.beta_1 = beta_max
+ self.N = N
+
+ @property
+ def T(self):
+ return 1
+
+ def sde(self, x, t):
+ beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
+ drift = -0.5 * beta_t[:, None, None, None] * x
+ discount = 1. - torch.exp(-2 * self.beta_0 * t - (self.beta_1 - self.beta_0) * t ** 2)
+ diffusion = torch.sqrt(beta_t * discount)
+ return drift, diffusion
+
+ def marginal_prob(self, x, t):
+ log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
+ mean = torch.exp(log_mean_coeff)[:, None, None, None] * x
+ std = 1 - torch.exp(2. * log_mean_coeff)
+ return mean, std
+
+ def prior_sampling(self, shape):
+ return torch.randn(*shape)
+
+ def prior_logp(self, z):
+ shape = z.shape
+ N = np.prod(shape[1:])
+ return -N / 2. * np.log(2 * np.pi) - torch.sum(z ** 2, dim=(1, 2, 3)) / 2.
+
+
+class VESDE(SDE):
+ def __init__(self, sigma_min=0.01, sigma_max=50, N=1000):
+ """Construct a Variance Exploding SDE.
+
+ Args:
+ sigma_min: smallest sigma.
+ sigma_max: largest sigma.
+ N: number of discretization steps
+ """
+ super().__init__(N)
+ self.sigma_min = sigma_min
+ self.sigma_max = sigma_max
+ self.discrete_sigmas = torch.exp(torch.linspace(np.log(self.sigma_min), np.log(self.sigma_max), N))
+ self.N = N
+
+ @property
+ def T(self):
+ return 1
+
+ def sde(self, x, t):
+ sigma = self.sigma_min * (self.sigma_max / self.sigma_min) ** t
+ drift = torch.zeros_like(x)
+ diffusion = sigma * torch.sqrt(torch.tensor(2 * (np.log(self.sigma_max) - np.log(self.sigma_min)),
+ device=t.device))
+ return drift, diffusion
+
+ def marginal_prob(self, x, t):
+ std = self.sigma_min * (self.sigma_max / self.sigma_min) ** t
+ mean = x
+ return mean, std
+
+ def prior_sampling(self, shape):
+ return torch.randn(*shape) * self.sigma_max
+
+ def prior_logp(self, z):
+ shape = z.shape
+ N = np.prod(shape[1:])
+ return -N / 2. * np.log(2 * np.pi * self.sigma_max ** 2) - torch.sum(z ** 2, dim=(1, 2, 3)) / (2 * self.sigma_max ** 2)
+
+ def discretize(self, x, t):
+ """SMLD(NCSN) discretization."""
+ timestep = (t * (self.N - 1) / self.T).long()
+ sigma = self.discrete_sigmas.to(t.device)[timestep]
+ adjacent_sigma = torch.where(timestep == 0, torch.zeros_like(t),
+ self.discrete_sigmas[timestep - 1].to(t.device))
+ f = torch.zeros_like(x)
+ G = torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)
+ return f, G
\ No newline at end of file
diff --git a/case_studies/diffpure/slurm/analyze_logs.py b/case_studies/diffpure/slurm/analyze_logs.py
new file mode 100644
index 0000000..757e543
--- /dev/null
+++ b/case_studies/diffpure/slurm/analyze_logs.py
@@ -0,0 +1,74 @@
+import numpy as np
+import glob
+import argparse
+import os
+
+
+def parse_log(path):
+ with open(path, "r") as f:
+ lines = f.readlines()
+
+ lines = [l.strip() for l in lines]
+
+ if len(lines) < 4:
+ return None
+
+ if lines[-4].startswith("interior-vs-boundary discimination"):
+ asr = float(lines[-4].split(":")[1].strip())
+ logit_diff = float(lines[-3].split(":")[1].split("+-")[0].strip())
+ validation_acc = eval(lines[-2].split(":")[-1].replace("nan", "np.nan"))
+ if type(validation_acc) is float:
+ validation_acc = (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
+ validation_acc = np.array(validation_acc)
+ n_failed = int(lines[-1].split("for ")[1].split("/")[0].strip())
+
+ return asr, logit_diff, validation_acc, n_failed
+ else:
+ return None
+
+
+def main(input_folder):
+ logs = glob.glob(os.path.join(input_folder, "*.log"))
+
+ results = [(p, parse_log(p)) for p in logs]
+
+ incomplete_logs = [it[0] for it in results if it[1] is None]
+ if len(incomplete_logs) > 0:
+ print("Found incomplete logs for experiments:")
+ for it in incomplete_logs:
+ print(f"\t{it}")
+
+ results = [it[1] for it in results if it[1] is not None]
+
+ if len(results) == 0:
+ print("No results found.")
+ return
+
+ results = results[:512]
+
+ properties = [np.array([it[i] for it in results]) for i in range(len(results[0]))]
+
+ n_samples = len(results)
+ n_failed_samples = np.sum(properties[3])
+
+ # filter failed samples
+ failed_samples = [idx for idx in range(len(properties[3])) if properties[3][idx] == 1]
+ properties = [[prop[idx] for idx in range(len(prop)) if idx not in failed_samples] for prop in properties]
+
+ means = [np.mean(prop, 0) for prop in properties]
+ stds = [np.std(prop, 0) for prop in properties]
+
+ print(f"ASR: {means[0]}")
+ print(f"Normalized Logit-Difference-Improvement: {means[1]} +- {stds[1]}")
+ print(f"Validation Accuracy (I, B, BS, BC, R. ASR S, R. ASR C): {tuple(means[2])}")
+ print(f"Setup failed for {n_failed_samples}/{n_samples} samples")
+
+
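+# Example invocation (illustrative; the log folder depends on the experiment setup):
+#   python case_studies/diffpure/slurm/analyze_logs.py --input <path-to-log-folder>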
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--input", "-i", required=True)
+ args = parser.parse_args()
+ main(args.input)
+
diff --git a/case_studies/diffpure/slurm/binarization_test.sh b/case_studies/diffpure/slurm/binarization_test.sh
new file mode 100644
index 0000000..d632695
--- /dev/null
+++ b/case_studies/diffpure/slurm/binarization_test.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+SLURMDIR="/home/bethge/rzimmermann/diffpure-slurm-logs/"
+function execute_job {
+ local arguments=$1
+
+ echo "Using arguments: $arguments"
+ #read -r -p "Are you sure? [enter N to cancel] " response
+ #if [ "$response" == "N" ]; then
+ # exit 1
+ #fi
+
+ sbatch case_studies/diffpure/slurm/binarization_test_single.sh "${arguments}" || echo "error in $SLURMDIR/slog--$SLURM_JOB_ID.err on $SLURM_JOB_NODELIST" > $SLURMDIR/slog-common.err
+}
+
+mkdir -p "$SLURMDIR"
+
+
+for startidx in {1..500}; do
+ endidx=$(expr $startidx + 1)
+ execute_job "1 1 $startidx $endidx"
+done
\ No newline at end of file
diff --git a/case_studies/diffpure/slurm/binarization_test_single.sh b/case_studies/diffpure/slurm/binarization_test_single.sh
new file mode 100644
index 0000000..f8a5ddd
--- /dev/null
+++ b/case_studies/diffpure/slurm/binarization_test_single.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#SBATCH --ntasks=1 # Number of tasks (see below)
+#SBATCH --nodes=1 # Ensure that all cores are on one machine
+#SBATCH --partition=gpu-2080ti # Partition to submit to
+#SBATCH --mem=20G # Memory pool for all cores (see also --mem-per-cpu)
+#SBATCH --time=3-00:00 # Runtime in D-HH:MM
+#SBATCH --output=/home/bethge/rzimmermann/diffpure-slurm-logs/slog-%j.out # File to which STDOUT will be written
+#SBATCH --error=/home/bethge/rzimmermann/diffpure-slurm-logs/slog-%j.err # File to which STDERR will be written
+#SBATCH --gres=gpu:1 # Request one GPU
+#SBATCH --cpus-per-task=8
+#SBATCH --job-name=diffpure_binarization_test
+
+
+function _termmsg()
+{
+ SLURMDIR="/home/bethge/rzimmermann/diffpure-slurm-logs/"
+ echo "terminated $SLURM_JOB_ID on $SLURM_JOB_NODELIST. Check slog-$SLURM_JOB_ID.err" > $SLURMDIR/slog-common.err
+}
+
+trap _termmsg SIGTERM
+
+arguments=$1
+
+# include information about the job in the output
+scontrol show job=$SLURM_JOB_ID
+
+echo "GPU information:"
+nvidia-smi --query-gpu=memory.used --format=csv
+
+SCRATCH_DIRECTORY="/scratch_local/$SLURM_JOB_USER-$SLURM_JOBID"
+
+echo "Copying SIF..."
+qcopy /mnt/qb/home/bethge/rzimmermann/sifs/diffpure.sif $SCRATCH_DIRECTORY
+echo "SIF copied!"
+
+echo "arguments: $1"
+
+log_folder="/home/bethge/rzimmermann/diffpure-logs/"
+mkdir -p ${log_folder}
+
+log_path="${log_folder}/${arguments// /_}"
+
+echo "Slurm Job ID: $SLURM_JOB_ID" >> $log_path.out
+srun singularity exec --nv --bind /mnt/qb/ --bind $SCRATCH_DIRECTORY "$SCRATCH_DIRECTORY/diffpure.sif" \
+ /bin/bash -c "cd /mnt/qb/bethge/rzimmermann/code/active-adversarial-tests-internal && ./case_studies/diffpure/run_scripts/cifar10/run_cifar_rand_inf_binarization_test.sh $arguments 2> ${log_path}.err | tee ${log_path}.log"
diff --git a/case_studies/diffpure/stadv_eot/attacks.py b/case_studies/diffpure/stadv_eot/attacks.py
new file mode 100644
index 0000000..f3aea13
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/attacks.py
@@ -0,0 +1,136 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This work is licensed under the NVIDIA Source Code License
+# for DiffPure. To view a copy of this license, see the LICENSE file.
+# ---------------------------------------------------------------
+
+import functools
+import torch
+from torch import nn
+from torch import optim
+
+# mister_ed
+from .recoloradv.mister_ed import loss_functions as lf
+from .recoloradv.mister_ed import adversarial_training as advtrain
+from .recoloradv.mister_ed import adversarial_perturbations as ap
+from .recoloradv.mister_ed import adversarial_attacks as aa
+from .recoloradv.mister_ed import spatial_transformers as st
+
+
+PGD_ITERS = 20
+
+
+def run_attack_with_random_targets(attack, model, inputs, labels, num_classes):
+ """
+ Runs an attack with targets randomly selected from all classes besides the
+ correct one. The attack should be a function from (inputs, labels) to
+ adversarial examples.
+ """
+
+ rand_targets = torch.randint(
+ 0, num_classes - 1, labels.size(),
+ dtype=labels.dtype, device=labels.device,
+ )
+ targets = torch.remainder(labels + rand_targets + 1, num_classes)
+
+ adv_inputs = attack(inputs, targets)
+ adv_labels = model(adv_inputs).argmax(1)
+ unsuccessful = adv_labels != targets
+ adv_inputs[unsuccessful] = inputs[unsuccessful]
+
+ return adv_inputs
+
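+# Illustrative sketch (added for clarity, not part of the original DiffPure code): any
+# callable mapping (inputs, targets) to adversarial examples can be wrapped, e.g.
+#
+#   adv = run_attack_with_random_targets(
+#       lambda x, y: my_targeted_attack(x, y),   # `my_targeted_attack` is hypothetical
+#       model, inputs, labels, num_classes=10)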
+
+class MisterEdAttack(nn.Module):
+ """
+ Base class for attacks using the mister_ed library.
+ """
+
+ def __init__(self, model, threat_model, randomize=False,
+ perturbation_norm_loss=False, lr=0.001, random_targets=False,
+ num_classes=None, **kwargs):
+ super().__init__()
+
+ self.model = model
+ self.normalizer = nn.Identity()
+
+ self.threat_model = threat_model
+ self.randomize = randomize
+ self.perturbation_norm_loss = perturbation_norm_loss
+ self.attack_kwargs = kwargs
+ self.lr = lr
+ self.random_targets = random_targets
+ self.num_classes = num_classes
+
+ self.attack = None
+
+ def _setup_attack(self):
+ cw_loss = lf.CWLossF6(self.model, self.normalizer, kappa=float('inf'))
+ if self.random_targets:
+ cw_loss.forward = functools.partial(cw_loss.forward, targeted=True)
+ perturbation_loss = lf.PerturbationNormLoss(lp=2)
+ pert_factor = 0.0
+ if self.perturbation_norm_loss is True:
+ pert_factor = 0.05
+ elif type(self.perturbation_norm_loss) is float:
+ pert_factor = self.perturbation_norm_loss
+ adv_loss = lf.RegularizedLoss({
+ 'cw': cw_loss,
+ 'pert': perturbation_loss,
+ }, {
+ 'cw': 1.0,
+ 'pert': pert_factor,
+ }, negate=True)
+
+ self.pgd_attack = aa.PGD(self.model, self.normalizer,
+ self.threat_model(), adv_loss)
+
+ attack_params = {
+ 'optimizer': optim.Adam,
+ 'optimizer_kwargs': {'lr': self.lr},
+ 'signed': False,
+ 'verbose': False,
+ 'num_iterations': 0 if self.randomize else PGD_ITERS,
+ 'random_init': self.randomize,
+ }
+ attack_params.update(self.attack_kwargs)
+
+ self.attack = advtrain.AdversarialAttackParameters(
+ self.pgd_attack,
+ 1.0,
+ attack_specific_params={'attack_kwargs': attack_params},
+ )
+ self.attack.set_gpu(False)
+
+ def forward(self, inputs, labels):
+ if self.attack is None:
+ self._setup_attack()
+ assert self.attack is not None
+
+ if self.random_targets:
+ return run_attack_with_random_targets(
+ lambda inputs, labels: self.attack.attack(inputs, labels)[0],
+ self.model,
+ inputs,
+ labels,
+ num_classes=self.num_classes,
+ )
+ else:
+ return self.attack.attack(inputs, labels)[0]
+
+
+class StAdvAttack(MisterEdAttack):
+ def __init__(self, model, bound=0.05, **kwargs):
+ kwargs.setdefault('lr', 0.01)
+ super().__init__(
+ model,
+ threat_model=lambda: ap.ThreatModel(ap.ParameterizedXformAdv, {
+ 'lp_style': 'inf',
+ 'lp_bound': bound,
+ 'xform_class': st.FullSpatial,
+ 'use_stadv': True,
+ }),
+ perturbation_norm_loss=0.0025 / bound,
+ **kwargs,
+ )
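+
+# Illustrative usage sketch (added for clarity, not part of the original DiffPure code;
+# `model` and the batch `x`, `y` are assumed to be defined by the caller):
+#
+#   attack = StAdvAttack(model, bound=0.05, num_classes=10, random_targets=True)
+#   x_adv = attack(x, y)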
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/LICENSE_RECOLORADV b/case_studies/diffpure/stadv_eot/recoloradv/LICENSE_RECOLORADV
new file mode 100644
index 0000000..3754756
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/LICENSE_RECOLORADV
@@ -0,0 +1,17 @@
+MIT License
+Copyright (c) 2018 YOUR NAME
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/__init__.py b/case_studies/diffpure/stadv_eot/recoloradv/__init__.py
new file mode 100644
index 0000000..ad92927
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/__init__.py
@@ -0,0 +1,12 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/__init__.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/color_spaces.py b/case_studies/diffpure/stadv_eot/recoloradv/color_spaces.py
new file mode 100644
index 0000000..cc0e1e8
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/color_spaces.py
@@ -0,0 +1,272 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/color_spaces.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+"""
+Contains classes that convert from RGB to various other color spaces and back.
+"""
+
+import torch
+import numpy as np
+import math
+
+
+class ColorSpace(object):
+ """
+ Base class for color spaces.
+ """
+
+ def from_rgb(self, imgs):
+ """
+ Converts an Nx3xWxH tensor in RGB color space to a Nx3xWxH tensor in
+ this color space. All outputs should be in the 0-1 range.
+ """
+ raise NotImplementedError()
+
+ def to_rgb(self, imgs):
+ """
+ Converts an Nx3xWxH tensor in this color space to a Nx3xWxH tensor in
+ RGB color space.
+ """
+ raise NotImplementedError()
+
+
+class RGBColorSpace(ColorSpace):
+ """
+ RGB color space. Just applies identity transformation.
+ """
+
+ def from_rgb(self, imgs):
+ return imgs
+
+ def to_rgb(self, imgs):
+ return imgs
+
+
+class YPbPrColorSpace(ColorSpace):
+ """
+ YPbPr color space. Uses ITU-R BT.601 standard by default.
+ """
+
+ def __init__(self, kr=0.299, kg=0.587, kb=0.114, luma_factor=1,
+ chroma_factor=1):
+ self.kr, self.kg, self.kb = kr, kg, kb
+ self.luma_factor = luma_factor
+ self.chroma_factor = chroma_factor
+
+ def from_rgb(self, imgs):
+ r, g, b = imgs.permute(1, 0, 2, 3)
+
+ y = r * self.kr + g * self.kg + b * self.kb
+ pb = (b - y) / (2 * (1 - self.kb))
+ pr = (r - y) / (2 * (1 - self.kr))
+
+ return torch.stack([y * self.luma_factor,
+ pb * self.chroma_factor + 0.5,
+ pr * self.chroma_factor + 0.5], 1)
+
+ def to_rgb(self, imgs):
+ y_prime, pb_prime, pr_prime = imgs.permute(1, 0, 2, 3)
+ y = y_prime / self.luma_factor
+ pb = (pb_prime - 0.5) / self.chroma_factor
+ pr = (pr_prime - 0.5) / self.chroma_factor
+
+ b = pb * 2 * (1 - self.kb) + y
+ r = pr * 2 * (1 - self.kr) + y
+ g = (y - r * self.kr - b * self.kb) / self.kg
+
+ return torch.stack([r, g, b], 1).clamp(0, 1)
+
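+# Illustrative round-trip sketch (added for clarity, not part of the original file):
+#
+#   cspace = YPbPrColorSpace()
+#   imgs = torch.rand(4, 3, 32, 32)      # N x 3 x W x H batch of RGB values in [0, 1]
+#   encoded = cspace.from_rgb(imgs)      # same shape, values still in [0, 1]
+#   recovered = cspace.to_rgb(encoded)   # approximately equal to `imgs`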
+
+class ApproxHSVColorSpace(ColorSpace):
+ """
+ Converts from RGB to approximately the HSV cone using a much smoother
+ transformation.
+ """
+
+ def from_rgb(self, imgs):
+ r, g, b = imgs.permute(1, 0, 2, 3)
+
+ x = r * np.sqrt(2) / 3 - g / (np.sqrt(2) * 3) - b / (np.sqrt(2) * 3)
+ y = g / np.sqrt(6) - b / np.sqrt(6)
+ z, _ = imgs.max(1)
+
+ return torch.stack([z, x + 0.5, y + 0.5], 1)
+
+ def to_rgb(self, imgs):
+ z, xp, yp = imgs.permute(1, 0, 2, 3)
+ x, y = xp - 0.5, yp - 0.5
+
+ rp = float(np.sqrt(2)) * x
+ gp = -x / np.sqrt(2) + y * np.sqrt(3 / 2)
+ bp = -x / np.sqrt(2) - y * np.sqrt(3 / 2)
+
+ delta = z - torch.max(torch.stack([rp, gp, bp], 1), 1)[0]
+ r, g, b = rp + delta, gp + delta, bp + delta
+
+ return torch.stack([r, g, b], 1).clamp(0, 1)
+
+
+class HSVConeColorSpace(ColorSpace):
+ """
+ Converts from RGB to the HSV "cone", where (x, y, z) =
+ (s * v cos h, s * v sin h, v). Note that this cone is then squashed to fit
+ in [0, 1]^3 by letting (x', y', z') = ((x + 1) / 2, (y + 1) / 2, z).
+
+ WARNING: has a very complex derivative, not very useful in practice
+ """
+
+ def from_rgb(self, imgs):
+ r, g, b = imgs.permute(1, 0, 2, 3)
+
+ mx, argmx = imgs.max(1)
+ mn, _ = imgs.min(1)
+ chroma = mx - mn
+ eps = 1e-10
+ h_max_r = math.pi / 3 * (g - b) / (chroma + eps)
+ h_max_g = math.pi / 3 * (b - r) / (chroma + eps) + math.pi * 2 / 3
+ h_max_b = math.pi / 3 * (r - g) / (chroma + eps) + math.pi * 4 / 3
+
+ h = (((argmx == 0) & (chroma != 0)).float() * h_max_r
+ + ((argmx == 1) & (chroma != 0)).float() * h_max_g
+ + ((argmx == 2) & (chroma != 0)).float() * h_max_b)
+
+ x = torch.cos(h) * chroma
+ y = torch.sin(h) * chroma
+ z = mx
+
+ return torch.stack([(x + 1) / 2, (y + 1) / 2, z], 1)
+
+ def _to_rgb_part(self, h, chroma, v, n):
+ """
+ Implements the function f(n) defined here:
+ https://en.wikipedia.org/wiki/HSL_and_HSV#Alternative_HSV_to_RGB
+ """
+
+ k = (n + h * math.pi / 3) % 6
+ return v - chroma * torch.min(k, 4 - k).clamp(0, 1)
+
+ def to_rgb(self, imgs):
+ xp, yp, z = imgs.permute(1, 0, 2, 3)
+ x, y = xp * 2 - 1, yp * 2 - 1
+
+ # prevent NaN gradients when calculating atan2
+ x_nonzero = (1 - 2 * (torch.sign(x) == -1).float()) * (torch.abs(x) + 1e-10)
+ h = torch.atan2(y, x_nonzero)
+ v = z.clamp(0, 1)
+ chroma = torch.min(torch.sqrt(x ** 2 + y ** 2 + 1e-10), v)
+
+ r = self._to_rgb_part(h, chroma, v, 5)
+ g = self._to_rgb_part(h, chroma, v, 3)
+ b = self._to_rgb_part(h, chroma, v, 1)
+
+ return torch.stack([r, g, b], 1).clamp(0, 1)
+
+
+class CIEXYZColorSpace(ColorSpace):
+ """
+ The 1931 CIE XYZ color space (assuming input is in sRGB).
+
+ Warning: may have values outside [0, 1] range. Should only be used in
+ the process of converting to/from other color spaces.
+ """
+
+ def from_rgb(self, imgs):
+ # apply gamma correction
+ small_values_mask = (imgs < 0.04045).float()
+ imgs_corrected = (
+ (imgs / 12.92) * small_values_mask +
+ ((imgs + 0.055) / 1.055) ** 2.4 * (1 - small_values_mask)
+ )
+
+ # linear transformation to XYZ
+ r, g, b = imgs_corrected.permute(1, 0, 2, 3)
+ x = 0.4124 * r + 0.3576 * g + 0.1805 * b
+ y = 0.2126 * r + 0.7152 * g + 0.0722 * b
+ z = 0.0193 * r + 0.1192 * g + 0.9504 * b
+
+ return torch.stack([x, y, z], 1)
+
+ def to_rgb(self, imgs):
+ # linear transformation
+ x, y, z = imgs.permute(1, 0, 2, 3)
+ r = 3.2406 * x - 1.5372 * y - 0.4986 * z
+ g = -0.9689 * x + 1.8758 * y + 0.0415 * z
+ b = 0.0557 * x - 0.2040 * y + 1.0570 * z
+
+ imgs = torch.stack([r, g, b], 1)
+
+ # apply gamma correction
+ small_values_mask = (imgs < 0.0031308).float()
+ imgs_clamped = imgs.clamp(min=1e-10) # prevent NaN gradients
+ imgs_corrected = (
+ (12.92 * imgs) * small_values_mask +
+ (1.055 * imgs_clamped ** (1 / 2.4) - 0.055) *
+ (1 - small_values_mask)
+ )
+
+ return imgs_corrected
+
+
+class CIELUVColorSpace(ColorSpace):
+ """
+ Converts to the 1976 CIE L*u*v* color space.
+ """
+
+ def __init__(self, up_white=0.1978, vp_white=0.4683, y_white=1,
+ eps=1e-10):
+ self.xyz_cspace = CIEXYZColorSpace()
+ self.up_white = up_white
+ self.vp_white = vp_white
+ self.y_white = y_white
+ self.eps = eps
+
+ def from_rgb(self, imgs):
+ x, y, z = self.xyz_cspace.from_rgb(imgs).permute(1, 0, 2, 3)
+
+ # calculate u' and v'
+ denom = x + 15 * y + 3 * z + self.eps
+ up = 4 * x / denom
+ vp = 9 * y / denom
+
+ # calculate L*, u*, and v*
+ small_values_mask = (y / self.y_white < (6 / 29) ** 3).float()
+ y_clamped = y.clamp(min=self.eps) # prevent NaN gradients
+ L = (
+ ((29 / 3) ** 3 * y / self.y_white) * small_values_mask +
+ (116 * (y_clamped / self.y_white) ** (1 / 3) - 16) *
+ (1 - small_values_mask)
+ )
+ u = 13 * L * (up - self.up_white)
+ v = 13 * L * (vp - self.vp_white)
+
+ return torch.stack([L / 100, (u + 100) / 200, (v + 100) / 200], 1)
+
+ def to_rgb(self, imgs):
+ L = imgs[:, 0, :, :] * 100
+ u = imgs[:, 1, :, :] * 200 - 100
+ v = imgs[:, 2, :, :] * 200 - 100
+
+ up = u / (13 * L + self.eps) + self.up_white
+ vp = v / (13 * L + self.eps) + self.vp_white
+
+ small_values_mask = (L <= 8).float()
+ y = (
+ (self.y_white * L * (3 / 29) ** 3) * small_values_mask +
+ (self.y_white * ((L + 16) / 116) ** 3) * (1 - small_values_mask)
+ )
+ denom = 4 * vp + self.eps
+ x = y * 9 * up / denom
+ z = y * (12 - 3 * up - 20 * vp) / denom
+
+ return self.xyz_cspace.to_rgb(
+ torch.stack([x, y, z], 1).clamp(0, 1.1)).clamp(0, 1)
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/color_transformers.py b/case_studies/diffpure/stadv_eot/recoloradv/color_transformers.py
new file mode 100644
index 0000000..e5df968
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/color_transformers.py
@@ -0,0 +1,379 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/color_transformers.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+"""
+Contains various parameterizations for spatial transformation in 3D color space.
+"""
+
+import torch
+import torch.nn as nn
+from .mister_ed.utils import pytorch_utils as utils
+from torch.autograd import Variable
+from . import norms
+from functools import lru_cache
+
+
+##############################################################################
+# #
+# SKELETON CLASS #
+# #
+##############################################################################
+
+class ParameterizedTransformation(nn.Module):
+ """ General class of transformations.
+ All subclasses need the following methods:
+ - norm: no args -> scalar variable
+ - identity_params: shape -> TENSOR : takes an input shape and outputs
+ the subclass-specific parameter for the identity
+ transformation
+ - forward : Variable -> Variable - is the transformation
+ """
+
+ def __init__(self, **kwargs):
+ super(ParameterizedTransformation, self).__init__()
+
+ if kwargs.get('manual_gpu', None) is not None:
+ self.use_gpu = kwargs['manual_gpu']
+ else:
+ self.use_gpu = utils.use_gpu()
+
+ def clone(self, shape=None, example_index=None):
+ raise NotImplementedError()
+
+ def norm(self, lp='inf'):
+ raise NotImplementedError("Need to call subclass's norm!")
+
+ @classmethod
+ def identity_params(self, shape):
+ raise NotImplementedError("Need to call subclass's identity_params!")
+
+ def merge_xform(self, other, self_mask):
+ """ Takes in an other instance of this same class with the same
+ shape of parameters (NxSHAPE) and a self_mask bytetensor of length
+ N and outputs the merge between self's parameters for the indices
+ of 1s in the self_mask and other's parameters for the indices of 0's
+ ARGS:
+ other: instance of same class as self with params of shape NxSHAPE -
+ the thing we merge with this one
+ self_mask : ByteTensor (length N) - which indices of parameters we
+ keep from self, and which we keep from other
+ RETURNS:
+ New instance of this class that's merged between the self and other
+ (same shaped params)
+ """
+
+ # JUST DO ASSERTS IN THE SKELETON CLASS
+ assert self.__class__ == other.__class__
+
+ self_params = self.xform_params.data
+ other_params = other.xform_params.data
+ assert self_params.shape == other_params.shape
+ assert self_params.shape[0] == self_mask.shape[0]
+ assert other_params.shape[0] == self_mask.shape[0]
+
+ new_xform = self.__class__(shape=self.img_shape)
+
+ new_params = utils.fold_mask(self.xform_params.data,
+ other.xform_params.data, self_mask)
+ new_xform.xform_params = nn.Parameter(new_params)
+ new_xform.use_gpu = self.use_gpu
+ return new_xform
+
+ def forward(self, examples):
+ raise NotImplementedError("Need to call subclass's forward!")
+
+
+class AffineTransform(ParameterizedTransformation):
+ def __init__(self, *args, **kwargs):
+ super(AffineTransform, self).__init__(**kwargs)
+ img_shape = kwargs['shape']
+ self.img_shape = img_shape
+ self.xform_params = nn.Parameter(self.identity_params(img_shape))
+
+ def clone(self, shape=None, example_index=None):
+ xform = AffineTransform(shape=shape or self.img_shape)
+ if example_index is None:
+ my_params = self.xform_params
+ else:
+ my_params = self.xform_params[example_index][None]
+ xform.xform_params = nn.Parameter(
+ my_params.clone()
+ .expand(shape[0], -1, -1)
+ )
+ return xform
+
+ def norm(self, lp='inf'):
+ identity_params = Variable(self.identity_params(self.img_shape))
+ return utils.batchwise_norm(self.xform_params - identity_params, lp,
+ dim=0)
+
+ def identity_params(self, shape):
+ num_examples = shape[0]
+ identity_affine_transform = torch.zeros(num_examples, 3, 4)
+ if self.use_gpu:
+ identity_affine_transform = identity_affine_transform.cuda()
+
+ identity_affine_transform[:, 0, 0] = 1
+ identity_affine_transform[:, 1, 1] = 1
+ identity_affine_transform[:, 2, 2] = 1
+
+ return identity_affine_transform
+
+ def project_params(self, lp, lp_bound):
+ assert isinstance(lp, int) or lp == 'inf'
+ diff = self.xform_params.data - self.identity_params(self.img_shape)
+ new_diff = utils.batchwise_lp_project(diff, lp, lp_bound)
+ self.xform_params.data.add_(new_diff - diff)
+
+ def forward(self, x):
+ N, _, W, H = self.img_shape
+ x_padded = torch.cat([x, torch.ones(N, 1, W, H)], 1).permute(
+ 0, 2, 3, 1)
+ transform_padded = self.xform_params[:, None, None, :, :] \
+ .expand(-1, W, H, -1, -1)
+ x_transformed = transform_padded.matmul(x_padded[..., None]) \
+ .squeeze(4) \
+ .permute(0, 3, 1, 2)
+ return x_transformed
+
+
+class FullSpatial(ParameterizedTransformation):
+ def __init__(self, *args, resolution_x=8,
+ resolution_y=8, resolution_z=8, **kwargs):
+ super(FullSpatial, self).__init__(**kwargs)
+
+ self.resolution_x = resolution_x
+ self.resolution_y = resolution_y
+ self.resolution_z = resolution_z
+
+ img_shape = kwargs['shape']
+ self.img_shape = img_shape
+
+ self.cspace = kwargs.get('cspace')
+
+ batch_size = self.img_shape[0]
+ self.identity_params = FullSpatial.construct_identity_params(
+ batch_size,
+ self.resolution_x,
+ self.resolution_y,
+ self.resolution_z,
+ torch.cuda.current_device() if self.use_gpu else None,
+ )
+ self.xform_params = nn.Parameter(
+ torch.empty_like(self.identity_params)
+ .copy_(self.identity_params)
+ )
+
+ def clone(self, shape=None, example_index=None):
+ xform = FullSpatial(
+ shape=shape or self.img_shape,
+ resolution_x=self.resolution_x,
+ resolution_y=self.resolution_y,
+ resolution_z=self.resolution_z,
+ cspace=self.cspace,
+ )
+ if example_index is None:
+ my_params = self.xform_params
+ else:
+ my_params = self.xform_params[example_index][None]
+ xform.xform_params = nn.Parameter(
+ my_params.clone()
+ .expand(shape[0], -1, -1, -1, -1)
+ )
+ return xform
+
+ def smoothness_norm(self):
+ return norms.smoothness(self.xform_params -
+ self.identity_params)
+
+ def norm(self, lp='inf'):
+ if isinstance(lp, int) or lp == 'inf':
+ return utils.batchwise_norm(
+ self.xform_params - self.identity_params,
+ lp, dim=0,
+ )
+ else:
+ assert lp == 'smooth'
+ return self.smoothness_norm()
+
+ def clip_params(self):
+ """
+ Clips the parameters to be between 0 and 1 and also within the color
+ space's gamut.
+ """
+
+ clamp_params = torch.clamp(self.xform_params, 0, 1).data
+
+ params_shape = self.xform_params.size()
+ flattened_params = (
+ clamp_params
+ .permute(0, 4, 1, 2, 3)
+ .reshape(params_shape[0], 3, -1, 1)
+ )
+ gamut_params = self.cspace.from_rgb(self.cspace.to_rgb(
+ flattened_params))
+ clamp_params = (
+ gamut_params
+ .permute(0, 2, 3, 1)
+ .reshape(*params_shape)
+ )
+
+ change_in_params = clamp_params - self.xform_params.data
+ self.xform_params.data.add_(change_in_params)
+
+ def merge_xform(self, other, self_mask):
+ """
+ Takes in an other instance of this same class with the same
+ shape of parameters (NxSHAPE) and a self_mask bytetensor of length
+ N and outputs the merge between self's parameters for the indices
+ of 1s in the self_mask and other's parameters for the indices of 0's
+ """
+
+ super().merge_xform(other, self_mask)
+ new_xform = FullSpatial(shape=self.img_shape,
+ manual_gpu=self.use_gpu,
+ resolution_x=self.resolution_x,
+ resolution_y=self.resolution_y,
+ resolution_z=self.resolution_z,
+ cspace=self.cspace)
+ new_params = utils.fold_mask(self.xform_params.data,
+ other.xform_params.data, self_mask)
+ new_xform.xform_params = nn.Parameter(new_params)
+
+ return new_xform
+
+ def project_params(self, lp, lp_bound):
+ """
+ Projects the params to be within lp_bound (according to an lp)
+ of the identity map. First thing we do is clip the params to be
+ valid, too.
+ ARGS:
+ lp : int or 'inf' - which LP norm we use. Must be an int or the
+ string 'inf'.
+ lp_bound : float - how far we're allowed to go in LP land. Can be
+ a list to indicate that we can go more in some channels
+ than others.
+ RETURNS:
+ None, but modifies self.xform_params
+ """
+
+ assert isinstance(lp, int) or lp == 'inf'
+
+ # clip first
+ self.clip_params()
+
+ # then project back
+ if lp == 'inf':
+ try:
+ # first, assume lp_bound is a vector, and then revert to scalar
+ # if it's not
+ clamped_channels = []
+ for channel_index, bound in enumerate(lp_bound):
+ clamped_channels.append(utils.clamp_ref(
+ self.xform_params[..., channel_index],
+ self.identity_params[..., channel_index],
+ bound,
+ ))
+ clamp_params = torch.stack(clamped_channels, 4)
+ except TypeError:
+ clamp_params = utils.clamp_ref(self.xform_params.data,
+ self.identity_params, lp_bound)
+ change_in_params = clamp_params - self.xform_params.data
+ else:
+ flattened_params = (
+ self.xform_params.data -
+ self.identity_params
+ ).reshape((-1, 3))
+ projected_params = flattened_params.renorm(lp, 0, lp_bound)
+ flattened_change = projected_params - flattened_params
+ change_in_params = flattened_change.reshape(
+ self.xform_params.size())
+ self.xform_params.data.add_(change_in_params)
+
+ def forward(self, imgs):
+ device = torch.device('cuda') if self.use_gpu else None
+ N, C, W, H = self.img_shape
+ imgs = imgs.permute(0, 2, 3, 1) # N x W x H x C
+ imgs = imgs * torch.tensor(
+ [
+ self.resolution_x - 1,
+ self.resolution_y - 1,
+ self.resolution_z - 1,
+ ],
+ dtype=torch.float,
+ device=device,
+ )[None, None, None, :].expand(N, W, H, C)
+ integer_part, float_part = torch.floor(imgs).long(), imgs % 1
+ params_list = self.xform_params.view(N, -1, 3)
+
+ # do trilinear interpolation from the params grid
+ endpoint_values = []
+ for delta_x in [0, 1]:
+ corner_values = []
+ for delta_y in [0, 1]:
+ vertex_values = []
+ for delta_z in [0, 1]:
+ params_index = Variable(torch.zeros(
+ N, W, H,
+ dtype=torch.long,
+ device=device,
+ ))
+ for color_index, resolution in [
+ (integer_part[..., 0] + delta_x, self.resolution_x),
+ (integer_part[..., 1] + delta_y, self.resolution_y),
+ (integer_part[..., 2] + delta_z, self.resolution_z),
+ ]:
+ color_index = color_index.clamp(
+ 0, resolution - 1)
+ params_index = (params_index * resolution +
+ color_index)
+ params_index = params_index.view(N, -1)[:, :, None] \
+ .expand(-1, -1, 3)
+ vertex_values.append(
+ params_list.gather(1, params_index)
+ .view(N, W, H, C)
+ )
+ corner_values.append(
+ vertex_values[0] * (1 - float_part[..., 2, None]) +
+ vertex_values[1] * float_part[..., 2, None]
+ )
+ endpoint_values.append(
+ corner_values[0] * (1 - float_part[..., 1, None]) +
+ corner_values[1] * float_part[..., 1, None]
+ )
+ result = (
+ endpoint_values[0] * (1 - float_part[..., 0, None]) +
+ endpoint_values[1] * float_part[..., 0, None]
+ )
+ return result.permute(0, 3, 1, 2)
+
+ @staticmethod
+ @lru_cache(maxsize=10)
+ def construct_identity_params(batch_size, resolution_x, resolution_y,
+ resolution_z, device):
+ identity_params = torch.empty(
+ batch_size, resolution_x, resolution_y,
+ resolution_z, 3,
+ dtype=torch.float,
+ device=device,
+ )
+ for x in range(resolution_x):
+ for y in range(resolution_y):
+ for z in range(resolution_z):
+ identity_params[:, x, y, z, 0] = \
+ x / (resolution_x - 1)
+ identity_params[:, x, y, z, 1] = \
+ y / (resolution_y - 1)
+ identity_params[:, x, y, z, 2] = \
+ z / (resolution_z - 1)
+ return identity_params
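+
+# Illustrative usage sketch (added for clarity, not part of the original file; `imgs` is
+# an N x 3 x W x H RGB batch and RGBColorSpace comes from color_spaces.py; the bound
+# values below are only placeholders):
+#
+#   from .color_spaces import RGBColorSpace
+#   xform = FullSpatial(shape=(4, 3, 32, 32), cspace=RGBColorSpace())
+#   recolored = xform(imgs)                           # trilinear lookup in the color grid
+#   xform.project_params('inf', [0.06, 0.06, 0.06])   # keep the recoloring perturbation small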
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/README.md b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/README.md
new file mode 100644
index 0000000..69e668c
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/README.md
@@ -0,0 +1 @@
+Code in this directory is adapted from the [`mister_ed`](https://github.com/revbucket/mister_ed) library.
\ No newline at end of file
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/__init__.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/__init__.py
new file mode 100644
index 0000000..4f3e26b
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/__init__.py
@@ -0,0 +1,12 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/__init__.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_attacks.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_attacks.py
new file mode 100644
index 0000000..cd8bfda
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_attacks.py
@@ -0,0 +1,710 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/adversarial_attacks.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" Holds the various attacks we can do """
+
+from __future__ import print_function
+from six import string_types
+import torch
+from torch.autograd import Variable
+from torch import optim
+
+from .utils import pytorch_utils as utils
+from . import loss_functions as lf
+
+MAXFLOAT = 1e20
+
+
+###############################################################################
+# #
+# PARENT CLASS FOR ADVERSARIAL ATTACKS #
+# #
+###############################################################################
+
+class AdversarialAttack(object):
+ """ Wrapper for adversarial attacks. Is helpful for when subsidiary methods
+ are needed.
+ """
+
+ def __init__(self, classifier_net, normalizer, threat_model,
+ manual_gpu=None):
+ """ Initializes things to hold to perform a single batch of
+ adversarial attacks
+ ARGS:
+ classifier_net : nn.Module subclass - neural net that is the
+ classifier we're attacking
+ normalizer : DifferentiableNormalize object - object to convert
+ input data to mean-zero, unit-var examples
+ threat_model : ThreatModel object - object that allows us to create
+ per-minibatch adversarial examples
+ manual_gpu : None or boolean - if not None, we override the
+ environment variable 'MISTER_ED_GPU' for how we use
+ the GPU in this object
+
+ """
+ self.classifier_net = classifier_net
+ self.normalizer = normalizer or utils.IdentityNormalize()
+ if manual_gpu is not None:
+ self.use_gpu = manual_gpu
+ else:
+ self.use_gpu = utils.use_gpu()
+ self.validator = lambda *args: None
+ self.threat_model = threat_model
+
+ @property
+ def _dtype(self):
+ return torch.cuda.FloatTensor if self.use_gpu else torch.FloatTensor
+
+ def setup(self):
+ self.classifier_net.eval()
+ self.normalizer.differentiable_call()
+
+ def eval(self, ground_examples, adversarials, labels, topk=1):
+ """ Evaluates how good the adversarial examples are
+ ARGS:
+ ground_examples: Variable (NxCxHxW) - examples before we did
+ adversarial perturbation. Vals in [0, 1] range
+ adversarials: Variable (NxCxHxW) - examples after we did
+ adversarial perturbation. Should be same shape and
+ in same order as ground_truth
+ labels: Variable (longTensor N) - correct labels of classification
+ output
+ RETURNS:
+ tuple of (% of correctly classified original examples,
+ % of correctly classified adversarial examples)
+ """
+ normed_ground = self.normalizer.forward(ground_examples)
+ ground_output = self.classifier_net.forward(normed_ground)
+
+ normed_advs = self.normalizer.forward(adversarials)
+ adv_output = self.classifier_net.forward(normed_advs)
+
+ start_prec = utils.accuracy(ground_output.data, labels.data,
+ topk=(topk,))
+ adv_prec = utils.accuracy(adv_output.data, labels.data,
+ topk=(topk,))
+
+ return float(start_prec[0]), float(adv_prec[0])
+
+ def eval_attack_only(self, adversarials, labels, topk=1):
+ """ Outputs the accuracy of the adv_inputs only
+ ARGS:
+ adversarials: Variable NxCxHxW - examples after we did adversarial
+ perturbation
+ labels: Variable (longtensor N) - correct labels of classification
+ output
+ topk: int - criterion for 'correct' classification
+ RETURNS:
+ (int) number of correctly classified examples
+ """
+
+ normed_advs = self.normalizer.forward(adversarials)
+
+ adv_output = self.classifier_net.forward(normed_advs)
+ return utils.accuracy_int(adv_output, labels, topk=topk)
+
+ def print_eval_str(self, ground_examples, adversarials, labels, topk=1):
+ """ Prints how good this adversarial attack is
+        (explicitly prints out %CorrectlyClassified(ground_examples)
+         vs %CorrectlyClassified(adversarials))
+
+ ARGS:
+            ground_examples: Variable (NxCxHxW) - examples before we did
+                           adversarial perturbation. Vals in [0, 1] range
+            adversarials: Variable (NxCxHxW) - examples after we did
+                           adversarial perturbation. Should be same shape and
+                           in same order as ground_examples
+ labels: Variable (longTensor N) - correct labels of classification
+ output
+ RETURNS:
+ None, prints some stuff though
+ """
+
+ og, adv = self.eval(ground_examples, adversarials, labels, topk=topk)
+ print("Went from %s correct to %s correct" % (og, adv))
+
+ def validation_loop(self, examples, labels, iter_no=None):
+ """ Prints out validation values interim for use in iterative techniques
+ ARGS:
+            examples: Variable (NxCxHxW) - [0.0, 1.0] images to be
+                      classified and compared against labels
+            labels: Variable (longTensor N) - correct labels for indices of
+                    examples
+ iter_no: String - an extra thing for prettier prints
+ RETURNS:
+ None
+ """
+ normed_input = self.normalizer.forward(examples)
+ new_output = self.classifier_net.forward(normed_input)
+ new_prec = utils.accuracy(new_output.data, labels.data, topk=(1,))
+ print_str = ""
+ if isinstance(iter_no, int):
+ print_str += "(iteration %02d): " % iter_no
+ elif isinstance(iter_no, string_types):
+ print_str += "(%s): " % iter_no
+ else:
+ pass
+
+ print_str += " %s correct" % float(new_prec[0])
+
+ print(print_str)
+
+
+##############################################################################
+# #
+# Fast Gradient Sign Method (FGSM) #
+# #
+##############################################################################
+
+class FGSM(AdversarialAttack):
+ def __init__(self, classifier_net, normalizer, threat_model, loss_fxn,
+ manual_gpu=None):
+ super(FGSM, self).__init__(classifier_net, normalizer, threat_model,
+ manual_gpu=manual_gpu)
+ self.loss_fxn = loss_fxn
+
+ def attack(self, examples, labels, step_size=0.05, verbose=True):
+ """ Builds FGSM examples for the given examples with l_inf bound
+        ARGS:
+            examples: NxCxHxW tensor for N examples. NOT NORMALIZED (i.e. all
+                       vals are between 0.0 and 1.0)
+            labels: single-dimension tensor with labels of examples (in same
+                    order)
+            step_size: float - how much we nudge each parameter along the
+                       signs of its gradient
+            verbose: boolean - if True, prints validation results after the
+                     FGSM step
+        The classifier_net, normalizer and loss_fxn (a RegularizedLoss object
+        that takes [0.0, 1.0] image Variables and labels, outputs a scalar
+        loss variable and has a zero_grad method) are the ones passed to
+        __init__.
+ RETURNS:
+ AdversarialPerturbation object with correct parameters.
+ Calling perturbation() gets Variable of output and
+ calling perturbation().data gets tensor of output
+ """
+ self.classifier_net.eval() # ALWAYS EVAL FOR BUILDING ADV EXAMPLES
+
+ perturbation = self.threat_model(examples)
+
+ var_examples = Variable(examples, requires_grad=True)
+ var_labels = Variable(labels, requires_grad=False)
+
+ ######################################################################
+ # Build adversarial examples #
+ ######################################################################
+
+ # Fix the 'reference' images for the loss function
+ self.loss_fxn.setup_attack_batch(var_examples)
+
+ # take gradients
+ loss = self.loss_fxn.forward(perturbation(var_examples), var_labels,
+ perturbation=perturbation)
+ torch.autograd.backward(loss)
+
+ # add adversarial noise to each parameter
+ update_fxn = lambda grad_data: step_size * torch.sign(grad_data)
+ perturbation.update_params(update_fxn)
+
+ if verbose:
+ self.validation_loop(perturbation(var_examples), var_labels,
+ iter_no='Post FGSM')
+
+ # output tensor with the data
+ self.loss_fxn.cleanup_attack_batch()
+ perturbation.attach_originals(examples)
+ return perturbation
+
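+# Illustrative usage sketch (an assumption about typical usage, not part of
+# the original code or docs): build FGSM examples for one minibatch with a
+# DeltaAddition threat model from adversarial_perturbations. The loss class
+# name and the variables classifier_net, normalizer, examples and labels are
+# assumptions for the sketch.
+#
+#   from . import adversarial_perturbations as ap
+#   threat = ap.ThreatModel(ap.DeltaAddition,
+#                           {'lp_style': 'inf', 'lp_bound': 8.0 / 255.0})
+#   attack_loss = lf.RegularizedLoss(
+#       {'xent': lf.PartialXentropy(classifier_net, normalizer)},
+#       {'xent': 1.0})
+#   fgsm = FGSM(classifier_net, normalizer, threat, attack_loss)
+#   perturbation = fgsm.attack(examples, labels, step_size=8.0 / 255.0)
+#   adv_examples = perturbation(examples)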
+
+##############################################################################
+# #
+# PGD/FGSM^k/BIM #
+# #
+##############################################################################
+# This goes by a lot of different names in the literature
+# The key idea here is that we take many small steps of FGSM
+# I'll call it PGD though
+
+class PGD(AdversarialAttack):
+
+ def __init__(self, classifier_net, normalizer, threat_model, loss_fxn,
+ manual_gpu=None):
+ super(PGD, self).__init__(classifier_net, normalizer, threat_model,
+ manual_gpu=manual_gpu)
+ self.loss_fxn = loss_fxn # WE MAXIMIZE THIS!!!
+
+ def attack(self, examples, labels, step_size=1.0 / 255.0,
+ num_iterations=20, random_init=False, signed=True,
+ optimizer=None, optimizer_kwargs=None,
+ loss_convergence=0.999, verbose=True,
+ keep_best=True, eot_iter=1):
+ """ Builds PGD examples for the given examples with l_inf bound and
+ given step size. Is almost identical to the BIM attack, except
+ we take steps that are proportional to gradient value instead of
+ just their sign.
+
+ ARGS:
+ examples: NxCxHxW tensor - for N examples, is NOT NORMALIZED
+ (i.e., all values are in between 0.0 and 1.0)
+ labels: N longTensor - single dimension tensor with labels of
+ examples (in same order as examples)
+            step_size : float - how much of a step we take each iteration
+                        (the l_inf bound itself is enforced by the threat
+                         model, not passed here)
+ num_iterations: int or pair of ints - how many iterations we take.
+ If pair of ints, is of form (lo, hi), where we run
+ at least 'lo' iterations, at most 'hi' iterations
+ and we quit early if loss has stabilized.
+ random_init : bool - if True, we randomly pick a point in the
+ l-inf epsilon ball around each example
+ signed : bool - if True, each step is
+ adversarial = adversarial + sign(grad)
+ [this is the form that madry et al use]
+ if False, each step is
+ adversarial = adversarial + grad
+ keep_best : bool - if True, we keep track of the best adversarial
+ perturbations per example (in terms of maximal
+ loss) in the minibatch. The output is the best of
+ each of these then
+ RETURNS:
+ AdversarialPerturbation object with correct parameters.
+ Calling perturbation() gets Variable of output and
+ calling perturbation().data gets tensor of output
+ """
+
+ ######################################################################
+ # Setups and assertions #
+ ######################################################################
+
+ self.classifier_net.eval()
+
+ if not verbose:
+ self.validator = lambda ex, label, iter_no: None
+ else:
+ self.validator = self.validation_loop
+
+ perturbation = self.threat_model(examples)
+
+ num_examples = examples.shape[0]
+ var_examples = Variable(examples, requires_grad=True)
+ var_labels = Variable(labels, requires_grad=False)
+
+ if isinstance(num_iterations, int):
+ min_iterations = num_iterations
+ max_iterations = num_iterations
+ elif isinstance(num_iterations, tuple):
+ min_iterations, max_iterations = num_iterations
+
+ best_perturbation = None
+ if keep_best:
+ best_loss_per_example = {i: None for i in range(num_examples)}
+
+ prev_loss = None
+
+ ######################################################################
+ # Loop through iterations #
+ ######################################################################
+
+ self.loss_fxn.setup_attack_batch(var_examples)
+ self.validator(var_examples, var_labels, iter_no="START")
+
+ # random initialization if necessary
+ if random_init:
+ perturbation.random_init()
+ self.validator(perturbation(var_examples), var_labels,
+ iter_no="RANDOM")
+
+ # Build optimizer techniques for both signed and unsigned methods
+ optimizer = optimizer or optim.Adam
+ if optimizer_kwargs is None:
+ optimizer_kwargs = {'lr': 0.0001}
+ optimizer = optimizer(perturbation.parameters(), **optimizer_kwargs)
+
+ update_fxn = lambda grad_data: -1 * step_size * torch.sign(grad_data)
+
+ param_list = list(perturbation.parameters())
+ assert len(param_list) == 1, len(param_list)
+ param = param_list[0]
+ print(f'inside PGD attack, eot_iter: {eot_iter}, max_iterations: {max_iterations}')
+ for iter_no in range(max_iterations):
+ print("pgd iter", iter_no)
+ perturbation.zero_grad()
+
+ grad = torch.zeros_like(param)
+ loss_per_example_ave = 0
+ for i in range(eot_iter):
+ loss_per_example = self.loss_fxn.forward(perturbation(var_examples), var_labels,
+ perturbation=perturbation,
+ output_per_example=keep_best)
+
+ loss_per_example_ave += loss_per_example.detach().clone()
+ loss = -1 * loss_per_example.sum()
+
+ loss.backward()
+ grad += param.grad.data.detach()
+ param.grad.data.zero_()
+
+ grad /= float(eot_iter)
+ loss_per_example_ave /= float(eot_iter)
+
+ assert len(param_list) == 1, len(param_list)
+ param.grad.data = grad.clone()
+
+ if signed:
+ perturbation.update_params(update_fxn)
+ else:
+ optimizer.step()
+
+ if keep_best:
+ mask_val = torch.zeros(num_examples, dtype=torch.uint8)
+ for i, el in enumerate(loss_per_example_ave):
+ this_best_loss = best_loss_per_example[i]
+ if this_best_loss is None or this_best_loss[1] < float(el):
+ mask_val[i] = 1
+ best_loss_per_example[i] = (iter_no, float(el))
+
+ if best_perturbation is None:
+ best_perturbation = self.threat_model(examples)
+
+ best_perturbation = perturbation.merge_perturbation(
+ best_perturbation,
+ mask_val)
+
+ self.validator((best_perturbation or perturbation)(var_examples),
+ var_labels, iter_no=iter_no)
+
+ # Stop early if loss didn't go down too much
+ if (iter_no >= min_iterations and
+ float(loss) >= loss_convergence * prev_loss):
+ if verbose:
+ print("Stopping early at %03d iterations" % iter_no)
+ break
+ prev_loss = float(loss)
+
+ perturbation.zero_grad()
+ self.loss_fxn.cleanup_attack_batch()
+ perturbation.attach_originals(examples)
+ return perturbation
+
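+# Illustrative usage sketch (an assumption about typical usage, not part of
+# the original code or docs): same setup as the FGSM sketch above, but taking
+# 20 signed steps from a random start; eot_iter > 1 averages gradients over
+# several forward passes (EOT), which is useful for stochastic defenses.
+#
+#   pgd = PGD(classifier_net, normalizer, threat, attack_loss)
+#   perturbation = pgd.attack(examples, labels, step_size=2.0 / 255.0,
+#                             num_iterations=20, random_init=True,
+#                             signed=True, eot_iter=1)
+#   adv_examples = perturbation(examples).data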
+
+##############################################################################
+# #
+# CARLINI WAGNER #
+# #
+##############################################################################
+"""
+General class of CW attacks: these aim to solve optim problems of the form
+
+Adv(x) = argmin_{x'} D(x, x')
+ s.t. f(x) != f(x')
+ x' is a valid attack (e.g., meets LP bounds)
+
+This is typically relaxed to solving
+Adv(x) = argmin_{x'} D(x, x') + lambda * L_adv(x')
+where L_adv(x') is nonpositive only when f(x) != f(x').
+
+Binary search is performed on a per-example basis to find the appropriate
+lambda.
+
+The distance function is backpropagated through in each binary search step, so
+it needs to be differentiable. It does not need to be a true distance metric,
+though.
+"""
+
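+# In code terms (an illustrative summary, not part of the original docs):
+# _construct_loss_fxn below builds the relaxed objective as
+#   lf.RegularizedLoss({'distance_fxn': D, 'carlini_loss': L_adv},
+#                      {'distance_fxn': 1.0, 'carlini_loss': lambda})
+# and attack() runs num_bin_search_steps rounds of Adam optimization,
+# adjusting lambda per example with tweak_lambdas() after every round.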
+
+class CarliniWagner(AdversarialAttack):
+
+ def __init__(self, classifier_net, normalizer, threat_model,
+ distance_fxn, carlini_loss, manual_gpu=None):
+ """ This is a different init than the other style attacks because the
+ loss function is separated into two arguments here
+ ARGS:
+ classifier_net: standard attack arg
+ normalizer: standard attack arg
+ threat_model: standard attack arg
+ distance_fxn: lf.ReferenceRegularizer subclass (CLASS NOT INSTANCE)
+ - is a loss function
+ that stores originals so it can be used to create a
+ RegularizedLoss object with the carlini loss object
+            carlini_loss: lf.CWLossF6 subclass (CLASS NOT INSTANCE) - the loss
+                          term that is a function of images and labels and
+                          returns zero only when the images are adversarial
+ """
+ super(CarliniWagner, self).__init__(classifier_net, normalizer,
+ threat_model, manual_gpu=manual_gpu)
+
+ assert issubclass(distance_fxn, lf.ReferenceRegularizer)
+ assert issubclass(carlini_loss, lf.CWLossF6)
+
+ self.loss_classes = {'distance_fxn': distance_fxn,
+ 'carlini_loss': carlini_loss}
+
+ def _construct_loss_fxn(self, initial_lambda, confidence):
+ """ Uses the distance_fxn and carlini_loss to create a loss function to
+ be optimized
+ ARGS:
+ initial_lambda : float - which lambda to use initially
+ in the regularization of the carlini loss
+ confidence : float - how great the difference in the logits must be
+ for the carlini_loss to be zero. Overwrites the
+ self.carlini_loss.kappa value
+ RETURNS:
+ RegularizedLoss OBJECT to be used as the loss for this optimization
+ """
+ losses = {'distance_fxn': self.loss_classes['distance_fxn'](None,
+ use_gpu=self.use_gpu),
+ 'carlini_loss': self.loss_classes['carlini_loss'](
+ self.classifier_net,
+ self.normalizer,
+ kappa=confidence)}
+ scalars = {'distance_fxn': 1.0,
+ 'carlini_loss': initial_lambda}
+ return lf.RegularizedLoss(losses, scalars)
+
+ def _optimize_step(self, optimizer, perturbation, var_examples,
+ var_targets, var_scale, loss_fxn, targeted=False):
+ """ Does one step of optimization """
+ assert not targeted
+ optimizer.zero_grad()
+
+ loss = loss_fxn.forward(perturbation(var_examples), var_targets)
+ if torch.numel(loss) > 1:
+ loss = loss.sum()
+ loss.backward()
+
+ optimizer.step()
+ # return a loss 'average' to determine if we need to stop early
+ return loss.item()
+
+ def _batch_compare(self, example_logits, targets, confidence=0.0,
+ targeted=False):
+ """ Returns a list of indices of valid adversarial examples
+ ARGS:
+ example_logits: Variable/Tensor (Nx#Classes) - output logits for a
+ batch of images
+ targets: Variable/Tensor (N) - each element is a class index for the
+ target class for the i^th example.
+ confidence: float - how much the logits must differ by for an
+ attack to be considered valid
+ targeted: bool - if True, the 'targets' arg should be the targets
+ we want to hit. If False, 'targets' arg should be
+ the targets we do NOT want to hit
+ RETURNS:
+            Variable ByteTensor of length (N) on the same device as
+            example_logits/targets with 1's for successful adversarial
+            examples, 0's for unsuccessful
+ """
+ # check if the max val is the targets
+ target_vals = example_logits.gather(1, targets.view(-1, 1))
+ max_vals, max_idxs = torch.max(example_logits, 1)
+ max_eq_targets = torch.eq(targets, max_idxs)
+
+ # check margins between max and target_vals
+ if targeted:
+ max_2_vals, _ = example_logits.kthvalue(2, dim=1)
+ good_confidence = torch.gt(max_vals - confidence, max_2_vals)
+ one_hot_indices = max_eq_targets * good_confidence
+ else:
+ good_confidence = torch.gt(max_vals.view(-1, 1),
+ target_vals + confidence)
+ one_hot_indices = ((1 - max_eq_targets.data).view(-1, 1) *
+ good_confidence.data)
+
+ return one_hot_indices.squeeze()
+ # return [idx for idx, el in enumerate(one_hot_indices) if el[0] == 1]
+
+ @classmethod
+ def tweak_lambdas(cls, var_scale_lo, var_scale_hi, var_scale,
+ successful_mask):
+ """ Modifies the constant scaling that we keep to weight f_adv vs D(.)
+ in our loss function.
+
+ IF the attack was successful
+ THEN hi -> lambda
+ lambda -> (lambda + lo) /2
+ ELSE
+ lo -> lambda
+ lambda -> (lambda + hi) / 2
+
+
+ ARGS:
+ var_scale_lo : Variable (N) - variable that holds the running lower
+ bounds in our binary search
+ var_scale_hi: Variable (N) - variable that holds the running upper
+ bounds in our binary search
+ var_scale : Variable (N) - variable that holds the lambdas we
+ actually use
+ successful_mask : Variable (ByteTensor N) - mask that holds the
+ indices of the successful attacks
+ RETURNS:
+ (var_scale_lo, var_scale_hi, var_scale) but modified according to
+ the rule describe in the spec of this method
+ """
+ prev_his = var_scale_hi.data
+ downweights = (var_scale_lo.data + var_scale.data) / 2.0
+ upweights = (var_scale_hi.data + var_scale.data) / 2.0
+
+ scale_hi = utils.fold_mask(var_scale.data, var_scale_hi.data,
+ successful_mask.data)
+ scale_lo = utils.fold_mask(var_scale_lo.data, var_scale.data,
+ successful_mask.data)
+ scale = utils.fold_mask(downweights, upweights,
+ successful_mask.data)
+ return (Variable(scale_lo), Variable(scale_hi), Variable(scale))
+
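+    # Illustrative numbers for the update rule above (a made-up example, not
+    # from the original docs): with lo = 0, hi = 128 and lambda = 1, a
+    # successful attack yields (lo, hi, lambda) -> (0, 1, 0.5), while an
+    # unsuccessful one yields (lo, hi, lambda) -> (1, 128, 64.5).
+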
+ def attack(self, examples, labels, targets=None, initial_lambda=1.0,
+ num_bin_search_steps=10, num_optim_steps=1000,
+ confidence=0.0, verbose=True):
+ """ Performs Carlini Wagner attack on provided examples to make them
+ not get classified as the labels.
+ ARGS:
+ examples : Tensor (NxCxHxW) - input images to be made adversarial
+ labels : Tensor (N) - correct labels of the examples
+ initial_lambda : float - which lambda to use initially
+ in the regularization of the carlini loss
+ num_bin_search_steps : int - how many binary search steps we perform
+ to optimize the lambda
+ num_optim_steps : int - how many optimizer steps we perform during
+ each binary search step (we may stop early)
+ confidence : float - how great the difference in the logits must be
+ for the carlini_loss to be zero. Overwrites the
+ self.carlini_loss.kappa value
+ RETURNS:
+ AdversarialPerturbation object with correct parameters.
+ Calling perturbation() gets Variable of output and
+ calling perturbation().data gets tensor of output
+ calling perturbation(distances=True) returns a dict like
+ {}
+ """
+
+ ######################################################################
+ # First perform some setups #
+ ######################################################################
+
+ if targets is not None:
+ raise NotImplementedError("Targeted attacks aren't built yet")
+
+ if self.use_gpu:
+ examples = examples.cuda()
+ labels = labels.cuda()
+
+ self.classifier_net.eval() # ALWAYS EVAL FOR BUILDING ADV EXAMPLES
+
+ var_examples = Variable(examples, requires_grad=False)
+ var_labels = Variable(labels, requires_grad=False)
+
+ loss_fxn = self._construct_loss_fxn(initial_lambda, confidence)
+ loss_fxn.setup_attack_batch(var_examples)
+ distance_fxn = loss_fxn.losses['distance_fxn']
+
+ num_examples = examples.shape[0]
+
+ best_results = {'best_dist': torch.ones(num_examples) \
+ .type(examples.type()) \
+ * MAXFLOAT,
+ 'best_perturbation': self.threat_model(examples)}
+
+ ######################################################################
+ # Now start the binary search #
+ ######################################################################
+ var_scale_lo = Variable(torch.zeros(num_examples) \
+ .type(self._dtype).squeeze())
+
+ var_scale = Variable(torch.ones(num_examples, 1).type(self._dtype) *
+ initial_lambda).squeeze()
+ var_scale_hi = Variable(torch.ones(num_examples).type(self._dtype)
+ * 128).squeeze() # HARDCODED UPPER LIMIT
+
+ for bin_search_step in range(num_bin_search_steps):
+ perturbation = self.threat_model(examples)
+ ##################################################################
+ # Optimize with a given scale constant #
+ ##################################################################
+ if verbose:
+ print("Starting binary_search_step %02d..." % bin_search_step)
+
+ prev_loss = MAXFLOAT
+ optimizer = optim.Adam(perturbation.parameters(), lr=0.001)
+
+ for optim_step in range(num_optim_steps):
+
+ if verbose and optim_step > 0 and optim_step % 25 == 0:
+ print("Optim search: %s, Loss: %s" %
+ (optim_step, prev_loss))
+
+ loss_sum = self._optimize_step(optimizer, perturbation,
+ var_examples, var_labels,
+ var_scale, loss_fxn)
+
+ if loss_sum + 1e-10 > prev_loss * 0.99999 and optim_step >= 100:
+ if verbose:
+ print(("...stopping early on binary_search_step %02d "
+ " after %03d iterations") % (bin_search_step,
+ optim_step))
+ break
+ prev_loss = loss_sum
+ # End inner optimize loop
+
+ ################################################################
+ # Update with results from optimization #
+ ################################################################
+
+ # We only keep this round's perturbations if two things occur:
+ # 1) the perturbation fools the classifier
+ # 2) the perturbation is closer to original than the best-so-far
+
+ bin_search_perts = perturbation(var_examples)
+ bin_search_out = self.classifier_net.forward(bin_search_perts)
+ successful_attack_idxs = self._batch_compare(bin_search_out,
+ var_labels)
+
+ batch_dists = distance_fxn.forward(bin_search_perts).data
+
+            successful_dist_idxs = batch_dists < best_results['best_dist']
+
+ successful_mask = successful_attack_idxs * successful_dist_idxs
+
+ # And then generate a new 'best distance' and 'best perturbation'
+
+ best_results['best_dist'] = utils.fold_mask(batch_dists,
+ best_results['best_dist'],
+ successful_mask)
+
+ best_results['best_perturbation'] = \
+ perturbation.merge_perturbation(
+ best_results['best_perturbation'],
+ successful_mask)
+
+ # And then adjust the scale variables (lambdas)
+ new_scales = self.tweak_lambdas(var_scale_lo, var_scale_hi,
+ var_scale,
+ Variable(successful_mask))
+
+ var_scale_lo, var_scale_hi, var_scale = new_scales
+
+ # End binary search loop
+ if verbose:
+ num_successful = len([_ for _ in best_results['best_dist']
+ if _ < MAXFLOAT])
+ print("\n Ending attack")
+ print("Successful attacks for %03d/%03d examples in CONTINUOUS" % \
+ (num_successful, num_examples))
+
+ loss_fxn.cleanup_attack_batch()
+ perturbation.attach_originals(examples)
+ perturbation.attach_attr('distances', best_results['best_dist'])
+
+ return perturbation
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_perturbations.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_perturbations.py
new file mode 100644
index 0000000..9c98081
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_perturbations.py
@@ -0,0 +1,813 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/adversarial_perturbations.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" File that holds adversarial perturbations as torch.nn.Modules.
+    An adversarial perturbation is an example-specific transformation that is
+    set up for a single minibatch of inputs.
+"""
+
+import torch
+import torch.nn as nn
+from . import spatial_transformers as st
+from .utils import image_utils as img_utils
+from .utils import pytorch_utils as utils
+from torch.autograd import Variable
+import functools
+
+
+# assert initialized decorator
+def initialized(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ assert self.initialized, ("Parameters not initialized yet. "
+ "Call .forward(...) first")
+ return func(self, *args, **kwargs)
+ return wrapper
+
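+# Illustrative effect of the guard above (a sketch, not from the original
+# docs): calling a decorated method such as perturbation_norm() before
+# setup()/forward() has run raises an AssertionError; once setup(x) sets
+# self.initialized = True, the decorated methods behave normally.
+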
+##############################################################################
+# #
+# SKELETON CLASS #
+# #
+##############################################################################
+
+class AdversarialPerturbation(nn.Module):
+ """ Skeleton class to hold adversarial perturbations FOR A SINGLE MINIBATCH.
+ For general input-agnostic adversarial perturbations, see the
+ ThreatModel class
+
+ All subclasses need the following:
+ - perturbation_norm() : no args -> scalar Variable
+ - self.parameters() needs to iterate over params we want to optimize
+ - constrain_params() : no args -> no return,
+ modifies the parameters such that this is still a valid image
+    - forward(x) : Variable -> Variable - applies the adversarial perturbation
+                to the originals and outputs a Variable of the perturbed images
+ - adversarial_tensors() : applies the adversarial transform to the
+ originals and outputs TENSORS that are the
+ adversarial images
+ """
+
+ def __init__(self, threat_model, perturbation_params):
+
+ super(AdversarialPerturbation, self).__init__()
+ self.threat_model = threat_model
+ self.initialized = False
+ self.perturbation_params = perturbation_params
+
+ if isinstance(perturbation_params, tuple):
+ self.use_gpu = perturbation_params[1].use_gpu or utils.use_gpu()
+ else:
+ self.use_gpu = perturbation_params.use_gpu or utils.use_gpu()
+ # Stores parameters of the adversarial perturbation and hyperparams
+ # to compute total perturbation norm here
+
+
+ def __call__(self, x):
+ return self.forward(x)
+
+ def __repr__(self):
+ if isinstance(self.perturbation_params, tuple):
+ output_str = "[Perturbation] %s: %s" % (self.__class__.__name__,
+ self.perturbation_params[1])
+ output_str += '\n['
+ for el in self.perturbation_params[0]:
+ output_str += '\n\t%s,' % el
+ output_str += '\n]'
+ return output_str
+ else:
+ return "[Perturbation] %s: %s" % (self.__class__.__name__,
+ self.perturbation_params)
+
+ def _merge_setup(self, *args):
+ """ Internal method to be used when initializing a new perturbation
+ from merging only. Should not be called outside this file!!
+ """
+ pass
+
+ def setup(self, x):
+ """ This is the standard setup technique and should be used to
+ initialize a perturbation (i.e. sets up parameters and unlocks
+ all other methods)
+ ARGS:
+ x : Variable or Tensor (NxCxHxW) - the images this perturbation is
+ intended for
+ """
+ self.num_examples = x.shape[0]
+
+
+ @initialized
+ def perturbation_norm(self, x=None):
+ """ This returns the 'norm' of this perturbation. Optionally, for
+ certain norms, having access to the images for which the
+ perturbation is intended can have an effect on the output.
+ ARGS:
+ x : Variable or Tensor (NxCxHxW) - optionally can be the images
+ that the perturbation was intended for
+ RETURNS:
+ Scalar Variable
+ """
+ raise NotImplementedError("Need to call subclass method here")
+
+ @initialized
+ def constrain_params(self):
+ """ This modifies the parameters such that the perturbation falls within
+ the threat model it belongs to. E.g. for l-infinity threat models,
+ this clips the params to match the right l-infinity bound.
+
+ TODO: for non-lp norms, projecting to the nearest point in the level
+ set
+ """
+ raise NotImplementedError("Need to call subclass method here")
+
+ @initialized
+ def make_valid_image(self, x):
+ """ This takes in the minibatch self's parameters were tuned for and
+ clips the parameters such that this is still a valid image.
+ ARGS:
+            x : Variable or Tensor (NxCxHxW) - the images this perturbation
+ was intended for
+ RETURNS:
+ None
+ """
+ pass # Only implement in classes that can create invalid images
+
+ @initialized
+ def forward(self, x):
+ """ This takes in the minibatch self's parameters were tuned for and
+ outputs a variable of the perturbation applied to the images
+ ARGS:
+            x : Variable (NxCxHxW) - the images this perturbation
+ was intended for
+ RETURNS:
+ Variable (NxCxHxW) - the perturbation applied to the input images
+ """
+ raise NotImplementedError("Need to call subclass method here")
+
+ @initialized
+ def add_to_params(self, grad_data):
+ """ This takes in a Tensor the same shape as self's parameters and
+ adds to them. Note that this usually won't preserve gradient
+ information
+ (also this might have different signatures in subclasses)
+ ARGS:
+            grad_data : Tensor (params-shape) - Tensor to be added to the
+                        parameters of self
+ RETURNS:
+ None, but modifies self's parameters
+ """
+ raise NotImplementedError("Need to call subclass method here")
+
+ @initialized
+ def update_params(self, step_fxn):
+ """ This takes in a function step_fxn: Tensor -> Tensor that generates
+ the change to the parameters that we step along. This loops through
+ all parameters and updates signs accordingly.
+ For sequential perturbations, this also multiplies by a scalar if
+ provided
+ ARGS:
+ step_fxn : Tensor -> Tensor - function that maps tensors to tensors.
+ e.g. for FGSM, we want a function that multiplies signs
+ by step_size
+ RETURNS:
+ None, but updates the parameters
+ """
+ raise NotImplementedError("Need to call subclass method here")
+
+
+ @initialized
+ def adversarial_tensors(self, x=None):
+ """ Little helper method to get the tensors of the adversarial images
+ directly
+ """
+ assert x is not None or self.originals is not None
+ if x is None:
+ x = self.originals
+
+ return self.forward(x).data
+
+ @initialized
+ def attach_attr(self, attr_name, attr):
+ """ Special method to set an attribute if it doesn't exist in this
+ object yet. throws error if this attr already exists
+ ARGS:
+ attr_name : string - name of attribute we're attaching
+ attr: object - attribute we're attaching
+ RETURNS:
+ None
+ """
+ if hasattr(self, attr_name):
+ raise Exception("%s already has attribute %s" % (self, attr_name))
+ else:
+ setattr(self, attr_name, attr)
+
+
+ @initialized
+ def attach_originals(self, originals):
+ """ Little helper method to tack on the original images to self to
+ pass around the (images, perturbation) in a single object
+ """
+ self.attach_attr('originals', originals)
+
+
+ @initialized
+ def random_init(self):
+ """ Modifies the parameters such that they're randomly initialized
+ uniformly across the threat model (this is harder for nonLp threat
+ models...). Takes no args and returns nothing, but modifies the
+ parameters
+ """
+ raise NotImplementedError("Need to call subclass method here")
+
+ @initialized
+ def merge_perturbation(self, other, self_mask):
+ """ Special technique to merge this perturbation with another
+ perturbation of the same threat model.
+ This will return a new perturbation object that, for each parameter
+ will return the parameters of self for self_mask, and the
+ perturbation of other for NOT(self_mask)
+
+ ARGS:
+ other: AdversarialPerturbation Object - instance of other
+ adversarial perturbation that is instantiated with the
+ same threat model as self
+            self_mask: ByteTensor [N] : bytetensor indicating which
+                         parameters to include from self and which to include
+                         from other
+ """
+
+ # this parent class just does the shared asserts such that this is a
+ # valid thing
+ assert self.__class__ == other.__class__
+ assert self.threat_model == other.threat_model
+ assert self.num_examples == other.num_examples
+ assert self.perturbation_params == other.perturbation_params
+ assert other.initialized
+
+ @initialized
+ def collect_successful(self, classifier_net, normalizer):
+ """ Returns a list of [adversarials, originals] of the SUCCESSFUL
+ attacks only, according to the given classifier_net, normalizer
+            SUCCESSFUL here means that the classifier's output on the
+            adversarial differs from its output on the original
+ ARGS:
+ TODO: fill in when I'm not in crunchtime
+ """
+
+ assert self.originals is not None
+ adversarials = Variable(self.adversarial_tensors())
+ originals = Variable(self.originals)
+
+ adv_out = torch.max(classifier_net(normalizer(adversarials)), 1)[1]
+ out = torch.max(classifier_net(normalizer(originals)), 1)[1]
+ adv_idx_bytes = adv_out != out
+ idxs = []
+ for idx, el in enumerate(adv_idx_bytes):
+ if float(el) > 0:
+ idxs.append(idx)
+
+ idxs = torch.LongTensor(idxs)
+ if self.originals.is_cuda:
+ idxs = idxs.cuda()
+
+ return [torch.index_select(self.adversarial_tensors(), 0, idxs),
+ torch.index_select(self.originals, 0, idxs)]
+
+ @initialized
+ def collect_adversarially_successful(self, classifier_net, normalizer,
+ labels):
+ """ Returns an object containing the SUCCESSFUL attacked examples,
+ their corresponding originals, and the number of misclassified
+ examples
+ ARGS:
+ classifier_net : nn.Module subclass - neural net that is the
+ relevant classifier
+ normalizer : DifferentiableNormalize object - object to convert
+ input data to mean-zero, unit-var examples
+ labels : Variable (longTensor N) - correct labels for classification
+ of self.originals
+ RETURNS:
+ dict with structure:
+ {'adversarials': Variable(N'xCxHxW) - adversarial perturbation
+ applied
+ 'originals': Variable(N'xCxHxW) - unperturbed examples that
+ were correctly classified AND
+ successfully attacked
+ 'num_correctly_classified': int - number of correctly classified
+ unperturbed examples
+ }
+ """
+ assert self.originals is not None
+ adversarials = Variable(self.adversarial_tensors())
+ originals = Variable(self.originals)
+
+ adv_out = torch.max(classifier_net(normalizer(adversarials)), 1)[1]
+ out = torch.max(classifier_net(normalizer(originals)), 1)[1]
+
+ # First take a subset of correctly classified originals
+ correct_idxs = (out == labels) # correctly classified idxs
+ adv_idx_bytes = (adv_out != out) # attacked examples
+
+ num_correctly_classified = int(sum(correct_idxs))
+
+ adv_idxs = adv_idx_bytes * correct_idxs
+
+
+ idxs = []
+ for idx, el in enumerate(adv_idxs):
+ if float(el) > 0:
+ idxs.append(idx)
+
+ idxs = torch.LongTensor(idxs)
+ if self.originals.is_cuda:
+ idxs = idxs.cuda()
+
+
+ return {'adversarial': torch.index_select(self.adversarial_tensors(),
+ 0, idxs),
+ 'originals': torch.index_select(self.originals, 0, idxs),
+ 'num_correctly_classified': num_correctly_classified}
+
+
+
+ @initialized
+ def display(self, scale=5, successful_only=False, classifier_net=None,
+ normalizer=None):
+ """ Displays this adversarial perturbation in a 3-row format:
+ top row is adversarial images, second row is original images,
+ bottom row is difference magnified by scale (default 5)
+ ARGS:
+ scale: int - how much to magnify differences by
+            successful_only: bool - if True we only display successful attacks
+                             (i.e., advs that change the classifier's output).
+                             If this is True, classifier_net and normalizer
+                             cannot be None
+ RETURNS:
+ None, but displays images
+ """
+ if successful_only:
+ assert classifier_net is not None
+ assert normalizer is not None
+ advs, origs = self.collect_successful(classifier_net, normalizer)
+ else:
+ advs = self.adversarial_tensors()
+ origs = self.originals
+
+ diffs = torch.clamp((advs - origs) * scale + 0.5, 0, 1)
+ img_utils.show_images([advs, origs, diffs])
+
+
+class PerturbationParameters(dict):
+ """ Object that stores parameters like a dictionary.
+ This allows perturbation classes to be only partially instantiated and
+ then fed various 'originals' later.
+ Implementation taken from : https://stackoverflow.com/a/14620633/3837607
+ (and then modified with the getattribute trick to return none instead of
+ error for missing attributes)
+ """
+ def __init__(self, *args, **kwargs):
+ super(PerturbationParameters, self).__init__(*args, **kwargs)
+ if kwargs.get('manual_gpu') is not None:
+ self.use_gpu = kwargs['manual_gpu']
+ else:
+ self.use_gpu = utils.use_gpu()
+ self.__dict__ = self
+
+ def __getattribute__(self, name):
+ try:
+ return object.__getattribute__(self, name)
+ except AttributeError:
+ return None
+
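+# Illustrative behaviour (a sketch, not from the original docs):
+#   params = PerturbationParameters(lp_style='inf', lp_bound=8.0 / 255.0)
+#   params.lp_style      # 'inf'  - attribute access mirrors dict access
+#   params['lp_bound']   # 0.0313...
+#   params.custom_norm   # None   - missing attributes return None, no error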
+
+class ThreatModel(object):
+ def __init__(self, perturbation_class, param_kwargs, *other_args):
+ """ Factory class to generate per_minibatch instances of Adversarial
+ perturbations.
+ ARGS:
+            perturbation_class : class - subclass of AdversarialPerturbation
+            param_kwargs : dict - dict containing named kwargs used to
+                           instantiate perturbation_class
+ """
+ assert issubclass(perturbation_class, AdversarialPerturbation)
+ self.perturbation_class = perturbation_class
+ if isinstance(param_kwargs, dict):
+ param_kwargs = PerturbationParameters(**param_kwargs)
+ self.param_kwargs = param_kwargs
+ self.other_args = other_args
+
+ def __repr__(self):
+ return "[Threat] %s: %s" % (str(self.perturbation_class.__name__),
+ self.param_kwargs)
+
+ def __call__(self, *args):
+ if args == ():
+ return self.perturbation_obj()
+ else:
+ perturbation_obj = self.perturbation_obj()
+ perturbation_obj.setup(*args)
+ return perturbation_obj
+
+
+
+ def perturbation_obj(self):
+ return self.perturbation_class(self, self.param_kwargs, *self.other_args)
+
+
+
+##############################################################################
+# #
+# ADDITION PARAMETERS #
+# #
+##############################################################################
+
+class DeltaAddition(AdversarialPerturbation):
+
+ def __init__(self, threat_model, perturbation_params, *other_args):
+ """ Maintains a delta that gets addded to the originals to generate
+ adversarial images. This is the type of adversarial perturbation
+ that the literature extensivey studies
+ ARGS:
+ threat_model : ThreatModel object that is used to initialize self
+ perturbation_params: PerturbationParameters object.
+ { lp_style : None, int or 'inf' - if not None is the type of
+ Lp_bound that we apply to this adversarial example
+ lp_bound : None or float - cannot be None if lp_style is
+ not None, but if not None should be the lp bound
+ we allow for adversarial perturbations
+ custom_norm : None or fxn:(NxCxHxW) -> Scalar Variable. This is
+ not implemented for now
+ }
+ """
+
+ super(DeltaAddition, self).__init__(threat_model, perturbation_params)
+ self.lp_style = perturbation_params.lp_style
+ self.lp_bound = perturbation_params.lp_bound
+ if perturbation_params.custom_norm is not None:
+ raise NotImplementedError("Only LP norms allowed for now")
+ self.scalar_step = perturbation_params.scalar_step or 1.0
+
+
+ def _merge_setup(self, num_examples, delta_data):
+ """ DANGEROUS TO BE CALLED OUTSIDE OF THIS FILE!!!"""
+ self.num_examples = num_examples
+ self.delta = nn.Parameter(delta_data)
+ self.initialized = True
+
+
+ def setup(self, x):
+ super(DeltaAddition, self).setup(x)
+ self.delta = nn.Parameter(torch.zeros_like(x))
+ self.initialized = True
+
+ @initialized
+ def perturbation_norm(self, x=None, lp_style=None):
+ lp_style = lp_style or self.lp_style
+ assert isinstance(lp_style, int) or lp_style == 'inf'
+ return utils.batchwise_norm(self.delta, lp=lp_style)
+
+
+ @initialized
+ def constrain_params(self):
+ new_delta = utils.batchwise_lp_project(self.delta.data, self.lp_style,
+ self.lp_bound)
+ delta_diff = new_delta - self.delta.data
+ self.delta.data.add_(delta_diff)
+
+ @initialized
+ def make_valid_image(self, x):
+ new_delta = self.delta.data
+ change_in_delta = utils.clamp_0_1_delta(new_delta, x)
+ self.delta.data.add_(change_in_delta)
+
+ @initialized
+ def update_params(self, step_fxn):
+ assert self.delta.grad.data is not None
+ self.add_to_params(step_fxn(self.delta.grad.data) * self.scalar_step)
+
+ @initialized
+ def add_to_params(self, grad_data):
+ """ sets params to be self.params + grad_data """
+ self.delta.data.add_(grad_data)
+
+
+ @initialized
+ def random_init(self):
+ self.delta = nn.Parameter(utils.random_from_lp_ball(self.delta.data,
+ self.lp_style,
+ self.lp_bound))
+
+ @initialized
+ def merge_perturbation(self, other, self_mask):
+ super(DeltaAddition, self).merge_perturbation(other, self_mask)
+
+ # initialize a new perturbation
+ new_perturbation = DeltaAddition(self.threat_model,
+ self.perturbation_params)
+
+ # make the new parameters
+ new_delta = utils.fold_mask(self.delta.data, other.delta.data,
+ self_mask)
+
+ # do the merge setup and return the object
+ new_perturbation._merge_setup(self.num_examples,
+ new_delta)
+ return new_perturbation
+
+
+ def forward(self, x):
+ if not self.initialized:
+ self.setup(x)
+ self.make_valid_image(x) # not sure which one to do first...
+ self.constrain_params()
+ return x + self.delta
+
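+# Illustrative usage sketch (an assumption about typical usage, not from the
+# original docs): an l_inf-bounded additive threat model, instantiated once
+# per minibatch of examples.
+#
+#   delta_threat = ThreatModel(DeltaAddition,
+#                              {'lp_style': 'inf', 'lp_bound': 8.0 / 255.0})
+#   perturbation = delta_threat(examples)  # DeltaAddition set up for batch
+#   adv_images = perturbation(examples)    # originals + (constrained) delta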
+
+
+
+##############################################################################
+# #
+# SPATIAL PARAMETERS #
+# #
+##############################################################################
+
+class ParameterizedXformAdv(AdversarialPerturbation):
+
+ def __init__(self, threat_model, perturbation_params, *other_args):
+ super(ParameterizedXformAdv, self).__init__(threat_model,
+ perturbation_params)
+ assert issubclass(perturbation_params.xform_class,
+ st.ParameterizedTransformation)
+
+ self.lp_style = perturbation_params.lp_style
+ self.lp_bound = perturbation_params.lp_bound
+ self.use_stadv = perturbation_params.use_stadv
+ self.scalar_step = perturbation_params.scalar_step or 1.0
+
+
+ def _merge_setup(self, num_examples, new_xform):
+ """ DANGEROUS TO BE CALLED OUTSIDE OF THIS FILE!!!"""
+ self.num_examples = num_examples
+ self.xform = new_xform
+ self.initialized = True
+
+ def setup(self, originals):
+ super(ParameterizedXformAdv, self).setup(originals)
+ self.xform = self.perturbation_params.xform_class(shape=originals.shape,
+ manual_gpu=self.use_gpu)
+ self.initialized = True
+
+ @initialized
+ def perturbation_norm(self, x=None, lp_style=None):
+ lp_style = lp_style or self.lp_style
+ if self.use_stadv is not None:
+ assert isinstance(self.xform, st.FullSpatial)
+ return self.xform.stAdv_norm()
+ else:
+ return self.xform.norm(lp=lp_style)
+
+ @initialized
+ def constrain_params(self, x=None):
+ # Do lp projections
+ if isinstance(self.lp_style, int) or self.lp_style == 'inf':
+ self.xform.project_params(self.lp_style, self.lp_bound)
+
+
+
+ @initialized
+ def update_params(self, step_fxn):
+ param_list = list(self.xform.parameters())
+ assert len(param_list) == 1
+ params = param_list[0]
+ assert params.grad.data is not None
+ self.add_to_params(step_fxn(params.grad.data) * self.scalar_step)
+
+
+ @initialized
+ def add_to_params(self, grad_data):
+ """ Assumes only one parameters object in the Spatial Transform """
+ param_list = list(self.xform.parameters())
+ assert len(param_list) == 1
+ params = param_list[0]
+ params.data.add_(grad_data)
+
+ @initialized
+ def random_init(self):
+ param_list = list(self.xform.parameters())
+ assert len(param_list) == 1
+ param = param_list[0]
+ random_perturb = utils.random_from_lp_ball(param.data,
+ self.lp_style,
+ self.lp_bound)
+
+ param.data.add_(self.xform.identity_params(self.xform.img_shape) +
+ random_perturb - self.xform.xform_params.data)
+
+
+ @initialized
+ def merge_perturbation(self, other, self_mask):
+ super(ParameterizedXformAdv, self).merge_perturbation(other, self_mask)
+ new_perturbation = ParameterizedXformAdv(self.threat_model,
+ self.perturbation_params)
+
+ new_xform = self.xform.merge_xform(other.xform, self_mask)
+ new_perturbation._merge_setup(self.num_examples, new_xform)
+
+ return new_perturbation
+
+
+ def forward(self, x):
+ if not self.initialized:
+ self.setup(x)
+ self.constrain_params()
+ return self.xform.forward(x)
+
+
+
+
+##############################################################################
+# #
+# SPATIAL + ADDITION PARAMETERS #
+# #
+##############################################################################
+
+class SequentialPerturbation(AdversarialPerturbation):
+ """ Takes a list of perturbations and composes them. A norm needs to
+ be specified here to describe the perturbations.
+ """
+
+ def __init__(self, threat_model, perturbation_sequence,
+ global_parameters=PerturbationParameters(pad=10),
+ preinit_pipeline=None):
+ """ Initializes a sequence of adversarial perturbation layers
+        ARGS:
+            perturbation_sequence : ThreatModel[] -
+                list of ThreatModel objects
+            global_parameters : PerturbationParameters - global parameters to
+                                use. These contain things like how to norm this
+                                sequence, how to constrain this sequence, etc
+            preinit_pipeline: list or None -
+                if not None, a list of already-instantiated perturbation layers
+                to use as the pipeline instead of instantiating each
+                ThreatModel in perturbation_sequence
+ """
+ super(SequentialPerturbation, self).__init__(threat_model,
+ (perturbation_sequence,
+ global_parameters))
+
+ if preinit_pipeline is not None:
+ layers = preinit_pipeline
+ else:
+ layers = []
+ for threat_model in perturbation_sequence:
+ assert isinstance(threat_model, ThreatModel)
+ layers.append(threat_model())
+
+ self.pipeline = []
+ for layer_no, layer in enumerate(layers):
+ self.pipeline.append(layer)
+ self.add_module('layer_%02d' % layer_no, layer)
+
+
+ # norm: pipeline -> Scalar Variable
+ self.norm = global_parameters.norm
+ self.norm_weights = global_parameters.norm_weights
+
+ # padding with black is useful to not throw information away during
+ # sequential steps
+ self.pad = nn.ConstantPad2d(global_parameters.pad or 0, 0)
+ self.unpad = nn.ConstantPad2d(-1 * (global_parameters.pad or 0), 0)
+
+
+
+
+ def _merge_setup(self, num_examples):
+ self.num_examples = num_examples
+ self.initialized = True
+
+
+ def setup(self, x):
+ super(SequentialPerturbation, self).setup(x)
+ x = self.pad(x)
+ for layer in self.pipeline:
+ layer.setup(x)
+ self.initialized = True
+
+
+ @initialized
+ def perturbation_norm(self, x=None, lp_style=None):
+ # Need to define a nice way to describe the norm here. This can be
+ # an empirical norm between input/output
+ # For now, let's just say it's the sum of the norms of each constituent
+ if self.norm is not None:
+ return self.norm(self.pipeline, x=x, lp_style=lp_style)
+ else:
+ norm_weights = self.norm_weights or\
+ [1.0 for _ in range(len(self.pipeline))]
+ out = None
+ for i, layer in enumerate(self.pipeline):
+ weight = norm_weights[i]
+ layer_norm = layer.perturbation_norm(x=x, lp_style=lp_style)
+ if out is None:
+ out = layer_norm * weight
+ else:
+ out = out + layer_norm * weight
+ return out
+
+ @initialized
+ def make_valid_image(self, x):
+ x = self.pad(x)
+ for layer in self.pipeline:
+ layer.make_valid_image(x)
+ x = layer(x)
+
+
+ @initialized
+ def constrain_params(self):
+ # Need to do some sort of crazy projection operator for general things
+ # For now, let's just constrain each thing in sequence
+
+ for layer in self.pipeline:
+ layer.constrain_params()
+
+ @initialized
+ def update_params(self, step_fxn):
+ for layer in self.pipeline:
+ layer.update_params(step_fxn)
+
+
+ @initialized
+ def merge_perturbation(self, other, self_mask):
+ super(SequentialPerturbation, self).merge_perturbation(other, self_mask)
+
+
+ new_pipeline = []
+ for self_layer, other_layer in zip(self.pipeline, other.pipeline):
+ new_pipeline.append(self_layer.merge_perturbation(other_layer,
+ self_mask))
+
+
+ layer_params, global_params = self.perturbation_params
+
+ new_perturbation = SequentialPerturbation(self.threat_model,
+ layer_params,
+ global_parameters=global_params,
+ preinit_pipeline=new_pipeline)
+ new_perturbation._merge_setup(self.num_examples)
+
+ return new_perturbation
+
+
+
+ def forward(self, x, layer_slice=None):
+ """ Layer slice here is either an int or a tuple
+ If int, we run forward only the first layer_slice layers
+ If tuple, we start at the
+
+ """
+
+ # Blocks to handle only particular layer slices (debugging)
+ if layer_slice is None:
+ pipeline_iter = iter(self.pipeline)
+ elif isinstance(layer_slice, int):
+ pipeline_iter = iter(self.pipeline[:layer_slice])
+ elif isinstance(layer_slice, tuple):
+ pipeline_iter = iter(self.pipeline[layer_slice[0]: layer_slice[1]])
+ # End block to handle particular layer slices
+
+ # Handle padding
+ original_hw = x.shape[-2:]
+ if not self.initialized:
+ self.setup(x)
+
+ self.constrain_params()
+ self.make_valid_image(x)
+
+ x = self.pad(x)
+ for layer in pipeline_iter:
+ x = layer(x)
+ return self.unpad(x)
+
+
+ @initialized
+ def random_init(self):
+ for layer in self.pipeline:
+ layer.random_init()
+
+ @initialized
+ def attach_originals(self, originals):
+ self.originals = originals
+ for layer in self.pipeline:
+ layer.attach_originals(originals)
+
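+# Illustrative composition sketch (an assumption, not from the original docs):
+# given two hypothetical ThreatModel objects xform_threat and delta_threat,
+# a sequential threat model that applies the spatial transform first and the
+# additive delta second could look like
+#
+#   seq_threat = ThreatModel(SequentialPerturbation,
+#                            [xform_threat, delta_threat],
+#                            PerturbationParameters(pad=10))
+#   perturbation = seq_threat(examples)
+#   adv_images = perturbation(examples)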
+
+
+
+
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_training.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_training.py
new file mode 100644
index 0000000..0e7f46c
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/adversarial_training.py
@@ -0,0 +1,550 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/adversarial_training.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" Contains training code for adversarial training """
+
+from __future__ import print_function
+import torch
+import torch.cuda as cuda
+import torch.optim as optim
+import torch.nn as nn
+
+from torch.autograd import Variable
+
+import random
+
+from .utils import pytorch_utils as utils, checkpoints
+
+
+##############################################################################
+# #
+# ATTACK PARAMETERS OBJECT #
+# #
+##############################################################################
+
+class AdversarialAttackParameters(object):
+ """ Wrapper to store an adversarial attack object as well as some extra
+ parameters for how to use it in training
+ """
+
+ def __init__(self, adv_attack_obj, proportion_attacked=1.0,
+ attack_specific_params=None):
+ """ Stores params for how to use adversarial attacks in training
+ ARGS:
+ adv_attack_obj : AdversarialAttack subclass -
+ thing that actually does the attack
+ proportion_attacked: float between [0.0, 1.0] - what proportion of
+ the minibatch we build adv examples for
+ attack_specific_params: possibly None dict, but possibly dict with
+ specific parameters for attacks
+
+ """
+ self.adv_attack_obj = adv_attack_obj
+ self.proportion_attacked = proportion_attacked
+
+ attack_specific_params = attack_specific_params or {}
+ self.attack_specific_params = attack_specific_params
+ self.attack_kwargs = attack_specific_params.get('attack_kwargs', {})
+
+ def set_gpu(self, use_gpu):
+ """ Propagates changes of the 'use_gpu' parameter down to the attack
+ ARGS:
+ use_gpu : bool - if True, the attack uses the GPU, ow it doesn't
+ RETURNS:
+ None
+ """
+ self.adv_attack_obj.use_gpu = use_gpu
+
+ def attack(self, inputs, labels):
+ """ Builds some adversarial examples given real inputs and labels
+ ARGS:
+ inputs : torch.Tensor (NxCxHxW) - tensor with examples needed
+            labels : torch.Tensor (N) - tensor with the labels of the examples
+ RETURNS:
+ some sample of (self.proportion_attacked * N ) examples that are
+ adversarial, and the corresponding NONADVERSARIAL LABELS
+
+            output is a tuple with five elements:
+             (adv_examples, pre_adv_labels, selected_idxs, adv_inputs,
+              perturbation)
+            adv_examples: Tensor with shape (N'xCxHxW) [the perturbed outputs]
+            pre_adv_labels: Tensor with shape (N') [original labels]
+            selected_idxs : Tensor with shape (N') [idxs selected]
+            adv_inputs : Tensor with shape (N'xCxHxW) [examples used to make
+                         the advs]
+            perturbation: AdversarialPerturbation object
+ """
+ num_elements = inputs.shape[0]
+
+ selected_idxs = sorted(random.sample(list(range(num_elements)),
+ int(self.proportion_attacked * num_elements)))
+
+ selected_idxs = inputs.new(selected_idxs).long()
+ if selected_idxs.numel() == 0:
+            return (None, None, None, None, None)
+
+ adv_inputs = Variable(inputs.index_select(0, selected_idxs))
+ pre_adv_labels = labels.index_select(0, selected_idxs)
+
+ perturbation = self.adv_attack_obj.attack(adv_inputs.data,
+ pre_adv_labels,
+ **self.attack_kwargs)
+ adv_examples = perturbation(adv_inputs)
+
+ return (adv_examples, pre_adv_labels, selected_idxs, adv_inputs,
+ perturbation)
+
+ def eval(self, ground_inputs, adv_inputs, labels, idxs, topk=1):
+ """ Outputs the accuracy of the adversarial examples
+
+ NOTE: notice the difference between N and N' in the argument
+ ARGS:
+ ground_inputs: Variable (NxCxHxW) - examples before we did
+ adversarial perturbation. Vals in [0, 1] range
+            adv_inputs: Variable (N'xCxHxW) - examples after we did
+                           adversarial perturbation. Should be in the same
+                           order as the selected ground_inputs
+ labels: Variable (longTensor N) - correct labels of classification
+ output
+ idxs: Variable (longtensor N') - indices of ground_inputs/labels
+ used for adversarials.
+ RETURNS:
+ tuple of (% of correctly classified original examples,
+ % of correctly classified adversarial examples)
+
+ """
+
+ selected_grounds = ground_inputs.index_select(0, idxs)
+ selected_labels = labels.index_select(0, idxs)
+ return self.adv_attack_obj.eval(selected_grounds, adv_inputs,
+ selected_labels, topk=topk)
+
+ def eval_attack_only(self, adv_inputs, labels, topk=1):
+ """ Outputs the accuracy of the adv_inputs only
+ ARGS:
+ adv_inputs: Variable NxCxHxW - examples after we did adversarial
+ perturbation
+ labels: Variable (longtensor N) - correct labels of classification
+ output
+ topk: int - criterion for 'correct' classification
+ RETURNS:
+ (int) number of correctly classified examples
+ """
+
+ return self.adv_attack_obj.eval_attack_only(adv_inputs, labels,
+ topk=topk)
+
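+# Illustrative usage sketch (an assumption about typical usage, not from the
+# original docs): wrap a hypothetical attacks.PGD instance `pgd_attack` so
+# that half of each training minibatch is adversarially perturbed with
+# 10-step PGD.
+#
+#   attack_params = AdversarialAttackParameters(
+#       pgd_attack, proportion_attacked=0.5,
+#       attack_specific_params={'attack_kwargs': {'num_iterations': 10}})
+#   adv_ex, pre_labels, idxs, originals, pert = attack_params.attack(inputs,
+#                                                                    labels)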
+
+##############################################################################
+# #
+# TRAINING OBJECT #
+# #
+##############################################################################
+
+
+class AdversarialTraining(object):
+ """ Wrapper for training of a NN with adversarial examples cooked in
+ """
+
+ def __init__(self, classifier_net, normalizer,
+ experiment_name, architecture_name,
+ manual_gpu=None):
+
+ """
+ ARGS:
+            classifier_net : nn.Module subclass - instance of neural net to
+                             classify images. May or may not already be trained
+ normalizer : DifferentiableNormalize - object to convert to zero-mean
+ unit-variance domain
+ experiment_name : String - human-readable name of the 'trained_model'
+ (this is helpful for identifying checkpoints later)
+ manual_gpu : None or bool - if not None is a manual override of whether
+ or not to use the GPU. If left None, we use the GPU if we
+ can
+
+ ON NOMENCLATURE:
+ Depending on verbosity levels, training checkpoints are saved after
+ some training epochs. These are saved as
+ '//.path.tar'
+
+ Best practice is to keep architecture_name consistent across
+ adversarially trained models built off the same architecture and having
+ a descriptive experiment_name for each training instance
+ """
+ self.classifier_net = classifier_net
+ self.normalizer = normalizer
+ self.experiment_name = experiment_name
+ self.architecture_name = architecture_name
+
+ if manual_gpu is not None:
+ self.use_gpu = manual_gpu
+ else:
+ self.use_gpu = utils.use_gpu()
+
+ self.verbosity_level = None
+ self.verbosity_minibatch = None
+ self.verbosity_adv = None
+ self.verbosity_epoch = None
+
+ self.logger = utils.TrainingLogger()
+ self.log_level = None
+ self.log_minibatch = None
+ self.log_adv = None
+ self.log_epoch = None
+
+ def reset_logger(self):
+ """ Clears the self.logger instance - useful occasionally """
+ self.logger = utils.TrainingLogger()
+
+ def set_verbosity_loglevel(self, level,
+ verbosity_or_loglevel='verbosity'):
+ """ Sets the verbosity or loglevel for training.
+ Is called in .train method so this method doesn't need to be
+ explicitly called.
+
+ Verbosity is mapped from a string to a comparable int 'level'.
+ _level : int - comparable value of verbosity
+ _minibatch: int - we do a printout every this many
+ minibatches
+ _adv: int - we evaluate the efficacy of our attack every
+ this many minibatches
+ _epoch: int - we printout/log and checkpoint every this many
+ epochs
+ ARGS:
+ level : string ['low', 'medium', 'high', 'snoop'],
+ varying levels of verbosity/logging in increasing order
+
+ RETURNS: None
+ """
+ assert level in ['low', 'medium', 'high', 'snoop']
+ assert verbosity_or_loglevel in ['verbosity', 'loglevel']
+ setattr(self, verbosity_or_loglevel, level)
+
+ _level = {'low': 0,
+ 'medium': 1,
+ 'high': 2,
+ 'snoop': 420}[level]
+ setattr(self, verbosity_or_loglevel + '_level', _level)
+
+ _minibatch = {'medium': 2000,
+ 'high': 100,
+ 'snoop': 1}.get(level)
+ setattr(self, verbosity_or_loglevel + '_minibatch', _minibatch)
+
+ _adv = {'medium': 2000,
+ 'high': 100,
+ 'snoop': 1}.get(level)
+        setattr(self, verbosity_or_loglevel + '_adv', _adv)
+
+ _epoch = {'low': 100,
+ 'medium': 10,
+ 'high': 1,
+ 'snoop': 1}.get(level)
+ setattr(self, verbosity_or_loglevel + '_epoch', _epoch)
+
+ def _attack_subroutine(self, attack_parameters, inputs, labels,
+ epoch_num, minibatch_num, adv_saver,
+ logger):
+ """ Subroutine to run the specified attack on a minibatch and append
+ the results to inputs/labels.
+
+ NOTE: THIS DOES NOT MUTATE inputs/labels !!!!
+
+ ARGS:
+ attack_parameters: {k: AdversarialAttackParameters} (or None) -
+ if not None, contains info on how to do adv
+ attacks. If None, we don't train adversarially
+ inputs : Tensor (NxCxHxW) - minibatch of data we build adversarial
+ examples for
+ labels : Tensor (longtensor N) - minibatch of labels
+ epoch_num : int - number of which epoch we're working on.
+ Is helpful for printing
+ minibatch_num : int - number of which minibatch we're working on.
+ Is helpful for printing
+ adv_saver : None or checkpoints.CustomDataSaver -
+ if not None, we save the adversarial images for later
+ use, else we don't save them.
+ logger : utils.TrainingLogger instance - logger instance to keep
+ track of logging data if we need data for this instance
+ RETURNS:
+ (inputs, labels, adv_inputs, coupled_inputs)
+                where inputs = original inputs ++ adv_inputs
+ labels is original labels
+ adv_inputs is the (Variable) adversarial examples generated,
+ coupled_inputs is the (Variable) inputs used to generate the
+ adversarial examples (useful for when we don't
+ augment 1:1).
+ """
+ if attack_parameters is None:
+ return inputs, labels, None, None
+
+ assert isinstance(attack_parameters, dict)
+
+ adv_inputs_total, adv_labels_total, coupled_inputs = [], [], []
+ for (key, param) in attack_parameters.items():
+ adv_data = param.attack(inputs, labels)
+ adv_inputs, adv_labels, adv_idxs, og_adv_inputs, _ = adv_data
+
+ needs_print = (self.verbosity_level >= 1 and
+ minibatch_num % self.verbosity_adv == self.verbosity_adv - 1)
+ needs_log = (self.loglevel_level >= 1 and
+ minibatch_num % self.loglevel_adv == self.loglevel_adv - 1)
+
+ if needs_print or needs_log:
+ accuracy = param.eval(inputs, adv_inputs, labels, adv_idxs)
+
+ if needs_print:
+ print('[%d, %5d] accuracy: (%.3f, %.3f)' %
+ (epoch_num, minibatch_num + 1, accuracy[1], accuracy[0]))
+
+ if needs_log:
+ logger.log(key, epoch_num, minibatch_num + 1,
+ (accuracy[1], accuracy[0]))
+
+ if adv_saver is not None: # Save the adversarial examples
+ adv_saver.save_minibatch(adv_inputs, adv_labels)
+
+ adv_inputs_total.append(adv_inputs)
+ adv_labels_total.append(adv_labels)
+ coupled_inputs.append(og_adv_inputs)
+
+ inputs = torch.cat([inputs] + [_.data for _ in adv_inputs_total], dim=0)
+ labels = torch.cat([labels] + adv_labels_total, dim=0)
+ coupled = torch.cat(coupled_inputs, dim=0)
+ return inputs, labels, torch.cat(adv_inputs_total, dim=0), coupled
+
+ def train(self, data_loader, num_epochs, train_loss,
+ optimizer=None, attack_parameters=None,
+ verbosity='medium', loglevel='medium', logger=None,
+ starting_epoch=0, adversarial_save_dir=None,
+ regularize_adv_scale=None):
+ """ Modifies the NN weights of self.classifier_net by training with
+            the specified parameters
+ ARGS:
+ data_loader: torch.utils.data.DataLoader OR
+ checkpoints.CustomDataLoader - object that loads the
+ data
+            num_epochs: int - number of epochs to train for
+            train_loss: loss object with a .forward(outputs, labels) method
+                        (e.g. an nn.CrossEntropyLoss instance) - loss used for
+                        the standard training objective
+ optimizer: torch.Optimizer subclass - defaults to Adam with some
+ decent default params. Pass this in as an actual argument
+ to do anything different
+ attack_parameters: AdversarialAttackParameters obj | None |
+ {key: AdversarialAttackParameters} -
+ if not None, is either an object or dict of
+ objects containing names and info on how to do
+ adv attacks. If None, we don't train
+ adversarially
+            verbosity : string - must be 'low', 'medium', 'high', or 'snoop',
+                        which describes how much to print
+            loglevel : string - must be 'low', 'medium', 'high', or 'snoop',
+                       which describes how much to log
+ logger : if not None, is a utils.TrainingLogger instance. Otherwise
+ we use this instance's self.logger object to log
+ starting_epoch : int - which epoch number we start on. Is useful
+ for correct labeling of checkpoints and figuring
+ out how many epochs we actually need to run for
+ (i.e., num_epochs - starting_epoch)
+ adversarial_save_dir: string or None - if not None is the name of
+ the directory we save adversarial images to.
+ If None, we don't save adversarial images
+ regularize_adv_scale : float > 0 or None - if not None we do L1 loss
+ between the logits of the adv examples and
+ the inputs used to generate them. This is the
+ scale constant of that loss
+
+ RETURNS:
+            the logger used during training; also modifies the
+            classifier_net's weights in place
+ """
+
+ ######################################################################
+ # Setup/ input validations #
+ ######################################################################
+ self.classifier_net.train() # in training mode
+ assert isinstance(num_epochs, int)
+
+ if attack_parameters is not None:
+ if not isinstance(attack_parameters, dict):
+ attack_parameters = {'attack': attack_parameters}
+
+ # assert that the adv attacker uses the NN that's being trained
+ for param in attack_parameters.values():
+ assert (param.adv_attack_obj.classifier_net ==
+ self.classifier_net)
+
+ assert not (self.use_gpu and not cuda.is_available())
+ if self.use_gpu:
+ self.classifier_net.cuda()
+ if attack_parameters is not None:
+ for param in attack_parameters.values():
+ param.set_gpu(self.use_gpu)
+
+ # Verbosity parameters
+ assert verbosity in ['low', 'medium', 'high', 'snoop', None]
+ self.set_verbosity_loglevel(verbosity,
+ verbosity_or_loglevel='verbosity')
+ verbosity_level = self.verbosity_level
+ verbosity_minibatch = self.verbosity_minibatch
+ verbosity_epoch = self.verbosity_epoch
+
+ # Loglevel parameters and logger initialization
+ assert loglevel in ['low', 'medium', 'high', 'snoop', None]
+ if logger is None:
+ logger = self.logger
+ if logger.data_count() > 0:
+ print("WARNING: LOGGER IS NOT EMPTY! BE CAREFUL!")
+ logger.add_series('training_loss')
+ for key in (attack_parameters or {}).keys():
+ logger.add_series(key)
+
+ self.set_verbosity_loglevel(loglevel, verbosity_or_loglevel='loglevel')
+ loglevel_level = self.loglevel_level
+ loglevel_minibatch = self.loglevel_minibatch
+ loglevel_epoch = self.loglevel_epoch
+
+ # Adversarial image saver:
+ adv_saver = None
+ if adversarial_save_dir is not None and attack_parameters is not None:
+ adv_saver = checkpoints.CustomDataSaver(adversarial_save_dir)
+
+ # setup loss fxn, optimizer
+ optimizer = optimizer or optim.Adam(self.classifier_net.parameters(),
+ lr=0.001)
+
+ # setup regularize adv object
+ if regularize_adv_scale is not None:
+ regularize_adv_criterion = nn.L1Loss()
+
+ ######################################################################
+ # Training loop #
+ ######################################################################
+
+ for epoch in range(starting_epoch + 1, num_epochs + 1):
+ running_loss_print, running_loss_print_mb = 0.0, 0
+ running_loss_log, running_loss_log_mb = 0.0, 0
+ for i, data in enumerate(data_loader, 0):
+ inputs, labels = data
+ if self.use_gpu:
+ inputs = inputs.cuda()
+ labels = labels.cuda()
+
+ # Build adversarial examples
+ attack_out = self._attack_subroutine(attack_parameters,
+ inputs, labels,
+ epoch, i, adv_saver,
+ logger)
+ inputs, labels, adv_examples, adv_inputs = attack_out
+ # Now proceed with standard training
+ self.normalizer.differentiable_call()
+ self.classifier_net.train()
+ inputs, labels = Variable(inputs), Variable(labels)
+ optimizer.zero_grad()
+
+ # forward step
+ outputs = self.classifier_net.forward(self.normalizer(inputs))
+ loss = train_loss.forward(outputs, labels)
+
+ if regularize_adv_scale is not None:
+ # BE SURE TO 'DETACH' THE ADV_INPUTS!!!
+ reg_adv_loss = regularize_adv_criterion(adv_examples,
+ Variable(adv_inputs.data))
+ print(float(loss), regularize_adv_scale * float(reg_adv_loss))
+ loss = loss + regularize_adv_scale * reg_adv_loss
+
+ # backward step
+ loss.backward()
+ optimizer.step()
+
+ # print things
+
+ running_loss_print += float(loss.data)
+ running_loss_print_mb += 1
+ if (verbosity_level >= 1 and
+ i % verbosity_minibatch == verbosity_minibatch - 1):
+ print('[%d, %5d] loss: %.6f' %
+ (epoch, i + 1, running_loss_print /
+ float(running_loss_print_mb)))
+ running_loss_print = 0.0
+ running_loss_print_mb = 0
+
+ # log things
+ running_loss_log += float(loss.data)
+ running_loss_log_mb += 1
+ if (loglevel_level >= 1 and
+ i % loglevel_minibatch == loglevel_minibatch - 1):
+ logger.log('training_loss', epoch, i + 1,
+ running_loss_log / float(running_loss_log_mb))
+ running_loss_log = 0.0
+ running_loss_log_mb = 0
+
+ # end_of_epoch
+ if epoch % verbosity_epoch == 0:
+ print("COMPLETED EPOCH %04d... checkpointing here" % epoch)
+ checkpoints.save_state_dict(self.experiment_name,
+ self.architecture_name,
+ epoch, self.classifier_net,
+ k_highest=3)
+
+ if verbosity_level >= 1:
+ print('Finished Training')
+
+ return logger
+
+ def train_from_checkpoint(self, data_loader, num_epochs, loss_fxn,
+ optimizer=None, attack_parameters=None,
+ verbosity='medium',
+ starting_epoch='max',
+ adversarial_save_dir=None):
+ """ Resumes training from a saved checkpoint with the same architecture.
+ i.e. loads weights from specified checkpoint, figures out which
+ epoch we checkpointed on and then continues training until
+ we reach num_epochs epochs
+ ARGS:
+ same as in train
+ starting_epoch: 'max' or int - which epoch we start training from.
+ 'max' means the highest epoch we can find,
+ an int means a specified int epoch exactly.
+ RETURNS:
+ None
+ """
+
+ ######################################################################
+ # Checkpoint handling block #
+ ######################################################################
+ # which epoch to load
+ valid_epochs = checkpoints.list_saved_epochs(self.experiment_name,
+ self.architecture_name)
+ assert valid_epochs != []
+ if starting_epoch == 'max':
+ epoch = max(valid_epochs)
+ else:
+ assert starting_epoch in valid_epochs
+ epoch = starting_epoch
+
+        # modify the classifier to use these weights
+
+ self.classifier_net = checkpoints.load_state_dict(self.experiment_name,
+ self.architecture_name,
+ epoch,
+ self.classifier_net)
+
+ ######################################################################
+ # Training block #
+ ######################################################################
+
+ self.train(data_loader, num_epochs, loss_fxn,
+ optimizer=optimizer,
+ attack_parameters=attack_parameters,
+ verbosity=verbosity,
+ starting_epoch=epoch,
+ adversarial_save_dir=adversarial_save_dir)
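+
+
+def _example_training_run(trainer, data_loader, attack_parameters=None):
+    """ Illustrative sketch only (not part of the original mister_ed code):
+    a minimal call into the trainer class defined above. `trainer` is assumed
+    to be an already-constructed instance of that class, and
+    `attack_parameters` follows the format documented in train() (a single
+    AdversarialAttackParameters object, a dict of them, or None for standard
+    non-adversarial training).
+    """
+    train_loss = nn.CrossEntropyLoss()
+    return trainer.train(data_loader, num_epochs=10, train_loss=train_loss,
+                         attack_parameters=attack_parameters,
+                         verbosity='high', loglevel='medium')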
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/config.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/config.py
new file mode 100644
index 0000000..21c1ec3
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/config.py
@@ -0,0 +1,40 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/config.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+import os
+
+config_dir = os.path.abspath(os.path.dirname(__file__))
+
+
+def path_resolver(path):
+ if path.startswith('~/'):
+ return os.path.expanduser(path)
+
+ if path.startswith('./'):
+ return os.path.join(*[config_dir] + path.split('/')[1:])
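+    # paths starting with neither '~/' nor './' fall through and resolve to None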
+
+
+DEFAULT_DATASETS_DIR = path_resolver('~/datasets')
+MODEL_PATH = path_resolver('./pretrained_models/')
+OUTPUT_IMAGE_PATH = path_resolver('./output_images/')
+
+DEFAULT_BATCH_SIZE = 128
+DEFAULT_WORKERS = 4
+CIFAR10_MEANS = [0.485, 0.456, 0.406]
+CIFAR10_STDS = [0.229, 0.224, 0.225]
+
+WIDE_CIFAR10_MEANS = [0.4914, 0.4822, 0.4465]
+WIDE_CIFAR10_STDS = [0.2023, 0.1994, 0.2010]
+
+IMAGENET_MEANS = [0.485, 0.456, 0.406]
+IMAGENET_STDS = [0.229, 0.224, 0.225]
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/loss_functions.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/loss_functions.py
new file mode 100644
index 0000000..fe2a80c
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/loss_functions.py
@@ -0,0 +1,562 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/loss_functions.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+import torch.nn as nn
+import torch
+from numbers import Number
+from .utils import pytorch_utils as utils
+from .utils import image_utils as img_utils
+from . import spatial_transformers as st
+from torch.autograd import Variable
+from functools import partial
+from . import adversarial_perturbations as ap
+
+""" Loss function building blocks """
+
+
+##############################################################################
+# #
+# LOSS FUNCTION WRAPPER #
+# #
+##############################################################################
+
+class RegularizedLoss(object):
+ """ Wrapper for multiple PartialLoss objects where we combine with
+ regularization constants """
+
+ def __init__(self, losses, scalars, negate=False):
+ """
+ ARGS:
+ losses : dict - dictionary of partialLoss objects, each is keyed
+ with a nice identifying name
+ scalars : dict - dictionary of scalars, each is keyed with the
+ same identifying name as is in self.losses
+ negate : bool - if True, we negate the whole thing at the end
+ """
+
+ assert sorted(losses.keys()) == sorted(scalars.keys())
+
+ self.losses = losses
+ self.scalars = scalars
+ self.negate = negate
+
+ def forward(self, examples, labels, *args, **kwargs):
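+        """ Computes the weighted sum over all partial losses:
+            sum_k scalars[k] * losses[k].forward(examples, labels, ...).
+            If kwargs['output_per_example'] is True, per-example loss vectors
+            are kept instead of being summed to a scalar; if self.negate is
+            True the final result is multiplied by -1.
+        """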
+
+ output = None
+ output_per_example = kwargs.get('output_per_example', False)
+ for k in self.losses:
+ loss = self.losses[k]
+ scalar = self.scalars[k]
+
+ loss_val = loss.forward(examples, labels, *args, **kwargs)
+ # assert scalar is either a...
+ assert (isinstance(scalar, float) or # number
+                scalar.numel() == 1 or  # tensor wrapping of a number
+ scalar.shape == loss_val.shape) # same as the loss_val
+
+ addendum = loss_val * scalar
+ if addendum.numel() > 1:
+ if not output_per_example:
+ addendum = torch.sum(addendum)
+
+ if output is None:
+ output = addendum
+ else:
+ output = output + addendum
+ if self.negate:
+ return output * -1
+ else:
+ return output
+
+ def setup_attack_batch(self, fix_im):
+ """ Setup before calling loss on a new minibatch. Ensures the correct
+ fix_im for reference regularizers and that all grads are zeroed
+ ARGS:
+ fix_im: Variable (NxCxHxW) - Ground images for this minibatch
+ SHOULD BE IN [0.0, 1.0] RANGE
+ """
+ for loss in self.losses.values():
+ if isinstance(loss, ReferenceRegularizer):
+ loss.setup_attack_batch(fix_im)
+ else:
+ loss.zero_grad()
+
+ def cleanup_attack_batch(self):
+ """ Does some cleanup stuff after we finish on a minibatch:
+ - clears the fixed images for ReferenceRegularizers
+ - zeros grads
+ - clears example-based scalars (i.e. scalars that depend on which
+ example we're using)
+ """
+ for loss in self.losses.values():
+ if isinstance(loss, ReferenceRegularizer):
+ loss.cleanup_attack_batch()
+ else:
+ loss.zero_grad()
+
+ for key, scalar in self.scalars.items():
+ if not isinstance(scalar, Number):
+ self.scalars[key] = None
+
+ def zero_grad(self):
+ for loss in self.losses.values():
+ loss.zero_grad() # probably zeros the same net more than once...
+
+
+class PartialLoss(object):
+ """ Partially applied loss object. Has forward and zero_grad methods """
+
+ def __init__(self):
+ self.nets = []
+
+ def zero_grad(self):
+ for net in self.nets:
+ net.zero_grad()
+
+
+##############################################################################
+# #
+# LOSS FUNCTIONS #
+# #
+##############################################################################
+
+############################################################################
+# NAIVE CORRECT INDICATOR LOSS #
+############################################################################
+
+class IncorrectIndicator(PartialLoss):
+ def __init__(self, classifier, normalizer=None):
+ super(IncorrectIndicator, self).__init__()
+ self.classifier = classifier
+ self.normalizer = normalizer
+
+ def forward(self, examples, labels, *args, **kwargs):
+ """ Returns either (the number | a boolean vector) of examples that
+ don't match the labels when run through the
+ classifier(normalizer(.)) composition.
+ ARGS:
+ examples: Variable (NxCxHxW) - should be same shape as
+ ctx.fix_im, is the examples we define loss for.
+ SHOULD BE IN [0.0, 1.0] RANGE
+ labels: Variable (longTensor of length N) - true classification
+ output for fix_im/examples
+ KWARGS:
+            return_type: String - either 'int' or 'vector'. If 'int', we return
+                         the number of incorrectly classified examples;
+                         if 'vector', we return a length-N boolean tensor that
+                         is True exactly at the incorrectly classified examples
+ RETURNS:
+ scalar loss variable or boolean vector, depending on kwargs
+ """
+ return_type = kwargs.get('return_type', 'int')
+ assert return_type in ['int', 'vector']
+
+ class_out = self.classifier.forward(self.normalizer.forward(examples))
+
+ _, outputs = torch.max(class_out, 1)
+ incorrect_indicator = outputs != labels
+
+ if return_type == 'int':
+ return torch.sum(incorrect_indicator)
+ else:
+ return incorrect_indicator
+
+
+##############################################################################
+# Standard XEntropy Loss #
+##############################################################################
+
+class PartialXentropy(PartialLoss):
+ def __init__(self, classifier, normalizer=None):
+ super(PartialXentropy, self).__init__()
+ self.classifier = classifier
+ self.normalizer = normalizer
+ self.nets.append(self.classifier)
+
+ def forward(self, examples, labels, *args, **kwargs):
+ """ Returns XEntropy loss
+ ARGS:
+ examples: Variable (NxCxHxW) - should be same shape as
+ ctx.fix_im, is the examples we define loss for.
+ SHOULD BE IN [0.0, 1.0] RANGE
+ labels: Variable (longTensor of length N) - true classification
+ output for fix_im/examples
+ RETURNS:
+ scalar loss variable
+ """
+
+ if self.normalizer is not None:
+ normed_examples = self.normalizer.forward(examples)
+ else:
+ normed_examples = examples
+
+ xentropy_init_kwargs = {}
+ if kwargs.get('output_per_example') == True:
+ xentropy_init_kwargs['reduction'] = 'none'
+ criterion = nn.CrossEntropyLoss(**xentropy_init_kwargs)
+ return criterion(self.classifier.forward(normed_examples), labels)
+
+
+##############################################################################
+# Carlini Wagner loss functions #
+##############################################################################
+
+class CWLossF6(PartialLoss):
+ def __init__(self, classifier, normalizer=None, kappa=0.0):
+ super(CWLossF6, self).__init__()
+ self.classifier = classifier
+ self.normalizer = normalizer
+ self.nets.append(self.classifier)
+ self.kappa = kappa
+
+ def forward(self, examples, labels, *args, **kwargs):
+ classifier_in = self.normalizer.forward(examples)
+ classifier_out = self.classifier.forward(classifier_in)
+
+ # get target logits
+ target_logits = torch.gather(classifier_out, 1, labels.view(-1, 1))
+
+ # get largest non-target logits
+ max_2_logits, argmax_2_logits = torch.topk(classifier_out, 2, dim=1)
+ top_max, second_max = max_2_logits.chunk(2, dim=1)
+ top_argmax, _ = argmax_2_logits.chunk(2, dim=1)
+ targets_eq_max = top_argmax.squeeze().eq(labels).float().view(-1, 1)
+ targets_ne_max = top_argmax.squeeze().ne(labels).float().view(-1, 1)
+ max_other = targets_eq_max * second_max + targets_ne_max * top_max
+
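+        # Carlini & Wagner's f6 objective: with Z the logits and t the label
+        # used above, the untargeted branch below computes
+        #     max(Z_t - max_{i != t} Z_i, -kappa)
+        # (the targeted branch swaps the two terms), so minimizing it drives
+        # the relevant logit gap past the confidence margin kappa.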
+ if kwargs.get('targeted', False):
+ # in targeted case, want to make target most likely
+ f6 = torch.clamp(max_other - target_logits, min=-1 * self.kappa)
+ else:
+ # in NONtargeted case, want to make NONtarget most likely
+ f6 = torch.clamp(target_logits - max_other, min=-1 * self.kappa)
+
+ return f6.squeeze()
+
+
+##############################################################################
+# #
+# REFERENCE REGULARIZERS #
+# #
+##############################################################################
+""" Regularization terms that refer back to a set of 'fixed images', or the
+ original images.
+ example: L2 regularization which computes L2dist between a perturbed image
+ and the FIXED ORIGINAL IMAGE
+
+ NOTE: it's important that these return Variables that are scalars
+ (output.numel() == 1), otherwise there's a memory leak w/ CUDA.
+ See my discussion on this here:
+ https://discuss.pytorch.org/t/cuda-memory-not-being-freed/15965
+"""
+
+
+class ReferenceRegularizer(PartialLoss):
+ def __init__(self, fix_im):
+ super(ReferenceRegularizer, self).__init__()
+ self.fix_im = fix_im
+
+ def setup_attack_batch(self, fix_im):
+        """ Setup function to set the fixed images for a new attack batch;
+            also zeros grads
+ ARGS:
+ fix_im: Variable (NxCxHxW) - Ground images for this minibatch
+ SHOULD BE IN [0.0, 1.0] RANGE
+ """
+ self.fix_im = fix_im
+ self.zero_grad()
+
+ def cleanup_attack_batch(self):
+ """ Cleanup function to clear the fixed images after an attack batch
+ has been made; also zeros grads
+ """
+ old_fix_im = self.fix_im
+ self.fix_im = None
+ del old_fix_im
+ self.zero_grad()
+
+
+#############################################################################
+# SOFT L_INF REGULARIZATION #
+#############################################################################
+
+class SoftLInfRegularization(ReferenceRegularizer):
+ '''
+ see page 10 of this paper (https://arxiv.org/pdf/1608.04644.pdf)
+ for discussion on why we want SOFT l inf
+ '''
+
+ def __init__(self, fix_im, **kwargs):
+ super(SoftLInfRegularization, self).__init__(fix_im)
+
+ def forward(self, examples, *args, **kwargs):
+ # ARGS should have one element, which serves as the tau value
+
+ tau = 8.0 / 255.0 # starts at 1 each time?
+ scale_factor = 0.9
+ l_inf_dist = float(torch.max(torch.abs(examples - self.fix_im)))
+ '''
+ while scale_factor * tau > l_inf_dist:
+ tau *= scale_factor
+
+ assert tau > l_inf_dist
+ '''
+ delta_minus_taus = torch.clamp(torch.abs(examples - self.fix_im) - tau,
+ min=0.0)
+ batchwise = utils.batchwise_norm(delta_minus_taus, 'inf', dim=0)
+ return batchwise.squeeze()
+
+
+#############################################################################
+# L2 REGULARIZATION #
+#############################################################################
+
+class L2Regularization(ReferenceRegularizer):
+
+ def __init__(self, fix_im, **kwargs):
+ super(L2Regularization, self).__init__(fix_im)
+
+ def forward(self, examples, *args, **kwargs):
+ l2_dist = img_utils.nchw_l2(examples, self.fix_im,
+ squared=True).view(-1, 1)
+ return l2_dist.squeeze()
+
+
+#############################################################################
+# LPIPS PERCEPTUAL REGULARIZATION #
+#############################################################################
+
+class LpipsRegularization(ReferenceRegularizer):
+
+ def __init__(self, fix_im, **kwargs):
+ super(LpipsRegularization, self).__init__(fix_im)
+
+ manual_gpu = kwargs.get('manual_gpu', None)
+ if manual_gpu is not None:
+ self.use_gpu = manual_gpu
+ else:
+ self.use_gpu = utils.use_gpu()
+
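+        # NOTE: `dm` (the LPIPS distance-model wrapper from upstream
+        # mister_ed) is not imported in this file, so constructing this
+        # regularizer will raise a NameError unless that import is added.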
+ self.dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
+
+ def forward(self, examples, *args, **kwargs):
+ xform = lambda im: im * 2.0 - 1.0
+ perceptual_loss = self.dist_model.forward_var(examples,
+ self.fix_im)
+
+ return perceptual_loss.squeeze()
+
+
+#############################################################################
+# SSIM PERCEPTUAL REGULARIZATION #
+#############################################################################
+
+class SSIMRegularization(ReferenceRegularizer):
+
+ def __init__(self, fix_im, **kwargs):
+ super(SSIMRegularization, self).__init__(fix_im)
+
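+        # NOTE: `ssim` (upstream mister_ed's pytorch_ssim helper) is not
+        # imported in this file, so constructing this regularizer will raise
+        # a NameError unless that import is added.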
+ if 'window_size' in kwargs:
+ self.ssim_instance = ssim.SSIM(window_size=kwargs['window_size'])
+ else:
+ self.ssim_instance = ssim.SSIM()
+
+ manual_gpu = kwargs.get('manual_gpu', None)
+ if manual_gpu is not None:
+ self.use_gpu = manual_gpu
+ else:
+ self.use_gpu = utils.use_gpu()
+
+ def forward(self, examples, *args, **kwargs):
+ output = []
+ for ex, fix_ex in zip(examples, self.fix_im):
+ output.append(1.0 - self.ssim_instance(ex.unsqueeze(0),
+ fix_ex.unsqueeze(0)))
+ return torch.stack(output)
+
+
+##############################################################################
+# #
+# SPATIAL LOSS FUNCTIONS #
+# #
+##############################################################################
+
+class FullSpatialLpLoss(PartialLoss):
+ """ Spatial loss using lp norms on the spatial transformation parameters
+ This is defined as the Lp difference between the identity map and the
+ provided spatial transformation parameters
+ """
+
+ def __init__(self, **kwargs):
+ super(FullSpatialLpLoss, self).__init__()
+
+ lp = kwargs.get('lp', 2)
+ assert lp in [1, 2, 'inf']
+ self.lp = lp
+
+ def forward(self, examples, *args, **kwargs):
+ """ Computes lp loss between identity map and spatial transformation.
+        There better be a kwarg with key 'spatial' which is a FullSpatial
+ object describing how the examples were generated from the originals
+ """
+ st_obj = kwargs['spatial']
+ assert isinstance(st_obj, st.FullSpatial)
+
+ # First create the identity map and make same type as examples
+ identity_map = Variable(st_obj.identity_params(examples.shape))
+ if examples.is_cuda:
+            identity_map = identity_map.cuda()
+
+ # Then take diffs and take lp norms
+ diffs = st_obj.grid_params - identity_map
+ lp_norm = utils.batchwise_norm(diffs, self.lp, dim=0)
+ return lp_norm # return Nx1 variable, will sum in parent class
+
+
+class PerturbationNormLoss(PartialLoss):
+
+ def __init__(self, **kwargs):
+ super(PerturbationNormLoss, self).__init__()
+
+ lp = kwargs.get('lp', 2)
+ assert lp in [1, 2, 'inf']
+ self.lp = lp
+
+ def forward(self, examples, *args, **kwargs):
+ """ Computes perturbation norm and multiplies by scale
+ There better be a kwarg with key 'perturbation' which is a perturbation
+ object with a 'perturbation_norm' method that takes 'lp_style' as a
+ kwarg
+ """
+
+ perturbation = kwargs['perturbation']
+ assert isinstance(perturbation, ap.AdversarialPerturbation)
+
+ return perturbation.perturbation_norm(lp_style=self.lp)
+
+
+##############################################################################
+# #
+# Combined Transformer Loss #
+# #
+##############################################################################
+
+class CombinedTransformerLoss(ReferenceRegularizer):
+ """ General class for distance functions and loss functions of the form
+ min_T ||X - T(Y)|| + c * || T ||
+ where X is the original image, and Y is the 'adversarial' input image.
+ """
+
+ def __init__(self, fix_im, transform_class=None,
+ regularization_constant=1.0,
+ transformation_loss=partial(utils.summed_lp_norm, lp=2),
+ transform_norm_kwargs=None):
+ """ Takes in a reference fix im and a class of transformations we need
+ to search over to compute forward.
+ """
+ super(CombinedTransformerLoss, self).__init__(fix_im)
+ self.transform_class = transform_class
+ self.regularization_constant = regularization_constant
+ self.transformation_loss = transformation_loss
+ self.transform_norm_kwargs = transform_norm_kwargs or {}
+ self.transformer = None
+
+ def cleanup_attack_batch(self):
+ super(CombinedTransformerLoss, self).cleanup_attack_batch()
+ self.transformer = None
+
+ def _inner_loss(self, examples):
+ """ Computes the combined loss for a particular transformation """
+
+ trans_examples = self.transformer.forward(examples)
+ trans_loss = self.transformation_loss(self.fix_im - trans_examples)
+
+ trans_norm = self.transformer.norm(**self.transform_norm_kwargs)
+ return trans_loss + trans_norm * self.regularization_constant
+
+ def forward(self, examples, *args, **kwargs):
+ """ Computes the distance between examples and args
+ ARGS:
+ examples : NxCxHxW Variable - 'adversarially' perturbed image from
+ the self.fix_im
+ KWARGS:
+ optimization stuff here
+ """
+
+ ######################################################################
+ # Setup transformer + optimizer #
+ ######################################################################
+ self.transformer = self.transform_class(shape=examples.shape)
+
+ optim_kwargs = kwargs.get('xform_loss_optim_kwargs', {})
+ optim_type = kwargs.get('xform_loss_optim_type', torch.optim.Adam)
+ num_iter = kwargs.get('xform_loss_num_iter', 20)
+
+ optimizer = optim_type(self.transformer.parameters(), **optim_kwargs)
+
+ #####################################################################
+ # Iterate and optimize the transformer #
+ #####################################################################
+ for iter_no in range(num_iter):
+ optimizer.zero_grad()
+ loss = self._inner_loss(examples)
+ loss.backward()
+ optimizer.step()
+
+ return self._inner_loss(examples)
+
+
+class RelaxedTransformerLoss(ReferenceRegularizer):
+ """ Relaxed version of transformer loss: assumes that the adversarial
+ examples are of the form Y=S(X) + delta for some S in the
+        transformation class and some small additive perturbation delta that
+        lies outside that transformation class.
+
+ In this case, we just compute ||delta|| + c||S||
+
+ This saves us from having to do the inner minmization step
+ """
+
+ def __init__(self, fix_im,
+ regularization_constant=1.0,
+ transformation_loss=partial(utils.summed_lp_norm, lp=2),
+ transform_norm_kwargs=None):
+ """ Takes in a reference fix im and a class of transformations we need
+ to search over to compute forward.
+ """
+ super(RelaxedTransformerLoss, self).__init__(fix_im)
+ self.regularization_constant = regularization_constant
+ self.transformation_loss = transformation_loss
+ self.transform_norm_kwargs = transform_norm_kwargs or {}
+
+ def forward(self, examples, *args, **kwargs):
+ """ Computes the distance between examples and args
+ ARGS:
+ examples : NxCxHxW Variable - 'adversarially' perturbed image from
+ the self.fix_im
+ KWARGS:
+ optimization stuff here
+ """
+
+ # Collect transformer norm
+ transformer = kwargs['transformer']
+ assert isinstance(transformer, st.ParameterizedTransformation)
+
+ transformer_norm = self.regularization_constant * \
+ transformer.norm(**self.transform_norm_kwargs)
+
+ # Collect transformation loss
+        delta = transformer.forward(self.fix_im) - examples
+ transformation_loss = self.transformation_loss(delta)
+
+ return transformation_loss + transformer_norm
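+
+
+def _example_regularized_attack_loss(classifier, normalizer, fix_im):
+    """ Illustrative sketch only (not part of the original recoloradv /
+    mister_ed code): combines the CW f6 attack loss with an L2 regularizer on
+    the original images, which is the typical way these building blocks are
+    assembled into an attack objective. The scalar weights are arbitrary
+    example values, and `classifier` / `normalizer` are assumed to be the
+    usual mister_ed classifier and normalizer modules.
+    """
+    loss_obj = RegularizedLoss(
+        losses={'cw_f6': CWLossF6(classifier, normalizer, kappa=0.0),
+                'l2_reg': L2Regularization(fix_im)},
+        scalars={'cw_f6': 1.0, 'l2_reg': 0.05},
+        negate=False)
+    loss_obj.setup_attack_batch(fix_im)
+    return loss_obj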
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/spatial_transformers.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/spatial_transformers.py
new file mode 100644
index 0000000..a05b861
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/spatial_transformers.py
@@ -0,0 +1,528 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/spatial_transformers.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" File that contains various parameterizations for spatial transformation
+ styles. At its simplest, spatial transforms can be affine grids,
+ parameterized by 6 values. At their most complex, for a CxHxW type image
+ grids can be parameterized by CxHxWx2 parameters.
+
+ This file will define subclasses of nn.Module that will have parameters
+ corresponding to the transformation parameters and will take in an image
+ and output a transformed image.
+
+ Further we'll also want a method to initialize each set to be the identity
+ initially
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from .utils import pytorch_utils as utils
+from torch.autograd import Variable
+
+
+##############################################################################
+# #
+# SKELETON CLASS #
+# #
+##############################################################################
+
+class ParameterizedTransformation(nn.Module):
+ """ General class of transformations.
+ All subclasses need the following methods:
+ - norm: no args -> scalar variable
+ - identity_params: shape -> TENSOR : takes an input shape and outputs
+ the subclass-specific parameter for the identity
+ transformation
+ - forward : Variable -> Variable - is the transformation
+ """
+
+ def __init__(self, **kwargs):
+ super(ParameterizedTransformation, self).__init__()
+
+ if kwargs.get('manual_gpu', None) is not None:
+ self.use_gpu = kwargs['manual_gpu']
+ else:
+ self.use_gpu = utils.use_gpu()
+
+ def norm(self, lp='inf'):
+ raise NotImplementedError("Need to call subclass's norm!")
+
+ @classmethod
+ def identity_params(self, shape):
+ raise NotImplementedError("Need to call subclass's identity_params!")
+
+ def merge_xform(self, other, self_mask):
+ """ Takes in an other instance of this same class with the same
+ shape of parameters (NxSHAPE) and a self_mask bytetensor of length
+ N and outputs the merge between self's parameters for the indices
+ of 1s in the self_mask and other's parameters for the indices of 0's
+ ARGS:
+ other: instance of same class as self with params of shape NxSHAPE -
+ the thing we merge with this one
+ self_mask : ByteTensor (length N) - which indices of parameters we
+ keep from self, and which we keep from other
+ RETURNS:
+ New instance of this class that's merged between the self and other
+ (same shaped params)
+ """
+
+ # JUST DO ASSERTS IN THE SKELETON CLASS
+ assert self.__class__ == other.__class__
+
+ self_params = self.xform_params.data
+ other_params = other.xform_params.data
+ assert self_params.shape == other_params.shape
+ assert self_params.shape[0] == self_mask.shape[0]
+ assert other_params.shape[0] == self_mask.shape[0]
+
+ new_xform = self.__class__(shape=self.img_shape)
+
+ new_params = utils.fold_mask(self.xform_params.data,
+ other.xform_params.data, self_mask)
+ new_xform.xform_params = nn.Parameter(new_params)
+ new_xform.use_gpu = self.use_gpu
+ return new_xform
+
+
+ def forward(self, examples):
+ raise NotImplementedError("Need to call subclass's forward!")
+
+
+
+
+
+###############################################################################
+# #
+# FULLY PARAMETERIZED SPATIAL TRANSFORMATION NETWORK #
+# #
+###############################################################################
+
+class FullSpatial(ParameterizedTransformation):
+ def __init__(self, *args, **kwargs):
+ """ FullSpatial just has parameters that are the grid themselves.
+ Forward then will just call grid sample using these params directly
+ """
+
+ super(FullSpatial, self).__init__(**kwargs)
+ img_shape = kwargs['shape']
+ self.img_shape = img_shape
+ self.xform_params = nn.Parameter(self.identity_params(img_shape))
+
+
+
+ def identity_params(self, shape):
+ """ Returns some grid parameters such that the minibatch of images isn't
+ changed when forward is called on it
+ ARGS:
+ shape: torch.Size - shape of the minibatch of images we'll be
+ transforming. First index should be num examples
+ RETURNS:
+ torch TENSOR (not variable!!!)
+            if shape arg has shape NxCxHxW, this has shape NxHxWx2
+ """
+
+        # Work smarter not harder -- use identity affine transforms here
+ num_examples = shape[0]
+ identity_affine_transform = torch.zeros(num_examples, 2, 3)
+ if self.use_gpu:
+ identity_affine_transform = identity_affine_transform.cuda()
+
+ identity_affine_transform[:,0,0] = 1
+ identity_affine_transform[:,1,1] = 1
+
+ return F.affine_grid(identity_affine_transform, shape).data
+
+
+ def stAdv_norm(self):
+ """ Computes the norm used in
+ "Spatially Transformed Adversarial Examples"
+ """
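+        # Concretely, this is the flow loss from "Spatially Transformed
+        # Adversarial Examples": with per-pixel flow offsets (du, dv) measured
+        # from the identity grid, it sums
+        #     sqrt((du_p - du_q)^2 + (dv_p - dv_q)^2 + eps)
+        # over every pixel p and each of its four neighbours q, implemented
+        # below with row/column permutation matrices.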
+
+ # ONLY WORKS FOR SQUARE MATRICES
+ dtype = self.xform_params.data.type()
+ num_examples, height, width = tuple(self.xform_params.shape[0:3])
+ assert height == width
+ ######################################################################
+ # Build permutation matrices #
+ ######################################################################
+
+ def id_builder():
+ x = torch.zeros(height, width).type(dtype)
+ for i in range(height):
+ x[i,i] = 1
+ return x
+
+ col_permuts = []
+ row_permuts = []
+ # torch.matmul(foo, col_permut)
+ for col in ['left', 'right']:
+ col_val = {'left': -1, 'right': 1}[col]
+ idx = ((torch.arange(width) - col_val) % width)
+ idx = idx.type(dtype).type(torch.LongTensor)
+ if self.xform_params.is_cuda:
+ idx = idx.cuda()
+
+ col_permut = torch.zeros(height, width).index_copy_(1, idx.cpu(),
+ id_builder().cpu())
+ col_permut = col_permut.type(dtype)
+
+ if col == 'left':
+ col_permut[-1][0] = 0
+ col_permut[0][0] = 1
+ else:
+ col_permut[0][-1] = 0
+ col_permut[-1][-1] = 1
+ col_permut = Variable(col_permut)
+ col_permuts.append(col_permut)
+ row_permuts.append(col_permut.transpose(0, 1))
+
+ ######################################################################
+ # Build delta_u, delta_v grids #
+ ######################################################################
+ id_params = Variable(self.identity_params(self.img_shape))
+ delta_grids = self.xform_params - id_params
+ delta_grids = delta_grids.permute(0, 3, 1, 2)
+
+ ######################################################################
+ # Compute the norm #
+ ######################################################################
+ output = Variable(torch.zeros(num_examples).type(dtype))
+
+ for row_or_col, permutes in zip(['row', 'col'],
+ [row_permuts, col_permuts]):
+ for permute in permutes:
+ if row_or_col == 'row':
+ temp = delta_grids - torch.matmul(permute, delta_grids)
+ else:
+ temp = delta_grids - torch.matmul(delta_grids, permute)
+ temp = temp.pow(2)
+ temp = temp.sum(1)
+ temp = (temp + 1e-10).pow(0.5)
+ output.add_(temp.sum((1, 2)))
+ return output
+
+
+ def norm(self, lp='inf'):
+ """ Returns the 'norm' of this transformation in terms of an LP norm on
+ the parameters, summed across each transformation per minibatch
+ ARGS:
+ lp : int or 'inf' - which lp type norm we use
+ """
+
+ if isinstance(lp, int) or lp == 'inf':
+ identity_params = Variable(self.identity_params(self.img_shape))
+ return utils.batchwise_norm(self.xform_params - identity_params, lp,
+ dim=0)
+ else:
+ assert lp == 'stAdv'
+            return self.stAdv_norm()
+
+
+ def clip_params(self):
+ """ Clips the parameters to be between -1 and 1 as required for
+ grid_sample
+ """
+ clamp_params = torch.clamp(self.xform_params, -1, 1).data
+ change_in_params = clamp_params - self.xform_params.data
+ self.xform_params.data.add_(change_in_params)
+
+
+ def merge_xform(self, other, self_mask):
+ """ Takes in an other instance of this same class with the same
+ shape of parameters (NxSHAPE) and a self_mask bytetensor of length
+ N and outputs the merge between self's parameters for the indices
+ of 1s in the self_mask and other's parameters for the indices of 0's
+ """
+ super(FullSpatial, self).merge_xform(other, self_mask)
+
+ new_xform = FullSpatial(shape=self.img_shape,
+ manual_gpu=self.use_gpu)
+
+ new_params = utils.fold_mask(self.xform_params.data,
+ other.xform_params.data, self_mask)
+ new_xform.xform_params = nn.Parameter(new_params)
+
+ return new_xform
+
+
+
+ def project_params(self, lp, lp_bound):
+ """ Projects the params to be within lp_bound (according to an lp)
+ of the identity map. First thing we do is clip the params to be
+ valid, too
+ ARGS:
+ lp : int or 'inf' - which LP norm we use. Must be an int or the
+ string 'inf'
+ lp_bound : float - how far we're allowed to go in LP land
+ RETURNS:
+ None, but modifies self.xform_params
+ """
+
+ assert isinstance(lp, int) or lp == 'inf'
+
+ # clip first
+ self.clip_params()
+
+ # then project back
+
+ if lp == 'inf':
+ identity_params = self.identity_params(self.img_shape)
+ clamp_params = utils.clamp_ref(self.xform_params.data,
+ identity_params, lp_bound)
+ change_in_params = clamp_params - self.xform_params.data
+ self.xform_params.data.add_(change_in_params)
+ else:
+ raise NotImplementedError("Only L-infinity bounds working for now ")
+
+
+ def forward(self, x):
+ # usual forward technique
+ return F.grid_sample(x, self.xform_params)
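+
+
+def _example_full_spatial_perturbation(images, lp_bound=0.01):
+    """ Illustrative sketch only (not part of the original recoloradv /
+    mister_ed code): builds a FullSpatial transform for a batch of images,
+    nudges its sampling grid away from the identity, projects it back into an
+    l_inf ball of radius `lp_bound` around the identity grid, and applies it.
+    """
+    xform = FullSpatial(shape=images.shape, manual_gpu=images.is_cuda)
+    noise = torch.randn_like(xform.xform_params.data) * lp_bound
+    xform.xform_params.data.add_(noise)
+    xform.project_params('inf', lp_bound)
+    return xform.forward(images)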
+
+
+
+
+###############################################################################
+# #
+# AFFINE TRANSFORMATION NETWORK #
+# #
+###############################################################################
+
+class AffineTransform(ParameterizedTransformation):
+    """ Affine transformation -- just has 6 parameters per example: 4 for the
+        2x2 linear map and 1 for translation in each direction
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(AffineTransform, self).__init__(**kwargs)
+ img_shape = kwargs['shape']
+ self.img_shape = img_shape
+ self.xform_params = nn.Parameter(self.identity_params(img_shape))
+
+
+ def norm(self, lp='inf'):
+ identity_params = Variable(self.identity_params(self.img_shape))
+ return utils.batchwise_norm(self.xform_params - identity_params, lp,
+ dim=0)
+
+ def identity_params(self, shape):
+ """ Returns parameters for identity affine transformation
+ ARGS:
+ shape: torch.Size - shape of the minibatch of images we'll be
+ transforming. First index should be num examples
+ RETURNS:
+ torch TENSOR (not variable!!!)
+ if shape arg has shape NxCxHxW, this has shape Nx2x3
+ """
+
+        # Work smarter not harder -- use identity affine transforms here
+ num_examples = shape[0]
+ identity_affine_transform = torch.zeros(num_examples, 2, 3)
+ if self.use_gpu:
+ identity_affine_transform = identity_affine_transform.cuda()
+
+ identity_affine_transform[:,0,0] = 1
+ identity_affine_transform[:,1,1] = 1
+
+ return identity_affine_transform
+
+
+ def project_params(self, lp, lp_bound):
+ """ Projects the params to be within lp_bound (according to an lp)
+ of the identity map. First thing we do is clip the params to be
+ valid, too
+ ARGS:
+ lp : int or 'inf' - which LP norm we use. Must be an int or the
+ string 'inf'
+ lp_bound : float - how far we're allowed to go in LP land
+ RETURNS:
+ None, but modifies self.xform_params
+ """
+
+ assert isinstance(lp, int) or lp == 'inf'
+
+ diff = self.xform_params.data - self.identity_params(self.img_shape)
+ new_diff = utils.batchwise_lp_project(diff, lp, lp_bound)
+ self.xform_params.data.add_(new_diff - diff)
+
+
+ def forward(self, x):
+ # usual forward technique with affine grid
+ grid = F.affine_grid(self.xform_params, x.shape)
+ return F.grid_sample(x, grid)
+
+
+
+class RotationTransform(AffineTransform):
+ """ Rotations only -- only has one parameter, the angle by which we rotate
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(RotationTransform, self).__init__(**kwargs)
+ '''
+ img_shape = kwargs['shape']
+ self.img_shape = img_shape
+ self.xform_params = nn.Parameter(self.identity_params(img_shape))
+ '''
+
+
+ def identity_params(self, shape):
+ num_examples = shape[0]
+ params = torch.zeros(num_examples)
+ if self.use_gpu:
+ params = params.cuda()
+ return params
+
+
+ def make_grid(self, x):
+ assert isinstance(x, Variable)
+ cos_xform = self.xform_params.cos()
+ sin_xform = self.xform_params.sin()
+ zeros = torch.zeros_like(self.xform_params)
+
+ affine_xform = torch.stack([cos_xform, -sin_xform, zeros,
+ sin_xform, cos_xform, zeros])
+ affine_xform = affine_xform.transpose(0, 1).contiguous().view(-1, 2, 3)
+
+ return F.affine_grid(affine_xform, x.shape)
+
+ def forward(self, x):
+ return F.grid_sample(x, self.make_grid(x))
+
+
+
+class TranslationTransform(AffineTransform):
+    """ Translations only -- has two parameters per example: the x and y shifts
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(TranslationTransform, self).__init__(**kwargs)
+
+
+
+ def identity_params(self, shape):
+ num_examples = shape[0]
+ params = torch.zeros(num_examples, 2) # x and y translation only
+ if self.use_gpu:
+ params = params.cuda()
+ return params
+
+ def make_grid(self, x):
+ assert isinstance(x, Variable)
+ ones = Variable(torch.ones(self.xform_params.shape[0]))
+ zeros = Variable(torch.zeros(self.xform_params.shape[0]))
+        if self.xform_params.is_cuda:
+ ones = ones.cuda()
+ zeros = zeros.cuda()
+
+ affine_xform = torch.stack([ones, zeros, self.xform_params[:,0],
+ zeros, ones, self.xform_params[:,1]])
+
+ affine_xform = affine_xform.transpose(0, 1).contiguous().view(-1, 2, 3)
+
+ return F.affine_grid(affine_xform, x.shape)
+
+ def forward(self, x):
+ return F.grid_sample(x, self.make_grid(x))
+
+
+
+##############################################################################
+# #
+# BARREL + PINCUSHION TRANSFORMATIONS #
+# #
+##############################################################################
+
+class PointScaleTransform(ParameterizedTransformation):
+ """ Point Scale transformations are pincushion/barrel distortions.
+ We pick a point to anchor the image and optimize a distortion size to
+ either dilate or contract
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(PointScaleTransform, self).__init__(**kwargs)
+ img_shape = kwargs['shape']
+ self.img_shape = img_shape
+ self.xform_params = nn.Parameter(self.identity_params(img_shape))
+
+
+
+ def norm(self, lp='inf'):
+ return utils.batchwise_norm(self.xform_params, lp, dim=0)
+
+
+ def project_params(self, lp, lp_bound):
+ """ Projects the params to be within lp_bound (according to an lp)
+ of the identity map. First thing we do is clip the params to be
+ valid, too
+ ARGS:
+ lp : int or 'inf' - which LP norm we use. Must be an int or the
+ string 'inf'
+ lp_bound : float - how far we're allowed to go in LP land
+ RETURNS:
+ None, but modifies self.xform_params
+ """
+
+ assert isinstance(lp, int) or lp == 'inf'
+
+ diff = self.xform_params.data
+ new_diff = utils.batchwise_lp_project(diff, lp, lp_bound)
+ self.xform_params.data.add_(new_diff - diff)
+
+ def identity_params(self, shape):
+ num_examples = shape[0]
+ identity_param = torch.zeros(num_examples)
+ if self.use_gpu:
+ identity_param = identity_param.cuda()
+
+ return identity_param
+
+
+ def make_grid(self):
+
+ ######################################################################
+ # Compute identity flow grid first #
+ ######################################################################
+
+ num_examples = self.img_shape[0]
+ identity_affine_transform = torch.zeros(num_examples, 2, 3)
+ if self.use_gpu:
+ identity_affine_transform = identity_affine_transform.cuda()
+
+ identity_affine_transform[:,0,0] = 1
+ identity_affine_transform[:,1,1] = 1
+
+ basic_grid = F.affine_grid(identity_affine_transform, self.img_shape)
+
+ ######################################################################
+ # Compute scaling based on parameters #
+ ######################################################################
+
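+        # each grid point at radius r from the image centre is mapped to
+        # radius r * (1 + param * r^2), so the sampling grid is dilated or
+        # contracted radially depending on the sign of the per-example param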
+ radii_squared = basic_grid.pow(2).sum(-1)
+
+ new_radii = (radii_squared + 1e-20).pow(0.5) *\
+ (1 + self.xform_params.view(-1, 1, 1) * radii_squared)
+ thetas = torch.atan2(basic_grid[:,:,:,1], (basic_grid[:,:,:, 0]))
+ cosines = torch.cos(thetas) * new_radii
+ sines = torch.sin(thetas) * new_radii
+
+ return torch.stack([cosines, sines], -1)
+
+
+
+ def forward(self, x):
+ return F.grid_sample(x, self.make_grid())
+
+
+
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/__init__.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/__init__.py
new file mode 100644
index 0000000..352b770
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/__init__.py
@@ -0,0 +1,12 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/__init__.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/checkpoints.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/checkpoints.py
new file mode 100644
index 0000000..2237668
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/checkpoints.py
@@ -0,0 +1,311 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/checkpoints.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" Code for saving/loading pytorch models and batches of adversarial images
+
+CHECKPOINT NAMING CONVENTIONS:
+    <experiment_name>.<architecture>.<6 digits of epoch number>.path.tar
+e.g.
+    fgsm_def.resnet32.000100.path.tar
+
+All checkpoints are stored in CHECKPOINT_DIR
+
+Checkpoints are state dicts only!!!
+
+"""
+
+import torch
+import os
+import re
+import glob
+from .. import config
+import numpy as np
+import random
+
+CHECKPOINT_DIR = config.MODEL_PATH
+OUTPUT_IMAGE_DIR = config.OUTPUT_IMAGE_PATH
+
+
+##############################################################################
+# #
+# CHECKPOINTING MODELS #
+# #
+##############################################################################
+
+
+def clear_experiment(experiment_name, architecture):
+ """ Deletes all saved state dicts for an experiment/architecture pair """
+
+ for filename in params_to_filename(experiment_name, architecture):
+ full_path = os.path.join(*[CHECKPOINT_DIR, filename])
+ os.remove(full_path) if os.path.exists(full_path) else None
+
+
+def list_saved_epochs(experiment_name, architecture):
+ """ Returns a list of int epochs we've checkpointed for this
+ experiment name and architecture
+ """
+
+ extract_epoch = lambda f: int(f.split('.')[-3])
+ filename_list = params_to_filename(experiment_name, architecture)
+ return [extract_epoch(f) for f in filename_list]
+
+
+def params_to_filename(experiment_name, architecture, epoch_val=None):
+ """ Outputs string name of file.
+ ARGS:
+ experiment_name : string - name of experiment we're saving
+ architecture : string - abbreviation for model architecture
+ epoch_val : int/(intLo, intHi)/None -
+ - if int we return this int exactly
+                    - if (intLo, intHi) we return all existing filenames with
+                      epoch in the range [intLo, intHi], in sorted order
+ - if None, we return all existing filenames with params
+ in ascending epoch-sorted order
+
+ RETURNS:
+ filenames: string or (possibly empty) string[] of just the base name
+ of saved models
+ """
+
+ if isinstance(epoch_val, int):
+ return '.'.join([experiment_name, architecture, '%06d' % epoch_val,
+ 'path', 'tar'])
+
+ glob_prefix = os.path.join(*[CHECKPOINT_DIR,
+ '%s.%s.*' % (experiment_name, architecture)])
+    re_prefix = r'%s\.%s\.' % (experiment_name, architecture)
+ re_suffix = r'\.path\.tar'
+
+ valid_name = lambda f: bool(re.match(re_prefix + r'\d{6}' + re_suffix, f))
+ select_epoch = lambda f: int(re.sub(re_prefix, '',
+ re.sub(re_suffix, '', f)))
+ valid_epoch = lambda e: (e >= (epoch_val or (0, 0))[0] and
+ e <= (epoch_val or (0, float('inf')))[1])
+
+ filename_epoch_pairs = []
+ for full_path in glob.glob(glob_prefix):
+ filename = os.path.basename(full_path)
+ if not valid_name(filename):
+ continue
+
+ epoch = select_epoch(filename)
+ if valid_epoch(epoch):
+ filename_epoch_pairs.append((filename, epoch))
+
+ return [_[0] for _ in sorted(filename_epoch_pairs, key=lambda el: el[1])]
+
+
+def save_state_dict(experiment_name, architecture, epoch_val, model,
+ k_highest=10):
+ """ Saves the state dict of a model with the given parameters.
+ ARGS:
+ experiment_name : string - name of experiment we're saving
+ architecture : string - abbreviation for model architecture
+ epoch_val : int - which epoch we're saving
+ model : model - object we're saving the state dict of
+        k_highest : int - if not None, we make sure to not include more than
+ k state_dicts for (experiment_name, architecture) pair,
+ keeping the k-most recent if we overflow
+ RETURNS:
+ The model we saved
+ """
+
+ # First resolve THIS filename
+ this_filename = params_to_filename(experiment_name, architecture, epoch_val)
+
+ # Next clear up memory if too many state dicts
+ current_filenames = params_to_filename(experiment_name, architecture)
+ delete_els = []
+ if k_highest is not None:
+ num_to_delete = len(current_filenames) - k_highest + 1
+ if num_to_delete > 0:
+ delete_els = sorted(current_filenames)[:num_to_delete]
+
+ for delete_el in delete_els:
+ full_path = os.path.join(*[CHECKPOINT_DIR, delete_el])
+ os.remove(full_path) if os.path.exists(full_path) else None
+
+ # Finally save the state dict
+ torch.save(model.state_dict(), os.path.join(*[CHECKPOINT_DIR,
+ this_filename]))
+
+ return model
+
+
+def load_state_dict_from_filename(filename, model):
+ """ Skips the whole parameter argument thing and just loads the whole
+ state dict from a filename.
+ ARGS:
+ filename : string - filename without directories
+ model : nn.Module - has 'load_state_dict' method
+ RETURNS:
+ the model loaded with the weights contained in the file
+ """
+ assert len(glob.glob(os.path.join(*[CHECKPOINT_DIR, filename]))) == 1
+
+ # LOAD FILENAME
+
+ # If state_dict in keys, use that as the loader
+ right_dict = lambda d: d.get('state_dict', d)
+
+ model.load_state_dict(right_dict(torch.load(
+ os.path.join(*[CHECKPOINT_DIR, filename]))))
+ return model
+
+
+def load_state_dict(experiment_name, architecture, epoch, model):
+ """ Loads a checkpoint that was previously saved
+ experiment_name : string - name of experiment we're saving
+ architecture : string - abbreviation for model architecture
+ epoch_val : int - which epoch we're loading
+ """
+
+ filename = params_to_filename(experiment_name, architecture, epoch)
+ return load_state_dict_from_filename(filename, model)
+
+
+###############################################################################
+# #
+# CHECKPOINTING DATA #
+# #
+###############################################################################
+"""
+ This is a hacky fix to save batches of adversarial images along with their
+ labels.
+"""
+
+
+class CustomDataSaver(object):
+ # TODO: make this more pytorch compliant
+ def __init__(self, image_subdirectory):
+ self.image_subdirectory = image_subdirectory
+        # make this folder if it doesn't exist yet
+        os.makedirs(os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory),
+                    exist_ok=True)
+
+ def save_minibatch(self, examples, labels):
+ """ Assigns a random name to this minibatch and saves the examples and
+ labels in two separate files:
+ .examples.npy and .labels.npy
+ ARGS:
+ examples: Variable or Tensor (NxCxHxW) - examples to be saved
+ labels : Variable or Tensor (N) - labels matching the examples
+ """
+ # First make both examples and labels into numpy arrays
+ examples = examples.cpu().numpy()
+ labels = labels.cpu().numpy()
+
+ # Make a name for the files
+ random_string = str(random.random())[2:] # DO THIS BETTER WHEN I HAVE INTERNET
+
+ # Save both files
+ example_file = '%s.examples.npy' % random_string
+ example_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
+ example_file)
+ np.save(example_path, examples)
+
+ label_file = '%s.labels.npy' % random_string
+ label_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
+ label_file)
+ np.save(label_path, labels)
+
+
+class CustomDataLoader(object):
+ # TODO: make this more pytorch compliant
+ def __init__(self, image_subdirectory, batch_size=128, to_tensor=True,
+ use_gpu=False):
+ super(CustomDataLoader, self).__init__()
+ self.image_subdirectory = image_subdirectory
+ self.batch_size = batch_size
+
+        assert to_tensor >= use_gpu  # using the GPU requires tensors
+ self.to_tensor = to_tensor
+ self.use_gpu = use_gpu
+
+ def _prepare_data(self, examples, labels):
+ """ Takes in numpy examples and labels and tensor-ifies and cuda's them
+ if necessary
+ """
+
+ if self.to_tensor:
+ examples = torch.Tensor(examples)
+ labels = torch.Tensor(labels)
+
+ if self.use_gpu:
+ examples = examples.cuda()
+ labels = labels.cuda()
+
+ return (examples, labels)
+
+ def _base_loader(self, prefix, which):
+ assert which in ['examples', 'labels']
+ filename = '%s.%s.npy' % (prefix, which)
+ full_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
+ filename)
+ return np.load(full_path)
+
+ def _example_loader(self, prefix):
+ """ Loads the numpy array of examples given the random 'prefix' """
+ return self._base_loader(prefix, 'examples')
+
+ def _label_loader(self, prefix):
+ """ Loads the numpy array of labels given the random 'prefix' """
+ return self._base_loader(prefix, 'labels')
+
+ def __iter__(self):
+
+ # First collect all the filenames:
+ glob_prefix = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
+ '*')
+ files = glob.glob(glob_prefix)
+ valid_random_names = set(os.path.basename(_).split('.')[0]
+ for _ in files)
+
+ # Now loop through filenames and yield out minibatches of correct size
+ running_examples, running_labels = [], []
+ running_size = 0
+ for random_name in valid_random_names:
+ # Load data from files and append to 'running' lists
+ loaded_examples = self._example_loader(random_name)
+ loaded_labels = self._label_loader(random_name)
+ running_examples.append(loaded_examples)
+ running_labels.append(loaded_labels)
+ running_size += loaded_examples.shape[0]
+
+ if running_size < self.batch_size:
+ # Load enough data to populate one minibatch, which might
+ # take multiple files
+ continue
+
+ # Concatenate all images together
+ merged_examples = np.concatenate(running_examples, axis=0)
+ merged_labels = np.concatenate(running_labels, axis=0)
+
+ # Make minibatches out of concatenated things,
+ for batch_no in range(running_size // self.batch_size):
+ index_lo = batch_no * self.batch_size
+ index_hi = index_lo + self.batch_size
+ example_batch = merged_examples[index_lo:index_hi]
+ label_batch = merged_labels[index_lo:index_hi]
+ yield self._prepare_data(example_batch, label_batch)
+
+ # Handle any remainder for remaining files
+ remainder_idx = (running_size // self.batch_size) * self.batch_size
+ running_examples = [merged_examples[remainder_idx:]]
+ running_labels = [merged_labels[remainder_idx:]]
+ running_size = running_size - remainder_idx
+
+ # If we're out of files, yield this last sub-minibatch of data
+ if running_size > 0:
+ merged_examples = np.concatenate(running_examples, axis=0)
+ merged_labels = np.concatenate(running_labels, axis=0)
+ yield self._prepare_data(merged_examples, merged_labels)
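+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustration only, not part of the original
+    # mister_ed code); assumes OUTPUT_IMAGE_DIR exists and already contains a
+    # writable subdirectory named 'demo_batches'.
+    saver = CustomDataSaver('demo_batches')
+    saver.save_minibatch(torch.rand(4, 3, 32, 32),
+                         torch.randint(0, 10, (4,)))
+
+    loader = CustomDataLoader('demo_batches', batch_size=2, to_tensor=True)
+    for example_batch, label_batch in loader:
+        print(example_batch.shape, label_batch.shape)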
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/discretization.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/discretization.py
new file mode 100644
index 0000000..f4f4488
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/discretization.py
@@ -0,0 +1,224 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/discretization.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" File that holds techniques for discretizing images --
+ In general, images of the form NxCxHxW will with values in the [0.,1.] range
+ need to be converted to the [0, 255 (int)] range to be displayed as images.
+
+ Sometimes the naive rounding scheme can mess up the classification, so this
+ file holds techniques to discretize these images into tensors with values
+ of the form i/255.0 for some integers i.
+"""
+
+import torch
+from torch.autograd import Variable
+
+# Needed by discretize_image below; `toimage` was removed in scipy >= 1.2,
+# so an older scipy is assumed here.
+import scipy.misc as smp
+
+from . import pytorch_utils as utils
+
+
+##############################################################################
+# #
+# HELPER METHODS #
+# #
+##############################################################################
+
+
+def discretize_image(img_tensor, zero_one=False):
+    """ Discretizes an image tensor into a tensor filled with ints ranging
+        between 0 and 255
+    ARGS:
+        img_tensor : floatTensor (NxCxHxW) - tensor to be discretized
+        zero_one : bool - if True divides output by 255 before returning it
+    """
+
+ assert float(torch.min(img_tensor)) >= 0.
+ assert float(torch.max(img_tensor)) <= 1.0
+
+ original_shape = img_tensor.shape
+ if img_tensor.dim() != 4:
+ img_tensor = img_tensor.unsqueeze(0)
+
+ int_tensors = [] # actually floatTensor, but full of ints
+ img_shape = original_shape[1:]
+ for example in img_tensor:
+ pixel_channel_tuples = zip(*list(smp.toimage(example).getdata()))
+ int_tensors.append(img_tensor.new(pixel_channel_tuples).view(img_shape))
+
+ stacked_tensors = torch.stack(int_tensors)
+ if zero_one:
+ return stacked_tensors / 255.0
+ return stacked_tensors
+
+
+##############################################################################
+# #
+# MAIN DISCRETIZATION TECHNIQUES #
+# #
+##############################################################################
+
+def discretized_adversarial(img_tensor, classifier_net, normalizer,
+ flavor='greedy'):
+ """ Takes in an image_tensor and classifier/normalizer pair and outputs a
+ 'discretized' image_tensor [each val is i/255.0 for some integer i]
+ with the same classification
+ ARGS:
+ img_tensor : tensor (NxCxHxW) - tensor of images with values between
+ 0.0 and 1.0.
+ classifier_net : NN - neural net with .forward method to classify
+ normalized images
+ normalizer : differentiableNormalizer object - normalizes 0,1 images
+ into classifier_domain
+ flavor : string - either 'random' or 'greedy', determining which
+ 'next_pixel_to_flip' function we use
+ RETURNS:
+        img_tensor of the same shape, but now with values of the form i/255.0
+ for integers i.
+ """
+
+ img_tensor = utils.safe_tensor(img_tensor)
+
+ nptf_map = {'random': flip_random_pixel,
+ 'greedy': flip_greedy_pixel}
+ next_pixel_to_flip = nptf_map[flavor](classifier_net, normalizer)
+
+ ##########################################################################
+ # First figure out 'correct' labels and the 'discretized' labels #
+ ##########################################################################
+ var_img = utils.safe_var(img_tensor)
+ norm_var = normalizer.forward(var_img)
+ norm_output = classifier_net.forward(norm_var)
+ correct_targets = norm_output.max(1)[1]
+
+ og_discretized = utils.safe_var(discretize_image(img_tensor, zero_one=True))
+ norm_discretized = normalizer.forward(og_discretized)
+ discretized_output = classifier_net.forward(norm_discretized)
+ discretized_targets = discretized_output.max(1)[1]
+
+ ##########################################################################
+ # Collect idxs for examples affected by discretization #
+ ##########################################################################
+ incorrect_idxs = set()
+
+ for i, el in enumerate(correct_targets.ne(discretized_targets)):
+ if float(el) != 0:
+ incorrect_idxs.add(i)
+
+ ##########################################################################
+ # Fix all bad images #
+ ##########################################################################
+
+ corrected_imgs = []
+ for idx in incorrect_idxs:
+ desired_target = correct_targets[idx]
+ example = og_discretized[idx].data.clone() # tensor
+ signs = torch.sign(var_img - og_discretized)
+ bad_discretization = True
+ pixels_changed_so_far = set() # populated with tuples of idxs
+
+ while bad_discretization:
+ pixel_idx, grad_sign = next_pixel_to_flip(example,
+ pixels_changed_so_far,
+ desired_target)
+ pixels_changed_so_far.add(pixel_idx)
+
+ if grad_sign == 0:
+ grad_sign = utils.tuple_getter(signs[idx], pixel_idx)
+
+ new_val = (grad_sign / 255. + utils.tuple_getter(example, pixel_idx))
+ utils.tuple_setter(example, pixel_idx, float(new_val))
+
+ new_out = classifier_net.forward(normalizer.forward( \
+ Variable(example.unsqueeze(0))))
+ bad_discretization = (int(desired_target) != int(new_out.max(1)[1]))
+ corrected_imgs.append(example)
+
+ # Stack up results
+ output = []
+
+ for idx in range(len(img_tensor)):
+ if idx in incorrect_idxs:
+ output.append(corrected_imgs.pop(0))
+ else:
+ output.append(og_discretized[idx].data)
+
+ return torch.stack(output) # Variable
+
+
+#############################################################################
+# #
+# FLIP TECHNIQUES #
+# #
+#############################################################################
+''' Flip techniques in general have the following specs:
+ ARGS:
+ classifier_net : NN - neural net with .forward method to classify
+ normalized images
+ normalizer : differentiableNormalizer object - normalizes 0,1 images
+ into classifier_domain
+ RETURNS: flip_function
+'''
+
+'''
+ Flip function is a function that takes the following args:
+ ARGS:
+ img_tensor : Tensor (CxHxW) - image tensor in range 0.0 to 1.0 and is
+ already discretized
+ pixels_changed_so_far: set - set of index_tuples that have already been
+ modified (we don't want to modify a pixel by
+ more than 1/255 in any channel)
+ correct_target : torch.LongTensor (1) - single element in a tensor that
+ is the target class
+ (e.g. int between 0 and 9 for CIFAR )
+ RETURNS: (idx_tuple, sign)
+ index_tuple is a triple of indices indicating which pixel-channel needs
+ to be modified, and sign is in {-1, 0, 1}. If +-1, we will modify the
+ pixel-channel in that direction, otherwise we'll modify in the opposite
+ of the direction that discretization rounded to.
+'''
+
+
+def flip_random_pixel(classifier_net, normalizer):
+ def flip_fxn(img_tensor, pixels_changed_so_far, correct_target):
+ numel = img_tensor.numel()
+ if len(pixels_changed_so_far) > numel * .9:
+ raise Exception("WHAT IS GOING ON???")
+
+ while True:
+ pixel_idx, _ = utils.random_element_index(img_tensor)
+ if pixel_idx not in pixels_changed_so_far:
+ return pixel_idx, 0
+
+ return flip_fxn
+
+
+def flip_greedy_pixel(classifier_net, normalizer):
+ def flip_fxn(img_tensor, pixels_changed_so_far, correct_target,
+ classifier_net=classifier_net, normalizer=normalizer):
+ # Computes gradient and figures out which px most affects class_out
+ classifier_net.zero_grad()
+ img_var = Variable(img_tensor.unsqueeze(0), requires_grad=True)
+ class_out = classifier_net.forward(normalizer.forward(img_var))
+
+ criterion = torch.nn.CrossEntropyLoss()
+        loss = criterion(class_out, correct_target.reshape(1))
+ loss.backward()
+ # Really inefficient algorithm here, can probably do better
+ new_grad_data = img_var.grad.data.clone().squeeze()
+ signs = new_grad_data.sign()
+ for idx_tuple in pixels_changed_so_far:
+ utils.tuple_setter(new_grad_data, idx_tuple, 0)
+
+ argmax = utils.torch_argmax(new_grad_data.abs())
+ return argmax, -1 * utils.tuple_getter(signs, argmax)
+
+ return flip_fxn
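+
+
+# Illustrative call pattern (sketch only, not part of the original mister_ed
+# code); `adv_images`, `classifier_net` and `normalizer` are placeholders for
+# the objects used when crafting the adversarial examples:
+#
+#   discretized = discretized_adversarial(adv_images, classifier_net,
+#                                         normalizer, flavor='greedy')
+#
+# The result has the same shape as `adv_images`, every value is a multiple of
+# 1/255, and (where possible) the classifier's predictions are unchanged.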
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/image_utils.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/image_utils.py
new file mode 100644
index 0000000..c6e09d7
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/image_utils.py
@@ -0,0 +1,193 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/image_utils.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" Specific utilities for image classification
+ (i.e. RGB images i.e. tensors of the form NxCxHxW )
+"""
+
+from __future__ import print_function
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+import random
+
+
+def nhwc255_xform(img_np_array):
+ """ Takes in a numpy array and transposes it so that the channel is the last
+ axis. Also multiplies all values by 255.0
+ ARGS:
+ img_np_array : np.ndarray - array of shape (NxHxWxC) or (NxCxHxW)
+ [assumes that we're in NCHW by default,
+ but if not ambiguous will handle NHWC too ]
+ RETURNS:
+ array of form NHWC
+ """
+ assert isinstance(img_np_array, np.ndarray)
+ shape = img_np_array.shape
+ assert len(shape) == 4
+
+    # determine which configuration we're in
+    ambiguous = (shape[1] == shape[3] == 3)
+    nhwc = (shape[3] == 3)
+
+    # transpose unless we're unambiguously in the NHWC case already
+    if nhwc and not ambiguous:
+        return img_np_array * 255.0
+    else:
+        return np.transpose(img_np_array, (0, 2, 3, 1)) * 255.0
+
+
+def show_images(images, normalize=None, ipython=True,
+ margin_height=2, margin_color='red',
+ figsize=(18, 16)):
+ """ Shows pytorch tensors/variables as images """
+
+ # first format the first arg to be hz-stacked numpy arrays
+ if not isinstance(images, list):
+ images = [images]
+ images = [np.dstack(image.cpu().numpy()) for image in images]
+ image_shape = images[0].shape
+ assert all(image.shape == image_shape for image in images)
+ assert all(image.ndim == 3 for image in images) # CxHxW
+
+ # now build the list of final rows
+ rows = []
+ if margin_height > 0:
+ assert margin_color in ['red', 'black']
+ margin_shape = list(image_shape)
+ margin_shape[1] = margin_height
+ margin = np.zeros(margin_shape)
+ if margin_color == 'red':
+ margin[0] = 1
+ else:
+ margin = None
+
+ for image_row in images:
+ rows.append(margin)
+ rows.append(image_row)
+
+ rows = [_ for _ in rows[1:] if _ is not None]
+ plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
+
+ cat_rows = np.concatenate(rows, 1).transpose(1, 2, 0)
+ imshow_kwargs = {}
+ if cat_rows.shape[-1] == 1: # 1 channel: greyscale
+ cat_rows = cat_rows.squeeze()
+ imshow_kwargs['cmap'] = 'gray'
+
+ plt.imshow(cat_rows, **imshow_kwargs)
+
+ plt.show()
+
+
+def display_adversarial_2row(classifier_net, normalizer, original_images,
+ adversarial_images, num_to_show=4, which='incorrect',
+ ipython=False, margin_width=2):
+ """ Displays adversarial images side-by-side with their unperturbed
+ counterparts. Opens a window displaying two rows: top row is original
+ images, bottom row is perturbed
+ ARGS:
+ classifier_net : nn - with a .forward method that takes normalized
+ variables and outputs logits
+ normalizer : object w/ .forward method - should probably be an instance
+ of utils.DifferentiableNormalize or utils.IdentityNormalize
+ original_images: Variable or Tensor (NxCxHxW) - original images to
+ display. Images in [0., 1.] range
+ adversarial_images: Variable or Tensor (NxCxHxW) - perturbed images to
+ display. Should be same shape as original_images
+ num_to_show : int - number of images to show
+ which : string in ['incorrect', 'random', 'correct'] - which images to
+ show.
+ -- 'incorrect' means successfully attacked images,
+ -- 'random' means some random selection of images
+ -- 'correct' means unsuccessfully attacked images
+ ipython: bool - if True, we use in an ipython notebook so slightly
+ different way to show Images
+ margin_width - int : height in pixels of the red margin separating top
+ and bottom rows. Set to 0 for no margin
+ RETURNS:
+ None, but displays images
+ """
+ assert which in ['incorrect', 'random', 'correct']
+
+ # If not 'random' selection, prune to only the valid things
+ to_sample_idxs = []
+ if which != 'random':
+ classifier_net.eval() # can never be too safe =)
+
+ # classify the originals with top1
+ original_norm_var = normalizer.forward(original_images)
+ original_out_logits = classifier_net.forward(original_norm_var)
+ _, original_out_classes = original_out_logits.max(1)
+
+ # classify the adversarials with top1
+ adv_norm_var = normalizer.forward(adversarial_images)
+ adv_out_logits = classifier_net.forward(adv_norm_var)
+ _, adv_out_classes = adv_out_logits.max(1)
+
+ # collect indices of matching
+ selector = lambda var: (which == 'correct') == bool(float(var))
+ for idx, var_el in enumerate(original_out_classes == adv_out_classes):
+ if selector(var_el):
+ to_sample_idxs.append(idx)
+ else:
+ to_sample_idxs = list(range(original_images.shape[0]))
+
+ # Now select some indices to show
+ if to_sample_idxs == []:
+ print("Couldn't show anything. Try changing the 'which' argument here")
+ return
+
+ to_show_idxs = random.sample(to_sample_idxs, min([num_to_show,
+ len(to_sample_idxs)]))
+
+ # Now start building up the images : first horizontally, then vertically
+ top_row = torch.cat([original_images[idx] for idx in to_show_idxs], dim=2)
+ bottom_row = torch.cat([adversarial_images[idx] for idx in to_show_idxs],
+ dim=2)
+
+ if margin_width > 0:
+ margin = torch.zeros(3, margin_width, top_row.shape[-1])
+ margin[0] = 1.0 # make it red
+ margin = margin.type(type(top_row))
+ stack = [top_row, margin, bottom_row]
+ else:
+ stack = [top_row, bottom_row]
+
+ plt.imshow(torch.cat(stack, dim=1).cpu().numpy().transpose(1, 2, 0))
+ plt.show()
+
+
+def display_adversarial_notebook():
+ pass
+
+
+def nchw_l2(x, y, squared=True):
+ """ Computes l2 norm between two NxCxHxW images
+ ARGS:
+ x, y: Tensor/Variable (NxCxHxW) - x, y must be same type & shape.
+ squared : bool - if True we return squared loss, otherwise we return
+ square root of l2
+ RETURNS:
+ ||x - y ||_2 ^2 (no exponent if squared == False),
+ shape is (Nx1x1x1)
+ """
+ temp = torch.pow(x - y, 2) # square diff
+
+ for i in range(1, temp.dim()): # reduce on all but first dimension
+ temp = torch.sum(temp, i, keepdim=True)
+
+ if not squared:
+ temp = torch.pow(temp, 0.5)
+
+ return temp.squeeze()
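+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustration only, not part of the original
+    # mister_ed code).
+    x = torch.rand(2, 3, 8, 8)
+    y = torch.rand(2, 3, 8, 8)
+    print(nchw_l2(x, y, squared=True))      # per-example squared l2 distance
+    print(nhwc255_xform(x.numpy()).shape)   # NHWC array scaled to [0, 255]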
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/pytorch_ssim.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/pytorch_ssim.py
new file mode 100644
index 0000000..c50d92b
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/pytorch_ssim.py
@@ -0,0 +1,89 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/pytorch_ssim.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" Implementation directly lifted from Po-Hsun-Su for pytorch ssim
+See github repo here: https://github.com/Po-Hsun-Su/pytorch-ssim
+"""
+
+import torch
+import torch.nn.functional as F
+from torch.autograd import Variable
+from math import exp
+
+def gaussian(window_size, sigma):
+ gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
+ return gauss/gauss.sum()
+
+def create_window(window_size, channel):
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+ _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
+ window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
+ return window
+
+def _ssim(img1, img2, window, window_size, channel, size_average = True):
+ mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
+ mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
+
+ mu1_sq = mu1.pow(2)
+ mu2_sq = mu2.pow(2)
+ mu1_mu2 = mu1*mu2
+
+ sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
+ sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
+ sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
+
+ C1 = 0.01**2
+ C2 = 0.03**2
+
+ ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
+
+ if size_average:
+ return ssim_map.mean()
+ else:
+ return ssim_map.mean(1).mean(1).mean(1)
+
+class SSIM(torch.nn.Module):
+ def __init__(self, window_size = 11, size_average = True):
+ super(SSIM, self).__init__()
+ self.window_size = window_size
+ self.size_average = size_average
+ self.channel = 1
+ self.window = create_window(window_size, self.channel)
+
+ def forward(self, img1, img2):
+ (_, channel, _, _) = img1.size()
+
+ if channel == self.channel and self.window.data.type() == img1.data.type():
+ window = self.window
+ else:
+ window = create_window(self.window_size, channel)
+
+ if img1.is_cuda:
+ window = window.cuda(img1.get_device())
+ window = window.type_as(img1)
+
+ self.window = window
+ self.channel = channel
+
+        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
+
+def ssim(img1, img2, window_size = 11, size_average = True):
+ (_, channel, _, _) = img1.size()
+ window = create_window(window_size, channel)
+
+ if img1.is_cuda:
+ window = window.cuda(img1.get_device())
+ window = window.type_as(img1)
+
+ return _ssim(img1, img2, window, window_size, channel, size_average)
\ No newline at end of file
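+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustration only, not part of the lifted
+    # pytorch-ssim code).
+    a = torch.rand(1, 3, 64, 64)
+    b = (a + 0.05 * torch.randn_like(a)).clamp(0, 1)
+    print(float(ssim(a, b)))      # functional interface
+    print(float(SSIM()(a, b)))    # stateful module interface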
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/pytorch_utils.py b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/pytorch_utils.py
new file mode 100644
index 0000000..0235137
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/mister_ed/utils/pytorch_utils.py
@@ -0,0 +1,637 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/pytorch_utils.py
+#
+# The license for the original version of this file can be
+# found in the `recoloradv` directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+""" Utilities for general pytorch helpfulness """
+
+from __future__ import print_function
+import torch
+import numpy as np
+import torchvision.transforms as transforms
+import torch.cuda as cuda
+import gc
+import os
+import warnings
+from torch.autograd import Variable, Function
+import subprocess
+from functools import reduce
+
+
+###############################################################################
+# #
+# SAFETY DANCE #
+# #
+###############################################################################
+# aka things for safer pytorch usage
+
+
+def use_gpu():
+    """ The shortcut to retrieve the environment variable 'MISTER_ED_GPU'"""
+    try:
+        str_val = os.environ['MISTER_ED_GPU']
+    except KeyError:
+        set_global_gpu()
+        str_val = os.environ['MISTER_ED_GPU']
+    assert str_val in ['True', 'False']
+    return str_val == 'True'
+
+
+def set_global_gpu(manual=None):
+ """ Sets the environment variable 'MISTER_ED_GPU'. Defaults to using gpu
+ if cuda is available
+ ARGS:
+ manual : bool - we set the 'MISTER_ED_GPU' environment var to the string
+ of whatever this is
+ RETURNS
+ None
+ """
+ if manual is None:
+ val = cuda.is_available()
+ else:
+ val = manual
+ os.environ['MISTER_ED_GPU'] = str(val)
+
+
+def unset_global_gpu():
+    """ Removes the environment variable 'MISTER_ED_GPU'
+    # NOTE: this relies on unsetenv, which works on 'most flavors of Unix'
+      according to the docs
+    """
+    try:
+        os.unsetenv('MISTER_ED_GPU')
+    except Exception:
+        warnings.warn("os.unsetenv(.) isn't working properly")
+
+
+def cuda_assert(use_cuda):
+ assert not (use_cuda and not cuda.is_available())
+
+
+def safe_var(entity, **kwargs):
+ """ Returns a variable of an entity, which may or may not already be a
+ variable
+ """
+ warnings.warn("As of >=pytorch0.4.0 this is no longer necessary",
+ DeprecationWarning)
+ if isinstance(entity, Variable):
+ return entity
+ elif isinstance(entity, torch._C._TensorBase):
+ return Variable(entity, **kwargs)
+ else:
+ raise Exception("Can't cast %s to a Variable" %
+ entity.__class__.__name__)
+
+
+def safe_tensor(entity):
+ """ Returns a tensor of an entity, which may or may not already be a
+ tensor
+ """
+ warnings.warn("As of >=pytorch0.4.0 this is no longer necessary",
+ DeprecationWarning)
+ if isinstance(entity, Variable):
+ return entity.data
+    elif isinstance(entity, torch.Tensor):  # torch.tensor._TensorBase no longer exists in modern pytorch
+ return entity
+ elif isinstance(entity, np.ndarray):
+ return torch.Tensor(entity) # UNSAFE CUDA CASTING
+ else:
+ raise Exception("Can't cast %s to a Variable" %
+ entity.__class__.__name__)
+
+
+##############################################################################
+# #
+# CONVENIENCE STORE #
+# #
+##############################################################################
+# aka convenient things that are not builtin to pytorch
+
+class AverageMeter(object):
+ """Computes and stores the average and current value"""
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+ def __str__(self):
+ return str(self.avg)
+
+
+def tuple_getter(tensor, idx_tuple):
+ """ access a tensor by a tuple """
+ tensor_ = tensor
+ for el in idx_tuple:
+ tensor_ = tensor_[el]
+ return tensor_
+
+
+def tuple_setter(tensor, idx_tuple, val):
+ """ Sets a tensor element while indexing by a tuple"""
+
+ tensor_ = tensor
+ for el in idx_tuple[:-1]:
+ tensor_ = tensor_[el]
+
+ tensor_[idx_tuple[-1]] = val
+ return tensor
+
+
+def torch_argmax(tensor):
+ """ Returns the idx tuple that corresponds to the max value in the tensor"""
+
+ flat_tensor = tensor.view(tensor.numel())
+ _, argmax = flat_tensor.max(0)
+ return np.unravel_index(int(argmax), tensor.shape)
+
+
+def torch_argmin(tensor):
+ """ Returns the idx tuple that corresponds to the min value in the tensor"""
+ flat_tensor = tensor.view(tensor.numel())
+ _, argmin = flat_tensor.min(0)
+ return np.unravel_index(int(argmin), tensor.shape)
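+
+
+# Illustrative usage of the tuple-indexing helpers (sketch only, not part of
+# the original mister_ed code):
+#
+#   t = torch.zeros(3, 4, 5)
+#   tuple_setter(t, (1, 2, 3), 7.0)
+#   assert float(tuple_getter(t, (1, 2, 3))) == 7.0
+#   assert torch_argmax(t) == (1, 2, 3)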
+
+
+def clamp_ref(x, y, l_inf):
+ """ Clamps each element of x to be within l_inf of each element of y """
+ return torch.clamp(x - y, -l_inf, l_inf) + y
+
+
+def torch_arctanh(x, eps=1e-6):
+ x *= (1. - eps)
+ return (torch.log((1 + x) / (1 - x))) * 0.5
+
+
+def tanh_rescale(x, x_min=-1., x_max=1.):
+ return (torch.tanh(x) + 1) * 0.5 * (x_max - x_min) + x_min
+
+
+def checkpoint_incremental_array(output_file, numpy_list,
+ return_concat=True):
+ """ Takes in a string of a filename and a list of numpy arrays and
+ concatenates them along first axis, saves them to a file, and then
+ outputs a list containing only that single concatenated array
+ ARGS:
+ output_file : string ending in .npy - full path location of the
+ place we're saving this numpy array
+ numpy_list : list of numpy arrays (all same shape except for the first
+ axis) - list of arrays we concat and save to file
+ return_concat : boolean - if True, we return these concatenated arrays
+ in a list, else we return nothing
+ RETURNS:
+ maybe nothing, maybe the a singleton list containing the concatenated
+ arrays
+ """
+ concat = np.concatenate(numpy_list, axis=0)
+ np.save(output_file, concat)
+ if return_concat:
+ return [concat]
+
+
+def sizeof_fmt(num, suffix='B'):
+ """ https://stackoverflow.com/a/1094933
+ answer by Sridhar Ratnakumar """
+ for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
+ if abs(num) < 1024.0:
+ return "%3.1f%s%s" % (num, unit, suffix)
+ num /= 1024.0
+ return "%.1f%s%s" % (num, 'Yi', suffix)
+
+
+def clip_0_1(tensorlike):
+ # Clips tensorlike object into [0., 1.0] range
+ return torch.clamp(tensorlike, 0.0, 1.0)
+
+
+def clamp_0_1_delta(x, y):
+ """ Returns the delta that'd have to be added to (x + y) such that
+ (x + y) + delta is in the range [0.0, 1.0]
+ """
+ return torch.clamp(x + y, 0.0, 1.0) - (x + y)
+
+
+def random_linf_pertubation(examples_like, l_inf):
+ """ Returns an object of the same type/shape as examples_like that holds
+ a uniformly random pertubation in the l_infinity box of l_inf.
+ NOTE THAT THIS DOES NOT ADD TO examples_like!
+ """
+
+ is_var = isinstance(examples_like, Variable)
+
+ random_tensor = (torch.rand(*examples_like.shape) * l_inf * 2 -
+ torch.ones(*examples_like.shape) * l_inf)
+
+ random_tensor.type(type(examples_like))
+
+ if is_var:
+ return Variable(random_tensor)
+ else:
+ return random_tensor
+
+
+def batchwise_norm(examples, lp, dim=0):
+ """ Returns the per-example norm of the examples, keeping along the
+ specified dimension.
+ e.g. if examples is NxCxHxW, applying this fxn with dim=0 will return a
+ N-length tensor with the lp norm of each example
+ ARGS:
+ examples : tensor or Variable - needs more than one dimension
+ lp : string or int - either 'inf' or an int for which lp norm we use
+ dim : int - which dimension to keep
+ RETURNS:
+ 1D object of same type as examples, but with shape examples.shape[dim]
+ """
+
+ assert isinstance(lp, int) or lp == 'inf'
+ examples = torch.abs(examples)
+ example_dim = examples.dim()
+ if dim != 0:
+ examples = examples.transpose(dim, 0)
+
+ if lp == 'inf':
+ for reduction in range(1, example_dim):
+ examples, _ = examples.max(1)
+ return examples
+
+ else:
+ examples = torch.pow(examples + 1e-10, lp)
+ for reduction in range(1, example_dim):
+ examples = examples.sum(1)
+ return torch.pow(examples, 1.0 / lp)
+
+
+def batchwise_lp_project(x, lp, lp_bound, dim=0):
+ """ Projects x (a N-by-(...) TENSOR) to be a N-by-(...) TENSOR into the
+ provided lp ball
+ ARGS:
+ x : Tensor (N-by-(...)) - arbitrary style
+ lp : 'inf' or int - which style of lp we use
+ lp_bound : float - size of lp ball we project into
+ dim : int - if not 0 is the dimension we keep and project onto
+    RETURNS:
+        tensor of the same shape as x, projected into the given lp ball
+ """
+ assert isinstance(lp, int) or lp == 'inf'
+
+ if lp == 'inf':
+ return torch.clamp(x, -lp_bound, lp_bound)
+
+ needs_squeeze = False
+ if len(x.shape) == 1:
+ x = x.unsqueeze(1)
+ needs_squeeze = True
+
+ output = torch.renorm(x, lp, dim, lp_bound)
+
+ if needs_squeeze:
+ return output.squeeze()
+ return output
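+
+
+# Illustrative usage (sketch only, not part of the original mister_ed code):
+#
+#   deltas = torch.randn(8, 3, 32, 32)
+#   per_example_l2 = batchwise_norm(deltas, lp=2)                 # shape (8,)
+#   projected = batchwise_lp_project(deltas, lp=2, lp_bound=1.0)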
+
+
+def summed_lp_norm(examples, lp):
+ """ Returns the sum of the lp norm of each example in examples
+ ARGS:
+ examples : tensor or Variable, with first dimension having size N
+ lp : string or int - either 'inf' or an int for which lp norm we use
+ RETURNS:
+ sum of each of the lp norm of each of the N elements in examples
+ """
+ return torch.sum(batchwise_norm(examples, lp, dim=0))
+
+
+def random_from_lp_ball(tensorlike, lp, lp_bound, dim=0):
+ """ Returns a new object of the same type/shape as tensorlike that is
+ randomly samples from the unit ball.
+
+ NOTE THIS IS NOT A UNIFORM SAMPLING METHOD!
+ (that's hard to implement, https://mathoverflow.net/a/9192/123034)
+
+ ARGS:
+ tensorlike : Tensor - reference object for which we generate
+ a new object of same shape/memory_location
+ lp : int or 'inf' - which style of lp we use
+        lp_bound : float - size of the lp ball we sample from
+ dim : int - which dimension is the 'keep dimension'
+ RETURNS:
+ new tensorlike where each slice across dim is uniform across the
+ lp ball of size lp_bound
+ """
+ assert isinstance(lp, int) or lp == 'inf'
+
+ rand_direction = torch.rand(tensorlike.shape).type(tensorlike.type())
+
+ if lp == 'inf':
+ return rand_direction * (2 * lp_bound) - lp_bound
+ else:
+ rand_direction = rand_direction - 0.5 # allow for sign swapping
+ # first magnify such that each element is above the ball
+ min_norm = torch.min(batchwise_norm(rand_direction.abs(), lp, dim=dim))
+ rand_direction = rand_direction / (min_norm + 1e-6)
+ rand_magnitudes = torch.rand(tensorlike.shape[dim]).type(
+ tensorlike.type())
+ rand_magnitudes = rand_magnitudes.unsqueeze(1)
+ rand_magnitudes = rand_magnitudes.expand(*rand_direction.shape)
+
+ return torch.renorm(rand_direction, lp, dim, lp_bound) * rand_magnitudes
+
+
+def tanh_transform(tensorlike, forward=True):
+ """ Takes in Tensor or Variable and converts it between [0, 1] range and
+ (-inf, +inf) range by performing an invertible tanh transformation.
+ ARGS:
+ tensorlike : Tensor or Variable (arbitrary shape) - object to be
+ modified into or out of tanh space
+ forward : bool - if True we convert from [0, 1] space to (-inf, +inf)
+ space
+ if False we convert from (-inf, +inf) space to [0, 1]
+ space
+ RETURNS:
+ object of the same shape/type as tensorlike, but with the appropriate
+ transformation
+ """
+ if forward:
+ assert torch.min(tensorlike) >= 0.0
+ assert torch.max(tensorlike) <= 1.0
+ # first convert to [-1, +1] space
+ temp = (tensorlike * 2 - 1) * (1 - 1e-6)
+ return torch.log((1 + temp) / (1 - temp)) / 2.0
+
+ else:
+ return (torch.tanh(tensorlike) + 1) / 2.0
+
+
+def fold_mask(x, y, mask):
+ """ Creates a new tensor that's the result of masking between x and y
+ ARGS:
+ x : Tensor or Variable (NxSHAPE) - tensor that we're selecting where the
+ masked values are 1
+ y : Tensor or Variable (NxSHAPE) - tensor that we're selecting where the
+ masked values are 0
+ mask: ByteTensor (N) - masked values. Is only one dimensional: we expand
+ it in the creation of this
+ RETURNS:
+ new object of the same shape/type as x and y
+ """
+ assert x.shape == y.shape
+ assert mask.shape == (x.shape[0],)
+ assert type(x) == type(y)
+ is_var = isinstance(x, Variable)
+ if is_var:
+ assert isinstance(mask, Variable)
+
+ per_example_shape = x.shape[1:]
+ make_broadcastable = lambda m: m.view(-1, *tuple([1] * (x.dim() - 1)))
+
+ broadcast_mask = make_broadcastable(mask)
+ broadcast_not_mask = make_broadcastable(1 - safe_tensor(mask))
+ if is_var:
+ broadcast_not_mask = Variable(broadcast_not_mask)
+
+ output = torch.zeros_like(x)
+ output.add_(x * (broadcast_mask.type(x.type())))
+ output.add_(y * (broadcast_not_mask.type(y.type())))
+
+ return output
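+
+
+# Illustrative usage (sketch only, not part of the original mister_ed code):
+#
+#   x, y = torch.ones(4, 3), torch.zeros(4, 3)
+#   mask = torch.tensor([1, 0, 1, 0], dtype=torch.uint8)
+#   mixed = fold_mask(x, y, mask)   # rows 0 and 2 come from x, rows 1 and 3 from y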
+
+
+###############################################################################
+# #
+# CUDA RELATED THINGS #
+# #
+###############################################################################
+
+# fxn taken from https://discuss.pytorch.org/t/memory-leaks-in-trans-conv/12492
+def get_gpu_memory_map():
+ try:
+ result = subprocess.check_output(
+ [
+ 'nvidia-smi', '--query-gpu=memory.used',
+ '--format=csv,nounits,noheader'
+ ])
+ except:
+ result = ""
+ try:
+ return float(result)
+ except:
+ return result
+
+
+def rough_gpu_estimate():
+ """ Roughly estimates the size of the cuda tensors stored on GPUs.
+ If multiple gpus, returns a dict of {GPU_id: total num elements }
+ otherwise just returns the total number of elements
+ """
+ cuda_count = {}
+ listprod = lambda l: reduce(lambda x, y: x * y, l)
+ for el in gc.get_objects():
+        if isinstance(el, (torch.Tensor, Variable)) and el.is_cuda:
+ device = el.get_device()
+ cuda_count[device] = (cuda_count.get(device, 0) +
+ listprod(el.size()))
+
+ if len(cuda_count.keys()) == 0:
+ return 0
+ elif len(cuda_count.keys()) == 1:
+        return sizeof_fmt(next(iter(cuda_count.values())))  # dict views are not indexable in Python 3
+ else:
+ return {k: sizeof_fmt(v) for k, v in cuda_count.items()}
+
+
+##############################################################################
+# #
+# CLASSIFICATION HELPERS #
+# #
+##############################################################################
+# aka little utils that are useful for classification
+
+def accuracy_int(output, target, topk=1):
+ """ Computes the number of correct examples in the output.
+ RETURNS an int!
+ """
+ _, pred = output.topk(topk, 1, True, True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+ return int(correct.data.sum())
+
+
+def accuracy(output, target, topk=(1,)):
+ """Computes the precision@k for the specified values of k"""
+ maxk = max(topk)
+ batch_size = target.size(0)
+
+ _, pred = output.topk(maxk, 1, True, True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+ res = []
+ for k in topk:
+ correct_k = correct[:k].view(-1).float().sum(0)
+ res.append(correct_k.mul_(100.0 / batch_size))
+ return res
+
+
+###############################################################################
+# #
+# NORMALIZERS #
+# #
+###############################################################################
+
+
+class IdentityNormalize(Function):
+ def __init__(self):
+ pass
+
+ def forward(self, var):
+ return var
+
+ def differentiable_call(self):
+ pass
+
+
+class DifferentiableNormalize(Function):
+
+ def __init__(self, mean, std):
+ super(DifferentiableNormalize, self).__init__()
+ self.mean = mean
+ self.std = std
+ self.differentiable = True
+ self.nondiff_normer = transforms.Normalize(mean, std)
+
+ def __call__(self, var):
+ if self.differentiable:
+ return self.forward(var)
+ else:
+ return self.nondiff_normer(var)
+
+ def _setter(self, c, mean, std):
+ """ Modifies params going forward """
+ if mean is not None:
+ self.mean = mean
+ assert len(self.mean) == c
+
+ if std is not None:
+ self.std = std
+ assert len(self.std) == c
+
+ if mean is not None or std is not None:
+ self.nondiff_normer = transforms.Normalize(self.mean, self.std)
+
+ def differentiable_call(self):
+ """ Sets the __call__ method to be the differentiable version """
+ self.differentiable = True
+
+ def nondifferentiable_call(self):
+ """ Sets the __call__ method to be the torchvision.transforms version"""
+ self.differentiable = False
+
+ def forward(self, var, mean=None, std=None):
+ """ Normalizes var by subtracting the mean of each channel and then
+ dividing each channel by standard dev
+ ARGS:
+ self - stores mean and std for later
+ var - Variable of shape NxCxHxW
+ mean - if not None is a list of length C for channel-means
+ std - if not None is a list of length C for channel-stds
+ RETURNS:
+ variable of normalized var
+ """
+ c = var.shape[1]
+ self._setter(c, mean, std)
+
+ mean_var = Variable(var.data.new(self.mean).view(1, c, 1, 1))
+ std_var = Variable(var.data.new(self.std).view(1, c, 1, 1))
+ return (var - mean_var) / std_var
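+
+
+# Illustrative usage (sketch only, not part of the original mister_ed code);
+# the mean/std below are the usual CIFAR-10 statistics:
+#
+#   normalizer = DifferentiableNormalize(mean=[0.4914, 0.4822, 0.4465],
+#                                        std=[0.2023, 0.1994, 0.2010])
+#   logits = classifier_net(normalizer(images))   # differentiable by default
+#   normalizer.nondifferentiable_call()           # switch to torchvision's Normalize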
+
+
+##############################################################################
+# #
+# TRAINING LOGGER #
+# #
+##############################################################################
+
+
+class TrainingLogger(object):
+
+ def __init__(self):
+ """ Unified object to keep track of training data at a specified logging
+ level. Namely this tracks ground accuracy, loss and attack accuracy
+ for each attack incorporated into adversarial training.
+ Will ultimately contain plotting techniques too (TODO!)
+ """
+ self.series = {}
+
+ def data_count(self):
+ """ Returns the number of data points in this logger instance """
+ return sum(len(_) for _ in self.series.values())
+
+ def add_series(self, name):
+ """ Adds the name of a 'data series' where each data series is a list
+ of data-entries, where each data-entry is of the form
+ ((epoch, minibatch), data-value ) [and data-value is a float]
+ """
+ if name not in self.series:
+ self.series[name] = []
+
+ def sort_series(self, name, return_keys=False):
+ """ Simply returns the series of specified name sorted by epoch and then
+ minibatch.
+ ARGS:
+            name: string - name of existing series in self.series
+ return_keys: bool - if True, the output list is like
+ [((epoch, minibatch), val), ...]
+ and if False, it's just like [val, ... val...]
+ RETURNS:
+ sorted list of outputs, the exact form of which is determined by
+ the value of return_keys
+ """
+ data_series = self.series[name]
+
+ sorted_series = sorted(data_series, key=lambda p: p[0])
+
+ if return_keys is False:
+ return [_[1] for _ in sorted_series]
+ else:
+ return sorted_series
+
+ def get_series(self, name):
+ """ simple getter method for the given named data series """
+ return self.series[name]
+
+ def log_datapoint(self, name, data_tuple):
+ """ Logs the full data point
+ ARGS:
+ name: string - name of existing series in self.series
+ data_tuple : tuple of form ((epoch, minibatch), value)
+ RETURNS:
+ None
+ """
+ self.series[name].append(data_tuple)
+
+ def log(self, name, epoch, minibatch, value):
+ """ Logs the data point by specifying each of epoch, minibatch, value
+ ARGS:
+ name : string - name of existing series in self.series
+ epoch: int - which epoch of training we're logging
+ minibatch : int - which minibatch of training we're logging
+ value : - value we're logging
+ """
+ self.log_datapoint(name, ((epoch, minibatch), value))
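+
+
+# Illustrative usage of TrainingLogger (sketch only, not part of the original
+# mister_ed code):
+#
+#   logger = TrainingLogger()
+#   logger.add_series('ground_accuracy')
+#   logger.log('ground_accuracy', epoch=0, minibatch=10, value=0.42)
+#   values = logger.sort_series('ground_accuracy')   # -> [0.42]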
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/norms.py b/case_studies/diffpure/stadv_eot/recoloradv/norms.py
new file mode 100644
index 0000000..57f65cd
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/norms.py
@@ -0,0 +1,51 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/norms.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+import torch
+from torch.autograd import Variable
+
+
+def smoothness(grid):
+ """
+ Given a variable of dimensions (N, X, Y, [Z], C), computes the sum of
+ the differences between adjacent points in the grid formed by the
+ dimensions X, Y, and (optionally) Z. Returns a tensor of dimension N.
+ """
+
+ num_dims = len(grid.size()) - 2
+ batch_size = grid.size()[0]
+ norm = Variable(torch.zeros(batch_size, dtype=grid.data.dtype,
+ device=grid.data.device))
+
+ for dim in range(num_dims):
+ slice_before = (slice(None),) * (dim + 1)
+ slice_after = (slice(None),) * (num_dims - dim)
+ shifted_grids = [
+ # left
+ torch.cat([
+ grid[slice_before + (slice(1, None),) + slice_after],
+ grid[slice_before + (slice(-1, None),) + slice_after],
+ ], dim + 1),
+ # right
+ torch.cat([
+ grid[slice_before + (slice(None, 1),) + slice_after],
+ grid[slice_before + (slice(None, -1),) + slice_after],
+ ], dim + 1)
+ ]
+ for shifted_grid in shifted_grids:
+ delta = shifted_grid - grid
+ norm_components = (delta.pow(2).sum(-1) + 1e-10).pow(0.5)
+ norm.add_(norm_components.sum(
+ tuple(range(1, len(norm_components.size())))))
+
+ return norm
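+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustration only, not part of the original
+    # recoloradv code): one smoothness value per example in the batch.
+    grid = Variable(torch.rand(2, 16, 16, 2))
+    print(smoothness(grid))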
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/perturbations.py b/case_studies/diffpure/stadv_eot/recoloradv/perturbations.py
new file mode 100644
index 0000000..bf07af7
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/perturbations.py
@@ -0,0 +1,129 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/perturbations.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+from .mister_ed import adversarial_perturbations as ap
+from .mister_ed.adversarial_perturbations import initialized
+from .mister_ed.utils import pytorch_utils as utils
+
+from . import color_transformers as ct
+from . import color_spaces as cs
+
+
+class ReColorAdv(ap.AdversarialPerturbation):
+ """
+ Puts the color at each pixel in the image through the same transformation.
+
+ Parameters:
+ - lp_style: number or 'inf'
+ - lp_bound: maximum norm of color transformation. Can be a tensor of size
+ (num_channels,), in which case each channel will be bounded by the
+ cooresponding bound in the tensor. For instance, passing
+ [0.1, 0.15, 0.05] would allow a norm of 0.1 for R, 0.15 for G, and 0.05
+ for B. Not supported by all transformations.
+ - use_smooth_loss: whether to optimize using the loss function
+ for FullSpatial that rewards smooth vector fields
+ - xform_class: a subclass of
+ color_transformers.ParameterizedTransformation
+ - xform_params: dict of parameters to pass to the xform_class.
+ - cspace_class: a subclass of color_spaces.ColorSpace that indicates
+ in which color space the transformation should be performed
+ (RGB by default)
+ """
+
+ def __init__(self, threat_model, perturbation_params, *other_args):
+ super().__init__(threat_model, perturbation_params)
+ assert issubclass(perturbation_params.xform_class,
+ ct.ParameterizedTransformation)
+
+ self.lp_style = perturbation_params.lp_style
+ self.lp_bound = perturbation_params.lp_bound
+ self.use_smooth_loss = perturbation_params.use_smooth_loss
+ self.scalar_step = perturbation_params.scalar_step or 1.0
+ self.cspace = perturbation_params.cspace or cs.RGBColorSpace()
+
+ def _merge_setup(self, num_examples, new_xform):
+ """ DANGEROUS TO BE CALLED OUTSIDE OF THIS FILE!!!"""
+ self.num_examples = num_examples
+ self.xform = new_xform
+ self.initialized = True
+
+ def setup(self, originals):
+ super().setup(originals)
+ self.xform = self.perturbation_params.xform_class(
+ shape=originals.shape, manual_gpu=self.use_gpu,
+ cspace=self.cspace,
+ **(self.perturbation_params.xform_params or {}),
+ )
+ self.initialized = True
+
+ @initialized
+ def perturbation_norm(self, x=None, lp_style=None):
+ lp_style = lp_style or self.lp_style
+ if self.use_smooth_loss:
+ assert isinstance(self.xform, ct.FullSpatial)
+ return self.xform.smoothness_norm()
+ else:
+ return self.xform.norm(lp=lp_style)
+
+ @initialized
+ def constrain_params(self, x=None):
+ # Do lp projections
+ if isinstance(self.lp_style, int) or self.lp_style == 'inf':
+ self.xform.project_params(self.lp_style, self.lp_bound)
+
+ @initialized
+ def update_params(self, step_fxn):
+ param_list = list(self.xform.parameters())
+ assert len(param_list) == 1
+ params = param_list[0]
+ assert params.grad.data is not None
+ self.add_to_params(step_fxn(params.grad.data) * self.scalar_step)
+
+ @initialized
+ def add_to_params(self, grad_data):
+ """ Assumes only one parameters object in the Spatial Transform """
+ param_list = list(self.xform.parameters())
+ assert len(param_list) == 1
+ params = param_list[0]
+ params.data.add_(grad_data)
+
+ @initialized
+ def random_init(self):
+ param_list = list(self.xform.parameters())
+ assert len(param_list) == 1
+ param = param_list[0]
+ random_perturb = utils.random_from_lp_ball(param.data,
+ self.lp_style,
+ self.lp_bound)
+
+ param.data.add_(self.xform.identity_params +
+ random_perturb - self.xform.xform_params.data)
+
+ @initialized
+ def merge_perturbation(self, other, self_mask):
+ super().merge_perturbation(other, self_mask)
+ new_perturbation = ReColorAdv(self.threat_model,
+ self.perturbation_params)
+
+ new_xform = self.xform.merge_xform(other.xform, self_mask)
+ new_perturbation._merge_setup(self.num_examples, new_xform)
+
+ return new_perturbation
+
+ def forward(self, x):
+ if not self.initialized:
+ self.setup(x)
+ self.constrain_params()
+
+ return self.cspace.to_rgb(
+ self.xform.forward(self.cspace.from_rgb(x)))
diff --git a/case_studies/diffpure/stadv_eot/recoloradv/utils.py b/case_studies/diffpure/stadv_eot/recoloradv/utils.py
new file mode 100644
index 0000000..bd10178
--- /dev/null
+++ b/case_studies/diffpure/stadv_eot/recoloradv/utils.py
@@ -0,0 +1,108 @@
+# ---------------------------------------------------------------
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# This file has been modified from recoloradv.
+#
+# Source:
+# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/utils.py
+#
+# The license for the original version of this file can be
+# found in this directory (LICENSE_RECOLORADV).
+# The modifications to this file are subject to the same license.
+# ---------------------------------------------------------------
+
+from torch import nn
+from torch import optim
+
+from .mister_ed.utils.pytorch_utils import DifferentiableNormalize
+from .mister_ed import adversarial_perturbations as ap
+from .mister_ed import adversarial_attacks as aa
+from .mister_ed import spatial_transformers as st
+from .mister_ed import loss_functions as lf
+from .mister_ed import adversarial_training as advtrain
+
+from . import perturbations as pt
+from . import color_transformers as ct
+from . import color_spaces as cs
+
+
+def get_attack_from_name(
+ name: str,
+ classifier: nn.Module,
+ normalizer: DifferentiableNormalize,
+ verbose: bool = False,
+) -> advtrain.AdversarialAttackParameters:
+ """
+ Builds an attack from a name like "recoloradv" or "stadv+delta" or
+ "recoloradv+stadv+delta".
+ """
+
+ threats = []
+ norm_weights = []
+
+ for attack_part in name.split('+'):
+ if attack_part == 'delta':
+ threats.append(ap.ThreatModel(
+ ap.DeltaAddition,
+ ap.PerturbationParameters(
+ lp_style='inf',
+ lp_bound=8.0 / 255,
+ ),
+ ))
+ norm_weights.append(0.0)
+ elif attack_part == 'stadv':
+ threats.append(ap.ThreatModel(
+ ap.ParameterizedXformAdv,
+ ap.PerturbationParameters(
+ lp_style='inf',
+ lp_bound=0.05,
+ xform_class=st.FullSpatial,
+ use_stadv=True,
+ ),
+ ))
+ norm_weights.append(1.0)
+ elif attack_part == 'recoloradv':
+ threats.append(ap.ThreatModel(
+ pt.ReColorAdv,
+ ap.PerturbationParameters(
+ lp_style='inf',
+ lp_bound=[0.06, 0.06, 0.06],
+ xform_params={
+ 'resolution_x': 16,
+ 'resolution_y': 32,
+ 'resolution_z': 32,
+ },
+ xform_class=ct.FullSpatial,
+ use_smooth_loss=True,
+ cspace=cs.CIELUVColorSpace(),
+ ),
+ ))
+ norm_weights.append(1.0)
+ else:
+ raise ValueError(f'Invalid attack "{attack_part}"')
+
+ sequence_threat = ap.ThreatModel(
+ ap.SequentialPerturbation,
+ threats,
+ ap.PerturbationParameters(norm_weights=norm_weights),
+ )
+
+ # use PGD attack
+ adv_loss = lf.CWLossF6(classifier, normalizer, kappa=float('inf'))
+ st_loss = lf.PerturbationNormLoss(lp=2)
+ loss_fxn = lf.RegularizedLoss({'adv': adv_loss, 'pert': st_loss},
+ {'adv': 1.0, 'pert': 0.05},
+ negate=True)
+
+ pgd_attack = aa.PGD(classifier, normalizer, sequence_threat, loss_fxn)
+ return advtrain.AdversarialAttackParameters(
+ pgd_attack,
+ 1.0,
+ attack_specific_params={'attack_kwargs': {
+ 'num_iterations': 100,
+ 'optimizer': optim.Adam,
+ 'optimizer_kwargs': {'lr': 0.001},
+ 'signed': False,
+ 'verbose': verbose,
+ }},
+ )
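+
+
+# Illustrative usage (sketch only, not part of the original recoloradv code);
+# `classifier` and `normalizer` are placeholders for a CIFAR-10 model and its
+# DifferentiableNormalize instance:
+#
+#   attack_params = get_attack_from_name('recoloradv+stadv+delta',
+#                                        classifier, normalizer)
+#
+# The returned AdversarialAttackParameters wraps a 100-iteration, Adam-based
+# PGD attack over the combined threat model.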
diff --git a/case_studies/dla/adversarial_evaluation.py b/case_studies/dla/adversarial_evaluation.py
new file mode 100644
index 0000000..5200fcd
--- /dev/null
+++ b/case_studies/dla/adversarial_evaluation.py
@@ -0,0 +1,133 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import warnings
+warnings.filterwarnings("ignore")
+
+import argparse
+
+import torch
+import numpy as np
+
+import defense_v3
+import defense_v2
+import defense
+from cifar import CIFAR10
+import pgd_attack
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--batch-size", type=int, default=512)
+ parser.add_argument("--n-samples", type=int, default=512)
+ parser.add_argument("--adversarial-attack",
+ choices=(None, "pgd", "selective-pgd", "joined-pgd"),
+ default=None)
+ parser.add_argument("--epsilon", type=float, default=0.31)
+ parser.add_argument("--n-steps", type=int, default=100)
+ parser.add_argument("--step-size", type=float, default=0.001)
+ parser.add_argument("--threshold", type=float, default=None)
+ parser.add_argument("--fpr-threshold", type=float, default=0.05)
+
+ args = parser.parse_args()
+ assert args.n_samples < 5000
+ if args.epsilon > 0 or args.n_steps > 0 or args.step_size > 0:
+ assert args.adversarial_attack is not None
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ dataset = CIFAR10(tf_mode=True)
+ classifier_and_detector, classifier, detector = defense_v2.load_model(
+ device=device)
+
+ n_batches = int(np.ceil(args.n_samples / args.batch_size))
+
+ is_adv = []
+ adv_detector_scores = []
+ detector_scores = []
+ for batch_idx in range(n_batches):
+ x_batch = dataset.test_data[batch_idx*args.batch_size :
+ (batch_idx+1)*args.batch_size]
+ y_batch = dataset.test_labels[batch_idx*args.batch_size :
+ (batch_idx+1)*args.batch_size]
+ x_batch = x_batch.transpose((0, 3, 1, 2))
+ x_batch = torch.tensor(x_batch, dtype=torch.float32).to(device)
+ y_batch = torch.tensor(y_batch, dtype=torch.long).to(device)
+
+ if args.adversarial_attack is not None:
+ x_adv_batch = pgd_attack.attack(
+ x_batch, y_batch, classifier, classifier_and_detector,
+ args.adversarial_attack, args.n_steps, args.step_size, args.epsilon)
+
+ with torch.no_grad():
+ logits, adv_detector_scores_batch = classifier_and_detector(x_adv_batch)
+ adv_detector_scores_batch = adv_detector_scores_batch.cpu().numpy()
+ adv_predictions_batch = logits.argmax(1)
+ detector_scores_batch = detector(x_batch).cpu().numpy()
+
+ is_adv_batch = adv_predictions_batch != y_batch
+ is_adv_batch = is_adv_batch.cpu().numpy()
+
+ is_adv.append(is_adv_batch)
+ detector_scores.append(detector_scores_batch)
+ adv_detector_scores.append(adv_detector_scores_batch)
+
+ is_adv = np.concatenate(is_adv, 0)
+ is_adv = is_adv[:args.n_samples]
+
+ detector_scores = np.concatenate(detector_scores, 0)
+ detector_scores = detector_scores[:args.n_samples]
+
+ adv_detector_scores = np.concatenate(adv_detector_scores, 0)
+ adv_detector_scores = adv_detector_scores[:args.n_samples]
+
+ if args.threshold is None:
+ detector_threshold = np.sort(detector_scores)[
+ -int(len(detector_scores) * args.fpr_threshold)]
+ print("Threshold for FPR", args.fpr_threshold, "=", detector_threshold)
+ else:
+ detector_threshold = args.threshold
+ adv_is_detected = adv_detector_scores > detector_threshold
+ is_detected = detector_scores > detector_threshold
+
+ # true positive: detected + adversarial example
+ # true negative: not detected + normal example
+ # false positive: detected + normal example
+ # false negative: not detected + adversarial example
+ tpr = np.mean(adv_is_detected)
+ fnr = np.mean(~adv_is_detected)
+ tnr = np.mean(~is_detected)
+ fpr = np.mean(is_detected)
+
+ tp = np.sum(adv_is_detected)
+ fn = np.sum(~adv_is_detected)
+ fp = np.sum(is_detected)
+
+ f1 = tp / (tp + 0.5 * (fp + fn))
+
+ print("TPR", tpr)
+ print("FPR", fpr)
+ print("TNR", tnr)
+ print("FNR", fnr)
+ print("F1 ", f1)
+
+ is_adv_and_not_detected = np.logical_and(is_adv, ~adv_is_detected)
+
+ print("Attack Success Rate (w/o detector):", np.mean(is_adv))
+ print("Attack Success Rate (w/ detector):", np.mean(is_adv_and_not_detected))
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/case_studies/dla/adversarial_evaluation.sh b/case_studies/dla/adversarial_evaluation.sh
new file mode 100644
index 0000000..641aae6
--- /dev/null
+++ b/case_studies/dla/adversarial_evaluation.sh
@@ -0,0 +1,27 @@
+n_samples=512
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "epsilon = 0.01, FGSM"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/adversarial_evaluation.py \
+ --adversarial-attack=pgd \
+ --epsilon=0.01 \
+ --step-size=0.01 \
+ --n-steps=1 \
+ --n-samples=$n_samples \
+ --batch-size=256
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "epsilon = 0.01, PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/adversarial_evaluation.py \
+ --adversarial-attack=pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-samples=$n_samples \
+ --batch-size=256
\ No newline at end of file
diff --git a/case_studies/dla/binarization_test.py b/case_studies/dla/binarization_test.py
new file mode 100644
index 0000000..3d18190
--- /dev/null
+++ b/case_studies/dla/binarization_test.py
@@ -0,0 +1,241 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import math
+import warnings
+
+from active_tests.decision_boundary_binarization import format_result
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack
+from argparse_utils import DecisionBoundaryBinarizationSettings
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+# import warnings
+# warnings.filterwarnings("ignore")
+
+import argparse
+
+import torch
+import numpy as np
+import utils as ut
+
+import defense_v2
+from cifar import CIFAR10
+import pgd_attack
+
+
+class TorchWithDetectAndOtherReadout(torch.nn.Module):
+ def __init__(self, model, alarm, other_readout):
+ super().__init__()
+ self.model = model
+ self.alarm = alarm
+ self.other_readout = other_readout
+
+ def forward(self, x):
+ _, hidden, features = self.model(x, return_features=True)
+ is_ok = self.alarm(hidden)
+ out = self.other_readout(features)
+ return out, is_ok
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--batch-size", type=int, default=512)
+ parser.add_argument("--n-samples", type=int, default=512)
+ parser.add_argument("--adversarial-attack",
+ choices=("pgd", "selective-pgd", "joined-pgd"),
+ required=True)
+ parser.add_argument("--epsilon", type=float, default=0)
+ parser.add_argument("--n-steps", type=int, default=0)
+ parser.add_argument("--step-size", type=float, default=0)
+
+ parser.add_argument("--n-boundary-points", default=49, type=int)
+ parser.add_argument("--n-inner-points", default=10, type=int)
+ # parser.add_argument("--dont-verify-training-data", action="store_true")
+ # parser.add_argument("--use-boundary-adverarials", action="store_true")
+ parser.add_argument("--inverted-test", action="store_true")
+
+ args = parser.parse_args()
+ assert args.n_samples < 5000
+ if args.epsilon > 0 or args.n_steps > 0 or args.step_size > 0:
+ assert args.adversarial_attack is not None
+
+ if args.inverted_test:
+ print("Running inverted test")
+ else:
+ print("Running normal/non-inverted test")
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ dataset = CIFAR10(tf_mode=True)
+ classifier_and_detector, classifier, detector = defense_v2.load_model(
+ device=device)
+
+ def verify_valid_input_data(x_set: torch.Tensor) -> np.ndarray:
+ """Returns True if something is not detected as an adversarial example."""
+ n_batches = math.ceil(x_set.shape[0] / args.batch_size)
+ with torch.no_grad():
+ return np.concatenate(
+ [(detector(
+ x_set[b * args.batch_size:(b + 1) * args.batch_size]
+ ) < 0).cpu().numpy() for b in range(n_batches)])
+
+ def get_boundary_adversarials(x, y, n_samples, epsilon):
+ """Generate adversarial examples for the base classifier."""
+ assert len(x.shape) == 3
+ x = x.unsqueeze(0)
+ x = torch.repeat_interleave(x, n_samples, dim=0)
+
+ y = y.unsqueeze(0)
+ y = torch.repeat_interleave(y, n_samples, dim=0)
+
+ for _ in range(25):
+ x_adv = pgd_attack.attack(
+ x, y, classifier, classifier_and_detector,
+ "pgd", args.n_steps, args.step_size, epsilon)
+
+ # project adversarials to the max norm boundary
+ x_adv = ut.clipping_aware_rescaling(x, x_adv - x, args.epsilon,
+ norm="linf")
+ is_valid = verify_valid_input_data(x_adv)
+ is_invalid = ~is_valid
+
+ if np.all(is_invalid):
+        # stop once every adversarial example in the batch is flagged by the
+        # detector
+ break
+ else:
+ raise RuntimeError("Could not generate adversarial example that gets "
+ "detected after 25 trials.")
+ return x_adv
+
+ def run_attack(m, l, attack_kwargs):
+ modified_classifier_and_detector = TorchWithDetectAndOtherReadout(
+ classifier_and_detector.model,
+ (lambda *args, **kwargs: -classifier_and_detector.alarm(
+ *args,
+ **kwargs)) if args.inverted_test else classifier_and_detector.alarm,
+ list(m.children())[-1])
+ for x, y in l:
+ x, y = x.to(device), y.to(device)
+ x_adv = pgd_attack.attack(
+ x, y, m, modified_classifier_and_detector,
+ args.adversarial_attack, args.n_steps, args.step_size, args.epsilon)
+ with torch.no_grad():
+ logits = m(x_adv)
+ is_adv = (logits.argmax(1) != y).cpu().numpy()
+
+ if args.inverted_test:
+ undetected = (detector(x_adv) > 0).cpu().numpy()
+ else:
+ undetected = (detector(x_adv) < 0).cpu().numpy()
+ is_adv = np.logical_and(is_adv, undetected)
+
+ return is_adv, (x_adv, logits)
+
+ class FeatureExtractor(torch.nn.Module):
+ def __init__(self, classifier_and_detector):
+ super().__init__()
+ self.classifier = classifier_and_detector.model
+
+ def forward(self, x, features_only=True):
+ if features_only:
+ _, _, f = self.classifier(x, return_features=True)
+ return f
+ else:
+ return self.classifier(x)
+
+ feature_extractor = FeatureExtractor(classifier_and_detector)
+
+ # select clean data samples which don't get rejected by the detector
+ test_data_x = []
+ test_data_y = []
+ batch_idx = 0
+ n_samples = 0
+ with torch.no_grad():
+ while n_samples < args.n_samples:
+ x_batch = dataset.test_data[batch_idx * args.batch_size:
+ (batch_idx + 1) * args.batch_size]
+ y_batch = dataset.test_labels[batch_idx * args.batch_size:
+ (batch_idx + 1) * args.batch_size]
+ x_batch = x_batch.transpose((0, 3, 1, 2))
+ x_batch = torch.tensor(x_batch, dtype=torch.float32)
+ y_batch = torch.tensor(y_batch, dtype=torch.long)
+ mask = verify_valid_input_data(x_batch.to(device))
+ x_batch = x_batch[mask].numpy()
+ y_batch = y_batch[mask].numpy()
+ test_data_x.append(x_batch)
+ test_data_y.append(y_batch)
+      n_samples += len(x_batch)
+      batch_idx += 1
+ test_data_x = np.concatenate(test_data_x, 0)
+ test_data_y = np.concatenate(test_data_y, 0)
+ test_data_x = test_data_x[:args.n_samples]
+ test_data_y = test_data_y[:args.n_samples]
+ del batch_idx, n_samples
+
+ test_loader = ut.build_dataloader_from_arrays(
+ test_data_x, test_data_y)
+
+ if args.inverted_test:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=1,
+ )
+ else:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=args.n_boundary_points - 1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=0,
+ )
+
+ far_off_distance = 1.75
+
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, attack_kwargs: run_attack(m, l, attack_kwargs),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=args.epsilon,
+ norm="linf",
+ lr=10000,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ n_inner_points=args.n_inner_points,
+ **additional_settings
+ ),
+ n_samples=args.n_samples,
+ device=device,
+ batch_size=args.batch_size,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+
+ verify_valid_boundary_training_data_fn=verify_valid_input_data,
+ get_boundary_adversarials_fn=get_boundary_adversarials,
+ verify_valid_inner_training_data_fn=None,
+ verify_valid_input_validation_data_fn=None,
+ fill_batches_for_verification=False,
+ far_off_distance=far_off_distance
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ args.n_samples))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/dla/binarization_test.sh b/case_studies/dla/binarization_test.sh
new file mode 100644
index 0000000..f81c9a9
--- /dev/null
+++ b/case_studies/dla/binarization_test.sh
@@ -0,0 +1,94 @@
+n_samples=512
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal test (1 boundary, 999 inner points), normal PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/binarization_test.py \
+ --adversarial-attack=pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --n-samples=$n_samples \
+ --batch-size=2048
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Inverted test (1 boundary, 999 inner points), normal PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/binarization_test.py \
+ --adversarial-attack=pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --n-samples=$n_samples \
+ --batch-size=2048 \
+ --inverted-test
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal test (1 boundary, 999 inner points), joined PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/binarization_test.py \
+ --adversarial-attack=joined-pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --n-samples=$n_samples \
+ --batch-size=2048
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Inverted test (1 boundary, 999 inner points), joined PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/binarization_test.py \
+ --adversarial-attack=joined-pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --n-samples=$n_samples \
+ --batch-size=2048 \
+ --inverted-test
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal test (1 boundary, 999 inner points), selective PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/binarization_test.py \
+ --adversarial-attack=selective-pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --n-samples=$n_samples \
+ --batch-size=2048
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Inverted test (1 boundary, 999 inner points), selective PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) \
+ python case_studies/dla/binarization_test.py \
+ --adversarial-attack=selective-pgd \
+ --epsilon=0.01 \
+ --step-size=0.001 \
+ --n-steps=200 \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --n-samples=$n_samples \
+ --batch-size=2048 \
+ --inverted-test
\ No newline at end of file
diff --git a/case_studies/dla/cifar.py b/case_studies/dla/cifar.py
new file mode 100644
index 0000000..294d151
--- /dev/null
+++ b/case_studies/dla/cifar.py
@@ -0,0 +1,51 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+
+class CIFAR10:
+ def __init__(self, seed = 43, tf_mode=False):
+ if tf_mode:
+ import tensorflow.compat.v1 as tf
+ tf.disable_v2_behavior()
+ tf.compat.v1.disable_eager_execution()
+ (train_data, train_labels),(self.test_data, self.test_labels) = tf.keras.datasets.cifar10.load_data()
+ else:
+ import torchvision
+ train_dataset = torchvision.datasets.CIFAR10("data", train=True)
+ test_dataset = torchvision.datasets.CIFAR10("data", train=False)
+ train_data, train_labels = train_dataset.data, np.array(train_dataset.targets, dtype=int)
+ self.test_data, self.test_labels = test_dataset.data, np.array(test_dataset.targets, dtype=int)
+
+ train_data = train_data/255.
+ self.test_data = self.test_data/255.
+
+ VALIDATION_SIZE = 5000
+
+ np.random.seed(seed)
+ shuffled_indices = np.arange(len(train_data))
+ np.random.shuffle(shuffled_indices)
+ train_data = train_data[shuffled_indices]
+ train_labels = train_labels[shuffled_indices]
+
+ shuffled_indices = np.arange(len(self.test_data))
+ np.random.shuffle(shuffled_indices)
+ self.test_data = self.test_data[shuffled_indices]
+ self.test_labels = self.test_labels[shuffled_indices].flatten()
+
+ self.validation_data = train_data[:VALIDATION_SIZE, :, :, :]
+ self.validation_labels = train_labels[:VALIDATION_SIZE]
+ self.train_data = train_data[VALIDATION_SIZE:, :, :, :]
+ self.train_labels = train_labels[VALIDATION_SIZE:]
\ No newline at end of file
diff --git a/case_studies/dla/defense.py b/case_studies/dla/defense.py
new file mode 100644
index 0000000..1add5e9
--- /dev/null
+++ b/case_studies/dla/defense.py
@@ -0,0 +1,187 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import numpy as np
+
+
+class TorchAlarm(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Linear(28682, 112),
+ torch.nn.ReLU(),
+ torch.nn.Linear(112, 100),
+ torch.nn.ReLU(),
+ torch.nn.Linear(100, 300),
+ torch.nn.ReLU(),
+ torch.nn.Linear(300, 200),
+ torch.nn.ReLU(),
+ torch.nn.Linear(200, 77),
+ torch.nn.ReLU(),
+ torch.nn.Linear(77, 1),
+ ])
+
+ def __call__(self, x, training=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+
+class resnet_layer_torch(torch.nn.Module):
+ def __init__(self,
+ prior_filters=16,
+ num_filters=16,
+ kernel_size=3,
+ strides=1):
+ super().__init__()
+ self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.layers = [self.a, self.b, self.c, self.d]
+ def forward(self, inputs):
+ x1 = self.a(inputs)
+ x2 = self.b(x1)
+ x3 = torch.nn.ReLU()(x2)
+ x4 = self.c(x3)
+ x5 = self.d(x4)
+ x6 = x5 + inputs
+ return x6, x2, x5
+
+
+class resnet_layer2_torch(torch.nn.Module):
+ def __init__(self,
+ prior_filters=16,
+ num_filters=16,
+ kernel_size=3,
+ strides=1):
+ super().__init__()
+ self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=0, stride=(2,2))
+ self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.c2 = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=1, padding=0, stride=(2,2))
+ self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.layers = [self.a, self.b, self.c, self.c2, self.d]
+
+ def forward(self, x):
+ xp = torch.nn.functional.pad(x, (0, 1, 0, 1), "constant", 0)
+ y = self.a(xp)
+ y = self.b(y)
+ y = torch.nn.ReLU()(y)
+ y = self.c(y)
+ z = self.c2(x)
+ y = self.d(y)
+ x = z+y
+ return x
+
+
+class TorchModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ class Transpose(torch.nn.Module):
+ def forward(self, x):
+ return x.permute((0, 2, 3, 1))
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
+ torch.nn.BatchNorm2d(16, eps=.000),
+ torch.nn.ReLU(),
+ # AAA
+
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+
+
+ resnet_layer2_torch(16, 32),
+ torch.nn.ReLU(),
+
+ resnet_layer_torch(32, 32),
+ torch.nn.ReLU(),
+ resnet_layer_torch(32, 32),
+ torch.nn.ReLU(),
+
+ resnet_layer2_torch(32, 64),
+ torch.nn.ReLU(),
+
+ resnet_layer_torch(64, 64),
+ torch.nn.ReLU(),
+ resnet_layer_torch(64, 64),
+ torch.nn.ReLU(),
+
+ torch.nn.AvgPool2d(8),
+ #
+ Transpose(),
+ torch.nn.Flatten(),
+ torch.nn.Linear(64, 10),
+
+
+ ])
+
+ def __call__(self, x, training=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ extra = []
+ for i,layer in enumerate(self.layers):
+ if isinstance(layer, resnet_layer_torch):
+ x,y,z = layer(x)
+ if i == 11:
+ extra.append(y)
+ if i == 19:
+ extra.append(z)
+ else:
+ x = layer(x)
+ if i == 1:
+ extra.append(x)
+
+ extra = torch.cat([x.permute((0, 2, 3, 1)).reshape((x.shape[0], -1)) for x in extra] + [x], axis=1)
+ return x, extra
+
+
+class TorchWithDetect:
+ def __init__(self, model, alarm):
+ self.model = model
+ self.alarm = alarm
+
+ def __call__(self, x):
+ out, hidden = self.model(x)
+ is_ok = self.alarm(hidden)
+ return out, is_ok
+
+
+#def load_model(path_model_weights='checkpoints/dla/dla_cifar_classifier.h5',
+# path_detector_weights='checkpoints/dla/dla_cifar_detector.h5', device=None):
+def load_model(path_model_weights='/home/AUTHOR/dla/dla/cifar_model.h5',
+ path_detector_weights='/home/AUTHOR/dla/dla/cifar_alarm.h5', device=None):
+ torch_model = TorchModel()
+ torch_model.load_state_dict(
+ torch.load(path_model_weights))
+ torch_model.eval().to(device)
+
+ torch_alarm = TorchAlarm()
+ torch_alarm.load_state_dict(
+ torch.load(path_detector_weights))
+ torch_alarm.eval().to(device)
+
+ return TorchWithDetect(torch_model, torch_alarm), \
+ lambda x: torch_model(x)[0], \
+ lambda x, how=None: torch_alarm(torch_model(x)[1]).flatten()
diff --git a/case_studies/dla/defense_v2.py b/case_studies/dla/defense_v2.py
new file mode 100644
index 0000000..fbb10d8
--- /dev/null
+++ b/case_studies/dla/defense_v2.py
@@ -0,0 +1,257 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+class TorchAlarm(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Linear(28682, 112),
+ torch.nn.ReLU(),
+ torch.nn.Linear(112, 100),
+ torch.nn.ReLU(),
+ torch.nn.Linear(100, 300),
+ torch.nn.ReLU(),
+ torch.nn.Linear(300, 200),
+ torch.nn.ReLU(),
+ torch.nn.Linear(200, 77),
+ torch.nn.ReLU(),
+ torch.nn.Linear(77, 1),
+ ])
+
+ def __call__(self, x, training=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+class resnet_layer_torch(torch.nn.Module):
+ def __init__(self,
+ prior_filters=16,
+ num_filters=16,
+ kernel_size=3,
+ strides=1):
+ super().__init__()
+ self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.layers = [self.a, self.b, self.c, self.d]
+ def forward(self, inputs):
+ x1 = self.a(inputs)
+ x2 = self.b(x1)
+ x3 = torch.nn.ReLU()(x2)
+ x4 = self.c(x3)
+ x5 = self.d(x4)
+ x6 = x5 + inputs
+ return x6, x2, x5
+
+
+class resnet_layer2_torch(torch.nn.Module):
+ def __init__(self,
+ prior_filters=16,
+ num_filters=16,
+ kernel_size=3,
+ strides=1):
+ super().__init__()
+ self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=0, stride=(2,2))
+ self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.c2 = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=1, padding=0, stride=(2,2))
+ self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.layers = [self.a, self.b, self.c, self.c2, self.d]
+
+ def forward(self, x):
+ xp = torch.nn.functional.pad(x, (0, 1, 0, 1), "constant", 0)
+ y = self.a(xp)
+ y = self.b(y)
+ y = torch.nn.ReLU()(y)
+ y = self.c(y)
+ z = self.c2(x)
+ y = self.d(y)
+ x = z+y
+ return x
+
+class TorchModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ class Transpose(torch.nn.Module):
+ def forward(self, x):
+ return x.permute((0, 2, 3, 1))
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
+ torch.nn.BatchNorm2d(16, eps=.000),
+ torch.nn.ReLU(),
+ # AAA
+
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+
+
+ resnet_layer2_torch(16, 32),
+ torch.nn.ReLU(),
+
+ resnet_layer_torch(32, 32),
+ torch.nn.ReLU(),
+ resnet_layer_torch(32, 32),
+ torch.nn.ReLU(),
+
+ resnet_layer2_torch(32, 64),
+ torch.nn.ReLU(),
+
+ resnet_layer_torch(64, 64),
+ torch.nn.ReLU(),
+ resnet_layer_torch(64, 64),
+ torch.nn.ReLU(),
+
+ torch.nn.AvgPool2d(8),
+ #
+ Transpose(),
+ torch.nn.Flatten(),
+ torch.nn.Linear(64, 10),
+
+
+ ])
+
+ def __call__(self, x, training=False, return_features=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ extra = []
+ for i,layer in enumerate(self.layers):
+ if isinstance(layer, resnet_layer_torch):
+ x,y,z = layer(x)
+ if i == 11:
+ extra.append(y)
+ if i == 19:
+ extra.append(z)
+ else:
+ x = layer(x)
+ if i == 1:
+ extra.append(x)
+
+ if isinstance(layer, torch.nn.Flatten):
+ features = x
+
+ extra = torch.cat([x.permute((0, 2, 3, 1)).reshape((x.shape[0], -1)) for x in extra] + [x], axis=1)
+
+ if return_features:
+ return x, extra, features
+ else:
+ return x, extra
+
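+# NOTE: the TorchModel/TorchAlarm definitions below shadow the ResNet-style
+# classes above; load_model at the end of this file therefore builds the plain
+# CNN classifier together with the SELU-based alarm network.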
+class TorchModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ class Transpose(torch.nn.Module):
+ def forward(self, x):
+ return x.permute((0, 2, 3, 1))
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Conv2d(3, 32, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.Conv2d(32, 32, kernel_size=3),
+ torch.nn.ReLU(),
+ torch.nn.MaxPool2d(2, 2),
+ torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.Conv2d(64, 64, kernel_size=3),
+ torch.nn.ReLU(),
+ torch.nn.MaxPool2d(2, 2),
+ Transpose(),
+ torch.nn.Flatten(),
+ torch.nn.Linear(2304, 512),
+ torch.nn.ReLU(),
+ torch.nn.Linear(512, 10)])
+
+
+ def __call__(self, x, training=False, return_features=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+
+ for i,layer in enumerate(self.layers):
+ #print('l',layer)
+ #print('x',x.shape)
+ x = layer(x)
+ if i == 13:
+ #print("Have", x)
+ sav = x
+
+ if isinstance(layer, torch.nn.Flatten):
+ features = x
+ #print('aaa', sav)
+ if return_features:
+ return x, torch.cat([sav, x], axis=1), features
+ else:
+ return x, torch.cat([sav, x], axis=1)
+
+
+class TorchAlarm(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Linear(522, 100),
+ torch.nn.SELU(),
+ torch.nn.Linear(100, 50),
+ torch.nn.SELU(),
+ torch.nn.Linear(50, 10),
+ torch.nn.SELU(),
+ torch.nn.Linear(10, 1)
+ ])
+
+ def __call__(self, x, training=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+
+class TorchWithDetect:
+ def __init__(self, model, alarm):
+ self.model = model
+ self.alarm = alarm
+
+ def __call__(self, x):
+ out, hidden = self.model(x)
+ is_ok = self.alarm(hidden)
+ return out, is_ok
+
+
+def load_model(path_model_weights='checkpoints/dla/dla_cifar_classifier_v2.h5',
+ path_detector_weights='checkpoints/dla/dla_cifar_detector_v2.h5', device=None):
+ torch_model = TorchModel()
+ torch_model.load_state_dict(
+ torch.load(path_model_weights))
+ torch_model.eval().to(device)
+
+ torch_alarm = TorchAlarm()
+ torch_alarm.load_state_dict(
+ torch.load(path_detector_weights))
+ torch_alarm.eval().to(device)
+
+ return TorchWithDetect(torch_model, torch_alarm), \
+ lambda x: torch_model(x)[0], \
+ lambda x, how=None: torch_alarm(torch_model(x)[1]).flatten()
\ No newline at end of file
diff --git a/case_studies/dla/defense_v3.py b/case_studies/dla/defense_v3.py
new file mode 100644
index 0000000..e68f0e1
--- /dev/null
+++ b/case_studies/dla/defense_v3.py
@@ -0,0 +1,263 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+class TorchResnetAlarm(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Linear(28682, 112),
+ torch.nn.ReLU(),
+ torch.nn.Linear(112, 100),
+ torch.nn.ReLU(),
+ torch.nn.Linear(100, 300),
+ torch.nn.ReLU(),
+ torch.nn.Linear(300, 200),
+ torch.nn.ReLU(),
+ torch.nn.Linear(200, 77),
+ torch.nn.ReLU(),
+ torch.nn.Linear(77, 1),
+ ])
+
+ def __call__(self, x, training=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+class resnet_layer_torch(torch.nn.Module):
+ def __init__(self,
+ prior_filters=16,
+ num_filters=16,
+ kernel_size=3,
+ strides=1):
+ super().__init__()
+ self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.layers = [self.a, self.b, self.c, self.d]
+ def forward(self, inputs):
+ x1 = self.a(inputs)
+ x2 = self.b(x1)
+ x3 = torch.nn.ReLU()(x2)
+ x4 = self.c(x3)
+ x5 = self.d(x4)
+ x6 = x5 + inputs
+ return x6, x2, x5
+
+
+class resnet_layer2_torch(torch.nn.Module):
+ def __init__(self,
+ prior_filters=16,
+ num_filters=16,
+ kernel_size=3,
+ strides=1):
+ super().__init__()
+ self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=0, stride=(2,2))
+ self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
+ self.c2 = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=1, padding=0, stride=(2,2))
+ self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
+ self.layers = [self.a, self.b, self.c, self.c2, self.d]
+
+ def forward(self, x):
+ xp = torch.nn.functional.pad(x, (0, 1, 0, 1), "constant", 0)
+ y = self.a(xp)
+ y = self.b(y)
+ y = torch.nn.ReLU()(y)
+ y = self.c(y)
+ z = self.c2(x)
+ y = self.d(y)
+ x = z+y
+ return x
+
+class TorchResnetModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ class Transpose(torch.nn.Module):
+ def forward(self, x):
+ return x.permute((0, 2, 3, 1))
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
+ torch.nn.BatchNorm2d(16, eps=.000),
+ torch.nn.ReLU(),
+ # AAA
+
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+ resnet_layer_torch(16, 16),
+ torch.nn.ReLU(),
+
+
+ resnet_layer2_torch(16, 32),
+ torch.nn.ReLU(),
+
+ resnet_layer_torch(32, 32),
+ torch.nn.ReLU(),
+ resnet_layer_torch(32, 32),
+ torch.nn.ReLU(),
+
+ resnet_layer2_torch(32, 64),
+ torch.nn.ReLU(),
+
+ resnet_layer_torch(64, 64),
+ torch.nn.ReLU(),
+ resnet_layer_torch(64, 64),
+ torch.nn.ReLU(),
+
+ torch.nn.AvgPool2d(8),
+ #
+ Transpose(),
+ torch.nn.Flatten(),
+ torch.nn.Linear(64, 10),
+
+
+ ])
+
+ def __call__(self, x, training=False, return_features=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ extra = []
+ for i,layer in enumerate(self.layers):
+ if isinstance(layer, resnet_layer_torch):
+ x,y,z = layer(x)
+ if i == 11:
+ extra.append(y)
+ if i == 19:
+ extra.append(z)
+ else:
+ x = layer(x)
+ if i == 1:
+ extra.append(x)
+
+ if isinstance(layer, torch.nn.Flatten):
+ features = x
+
+ extra = torch.cat([x.permute((0, 2, 3, 1)).reshape((x.shape[0], -1)) for x in extra] + [x], axis=1)
+
+ if return_features:
+ return x, extra, features
+ else:
+ return x, extra
+
+class TorchModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ class Transpose(torch.nn.Module):
+ def forward(self, x):
+ return x.permute((0, 2, 3, 1))
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Conv2d(3, 32, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.Conv2d(32, 32, kernel_size=3),
+ torch.nn.ReLU(),
+ torch.nn.MaxPool2d(2, 2),
+ torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.Conv2d(64, 64, kernel_size=3),
+ torch.nn.ReLU(),
+ torch.nn.MaxPool2d(2, 2),
+ Transpose(),
+ torch.nn.Flatten(),
+ torch.nn.Linear(2304, 512),
+ torch.nn.ReLU(),
+ torch.nn.Linear(512, 10)])
+
+
+ def __call__(self, x, training=False, return_features=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+
+ for i,layer in enumerate(self.layers):
+ #print('l',layer)
+ #print('x',x.shape)
+ x = layer(x)
+ if i == 13:
+ #print("Have", x)
+ sav = x
+
+ if isinstance(layer, torch.nn.Flatten):
+ features = x
+ #print('aaa', sav)
+ if return_features:
+ return x, torch.cat([sav, x], axis=1), features
+ else:
+ return x, torch.cat([sav, x], axis=1)
+
+
+class TorchAlarm(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Linear(522, 100),
+ # modification by AUTHOR
+ #torch.nn.SELU(),
+ torch.nn.Linear(100, 50),
+ # modification by AUTHOR
+ #torch.nn.SELU(),
+ torch.nn.Linear(50, 10),
+ # modification by AUTHOR
+ #torch.nn.SELU(),
+ torch.nn.Linear(10, 1)
+ ])
+
+ def __call__(self, x, training=False):
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ for i, layer in enumerate(self.layers):
+ x = layer(x)
+ # modification by AUTHOR
+ if i != len(self.layers) - 1:
+ x = torch.relu(x)
+ return x
+
+
+class TorchWithDetect:
+ def __init__(self, model, alarm):
+ self.model = model
+ self.alarm = alarm
+
+ def __call__(self, x):
+ out, hidden = self.model(x)
+ is_ok = self.alarm(hidden)
+ return out, is_ok
+
+
+def load_model(path_model_weights='/home/AUTHOR/dla/more_dla/CIFAR10_keras_cnn.torchmodel',
+ path_detector_weights='/home/AUTHOR/dla/more_dla/CIFAR10_keras_cnn_LinfPGD_alarm_DLA.torchmodel', device=None):
+ torch_model = TorchModel()
+ torch_model.load_state_dict(
+ torch.load(path_model_weights))
+ torch_model.eval().to(device)
+
+ torch_alarm = TorchAlarm()
+ torch_alarm.load_state_dict(
+ torch.load(path_detector_weights))
+ torch_alarm.eval().to(device)
+
+ return TorchWithDetect(torch_model, torch_alarm), \
+ lambda x: torch_model(x)[0], \
+ lambda x, how=None: torch_alarm(torch_model(x)[1]).flatten()
\ No newline at end of file
diff --git a/case_studies/dla/dla.py b/case_studies/dla/dla.py
new file mode 100644
index 0000000..5ae8036
--- /dev/null
+++ b/case_studies/dla/dla.py
@@ -0,0 +1,39 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import defense
+from cifar import CIFAR10
+
+
+if __name__ == '__main__':
+ device = "cuda"
+ classifier_and_detector, classifier, detector = defense.load_model(device=device)
+  data = CIFAR10(seed=43)
+
+ # ### PGD
+ num_images = 2000
+
+ images = torch.tensor(data.test_data[:num_images].transpose((0, 3, 1, 2)), dtype=torch.float32, requires_grad=False)
+ targets = torch.tensor(data.test_labels[:num_images], dtype=torch.int64)
+
+ outs, is_ok = classifier_and_detector(images.to(device))
+ outs = outs.cpu()
+ is_ok = is_ok.cpu()
+ import pdb; pdb.set_trace()
+
+ correct = data.test_labels[:num_images] == outs.argmax(1).detach().numpy()
+ is_ok = is_ok.detach().numpy()
+ print('acc', correct.mean())
+ print('fraction bad', (is_ok > 0).mean())
+ print("successful attacks", ((~correct) & (is_ok < 0)).mean())
diff --git a/case_studies/dla/pgd_attack.py b/case_studies/dla/pgd_attack.py
new file mode 100644
index 0000000..1945b95
--- /dev/null
+++ b/case_studies/dla/pgd_attack.py
@@ -0,0 +1,69 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn.functional as F
+import attacks.pgd
+
+
+def attack(x_batch, y_batch, classifier, classifier_and_detector,
+ adversarial_attack, n_steps, step_size, epsilon):
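+  # The attack variants differ in their loss and success criterion:
+  #  - "pgd": attacks the classifier alone and ignores the detector,
+  #  - "joined-pgd": additionally drives the detector logit towards the
+  #    "benign" (undetected) side for all samples,
+  #  - "selective-pgd": applies each loss term only to samples that are not
+  #    yet misclassified or not yet evading the detector, respectively.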
+ if adversarial_attack == "pgd":
+ loss_fn = lambda x, y: -F.cross_entropy(classifier(x), y)
+ def is_adversarial_fn(x, y):
+ with torch.no_grad():
+ return classifier(x).argmax(-1) != y
+
+ elif adversarial_attack == "joined-pgd":
+ def loss_fn(x, y):
+ l, k = classifier_and_detector(x)
+ return -F.cross_entropy(l, y) - F.binary_cross_entropy_with_logits(
+ k, torch.ones_like(k))
+
+ def is_adversarial_fn(x, y):
+ with torch.no_grad():
+ l, k = classifier_and_detector(x)
+ yc = l.argmax(1) != y
+ yd = k < 0
+ return torch.logical_and(yc, yd)
+
+ elif adversarial_attack == "selective-pgd":
+ def loss_fn(x, y):
+ l, k = classifier_and_detector(x)
+ mc = (l.argmax(1) == y).float().detach()
+ md = (k > 0).float().detach()
+
+ return -torch.mean(
+ mc * F.cross_entropy(l, y, reduction="none") +
+ md * F.binary_cross_entropy_with_logits(
+ k, torch.ones_like(k), reduction="none")
+ )
+
+ def is_adversarial_fn(x, y):
+ with torch.no_grad():
+ l, k = classifier_and_detector(x)
+ yc = l.argmax(1) != y
+ yd = k < 0
+ return torch.logical_and(yc, yd)
+
+ elif adversarial_attack == "orthogonal-pgd":
+ raise ValueError("not implemented")
+
+ x_batch = attacks.pgd.general_pgd(loss_fn, is_adversarial_fn,
+ x_batch, y_batch, n_steps,
+ step_size, epsilon, "linf",
+ early_stopping=True,
+ random_start=False
+ )[0]
+ return x_batch
diff --git a/case_studies/empir/README.md b/case_studies/empir/README.md
new file mode 100644
index 0000000..bf82ea7
--- /dev/null
+++ b/case_studies/empir/README.md
@@ -0,0 +1,145 @@
+# EMPIR: Ensembles of Mixed Precision Deep Networks for Increased Robustness against Adversarial Attacks
+
+This repository contains the source code for the paper EMPIR: Ensembles of Mixed Precision Deep Networks for Increased Robustness against Adversarial Attacks ([Accepted at ICLR 2020](https://openreview.net/forum?id=HJem3yHKwH))
+
+It is based on [CleverHans](https://github.com/tensorflow/cleverhans) 1.0.0, a Python library to
+benchmark machine learning systems' vulnerability to
+[adversarial examples](http://karpathy.github.io/2015/03/30/breaking-convnets/).
+You can learn more about such vulnerabilities on the accompanying [blog](http://cleverhans.io).
+
+## Setting up
++ Install [TensorFlow](https://www.tensorflow.org/)
++ Install [Keras](https://keras.io/)
++ Git clone this repository
++ For ImageNet results, download ImageNet dataset and convert the data into `TFRecords` using [this](https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py) script.
+
+We tested this setup using tensorflow-gpu 1.10, keras 2.2.4, python 3.5, CUDA 9.2 and Ubuntu 18.04 on a single RTX 2080 Ti GPU. Tensorflow was installed using [anaconda](https://www.anaconda.com/).
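+
+For reference, an environment matching these versions could be created roughly as follows (the environment name and exact package pins are illustrative and not part of the original instructions):
+
+```bash
+conda create -n empir python=3.5
+conda activate empir
+pip install tensorflow-gpu==1.10.0 keras==2.2.4
+```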
+
+## Example commands
++ `python examples/mnist_attack.py --wbits=$model1_weight_prec --abits=$model1_activation_prec --wbits2=$model2_weight_prec --abits2=$model2_activation_prec --ensembleThree --model_path1=/path/to/model1/ckpt --model_path2=/path/to/model2/ckpt --model_path3=/path/to/model3/ckpt` - White-Box CW attack on MNISTconv EMPIR model
++ `python examples/mnist_attack.py --model_path=/path/to/baseline/model/ckpt` - White-Box CW attack on MNISTconv baseline model
++ `python examples/cifar10_attack.py --abits=$model1_activation_prec --wbits=$model1_weight_prec --abits2=$model2_activation_prec --wbits2=$model2_weight_prec --model_path1=/path/to/model1/ckpt --model_path2=/path/to/model2/ckpt --model_path3=/path/to/model3/ckpt --ensembleThree` - White-Box CW attack on CIFARconv EMPIR model
++ `python examples/cifar10_attack.py --model_path=/path/to/baseline/model/ckpt` - White-Box CW attack on CIFARconv baseline model
++ `python examples/alexnet_attack.py --batch_size=100 --imagenet_path=/path/to/imagenet/tf_records --ensembleThree --abits=$model1_activation_prec --wbits=$model1_weight_prec --abits2=$model2_activation_prec --wbits2=$model2_weight_prec --model_path1=/path/to/model1/ckpt --model_path2=/path/to/model2/ckpt --model_path3=/path/to/model3/ckpt` - White-Box CW attack on AlexNet EMPIR model
++ `python examples/alexnet_attack.py --batch_size=100 --imagenet_path=/path/to/imagenet/tf_records --model_path=/path/to/baseline/model/ckpt` - White-Box CW attack on AlexNet baseline model
+
+## Results
++ EMPIR models
+
+
+| Dataset | Model 1 precision | Model 2 precision | Model 3 precision | Unperturbed Acc. (%) | Adversarial Acc. CW (%) | Adversarial Acc. FGSM (%) | Adversarial Acc. BIM (%) | Adversarial Acc. PGD (%) | Adversarial Acc. Average (%) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| MNIST | abits=4, wbits=2 Download | abits=4, wbits=2 Download | Full-precision (32 bits) Download | 98.89 | 86.73 | 67.06 | 18.61 | 17.51 | 47.48 |
+| CIFAR-10 | abits=2, wbits=4 Download | abits=2, wbits=2 Download | Full-precision (32 bits) Download | 72.56 | 48.51 | 20.45 | 24.59 | 13.55 | 26.78 |
+| ImageNet | abits=2, wbits=2 Download | abits=4, wbits=4 Download | Full-precision (32 bits) Download | 55.09 | 29.36 | 21.65 | 20.67 | 11.76 | 20.86 |
+
+
+
++ Baseline models
+
+
+| Dataset | Models | Unperturbed Acc. (%) | Adversarial Acc. CW (%) | Adversarial Acc. FGSM (%) | Adversarial Acc. BIM (%) | Adversarial Acc. PGD (%) | Adversarial Acc. Average (%) |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| MNIST | MNISTconv Download | 98.87 | 3.69 | 14.32 | 0.9 | 0.77 | 4.92 |
+| CIFAR-10 | CIFARconv Download | 74.54 | 13.38 | 10.28 | 11.97 | 10.69 | 11.58 |
+| ImageNet | AlexNet Download | 53.23 | 9.94 | 10.29 | 10.81 | 10.30 | 10.34 |
+
+
+
+## Citing this work
+
+```
+@inproceedings{
+sen2020empir,
+title={{EMPIR}: Ensembles of Mixed Precision Deep Networks for Increased Robustness Against Adversarial Attacks},
+author={Sanchari Sen and Balaraman Ravindran and Anand Raghunathan},
+booktitle={International Conference on Learning Representations},
+year={2020},
+url={https://openreview.net/forum?id=HJem3yHKwH}
+}
+```
+## Copyright
+
+Copyright 2017 - Google Inc., OpenAI and Pennsylvania State University.
diff --git a/case_studies/empir/cleverhans_tutorials/__init__.py b/case_studies/empir/cleverhans_tutorials/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/empir/cleverhans_tutorials/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/empir/cleverhans_tutorials/tutorial_models.py b/case_studies/empir/cleverhans_tutorials/tutorial_models.py
new file mode 100644
index 0000000..26283ad
--- /dev/null
+++ b/case_studies/empir/cleverhans_tutorials/tutorial_models.py
@@ -0,0 +1,1984 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A pure TensorFlow implementation of a neural network. This can be
+used as a drop-in replacement for a Keras model.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy as np
+import tensorflow as tf
+from modified_cleverhans.model import Model
+
+BN_EPSILON = 1e-5
+
+## For alexnet's local response normalization
+RADIUS = 2; ALPHA = 2E-05; BETA = 0.75; BIAS = 1.0 # values copied from myalexnet_forward_newtf.py
+
+@tf.RegisterGradient("QuantizeGrad")
+def quantize_grad(op, grad):
+ return tf.clip_by_value(tf.identity(grad), -1, 1)
+
+
+def hard_sigmoid(x):
+ return tf.cast(tf.clip_by_value((x + 1.) / 2., 0., 1.), tf.float32)
+
+
+class MLP(Model):
+ """
+ An example of a bare bones multilayer perceptron (MLP) class.
+ """
+
+ def __init__(self, layers, input_shape):
+ super(MLP, self).__init__()
+
+ self.layer_names = []
+ self.layers = layers
+ self.input_shape = input_shape
+ if isinstance(layers[-1], Softmax):
+ layers[-1].name = 'probs'
+ layers[-2].name = 'logits'
+ else:
+ layers[-1].name = 'logits'
+ for i, layer in enumerate(self.layers):
+ if hasattr(layer, 'name'):
+ name = layer.name
+ else:
+ name = layer.__class__.__name__ + str(i)
+ self.layer_names.append(name)
+
+ layer.set_input_shape(input_shape, False)
+ input_shape = layer.get_output_shape()
+ print(self.layer_names)
+
+ def fprop(self, x, reuse, set_ref=False):
+ states = []
+ for layer in self.layers:
+ if set_ref:
+ layer.ref = x
+ x = layer.fprop(x, reuse)
+ assert x is not None
+ states.append(x)
+ states = dict(zip(self.get_layer_names(), states))
+ return states
+
+# special distilled model class consisting of a teacher and a student model
+class distilledModel(Model):
+ """
+  A distilled model that pairs a teacher network with a student network.
+ """
+
+ def __init__(self, teacher_layers, student_layers, input_shape):
+ super(distilledModel, self).__init__()
+
+ self.layer_names = []
+ self.teacher_layers = teacher_layers
+ self.student_layers = student_layers
+ self.input_shape = input_shape
+ original_input_shape = input_shape
+ if isinstance(teacher_layers[-1], Softmax):
+ teacher_layers[-1].name = 'teacher_probs'
+ teacher_layers[-2].name = 'teacher_logits'
+ else:
+      teacher_layers[-1].name = 'teacher_logits'
+ for i, layer in enumerate(self.teacher_layers):
+ if hasattr(layer, 'name'):
+ name = layer.name
+ else:
+ name = layer.__class__.__name__ + str(i)
+ self.layer_names.append(name)
+
+ layer.set_input_shape(input_shape, False)
+ input_shape = layer.get_output_shape()
+
+ input_shape = original_input_shape
+ if isinstance(student_layers[-1], Softmax):
+ student_layers[-1].name = 'probs'
+ student_layers[-2].name = 'logits'
+ else:
+ student_layers[-1].name = 'logits'
+ for i, layer in enumerate(self.student_layers):
+ if hasattr(layer, 'name'):
+ name = layer.name
+ else:
+ name = layer.__class__.__name__ + str(i)
+ self.layer_names.append(name)
+
+ layer.set_input_shape(input_shape, False)
+ input_shape = layer.get_output_shape()
+
+ print(self.layer_names)
+
+ def fprop(self, x, reuse, set_ref=False):
+ states = []
+ original_x = x
+ for layer in self.teacher_layers:
+ if set_ref:
+ layer.ref = x
+ x = layer.fprop(x, reuse)
+ assert x is not None
+ states.append(x)
+ x = original_x
+ num_student_layers = len(self.student_layers)
+ layer_count = 0
+ for layer in self.student_layers:
+ if set_ref:
+ layer.ref = x
+ x = layer.fprop(x, reuse)
+ assert x is not None
+ states.append(x)
+ layer_count = layer_count + 1
+ states = dict(zip(self.get_layer_names(), states))
+ return states
+
+# ensembleThreeModel class build on Model class that forms ensemble of three models
+class ensembleThreeModel(Model):
+ """
+  An ensemble of three models whose predictions are combined by majority voting.
+ """
+
+  def __init__(self, layers1, layers2, layers3, input_shape, num_classes): # layers1, layers2, layers3: layers of the three member models
+ super(ensembleThreeModel, self).__init__()
+
+ self.layer_names = []
+ self.layers1 = layers1
+ self.layers2 = layers2
+ self.layers3 = layers3
+ self.input_shape = input_shape
+ self.num_classes = num_classes
+ original_input_shape = input_shape
+ if isinstance(layers1[-1], Softmax):
+ layers1[-1].name = 'probs'
+ layers1[-2].name = 'logits'
+ else:
+ layers1[-1].name = 'logits'
+ # First model
+ for i, layer in enumerate(self.layers1):
+ if hasattr(layer, 'name'):
+ if layer.name == 'probs' or layer.name == 'logits':
+ name = layer.name
+ else:
+ name = 'Model1_' + layer.name
+ else:
+ name = 'Model1_' + layer.__class__.__name__ + str(i)
+ self.layer_names.append(name)
+
+ layer.set_input_shape(input_shape, False)
+ input_shape = layer.get_output_shape()
+
+ input_shape = original_input_shape
+ # Second model
+ if isinstance(layers2[-1], Softmax):
+ layers2[-1].name = 'probs'
+ layers2[-2].name = 'logits'
+ else:
+ layers2[-1].name = 'logits'
+ for i, layer in enumerate(self.layers2):
+ if hasattr(layer, 'name'):
+ if layer.name == 'probs' or layer.name == 'logits':
+ name = layer.name
+ else:
+ name = 'Model2_' + layer.name
+ else:
+ name = 'Model2_' + layer.__class__.__name__ + str(i)
+ self.layer_names.append(name)
+
+ layer.set_input_shape(input_shape, False)
+ input_shape = layer.get_output_shape()
+ input_shape = original_input_shape
+ # Third model
+ if isinstance(layers3[-1], Softmax):
+ layers3[-1].name = 'probs'
+ layers3[-2].name = 'logits'
+ else:
+ layers3[-1].name = 'logits'
+ for i, layer in enumerate(self.layers3):
+ if hasattr(layer, 'name'):
+ if layer.name == 'probs' or layer.name == 'logits':
+ name = layer.name
+ else:
+ name = 'Model3_' + layer.name
+ else:
+ name = 'Model3_' + layer.__class__.__name__ + str(i)
+ self.layer_names.append(name)
+
+ layer.set_input_shape(input_shape, False)
+ input_shape = layer.get_output_shape()
+ self.layer_names.append('combined_features')
+ self.layer_names.append('combined_logits')
+
+ combined_layer_name = 'combined' ## Gives the final class prediction based on max voting
+ self.layer_names.append(combined_layer_name)
+ combinedCorrectProb_layer_name = 'combinedAvgCorrectProb' ## Gives average probability values of the models that decided the final prediction
+ self.layer_names.append(combinedCorrectProb_layer_name)
+ combinedProb_layer_name = 'combinedAvgProb' ## Gives average probability values of all the models
+ self.layer_names.append(combinedProb_layer_name)
+
+ print(self.layer_names)
+
+ def fprop(self, x, reuse, set_ref=False):
+ states = []
+ original_x = x
+ for layer in self.layers1:
+ if set_ref:
+ layer.ref = x
+ x = layer.fprop(x, reuse)
+ assert x is not None
+ states.append(x)
+
+ output1 = states[-1]
+ features1 = states[-3]
+ x = original_x
+ for layer in self.layers2:
+ if set_ref:
+ layer.ref = x
+ x = layer.fprop(x, reuse)
+ assert x is not None
+ states.append(x)
+
+ features2 = states[-3]
+ output2 = states[-1]
+ x = original_x
+ for layer in self.layers3:
+ if set_ref:
+ layer.ref = x
+ x = layer.fprop(x, reuse)
+ assert x is not None
+ states.append(x)
+ output3 = states[-1]
+ features3 = states[-3]
+
+ states.append(tf.stack((features1, features2, features3), 1))
+ states.append(tf.stack((output1, output2, output3), 1))
+
+ # Find class predictions with each model
+ pred1 = tf.argmax(output1, axis=-1)
+ pred2 = tf.argmax(output2, axis=-1)
+ pred3 = tf.argmax(output3, axis=-1)
+ comb_pred = tf.stack([pred1, pred2, pred3], axis=1)
+ comb_pred = tf.cast(comb_pred, dtype=tf.int32) # converting to int32 as bincount requires int32
+
+ # Find how many times each of the classes are predicted among the three models and identify the max class
+ initial_imidx = 1
+
+    binarray = tf.bincount(comb_pred[0], minlength=self.num_classes)  # initial bincount: how often each class (0 to num_classes-1) is predicted across the three models, returns a 1d array
+ max_class = tf.argmax(binarray, axis=-1)
+ count_max = tf.gather(binarray, max_class) # max vote count for a class
+ rand_idx = np.random.random_integers(3)
+ value = tf.cond(tf.less(count_max, 2), lambda: pred3[0], lambda: max_class)
+ in_class_array = tf.fill([1], value)
+
+ ## Added below to allow better gradient calculation for max voted model
+ in_avgCorrectprob = tf.cond(tf.equal(value, pred3[0]), lambda: output3[0], lambda: tf.zeros_like(output3[0])) # add pred3 if it affected the final decision
+ in_avgCorrectprob = tf.cond(tf.equal(value, pred2[0]), lambda: tf.add(output2[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred2 if it affected the final decision
+    in_avgCorrectprob = tf.cond(tf.equal(value, pred1[0]), lambda: tf.add(output1[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred1 if it affected the final decision
+ in_avgCorrectprob_array = tf.expand_dims(tf.div(in_avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
+
+ #condition check: when true the loop body executes
+ def idx_loop_condition(class_array, avgCorrectprob_array, im_idx):
+ return tf.less(im_idx, tf.shape(pred1)[0])
+
+ #loop body to calculate the max voted class for each image
+ def idx_loop_body(class_array, avgCorrectprob_array, im_idx):
+      binarray_new = tf.bincount(comb_pred[im_idx], minlength=self.num_classes) # how often each class (0 to num_classes-1) is predicted across the three models, returns a 1d array
+ max_class = tf.argmax(binarray_new, axis=-1)
+ count_max = tf.gather(binarray_new, max_class) # max vote count for a class
+ rand_idx = np.random.random_integers(3)
+ value = tf.cond(tf.less(count_max, 2), lambda: pred3[im_idx], lambda: max_class)# If the max vote is less than 2, take the prediction of the full precision model
+ new_array = tf.fill([1], value)
+ class_array = tf.concat([class_array, new_array], 0)
+
+ ## Added below to allow better gradient calculation for max voted model
+ avgCorrectprob = tf.cond(tf.equal(value, pred3[im_idx]), lambda: output3[im_idx], lambda: tf.zeros_like(output3[im_idx])) # add pred3 if it affected the final decision
+ avgCorrectprob = tf.cond(tf.equal(value, pred2[im_idx]), lambda: tf.add(output2[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred2 if it affected the final decision
+      avgCorrectprob = tf.cond(tf.equal(value, pred1[im_idx]), lambda: tf.add(output1[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred1 if it affected the final decision
+ avgCorrectprob = tf.expand_dims(tf.div(avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
+ avgCorrectprob_array = tf.concat([avgCorrectprob_array, avgCorrectprob], 0)
+
+ return (class_array, avgCorrectprob_array, im_idx+1)
+
+ res = tf.while_loop(
+ cond=idx_loop_condition,
+ body=idx_loop_body,
+ loop_vars=[in_class_array, in_avgCorrectprob_array, initial_imidx],
+ shape_invariants=[tf.TensorShape([None]), tf.TensorShape([None, self.num_classes]), tf.TensorShape([])], #add shape invariant saying that the first dimension of in_class_array changes and is thus None
+ )
+    pred_output = tf.cast(res[0], dtype=tf.int64) # max-voted class prediction for each image
+
+ states.append(pred_output)
+
+    avgCorrectprob_output = res[1] # per image, average probabilities of the models that contributed to the final prediction
+
+ states.append(avgCorrectprob_output)
+
+ avgprob = tf.div(tf.add_n([output2, output1, output3]), tf.cast(3, dtype=tf.float32)) # Average probability across all models
+
+ states.append(avgprob)
+
+ states = dict(zip(self.get_layer_names(), states))
+
+ return states
+
+class Layer(object):
+
+ def get_output_shape(self):
+ return self.output_shape
+
+
+class SimpleLinear(Layer):
+
+ def __init__(self, num_hid):
+ self.num_hid = num_hid
+
+ def set_input_shape(self, input_shape, reuse):
+ batch_size, dim = input_shape
+ self.input_shape = [batch_size, dim]
+ self.output_shape = [batch_size, self.num_hid]
+ init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
+ keep_dims=True))
+ self.W = tf.Variable(init)
+ self.b = tf.Variable(np.zeros((self.num_hid,)).astype('float32'))
+
+ def fprop(self, x, reuse):
+ return tf.matmul(x, self.W) + self.b
+
+
+class Linear(Layer):
+
+ def __init__(self, num_hid, detail, useBias=False):
+ self.__dict__.update(locals())
+ # self.num_hid = num_hid
+
+ def set_input_shape(self, input_shape, reuse):
+
+ # with tf.variable_scope(self.scope_name+ 'init', reuse): # this works
+ # with black box, but now can't load checkpoints from wb
+ # this works with white-box
+ with tf.variable_scope(self.detail + self.name + '_init', reuse):
+
+ batch_size, dim = input_shape
+ self.input_shape = [batch_size, dim]
+ self.output_shape = [batch_size, self.num_hid]
+ if self.useBias:
+ self.bias_shape = self.num_hid
+ init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
+ keep_dims=True))
+ self.W = tf.get_variable(
+ "W", initializer=init)
+ W_summ = tf.summary.histogram('W', values=self.W)
+ if self.useBias:
+ bias_init = tf.zeros(self.bias_shape)
+ self.bias =tf.get_variable("b", initializer= bias_init)
+
+ def fprop(self, x, reuse):
+
+ # with tf.variable_scope(self.scope_name + '_fprop', reuse):
+ # this works with white-box
+ with tf.variable_scope(self.detail + self.name + '_fprop', reuse):
+
+ x = tf.matmul(x, self.W) # + self.b
+ if self.useBias:
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class HiddenLinear(Layer):
+
+ def __init__(self, num_hid, scope_name, useBias=False):
+ self.__dict__.update(locals())
+
+ def set_input_shape(self, input_shape, reuse):
+
+ with tf.variable_scope(self.scope_name+ 'init', reuse):
+
+ batch_size, dim = input_shape
+ self.input_shape = [batch_size, dim]
+ self.output_shape = [batch_size, self.num_hid]
+
+ if self.useBias:
+ self.bias_shape = self.num_hid
+
+ init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
+ keep_dims=True))
+ self.W = tf.get_variable(
+ "W", initializer=init)
+
+ if self.useBias:
+ bias_init = tf.zeros(self.bias_shape)
+ self.bias =tf.get_variable("b", initializer= bias_init)
+
+ W_summ = tf.summary.histogram('W', values=self.W)
+
+ def fprop(self, x, reuse):
+
+ with tf.variable_scope(self.scope_name + '_fprop', reuse):
+
+ x = tf.matmul(x, self.W)
+ if self.useBias:
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class HiddenLinear_lowprecision(Layer):
+
+ # def __init__(self, num_hid, scope_name):
+ def __init__(self, wbits, abits, num_hid, scope_name, useBias=False):
+ self.__dict__.update(locals())
+
+ def quantize(self, x, k): ## k= No. of quantized bits
+ n = float(2**k-1) ## Max value representable with k bits
+
+ @tf.custom_gradient ## Can be used to define a custom gradient function
+ def _quantize(x):
+ return tf.round(x*n)/n, lambda dy: dy # Second part is the function evaluated during gradient, identity function
+
+ return _quantize(x)
+
+ def quantizeWt(self, x):
+ x = tf.tanh(x) ## Normalizing weights to [-1, 1]
+ x = x/tf.reduce_max(abs(x))*0.5 + 0.5 ## Normalizing weights to [0, 1]
+    return 2*self.quantize(x, self.wbits) - 1 ## Mapping back to [-1, 1] after quantizing
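+    # Illustrative example (not from the original code): with wbits = 2,
+    # n = 2**2 - 1 = 3, so a normalized weight of 0.7 becomes
+    # round(0.7 * 3) / 3 = 2/3 ≈ 0.667 and is mapped back to 2 * 0.667 - 1 ≈ 0.33.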
+
+ def quantizeAct(self, x):
+ x = tf.clip_by_value(x, 0, 1.0) ## Normalizing activations to [0, 1] --> performed in nonlin(x) function of alexnet-dorefa.py
+ return self.quantize(x, self.abits)
+
+ def set_input_shape(self, input_shape, reuse):
+
+ with tf.variable_scope(self.scope_name+ 'init', reuse): # this works
+ # with black box, but now can't load checkpoints from wb
+ # this works with white-box
+ # with tf.variable_scope(self.detail + self.name + '_init', reuse):
+
+ batch_size, dim = input_shape
+ self.input_shape = [batch_size, dim]
+ self.output_shape = [batch_size, self.num_hid]
+
+ if self.useBias:
+ self.bias_shape = self.num_hid
+ init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
+ keep_dims=True))
+ self.W = tf.get_variable(
+ "W", initializer=init)
+
+ if (self.wbits < 32):
+ self.W = self.quantizeWt(self.W)
+
+ if self.useBias:
+ bias_init = tf.zeros(self.bias_shape)
+        self.bias = tf.get_variable("b", initializer=bias_init)
+
+ W_summ = tf.summary.histogram('W', values=self.W)
+
+ def fprop(self, x, reuse):
+
+ with tf.variable_scope(self.scope_name + '_fprop', reuse):
+ # this works with white-box
+ # with tf.variable_scope(self.detail + self.name + '_fprop', reuse):
+ if self.abits < 32:
+ x = self.quantizeAct(x)
+
+ x = tf.matmul(x, self.W) # + self.b
+ if self.useBias:
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class Conv2DRand(Layer):
+
+ def __init__(self, output_channels, kernel_shape, strides, padding, phase, scope_name):
+ self.__dict__.update(locals())
+ self.G = tf.get_default_graph()
+ del self.self
+
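+  # Samples a value in {-1, +1}: dist (e.g. a Bernoulli class) is
+  # parameterized with probs=hard_sigmoid(x) and its 0/1 sample is mapped to +/-1.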
+ def quantize_rand(self, x, dist):
+ with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
+ return 2 * dist(probs=hard_sigmoid(x)).sample() - 1
+
+ def quantize(self, x):
+ with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
+ return tf.sign(x)
+
+ def set_input_shape(self, input_shape, reuse):
+
+ batch_size, rows, cols, input_channels = input_shape
+ kernel_shape = tuple(self.kernel_shape) + (input_channels,
+ self.output_channels)
+ assert len(kernel_shape) == 4
+ assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
+
+ with tf.variable_scope(self.scope_name + '_init', reuse):
+
+ init = tf.truncated_normal(
+ kernel_shape, stddev=0.2, dtype=tf.float32)
+ self.kernels = tf.get_variable("k", initializer=init)
+ k_summ = tf.summary.histogram(
+ name="k", values=self.kernels)
+
+ from tensorflow.contrib.distributions import MultivariateNormalDiag
+ with self.G.gradient_override_map({"MultivariateNormalDiag": "QuantizeGrad"}):
+ self.kernels = MultivariateNormalDiag(
+ loc=self.kernels).sample()
+
+ k_rand_summ = tf.summary.histogram(
+ name="k_rand", values=self.kernels)
+
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ dummy_output = self.fprop(dummy_batch, False)
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ def fprop(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ x = tf.nn.conv2d(x, self.kernels, (1,) +
+ tuple(self.strides) + (1,), self.padding)
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+
+class Conv2D(Layer):
+
+ def __init__(self, output_channels, kernel_shape, strides, padding, phase, scope_name, useBias=False):
+ self.__dict__.update(locals())
+ self.G = tf.get_default_graph()
+ del self.self
+
+ def quantize(self, x):
+ with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
+ return tf.sign(x)
+
+ def set_input_shape(self, input_shape, reuse):
+
+ batch_size, rows, cols, input_channels = input_shape
+ kernel_shape = tuple(self.kernel_shape) + (input_channels,
+ self.output_channels)
+ assert len(kernel_shape) == 4
+ assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
+
+ with tf.variable_scope(self.scope_name + '_init', reuse):
+
+ init = tf.truncated_normal(
+ kernel_shape, stddev=0.1, dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
+ axis=(0, 1, 2)))
+ self.kernels = tf.get_variable("k", initializer=init)
+ k_summ = tf.summary.histogram(
+ name="k", values=self.kernels)
+
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ # Set output shape using fprop without bias if useBias set
+ if self.useBias:
+ dummy_output = self.fprop_withoutbias(dummy_batch, False)
+ else: #--default below
+ dummy_output = self.fprop(dummy_batch, False)
+
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ if self.useBias:
+ self.bias_shape = self.output_shape
+ bias_init = tf.zeros(self.bias_shape)
+        self.bias = tf.get_variable("b", initializer=bias_init)
+
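+  # Note: the bias has one entry per element of the (1, H, W, C) output shape
+  # rather than one per channel; fprop flattens the activations, adds it, and
+  # reshapes back.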
+ def fprop(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ x = tf.nn.conv2d(x, self.kernels, (1,) +
+ tuple(self.strides) + (1,), self.padding)
+
+ if self.useBias:
+ output_shape = tf.shape(x) # Checking output shape before bias
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+ x = tf.reshape(x, output_shape)
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+ # special function without bias to get output shape
+ def fprop_withoutbias(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ x = tf.nn.conv2d(x, self.kernels, (1,) +
+ tuple(self.strides) + (1,), self.padding)
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class Conv2D_lowprecision(Layer):
+
+ def __init__(self, wbits, abits, output_channels, kernel_shape, strides, padding, phase, scope_name, seed=1, useBatchNorm=False, stocRound=False, useBias=False):
+ self.__dict__.update(locals())
+ self.G = tf.get_default_graph()
+ del self.self
+
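+  # Uniform quantization to 2^k - 1 levels. With stocRound=True a value is
+  # rounded up with probability equal to its fractional part (stochastic
+  # rounding, unbiased in expectation); the custom gradient passes dy through
+  # unchanged (straight-through estimator).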
+ def quantize(self, x, k): ## k= No. of quantized bits
+ n = float(2**k-1) ## Max value representable with k bits
+
+ @tf.custom_gradient ## Can be used to define a custom gradient function
+ def _quantize(x):
+ if self.stocRound: # If stochastic rounding is set
+ xn_int = tf.floor(x*n) # Get integer part
+ xn_frac = tf.subtract(x*n, xn_int) # Get fractional part
+ xn_frac_rand = tf.distributions.Bernoulli(probs=xn_frac, dtype=tf.float32).sample() # Get random number from bernoulli distribution with prob=fractional part value
+ x_q = (xn_int + xn_frac_rand)/n
+
+ return x_q, lambda dy: dy # Second part is the function evaluated during gradient, identity function
+ else:
+ return tf.round(x*n)/n, lambda dy: dy # Second part is the function evaluated during gradient, identity function
+
+ return _quantize(x)
+
+  def quantizeWt(self, x):
+    x = tf.tanh(x) ## Normalizing weights to [-1, 1]
+    x = x/tf.reduce_max(abs(x))*0.5 + 0.5 ## Normalizing weights to [0, 1]
+    return 2*self.quantize(x, self.wbits) - 1 ## Rescaling back to [-1, 1] after quantizing
+
+  def quantizeAct(self, x):
+    x = tf.clip_by_value(x, 0, 1.0) ## Clipping activations to [0, 1] --> performed in nonlin(x) function of alexnet-dorefa.py
+    return self.quantize(x, self.abits)
+
+ def set_input_shape(self, input_shape, reuse):
+
+ batch_size, rows, cols, input_channels = input_shape
+ kernel_shape = tuple(self.kernel_shape) + (input_channels,
+ self.output_channels)
+ assert len(kernel_shape) == 4
+ assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
+
+ with tf.variable_scope(self.scope_name + '_init', reuse):
+
+ if self.wbits < 32:
+ init = tf.truncated_normal(
+ kernel_shape, stddev=0.2, dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
+ axis=(0, 1, 2)))
+ self.kernels = tf.get_variable("k", initializer=init)
+ else:
+ init = tf.truncated_normal(
+ kernel_shape, stddev=0.1, dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
+ axis=(0, 1, 2)))
+ self.kernels = tf.get_variable("k", initializer=init)
+
+ if (self.wbits < 32): ## Quantize if no. of bits less than 32
+ self.kernels = self.quantizeWt(self.kernels)
+ k_bin_summ = tf.summary.histogram(
+ name="k_bin", values=self.kernels)
+
+ k_summ = tf.summary.histogram(
+ name="k", values=self.kernels)
+
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+
+ # Set output shape using fprop without bias if useBias set
+ if self.useBias:
+ dummy_output = self.fprop_withoutbias(dummy_batch, False)
+ else: #--default below
+ dummy_output = self.fprop(dummy_batch, False)
+
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ # setting bias shape
+ if self.useBias:
+ self.bias_shape = self.output_shape
+ bias_init = tf.zeros(self.bias_shape)
+        self.bias = tf.get_variable("b", initializer=bias_init)
+        if self.wbits < 32: ## Quantize if no. of bits less than 32
+          self.bias = self.quantizeWt(self.bias)
+
+ def fprop(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ if self.abits < 32:
+ if self.useBatchNorm: ## Specifies whether we want to use Batch Normalization or not
+ x = tf.contrib.layers.batch_norm(
+ x, epsilon=BN_EPSILON, is_training=self.phase,
+ reuse=reuse, scope=scope)
+ x = self.quantizeAct(x)
+ x = tf.nn.conv2d(x, self.kernels, (1,) +
+ tuple(self.strides) + (1,), self.padding)
+
+ if self.useBias:
+ output_shape = tf.shape(x) # Checking output shape before bias
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+ x = tf.reshape(x, output_shape)
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+ # special function without bias to get output shape
+ def fprop_withoutbias(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ x = tf.nn.conv2d(x, self.kernels, (1,) +
+ tuple(self.strides) + (1,), self.padding)
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class Conv2DGroup(Layer):
+
+ def __init__(self, output_channels, kernel_shape, strides, padding, phase, scope_name, useBias=False):
+ self.__dict__.update(locals())
+ self.G = tf.get_default_graph()
+ del self.self
+
+ def quantize(self, x):
+ with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
+ return tf.sign(x)
+
+ def set_input_shape(self, input_shape, reuse):
+
+ self.input_shape = input_shape
+ batch_size, rows, cols, input_channels = input_shape
+ self.input_channels = input_channels
+ kernel_shape = tuple(self.kernel_shape) + (int(input_channels/2),
+ self.output_channels) # as it is 2 groups, input channel dimension is halved
+ assert len(kernel_shape) == 4
+ assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
+
+ with tf.variable_scope(self.scope_name + '_init', reuse):
+
+ init = tf.variance_scaling_initializer(scale=2., dtype=tf.float32)
+ self.kernels = tf.get_variable("k", shape=kernel_shape, initializer=init)
+ k_summ = tf.summary.histogram(
+ name="k", values=self.kernels)
+
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ if self.useBias:
+ dummy_output = self.fprop_withoutbias(dummy_batch, False)
+ else: #--default below
+ dummy_output = self.fprop(dummy_batch, False)
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ # setting bias shape
+ self.bias_shape = self.output_shape
+
+ # initializing bias
+ if self.useBias:
+ bias_init = tf.zeros(self.bias_shape)
+        self.bias = tf.get_variable("b", initializer=bias_init)
+
+
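+  # Grouped convolution with two groups (as in the original two-GPU AlexNet):
+  # the input channels are split in half, each half is convolved with its own
+  # half of the kernels, and the two results are concatenated along channels.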
+ def fprop(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ ### groupwise convolution
+ x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x = tf.concat([x1, x2], 3)
+
+ # adding bias
+ if self.useBias:
+ output_shape = tf.shape(x) # Checking output shape before bias
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+ if self.padding=="SAME": # Padding same means input and output size equal
+ x = tf.reshape(x, output_shape)
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+ # Special function without bias to get output shape
+ def fprop_withoutbias(self, x, reuse):
+
+ # need variable_scope here because the batch_norm layer creates
+ # variables internally
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ ### groupwise convolution
+ x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x = tf.concat([x1, x2], 3)
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class Conv2DGroup_lowprecision(Layer):
+
+ def __init__(self, wbits, abits, output_channels, kernel_shape, strides, padding, phase, scope_name, seed=1, useBatchNorm=False, stocRound=False, useBias=False):
+ self.__dict__.update(locals())
+ self.G = tf.get_default_graph()
+ del self.self
+
+ def quantize(self, x, k): ## k= No. of quantized bits
+ n = float(2**k-1) ## Max value representable with k bits
+
+ @tf.custom_gradient ## Can be used to define a custom gradient function
+ def _quantize(x):
+ if self.stocRound: # If stochastic rounding is set
+ xn_int = tf.floor(x*n) # Get integer part
+ xn_frac = tf.subtract(x*n, xn_int) # Get fractional part
+ xn_frac_rand = tf.distributions.Bernoulli(probs=xn_frac, dtype=tf.float32).sample() # Get random number from bernoulli distribution with prob=fractional part value
+ x_q = (xn_int + xn_frac_rand)/n
+
+ return x_q, lambda dy: dy # Second part is the function evaluated during gradient, identity function
+ else:
+ return tf.round(x*n)/n, lambda dy: dy # Second part is the function evaluated during gradient, identity function
+
+ return _quantize(x)
+
+  def quantizeWt(self, x):
+    x = tf.tanh(x) ## Normalizing weights to [-1, 1]
+    x = x/tf.reduce_max(abs(x))*0.5 + 0.5 ## Normalizing weights to [0, 1]
+    return 2*self.quantize(x, self.wbits) - 1 ## Rescaling back to [-1, 1] after quantizing
+
+  def quantizeAct(self, x):
+    x = tf.clip_by_value(x, 0, 1.0) ## Clipping activations to [0, 1] --> performed in nonlin(x) function of alexnet-dorefa.py
+    return self.quantize(x, self.abits)
+
+ def set_input_shape(self, input_shape, reuse):
+
+ self.input_shape = input_shape
+ batch_size, rows, cols, input_channels = input_shape
+ self.input_channels = input_channels
+ kernel_shape = tuple(self.kernel_shape) + (int(input_channels/2),
+ self.output_channels) # as it is 2 groups, input channel dimension is halved
+ assert len(kernel_shape) == 4
+ assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
+
+ with tf.variable_scope(self.scope_name + '_init', reuse):
+
+ if self.wbits < 32:
+ init = tf.truncated_normal(
+ kernel_shape, stddev=0.2, dtype=tf.float32)
+ self.kernels = tf.get_variable("k", initializer=init)
+ else:
+ init = tf.truncated_normal(
+ kernel_shape, stddev=0.1, dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
+ axis=(0, 1, 2)))
+ self.kernels = tf.get_variable("k", initializer=init)
+
+ if (self.wbits < 32): ## Quantize if no. of bits less than 32
+ self.kernels = self.quantizeWt(self.kernels)
+ k_bin_summ = tf.summary.histogram(
+ name="k_bin", values=self.kernels)
+ k_summ = tf.summary.histogram(
+ name="k", values=self.kernels)
+
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ # Set output shape using fprop without bias if useBias set
+ if self.useBias:
+ dummy_output = self.fprop_withoutbias(dummy_batch, False)
+ else: #--default below
+ dummy_output = self.fprop(dummy_batch, False)
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ self.bias_shape = self.output_shape
+
+ if self.useBias:
+ bias_init = tf.zeros(self.bias_shape)
+      self.bias = tf.get_variable("b", initializer=bias_init)
+      if self.wbits < 32: ## Quantize if no. of bits less than 32
+        self.bias = self.quantizeWt(self.bias)
+
+
+ def fprop(self, x, reuse):
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ if self.abits < 32:
+ if self.useBatchNorm: ## Specifies whether we want to use Batch Normalization or not
+ x = tf.contrib.layers.batch_norm(
+ x, epsilon=BN_EPSILON, is_training=self.phase,
+ reuse=reuse, scope=scope)
+ x = self.quantizeAct(x)
+ ### groupwise convolution
+ x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x = tf.concat([x1, x2], 3)
+
+ if self.useBias:
+ output_shape = tf.shape(x) # Checking output shape before bias
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+ if self.padding=="SAME": # Padding same means input and output size equal
+ x = tf.reshape(x, output_shape)
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+ # Special function without bias to get output shape
+ def fprop_withoutbias(self, x, reuse):
+ with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
+
+ if self.abits < 32:
+ if self.useBatchNorm: ## Specifies whether we want to use Batch Normalization or not
+ x = tf.contrib.layers.batch_norm(
+ x, epsilon=BN_EPSILON, is_training=self.phase,
+ reuse=reuse, scope=scope)
+ x = self.quantizeAct(x)
+ ### groupwise convolution
+ x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
+ x = tf.concat([x1, x2], 3)
+
+ a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
+ a_summ = tf.summary.histogram('a', values=x)
+ a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
+ a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
+
+ return x
+
+class MaxPool(Layer):
+
+ def __init__ (self, pool_size, strides):
+ self.pool_size = pool_size
+ self.strides = strides
+
+ def set_input_shape(self, input_shape, reuse):
+ self.input_shape = input_shape
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ dummy_output = self.fprop(dummy_batch, False)
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ def fprop(self, x, reuse):
+ return tf.layers.max_pooling2d(x, self.pool_size, self.strides)
+
+class MaxPoolSame(Layer):
+
+ def __init__ (self, pool_size, strides):
+ self.pool_size = pool_size
+ self.strides = strides
+
+ def set_input_shape(self, input_shape, reuse):
+ self.input_shape = input_shape
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ dummy_output = self.fprop(dummy_batch, False)
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ def fprop(self, x, reuse):
+ return tf.layers.max_pooling2d(x, self.pool_size, self.strides, padding='same')
+
+class AvgPool(Layer):
+
+ def __init__ (self, pool_size, strides):
+ self.pool_size = pool_size
+ self.strides = strides
+
+ def set_input_shape(self, input_shape, reuse):
+ self.input_shape = input_shape
+ orig_input_batch_size = input_shape[0]
+ input_shape = list(input_shape)
+ input_shape[0] = 1
+ dummy_batch = tf.zeros(input_shape)
+ dummy_output = self.fprop(dummy_batch, False)
+ output_shape = [int(e) for e in dummy_output.get_shape()]
+ output_shape[0] = 1
+ self.output_shape = tuple(output_shape)
+
+ def fprop(self, x, reuse):
+ return tf.layers.average_pooling2d(x, self.pool_size, self.strides)
+
+class ReLU(Layer):
+
+ def __init__(self):
+ pass
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+
+ def get_output_shape(self):
+ return self.output_shape
+
+ def fprop(self, x, reuse):
+ return tf.nn.relu(x)
+
+
+class SReLU(Layer):
+
+ def __init__(self, scope_name):
+ self.scope_name = scope_name
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+ with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
+ self.activation_scalar = tf.get_variable(
+ "activation_scalar", initializer=0.05, trainable=True)
+
+ def get_output_shape(self):
+ return self.output_shape
+
+ def fprop(self, x, reuse):
+ with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
+ return tf.nn.relu(x) * self.activation_scalar
+
+
+class Softmax(Layer):
+
+ def __init__(self, temperature):
+ self.temperature = temperature
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+
+ def fprop(self, x, reuse):
+ return tf.nn.softmax(x * self.temperature)
+
+
+class SoftmaxT1(Layer):
+
+ def __init__(self):
+ pass
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+
+ def fprop(self, x, reuse):
+ return tf.nn.softmax(x)
+
+
+class Flatten(Layer):
+
+ def __init__(self):
+ pass
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ output_width = 1
+ for factor in shape[1:]:
+ output_width *= factor
+ self.output_width = output_width
+ self.output_shape = [None, output_width]
+
+ def fprop(self, x, reuse):
+ return tf.reshape(x, [-1, self.output_width])
+
+# Local response Norm layer for AlexNet
+class LocalNorm(Layer):
+
+ def __init__(self):
+ self.__dict__.update(locals())
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+
+ def get_output_shape(self):
+ return self.output_shape
+
+ def fprop(self, x, reuse):
+ x = tf.nn.local_response_normalization(x,
+ depth_radius=RADIUS,
+ alpha=ALPHA,
+ beta=BETA,
+ bias=BIAS)
+ return x
+
+# BatchNorm layer for low precision alexnet
+class BatchNorm(Layer):
+
+ def __init__(self, phase, scope_name, mean=None, variance=None, scale=None, offset=None):
+ self.__dict__.update(locals())
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+
+ def get_output_shape(self):
+ return self.output_shape
+
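+  # Two modes: if no precomputed statistics are supplied, a trainable
+  # tf.layers.batch_normalization is used (training); otherwise the given
+  # mean/variance/scale/offset are applied directly (inference with restored
+  # parameters).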
+ def fprop(self, x, reuse):
+
+ # Batch normalization for the training phase
+ if (self.mean is None) and (self.variance is None) and (self.scale is None) and (self.offset is None):
+ with tf.variable_scope(self.scope_name, reuse=tf.AUTO_REUSE): # Adding scope here to help in restoring variables, Saves and restores model
+ x = tf.layers.batch_normalization(x, training=self.phase)
+ else:
+ x = tf.nn.batch_normalization(
+ x, mean=self.mean, variance=self.variance,
+ scale=self.scale, offset=self.offset, variance_epsilon=BN_EPSILON)
+ return x
+
+## dropout layer for alexnet
+class DropOut(Layer):
+
+ def __init__(self, keep_prob, phase):
+ self.__dict__.update(locals())
+ self.G = tf.get_default_graph()
+ del self.self
+
+ def set_input_shape(self, shape, reuse):
+ self.input_shape = shape
+ self.output_shape = shape
+
+ def get_output_shape(self):
+ return self.output_shape
+
+ def fprop(self, x, reuse):
+ return tf.cond(self.phase, lambda: tf.nn.dropout(x, self.keep_prob), lambda: tf.identity(x)) # Dropout during training phase but not during test phase
+
+######################### full-precision #########################
+def make_basic_cnn(phase, temperature, detail, nb_filters=64, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+ layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail + 'conv1'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail + 'conv2'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail + 'conv3'),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making basic cnn')
+ return model
+
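+# Minimal usage sketch (hypothetical placeholder names; assumes the MLP class
+# follows the cleverhans Model interface and exposes get_probs):
+#   phase = tf.placeholder(tf.bool, name='phase')  # True during training
+#   x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
+#   model = make_basic_cnn(phase, temperature=1.0, detail='fp_')
+#   preds = model.get_probs(x)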
+
+def make_scaled_rand_cnn(phase, temperature, detail, nb_filters=64, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+ layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail + 'conv1'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail + 'conv2'),
+ ReLU(),
+ Conv2DRand(nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail + 'conv3'),
+ SReLU(detail + 'srelu3_fp'),
+ Flatten(),
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+  print('Finished making scaled rand cnn')
+ return model
+
+# distilled model
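+# distilledModel builds the teacher and student layer stacks for the same input shape.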
+def make_distilled_cnn(phase, temperature, detail1, detail2, nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1)):
+  # make a full precision teacher cnn
+ teacher_layers = [Conv2D(nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail1 + 'conv1'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail1 + 'conv2_bin'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail1 + 'conv3_bin'),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail1),
+ Softmax(temperature)] # Hard probs (default)
+  # make a full precision student cnn
+ student_layers = [Conv2D(nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail2 + 'conv1'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail2 + 'conv2'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail2 + 'conv3'),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail2),
+ Softmax(temperature)] # Hard probs (default)
+
+ model = distilledModel(teacher_layers, student_layers, input_shape)
+ print('Finished making a distilled cnn')
+
+ return model
+
+################## low precision version of mnist cnn #################
+def make_basic_lowprecision_cnn(phase, temperature, detail, wbits, abits, nb_filters=64, nb_classes=10,
+ input_shape=(None, 28, 28, 1), useBatchNorm=False, stocRound=False):
+
+ layers = [Conv2D_lowprecision(wbits, abits, nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail + 'conv1', useBatchNorm=useBatchNorm, stocRound=stocRound),
+ ReLU(),
+ Conv2D_lowprecision(wbits, abits, nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail + 'conv2_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
+ ReLU(),
+ Conv2D_lowprecision(wbits, abits, nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail + 'conv3_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making basic low precision cnn: %d weight bits, %d activation bits' %(wbits, abits))
+ return model
+
+# Variant of low precision supporting different precisions for different layers
+def make_layerwise_lowprecision_cnn(phase, temperature, detail, wbits, abits, nb_filters=64,
+ nb_classes=10, input_shape=(None, 28, 28, 1),
+ useBatchNorm=False, stocRound=False):
+ layers = [Conv2D_lowprecision(wbits[0], abits[0], nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail + 'conv1', useBatchNorm=useBatchNorm, stocRound=stocRound),
+ ReLU(),
+ Conv2D_lowprecision(wbits[1], abits[1], nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail + 'conv2_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
+ ReLU(),
+ Conv2D_lowprecision(wbits[2], abits[2], nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail + 'conv3_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making layerwise low precision cnn: %d %d %d weight bits, %d %d %d activation bits' %(wbits[0], wbits[1], wbits[2], abits[0], abits[1], abits[2]))
+ return model
+
+
+################## EMPIR version of mnist cnn #################
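+# EMPIR ensembles three models: two low-precision CNNs (wbits1/abits1 and
+# wbits2/abits2) and one full-precision CNN, combined by ensembleThreeModel.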
+def make_ensemble_three_cnn(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1), useBatchNorm=False):
+ # make one low precision cnn with wbits precision weights and abits activations
+ layers1 = [Conv2D_lowprecision(wbits1, abits1, nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail1 + 'conv1', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits1, abits1, nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail1 + 'conv2_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits1, abits1, nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail1 + 'conv3_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail1),
+ Softmax(temperature)]
+ # make another low precision cnn with wbits precision weights and abits activations
+ layers2 = [Conv2D_lowprecision(wbits2, abits2, nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail2 + 'conv1', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits2, abits2, nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail2 + 'conv2_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits2, abits2, nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail2 + 'conv3_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail2),
+ Softmax(temperature)]
+
+  # make a full precision cnn with full precision weights and activations
+ layers3 = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail3 + 'conv1'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail3 + 'conv2'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail3 + 'conv3'),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail3),
+ Softmax(temperature)]
+
+ model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
+ print('Finished making ensemble of three cnns')
+
+ return model
+
+def make_ensemble_three_cnn_layerwise(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1), useBatchNorm=False):
+ # make one low precision cnn with wbits precision weights and abits activations
+ layers1 = [Conv2D_lowprecision(wbits1[0], abits1[0], nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail1 + 'conv1', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits1[1], abits1[1], nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail1 + 'conv2_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits1[2], abits1[2], nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail1 + 'conv3_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail1),
+ Softmax(temperature)]
+ # make another low precision cnn with wbits precision weights and abits activations
+ layers2 = [Conv2D_lowprecision(wbits2[0], abits2[0], nb_filters, (8, 8),
+ (2, 2), "SAME", phase, detail2 + 'conv1', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits2[1], abits2[1], nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail2 + 'conv2_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Conv2D_lowprecision(wbits2[2], abits2[2], nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail2 + 'conv3_bin', useBatchNorm=useBatchNorm),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail2),
+ Softmax(temperature)]
+
+  # make a full precision cnn with full precision weights and activations
+ layers3 = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail3 + 'conv1'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (6, 6),
+ (2, 2), "VALID", phase, detail3 + 'conv2'),
+ ReLU(),
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "VALID", phase, detail3 + 'conv3'),
+ ReLU(),
+ Flatten(),
+ Linear(nb_classes, detail3),
+ Softmax(temperature)]
+
+  model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
+ print('Finished making ensemble of three cnns')
+
+ return model
+
+################# full-precision cifar cnn ############################
+def make_basic_cifar_cnn(phase, temperature, detail, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+ layers = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail + 'conv1'),
+ MaxPool((3, 3), (2, 2)),
+ ReLU(),
+ Conv2D(nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv2'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)),
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)),
+ Flatten(),
+ HiddenLinear(64, detail + 'ip1'),
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+  print('Finished making basic cifar cnn')
+ return model
+
+################## distilled version of cifar cnn #################
+def make_distilled_cifar_cnn(phase, temperature, detail1, detail2, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+ teacher_layers = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail1 + 'conv1'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D(nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv2'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail1 + 'ip1'),
+ Linear(nb_classes, detail1),
+ Softmax(temperature)]
+
+ student_layers = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail2 + 'conv1'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D(nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv2'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail2 + 'ip1'),
+ Linear(nb_classes, detail2),
+ Softmax(temperature)]
+
+ model = distilledModel(teacher_layers, student_layers, input_shape)
+ print('Finished making distilled cifar cnn')
+ return model
+
+
+################## low precision version of cifar cnn #################
+def make_basic_lowprecision_cifar_cnn(phase, temperature, detail, wbits, abits, nb_filters=64, nb_classes=10,
+ input_shape=(None, 28, 28, 1), stocRound=False):
+
+ layers = [Conv2D_lowprecision(wbits, abits, nb_filters, (5, 5), (1, 1), "SAME", phase, detail + 'conv1', stocRound=stocRound), # VALID padding means no padding, SAME means padding by (k-1)/2
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits, abits, nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv2', stocRound=stocRound),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D_lowprecision(wbits, abits, nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv3', stocRound=stocRound),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail + 'ip1'),
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making basic low precision cnn: %d weight bits, %d activation bits' %(wbits, abits))
+ return model
+
+def make_layerwise_lowprecision_cifar_cnn(phase, temperature, detail, wbits, abits, nb_filters=64, nb_classes=10,
+ input_shape=(None, 28, 28, 1), stocRound=False):
+
+ layers = [Conv2D_lowprecision(wbits[0], abits[0], nb_filters, (5, 5), (1, 1), "SAME", phase, detail + 'conv1', stocRound=stocRound),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits[1], abits[1], nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv2', stocRound=stocRound),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D_lowprecision(wbits[2], abits[2], nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv3', stocRound=stocRound),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making layerwise low precision cnn %d %d %d weight bits, %d %d %d activation bits' %(wbits[0], wbits[1], wbits[2], abits[0], abits[1], abits[2]))
+ return model
+
+################## EMPIR version of cifar cnn #################
+def make_ensemble_three_cifar_cnn(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10, input_shape=(None, 28, 28, 1)):
+  # make a low precision cnn with wbits1-bit weights and abits1-bit activations
+ layers1 = [Conv2D_lowprecision(wbits1, abits1, nb_filters, (5, 5), (1, 1), "SAME", phase, detail1 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits1, abits1, nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv2'),
+ ReLU(),
+             AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D_lowprecision(wbits1, abits1, nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail1 + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail1),
+ Softmax(temperature)]
+
+  # make another low precision cnn with wbits2-bit weights and abits2-bit activations
+ layers2 = [Conv2D_lowprecision(wbits2, abits2, nb_filters, (5, 5), (1, 1), "SAME", phase, detail2 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits2, abits2, nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv2'),
+ ReLU(),
+             AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D_lowprecision(wbits2, abits2, nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail2 + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail2),
+ Softmax(temperature)]
+
+  # make a full precision cnn with full precision weights and activations
+ layers3 = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail3 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D(nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail3 + 'conv2'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail3 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail3 + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail3),
+ Softmax(temperature)]
+
+ model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
+ print('Finished making ensemble of three cnns')
+
+ return model
+
+def make_ensemble_three_cifar_cnn_layerwise(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+  # make a low precision cnn with layerwise wbits1 weights and abits1 activations
+ layers1 = [Conv2D_lowprecision(wbits1[0], abits1[0], nb_filters, (5, 5), (1, 1), "SAME", phase, detail1 + 'conv1'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits1[1], abits1[1], nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv2'),
+ ReLU(),
+             AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D_lowprecision(wbits1[2], abits1[2], nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail1 + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail1),
+ Softmax(temperature)]
+
+  # make another low precision cnn with layerwise wbits2 weights and abits2 activations
+ layers2 = [Conv2D_lowprecision(wbits2[0], abits2[0], nb_filters, (5, 5), (1, 1), "SAME", phase, detail2 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits2[1], abits2[1], nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv2'),
+ ReLU(),
+             AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D_lowprecision(wbits2[2], abits2[2], nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail2 + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail2),
+ Softmax(temperature)]
+
+  # make a full precision cnn with full precision weights and activations
+ layers3 = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail3 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D(nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail3 + 'conv2'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Conv2D(nb_filters * 2, (5, 5),
+ (1, 1), "SAME", phase, detail3 + 'conv3'),
+ ReLU(),
+ AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ Flatten(),
+ HiddenLinear(64, detail3 + 'ip1'), # first f.c. layer
+ Linear(nb_classes, detail3),
+ Softmax(temperature)]
+
+  model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
+ print('Finished making ensemble of three cifar cnns')
+
+ return model
+
+######################### full-precision alexnet for Imagenet #########################
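+# With the default nb_filters=32 the convolution channel counts (96, 256, 384,
+# 384, 256) and the two 4096-unit fully connected layers match the original
+# AlexNet.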
+def make_basic_alexnet_from_scratch(phase, temperature, detail, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+
+ layers = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup(8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv2'),
+ BatchNorm(phase, detail + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2D(12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv3'),
+ BatchNorm(phase, detail + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2DGroup(12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv4'),
+ BatchNorm(phase, detail + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup(8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv5'),
+ BatchNorm(phase, detail + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)),
+ ReLU(),
+ Flatten(),
+ HiddenLinear(4096, detail + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear(4096, detail + 'ip2', useBias=False),
+ BatchNorm(phase, detail + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail, useBias=True),
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making basic alexnet')
+ return model
+
+################## low precision version of alexnet #################
+def make_basic_lowprecision_alexnet(phase, temperature, detail, wbits, abits, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+
+ layers = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits, abits, 8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv2'), # useBatchNorm not set here
+ BatchNorm(phase, detail + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits, abits, 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv3'),
+ BatchNorm(phase, detail + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits, abits, 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv4'),
+ BatchNorm(phase, detail + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits, abits, 8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv5'),
+ BatchNorm(phase, detail + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Flatten(),
+ HiddenLinear_lowprecision(wbits, abits, 4096, detail + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear_lowprecision(wbits, abits, 4096, detail + 'ip2'),
+ BatchNorm(phase, detail + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail, useBias=True), # Last layer is not quantized
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making basic alexnet of low precision')
+ return model
+
+def make_layerwise_lowprecision_alexnet(phase, temperature, detail, wbits, abits, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+
+ layers = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits[0], abits[0], 8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail + 'conv2'), # useBatchNorm not set here
+ BatchNorm(phase, detail + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits[1], abits[1], 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv3'),
+ BatchNorm(phase, detail + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits[2], abits[2], 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv4'),
+ BatchNorm(phase, detail + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits[3], abits[3], 8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail + 'conv5'),
+ BatchNorm(phase, detail + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Flatten(),
+ HiddenLinear_lowprecision(wbits[4], abits[4], 4096, detail + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear_lowprecision(wbits[5], abits[5], 4096, detail + 'ip2'),
+ BatchNorm(phase, detail + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail, useBias=True), # Last layer is not quantized
+ Softmax(temperature)]
+
+ model = MLP(layers, input_shape)
+ print('Finished making layerwise alexnet of low precision')
+ return model
+
+################## EMPIR version of alexnet #################
+def make_ensemble_three_alexnet(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1), useBatchNorm=False):
+ # make a low precision cnn
+ layers1 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail1 + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits1, abits1, 8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv2'),
+ BatchNorm(phase, detail1 + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2D_lowprecision(wbits1, abits1, 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail1 + 'conv3'),
+ BatchNorm(phase, detail1 + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits1, abits1, 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail1 + 'conv4'),
+ BatchNorm(phase, detail1 + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits1, abits1, 8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail1 + 'conv5'),
+ BatchNorm(phase, detail1 + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)),
+ ReLU(),
+ Flatten(),
+ HiddenLinear_lowprecision(wbits1, abits1, 4096, detail1 + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail1 + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear_lowprecision(wbits1, abits1, 4096, detail1 + 'ip2', useBias=False),
+ BatchNorm(phase, detail1 + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail1, useBias=True),
+ Softmax(temperature)]
+
+ # make another low precision cnn
+ layers2 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail2 + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits2, abits2, 8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv2'),
+ BatchNorm(phase, detail2 + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2D_lowprecision(wbits2, abits2, 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail2 + 'conv3'),
+ BatchNorm(phase, detail2 + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits2, abits2, 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail2 + 'conv4'),
+ BatchNorm(phase, detail2 + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits2, abits2, 8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail2 + 'conv5'),
+ BatchNorm(phase, detail2 + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)),
+ ReLU(),
+ Flatten(),
+ HiddenLinear_lowprecision(wbits2, abits2, 4096, detail2 + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail2 + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear_lowprecision(wbits2, abits2, 4096, detail2 + 'ip2', useBias=False),
+ BatchNorm(phase, detail2 + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail2, useBias=True), # Last layer is not quantized
+ Softmax(temperature)]
+
+ # make a full precision cnn with full precision weights and activations
+ layers3 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail3 + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup(8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail3 + 'conv2'),
+ BatchNorm(phase, detail3 + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2D(12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail3 + 'conv3'),
+ BatchNorm(phase, detail3 + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2DGroup(12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail3 + 'conv4'),
+ BatchNorm(phase, detail3 + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup(8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail3 + 'conv5'),
+ BatchNorm(phase, detail3 + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)),
+ ReLU(),
+ Flatten(),
+ HiddenLinear(4096, detail3 + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail3 + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear(4096, detail3 + 'ip2', useBias=False),
+ BatchNorm(phase, detail3 + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail3, useBias=True),
+ Softmax(temperature)]
+
+ model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
+ print('Finished making ensemble of three cnns')
+
+ return model
+
+def make_ensemble_three_alexnet_layerwise(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10,
+ input_shape=(None, 28, 28, 1)):
+ # make a low precision cnn
+ layers1 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail1 + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits1[0], abits1[0], 8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail1 + 'conv2'),
+ BatchNorm(phase, detail1 + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits1[1], abits1[1], 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail1 + 'conv3'),
+ BatchNorm(phase, detail1 + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits1[2], abits1[2], 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail1 + 'conv4'),
+ BatchNorm(phase, detail1 + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits1[3], abits1[3], 8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail1 + 'conv5'),
+ BatchNorm(phase, detail1 + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Flatten(),
+ HiddenLinear_lowprecision(wbits1[4], abits1[4], 4096, detail1 + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail1 + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear_lowprecision(wbits1[5], abits1[5], 4096, detail1 + 'ip2'),
+ BatchNorm(phase, detail1 + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail1, useBias=True), # Last layer is not quantized
+ Softmax(temperature)]
+
+ # make another low precision cnn
+ layers2 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail2 + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits2[0], abits2[0], 8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail2 + 'conv2'), # useBatchNorm not set here
+ BatchNorm(phase, detail2 + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2D_lowprecision(wbits2[1], abits2[1], 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail2 + 'conv3'),
+ BatchNorm(phase, detail2 + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits2[2], abits2[2], 12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail2 + 'conv4'),
+ BatchNorm(phase, detail2 + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup_lowprecision(wbits2[3], abits2[3], 8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail2 + 'conv5'),
+ BatchNorm(phase, detail2 + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
+ ReLU(),
+ Flatten(),
+ HiddenLinear_lowprecision(wbits2[4], abits2[4], 4096, detail2 + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail2 + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear_lowprecision(wbits2[5], abits2[5], 4096, detail2 + 'ip2'),
+ BatchNorm(phase, detail2 + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail2, useBias=True), # Last layer is not quantized
+ Softmax(temperature)]
+
+ # make a full precision cnn with full precision weights and activations
+ layers3 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail3 + 'conv1', useBias=True),
+ ReLU(),
+ Conv2DGroup(8*nb_filters, (5, 5),
+ (1, 1), "SAME", phase, detail3 + 'conv2'),
+ BatchNorm(phase, detail3 + '_batchNorm1'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2D(12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail3 + 'conv3'),
+ BatchNorm(phase, detail3 + '_batchNorm2'),
+ MaxPoolSame((3, 3), (2, 2)),
+ ReLU(),
+ Conv2DGroup(12*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail3 + 'conv4'),
+ BatchNorm(phase, detail3 + '_batchNorm3'),
+ ReLU(),
+ Conv2DGroup(8*nb_filters, (3, 3),
+ (1, 1), "SAME", phase, detail3 + 'conv5'),
+ BatchNorm(phase, detail3 + '_batchNorm4'),
+ MaxPool((3, 3), (2, 2)),
+ ReLU(),
+ Flatten(),
+ HiddenLinear(4096, detail3 + 'ip1', useBias=True), # first f.c. layer
+ BatchNorm(phase, detail3 + '_batchNorm5'),
+ ReLU(),
+ HiddenLinear(4096, detail3 + 'ip2', useBias=False),
+ BatchNorm(phase, detail3 + '_batchNorm6'),
+ ReLU(),
+ Linear(nb_classes, detail3, useBias=True),
+ Softmax(temperature)]
+
+ model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
+ print('Finished making ensemble of three models')
+
+ return model
diff --git a/case_studies/empir/examples/__init__.py b/case_studies/empir/examples/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/empir/examples/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/empir/examples/alexnet_attack.py b/case_studies/empir/examples/alexnet_attack.py
new file mode 100644
index 0000000..11cf233
--- /dev/null
+++ b/case_studies/empir/examples/alexnet_attack.py
@@ -0,0 +1,753 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
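+# Attack evaluation for the EMPIR AlexNet models on ImageNet: builds a full-precision,
+# low-precision, or three-model ensemble AlexNet, optionally restores pretrained
+# checkpoints, and reports clean and adversarial accuracy for the selected attack.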
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import keras
+from keras import backend
+from keras.utils import np_utils
+
+import os
+import argparse
+import logging
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+
+import sys
+sys.path.insert(0, "/home/consus/a/sen9/verifiedAI/cleverhans_EMPIR")
+
+#from modified_cleverhans.attacks import fgsm
+from modified_cleverhans.utils import set_log_level, parse_model_settings, build_model_save_path
+from modified_cleverhans.attacks import FastGradientMethod
+from modified_cleverhans.utils_keras import cnn_model
+from modified_cleverhans.utils_tf import batch_eval, tf_model_load
+from modified_cleverhans.utils_tf import model_train_imagenet, model_eval_imagenet, model_eval_ensemble_imagenet, model_eval_adv_imagenet, model_eval_ensemble_adv_imagenet
+from examples import imagenet_preprocessing #for imagenet preprocessing
+
+from collections import OrderedDict
+
+FLAGS = flags.FLAGS
+
+ATTACK_CARLINI_WAGNER_L2 = 0
+ATTACK_JSMA = 1
+ATTACK_FGSM = 2
+ATTACK_MADRYETAL = 3
+ATTACK_BASICITER = 4
+MAX_BATCH_SIZE = 100
+
+# enum adversarial training types
+ADVERSARIAL_TRAINING_MADRYETAL = 1
+ADVERSARIAL_TRAINING_FGSM = 2
+MAX_EPS = 0.3
+
+# Scaling input to softmax
+INIT_T = 1.0
+
+#ATTACK_T = 1.0
+ATTACK_T = 0.25
+
+_DEFAULT_IMAGE_SIZE = 224
+_NUM_CHANNELS = 3
+_NUM_CLASSES = 1000
+
+_NUM_TRAIN_FILES = 1024
+_SHUFFLE_BUFFER = 10000
+
+def get_filenames(is_training, data_dir):
+ """Return filenames for dataset."""
+ if is_training:
+ return [
+ os.path.join(data_dir, 'Train-%05d-of-01024' % i)
+ for i in range(_NUM_TRAIN_FILES)]
+ else:
+ return [
+ os.path.join(data_dir, 'Val-%05d-of-00128' % i)
+ for i in range(128)]
+
+def _parse_example_proto(example_serialized):
+ """Parses an Example proto containing a training example of an image.
+ The output of the build_image_data.py image preprocessing script is a dataset
+ containing serialized Example protocol buffers. Each Example proto contains
+ the following fields (values are included as examples):
+ 'image/height': _int64_feature(height),
+ 'image/width': _int64_feature(width),
+ 'image/colorspace': _bytes_feature(colorspace),
+ 'image/channels': _int64_feature(channels),
+ 'image/class/label': _int64_feature(label),
+ 'image/class/synset': _bytes_feature(synset),
+ 'image/format': _bytes_feature(image_format),
+ 'image/filename': _bytes_feature(os.path.basename(filename)),
+ 'image/encoded': _bytes_feature(image_buffer)}))
+
+ Args:
+ example_serialized: scalar Tensor tf.string containing a serialized
+ Example protocol buffer.
+ Returns:
+ image_buffer: Tensor tf.string containing the contents of a JPEG file.
+ label: Tensor tf.int32 containing the label.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ """
+ # Dense features in Example proto.
+ feature_map = {
+ 'image/height': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/width': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/colorspace': tf.VarLenFeature(dtype=tf.string),
+ 'image/channels': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/class/synset': tf.VarLenFeature(dtype=tf.string),
+ 'image/format': tf.VarLenFeature(dtype=tf.string),
+ 'image/filename': tf.VarLenFeature(dtype=tf.string),
+ 'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
+ }
+ features = tf.parse_single_example(example_serialized, feature_map)
+ label = tf.cast(features['image/class/label'], dtype=tf.int32)
+ one_hot_label = tf.one_hot(label, _NUM_CLASSES, 1, 0) #convert it to a one_hot vector
+
+ # Directly fixing values of min and max
+ xmin = tf.expand_dims([0.0], 0)
+ ymin = tf.expand_dims([0.0], 0)
+ xmax = tf.expand_dims([1.0], 0)
+ ymax = tf.expand_dims([1.0], 0)
+
+ # Note that we impose an ordering of (y, x) just to make life difficult.
+ bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
+
+ # Force the variable number of bounding boxes into the shape
+ # [1, num_boxes, coords].
+ bbox = tf.expand_dims(bbox, 0)
+ bbox = tf.transpose(bbox, [0, 2, 1])
+
+ return features['image/encoded'], one_hot_label, bbox
+
+# variant of the above to parse training datasets which have labels from 1 to 1000 instead of 0 to 999
+def _parse_train_example_proto(example_serialized):
+ """Parses an Example proto containing a training example of an image.
+ The output of the build_image_data.py image preprocessing script is a dataset
+ containing serialized Example protocol buffers. Each Example proto contains
+ the following fields (values are included as examples):
+ 'image/height': _int64_feature(height),
+ 'image/width': _int64_feature(width),
+ 'image/colorspace': _bytes_feature(colorspace),
+ 'image/channels': _int64_feature(channels),
+ 'image/class/label': _int64_feature(label),
+ 'image/class/synset': _bytes_feature(synset),
+ 'image/format': _bytes_feature(image_format),
+ 'image/filename': _bytes_feature(os.path.basename(filename)),
+ 'image/encoded': _bytes_feature(image_buffer)}))
+
+ Args:
+ example_serialized: scalar Tensor tf.string containing a serialized
+ Example protocol buffer.
+ Returns:
+ image_buffer: Tensor tf.string containing the contents of a JPEG file.
+ label: Tensor tf.int32 containing the label.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ """
+ # Dense features in Example proto.
+ feature_map = {
+ 'image/height': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/width': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/colorspace': tf.VarLenFeature(dtype=tf.string),
+ 'image/channels': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
+ 'image/class/synset': tf.VarLenFeature(dtype=tf.string),
+ 'image/format': tf.VarLenFeature(dtype=tf.string),
+ 'image/filename': tf.VarLenFeature(dtype=tf.string),
+ 'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
+ }
+ features = tf.parse_single_example(example_serialized, feature_map)
+ label = tf.cast(features['image/class/label'], dtype=tf.int32) -1
+ one_hot_label = tf.one_hot(label, _NUM_CLASSES, 1, 0) #convert it to a one_hot vector
+
+ # Directly fixing values of min and max
+ xmin = tf.expand_dims([0.0], 0)
+ ymin = tf.expand_dims([0.0], 0)
+ xmax = tf.expand_dims([1.0], 0)
+ ymax = tf.expand_dims([1.0], 0)
+
+ # Note that we impose an ordering of (y, x) just to make life difficult.
+ bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
+
+ # Force the variable number of bounding boxes into the shape
+ # [1, num_boxes, coords].
+ bbox = tf.expand_dims(bbox, 0)
+ bbox = tf.transpose(bbox, [0, 2, 1])
+
+ return features['image/encoded'], one_hot_label, bbox
+
+def parse_record(raw_record, is_training, dtype):
+ """Parses a record containing a training example of an image.
+ The input record is parsed into a label and image, and the image is passed
+ through preprocessing steps (cropping, flipping, and so on).
+ Args:
+ raw_record: scalar Tensor tf.string containing a serialized
+ Example protocol buffer.
+ is_training: A boolean denoting whether the input is for training.
+ dtype: data type to use for images/features.
+ Returns:
+ Tuple with processed image tensor and one-hot-encoded label tensor.
+ """
+ if is_training:
+ image_buffer, label, bbox = _parse_train_example_proto(raw_record)
+ else:
+ image_buffer, label, bbox = _parse_example_proto(raw_record)
+
+ image = imagenet_preprocessing.preprocess_image4( # For pretrained Dorefanet network with division by standard deviation
+ image_buffer=image_buffer,
+ bbox=bbox,
+ output_height=_DEFAULT_IMAGE_SIZE,
+ output_width=_DEFAULT_IMAGE_SIZE,
+ num_channels=_NUM_CHANNELS,
+ is_training=is_training)
+
+ image = tf.cast(image, dtype)
+
+ return image, label
+
+def process_record_dataset(dataset,
+ is_training,
+ batch_size,
+ shuffle_buffer,
+ parse_record_fn,
+ num_epochs=1,
+ dtype=tf.float32,
+ datasets_num_private_threads=None,
+ num_parallel_batches=1):
+ """Given a Dataset with raw records, return an iterator over the records.
+ Args:
+ dataset: A Dataset representing raw records
+ is_training: A boolean denoting whether the input is for training.
+ batch_size: The number of samples per batch.
+ shuffle_buffer: The buffer size to use when shuffling records. A larger
+ value results in better randomness, but smaller values reduce startup
+ time and use less memory.
+ parse_record_fn: A function that takes a raw record and returns the
+ corresponding (image, label) pair.
+ num_epochs: The number of epochs to repeat the dataset.
+ dtype: Data type to use for images/features.
+ datasets_num_private_threads: Number of threads for a private
+ threadpool created for all datasets computation.
+ num_parallel_batches: Number of parallel batches for tf.data.
+ Returns:
+ Dataset of (image, label) pairs ready for iteration.
+ """
+
+ # Prefetches a batch at a time to smooth out the time taken to load input
+ # files for shuffling and processing.
+ dataset = dataset.prefetch(buffer_size=batch_size)
+ if is_training:
+ # Shuffles records before repeating to respect epoch boundaries.
+ dataset = dataset.shuffle(buffer_size=shuffle_buffer)
+
+ # Repeats the dataset for the number of epochs to train.
+ # (Note: the corresponding dataset.repeat(num_epochs) call is not present here,
+ # so the num_epochs argument is effectively unused.)
+
+ # Parses the raw records into images and labels.
+ dataset = dataset.apply(
+ tf.contrib.data.map_and_batch(
+ lambda value: parse_record_fn(value, is_training, dtype),
+ batch_size=batch_size,
+ num_parallel_batches=num_parallel_batches))
+
+ # Operations between the final prefetch and the get_next call to the iterator
+ # will happen synchronously during run time. We prefetch here again to
+ # background all of the above processing work and keep it out of the
+ # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
+ # allows DistributionStrategies to adjust how many batches to fetch based
+ # on how many devices are present.
+ # dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
+ dataset = dataset.prefetch(buffer_size=1)
+
+ return dataset
+
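+# data_imagenet() below expects `imagenet_path` to contain `Train/` and `Val/`
+# subdirectories with the TFRecord shards produced by build_image_data.py
+# (named Train-xxxxx-of-01024 and Val-xxxxx-of-00128, see get_filenames() above).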
+def data_imagenet(nb_epochs, batch_size, imagenet_path):
+ """
+ Preprocess Imagenet dataset
+ :return:
+ """
+
+ # Load images from dataset
+ test_dataset = tf.data.TFRecordDataset(get_filenames(is_training=False, data_dir=imagenet_path+'/Val'))
+
+ train_dataset = tf.data.TFRecordDataset(get_filenames(is_training=True, data_dir=imagenet_path+'/Train'))
+
+ train_processed = process_record_dataset(dataset=train_dataset, is_training=True, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, num_epochs = nb_epochs, parse_record_fn=parse_record)
+
+ test_processed = process_record_dataset(dataset=test_dataset, is_training=False, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, parse_record_fn=parse_record)
+
+ return train_processed, test_processed
+
+def main(argv=None):
+ model_path = FLAGS.model_path
+ targeted = True if FLAGS.targeted else False
+ scale = True if FLAGS.scale else False
+ learning_rate = FLAGS.learning_rate
+ nb_filters = FLAGS.nb_filters
+ batch_size = FLAGS.batch_size
+ nb_epochs = FLAGS.nb_epochs
+ delay = FLAGS.delay
+ eps = FLAGS.eps
+ adv = FLAGS.adv
+
+ attack = FLAGS.attack
+ attack_iterations = FLAGS.attack_iterations
+ nb_iter = FLAGS.nb_iter
+
+ #### EMPIR extra flags
+ lowprecision=FLAGS.lowprecision
+ abits=FLAGS.abits
+ wbits=FLAGS.wbits
+ abitsList=FLAGS.abitsList
+ wbitsList=FLAGS.wbitsList
+ stocRound=True if FLAGS.stocRound else False
+ rand=FLAGS.rand
+ model_path2 = FLAGS.model_path2
+ model_path1 = FLAGS.model_path1
+ model_path3 = FLAGS.model_path3
+ ensembleThree=True if FLAGS.ensembleThree else False
+ abits2=FLAGS.abits2
+ wbits2=FLAGS.wbits2
+ abits2List=FLAGS.abits2List
+ wbits2List=FLAGS.wbits2List
+ ####
+
+ save = False
+ train_from_scratch = False
+
+ #### Imagenet flags
+ imagenet_path = FLAGS.imagenet_path
+ if imagenet_path is None:
+ print("Error: Imagenet data path not specified")
+ sys.exit(1)
+
+ # Imagenet specific dimensions
+ img_rows = _DEFAULT_IMAGE_SIZE
+ img_cols = _DEFAULT_IMAGE_SIZE
+ channels = _NUM_CHANNELS
+ nb_classes = _NUM_CLASSES
+
+ # Set TF random seed to improve reproducibility
+ tf.set_random_seed(1234)
+
+ if not hasattr(backend, "tf"):
+ raise RuntimeError("This tutorial requires keras to be configured"
+ " to use the TensorFlow backend.")
+
+ # Image dimensions ordering should follow the Theano convention
+ if keras.backend.image_dim_ordering() != 'tf':
+ keras.backend.set_image_dim_ordering('tf')
+ print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
+ "'th', temporarily setting to 'tf'")
+
+ # Create TF session and set as Keras backend session
+ sess = tf.Session()
+ keras.backend.set_session(sess)
+
+ set_log_level(logging.WARNING)
+
+ # Get imagenet datasets
+ train_dataset, test_dataset = data_imagenet(nb_epochs, batch_size, imagenet_path)
+
+ # Creating initializable iterators for the train and test datasets
+ train_iterator = train_dataset.make_initializable_iterator()
+ test_iterator = test_dataset.make_initializable_iterator()
+
+ # Getting next elements from the iterators
+ next_test_element = test_iterator.get_next()
+ next_train_element = train_iterator.get_next()
+
+ train_x, train_y = train_iterator.get_next()
+ test_x, test_y = test_iterator.get_next()
+
+ # Define input TF placeholder
+ x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
+ y = tf.placeholder(tf.float32, shape=(None, nb_classes))
+ phase = tf.placeholder(tf.bool, name="phase")
+
+ logits_scalar = tf.placeholder_with_default(
+ INIT_T, shape=(), name="logits_temperature")
+
+ if ensembleThree:
+ if (model_path1 is None or model_path2 is None or model_path3 is None):
+ train_from_scratch = True
+ else:
+ train_from_scratch = False
+ elif model_path is not None:
+ if os.path.exists(model_path):
+ # check for existing model in immediate subfolder
+ if any(f.endswith('.meta') for f in os.listdir(model_path)):
+ train_from_scratch = False
+ else:
+ model_path = build_model_save_path(
+ model_path, batch_size, nb_filters, learning_rate, nb_epochs, adv, delay)
+ print(model_path)
+ save = True
+ train_from_scratch = True
+ else:
+ train_from_scratch = True # train from scratch, but don't save since no path given
+
+ if ensembleThree:
+ if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified for first model
+ if (wbits==0) or (abits==0):
+ print("Error: the number of bits for constant precision weights and activations across layers for the first model have to specified using wbits1 and abits1 flags")
+ sys.exit(1)
+ else:
+ fixedPrec1 = 1
+ elif (len(wbitsList) != 6) or (len(abitsList) != 6):
+ print("Error: Need to specify the precisions for activations and weights for the atleast the four convolutional layers of alexnet excluding the first layer and 2 fully connected layers excluding the last layer of the first model")
+ sys.exit(1)
+ else:
+ fixedPrec1 = 0
+
+ if (wbits2List is None) or (abits2List is None): # Layer wise separate quantization not specified for second model
+ if (wbits2==0) or (abits2==0):
+ print("Error: the number of bits for constant precision weights and activations across layers for the second model have to specified using wbits1 and abits1 flags")
+ sys.exit(1)
+ else:
+ fixedPrec2 = 1
+ elif (len(wbits2List) != 6) or (len(abits2List) != 6):
+ print("Error: Need to specify the precisions for activations and weights for the atleast the four convolutional layers of alexnet excluding the first layer and 2 fully connected layers excluding the last layer of the second model")
+ sys.exit(1)
+ else:
+ fixedPrec2 = 0
+
+ if (fixedPrec2 != 1) or (fixedPrec1 != 1): # At least one of the models has separate precisions per layer
+ fixedPrec=0
+ print("Within atleast one model has separate precisions")
+ if (fixedPrec1 == 1): # first model has fixed precision
+ abitsList = (abits, abits, abits, abits, abits, abits)
+ wbitsList = (wbits, wbits, wbits, wbits, wbits, wbits)
+ if (fixedPrec2 == 1): # second model has fixed precision
+ abits2List = (abits2, abits2, abits2, abits2, abits2, abits2)
+ wbits2List = (wbits2, wbits2, wbits2, wbits2, wbits2, wbits2)
+ else:
+ fixedPrec=1
+
+ if (train_from_scratch):
+ print ("The ensemble model cannot be trained from scratch")
+ sys.exit(1)
+ if fixedPrec == 1:
+ from modified_cleverhans_tutorials.tutorial_models import make_ensemble_three_alexnet
+ model = make_ensemble_three_alexnet(
+ phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbits, abits, wbits2, abits2, input_shape=(None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
+ else:
+ from modified_cleverhans_tutorials.tutorial_models import make_layerwise_three_combined_alexnet
+ model = make_layerwise_three_combined_alexnet(
+ phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbitsList, abitsList, wbits2List, abits2List, input_shape=(None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
+ elif lowprecision:
+ if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified
+ if (wbits==0) or (abits==0):
+ print("Error: the number of bits for constant precision weights and activations across layers have to specified using wbits and abits flags")
+ sys.exit(1)
+ else:
+ fixedPrec = 1
+ elif (len(wbitsList) != 6) or (len(abitsList) != 6):
+ print("Error: Need to specify the precisions for activations and weights for the atleast the four convolutional layers of alexnet excluding the first layer and 2 fully connected layers excluding the last layer")
+ sys.exit(1)
+ else:
+ fixedPrec = 0
+
+ if fixedPrec:
+
+ ### For training from scratch
+ from modified_cleverhans_tutorials.tutorial_models import make_basic_lowprecision_alexnet
+ model = make_basic_lowprecision_alexnet(phase, logits_scalar, 'lp_', wbits, abits, input_shape=(
+ None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
+ else:
+ from modified_cleverhans_tutorials.tutorial_models import make_layerwise_lowprecision_alexnet
+ model = make_layerwise_lowprecision_alexnet(phase, logits_scalar, 'lp_', wbitsList, abitsList,
+ input_shape=(None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
+ else:
+ ### For training from scratch
+ from modified_cleverhans_tutorials.tutorial_models import make_basic_alexnet_from_scratch
+ model = make_basic_alexnet_from_scratch(phase, logits_scalar, 'fp_', input_shape=(
+ None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
+
+ # separate calling function for ensemble models
+ if ensembleThree:
+ preds = model.ensemble_call(x, reuse=False)
+ else:
+ ##default
+ preds = model(x, reuse=False)
+ print("Defined TensorFlow model graph.")
+
+ rng = np.random.RandomState([2017, 8, 30])
+
+ def evaluate():
+ # Evaluate the accuracy of the ImageNet model on legitimate test
+ # examples
+ eval_params = {'batch_size': batch_size}
+ if ensembleThree:
+ acc = model_eval_ensemble_imagenet(
+ sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, args=eval_params)
+ else: #default below
+ acc = model_eval_imagenet(
+ sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, args=eval_params)
+ print('Test accuracy on legitimate examples: %0.4f' % acc)
+
+ # Train an Imagenet model
+ train_params = {
+ 'lowprecision': lowprecision,
+ 'nb_epochs': nb_epochs,
+ 'batch_size': batch_size,
+ 'learning_rate': learning_rate,
+ 'loss_name': 'train loss',
+ 'filename': 'model',
+ 'reuse_global_step': False,
+ 'train_scope': 'train',
+ 'is_training': True
+ }
+
+ if adv != 0:
+ if adv == ADVERSARIAL_TRAINING_MADRYETAL:
+ from modified_cleverhans.attacks import MadryEtAl
+ train_attack_params = {'eps': MAX_EPS, 'eps_iter': 0.01,
+ 'nb_iter': nb_iter}
+ train_attacker = MadryEtAl(model, sess=sess)
+
+ elif adv == ADVERSARIAL_TRAINING_FGSM:
+ from modified_cleverhans.attacks import FastGradientMethod
+ stddev = int(np.ceil((MAX_EPS * 255) // 2))
+ train_attack_params = {'eps': tf.abs(tf.truncated_normal(
+ shape=(batch_size, 1, 1, 1), mean=0, stddev=stddev))}
+ train_attacker = FastGradientMethod(model, back='tf', sess=sess)
+ # create the adversarial trainer
+ train_attack_params.update({'clip_min': 0., 'clip_max': 1.})
+ adv_x_train = train_attacker.generate(x, phase, **train_attack_params)
+ preds_adv_train = model.get_probs(adv_x_train)
+
+ eval_attack_params = {'eps': MAX_EPS, 'clip_min': 0., 'clip_max': 1.}
+ adv_x_eval = train_attacker.generate(x, phase, **eval_attack_params)
+ preds_adv_eval = model.get_probs(adv_x_eval) # * logits_scalar
+ # if adv:
+ # from modified_cleverhans.attacks import FastGradientMethod
+ # fgsm = FastGradientMethod(model, back='tf', sess=sess)
+ # fgsm_params = {'eps': eps, 'clip_min': 0., 'clip_max': 1.}
+ # adv_x_train = fgsm.generate(x, phase, **fgsm_params)
+ # preds_adv = model.get_probs(adv_x_train)
+
+ if train_from_scratch:
+ if save:
+ train_params.update({'log_dir': model_path})
+ if adv and delay > 0:
+ train_params.update({'nb_epochs': delay})
+
+ # do clean training for 'nb_epochs' or 'delay' epochs with learning rate reducing with time
+ model_train_imagenet2(sess, x, y, preds, train_iterator, train_x, train_y, phase=phase,
+ evaluate=evaluate, args=train_params, save=save, rng=rng)
+
+ # optionally do additional adversarial training
+ if adv:
+ print("Adversarial training for %d epochs" % (nb_epochs - delay))
+ train_params.update({'nb_epochs': nb_epochs - delay})
+ train_params.update({'reuse_global_step': True})
+ model_train_imagenet(sess, x, y, preds, train_iterator, train_x, train_y, phase=phase,
+ predictions_adv=preds_adv_train, evaluate=evaluate, args=train_params, save=save, rng=rng)
+ else:
+ if ensembleThree: ## the three ensemble sub-models have to be loaded from different checkpoints
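+ # The three sub-models were trained separately, so their weights live in three
+ # different checkpoints. tf.get_collection returns the variables in creation order
+ # (11 weight tensors per sub-model, followed by 24 batch-norm variables per
+ # sub-model); each slice is mapped onto the names stored in the corresponding
+ # checkpoint before restoring.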
+ variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
+ # First 11 variables from path1
+ stored_variables = ['lp_conv1_init/k', 'lp_conv1_init/b', 'lp_conv2_init/k', 'lp_conv3_init/k', 'lp_conv4_init/k', 'lp_conv5_init/k', 'lp_ip1init/W', 'lp_ip1init/b', 'lp_ip2init/W', 'lp_logits_init/W', 'lp_logits_init/b']
+ variable_dict = dict(OrderedDict(zip(stored_variables, variables[:11]))) # built via OrderedDict since a plain dict was messing up the variable order
+ # Restore the first set of variables from model_path1
+ saver = tf.train.Saver(variable_dict)
+ saver.restore(sess, tf.train.latest_checkpoint(model_path1))
+ # Restore the second set of variables from model_path2
+ # Second 11 variables from path2
+ variable_dict = dict(OrderedDict(zip(stored_variables, variables[11:22])))
+ saver2 = tf.train.Saver(variable_dict)
+ saver2.restore(sess, tf.train.latest_checkpoint(model_path2))
+ # Third 11 variables from path3
+ stored_variables = ['fp_conv1_init/k', 'fp_conv1_init/b', 'fp_conv2_init/k', 'fp_conv3_init/k', 'fp_conv4_init/k', 'fp_conv5_init/k', 'fp_ip1init/W', 'fp_ip1init/b', 'fp_ip2init/W', 'fp_logits_init/W', 'fp_logits_init/b']
+ variable_dict = dict(OrderedDict(zip(stored_variables, variables[22:33])))
+ saver3 = tf.train.Saver(variable_dict)
+ saver3.restore(sess, tf.train.latest_checkpoint(model_path3))
+ # Next 24 batch norm variables from path1
+ stored_variables = ['lp__batchNorm1/batch_normalization/gamma', 'lp__batchNorm1/batch_normalization/beta', 'lp__batchNorm1/batch_normalization/moving_mean', 'lp__batchNorm1/batch_normalization/moving_variance', 'lp__batchNorm2/batch_normalization/gamma', 'lp__batchNorm2/batch_normalization/beta', 'lp__batchNorm2/batch_normalization/moving_mean', 'lp__batchNorm2/batch_normalization/moving_variance', 'lp__batchNorm3/batch_normalization/gamma', 'lp__batchNorm3/batch_normalization/beta', 'lp__batchNorm3/batch_normalization/moving_mean', 'lp__batchNorm3/batch_normalization/moving_variance', 'lp__batchNorm4/batch_normalization/gamma', 'lp__batchNorm4/batch_normalization/beta', 'lp__batchNorm4/batch_normalization/moving_mean', 'lp__batchNorm4/batch_normalization/moving_variance', 'lp__batchNorm5/batch_normalization/gamma', 'lp__batchNorm5/batch_normalization/beta', 'lp__batchNorm5/batch_normalization/moving_mean', 'lp__batchNorm5/batch_normalization/moving_variance', 'lp__batchNorm6/batch_normalization/gamma', 'lp__batchNorm6/batch_normalization/beta', 'lp__batchNorm6/batch_normalization/moving_mean', 'lp__batchNorm6/batch_normalization/moving_variance']
+
+ variable_dict = dict(OrderedDict(zip(stored_variables, variables[33:57])))
+ saver = tf.train.Saver(variable_dict)
+ saver.restore(sess, tf.train.latest_checkpoint(model_path1))
+ # Next 24 batch norm variables from path2
+ variable_dict = dict(OrderedDict(zip(stored_variables, variables[57:81])))
+ saver = tf.train.Saver(variable_dict)
+ saver.restore(sess, tf.train.latest_checkpoint(model_path2))
+ # Final 24 batch norm variables from path3
+ stored_variables = ['fp__batchNorm1/batch_normalization/gamma', 'fp__batchNorm1/batch_normalization/beta', 'fp__batchNorm1/batch_normalization/moving_mean', 'fp__batchNorm1/batch_normalization/moving_variance', 'fp__batchNorm2/batch_normalization/gamma', 'fp__batchNorm2/batch_normalization/beta', 'fp__batchNorm2/batch_normalization/moving_mean', 'fp__batchNorm2/batch_normalization/moving_variance', 'fp__batchNorm3/batch_normalization/gamma', 'fp__batchNorm3/batch_normalization/beta', 'fp__batchNorm3/batch_normalization/moving_mean', 'fp__batchNorm3/batch_normalization/moving_variance', 'fp__batchNorm4/batch_normalization/gamma', 'fp__batchNorm4/batch_normalization/beta', 'fp__batchNorm4/batch_normalization/moving_mean', 'fp__batchNorm4/batch_normalization/moving_variance', 'fp__batchNorm5/batch_normalization/gamma', 'fp__batchNorm5/batch_normalization/beta', 'fp__batchNorm5/batch_normalization/moving_mean', 'fp__batchNorm5/batch_normalization/moving_variance', 'fp__batchNorm6/batch_normalization/gamma', 'fp__batchNorm6/batch_normalization/beta', 'fp__batchNorm6/batch_normalization/moving_mean', 'fp__batchNorm6/batch_normalization/moving_variance']
+ variable_dict = dict(OrderedDict(zip(stored_variables, variables[81:105])))
+ saver = tf.train.Saver(variable_dict)
+ saver.restore(sess, tf.train.latest_checkpoint(model_path3))
+ else: # restoring the model trained using this setup, not a downloaded one
+ tf_model_load(sess, model_path)
+ print('Restored model from %s' % model_path)
+ # evaluate()
+
+
+ # Evaluate the accuracy of the model on legitimate test examples
+ eval_params = {'batch_size': batch_size}
+ if ensembleThree:
+ accuracy = model_eval_ensemble_imagenet(sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, feed={phase: False}, args=eval_params)
+ else: #default below
+ accuracy = model_eval_imagenet(sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, feed={phase: False}, args=eval_params)
+ print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
+
+ ###########################################################################
+ # Build dataset
+ ###########################################################################
+
+ adv_inputs = test_x #adversarial inputs can be generated from any of the test examples
+
+ ###########################################################################
+ # Craft adversarial examples using generic approach
+ ###########################################################################
+ nb_adv_per_sample = 1
+ adv_ys = None
+ yname = "y"
+
+ print('Crafting adversarial examples')
+ print("This could take some time ...")
+
+ if ensembleThree:
+ model_type = 'ensembleThree'
+ else:
+ model_type = 'default'
+
+ if attack == ATTACK_CARLINI_WAGNER_L2:
+ from modified_cleverhans.attacks import CarliniWagnerL2
+ attacker = CarliniWagnerL2(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
+ attack_params = {'binary_search_steps': 1,
+ 'max_iterations': attack_iterations,
+ 'learning_rate': 0.1,
+ 'batch_size': batch_size,
+ 'initial_const': 10,
+ }
+ elif attack == ATTACK_JSMA:
+ from modified_cleverhans.attacks import SaliencyMapMethod
+ attacker = SaliencyMapMethod(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
+ attack_params = {'theta': 1., 'gamma': 0.1}
+ elif attack == ATTACK_FGSM:
+ from modified_cleverhans.attacks import FastGradientMethod
+ attacker = FastGradientMethod(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
+ attack_params = {'eps': eps}
+ elif attack == ATTACK_MADRYETAL:
+ from modified_cleverhans.attacks import MadryEtAl
+ attacker = MadryEtAl(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
+ attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
+ elif attack == ATTACK_BASICITER:
+ print('Attack: BasicIterativeMethod')
+ from modified_cleverhans.attacks import BasicIterativeMethod
+ attacker = BasicIterativeMethod(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
+ attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
+ else:
+ print("Attack undefined")
+ sys.exit(1)
+
+ attack_params.update({'clip_min': -2.2, 'clip_max': 2.7}) # since the min and max values for ImageNet after preprocessing turn out to be around -2.11 and 2.12
+ eval_params = {'batch_size': batch_size}
+ '''
+ adv_x = attacker.generate(x, phase, **attack_params)
+ # Craft adversarial examples using Fast Gradient Sign Method (FGSM)
+ eval_params = {'batch_size': batch_size}
+ X_test_adv, = batch_eval(sess, [x], [adv_x], [adv_inputs], feed={
+ phase: False}, args=eval_params)
+ '''
+
+ print("Evaluating un-targeted results")
+ if ensembleThree:
+ adv_accuracy = model_eval_ensemble_adv_imagenet(sess, x, y, preds, test_iterator,
+ test_x, test_y, phase=phase, args=eval_params, attacker=attacker, attack_params=attack_params)
+ else:
+ adv_accuracy = model_eval_adv_imagenet(sess, x, y, preds, test_iterator,
+ test_x, test_y, phase=phase, args=eval_params, attacker=attacker, attack_params=attack_params)
+
+ # Compute the number of adversarial examples that were successfully found
+ print('Test accuracy on adversarial examples {0:.4f}'.format(adv_accuracy))
+
+
+ # Close TF session
+ sess.close()
+
+
+if __name__ == '__main__':
+
+ par = argparse.ArgumentParser()
+
+ # Generic flags
+ par.add_argument('--gpu', help='id of GPU to use')
+ par.add_argument('--model_path', help='Path to save or load model')
+ par.add_argument('--data_dir', help='Path to training data',
+ default='/scratch/gallowaa/cifar10/cifar10_data')
+
+ # Architecture and training specific flags
+ par.add_argument('--nb_epochs', type=int, default=6,
+ help='Number of epochs to train model')
+ par.add_argument('--nb_filters', type=int, default=32,
+ help='Number of filters in first layer')
+ par.add_argument('--batch_size', type=int, default=100,
+ help='Size of training batches')
+ par.add_argument('--learning_rate', type=float, default=0.001,
+ help='Learning rate')
+ par.add_argument('--scale', help='Scale activations of the binary model?',
+ action="store_true")
+ par.add_argument('--rand', help='Stochastic weight layer?',
+ action="store_true")
+ # EMPIR specific flags
+ par.add_argument('--lowprecision', help='Use other low precision models', action="store_true")
+ par.add_argument('--wbits', type=int, default=0, help='No. of bits in weight representation')
+ par.add_argument('--abits', type=int, default=0, help='No. of bits in activation representation')
+ par.add_argument('--wbitsList', type=int, nargs='+', help='List of No. of bits in weight representation for different layers')
+ par.add_argument('--abitsList', type=int, nargs='+', help='List of No. of bits in activation representation for different layers')
+ par.add_argument('--stocRound', help='Stochastic rounding for weights (only in training) and activations?', action="store_true")
+ par.add_argument('--model_path1', help='Path where saved model1 is stored and can be loaded')
+ par.add_argument('--model_path2', help='Path where saved model2 is stored and can be loaded')
+ par.add_argument('--ensembleThree', help='Use an ensemble of full precision and two low precision models that can be attacked directly and potentially trained', action="store_true")
+ par.add_argument('--model_path3', help='Path where saved model3 in case of combinedThree model is stored and can be loaded')
+ par.add_argument('--wbits2', type=int, default=0, help='No. of bits in weight representation of model2, model1 specified using wbits')
+ par.add_argument('--abits2', type=int, default=0, help='No. of bits in activation representation of model2, model1 specified using abits')
+ par.add_argument('--wbits2List', type=int, nargs='+', help='List of No. of bits in weight representation for different layers of model2')
+ par.add_argument('--abits2List', type=int, nargs='+', help='List of No. of bits in activation representation for different layers of model2')
+
+ # Attack specific flags
+ par.add_argument('--eps', type=float, default=0.1,
+ help='epsilon')
+ par.add_argument('--attack', type=int, default=0,
+ help='Attack type, 0=CW, 1=JSMA, 2=FGSM, 3=PGD (Madry et al.), 4=BIM')
+ par.add_argument('--attack_iterations', type=int, default=50,
+ help='Number of iterations to run CW attack; 1000 is good')
+ par.add_argument(
+ '--targeted', help='Run a targeted attack?', action="store_true")
+ # Adversarial training flags
+ par.add_argument(
+ '--adv', help='Adversarial training type?', type=int, default=0)
+ par.add_argument('--delay', type=int,
+ default=10, help='Nb of epochs to delay adv training by')
+ par.add_argument('--nb_iter', type=int,
+ default=40, help='Nb of iterations of PGD')
+
+ # imagenet flags
+ par.add_argument('--imagenet_path', help='Path where imagenet tfrecords are stored and can be loaded, both Val and Train')
+
+ FLAGS = par.parse_args()
+
+ if FLAGS.gpu:
+ os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
+
+ tf.app.run()
diff --git a/case_studies/empir/examples/binarization_test.sh b/case_studies/empir/examples/binarization_test.sh
new file mode 100644
index 0000000..0a0912c
--- /dev/null
+++ b/case_studies/empir/examples/binarization_test.sh
@@ -0,0 +1,40 @@
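+# Runs the decision-boundary binarization test on the EMPIR CIFAR-10 ensemble twice:
+# first with the defense's original PGD attack and then with the adaptive variant
+# (--robust-attack). The first CLI argument overrides the number of samples (default: 512).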
+nsamples=${1:-512}
+
+#kwargs=""
+kwargs="--sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Original attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd):case_studies/empir/ ./venv3.8tf/bin/python case_studies/empir/examples/cifar10_binarization_test.py \
+ --ensembleThree \
+ --attack=3 \
+ --abits=2 \
+ --wbits=4 \
+ --abits2=2 \
+ --wbits2=2 \
+ --model_path1=case_studies/empir/weights/Model1/ \
+ --model_path2=case_studies/empir/weights/Model2/ \
+ --model_path3=case_studies/empir/weights/Model3/ \
+ --nb_samples=$nsamples \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd):case_studies/empir/ ./venv3.8tf/bin/python case_studies/empir/examples/cifar10_binarization_test.py \
+ --ensembleThree \
+ --attack=3 \
+ --abits=2 \
+ --wbits=4 \
+ --abits2=2 \
+ --wbits2=2 \
+ --model_path1=case_studies/empir/weights/Model1/ \
+ --model_path2=case_studies/empir/weights/Model2/ \
+ --model_path3=case_studies/empir/weights/Model3/ \
+ --nb_samples=$nsamples \
+ --robust-attack \
+ $kwargs
diff --git a/case_studies/empir/examples/cifar10_attack.py b/case_studies/empir/examples/cifar10_attack.py
new file mode 100644
index 0000000..6c8efa8
--- /dev/null
+++ b/case_studies/empir/examples/cifar10_attack.py
@@ -0,0 +1,540 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
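+# CIFAR-10 counterpart of alexnet_attack.py: builds the EMPIR CIFAR-10 models
+# (full precision, low precision, distilled, or three-model ensemble), restores
+# pretrained checkpoints, and evaluates clean and adversarial accuracy.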
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import logging
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import keras
+from keras import backend
+from keras.datasets import cifar10
+from keras.utils import np_utils
+
+import os
+import argparse
+import logging
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+import sys
+
+# from modified_cleverhans.attacks import fgsm
+from modified_cleverhans.utils import set_log_level, parse_model_settings, \
+ build_model_save_path
+from modified_cleverhans.utils_tf import model_train, model_eval, \
+ model_eval_ensemble, batch_eval, tf_model_load
+
+FLAGS = flags.FLAGS
+
+ATTACK_CARLINI_WAGNER_L2 = 0
+ATTACK_JSMA = 1
+ATTACK_FGSM = 2
+ATTACK_MADRYETAL = 3
+ATTACK_BASICITER = 4
+MAX_BATCH_SIZE = 100
+
+# enum adversarial training types
+ADVERSARIAL_TRAINING_MADRYETAL = 1
+ADVERSARIAL_TRAINING_FGSM = 2
+MAX_EPS = 0.3
+
+# Scaling input to softmax
+INIT_T = 1.0
+# ATTACK_T = 1.0
+ATTACK_T = 0.25
+
+
+def data_cifar10():
+ """
+ Preprocess CIFAR10 dataset
+ :return:
+ """
+
+ # These values are specific to CIFAR10
+ img_rows = 32
+ img_cols = 32
+ nb_classes = 10
+
+ # the data, shuffled and split between train and test sets
+ (X_train, y_train), (X_test, y_test) = cifar10.load_data()
+
+ if keras.backend.image_dim_ordering() == 'th':
+ X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
+ X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
+ else:
+ X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
+ X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
+ X_train = X_train.astype('float32')
+ X_test = X_test.astype('float32')
+
+ X_train /= 255
+ X_test /= 255
+
+ # convert class vectors to binary class matrices
+ Y_train = np_utils.to_categorical(y_train, nb_classes)
+ Y_test = np_utils.to_categorical(y_test, nb_classes)
+
+ return X_train, Y_train, X_test, Y_test
+
+
+def setup_model():
+ # CIFAR10-specific dimensions
+ img_rows = 32
+ img_cols = 32
+ channels = 3
+ nb_classes = 10
+
+ # Set TF random seed to improve reproducibility
+ tf.set_random_seed(1234)
+
+ if not hasattr(backend, "tf"):
+ raise RuntimeError("This tutorial requires keras to be configured"
+ " to use the TensorFlow backend.")
+
+ # Image dimensions ordering should follow the Theano convention
+ if keras.backend.image_dim_ordering() != 'tf':
+ keras.backend.set_image_dim_ordering('tf')
+ print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
+ "'th', temporarily setting to 'tf'")
+
+ # Create TF session and set as Keras backend session
+ sess = tf.Session()
+ keras.backend.set_session(sess)
+
+ set_log_level(logging.WARNING)
+
+ # Define input TF placeholder
+ x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
+ y = tf.placeholder(tf.float32, shape=(None, 10))
+ phase = tf.placeholder(tf.bool, name="phase")
+ logits_scalar = tf.placeholder_with_default(
+ INIT_T, shape=(), name="logits_temperature")
+
+ model_path = FLAGS.model_path
+ nb_filters = FLAGS.nb_filters
+ batch_size = FLAGS.batch_size
+
+ #### EMPIR extra flags
+ lowprecision = FLAGS.lowprecision
+ abits = FLAGS.abits
+ wbits = FLAGS.wbits
+ abitsList = FLAGS.abitsList
+ wbitsList = FLAGS.wbitsList
+ stocRound = True if FLAGS.stocRound else False
+ model_path2 = FLAGS.model_path2
+ model_path1 = FLAGS.model_path1
+ model_path3 = FLAGS.model_path3
+ ensembleThree = True if FLAGS.ensembleThree else False
+ abits2 = FLAGS.abits2
+ wbits2 = FLAGS.wbits2
+ abits2List = FLAGS.abits2List
+ wbits2List = FLAGS.wbits2List
+ distill = True if FLAGS.distill else False
+ ####
+
+ if ensembleThree:
+ if (model_path1 is None or model_path2 is None or model_path3 is None):
+ raise ValueError("model_path1, model_path2 and model_path3 must all be specified when using --ensembleThree")
+ elif model_path is not None:
+ if os.path.exists(model_path):
+ # check for existing model in immediate subfolder
+ if not any(f.endswith('.meta') for f in os.listdir(model_path)):
+ raise ValueError("no checkpoint (.meta file) found in model_path")
+ else:
+ raise ValueError("model_path does not exist")
+
+ if ensembleThree:
+ if (wbitsList is None) or (
+ abitsList is None): # Layer wise separate quantization not specified for first model
+ if (wbits == 0) or (abits == 0):
+ print(
+ "Error: the number of bits for constant precision weights and activations across layers for the first model have to specified using wbits1 and abits1 flags")
+ sys.exit(1)
+ else:
+ fixedPrec1 = 1
+ elif (len(wbitsList) != 3) or (len(abitsList) != 3):
+ print(
+ "Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the first model")
+ sys.exit(1)
+ else:
+ fixedPrec1 = 0
+
+ if (wbits2List is None) or (
+ abits2List is None): # Layer wise separate quantization not specified for second model
+ if (wbits2 == 0) or (abits2 == 0):
+ print(
+ "Error: the number of bits for constant precision weights and activations across layers for the second model have to specified using wbits1 and abits1 flags")
+ sys.exit(1)
+ else:
+ fixedPrec2 = 1
+ elif (len(wbits2List) != 3) or (len(abits2List) != 3):
+ print(
+ "Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the second model")
+ sys.exit(1)
+ else:
+ fixedPrec2 = 0
+
+ if (fixedPrec2 != 1) or (
+ fixedPrec1 != 1): # At least one of the models has separate precisions per layer
+ fixedPrec = 0
+ print("Within atleast one model has separate precisions")
+ if (fixedPrec1 == 1): # first model has fixed precision
+ abitsList = (abits, abits, abits)
+ wbitsList = (wbits, wbits, wbits)
+ if (fixedPrec2 == 1): # second model has fixed precision
+ abits2List = (abits2, abits2, abits2)
+ wbits2List = (wbits2, wbits2, wbits2)
+ else:
+ fixedPrec = 1
+
+ if fixedPrec == 1:
+ from cleverhans_tutorials.tutorial_models import \
+ make_ensemble_three_cifar_cnn
+ model = make_ensemble_three_cifar_cnn(
+ phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbits, abits, wbits2,
+ abits2, input_shape=(None, img_rows, img_cols, channels),
+ nb_filters=nb_filters)
+ else:
+ from cleverhans_tutorials.tutorial_models import \
+ make_ensemble_three_cifar_cnn_layerwise
+ model = make_ensemble_three_cifar_cnn_layerwise(
+ phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbitsList, abitsList,
+ wbits2List, abits2List,
+ input_shape=(None, img_rows, img_cols, channels),
+ nb_filters=nb_filters)
+ elif lowprecision:
+ if (wbitsList is None) or (
+ abitsList is None): # Layer wise separate quantization not specified
+ if (wbits == 0) or (abits == 0):
+ print(
+ "Error: the number of bits for constant precision weights and activations across layers have to specified using wbits and abits flags")
+ sys.exit(1)
+ else:
+ fixedPrec = 1
+ elif (len(wbitsList) != 3) or (len(abitsList) != 3):
+ print(
+ "Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers")
+ sys.exit(1)
+ else:
+ fixedPrec = 0
+
+ if fixedPrec:
+ from cleverhans_tutorials.tutorial_models import \
+ make_basic_lowprecision_cifar_cnn
+ model = make_basic_lowprecision_cifar_cnn(
+ phase, logits_scalar, 'lp_', wbits, abits, input_shape=(
+ None, img_rows, img_cols, channels), nb_filters=nb_filters,
+ stocRound=stocRound)
+ else:
+ from cleverhans_tutorials.tutorial_models import \
+ make_layerwise_lowprecision_cifar_cnn
+ model = make_layerwise_lowprecision_cifar_cnn(
+ phase, logits_scalar, 'lp_', wbitsList, abitsList, input_shape=(
+ None, img_rows, img_cols, channels), nb_filters=nb_filters,
+ stocRound=stocRound)
+ elif distill:
+ from cleverhans_tutorials.tutorial_models import make_distilled_cifar_cnn
+ model = make_distilled_cifar_cnn(phase, logits_scalar,
+ 'teacher_fp_', 'fp_',
+ nb_filters=nb_filters, input_shape=(
+ None, img_rows, img_cols, channels))
+ ####
+ else:
+ from cleverhans_tutorials.tutorial_models import make_basic_cifar_cnn
+ model = make_basic_cifar_cnn(phase, logits_scalar, 'fp_', input_shape=(
+ None, img_rows, img_cols, channels), nb_filters=nb_filters)
+
+ # separate calling function for ensemble models
+ if ensembleThree:
+ preds = model.ensemble_call(x, reuse=False)
+ else:
+ ##default
+ preds = model(x, reuse=False)
+ print("Defined TensorFlow model graph.")
+
+ if ensembleThree:
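+ # As in the ImageNet script, the three sub-models come from separate checkpoints:
+ # tf.get_collection returns the variables in creation order (five weight tensors
+ # per sub-model here), and each slice is mapped onto the names stored in the
+ # corresponding checkpoint.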
+ variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
+ stored_variables = ['lp_conv1_init/k', 'lp_conv2_init/k', 'lp_conv3_init/k',
+ 'lp_ip1init/W', 'lp_logits_init/W']
+ variable_dict = dict(zip(stored_variables, variables[:5]))
+ # Restore the first set of variables from model_path1
+ saver = tf.train.Saver(variable_dict)
+ saver.restore(sess, tf.train.latest_checkpoint(model_path1))
+ # Restore the second set of variables from model_path2
+ variable_dict = dict(zip(stored_variables, variables[5:10]))
+ saver2 = tf.train.Saver(variable_dict)
+ saver2.restore(sess, tf.train.latest_checkpoint(model_path2))
+ stored_variables = ['fp_conv1_init/k', 'fp_conv2_init/k', 'fp_conv3_init/k',
+ 'fp_ip1init/W', 'fp_logits_init/W']
+ variable_dict = dict(zip(stored_variables, variables[10:]))
+ saver3 = tf.train.Saver(variable_dict)
+ saver3.restore(sess, tf.train.latest_checkpoint(model_path3))
+ else:
+ tf_model_load(sess, model_path)
+ print('Restored model from %s' % model_path)
+
+ return sess, model, preds, x, y, phase
+
+
+def build_adversarial_attack(sess, model, attack, targeted, nb_classes,
+ ensembleThree,
+ nb_samples, nb_iter, eps, robust_attack):
+ if targeted:
+ att_batch_size = np.clip(
+ nb_samples * (nb_classes - 1), a_max=MAX_BATCH_SIZE, a_min=1)
+ yname = "y_target"
+
+ else:
+ att_batch_size = np.minimum(nb_samples, MAX_BATCH_SIZE)
+ adv_ys = None
+ yname = "y"
+
+ if ensembleThree:
+ model_type = 'ensembleThree'
+ else:
+ model_type = 'default'
+
+ if attack == ATTACK_CARLINI_WAGNER_L2:
+ from modified_cleverhans.attacks import CarliniWagnerL2
+ attacker = CarliniWagnerL2(model, back='tf', model_type=model_type,
+ num_classes=nb_classes, sess=sess)
+ attack_params = {'binary_search_steps': 1,
+ 'max_iterations': nb_iter,
+ 'learning_rate': 0.1,
+ 'batch_size': att_batch_size,
+ 'initial_const': 10,
+ }
+ elif attack == ATTACK_JSMA:
+ from modified_cleverhans.attacks import SaliencyMapMethod
+ attacker = SaliencyMapMethod(model, back='tf', model_type=model_type,
+ sess=sess, num_classes=nb_classes)
+ attack_params = {'theta': 1., 'gamma': 0.1}
+ elif attack == ATTACK_FGSM:
+ from modified_cleverhans.attacks import FastGradientMethod
+ attacker = FastGradientMethod(model, back='tf', model_type=model_type,
+ sess=sess, num_classes=nb_classes)
+ attack_params = {'eps': eps}
+ elif attack == ATTACK_MADRYETAL:
+ from modified_cleverhans.attacks import MadryEtAl
+ attacker = MadryEtAl(model, back='tf', model_type=model_type, sess=sess,
+ num_classes=nb_classes, attack_type="robust" if robust_attack else "vanilla")
+ attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
+ elif attack == ATTACK_BASICITER:
+ from modified_cleverhans.attacks import BasicIterativeMethod
+ attacker = BasicIterativeMethod(model, back='tf', sess=sess,
+ model_type=model_type,
+ num_classes=nb_classes)
+ attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
+ else:
+ print("Attack undefined")
+ sys.exit(1)
+
+ attack_params.update({yname: adv_ys, 'clip_min': 0., 'clip_max': 1.})
+
+ return attacker, attack_params
+
+
+def main(argv=None):
+ """
+ CIFAR10 modified_cleverhans tutorial
+ :return:
+ """
+
+ img_rows = 32
+ img_cols = 32
+ channels = 3
+ nb_classes = 10
+ targeted = True if FLAGS.targeted else False
+ batch_size = FLAGS.batch_size
+ nb_samples = FLAGS.nb_samples
+ eps = FLAGS.eps
+
+ attack = FLAGS.attack
+ nb_iter = FLAGS.nb_iter
+
+ ensembleThree = True if FLAGS.ensembleThree else False
+ sess, model, preds, x, y, phase = setup_model()
+
+ # Get CIFAR10 test data
+ X_train, Y_train, X_test, Y_test = data_cifar10()
+
+ def evaluate():
+ # Evaluate the accuracy of the CIFAR10 model on legitimate test
+ # examples
+ eval_params = {'batch_size': batch_size}
+ if ensembleThree:
+ acc = model_eval_ensemble(
+ sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
+ else:
+ acc = model_eval(
+ sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
+ assert X_test.shape[0] == 10000, X_test.shape
+ print('Test accuracy on legitimate examples: %0.4f' % acc)
+
+ evaluate()
+
+ # Evaluate the accuracy of the CIFAR10 model on legitimate test examples
+ eval_params = {'batch_size': batch_size}
+ if ensembleThree:
+ accuracy = model_eval_ensemble(sess, x, y, preds, X_test, Y_test,
+ phase=phase, feed={phase: False},
+ args=eval_params)
+ else:
+ accuracy = model_eval(sess, x, y, preds, X_test, Y_test, phase=phase,
+ feed={phase: False}, args=eval_params)
+
+ print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
+
+ ###########################################################################
+ # Build dataset
+ ###########################################################################
+
+ if targeted:
+ from modified_cleverhans.utils import build_targeted_dataset
+ adv_inputs, true_labels, adv_ys = build_targeted_dataset(
+ X_test, Y_test, np.arange(nb_samples), nb_classes, img_rows, img_cols,
+ channels)
+ else:
+ adv_inputs = X_test[:nb_samples]
+ true_labels = Y_test[:nb_samples]
+
+ ###########################################################################
+ # Craft adversarial examples using generic approach
+ ###########################################################################
+ attacker, attack_params = build_adversarial_attack(sess, model, attack,
+ targeted, nb_classes,
+ ensembleThree,
+ nb_samples, nb_iter, eps,
+ robust_attack=FLAGS.robust_attack)
+
+ if FLAGS.use_labels:
+ attack_params['y'] = true_labels
+ X_test_adv = attacker.generate_np(adv_inputs, phase, **attack_params)
+ #x_adv = attacker.generate(x, phase, **attack_params)
+
+
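+ # NOTE: the adversarial evaluation below always uses the ensemble evaluation helper,
+ # i.e. this path assumes the script is run with --ensembleThree.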
+ adv_accuracy = model_eval_ensemble(sess, x, y, preds, X_test_adv, Y_test,
+ phase=phase, args=eval_params)
+
+ # Friendly output for pasting into spreadsheet
+ print('Accuracy: {0:.4f},'.format(accuracy))
+ print('Adversarial Accuracy {0:.4f},'.format(adv_accuracy))
+
+ sess.close()
+
+
+if __name__ == '__main__':
+
+ par = argparse.ArgumentParser()
+
+ # Generic flags
+ par.add_argument('--gpu', help='id of GPU to use')
+ par.add_argument('--model_path', help='Path to save or load model')
+ par.add_argument('--data_dir', help='Path to training data',
+ default='cifar10_data')
+
+ # Architecture and training specific flags
+ par.add_argument('--nb_epochs', type=int, default=6,
+ help='Number of epochs to train model')
+ par.add_argument('--nb_filters', type=int, default=32,
+ help='Number of filters in first layer')
+ par.add_argument('--batch_size', type=int, default=128,
+ help='Size of training batches')
+ par.add_argument('--learning_rate', type=float, default=0.001,
+ help='Learning rate')
+ par.add_argument('--rand', help='Stochastic weight layer?',
+ action="store_true")
+
+ # Attack specific flags
+ par.add_argument('--eps', type=float, default=0.1,
+ help='epsilon')
+ par.add_argument('--attack', type=int, default=0,
+ help='Attack type, 0=CW, 1=JSMA, 2=FGSM, 3=PGD (Madry et al.), 4=BIM')
+ par.add_argument('--nb_samples', type=int,
+ default=10000, help='Nb of inputs to attack')
+ par.add_argument(
+ '--targeted', help='Run a targeted attack?', action="store_true")
+ # Adversarial training flags
+ par.add_argument(
+ '--adv', help='Adversarial training type?', type=int, default=0)
+ par.add_argument('--delay', type=int,
+ default=10, help='Nb of epochs to delay adv training by')
+ par.add_argument('--nb_iter', type=int,
+ default=40,
+ help='Nb of iterations of PGD (set to 50 for CW)')
+
+ # EMPIR specific flags
+ par.add_argument('--lowprecision', help='Use other low precision models',
+ action="store_true")
+ par.add_argument('--wbits', type=int, default=0,
+ help='No. of bits in weight representation')
+ par.add_argument('--abits', type=int, default=0,
+ help='No. of bits in activation representation')
+ par.add_argument('--wbitsList', type=int, nargs='+',
+ help='List of No. of bits in weight representation for different layers')
+ par.add_argument('--abitsList', type=int, nargs='+',
+ help='List of No. of bits in activation representation for different layers')
+ par.add_argument('--stocRound',
+ help='Stochastic rounding for weights (only in training) and activations?',
+ action="store_true")
+ par.add_argument('--model_path1',
+ help='Path where saved model1 is stored and can be loaded')
+ par.add_argument('--model_path2',
+ help='Path where saved model2 is stored and can be loaded')
+ par.add_argument('--ensembleThree',
+ help='Use an ensemble of full precision and two low precision models that can be attacked directly',
+ action="store_true")
+ par.add_argument('--model_path3',
+ help='Path where saved model3 in case of combinedThree model is stored and can be loaded')
+ par.add_argument('--wbits2', type=int, default=0,
+ help='No. of bits in weight representation of model2, model1 specified using wbits')
+ par.add_argument('--abits2', type=int, default=0,
+ help='No. of bits in activation representation of model2, model1 specified using abits')
+ par.add_argument('--wbits2List', type=int, nargs='+',
+ help='List of No. of bits in weight representation for different layers of model2')
+ par.add_argument('--abits2List', type=int, nargs='+',
+ help='List of No. of bits in activation representation for different layers of model2')
+ # extra flags for defensive distillation
+ par.add_argument('--distill', help='Train the model using distillation',
+ action="store_true")
+ par.add_argument('--student_epochs', type=int, default=50,
+ help='No. of epochs for which the student model is trained')
+ # extra flags for input gradient regularization
+ par.add_argument('--inpgradreg',
+ help='Train the model using input gradient regularization',
+ action="store_true")
+ par.add_argument('--l2dbl', type=int, default=0,
+ help='l2 double backprop penalty')
+ par.add_argument('--l2cs', type=int, default=0,
+ help='l2 certainty sensitivity penalty')
+
+ par.add_argument("--robust-attack", action="store_true")
+ par.add_argument("--use-labels", action="store_true")
+
+ FLAGS = par.parse_args()
+
+ if FLAGS.gpu:
+ os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
+
+ tf.app.run()
diff --git a/case_studies/empir/examples/cifar10_binarization_test.py b/case_studies/empir/examples/cifar10_binarization_test.py
new file mode 100644
index 0000000..5f00769
--- /dev/null
+++ b/case_studies/empir/examples/cifar10_binarization_test.py
@@ -0,0 +1,561 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import logging
+
+import torch
+from torch.utils.data import DataLoader
+from torch.utils.data import TensorDataset
+
+from active_tests.decision_boundary_binarization import LogitRescalingType
+from active_tests.decision_boundary_binarization import \
+ _train_logistic_regression_classifier
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack, format_result
+from cifar10_attack import setup_model
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+from functools import partial
+import tensorflow as tf
+from keras.utils.np_utils import to_categorical
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import os
+import argparse
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+
+class Layer(object):
+
+ def get_output_shape(self):
+ return self.output_shape
+
+
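+# Minimal TF1 linear layer whose weight/bias variables can be overwritten at
+# runtime through placeholders; used below to load the sklearn-trained binary
+# readout heads into the graph.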
+class Linear(Layer):
+
+ def __init__(self, num_hid, name, useBias=False):
+ self.__dict__.update(locals())
+ # self.num_hid = num_hid
+
+ def set_input_shape(self, input_shape, reuse):
+
+    # Using tf.variable_scope(self.scope_name + 'init', reuse) works in the
+    # black-box setting but prevents loading white-box checkpoints; the scope
+    # below works with white-box checkpoints.
+ with tf.variable_scope(self.name + '_init', reuse):
+
+ batch_size, dim = input_shape
+ self.input_shape = [batch_size, dim]
+ self.output_shape = [batch_size, self.num_hid]
+ if self.useBias:
+ self.bias_shape = self.num_hid
+ init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
+ init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
+ keep_dims=True))
+ self.W = tf.get_variable(
+ "W", initializer=init)
+
+ if self.useBias:
+ bias_init = tf.zeros(self.bias_shape)
+        self.bias = tf.get_variable("b", initializer=bias_init)
+
+ self.bias_ph = tf.placeholder(tf.float32, shape=self.bias_shape)
+ self.set_bias = self.bias.assign(self.bias_ph)
+
+ self.W_ph = tf.placeholder(tf.float32, shape=[dim, self.num_hid])
+ self.set_weight = self.W.assign(self.W_ph)
+
+ def fprop(self, x, reuse):
+
+ # with tf.variable_scope(self.scope_name + '_fprop', reuse):
+ # this works with white-box
+ with tf.variable_scope(self.name + '_fprop', reuse):
+
+ x = tf.matmul(x, self.W) # + self.b
+ if self.useBias:
+ x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
+
+ return x
+
+FLAGS = flags.FLAGS
+
+ATTACK_CARLINI_WAGNER_L2 = 0
+ATTACK_JSMA = 1
+ATTACK_FGSM = 2
+ATTACK_MADRYETAL = 3
+ATTACK_BASICITER = 4
+MAX_BATCH_SIZE = 100
+
+# Scaling input to softmax
+INIT_T = 1.0
+# ATTACK_T = 1.0
+ATTACK_T = 0.25
+
+from cifar10_attack import data_cifar10
+
+from cifar10_attack import build_adversarial_attack
+
+
+def main(argv=None):
+ """
+  Decision boundary binarization test for the EMPIR CIFAR-10 ensemble defense.
+ :return:
+ """
+
+ nb_classes = 2
+ targeted = True if FLAGS.targeted else False
+ batch_size = FLAGS.batch_size
+ nb_samples = FLAGS.nb_samples
+ eps = FLAGS.eps
+
+ attack = FLAGS.attack
+ nb_iter = FLAGS.nb_iter
+
+ ensembleThree = True if FLAGS.ensembleThree else False
+ sess, model, preds, x, y, phase = setup_model()
+
+ # Get CIFAR10 test data
+ X_train, Y_train, X_test, Y_test = data_cifar10()
+ del X_train, Y_train
+ X_test = np.transpose(X_test, (0, 3, 1, 2))
+ print(X_test.shape)
+
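+  # Runs the supplied attack on the single example provided by the loader and
+  # reports whether the binarized ensemble misclassifies the resulting input.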
+ def run_attack(m, l, sess, attack):
+ for x_batch, y_batch in l:
+ assert len(x_batch) == 1
+ x_batch = x_batch.cpu().numpy()
+ y_batch = y_batch.cpu().numpy()
+
+ x_batch = x_batch.transpose(0, 2, 3, 1)
+
+ y_batch_oh = to_categorical(y_batch, num_classes=2)
+ x_batch_adv = attack(x_batch, y_batch_oh)
+
+ probs = m(x_batch_adv)
+ preds = probs.argmax(-1)
+
+ is_adv = preds != y_batch
+
+ return is_adv, (torch.tensor(x_batch_adv.transpose(0, 3, 1, 2), dtype=torch.float32),\
+ torch.tensor(probs, dtype=torch.float32))
+
+
+ def train_classifier(
+ n_features: int,
+ train_loader: DataLoader,
+ raw_train_loader: DataLoader,
+ logits: torch.Tensor,
+ device: str,
+ rescale_logits: LogitRescalingType,
+ binarized_ensemble,
+ set_weight_ops,
+ set_bias_ops,
+ sess,
+ weights_phs,
+ biases_phs
+ ):
+ #del raw_train_loader
+
+ # fit a linear readout for each of the submodels of the ensemble
+ assert len(train_loader.dataset.tensors[0].shape) == 3
+ assert train_loader.dataset.tensors[0].shape[1] == len(weights_phs) == len(
+ biases_phs)
+
+ classifier_weights = []
+ classifier_biases = []
+ for i in range(3):
+ x_ = train_loader.dataset.tensors[0][:, i]
+ y_ = train_loader.dataset.tensors[1]
+
+ cls = _train_logistic_regression_classifier(
+ n_features,
+ DataLoader(TensorDataset(x_, y_), batch_size=train_loader.batch_size),
+ logits[:, i] if logits is not None else None,
+ "sklearn",
+ 10000,
+ device,
+ n_classes=2,
+ rescale_logits=rescale_logits
+ )
+ classifier_weights.append(cls.weight.data.cpu().numpy().transpose())
+ classifier_biases.append(cls.bias.data.cpu().numpy())
+
+ # update weights of the binary models
+ for op, ph, v in zip(set_weight_ops, weights_phs, classifier_weights):
+ sess.run(op, {ph: v})
+ for op, ph, v in zip(set_bias_ops, biases_phs, classifier_biases):
+ sess.run(op, {ph: v})
+
+ """ n_corr1 = 0
+ n_corr2 = 0
+ n_total = 0
+ for x, y in raw_train_loader:
+ preds1 = binarized_model(x)
+ preds2 = binarized_model(x, averaged=False)
+ import pdb; pdb.set_trace()
+ n_corr1 += (preds1 == y).sum()
+ n_corr2 += (preds2 == y).sum()
+ n_total += len(x)
+ """
+ return binarized_ensemble
+
+ from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper, \
+ PyTorchToTensorFlow1Wrapper
+ from utils import build_dataloader_from_arrays
+
+ test_loader = build_dataloader_from_arrays(X_test, Y_test, batch_size=32)
+
+ from modified_cleverhans.model import Model
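+
+  # Binarized version of the EMPIR ensemble: each sub-model's readout is
+  # replaced by a two-class linear head (set through placeholders after being
+  # fit with sklearn), while the original max-voting combination is reused.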
+ class BinarizedEnsembleModel(Model):
+ def __init__(self, base_classifier, input_ph):
+ self.num_classes = 2
+
+ self.base_classifier = base_classifier
+ self.layer_names = []
+ self.layer_names.append('combined_features')
+ self.layer_names.append('combined_logits')
+
+ combined_layer_name = 'combined' ## Gives the final class prediction based on max voting
+ self.layer_names.append(combined_layer_name)
+ combinedCorrectProb_layer_name = 'combinedAvgCorrectProb' ## Gives average probability values of the models that decided the final prediction
+ self.layer_names.append(combinedCorrectProb_layer_name)
+ combinedProb_layer_name = 'combinedAvgProb' ## Gives average probability values of all the models
+ self.layer_names.append(combinedProb_layer_name)
+
+ self.readout_1 = Linear(2, "binarized_ensemble_readout_1", useBias=True)
+ self.readout_2 = Linear(2, "binarized_ensemble_readout_2", useBias=True)
+ self.readout_3 = Linear(2, "binarized_ensemble_readout_3", useBias=True)
+
+ self.readout_1.set_input_shape((-1, 64), True)
+ self.readout_2.set_input_shape((-1, 64), True)
+ self.readout_3.set_input_shape((-1, 64), True)
+
+ self.set_weight_ops = [
+ self.readout_1.set_weight,
+ self.readout_2.set_weight,
+ self.readout_3.set_weight
+ ]
+ self.set_bias_ops = [
+ self.readout_1.set_bias,
+ self.readout_2.set_bias,
+ self.readout_3.set_bias,
+ ]
+
+ self.weights_phs = [
+ self.readout_1.W_ph,
+ self.readout_2.W_ph,
+ self.readout_3.W_ph
+ ]
+ self.biases_phs = [
+ self.readout_1.bias_ph,
+ self.readout_2.bias_ph,
+ self.readout_3.bias_ph
+ ]
+
+ self.input_ph = input_ph
+ self.ensemble_op = self.get_ensemblepreds(self.input_ph)
+ self.averaged_op = self.get_combinedAvgCorrectProbs(self.input_ph)
+
+
+ def __call__(self, x_, averaged=True, *args, **kwargs):
+ return_torch = False
+ return_numpy = False
+ if isinstance(x_, torch.Tensor):
+ x_ = x_.cpu().numpy()
+ return_torch = True
+ if isinstance(x_, np.ndarray):
+ return_numpy = True
+ if x_.shape[1] == 3:
+ x_ = x_.transpose(0, 2, 3, 1)
+
+ x = self.input_ph
+ if averaged:
+ op = self.averaged_op
+ else:
+ op = self.ensemble_op
+
+ else:
+ raise NotImplementedError("Calling this with a tf tensor is not supported yet"
+ " (wasn't necessary).")
+ #if averaged:
+ # op = self.get_combinedAvgCorrectProbs(x_, *args, **kwargs)
+ #else:
+ # op = self.get_ensemblepreds(x_, *args, **kwargs)
+ if return_numpy or return_torch:
+ x_ = sess.run(op, {x: x_})
+ if return_torch:
+ x_ = torch.tensor(x_, dtype=torch.float32)
+ return x_
+
+ def fprop(self, x, reuse):
+ base_states = self.base_classifier.fprop(x, reuse)
+
+ features1 = base_states["Model1_HiddenLinear10"]
+ features2 = base_states["Model2_HiddenLinear10"]
+ features3 = base_states["Model3_HiddenLinear10"]
+
+ output1 = self.readout_1.fprop(features1, reuse)
+ output2 = self.readout_2.fprop(features2, reuse)
+ output3 = self.readout_3.fprop(features3, reuse)
+
+ states = []
+ states.append(tf.stack((features1, features2, features3), 1))
+ states.append(tf.stack((output1, output2, output3), 1))
+
+ # Find class predictions with each model
+ pred1 = tf.argmax(output1, axis=-1)
+ pred2 = tf.argmax(output2, axis=-1)
+ pred3 = tf.argmax(output3, axis=-1)
+ comb_pred = tf.stack([pred1, pred2, pred3], axis=1)
+ comb_pred = tf.cast(comb_pred, dtype=tf.int32) # converting to int32 as bincount requires int32
+
+ # Find how many times each of the classes are predicted among the three models and identify the max class
+ initial_imidx = 1
+
+      binarray = tf.bincount(comb_pred[0], minlength=self.num_classes)  # initial bincount: number of votes each class received from the three models
+ max_class = tf.argmax(binarray, axis=-1)
+ count_max = tf.gather(binarray, max_class) # max vote count for a class
+
+ value = tf.cond(tf.less(count_max, 2), lambda: pred3[0], lambda: max_class)
+ in_class_array = tf.fill([1], value)
+
+ ## Added below to allow better gradient calculation for max voted model
+ in_avgCorrectprob = tf.cond(tf.equal(value, pred3[0]), lambda: output3[0], lambda: tf.zeros_like(output3[0])) # add pred3 if it affected the final decision
+ in_avgCorrectprob = tf.cond(tf.equal(value, pred2[0]), lambda: tf.add(output2[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred2 if it affected the final decision
+      in_avgCorrectprob = tf.cond(tf.equal(value, pred1[0]), lambda: tf.add(output1[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred1 if it affected the final decision
+ in_avgCorrectprob_array = tf.expand_dims(tf.div(in_avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
+
+ #condition check: when true the loop body executes
+ def idx_loop_condition(class_array, avgCorrectprob_array, im_idx):
+ return tf.less(im_idx, tf.shape(pred1)[0])
+
+ #loop body to calculate the max voted class for each image
+ def idx_loop_body(class_array, avgCorrectprob_array, im_idx):
+        binarray_new = tf.bincount(comb_pred[im_idx], minlength=self.num_classes)  # number of votes each class received from the three models
+ max_class = tf.argmax(binarray_new, axis=-1)
+ count_max = tf.gather(binarray_new, max_class) # max vote count for a class
+
+ value = tf.cond(tf.less(count_max, 2), lambda: pred3[im_idx], lambda: max_class)# If the max vote is less than 2, take the prediction of the full precision model
+ new_array = tf.fill([1], value)
+ class_array = tf.concat([class_array, new_array], 0)
+
+ ## Added below to allow better gradient calculation for max voted model
+ avgCorrectprob = tf.cond(tf.equal(value, pred3[im_idx]), lambda: output3[im_idx], lambda: tf.zeros_like(output3[im_idx])) # add pred3 if it affected the final decision
+ avgCorrectprob = tf.cond(tf.equal(value, pred2[im_idx]), lambda: tf.add(output2[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred2 if it affected the final decision
+        avgCorrectprob = tf.cond(tf.equal(value, pred1[im_idx]), lambda: tf.add(output1[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred1 if it affected the final decision
+ avgCorrectprob = tf.expand_dims(tf.div(avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
+ avgCorrectprob_array = tf.concat([avgCorrectprob_array, avgCorrectprob], 0)
+
+ return (class_array, avgCorrectprob_array, im_idx+1)
+
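+      # Image 0 was handled above to provide the initial loop variables; the
+      # while loop below processes the remaining images in the batch.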
+ res = tf.while_loop(
+ cond=idx_loop_condition,
+ body=idx_loop_body,
+ loop_vars=[in_class_array, in_avgCorrectprob_array, initial_imidx],
+ shape_invariants=[tf.TensorShape([None]), tf.TensorShape([None, self.num_classes]), tf.TensorShape([])], #add shape invariant saying that the first dimension of in_class_array changes and is thus None
+ )
+
+      pred_output = tf.cast(res[0], dtype=tf.int64)  # max-voted class prediction for each image
+ states.append(pred_output)
+
+      avgCorrectprob_output = res[1]  # averaged probabilities of the models that agreed with the final prediction
+ states.append(avgCorrectprob_output)
+
+ avgprob = tf.div(tf.add_n([output2, output1, output3]), tf.cast(3, dtype=tf.float32)) # Average probability across all models
+ states.append(avgprob)
+
+ states = dict(zip(self.get_layer_names(), states))
+ return states
+
+ binarized_model = BinarizedEnsembleModel(model, x)
+ attacker, attack_params = build_adversarial_attack(
+ sess, binarized_model, attack,
+ targeted, nb_classes,
+ ensembleThree,
+ nb_samples, nb_iter, eps,
+ robust_attack=FLAGS.robust_attack)
+
+ base_model_outputs = model.fprop(x, reuse=True)
+ base_model_features = base_model_outputs["combined_features"]
+ base_model_logits = base_model_outputs["combined_logits"]
+ def _model_forward_pass(x_np, features_only=False, features_and_logits=False):
+ x_np = np.transpose(x_np, (0, 2, 3, 1))
+
+ if features_only:
+ return sess.run(base_model_features, {x : x_np})
+ elif features_and_logits:
+ targets = [base_model_features, base_model_logits]
+ return tuple(sess.run(targets, {x : x_np}))
+ else:
+ return sess.run(base_model_logits, {x : x_np})
+
+
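+  # Thin wrapper exposing the frozen TF1 ensemble's features and logits to the
+  # PyTorch-based binarization test.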
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=_model_forward_pass,
+ logit_forward_and_backward_pass=None
+ )
+ y = tf.placeholder(tf.float32, shape=(None, 2))
+ if FLAGS.use_labels:
+ attack_params['y'] = y
+ else:
+ #del attack_params['y']
+ attack_params['y'] = tf.stop_gradient(tf.to_float(tf.one_hot(binarized_model.get_ensemblepreds(x, reuse=True), nb_classes)))
+ x_adv = attacker.generate(x, phase, **attack_params)
+
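+  # Run the active test: for each test sample a binary discrimination problem
+  # is constructed around it, and the attack has to find an adversarial example
+  # against the resulting binarized classifier.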
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, kwargs: run_attack(
+ m, l, sess, lambda x_, y_: sess.run(x_adv, {x: x_, y: y_})
+ ),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=FLAGS.eps,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=FLAGS.n_boundary_points,
+ n_inner_points=FLAGS.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=FLAGS.nb_samples,
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ train_classifier_fn=partial(train_classifier,
+ binarized_ensemble=binarized_model,
+ set_weight_ops=binarized_model.set_weight_ops,
+ set_bias_ops=binarized_model.set_bias_ops,
+ sess=sess,
+ weights_phs=binarized_model.weights_phs,
+ biases_phs=binarized_model.biases_phs,
+ ),
+ fail_on_exception=True,
+ rescale_logits="adaptive",
+ decision_boundary_closeness=0.9999,
+ sample_training_data_from_corners=FLAGS.sample_from_corners
+ )
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ FLAGS.nb_samples))
+
+if __name__ == '__main__':
+ par = argparse.ArgumentParser()
+
+ # Generic flags
+ par.add_argument('--gpu', help='id of GPU to use')
+ par.add_argument('--model_path', help='Path to save or load model')
+ par.add_argument('--data_dir', help='Path to training data',
+ default='cifar10_data')
+
+ # Architecture and training specific flags
+ par.add_argument('--nb_epochs', type=int, default=6,
+ help='Number of epochs to train model')
+ par.add_argument('--nb_filters', type=int, default=32,
+ help='Number of filters in first layer')
+ par.add_argument('--batch_size', type=int, default=128,
+ help='Size of training batches')
+ par.add_argument('--learning_rate', type=float, default=0.001,
+ help='Learning rate')
+ par.add_argument('--rand', help='Stochastic weight layer?',
+ action="store_true")
+
+ # Attack specific flags
+ par.add_argument('--eps', type=float, default=0.1,
+ help='epsilon')
+ par.add_argument('--attack', type=int, default=0,
+                  help='Attack type: 0=CW, 1=JSMA, 2=FGSM, 3=PGD (Madry et al.), 4=BIM')
+ par.add_argument('--nb_samples', type=int,
+ default=10000, help='Nb of inputs to attack')
+ par.add_argument(
+ '--targeted', help='Run a targeted attack?', action="store_true")
+ # Adversarial training flags
+ par.add_argument(
+ '--adv', help='Adversarial training type?', type=int, default=0)
+ par.add_argument('--delay', type=int,
+ default=10, help='Nb of epochs to delay adv training by')
+ par.add_argument('--nb_iter', type=int,
+ default=40,
+ help='Nb of iterations of PGD (set to 50 for CW)')
+
+ # EMPIR specific flags
+ par.add_argument('--lowprecision', help='Use other low precision models',
+ action="store_true")
+ par.add_argument('--wbits', type=int, default=0,
+ help='No. of bits in weight representation')
+ par.add_argument('--abits', type=int, default=0,
+ help='No. of bits in activation representation')
+ par.add_argument('--wbitsList', type=int, nargs='+',
+ help='List of No. of bits in weight representation for different layers')
+ par.add_argument('--abitsList', type=int, nargs='+',
+ help='List of No. of bits in activation representation for different layers')
+ par.add_argument('--stocRound',
+ help='Stochastic rounding for weights (only in training) and activations?',
+ action="store_true")
+ par.add_argument('--model_path1',
+ help='Path where saved model1 is stored and can be loaded')
+ par.add_argument('--model_path2',
+ help='Path where saved model2 is stored and can be loaded')
+ par.add_argument('--ensembleThree',
+ help='Use an ensemble of full precision and two low precision models that can be attacked directly',
+ action="store_true")
+ par.add_argument('--model_path3',
+ help='Path where saved model3 in case of combinedThree model is stored and can be loaded')
+ par.add_argument('--wbits2', type=int, default=0,
+ help='No. of bits in weight representation of model2, model1 specified using wbits')
+ par.add_argument('--abits2', type=int, default=0,
+                  help='No. of bits in activation representation of model2, model1 specified using abits')
+ par.add_argument('--wbits2List', type=int, nargs='+',
+ help='List of No. of bits in weight representation for different layers of model2')
+ par.add_argument('--abits2List', type=int, nargs='+',
+ help='List of No. of bits in activation representation for different layers of model2')
+ # extra flags for defensive distillation
+ par.add_argument('--distill', help='Train the model using distillation',
+ action="store_true")
+ par.add_argument('--student_epochs', type=int, default=50,
+ help='No. of epochs for which the student model is trained')
+ # extra flags for input gradient regularization
+ par.add_argument('--inpgradreg',
+ help='Train the model using input gradient regularization',
+ action="store_true")
+ par.add_argument('--l2dbl', type=int, default=0,
+ help='l2 double backprop penalty')
+ par.add_argument('--l2cs', type=int, default=0,
+ help='l2 certainty sensitivity penalty')
+
+
+ par.add_argument("--n-inner-points", default=999, type=int)
+ par.add_argument("--n-boundary-points", default=1, type=int)
+
+ par.add_argument("--robust-attack", action="store_true")
+ par.add_argument("--use-labels", action="store_true")
+ par.add_argument("--sample-from-corners", action="store_true")
+
+ FLAGS = par.parse_args()
+
+ import cifar10_attack
+ cifar10_attack.FLAGS = FLAGS
+
+ if FLAGS.gpu:
+ os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
+
+ tf.app.run()
diff --git a/case_studies/empir/examples/imagenet_preprocessing.py b/case_studies/empir/examples/imagenet_preprocessing.py
new file mode 100644
index 0000000..da1a333
--- /dev/null
+++ b/case_studies/empir/examples/imagenet_preprocessing.py
@@ -0,0 +1,407 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Provides utilities to preprocess images.
+Training images are sampled using the provided bounding boxes, and subsequently
+cropped to the sampled bounding box. Images are additionally flipped randomly,
+then resized to the target output size (without aspect-ratio preservation).
+Images used during evaluation are resized (with aspect-ratio preservation) and
+centrally cropped.
+All images undergo mean color subtraction.
+Note that these steps are colloquially referred to as "ResNet preprocessing,"
+and they differ from "VGG preprocessing," which does not use bounding boxes
+and instead does an aspect-preserving resize followed by random crop during
+training. (These both differ from "Inception preprocessing," which introduces
+color distortion steps.)
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+import numpy as np
+
+_R_MEAN = 123.68
+_G_MEAN = 116.78
+_B_MEAN = 103.94
+_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
+
+# Standard-deviation values taken from the DoReFa-Net preprocessing.
+_R_STD = 0.229*255
+_G_STD = 0.224*255
+_B_STD = 0.225*255
+_CHANNEL_STDS = [_R_STD, _G_STD, _B_STD]
+
+# The lower bound for the smallest side of the image for aspect-preserving
+# resizing. For example, if an image is 500 x 1000, it will be resized to
+# _RESIZE_MIN x (_RESIZE_MIN * 2).
+_RESIZE_MIN = 256
+
+
+def _decode_crop_and_flip(image_buffer, bbox, num_channels):
+ """Crops the given image to a random part of the image, and randomly flips.
+ We use the fused decode_and_crop op, which performs better than the two ops
+ used separately in series, but note that this requires that the image be
+ passed in as an un-decoded string Tensor.
+ Args:
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ num_channels: Integer depth of the image buffer for decoding.
+ Returns:
+ 3-D tensor with cropped image.
+ """
+ # A large fraction of image datasets contain a human-annotated bounding box
+ # delineating the region of the image containing the object of interest. We
+ # choose to create a new bounding box for the object which is a randomly
+ # distorted version of the human-annotated bounding box that obeys an
+ # allowed range of aspect ratios, sizes and overlap with the human-annotated
+ # bounding box. If no box is supplied, then we assume the bounding box is
+ # the entire image.
+ sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
+ tf.image.extract_jpeg_shape(image_buffer),
+ bounding_boxes=bbox,
+ min_object_covered=0.1,
+ aspect_ratio_range=[0.75, 1.33],
+ area_range=[0.05, 1.0],
+ max_attempts=100,
+ use_image_if_no_bounding_boxes=True)
+ bbox_begin, bbox_size, _ = sample_distorted_bounding_box
+
+ # Reassemble the bounding box in the format the crop op requires.
+ offset_y, offset_x, _ = tf.unstack(bbox_begin)
+ target_height, target_width, _ = tf.unstack(bbox_size)
+ crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
+
+ # Use the fused decode and crop op here, which is faster than each in series.
+ cropped = tf.image.decode_and_crop_jpeg(
+ image_buffer, crop_window, channels=num_channels)
+
+ # Flip to add a little more random distortion in.
+ cropped = tf.image.random_flip_left_right(cropped)
+ return cropped
+
+
+def _central_crop(image, crop_height, crop_width):
+ """Performs central crops of the given image list.
+ Args:
+ image: a 3-D image tensor
+ crop_height: the height of the image following the crop.
+ crop_width: the width of the image following the crop.
+ Returns:
+ 3-D tensor with cropped image.
+ """
+ shape = tf.shape(image)
+ height, width = shape[0], shape[1]
+
+ amount_to_be_cropped_h = (height - crop_height)
+ crop_top = amount_to_be_cropped_h // 2
+ amount_to_be_cropped_w = (width - crop_width)
+ crop_left = amount_to_be_cropped_w // 2
+ return tf.slice(
+ image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
+
+
+def _mean_image_subtraction(image, means, num_channels):
+ """Subtracts the given means from each image channel.
+ For example:
+ means = [123.68, 116.779, 103.939]
+ image = _mean_image_subtraction(image, means)
+ Note that the rank of `image` must be known.
+ Args:
+ image: a tensor of size [height, width, C].
+ means: a C-vector of values to subtract from each channel.
+ num_channels: number of color channels in the image that will be distorted.
+ Returns:
+ the centered image.
+ Raises:
+ ValueError: If the rank of `image` is unknown, if `image` has a rank other
+ than three or if the number of channels in `image` doesn't match the
+ number of values in `means`.
+ """
+ if image.get_shape().ndims != 3:
+ raise ValueError('Input must be of size [height, width, C>0]')
+
+ if len(means) != num_channels:
+ raise ValueError('len(means) must match the number of channels')
+
+ # We have a 1-D tensor of means; convert to 3-D.
+ means = tf.expand_dims(tf.expand_dims(means, 0), 0)
+
+ return image - means
+
+
+def _smallest_size_at_least(height, width, resize_min):
+ """Computes new shape with the smallest side equal to `smallest_side`.
+ Computes new shape with the smallest side equal to `smallest_side` while
+ preserving the original aspect ratio.
+ Args:
+ height: an int32 scalar tensor indicating the current height.
+ width: an int32 scalar tensor indicating the current width.
+ resize_min: A python integer or scalar `Tensor` indicating the size of
+ the smallest side after resize.
+ Returns:
+ new_height: an int32 scalar tensor indicating the new height.
+ new_width: an int32 scalar tensor indicating the new width.
+ """
+ resize_min = tf.cast(resize_min, tf.float32)
+
+ # Convert to floats to make subsequent calculations go smoothly.
+ height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
+
+ smaller_dim = tf.minimum(height, width)
+ scale_ratio = resize_min / smaller_dim
+
+ # Convert back to ints to make heights and widths that TF ops will accept.
+ new_height = tf.cast(height * scale_ratio, tf.int32)
+ new_width = tf.cast(width * scale_ratio, tf.int32)
+
+ return new_height, new_width
+
+
+def _aspect_preserving_resize(image, resize_min):
+ """Resize images preserving the original aspect ratio.
+ Args:
+ image: A 3-D image `Tensor`.
+ resize_min: A python integer or scalar `Tensor` indicating the size of
+ the smallest side after resize.
+ Returns:
+ resized_image: A 3-D tensor containing the resized image.
+ """
+ shape = tf.shape(image)
+ height, width = shape[0], shape[1]
+
+ new_height, new_width = _smallest_size_at_least(height, width, resize_min)
+
+ return _resize_image(image, new_height, new_width)
+
+
+def _resize_image(image, height, width):
+ """Simple wrapper around tf.resize_images.
+ This is primarily to make sure we use the same `ResizeMethod` and other
+ details each time.
+ Args:
+ image: A 3-D image `Tensor`.
+ height: The target height for the resized image.
+ width: The target width for the resized image.
+ Returns:
+ resized_image: A 3-D tensor containing the resized image. The first two
+ dimensions have the shape [height, width].
+ """
+ return tf.image.resize_images(
+ image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
+ align_corners=False)
+
+def _lighting_noise(image):
+ eigval = np.asarray([0.2175, 0.0188, 0.0045][::1])*255.0
+ eigvec = np.asarray([[-0.5675, 0.7192, 0.4009],
+ [-0.5808, -0.0045, -0.814],
+ [-0.5836, -0.6948, 0.4203]])[::-1, ::-1]
+ std = 0.1
+ v = np.random.randn(3)*std #random number
+ v = eigval*v
+ inc = np.dot(eigvec, v).reshape((3,))
+ inc = tf.convert_to_tensor(inc, dtype=tf.float32)
+ # image = np.add(image, inc)
+ image = tf.add(image, inc)
+ return image
+
+
+def preprocess_image(image_buffer, bbox, output_height, output_width,
+ num_channels, is_training=False):
+ """Preprocesses the given image.
+ Preprocessing includes decoding, cropping, and resizing for both training
+ and eval images. Training preprocessing, however, introduces some random
+ distortion of the image to improve accuracy.
+ Args:
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ output_height: The height of the image after preprocessing.
+ output_width: The width of the image after preprocessing.
+ num_channels: Integer depth of the image buffer for decoding.
+ is_training: `True` if we're preprocessing the image for training and
+ `False` otherwise.
+ Returns:
+ A preprocessed image.
+ """
+ if is_training:
+ # For training, we want to randomize some of the distortions.
+ image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
+ image = _resize_image(image, output_height, output_width)
+ else:
+ # For validation, we want to decode, resize, then just crop the middle.
+ image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
+ image = _aspect_preserving_resize(image, _RESIZE_MIN)
+ image = _central_crop(image, output_height, output_width)
+
+ image.set_shape([output_height, output_width, num_channels])
+
+ return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
+
+def preprocess_image2(image_buffer, bbox, output_height, output_width,
+ num_channels, is_training=False):
+ """Preprocesses the given image.
+ Preprocessing includes decoding, cropping, and resizing for both training
+ and eval images. Training preprocessing, however, introduces some random
+ distortion of the image to improve accuracy.
+ Args:
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ output_height: The height of the image after preprocessing.
+ output_width: The width of the image after preprocessing.
+ num_channels: Integer depth of the image buffer for decoding.
+ is_training: `True` if we're preprocessing the image for training and
+ `False` otherwise.
+ Returns:
+ A preprocessed image.
+ """
+ if is_training:
+ # For training, we want to randomize some of the distortions.
+ image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
+ image = _resize_image(image, output_height, output_width)
+ else:
+ # For validation, we want to decode, resize, then just crop the middle.
+ image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
+ image = _aspect_preserving_resize(image, _RESIZE_MIN)
+ image = _central_crop(image, output_height, output_width)
+ image = tf.to_float(image)
+
+ image.set_shape([output_height, output_width, num_channels])
+ image = tf.slice(image, [0, 0, 0], [output_height, output_width, -1])
+
+ # Slice the image into different channels
+ image_channel1 = tf.slice(image, [0, 0, 0], [-1, -1, 1])
+ image_channel2 = tf.slice(image, [0, 0, 1], [-1, -1, 1])
+ image_channel3 = tf.slice(image, [0, 0, 2], [-1, -1, 1])
+
+ # Change RGB to BGR based on the preprocessing in myalexnet_forward_newtf.py ==> helps in increasing accuracy on the pretrained model
+ image = tf.concat([image_channel3, image_channel2, image_channel1], 2)
+
+ return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
+
+def preprocess_image3(image_buffer, bbox, output_height, output_width,
+ num_channels, is_training=False):
+ """Preprocesses the given image.
+ Preprocessing includes decoding, cropping, and resizing for both training
+ and eval images. Training preprocessing, however, introduces some random
+ distortion of the image to improve accuracy. Also changes RGB to BGR
+ and divides by the standard dev
+ Args:
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ output_height: The height of the image after preprocessing.
+ output_width: The width of the image after preprocessing.
+ num_channels: Integer depth of the image buffer for decoding.
+ is_training: `True` if we're preprocessing the image for training and
+ `False` otherwise.
+ Returns:
+ A preprocessed image.
+ """
+ if is_training:
+ # For training, we want to randomize some of the distortions.
+ image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
+ image = _resize_image(image, output_height, output_width)
+ else:
+ # For validation, we want to decode, resize, then just crop the middle.
+ image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
+ image = _aspect_preserving_resize(image, _RESIZE_MIN)
+ image = _central_crop(image, output_height, output_width)
+ image = tf.to_float(image)
+
+ image.set_shape([output_height, output_width, num_channels])
+ image = tf.slice(image, [0, 0, 0], [output_height, output_width, -1])
+
+ # Slice the image into different channels
+ image_channel1 = tf.slice(image, [0, 0, 0], [-1, -1, 1])
+ image_channel2 = tf.slice(image, [0, 0, 1], [-1, -1, 1])
+ image_channel3 = tf.slice(image, [0, 0, 2], [-1, -1, 1])
+
+ # Change RGB to BGR based on the preprocessing in myalexnet_forward_newtf.py ==> helps in increasing accuracy
+ image = tf.concat([image_channel3, image_channel2, image_channel1], 2)
+
+ image = _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
+ image = tf.divide(image, _CHANNEL_STDS)
+ return image
+
+def preprocess_image4(image_buffer, bbox, output_height, output_width,
+ num_channels, is_training=False):
+ """Preprocesses the given image.
+ Preprocessing includes decoding, cropping, and resizing for both training
+ and eval images. Training preprocessing, however, introduces some random
+ distortion of the image to improve accuracy. Also adds lighting noise,
+ changes RGB to BGR and divides by the standard dev
+ Args:
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+ where each coordinate is [0, 1) and the coordinates are arranged as
+ [ymin, xmin, ymax, xmax].
+ output_height: The height of the image after preprocessing.
+ output_width: The width of the image after preprocessing.
+ num_channels: Integer depth of the image buffer for decoding.
+ is_training: `True` if we're preprocessing the image for training and
+ `False` otherwise.
+ Returns:
+ A preprocessed image.
+ """
+ if is_training:
+ # For training, we want to randomize some of the distortions.
+ image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
+ image = _resize_image(image, output_height, output_width)
+ else:
+ # For validation, we want to decode, resize, then just crop the middle.
+ image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
+ image = _aspect_preserving_resize(image, _RESIZE_MIN)
+ image = _central_crop(image, output_height, output_width)
+ image = tf.to_float(image)
+
+ image.set_shape([output_height, output_width, num_channels])
+ image = tf.slice(image, [0, 0, 0], [output_height, output_width, -1])
+
+ # Slice the image into different channels
+ image_channel1 = tf.slice(image, [0, 0, 0], [-1, -1, 1])
+ image_channel2 = tf.slice(image, [0, 0, 1], [-1, -1, 1])
+ image_channel3 = tf.slice(image, [0, 0, 2], [-1, -1, 1])
+
+ # Change RGB to BGR based on the preprocessing in myalexnet_forward_newtf.py ==> helps in increasing accuracy
+ image = tf.concat([image_channel3, image_channel2, image_channel1], 2)
+
+ if is_training: # add lighting noise
+ image = _lighting_noise(image)
+
+ image = _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
+ image = tf.divide(image, _CHANNEL_STDS)
+ return image
diff --git a/case_studies/empir/examples/mnist_attack.py b/case_studies/empir/examples/mnist_attack.py
new file mode 100644
index 0000000..c263108
--- /dev/null
+++ b/case_studies/empir/examples/mnist_attack.py
@@ -0,0 +1,681 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy as np
+from six.moves import xrange
+import tensorflow as tf
+from tensorflow.python.platform import flags
+
+import time
+import argparse
+import logging
+import os
+import sys
+
+from modified_cleverhans.utils import parse_model_settings, build_model_save_path
+from modified_cleverhans.utils import set_log_level, AccuracyReport
+from modified_cleverhans.utils_mnist import data_mnist
+from modified_cleverhans.utils_tf import model_train, model_eval, model_eval_ensemble, batch_eval, tf_model_load
+from modified_cleverhans.utils_tf import model_train_teacher, model_train_student, model_train_inpgrad_reg #for training with input gradient regularization
+
+
+FLAGS = flags.FLAGS
+
+# Scaling input to softmax
+INIT_T = 1.0
+#ATTACK_T = 1.0
+ATTACK_T = 0.25
+
+# enum attack types
+ATTACK_CARLINI_WAGNER_L2 = 0
+ATTACK_JSMA = 1
+ATTACK_FGSM = 2
+ATTACK_MADRYETAL = 3
+ATTACK_BASICITER = 4
+
+# enum adversarial training types
+ADVERSARIAL_TRAINING_MADRYETAL = 1
+ADVERSARIAL_TRAINING_FGSM = 2
+MAX_EPS = 0.3
+
+MAX_BATCH_SIZE = 100
+
+
+def mnist_attack(train_start=0, train_end=60000, test_start=0,
+ test_end=10000, viz_enabled=True, nb_epochs=6,
+ batch_size=128, nb_filters=64,
+ nb_samples=10, learning_rate=0.001,
+ eps=0.3, attack=0,
+ attack_iterations=100, model_path=None,
+ targeted=False, rand=False,
+ stocRound=False, lowprecision=False,
+ wbits=0, abits=0, wbitsList=0, abitsList=0, wbits2=0, abits2=0, wbits2List=0, abits2List=0,
+ ensembleThree=False, model_path1=None, model_path2=None, model_path3=None,
+ distill = False, inpgradreg = False, l2dbl = 0, l2cs = 0,
+ debug=None, test=False,
+ data_dir=None, delay=0, adv=0, nb_iter=40):
+ """
+ MNIST tutorial for generic attack
+ :param train_start: index of first training set example
+ :param train_end: index of last training set example
+ :param test_start: index of first test set example
+ :param test_end: index of last test set example
+ :param viz_enabled: (boolean) activate plots of adversarial examples
+ :param nb_epochs: number of epochs to train model
+ :param batch_size: size of training batches
+ :param nb_classes: number of output classes
+ :param nb_samples: number of test inputs to attack
+ :param learning_rate: learning rate for training
+ :param model_path: path to the model file
+ :param targeted: should we run a targeted attack? or untargeted?
+ :return: an AccuracyReport object
+ """
+ # Object used to keep track of (and return) key accuracies
+ report = AccuracyReport()
+
+ # MNIST-specific dimensions
+ img_rows = 28
+ img_cols = 28
+ channels = 1
+ nb_classes = 10
+
+ # Set TF random seed to improve reproducibility
+ tf.set_random_seed(1237)
+
+ # Create TF session
+ sess = tf.Session()
+ print("Created TensorFlow session.")
+
+ if debug:
+ set_log_level(logging.DEBUG)
+ else:
+ set_log_level(logging.WARNING) # for running on sharcnet
+
+ # Get MNIST test data
+ X_train, Y_train, X_test, Y_test = data_mnist(datadir=data_dir,
+ train_start=train_start,
+ train_end=train_end,
+ test_start=test_start,
+ test_end=test_end)
+
+ # Define input TF placeholder
+ x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
+ y = tf.placeholder(tf.float32, shape=(None, nb_classes))
+ phase = tf.placeholder(tf.bool, name='phase')
+
+ # for attempting to break unscaled network.
+ logits_scalar = tf.placeholder_with_default(
+ INIT_T, shape=(), name="logits_temperature")
+
+ save = False
+ train_from_scratch = False
+ if ensembleThree:
+ if (model_path1 is None or model_path2 is None or model_path3 is None):
+ train_from_scratch = True
+ else:
+ train_from_scratch = False
+ elif model_path is not None:
+ if os.path.exists(model_path):
+ # check for existing model in immediate subfolder
+ if any(f.endswith('.meta') for f in os.listdir(model_path)):
+ train_from_scratch = False
+ else:
+ model_path = build_model_save_path(
+ model_path, batch_size, nb_filters, learning_rate, nb_epochs, adv, delay)
+ print(model_path)
+ save = True
+ train_from_scratch = True
+ else:
+ train_from_scratch = True # train from scratch, but don't save since no path given
+
+ # Define TF model graph
+ if ensembleThree:
+ if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified for first model
+ if (wbits==0) or (abits==0):
+ print("Error: the number of bits for constant precision weights and activations across layers for the first model have to specified using wbits1 and abits1 flags")
+ sys.exit(1)
+ else:
+ fixedPrec1 = 1
+ elif (len(wbitsList) != 3) or (len(abitsList) != 3):
+ print("Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the first model")
+ sys.exit(1)
+ else:
+ fixedPrec1 = 0
+
+ if (wbits2List is None) or (abits2List is None): # Layer wise separate quantization not specified for second model
+ if (wbits2==0) or (abits2==0):
+ print("Error: the number of bits for constant precision weights and activations across layers for the second model have to specified using wbits1 and abits1 flags")
+ sys.exit(1)
+ else:
+ fixedPrec2 = 1
+ elif (len(wbits2List) != 3) or (len(abits2List) != 3):
+ print("Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the second model")
+ sys.exit(1)
+ else:
+ fixedPrec2 = 0
+
+    if (fixedPrec2 != 1) or (fixedPrec1 != 1): # At least one of the models has separate per-layer precisions
+      fixedPrec=0
+      print("At least one of the models uses separate per-layer precisions")
+      if (fixedPrec1 == 1): # first model has fixed precision
+ abitsList = (abits, abits, abits)
+ wbitsList = (wbits, wbits, wbits)
+      if (fixedPrec2 == 1): # second model has fixed precision
+ abits2List = (abits2, abits2, abits2)
+ wbits2List = (wbits2, wbits2, wbits2)
+ else:
+ fixedPrec=1
+
+ if (train_from_scratch):
+ print ("The ensemble model cannot be trained from scratch")
+ sys.exit(1)
+ if fixedPrec == 1:
+ from modified_cleverhans_tutorials.tutorial_models import make_ensemble_three_cnn
+ model = make_ensemble_three_cnn(
+ phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbits, abits, wbits2, abits2, nb_filters=nb_filters)
+ else:
+ from modified_cleverhans_tutorials.tutorial_models import make_layerwise_three_combined_cnn
+ model = make_layerwise_three_combined_cnn(
+ phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbitsList, abitsList, wbits2List, abits2List, nb_filters=nb_filters)
+ elif lowprecision: # For generic DoReFa net style low precision
+ if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified
+ if (wbits==0) or (abits==0):
+ print("Error: the number of bits for constant precision weights and activations across layers have to specified using wbits and abits flags")
+ sys.exit(1)
+ else:
+ fixedPrec = 1
+ elif (len(wbitsList) != 3) or (len(abitsList) != 3):
+ print("Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers")
+ sys.exit(1)
+ else:
+ fixedPrec = 0
+
+ if fixedPrec:
+ from modified_cleverhans_tutorials.tutorial_models import make_basic_lowprecision_cnn
+ model = make_basic_lowprecision_cnn(
+ phase, logits_scalar, 'lp_', wbits, abits, nb_filters=nb_filters, stocRound=stocRound)
+ else:
+ from modified_cleverhans_tutorials.tutorial_models import make_layerwise_lowprecision_cnn
+ model = make_layerwise_lowprecision_cnn(
+ phase, logits_scalar, 'lp_', wbitsList, abitsList, nb_filters=nb_filters, stocRound=stocRound)
+ elif distill:
+ from modified_cleverhans_tutorials.tutorial_models import make_distilled_cnn
+ model = make_distilled_cnn(phase, logits_scalar,
+ 'teacher_fp_', 'fp_', nb_filters=nb_filters)
+ else:
+ if rand:
+ print('rand=True')
+ from modified_cleverhans_tutorials.tutorial_models import make_scaled_rand_cnn
+ model = make_scaled_rand_cnn(
+ phase, logits_scalar, 'fp_rand', nb_filters=nb_filters)
+ else:
+ from modified_cleverhans_tutorials.tutorial_models import make_basic_cnn
+ model = make_basic_cnn(phase, logits_scalar,
+ 'fp_', nb_filters=nb_filters)
+
+ # separate predictions of teacher for distilled training
+ if distill:
+ teacher_preds = model.teacher_call(x, reuse=False)
+ teacher_logits = model.get_teacher_logits(x, reuse=False)
+ # separate calling function for ensemble models
+ if ensembleThree:
+ preds = model.ensemble_call(x, reuse=False)
+ else:
+ ##default
+ preds = model(x, reuse=False) # * logits_scalar
+ print("Defined TensorFlow model graph.")
+
+ ###########################################################################
+ # Training the model using TensorFlow
+ ###########################################################################
+ rng = np.random.RandomState([2017, 8, 30])
+
+ # Train an MNIST model
+ train_params = {
+ 'nb_epochs': nb_epochs,
+ 'batch_size': batch_size,
+ 'learning_rate': learning_rate,
+ 'loss_name': 'train loss',
+ 'filename': 'model',
+ 'reuse_global_step': False,
+ 'train_scope': 'train',
+ 'is_training': True
+ }
+
+ if adv != 0:
+ if adv == ADVERSARIAL_TRAINING_MADRYETAL:
+ from modified_cleverhans.attacks import MadryEtAl
+ train_attack_params = {'eps': MAX_EPS, 'eps_iter': 0.01,
+ 'nb_iter': nb_iter}
+ train_attacker = MadryEtAl(model, sess=sess)
+
+ elif adv == ADVERSARIAL_TRAINING_FGSM:
+ from modified_cleverhans.attacks import FastGradientMethod
+ stddev = int(np.ceil((MAX_EPS * 255) // 2))
+ train_attack_params = {'eps': tf.abs(tf.truncated_normal(
+ shape=(batch_size, 1, 1, 1), mean=0, stddev=stddev))}
+ train_attacker = FastGradientMethod(model, back='tf', sess=sess)
+ # create the adversarial trainer
+ train_attack_params.update({'clip_min': 0., 'clip_max': 1.})
+ adv_x_train = train_attacker.generate(x, phase, **train_attack_params)
+ preds_adv_train = model.get_probs(adv_x_train)
+
+ eval_attack_params = {'eps': MAX_EPS, 'clip_min': 0., 'clip_max': 1.}
+ adv_x_eval = train_attacker.generate(x, phase, **eval_attack_params)
+ preds_adv_eval = model.get_probs(adv_x_eval) # * logits_scalar
+
+ def evaluate():
+ # Evaluate the accuracy of the MNIST model on clean test examples
+ eval_params = {'batch_size': batch_size}
+ if ensembleThree:
+ acc = model_eval_ensemble(
+ sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
+ else:
+ acc = model_eval(
+ sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
+ report.clean_train_clean_eval = acc
+ assert X_test.shape[0] == test_end - test_start, X_test.shape
+ print('Test accuracy on legitimate examples: %0.4f' % acc)
+
+ if adv != 0:
+ # Accuracy of the adversarially trained model on adversarial
+ # examples
+ acc = model_eval(
+ sess, x, y, preds_adv_eval, X_test, Y_test, phase=phase, args=eval_params)
+ print('Test accuracy on adversarial examples: %0.4f' % acc)
+
+ acc = model_eval(
+ sess, x, y, preds_adv_eval, X_test, Y_test,
+ phase=phase, args=eval_params, feed={logits_scalar: ATTACK_T})
+ print('Test accuracy on adversarial examples (scaled): %0.4f' % acc)
+
+ if train_from_scratch:
+ if save:
+ train_params.update({'log_dir': model_path})
+ if adv and delay > 0:
+ train_params.update({'nb_epochs': delay})
+
+ # do clean training for 'nb_epochs' or 'delay' epochs
+ if distill:
+      temperature = 100  # a temperature of 1 means the teacher predictions are used as is
+ teacher_scaled_preds_val = model_train_teacher(sess, x, y, teacher_preds, teacher_logits,
+ temperature, X_train, Y_train, phase=phase, args=train_params, rng=rng)
+ eval_params = {'batch_size': batch_size}
+ teacher_acc = model_eval(
+ sess, x, y, teacher_preds, X_test, Y_test, phase=phase, args=eval_params)
+ print('Test accuracy of the teacher model on legitimate examples: %0.4f' % teacher_acc)
+ print('Training the student model...')
+ student_train_params = {
+ 'nb_epochs': 50,
+ 'batch_size': batch_size,
+ 'learning_rate': learning_rate,
+ 'loss_name': 'train loss',
+ 'filename': 'model',
+ 'reuse_global_step': False,
+ 'train_scope': 'train',
+ 'is_training': True
+ }
+ if save:
+ student_train_params.update({'log_dir': model_path})
+ y_teacher = tf.placeholder(tf.float32, shape=(None, nb_classes))
+ model_train_student(sess, x, y, preds, temperature, X_train, Y_train, y_teacher=y_teacher,
+ teacher_preds=teacher_scaled_preds_val, alpha=0.5, beta=0.5, phase=phase, evaluate=evaluate, args=student_train_params, save=save, rng=rng)
+ elif inpgradreg:
+ model_train_inpgrad_reg(sess, x, y, preds, X_train, Y_train, phase=phase,
+ evaluate=evaluate, l2dbl = l2dbl, l2cs = l2cs, args=train_params, save=save, rng=rng)
+ elif test:
+ model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
+ evaluate=evaluate, args=train_params, save=save, rng=rng)
+ else:
+ model_train(sess, x, y, preds, X_train, Y_train,
+ phase=phase, args=train_params, save=save, rng=rng)
+
+ # optionally do additional adversarial training
+ if adv:
+ print("Adversarial training for %d epochs" % (nb_epochs - delay))
+ train_params.update({'nb_epochs': nb_epochs - delay})
+ train_params.update({'reuse_global_step': True})
+ if test:
+ model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
+ predictions_adv=preds_adv_train, evaluate=evaluate, args=train_params,
+ save=save, rng=rng)
+ else:
+ model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
+ predictions_adv=preds_adv_train, args=train_params,
+ save=save, rng=rng)
+ else:
+    if ensembleThree: ## Ensemble models have to be loaded from different paths
+ variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
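+      # The first 8 global variables belong to the two low-precision sub-models
+      # (4 variables each); the remaining ones to the full-precision model.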
+ stored_variables = ['lp_conv1_init/k', 'lp_conv2_bin_init/k', 'lp_conv3_bin_init/k', 'lp_logits_init/W']
+ variable_dict = dict(zip(stored_variables, variables[:4]))
+ # Restore the first set of variables from model_path1
+ saver = tf.train.Saver(variable_dict)
+ saver.restore(sess, tf.train.latest_checkpoint(model_path1))
+ # Restore the second set of variables from model_path2
+ variable_dict = dict(zip(stored_variables, variables[4:8]))
+ saver2 = tf.train.Saver(variable_dict)
+ saver2.restore(sess, tf.train.latest_checkpoint(model_path2))
+ stored_variables = ['fp_conv1_init/k', 'fp_conv2_init/k', 'fp_conv3_init/k', 'fp_logits_init/W']
+ variable_dict = dict(zip(stored_variables, variables[8:]))
+ saver3 = tf.train.Saver(variable_dict)
+ saver3.restore(sess, tf.train.latest_checkpoint(model_path3))
+ else: #default below
+ tf_model_load(sess, model_path)
+ print('Restored model from %s' % model_path)
+ evaluate()
+
+ # Evaluate the accuracy of the MNIST model on legitimate test examples
+ eval_params = {'batch_size': batch_size}
+ if ensembleThree: ## Ensemble models have to be evaluated with a separate function
+ accuracy = model_eval_ensemble(sess, x, y, preds, X_test, Y_test, phase=phase, feed={phase: False}, args=eval_params)
+ else: #default below
+ accuracy = model_eval(sess, x, y, preds, X_test, Y_test, phase=phase,
+ feed={phase: False}, args=eval_params)
+ assert X_test.shape[0] == test_end - test_start, X_test.shape
+ print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
+ report.clean_train_clean_eval = accuracy
+
+ ###########################################################################
+ # Build dataset
+ ###########################################################################
+ if viz_enabled:
+ assert nb_samples == nb_classes
+ idxs = [np.where(np.argmax(Y_test, axis=1) == i)[0][0]
+ for i in range(nb_classes)]
+ viz_rows = nb_classes if targeted else 2
+ # Initialize our array for grid visualization
+ grid_shape = (nb_classes, viz_rows, img_rows, img_cols, channels)
+ grid_viz_data = np.zeros(grid_shape, dtype='f')
+
+ if targeted:
+ from modified_cleverhans.utils import build_targeted_dataset
+ if viz_enabled:
+ from modified_cleverhans.utils import grid_visual
+ adv_inputs, true_labels, adv_ys = build_targeted_dataset(
+ X_test, Y_test, idxs, nb_classes, img_rows, img_cols, channels)
+ else:
+ adv_inputs, true_labels, adv_ys = build_targeted_dataset(
+ X_test, Y_test, np.arange(nb_samples), nb_classes, img_rows, img_cols, channels)
+ else:
+ if viz_enabled:
+ from modified_cleverhans.utils import pair_visual
+ adv_inputs = X_test[idxs]
+ else:
+ adv_inputs = X_test[:nb_samples]
+
+ ###########################################################################
+ # Craft adversarial examples using generic approach
+ ###########################################################################
+ if targeted:
+ att_batch_size = np.clip(
+ nb_samples * (nb_classes - 1), a_max=MAX_BATCH_SIZE, a_min=1)
+ nb_adv_per_sample = nb_classes - 1
+ yname = "y_target"
+
+ else:
+ att_batch_size = np.minimum(nb_samples, MAX_BATCH_SIZE)
+ nb_adv_per_sample = 1
+ adv_ys = None
+ yname = "y"
+
+ print('Crafting ' + str(nb_samples) + ' * ' + str(nb_adv_per_sample) +
+ ' adversarial examples')
+ print("This could take some time ...")
+
+ if ensembleThree:
+ model_type = 'ensembleThree'
+ else:
+ model_type = 'default'
+
+ if attack == ATTACK_CARLINI_WAGNER_L2:
+ print('Attack: CarliniWagnerL2')
+ from modified_cleverhans.attacks import CarliniWagnerL2
+ attacker = CarliniWagnerL2(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
+ attack_params = {'binary_search_steps': 1,
+ 'max_iterations': attack_iterations,
+ 'learning_rate': 0.1,
+ 'batch_size': att_batch_size,
+ 'initial_const': 10,
+ }
+ elif attack == ATTACK_JSMA:
+ print('Attack: SaliencyMapMethod')
+ from modified_cleverhans.attacks import SaliencyMapMethod
+ attacker = SaliencyMapMethod(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
+ attack_params = {'theta': 1., 'gamma': 0.1}
+ elif attack == ATTACK_FGSM:
+ print('Attack: FastGradientMethod')
+ from modified_cleverhans.attacks import FastGradientMethod
+ attacker = FastGradientMethod(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
+ attack_params = {'eps': eps}
+ elif attack == ATTACK_MADRYETAL:
+ print('Attack: MadryEtAl')
+ from modified_cleverhans.attacks import MadryEtAl
+ attacker = MadryEtAl(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
+ attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
+ elif attack == ATTACK_BASICITER:
+ print('Attack: BasicIterativeMethod')
+ from modified_cleverhans.attacks import BasicIterativeMethod
+ attacker = BasicIterativeMethod(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
+ attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
+ else:
+ print("Attack undefined")
+ sys.exit(1)
+
+ attack_params.update({yname: adv_ys, 'clip_min': 0., 'clip_max': 1.})
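+  # generate_np builds (and caches) the attack's symbolic graph, then runs it
+  # in the session with the phase placeholder fed as False (inference mode).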
+ adv_np = attacker.generate_np(adv_inputs, phase, **attack_params)
+
+ '''
+ name = 'm_fgsm_eps%s_n%s.npy' % (eps, nb_samples)
+ fpath = os.path.join(
+ '/scratch/gallowaa/mnist/adversarial_examples/modified_cleverhans/', name)
+ np.savez(fpath, x=adv_np, y=Y_test[:nb_samples])
+ '''
+ '''
+ adv_x = attacker.generate(x, phase, **attack_params)
+ adv_np, = batch_eval(sess, [x], [adv_x], [adv_inputs], feed={
+ phase: False}, args=eval_params)
+ '''
+ eval_params = {'batch_size': att_batch_size}
+ if targeted:
+ print("Evaluating targeted results")
+ adv_accuracy = model_eval(sess, x, y, preds, adv_np, true_labels, phase=phase,
+ args=eval_params)
+
+ else:
+ print("Evaluating untargeted results")
+ if viz_enabled:
+ if ensembleThree:
+ adv_accuracy = model_eval_ensemble(sess, x, y, preds, adv_np, Y_test[idxs], phase=phase, args=eval_params)
+ else: #default below
+ adv_accuracy = model_eval(sess, x, y, preds, adv_np, Y_test[
+ idxs], phase=phase, args=eval_params)
+ else:
+ if ensembleThree:
+ adv_accuracy = model_eval_ensemble(sess, x, y, preds, adv_np, Y_test[:nb_samples], phase=phase, args=eval_params)
+ else: #default below
+ adv_accuracy = model_eval(sess, x, y, preds, adv_np, Y_test[
+ :nb_samples], phase=phase, args=eval_params)
+
+ if viz_enabled:
+ n = nb_classes - 1
+ for i in range(nb_classes):
+ if targeted:
+ for j in range(nb_classes):
+ if i != j:
+ if j != 0 and i != n:
+ grid_viz_data[i, j] = adv_np[j * n + i]
+ if j == 0 and i > 0 or i == n and j > 0:
+ grid_viz_data[i, j] = adv_np[j * n + i - 1]
+ else:
+ grid_viz_data[i, j] = adv_inputs[j * n]
+ else:
+ grid_viz_data[j, 0] = adv_inputs[j]
+ grid_viz_data[j, 1] = adv_np[j]
+ print(grid_viz_data.shape)
+
+ print('--------------------------------------')
+
+ # Compute the number of adversarial examples that were successfully found
+ print('Test accuracy on adversarial examples {0:.4f}'.format(adv_accuracy))
+ report.clean_train_adv_eval = 1. - adv_accuracy
+
+ # Compute the average distortion introduced by the algorithm
+ percent_perturbed = np.mean(np.sum((adv_np - adv_inputs)**2,
+ axis=(1, 2, 3))**.5)
+ print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))
+
+ # Compute number of modified features (L_0 norm)
+ nb_changed = np.where(adv_np != adv_inputs)[0].shape[0]
+ percent_perturb = np.mean(float(nb_changed) / adv_np.reshape(-1).shape[0])
+
+  # Report the average rate of perturbed features (L_0)
+ print('Avg. rate of perturbed features {0:.4f}'.format(percent_perturb))
+
+ # Friendly output for pasting into spreadsheet
+ print('{0:.4f}'.format(accuracy))
+ print('{0:.4f}'.format(adv_accuracy))
+ print('{0:.4f}'.format(percent_perturbed))
+ print('{0:.4f}'.format(percent_perturb))
+
+ # Close TF session
+ sess.close()
+
+ # Finally, block & display a grid of all the adversarial examples
+ if viz_enabled:
+ import matplotlib.pyplot as plt
+ _ = grid_visual(grid_viz_data)
+
+ return report
+
+
+def main(argv=None):
+ mnist_attack(viz_enabled=FLAGS.viz_enabled,
+ nb_epochs=FLAGS.nb_epochs,
+ batch_size=FLAGS.batch_size,
+ nb_samples=FLAGS.nb_samples,
+ nb_filters=FLAGS.nb_filters,
+ learning_rate=FLAGS.lr,
+ eps=FLAGS.eps,
+ attack=FLAGS.attack,
+ attack_iterations=FLAGS.attack_iterations,
+ model_path=FLAGS.model_path,
+ targeted=FLAGS.targeted,
+ rand=FLAGS.rand,
+ debug=FLAGS.debug,
+ test=FLAGS.test,
+ data_dir=FLAGS.data_dir,
+ lowprecision=FLAGS.lowprecision,
+ abits=FLAGS.abits,
+ wbits=FLAGS.wbits,
+ abitsList=FLAGS.abitsList,
+ wbitsList=FLAGS.wbitsList,
+ abits2=FLAGS.abits2,
+ wbits2=FLAGS.wbits2,
+ abits2List=FLAGS.abits2List,
+ wbits2List=FLAGS.wbits2List,
+ stocRound=FLAGS.stocRound,
+ model_path1=FLAGS.model_path1,
+ model_path2=FLAGS.model_path2,
+ model_path3=FLAGS.model_path3,
+ ensembleThree=FLAGS.ensembleThree,
+               distill=FLAGS.distill,
+               inpgradreg=FLAGS.inpgradreg,
+               l2dbl=FLAGS.l2dbl,
+               l2cs=FLAGS.l2cs,
+ delay=FLAGS.delay,
+ adv=FLAGS.adv,
+ nb_iter=FLAGS.nb_iter)
+
+
+if __name__ == '__main__':
+
+ par = argparse.ArgumentParser()
+
+ # Generic flags
+ par.add_argument('--gpu', help='id of GPU to use')
+ par.add_argument('--model_path', help='Path to save or load model')
+ par.add_argument('--data_dir', help='Path to training data',
+ default='/tmp/mnist')
+ par.add_argument(
+ '--viz_enabled', help='Visualize adversarial ex.', action="store_true")
+ par.add_argument(
+ '--debug', help='Sets log level to DEBUG, otherwise INFO', action="store_true")
+ par.add_argument(
+ '--test', help='Test while training, takes longer', action="store_true")
+
+ # Architecture and training specific flags
+ par.add_argument('--nb_epochs', type=int, default=15,
+ help='Number of epochs to train model')
+ par.add_argument('--nb_filters', type=int, default=64,
+ help='Number of filters in first layer')
+ par.add_argument('--batch_size', type=int, default=128,
+ help='Size of training batches')
+ par.add_argument('--lr', type=float, default=0.001,
+ help='Learning rate')
+ par.add_argument('--rand', help='Stochastic weight layer?',
+ action="store_true")
+
+ # Attack specific flags
+ par.add_argument('--attack', type=int, default=0,
+ help='Attack type, 0=CW, 1=JSMA')
+ par.add_argument("--eps", type=float, default=0.3)
+ par.add_argument('--attack_iterations', type=int, default=50,
+ help='Number of iterations to run CW attack; 1000 is good')
+ par.add_argument('--nb_samples', type=int,
+ default=10000, help='Nb of inputs to attack')
+ par.add_argument(
+ '--targeted', help='Run a targeted attack?', action="store_true")
+
+ # EMPIR specific flags
+  par.add_argument('--lowprecision', help='Use other low precision models based on DoReFa-Net', action="store_true") # For DoReFa-Net style quantization
+ par.add_argument('--wbits', type=int, default=0, help='No. of bits in weight representation')
+ par.add_argument('--abits', type=int, default=0, help='No. of bits in activation representation')
+ par.add_argument('--wbitsList', type=int, nargs='+', help='List of No. of bits in weight representation for different layers')
+ par.add_argument('--abitsList', type=int, nargs='+', help='List of No. of bits in activation representation for different layers')
+ par.add_argument('--stocRound', help='Stochastic rounding for weights (only in training) and activations?',
+ action="store_true")
+ par.add_argument('--model_path1', help='Path where saved model1 is stored and can be loaded')
+ par.add_argument('--model_path2', help='Path where saved model2 is stored and can be loaded')
+ par.add_argument('--model_path3', help='Path where saved model3 is stored and can be loaded')
+ par.add_argument('--ensembleThree', help='Use an ensemble of full precision and two low precision models', action="store_true")
+ par.add_argument('--wbits2', type=int, default=0, help='No. of bits in weight representation of model2, model1 specified using wbits')
+  par.add_argument('--abits2', type=int, default=0, help='No. of bits in activation representation of model2, model1 specified using abits')
+ par.add_argument('--wbits2List', type=int, nargs='+', help='List of No. of bits in weight representation for different layers of model2')
+ par.add_argument('--abits2List', type=int, nargs='+', help='List of No. of bits in activation representation for different layers of model2')
+ # extra flags for defensive distillation
+ par.add_argument('--distill', help='Train the model using distillation', action="store_true")
+ par.add_argument('--student_epochs', type=int, default=50, help='No. of epochs for which the student model is trained')
+ # extra flags for input gradient regularization
+ par.add_argument('--inpgradreg', help='Train the model using input gradient regularization', action="store_true")
+ par.add_argument('--l2dbl', type=int, default=0, help='l2 double backprop penalty')
+ par.add_argument('--l2cs', type=int, default=0, help='l2 certainty sensitivity penalty')
+ # Adversarial training flags
+ par.add_argument(
+ '--adv', help='Adversarial training type?', type=int, default=0)
+ par.add_argument('--delay', type=int,
+ default=10, help='Nb of epochs to delay adv training by')
+ par.add_argument('--nb_iter', type=int,
+ default=40, help='Nb of iterations of PGD')
+
+ FLAGS = par.parse_args()
+
+ if FLAGS.gpu:
+ os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
+
+ tf.app.run()
diff --git a/case_studies/empir/modified_cleverhans/__init__.py b/case_studies/empir/modified_cleverhans/__init__.py
new file mode 100644
index 0000000..9f196eb
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from modified_cleverhans.devtools.version import dev_version
+
+# Attach a hex digest to the version string to keep track of changes
+# in the development branch
+__version__ = '2.0.0-' + dev_version()
diff --git a/case_studies/empir/modified_cleverhans/attacks.py b/case_studies/empir/modified_cleverhans/attacks.py
new file mode 100644
index 0000000..dd3a0d8
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/attacks.py
@@ -0,0 +1,1304 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABCMeta
+import numpy as np
+from six.moves import xrange
+import warnings
+import collections
+
+import modified_cleverhans.utils as utils
+from modified_cleverhans.model import Model, CallableModelWrapper
+
+_logger = utils.create_logger("cleverhans.attacks")
+
+
+class Attack(object):
+
+ """
+ Abstract base class for all attack classes.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ :param model: An instance of the cleverhans.model.Model class.
+ :param back: The backend to use. Either 'tf' (default) or 'th'
+ (support for Theano is however deprecated and will
+ be removed on 2017-11-08).
+ :param sess: The tf session to run graphs in (use None for Theano)
+ """
+ if not(back == 'tf' or back == 'th'):
+ raise ValueError("Backend argument must either be 'tf' or 'th'.")
+
+ if back == 'th' and sess is not None:
+ raise Exception("A session should not be provided when using th.")
+ elif back == 'tf' and sess is None:
+ import tensorflow as tf
+ sess = tf.get_default_session()
+
+ if not isinstance(model, Model):
+ if hasattr(model, '__call__'):
+ warnings.warn("CleverHans support for supplying a callable"
+ " instead of an instance of the"
+ " cleverhans.model.Model class is"
+ " deprecated and will be dropped on 2018-01-11.")
+ else:
+ raise ValueError("The model argument should be an instance of"
+ " the cleverhans.model.Model class.")
+ if back == 'th':
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+
+ # Prepare attributes
+ self.model = model
+ self.back = back
+ self.sess = sess
+ self.model_type = model_type # for EMPIR added model_type
+ self.num_classes = num_classes # for EMPIR added number of classes
+
+ # We are going to keep track of old graphs and cache them.
+ self.graphs = {}
+
+ # When calling generate_np, arguments in the following set should be
+ # fed into the graph, as they are not structural items that require
+ # generating a new graph.
+ # This dict should map names of arguments to the types they should
+ # have.
+ # (Usually, the target class will be a feedable keyword argument.)
+ self.feedable_kwargs = {}
+
+ # When calling generate_np, arguments in the following set should NOT
+ # be fed into the graph, as they ARE structural items that require
+ # generating a new graph.
+ # This list should contain the names of the structural arguments.
+ self.structural_kwargs = []
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate the attack's symbolic graph for adversarial examples. This
+    method should be overridden in any child class that implements an
+    attack that is expressible symbolically. Otherwise, it will wrap the
+ numerical implementation as a symbolic operator.
+ :param x: The model's symbolic inputs.
+ :param **kwargs: optional parameters used by child classes.
+ :return: A symbolic representation of the adversarial examples.
+ """
+ if self.back == 'th':
+ raise NotImplementedError('Theano version not implemented.')
+
+ # the set of arguments that are structural properties of the attack
+ # if these arguments are different, we must construct a new graph
+ fixed = dict((k, v) for k, v in kwargs.items()
+ if k in self.structural_kwargs)
+
+ # the set of arguments that are passed as placeholders to the graph
+ # on each call, and can change without constructing a new graph
+ feedable = dict((k, v) for k, v in kwargs.items()
+ if k in self.feedable_kwargs)
+
+ if len(fixed) + len(feedable) < len(kwargs):
+ warnings.warn("Supplied extra keyword arguments that are not "
+ "used in the graph computation. They have been "
+ "ignored.")
+
+ if not all(isinstance(value, collections.Hashable)
+ for value in fixed.values()):
+ # we have received a fixed value that isn't hashable
+ # this means we can't cache this graph for later use,
+ # and it will have to be discarded later
+ hash_key = None
+ else:
+      # create a unique key for this set of fixed parameters
+ hash_key = tuple(sorted(fixed.items()))
+
+ if hash_key not in self.graphs:
+ self.construct_graph(phase, fixed, feedable, x, hash_key)
+
+ x, new_kwargs, x_adv = self.graphs[hash_key]
+
+ return x_adv
+
+ def construct_graph(self, phase, fixed, feedable, x_val, hash_key):
+ """
+ Construct the graph required to run the attack through generate_np.
+ :param fixed: Structural elements that require defining a new graph.
+ :param feedable: Arguments that can be fed to the same graph when
+ they take different values.
+ :param x_val: symbolic adversarial example
+ :param hash_key: the key used to store this graph in our cache
+ """
+ # try our very best to create a TF placeholder for each of the
+ # feedable keyword arguments, and check the types are one of
+ # the allowed types
+ import tensorflow as tf
+
+ class_name = str(self.__class__).split(".")[-1][:-2]
+ _logger.info("Constructing new graph for attack " + class_name)
+
+ # remove the None arguments, they are just left blank
+ for k in list(feedable.keys()):
+ if feedable[k] is None:
+ del feedable[k]
+
+ # process all of the rest and create placeholders for them
+ new_kwargs = dict(x for x in fixed.items())
+ for name, value in feedable.items():
+ given_type = self.feedable_kwargs[name]
+ if isinstance(value, np.ndarray):
+ new_shape = [None] + list(value.shape[1:])
+ new_kwargs[name] = tf.placeholder(given_type, new_shape)
+ elif isinstance(value, utils.known_number_types):
+ new_kwargs[name] = tf.placeholder(given_type, shape=[])
+ else:
+ raise ValueError("Could not identify type of argument " +
+ name + ": " + str(value))
+
+ # x is a special placeholder we always want to have
+ x_shape = [None] + list(x_val.shape)[1:]
+ x = tf.placeholder(tf.float32, shape=x_shape)
+
+ # now we generate the graph that we want
+ x_adv = self.generate(x, phase, **new_kwargs)
+
+ self.graphs[hash_key] = (x, new_kwargs, x_adv)
+
+ if len(self.graphs) >= 10:
+ warnings.warn("Calling generate_np() with multiple different "
+ "structural paramaters is inefficient and should"
+ " be avoided. Calling generate() is preferred.")
+
+ def generate_np(self, x_val, phase, **kwargs):
+ """
+ Generate adversarial examples and return them as a NumPy array.
+ Sub-classes *should not* implement this method unless they must
+ perform special handling of arguments.
+ :param x_val: A NumPy array with the original inputs.
+ :param **kwargs: optional parameters used by child classes.
+ :return: A NumPy array holding the adversarial examples.
+ """
+ if self.back == 'th':
+ raise NotImplementedError('Theano version not implemented.')
+ if self.sess is None:
+ raise ValueError("Cannot use `generate_np` when no `sess` was"
+ " provided")
+
+ # the set of arguments that are structural properties of the attack
+ # if these arguments are different, we must construct a new graph
+ fixed = dict((k, v) for k, v in kwargs.items()
+ if k in self.structural_kwargs)
+
+ # the set of arguments that are passed as placeholders to the graph
+ # on each call, and can change without constructing a new graph
+ feedable = dict((k, v) for k, v in kwargs.items()
+ if k in self.feedable_kwargs)
+
+ if len(fixed) + len(feedable) < len(kwargs):
+ warnings.warn("Supplied extra keyword arguments that are not "
+ "used in the graph computation. They have been "
+ "ignored.")
+
+ if not all(isinstance(value, collections.Hashable)
+ for value in fixed.values()):
+ # we have received a fixed value that isn't hashable
+ # this means we can't cache this graph for later use,
+ # and it will have to be discarded later
+ hash_key = None
+ else:
+      # create a unique key for this set of fixed parameters
+ hash_key = tuple(sorted(fixed.items()))
+
+ if hash_key not in self.graphs:
+ self.construct_graph(phase, fixed, feedable, x_val, hash_key)
+
+ x, new_kwargs, x_adv = self.graphs[hash_key]
+
+ feed_dict = {x: x_val, phase: False}
+
+ for name in feedable:
+ feed_dict[new_kwargs[name]] = feedable[name]
+
+ return self.sess.run(x_adv, feed_dict)
+
+ def get_or_guess_labels(self, x, kwargs):
+ """
+ Get the label to use in generating an adversarial example for x.
+ The kwargs are fed directly from the kwargs of the attack.
+ If 'y' is in kwargs, then assume it's an untargeted attack and
+ use that as the label.
+ If 'y_target' is in kwargs, then assume it's a targeted attack and
+ use that as the label.
+ Otherwise, use the model's prediction as the label and perform an
+ untargeted attack.
+ """
+ import tensorflow as tf
+
+ if 'y' in kwargs and 'y_target' in kwargs:
+ raise ValueError("Can not set both 'y' and 'y_target'.")
+ elif 'y' in kwargs:
+ labels = kwargs['y']
+ elif 'y_target' in kwargs:
+ labels = kwargs['y_target']
+ else:
+ if self.model_type == 'ensembleThree':
+ preds = self.model.get_ensemblepreds(x, reuse=True)
+ original_predictions = tf.to_float(tf.one_hot(preds, self.num_classes)) # preds just gives the class number above
+ else:
+ preds = self.model.get_probs(x, reuse=True)
+ preds_max = tf.reduce_max(preds, 1, keep_dims=True)
+ original_predictions = tf.to_float(tf.equal(preds,
+ preds_max))
+ labels = tf.stop_gradient(original_predictions)
+ if isinstance(labels, np.ndarray):
+ nb_classes = labels.shape[1]
+ else:
+ nb_classes = labels.get_shape().as_list()[1]
+ return labels, nb_classes
+
+ def parse_params(self, params=None):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+ :param params: a dictionary of attack-specific parameters
+ :return: True when parsing was successful
+ """
+ return True
+
+
+class FastGradientMethod(Attack):
+
+ """
+ This attack was originally implemented by Goodfellow et al. (2015) with the
+ infinity norm (and is known as the "Fast Gradient Sign Method"). This
+ implementation extends the attack to other norms, and is therefore called
+ the Fast Gradient Method.
+ Paper link: https://arxiv.org/abs/1412.6572
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Create a FastGradientMethod instance.
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+ super(FastGradientMethod, self).__init__(model, back, sess, model_type, num_classes)
+ self.feedable_kwargs = {'eps': np.float32,
+ 'y': np.float32,
+ 'y_target': np.float32,
+ 'clip_min': np.float32,
+ 'clip_max': np.float32}
+ self.structural_kwargs = ['ord']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'probs')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+ :param eps: (optional float) attack step size (input variation)
+ :param ord: (optional) Order of the norm (mimics NumPy).
+ Possible values: np.inf, 1 or 2.
+ :param y: (optional) A tensor with the model labels. Only provide
+ this parameter if you'd like to use true labels when crafting
+ adversarial samples. Otherwise, model predictions are used as
+ labels to avoid the "label leaking" effect (explained in this
+ paper: https://arxiv.org/abs/1611.01236). Default is None.
+ Labels should be one-hot-encoded.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ if self.back == 'tf':
+ from .attacks_tf import fgm
+ else:
+ from .attacks_th import fgm
+
+ labels, nb_classes = self.get_or_guess_labels(x, kwargs)
+
+ if self.model_type == 'ensembleThree': ## for EMPIR: extra if condition for covering the multiple combined model case
+ return fgm(x, self.model.get_combinedAvgCorrectProbs(x, reuse=True), y=labels, eps=self.eps,
+ ord=self.ord, clip_min=self.clip_min,
+ clip_max=self.clip_max,
+ targeted=(self.y_target is not None))
+ else:
+ return fgm(x, self.model.get_probs(x, reuse=True), y=labels, eps=self.eps,
+ ord=self.ord, clip_min=self.clip_min,
+ clip_max=self.clip_max,
+ targeted=(self.y_target is not None))
+
+ def parse_params(self, eps=0.3, ord=np.inf, y=None, y_target=None,
+ clip_min=None, clip_max=None, **kwargs):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+
+ Attack-specific parameters:
+ :param eps: (optional float) attack step size (input variation)
+ :param ord: (optional) Order of the norm (mimics NumPy).
+ Possible values: np.inf, 1 or 2.
+ :param y: (optional) A tensor with the model labels. Only provide
+ this parameter if you'd like to use true labels when crafting
+ adversarial samples. Otherwise, model predictions are used as
+ labels to avoid the "label leaking" effect (explained in this
+ paper: https://arxiv.org/abs/1611.01236). Default is None.
+ Labels should be one-hot-encoded.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ # Save attack-specific parameters
+
+ self.eps = eps
+ self.ord = ord
+ self.y = y
+ self.y_target = y_target
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+ if self.y is not None and self.y_target is not None:
+ raise ValueError("Must not set both y and y_target")
+ # Check if order of the norm is acceptable given current implementation
+ if self.ord not in [np.inf, int(1), int(2)]:
+ raise ValueError("Norm order must be either np.inf, 1, or 2.")
+ if self.back == 'th' and self.ord != np.inf:
+ raise NotImplementedError("The only FastGradientMethod norm "
+ "implemented for Theano is np.inf.")
+ return True
+
+
+class BasicIterativeMethod(Attack):
+
+ """
+ The Basic Iterative Method (Kurakin et al. 2016). The original paper used
+ hard labels for this attack; no label smoothing.
+ Paper link: https://arxiv.org/pdf/1607.02533.pdf
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Create a BasicIterativeMethod instance.
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+ super(BasicIterativeMethod, self).__init__(model, back, sess, model_type, num_classes)
+ self.feedable_kwargs = {'eps': np.float32,
+ 'eps_iter': np.float32,
+ 'y': np.float32,
+ 'y_target': np.float32,
+ 'clip_min': np.float32,
+ 'clip_max': np.float32}
+ self.structural_kwargs = ['ord', 'nb_iter']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'probs')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+ :param eps: (required float) maximum distortion of adversarial example
+ compared to original input
+ :param eps_iter: (required float) step size for each attack iteration
+ :param nb_iter: (required int) Number of attack iterations.
+ :param y: (optional) A tensor with the model labels.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ import tensorflow as tf
+
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ # Initialize loop variables
+ eta = 0
+
+ # Fix labels to the first model predictions for loss computation
+ if self.model_type == 'ensembleThree':
+ model_preds = self.model.get_combinedAvgCorrectProbs(x, reuse=True)
+ else:
+ model_preds = self.model.get_probs(x, reuse=True)
+ preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)
+ if self.y_target is not None:
+ y = self.y_target
+ targeted = True
+ elif self.y is not None:
+ y = self.y
+ targeted = False
+ else:
+ y = tf.to_float(tf.equal(model_preds, preds_max))
+ y = tf.stop_gradient(y)
+ targeted = False
+
+ y_kwarg = 'y_target' if targeted else 'y'
+ fgm_params = {'eps': self.eps_iter, y_kwarg: y, 'ord': self.ord,
+ 'clip_min': self.clip_min, 'clip_max': self.clip_max}
+
+ for i in range(self.nb_iter):
+ FGM = FastGradientMethod(self.model, back=self.back,
+ sess=self.sess)
+ # FGM = FastGradientMethod(self.model, back=self.back, model_type=self.model_type,
+ # num_classes=self.num_classes, sess=self.sess)
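+      # Note: this FGM instance is created with the default model_type, so the
+      # per-step gradients follow the single-model get_probs path even when the
+      # outer attack was built for the EMPIR ensemble; the model_type-aware
+      # construction above was left commented out.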
+ # Compute this step's perturbation
+ eta = FGM.generate(x + eta, phase, **fgm_params) - x
+
+ # Clipping perturbation eta to self.ord norm ball
+ if self.ord == np.inf:
+ eta = tf.clip_by_value(eta, -self.eps, self.eps)
+ elif self.ord in [1, 2]:
+ reduc_ind = list(xrange(1, len(eta.get_shape())))
+ if self.ord == 1:
+ norm = tf.reduce_sum(tf.abs(eta),
+ reduction_indices=reduc_ind,
+ keep_dims=True)
+ elif self.ord == 2:
+ norm = tf.sqrt(tf.reduce_sum(tf.square(eta),
+ reduction_indices=reduc_ind,
+ keep_dims=True))
+ eta = eta * self.eps / norm
+
+ # Define adversarial example (and clip if necessary)
+ adv_x = x + eta
+ if self.clip_min is not None and self.clip_max is not None:
+ adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+
+ return adv_x
+
+ def parse_params(self, eps=0.3, eps_iter=0.05, nb_iter=10, y=None,
+ ord=np.inf, clip_min=None, clip_max=None,
+ y_target=None, **kwargs):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+
+ Attack-specific parameters:
+ :param eps: (required float) maximum distortion of adversarial example
+ compared to original input
+ :param eps_iter: (required float) step size for each attack iteration
+ :param nb_iter: (required int) Number of attack iterations.
+ :param y: (optional) A tensor with the model labels.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+
+ # Save attack-specific parameters
+ self.eps = eps
+ self.eps_iter = eps_iter
+ self.nb_iter = nb_iter
+ self.y = y
+ self.y_target = y_target
+ self.ord = ord
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+ if self.y is not None and self.y_target is not None:
+ raise ValueError("Must not set both y and y_target")
+ # Check if order of the norm is acceptable given current implementation
+ if self.ord not in [np.inf, 1, 2]:
+ raise ValueError("Norm order must be either np.inf, 1, or 2.")
+ if self.back == 'th':
+ error_string = "BasicIterativeMethod is not implemented in Theano"
+ raise NotImplementedError(error_string)
+
+ return True
+
+
+class SaliencyMapMethod(Attack):
+
+ """
+ The Jacobian-based Saliency Map Method (Papernot et al. 2016).
+ Paper link: https://arxiv.org/pdf/1511.07528.pdf
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Create a SaliencyMapMethod instance.
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+ super(SaliencyMapMethod, self).__init__(model, back, sess, model_type, num_classes)
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'probs')
+
+ if self.back == 'th':
+ error = "Theano version of SaliencyMapMethod not implemented."
+ raise NotImplementedError(error)
+
+ import tensorflow as tf
+ self.feedable_kwargs = {'y_target': tf.float32,
+ 'phase': tf.bool}
+ self.structural_kwargs = ['theta', 'gamma',
+ 'clip_max', 'clip_min']
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+ :param theta: (optional float) Perturbation introduced to modified
+ components (can be positive or negative)
+ :param gamma: (optional float) Maximum percentage of perturbed features
+ :param clip_min: (optional float) Minimum component value for clipping
+ :param clip_max: (optional float) Maximum component value for clipping
+ :param y_target: (optional) Target tensor if the attack is targeted
+ """
+ import tensorflow as tf
+ from .attacks_tf import jacobian_graph, jsma_batch
+
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ # Define Jacobian graph wrt to this input placeholder
+ if self.model_type == 'ensembleThree':
+ preds = self.model.get_combinedAvgCorrectProbs(x, reuse=True)
+ else:
+ preds = self.model.get_probs(x, reuse=True)
+ nb_classes = preds.get_shape().as_list()[-1]
+ grads = jacobian_graph(preds, x, nb_classes)
+
+ # Define appropriate graph (targeted / random target labels)
+ if self.y_target is not None:
+ def jsma_wrap(x_val, y_target):
+ return jsma_batch(self.sess, x, preds, grads, x_val,
+ self.theta, self.gamma, self.clip_min,
+ self.clip_max, nb_classes,
+ y_target=y_target, feed={phase: False})
+
+ # Attack is targeted, target placeholder will need to be fed
+ wrap = tf.py_func(jsma_wrap, [x, self.y_target], tf.float32)
+ else:
+ def jsma_wrap(x_val):
+ return jsma_batch(self.sess, x, preds, grads, x_val,
+ self.theta, self.gamma, self.clip_min,
+ self.clip_max, nb_classes,
+ y_target=None, feed={phase: False})
+
+ # Attack is untargeted, target values will be chosen at random
+ wrap = tf.py_func(jsma_wrap, [x], tf.float32)
+
+ return wrap
+
+ def parse_params(self, theta=1., gamma=np.inf, nb_classes=None,
+ clip_min=0., clip_max=1., y_target=None, **kwargs):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+
+ Attack-specific parameters:
+ :param theta: (optional float) Perturbation introduced to modified
+ components (can be positive or negative)
+ :param gamma: (optional float) Maximum percentage of perturbed features
+ :param nb_classes: (optional int) Number of model output classes
+ :param clip_min: (optional float) Minimum component value for clipping
+ :param clip_max: (optional float) Maximum component value for clipping
+ :param y_target: (optional) Target tensor if the attack is targeted
+ """
+
+ if nb_classes is not None:
+ warnings.warn("The nb_classes argument is depricated and will "
+ "be removed on 2018-02-11")
+ self.theta = theta
+ self.gamma = gamma
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+ self.y_target = y_target
+
+ return True
+
+
+class VirtualAdversarialMethod(Attack):
+
+ """
+ This attack was originally proposed by Miyato et al. (2016) and was used
+ for virtual adversarial training.
+ Paper link: https://arxiv.org/abs/1507.00677
+
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+ super(VirtualAdversarialMethod, self).__init__(model, back, sess, model_type, num_classes)
+
+ if self.back == 'th':
+ error = "For the Theano version of VAM please call vatm directly."
+ raise NotImplementedError(error)
+
+ import tensorflow as tf
+ self.feedable_kwargs = {'eps': tf.float32, 'xi': tf.float32,
+ 'clip_min': tf.float32,
+ 'clip_max': tf.float32}
+ self.structural_kwargs = ['num_iterations']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'logits')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+    :param eps: (optional float) the epsilon (input variation parameter)
+ :param num_iterations: (optional) the number of iterations
+ :param xi: (optional float) the finite difference parameter
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ return vatm(self.model, x, self.model.get_logits(x), eps=self.eps,
+ num_iterations=self.num_iterations, xi=self.xi,
+ clip_min=self.clip_min, clip_max=self.clip_max)
+
+ def parse_params(self, eps=2.0, num_iterations=1, xi=1e-6, clip_min=None,
+ clip_max=None, **kwargs):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+
+ Attack-specific parameters:
+    :param eps: (optional float) the epsilon (input variation parameter)
+ :param num_iterations: (optional) the number of iterations
+ :param xi: (optional float) the finite difference parameter
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ # Save attack-specific parameters
+ self.eps = eps
+ self.num_iterations = num_iterations
+ self.xi = xi
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+ return True
+
+
+class CarliniWagnerL2(Attack):
+ """
+ This attack was originally proposed by Carlini and Wagner. It is an
+ iterative attack that finds adversarial examples on many defenses that
+ are robust to other attacks.
+ Paper link: https://arxiv.org/abs/1608.04644
+
+ At a high level, this attack is an iterative attack using Adam and
+ a specially-chosen loss function to find adversarial examples with
+ lower distortion than other attacks. This comes at the cost of speed,
+ as this attack is often much slower than others.
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+ super(CarliniWagnerL2, self).__init__(model, back, sess, model_type, num_classes)
+
+ if self.back == 'th':
+ raise NotImplementedError('Theano version not implemented.')
+
+ import tensorflow as tf
+ self.feedable_kwargs = {'y': tf.float32,
+ 'y_target': tf.float32,
+ 'phase': tf.bool}
+
+ self.structural_kwargs = ['batch_size', 'confidence',
+ 'targeted', 'learning_rate',
+ 'binary_search_steps', 'max_iterations',
+ 'abort_early', 'initial_const',
+ 'clip_min', 'clip_max']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'logits')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Return a tensor that constructs adversarial examples for the given
+ input. Generate uses tf.py_func in order to operate over tensors.
+
+ :param x: (required) A tensor with the inputs.
+ :param y: (optional) A tensor with the true labels for an untargeted
+ attack. If None (and y_target is None) then use the
+ original labels the classifier assigns.
+ :param y_target: (optional) A tensor with the target labels for a
+ targeted attack.
+ :param confidence: Confidence of adversarial examples: higher produces
+ examples with larger l2 distortion, but more
+ strongly classified as adversarial.
+ :param batch_size: Number of attacks to run simultaneously.
+ :param learning_rate: The learning rate for the attack algorithm.
+ Smaller values produce better results but are
+ slower to converge.
+    :param binary_search_steps: The number of times we perform binary
+                                search to find the optimal tradeoff-
+                                constant between norm of the perturbation
+                                and confidence of the classification.
+ :param max_iterations: The maximum number of iterations. Setting this
+ to a larger value will produce lower distortion
+ results. Using only a few iterations requires
+ a larger learning rate, and will produce larger
+ distortion results.
+ :param abort_early: If true, allows early aborts if gradient descent
+ is unable to make progress (i.e., gets stuck in
+ a local minimum).
+    :param initial_const: The initial tradeoff-constant to use to tune the
+                          relative importance of size of the perturbation
+                          and confidence of classification.
+                          If binary_search_steps is large, the initial
+                          constant is not important. A smaller value of
+                          this constant gives lower distortion results.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ import tensorflow as tf
+ from .attacks_tf import CarliniWagnerL2 as CWL2
+ self.parse_params(**kwargs)
+
+ labels, nb_classes = self.get_or_guess_labels(x, kwargs)
+
+ attack = CWL2(self.sess, self.model, self.batch_size,
+ self.confidence, 'y_target' in kwargs,
+ self.learning_rate, self.binary_search_steps,
+ self.max_iterations, self.abort_early,
+ self.initial_const, self.clip_min, self.clip_max,
+ nb_classes, x.get_shape().as_list()[1:])
+
+ def cw_wrap(x_val, y_val):
+ return np.array(attack.attack(x_val, y_val, phase), dtype=np.float32)
+ wrap = tf.py_func(cw_wrap, [x, labels], tf.float32)
+
+ return wrap
+
+ def parse_params(self, y=None, y_target=None, nb_classes=None,
+ batch_size=1, confidence=0,
+ learning_rate=5e-3,
+ binary_search_steps=5, max_iterations=1000,
+ abort_early=True, initial_const=1e-2,
+ clip_min=0, clip_max=1):
+
+ # ignore the y and y_target argument
+ if nb_classes is not None:
+ warnings.warn("The nb_classes argument is depricated and will "
+ "be removed on 2018-02-11")
+ self.batch_size = batch_size
+ self.confidence = confidence
+ self.learning_rate = learning_rate
+ self.binary_search_steps = binary_search_steps
+ self.max_iterations = max_iterations
+ self.abort_early = abort_early
+ self.initial_const = initial_const
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+
+class ElasticNetMethod(Attack):
+ """
+ This attack features L1-oriented adversarial examples and includes
+ the C&W L2 attack as a special case (when beta is set to 0).
+ Adversarial examples attain similar performance to those
+ generated by the C&W L2 attack, and more importantly,
+ have improved transferability properties and
+ complement adversarial training.
+ Paper link: https://arxiv.org/abs/1709.04114
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+ super(ElasticNetMethod, self).__init__(model, back, sess, model_type, num_classes)
+
+ if self.back == 'th':
+ raise NotImplementedError('Theano version not implemented.')
+
+ import tensorflow as tf
+ self.feedable_kwargs = {'y': tf.float32,
+ 'y_target': tf.float32}
+
+ self.structural_kwargs = ['beta', 'batch_size', 'confidence',
+ 'targeted', 'learning_rate',
+ 'binary_search_steps', 'max_iterations',
+ 'abort_early', 'initial_const',
+ 'clip_min', 'clip_max']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'logits')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Return a tensor that constructs adversarial examples for the given
+ input. Generate uses tf.py_func in order to operate over tensors.
+
+ :param x: (required) A tensor with the inputs.
+ :param y: (optional) A tensor with the true labels for an untargeted
+ attack. If None (and y_target is None) then use the
+ original labels the classifier assigns.
+ :param y_target: (optional) A tensor with the target labels for a
+ targeted attack.
+ :param beta: Trades off L2 distortion with L1 distortion: higher
+ produces examples with lower L1 distortion, at the
+ cost of higher L2 (and typically Linf) distortion
+ :param confidence: Confidence of adversarial examples: higher produces
+ examples with larger l2 distortion, but more
+ strongly classified as adversarial.
+ :param batch_size: Number of attacks to run simultaneously.
+ :param learning_rate: The learning rate for the attack algorithm.
+ Smaller values produce better results but are
+ slower to converge.
+ :param binary_search_steps: The number of times we perform binary
+ search to find the optimal tradeoff-
+ constant between norm of the perturbation
+ and confidence of the classification.
+ :param max_iterations: The maximum number of iterations. Setting this
+ to a larger value will produce lower distortion
+ results. Using only a few iterations requires
+ a larger learning rate, and will produce larger
+ distortion results.
+ :param abort_early: If true, allows early abort when the total
+ loss starts to increase (greatly speeds up attack,
+ but hurts performance, particularly on ImageNet)
+ :param initial_const: The initial tradeoff-constant to use to tune the
+ relative importance of size of the perturbation
+ and confidence of classification.
+ If binary_search_steps is large, the initial
+ constant is not important. A smaller value of
+ this constant gives lower distortion results.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+ import tensorflow as tf
+ self.parse_params(**kwargs)
+
+ from .attacks_tf import ElasticNetMethod as EAD
+ labels, nb_classes = self.get_or_guess_labels(x, kwargs)
+
+ attack = EAD(self.sess, self.model, self.beta,
+ self.batch_size, self.confidence,
+ 'y_target' in kwargs, self.learning_rate,
+ self.binary_search_steps, self.max_iterations,
+ self.abort_early, self.initial_const,
+ self.clip_min, self.clip_max,
+ nb_classes, x.get_shape().as_list()[1:])
+
+ def ead_wrap(x_val, y_val):
+ return np.array(attack.attack(x_val, y_val), dtype=np.float32)
+ wrap = tf.py_func(ead_wrap, [x, labels], tf.float32)
+
+ return wrap
+
+ def parse_params(self, y=None, y_target=None,
+ nb_classes=None, beta=1e-3,
+ batch_size=9, confidence=0,
+ learning_rate=1e-2,
+ binary_search_steps=9, max_iterations=1000,
+ abort_early=False, initial_const=1e-3,
+ clip_min=0, clip_max=1):
+
+ # ignore the y and y_target argument
+ if nb_classes is not None:
+ warnings.warn("The nb_classes argument is depricated and will "
+ "be removed on 2018-02-11")
+ self.beta = beta
+ self.batch_size = batch_size
+ self.confidence = confidence
+ self.learning_rate = learning_rate
+ self.binary_search_steps = binary_search_steps
+ self.max_iterations = max_iterations
+ self.abort_early = abort_early
+ self.initial_const = initial_const
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+
+class DeepFool(Attack):
+
+ """
+ DeepFool is an untargeted & iterative attack which is based on an
+ iterative linearization of the classifier. The implementation here
+ is w.r.t. the L2 norm.
+ Paper link: "https://arxiv.org/pdf/1511.04599.pdf"
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
+ """
+ Create a DeepFool instance.
+ """
+ super(DeepFool, self).__init__(model, back, sess, model_type, num_classes)
+
+ if self.back == 'th':
+ raise NotImplementedError('Theano version not implemented.')
+
+ self.structural_kwargs = ['over_shoot', 'max_iter', 'clip_max',
+ 'clip_min', 'nb_candidate']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'logits')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+    :param nb_candidate: The number of classes to test against, i.e.,
+                         DeepFool only considers the nb_candidate classes with
+                         the highest prediction confidence when attacking,
+                         which speeds up the attack.
+ :param overshoot: A termination criterion to prevent vanishing updates
+ :param max_iter: Maximum number of iteration for deepfool
+ :param nb_classes: The number of model output classes
+ :param clip_min: Minimum component value for clipping
+ :param clip_max: Maximum component value for clipping
+ """
+
+ import tensorflow as tf
+ from .attacks_tf import jacobian_graph, deepfool_batch
+
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ # Define graph wrt to this input placeholder
+ logits = self.model.get_logits(x)
+ self.nb_classes = logits.get_shape().as_list()[-1]
+ assert self.nb_candidate <= self.nb_classes,\
+ 'nb_candidate should not be greater than nb_classes'
+ preds = tf.reshape(tf.nn.top_k(logits, k=self.nb_candidate)[0],
+ [-1, self.nb_candidate])
+ # grads will be the shape [batch_size, nb_candidate, image_size]
+ grads = tf.stack(jacobian_graph(preds, x, self.nb_candidate), axis=1)
+
+ # Define graph
+ def deepfool_wrap(x_val):
+ return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
+ self.nb_candidate, self.overshoot,
+ self.max_iter, self.clip_min, self.clip_max,
+ self.nb_classes)
+ return tf.py_func(deepfool_wrap, [x], tf.float32)
+
+ def parse_params(self, nb_candidate=10, overshoot=0.02, max_iter=50,
+ nb_classes=None, clip_min=0., clip_max=1., **kwargs):
+ """
+    :param nb_candidate: The number of classes to test against, i.e.,
+                         DeepFool only considers the nb_candidate classes with
+                         the highest prediction confidence when attacking,
+                         which speeds up the attack.
+ :param overshoot: A termination criterion to prevent vanishing updates
+ :param max_iter: Maximum number of iteration for deepfool
+ :param nb_classes: The number of model output classes
+ :param clip_min: Minimum component value for clipping
+ :param clip_max: Maximum component value for clipping
+ """
+ if nb_classes is not None:
+ warnings.warn("The nb_classes argument is depricated and will "
+ "be removed on 2018-02-11")
+ self.nb_candidate = nb_candidate
+ self.overshoot = overshoot
+ self.max_iter = max_iter
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+ return True
+
+
+def fgsm(x, predictions, eps, back='tf', clip_min=None, clip_max=None):
+ """
+ A wrapper for the Fast Gradient Sign Method.
+ It calls the right function, depending on the
+ user's backend.
+ :param x: the input
+ :param predictions: the model's output
+ (Note: in the original paper that introduced this
+ attack, the loss was computed by comparing the
+ model predictions with the hard labels (from the
+ dataset). Instead, this version implements the loss
+ by comparing the model predictions with the most
+ likely class. This tweak is recommended since the
+ discovery of label leaking in the following paper:
+ https://arxiv.org/abs/1611.01236)
+ :param eps: the epsilon (input variation parameter)
+ :param back: switch between TensorFlow ('tf') and
+ Theano ('th') implementation
+ :param clip_min: optional parameter that can be used to set a minimum
+ value for components of the example returned
+ :param clip_max: optional parameter that can be used to set a maximum
+ value for components of the example returned
+ :return: a tensor for the adversarial example
+ """
+ warnings.warn("attacks.fgsm is deprecated and will be removed on "
+ "2017-09-27. Instantiate an object from FastGradientMethod.")
+ if back == 'tf':
+ # Compute FGSM using TensorFlow
+ from .attacks_tf import fgm
+ return fgm(x, predictions, y=None, eps=eps, ord=np.inf,
+ clip_min=clip_min, clip_max=clip_max)
+ elif back == 'th':
+ # Compute FGSM using Theano
+ from .attacks_th import fgm
+ return fgm(x, predictions, eps, clip_min=clip_min, clip_max=clip_max)
+
+
+def vatm(model, x, logits, eps, back='tf', num_iterations=1, xi=1e-6,
+ clip_min=None, clip_max=None):
+ """
+ A wrapper for the perturbation methods used for virtual adversarial
+ training : https://arxiv.org/abs/1507.00677
+ It calls the right function, depending on the
+ user's backend.
+ :param model: the model which returns the network unnormalized logits
+ :param x: the input placeholder
+ :param logits: the model's unnormalized output tensor
+ :param eps: the epsilon (input variation parameter)
+ :param num_iterations: the number of iterations
+ :param xi: the finite difference parameter
+ :param clip_min: optional parameter that can be used to set a minimum
+ value for components of the example returned
+ :param clip_max: optional parameter that can be used to set a maximum
+ value for components of the example returned
+ :return: a tensor for the adversarial example
+
+ """
+ if back == 'tf':
+ # Compute VATM using TensorFlow
+ from .attacks_tf import vatm as vatm_tf
+ return vatm_tf(model, x, logits, eps, num_iterations=num_iterations,
+ xi=xi, clip_min=clip_min, clip_max=clip_max)
+ elif back == 'th':
+ # Compute VATM using Theano
+ from .attacks_th import vatm as vatm_th
+ return vatm_th(model, x, logits, eps, num_iterations=num_iterations,
+ xi=xi, clip_min=clip_min, clip_max=clip_max)
+
+
+def jsma(sess, x, predictions, grads, sample, target, theta, gamma=np.inf,
+ increase=True, back='tf', clip_min=None, clip_max=None):
+ """
+ A wrapper for the Jacobian-based saliency map approach.
+ It calls the right function, depending on the
+ user's backend.
+ :param sess: TF session
+ :param x: the input
+ :param predictions: the model's symbolic output (linear output,
+ pre-softmax)
+ :param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input
+ :param target: target class for input sample
+ :param theta: delta for each feature adjustment
+    :param gamma: a float between 0 and 1 indicating the maximum distortion
+ percentage
+ :param increase: boolean; true if we are increasing pixels, false otherwise
+ :param back: switch between TensorFlow ('tf') and
+ Theano ('th') implementation
+ :param clip_min: optional parameter that can be used to set a minimum
+ value for components of the example returned
+ :param clip_max: optional parameter that can be used to set a maximum
+ value for components of the example returned
+ :return: an adversarial sample
+ """
+ warnings.warn("attacks.jsma is deprecated and will be removed on "
+ "2017-09-27. Instantiate an object from SaliencyMapMethod.")
+ if back == 'tf':
+ # Compute Jacobian-based saliency map attack using TensorFlow
+ from .attacks_tf import jsma
+ return jsma(sess, x, predictions, grads, sample, target, theta, gamma,
+ clip_min, clip_max)
+ elif back == 'th':
+ raise NotImplementedError("Theano jsma not implemented.")
+
+
+class MadryEtAl(Attack):
+
+ """
+ The Projected Gradient Descent Attack (Madry et al. 2016).
+ Paper link: https://arxiv.org/pdf/1706.06083.pdf
+ """
+
+ def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10,
+ attack_type='vanilla'):
+ """
+ Create a MadryEtAl instance.
+ """
+ super(MadryEtAl, self).__init__(model, back, sess, model_type, num_classes)
+ self.feedable_kwargs = {'eps': np.float32,
+ 'eps_iter': np.float32,
+ 'y': np.float32,
+ 'y_target': np.float32,
+ 'clip_min': np.float32,
+ 'clip_max': np.float32}
+ self.attack_type = attack_type
+ self.structural_kwargs = ['ord', 'nb_iter']
+
+ if not isinstance(self.model, Model):
+ self.model = CallableModelWrapper(self.model, 'probs')
+
+ def generate(self, x, phase, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+ :param eps: (required float) maximum distortion of adversarial example
+ compared to original input
+ :param eps_iter: (required float) step size for each attack iteration
+ :param nb_iter: (required int) Number of attack iterations.
+ :param y: (optional) A tensor with the model labels.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ labels, nb_classes = self.get_or_guess_labels(x, kwargs)
+ self.targeted = self.y_target is not None
+
+ # Initialize loop variables
+ adv_x = self.attack(x)
+
+ return adv_x
+
+ def parse_params(self, eps=0.3, eps_iter=0.01, nb_iter=40, y=None,
+ ord=np.inf, clip_min=None, clip_max=None,
+ y_target=None, **kwargs):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+
+ Attack-specific parameters:
+ :param eps: (required float) maximum distortion of adversarial example
+ compared to original input
+ :param eps_iter: (required float) step size for each attack iteration
+ :param nb_iter: (required int) Number of attack iterations.
+ :param y: (optional) A tensor with the model labels.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ """
+
+ # Save attack-specific parameters
+ self.eps = eps
+ self.eps_iter = eps_iter
+ self.nb_iter = nb_iter
+ self.y = y
+ self.y_target = y_target
+ self.ord = ord
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+ if self.y is not None and self.y_target is not None:
+ raise ValueError("Must not set both y and y_target")
+ # Check if order of the norm is acceptable given current implementation
+ if self.ord not in [np.inf, 1, 2]:
+ raise ValueError("Norm order must be either np.inf, 1, or 2.")
+ if self.back == 'th':
+ error_string = ("ProjectedGradientDescentMethod is"
+ " not implemented in Theano")
+ raise NotImplementedError(error_string)
+
+ return True
+
+ def attack_single_step(self, x, eta, y):
+ """
+ Given the original image and the perturbation computed so far, computes
+ a new perturbation.
+
+ :param x: A tensor with the original input.
+ :param eta: A tensor the same shape as x that holds the perturbation.
+ :param y: A tensor with the target labels or ground-truth labels.
+ """
+ import tensorflow as tf
+ from modified_cleverhans.utils_tf import model_loss, clip_eta
+
+ adv_x = x + eta
+ if self.attack_type != "robust":
+ if self.model_type == 'ensembleThree':  # EMPIR: extra branch covering the combined ensemble model case
+ preds = self.model.get_combinedAvgCorrectProbs(adv_x, reuse=True)
+ else:
+ preds = self.model.get_probs(adv_x, reuse=True)
+ loss = model_loss(y, preds)
+ else:
+ # modification from zimmerrol to make sure their loss was correctly implemented
+ preds = self.model.get_layer(adv_x, True, 'combined_logits')
+ preds = tf.reduce_sum(preds, 1)
+ loss = model_loss(y, preds)
+ if self.targeted:
+ loss = -loss
+ grad, = tf.gradients(loss, adv_x)
+ scaled_signed_grad = self.eps_iter * tf.sign(grad)
+ adv_x = adv_x + scaled_signed_grad
+ adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+ eta = adv_x - x
+ eta = clip_eta(eta, self.ord, self.eps)
+ return x, eta
+
+ def attack(self, x, **kwargs):
+ """
+ This method creates a symbolic graph that, given an input image,
+ first randomly perturbs the image. The perturbation is bounded to an
+ epsilon ball. Then multiple steps of gradient descent are performed to
+ increase the probability of a target label or decrease the probability
+ of the ground-truth label.
+
+ :param x: A tensor with the input image.
+ """
+ import tensorflow as tf
+ from modified_cleverhans.utils_tf import clip_eta
+
+ eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
+ eta = clip_eta(eta, self.ord, self.eps)
+
+ if self.y is not None:
+ y = self.y
+ else:
+ if self.model_type == 'ensembleThree':  # EMPIR: extra branch covering the ensemble model case
+ preds = self.model.get_combinedAvgCorrectProbs(x)
+ # default below
+ else:
+ preds = self.model.get_probs(x)
+ preds_max = tf.reduce_max(preds, 1, keep_dims=True)
+ y = tf.to_float(tf.equal(preds, preds_max))
+ y = y / tf.reduce_sum(y, 1, keep_dims=True)
+ y = tf.stop_gradient(y)
+
+ for i in range(self.nb_iter):
+ x, eta = self.attack_single_step(x, eta, y)
+
+ adv_x = x + eta
+ if self.clip_min is not None and self.clip_max is not None:
+ adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+
+ return adv_x
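+
+# Usage sketch (illustrative only; `model`, `sess`, the input placeholder `x`,
+# the learning-phase placeholder `phase` and the epsilon values are assumed):
+#
+#   pgd = MadryEtAl(model, back='tf', sess=sess)
+#   adv_x = pgd.generate(x, phase, eps=0.3, eps_iter=0.01, nb_iter=40,
+#                        clip_min=0., clip_max=1.)
+#   x_adv = sess.run(adv_x, feed_dict={x: x_batch, phase: False})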
diff --git a/case_studies/empir/modified_cleverhans/attacks_tf.py b/case_studies/empir/modified_cleverhans/attacks_tf.py
new file mode 100644
index 0000000..e9880f6
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/attacks_tf.py
@@ -0,0 +1,1198 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import copy
+import numpy as np
+from six.moves import xrange
+import tensorflow as tf
+import warnings
+
+from . import utils_tf
+from . import utils
+
+_logger = utils.create_logger("cleverhans.attacks.tf")
+
+
+def fgsm(x, predictions, eps=0.3, clip_min=None, clip_max=None):
+ return fgm(x, predictions, y=None, eps=eps, ord=np.inf, clip_min=clip_min,
+ clip_max=clip_max)
+
+
+def fgm(x, preds, y=None, eps=0.3, ord=np.inf,
+ clip_min=None, clip_max=None,
+ targeted=False):
+ """
+ TensorFlow implementation of the Fast Gradient Method.
+ :param x: the input placeholder
+ :param preds: the model's output tensor (the attack expects the
+ probabilities, i.e., the output of the softmax)
+ :param y: (optional) A placeholder for the model labels. If targeted
+ is true, then provide the target label. Otherwise, only provide
+ this parameter if you'd like to use true labels when crafting
+ adversarial samples. Otherwise, model predictions are used as
+ labels to avoid the "label leaking" effect (explained in this
+ paper: https://arxiv.org/abs/1611.01236). Default is None.
+ Labels should be one-hot-encoded.
+ :param eps: the epsilon (input variation parameter)
+ :param ord: (optional) Order of the norm (mimics NumPy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: Minimum float value for adversarial example components
+ :param clip_max: Maximum float value for adversarial example components
+ :param targeted: Is the attack targeted or untargeted? Untargeted, the
+ default, will try to make the label incorrect. Targeted
+ will instead try to move in the direction of being more
+ like y.
+ :return: a tensor for the adversarial example
+ """
+
+ if y is None:
+ # Using model predictions as ground truth to avoid label leaking
+ preds_max = tf.reduce_max(preds, 1, keep_dims=True)
+ y = tf.to_float(tf.equal(preds, preds_max))
+ y = tf.stop_gradient(y)
+ y = y / tf.reduce_sum(y, 1, keep_dims=True)
+
+ # Compute loss
+ loss = utils_tf.model_loss(y, preds, mean=False)
+ if targeted:
+ loss = -loss
+
+ # Define gradient of loss wrt input
+ grad, = tf.gradients(loss, x)
+
+ if ord == np.inf:
+ # Take sign of gradient
+ normalized_grad = tf.sign(grad)
+ # The following line should not change the numerical results.
+ # It applies only because `normalized_grad` is the output of
+ # a `sign` op, which has zero derivative anyway.
+ # It should not be applied for the other norms, where the
+ # perturbation has a non-zero derivative.
+ normalized_grad = tf.stop_gradient(normalized_grad)
+ elif ord == 1:
+ red_ind = list(xrange(1, len(x.get_shape())))
+ normalized_grad = grad / tf.reduce_sum(tf.abs(grad),
+ reduction_indices=red_ind,
+ keep_dims=True)
+ elif ord == 2:
+ red_ind = list(xrange(1, len(x.get_shape())))
+ square = tf.reduce_sum(tf.square(grad),
+ reduction_indices=red_ind,
+ keep_dims=True)
+ normalized_grad = grad / tf.sqrt(square)
+ else:
+ raise NotImplementedError("Only L-inf, L1 and L2 norms are "
+ "currently implemented.")
+
+ # Multiply by constant epsilon
+ scaled_grad = eps * normalized_grad
+
+ # Add perturbation to original example to obtain adversarial example
+ adv_x = x + scaled_grad
+
+ # If clipping is needed, reset all values outside of [clip_min, clip_max]
+ if (clip_min is not None) and (clip_max is not None):
+ adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
+
+ return adv_x
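+
+# Usage sketch (assumed placeholder names, shown for clarity only): with an
+# input placeholder `x` and the softmax output `preds` of a model, an L-inf
+# FGSM example can be built and evaluated as
+#
+#   adv_x = fgm(x, preds, eps=0.3, ord=np.inf, clip_min=0., clip_max=1.)
+#   x_adv = sess.run(adv_x, feed_dict={x: x_batch})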
+
+
+def vatm(model, x, logits, eps, num_iterations=1, xi=1e-6,
+ clip_min=None, clip_max=None, scope=None):
+ """
+ Tensorflow implementation of the perturbation method used for virtual
+ adversarial training: https://arxiv.org/abs/1507.00677
+ :param model: the model which returns the network unnormalized logits
+ :param x: the input placeholder
+ :param logits: the model's unnormalized output tensor (the input to
+ the softmax layer)
+ :param eps: the epsilon (input variation parameter)
+ :param num_iterations: the number of iterations
+ :param xi: the finite difference parameter
+ :param clip_min: optional parameter that can be used to set a minimum
+ value for components of the example returned
+ :param clip_max: optional parameter that can be used to set a maximum
+ value for components of the example returned
+ :param seed: the seed for random generator
+ :return: a tensor for the adversarial example
+ """
+ with tf.name_scope(scope, "virtual_adversarial_perturbation"):
+ d = tf.random_normal(tf.shape(x))
+ for i in range(num_iterations):
+ d = xi * utils_tf.l2_batch_normalize(d)
+ logits_d = model.get_logits(x + d, reuse=True)
+ kl = utils_tf.kl_with_logits(logits, logits_d)
+ Hd = tf.gradients(kl, d)[0]
+ d = tf.stop_gradient(Hd)
+ d = eps * utils_tf.l2_batch_normalize(d)
+ adv_x = x + d
+ if (clip_min is not None) and (clip_max is not None):
+ adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
+ return adv_x
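+
+# Usage sketch (assumed names, illustrative only): with a cleverhans `Model`
+# instance `model`, an input placeholder `x` and its logits
+# `logits = model.get_logits(x)`, a virtual-adversarial perturbation can be
+# obtained via
+#
+#   adv_x = vatm(model, x, logits, eps=2.0, num_iterations=1, xi=1e-6,
+#                clip_min=0., clip_max=1.)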
+
+
+def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
+ """
+ TensorFlow implementation for applying perturbations to input features
+ based on saliency maps
+ :param i: index of first selected feature
+ :param j: index of second selected feature
+ :param X: a matrix containing our input features for our sample
+ :param increase: boolean; true if we are increasing pixels, false otherwise
+ :param theta: delta for each feature adjustment
+ :param clip_min: minimum value for a feature in our sample
+ :param clip_max: maximum value for a feature in our sample
+ :return: a perturbed input feature matrix for a target class
+ """
+
+ # perturb our input sample
+ if increase:
+ X[0, i] = np.minimum(clip_max, X[0, i] + theta)
+ X[0, j] = np.minimum(clip_max, X[0, j] + theta)
+ else:
+ X[0, i] = np.maximum(clip_min, X[0, i] - theta)
+ X[0, j] = np.maximum(clip_min, X[0, j] - theta)
+
+ return X
+
+
+def saliency_map(grads_target, grads_other, search_domain, increase):
+ """
+ TensorFlow implementation for computing saliency maps
+ :param grads_target: a matrix containing forward derivatives for the
+ target class
+ :param grads_other: a matrix where every element is the sum of forward
+ derivatives over all non-target classes at that index
+ :param search_domain: the set of input indices that we are considering
+ :param increase: boolean; true if we are increasing pixels, false otherwise
+ :return: (i, j, search_domain) the two input indices selected and the
+ updated search domain
+ """
+ # Compute the size of the input (the number of features)
+ nf = len(grads_target)
+
+ # Remove the already-used input features from the search space
+ invalid = list(set(range(nf)) - search_domain)
+ increase_coef = (2 * int(increase) - 1)
+ grads_target[invalid] = - increase_coef * np.max(np.abs(grads_target))
+ grads_other[invalid] = increase_coef * np.max(np.abs(grads_other))
+
+ # Create a 2D numpy array of the sum of grads_target and grads_other
+ target_sum = grads_target.reshape((1, nf)) + grads_target.reshape((nf, 1))
+ other_sum = grads_other.reshape((1, nf)) + grads_other.reshape((nf, 1))
+
+ # Create a mask to only keep features that match saliency map conditions
+ if increase:
+ scores_mask = ((target_sum > 0) & (other_sum < 0))
+ else:
+ scores_mask = ((target_sum < 0) & (other_sum > 0))
+
+ # Create a 2D numpy array of the scores for each pair of candidate features
+ scores = scores_mask * (-target_sum * other_sum)
+
+ # A pixel can only be selected (and changed) once
+ np.fill_diagonal(scores, 0)
+
+ # Extract the best two pixels
+ best = np.argmax(scores)
+ p1, p2 = best % nf, best // nf
+
+ # Remove used pixels from our search domain
+ search_domain.discard(p1)
+ search_domain.discard(p2)
+
+ return p1, p2, search_domain
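+
+# Clarifying note on the selection rule above: for increase=True the returned
+# pair (p1, p2) maximizes -(target_sum) * (other_sum), i.e.
+#   -(grads_target[p1] + grads_target[p2]) * (grads_other[p1] + grads_other[p2]),
+# restricted to pairs with positive target_sum and negative other_sum: pixels
+# whose increase raises the target class score while lowering all others.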
+
+
+def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None):
+ """
+ TensorFlow implementation of the forward derivative / Jacobian
+ :param x: the input placeholder
+ :param grads: the list of TF gradients returned by jacobian_graph()
+ :param target: the target misclassification class
+ :param X: numpy array with sample input
+ :param nb_features: the number of features in the input
+ :return: matrix of forward derivatives flattened into vectors
+ """
+ # Prepare feeding dictionary for all gradient computations
+ feed_dict = {x: X}
+ if feed is not None:
+ feed_dict.update(feed)
+
+ # Initialize a numpy array to hold the Jacobian component values
+ jacobian_val = np.zeros((nb_classes, nb_features), dtype=np.float32)
+
+ # Compute the gradients for all classes
+ for class_ind, grad in enumerate(grads):
+ run_grad = sess.run(grad, feed_dict)
+ jacobian_val[class_ind] = np.reshape(run_grad, (1, nb_features))
+
+ # Sum over all classes different from the target class to prepare for
+ # saliency map computation in the next step of the attack
+ other_classes = utils.other_classes(nb_classes, target)
+ grad_others = np.sum(jacobian_val[other_classes, :], axis=0)
+
+ return jacobian_val[target], grad_others
+
+
+def jacobian_graph(predictions, x, nb_classes):
+ """
+ Create the Jacobian graph to be run later in a TF session
+ :param predictions: the model's symbolic output (linear output,
+ pre-softmax)
+ :param x: the input placeholder
+ :param nb_classes: the number of classes the model has
+ :return: a list of TF gradient tensors, one per output class
+ """
+ # This function will return a list of TF gradients
+ list_derivatives = []
+
+ # Define the TF graph elements to compute our derivatives for each class
+ for class_ind in xrange(nb_classes):
+ derivatives, = tf.gradients(predictions[:, class_ind], x)
+ list_derivatives.append(derivatives)
+
+ return list_derivatives
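+
+# Usage sketch (assumed names): the per-class gradients consumed by `jacobian`
+# and `jsma` below are usually built once from the model's symbolic output:
+#
+#   grads = jacobian_graph(predictions, x, nb_classes)
+#
+# which yields a list with one gradient tensor per class.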
+
+
+def jsma(sess, x, predictions, grads, sample, target, theta, gamma, clip_min,
+ clip_max, feed=None):
+ """
+ TensorFlow implementation of the JSMA (see https://arxiv.org/abs/1511.07528
+ for details about the algorithm design choices).
+ :param sess: TF session
+ :param x: the input placeholder
+ :param predictions: the model's symbolic output (the attack expects the
+ probabilities, i.e., the output of the softmax, but will
+ also work with logits typically)
+ :param grads: symbolic gradients
+ :param sample: numpy array with sample input
+ :param target: target class for sample input
+ :param theta: delta for each feature adjustment
+ :param gamma: a float between 0 and 1 indicating the maximum distortion
+ percentage
+ :param clip_min: minimum value for components of the example returned
+ :param clip_max: maximum value for components of the example returned
+ :return: an adversarial sample
+ """
+
+ # Copy the source sample and define the maximum number of features
+ # (i.e. the maximum number of iterations) that we may perturb
+ adv_x = copy.copy(sample)
+ # count the number of features. For MNIST, 1x28x28 = 784; for
+ # CIFAR, 3x32x32 = 3072; etc.
+ nb_features = np.product(adv_x.shape[1:])
+ # reshape sample for sake of standardization
+ original_shape = adv_x.shape
+ adv_x = np.reshape(adv_x, (1, nb_features))
+ # compute maximum number of iterations
+ max_iters = np.floor(nb_features * gamma / 2)
+
+ # Find number of classes based on grads
+ nb_classes = len(grads)
+
+ increase = bool(theta > 0)
+
+ # Compute our initial search domain. We optimize the initial search domain
+ # by removing all features that are already at their maximum values (if
+ # increasing input features---otherwise, at their minimum value).
+ if increase:
+ search_domain = set([i for i in xrange(nb_features)
+ if adv_x[0, i] < clip_max])
+ else:
+ search_domain = set([i for i in xrange(nb_features)
+ if adv_x[0, i] > clip_min])
+
+ # Initialize the loop variables
+ iteration = 0
+ adv_x_original_shape = np.reshape(adv_x, original_shape)
+ current = utils_tf.model_argmax(sess, x, predictions, adv_x_original_shape,
+ feed=feed)
+
+ _logger.debug("Starting JSMA attack up to {} iterations".format(max_iters))
+ # Repeat this main loop until we have achieved misclassification
+ while (current != target and iteration < max_iters and
+ len(search_domain) > 1):
+ # Reshape the adversarial example
+ adv_x_original_shape = np.reshape(adv_x, original_shape)
+
+ # Compute the Jacobian components
+ grads_target, grads_others = jacobian(sess, x, grads, target,
+ adv_x_original_shape,
+ nb_features, nb_classes,
+ feed=feed)
+
+ if iteration % (((max_iters + 1) // 5) or 1) == 0 and iteration > 0:
+ _logger.debug("Iteration {} of {}".format(iteration,
+ int(max_iters)))
+ # Compute the saliency map for each of our target classes
+ # and return the two best candidate features for perturbation
+ i, j, search_domain = saliency_map(
+ grads_target, grads_others, search_domain, increase)
+
+ # Apply the perturbation to the two input features selected previously
+ adv_x = apply_perturbations(
+ i, j, adv_x, increase, theta, clip_min, clip_max)
+
+ # Update our current prediction by querying the model
+ current = utils_tf.model_argmax(sess, x, predictions,
+ adv_x_original_shape, feed=feed)
+
+ # Update loop variables
+ iteration = iteration + 1
+
+ if current == target:
+ _logger.info("Attack succeeded using {} iterations".format(iteration))
+ else:
+ _logger.info(("Failed to find adversarial example " +
+ "after {} iterations").format(iteration))
+
+ # Compute the ratio of pixels perturbed by the algorithm
+ percent_perturbed = float(iteration * 2) / nb_features
+
+ # Report success when the adversarial example is misclassified in the
+ # target class
+ if current == target:
+ return np.reshape(adv_x, original_shape), 1, percent_perturbed
+ else:
+ return np.reshape(adv_x, original_shape), 0, percent_perturbed
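+
+# Usage sketch (illustrative; `sess`, `x`, `predictions` and the numpy array
+# `sample` are assumed, and the target class 3 is arbitrary):
+#
+#   grads = jacobian_graph(predictions, x, nb_classes)
+#   adv, success, pct = jsma(sess, x, predictions, grads, sample, target=3,
+#                            theta=1., gamma=0.1, clip_min=0., clip_max=1.)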
+
+
+def jsma_batch(sess, x, pred, grads, X, theta, gamma, clip_min, clip_max,
+ nb_classes, y_target=None, feed=None, **kwargs):
+ """
+ Applies the JSMA to a batch of inputs
+ :param sess: TF session
+ :param x: the input placeholder
+ :param pred: the model's symbolic output
+ :param grads: symbolic gradients
+ :param X: numpy array with sample inputs
+ :param theta: delta for each feature adjustment
+ :param gamma: a float between 0 and 1 indicating the maximum distortion
+ percentage
+ :param clip_min: minimum value for components of the example returned
+ :param clip_max: maximum value for components of the example returned
+ :param nb_classes: number of model output classes
+ :param y_target: target class for sample input
+ :return: adversarial examples
+ """
+ if 'targets' in kwargs:
+ warnings.warn('The targets parameter is deprecated, use y_target.'
+ 'targets will be removed on 2018-02-03.')
+ y_target = kwargs['targets']
+
+ X_adv = np.zeros(X.shape)
+
+ for ind, val in enumerate(X):
+ val = np.expand_dims(val, axis=0)
+ if y_target is None:
+ # No y_target provided, randomly choose from other classes
+ from .utils_tf import model_argmax
+ gt = model_argmax(sess, x, pred, val, feed=feed)
+
+ # Randomly choose from the incorrect classes for each sample
+ from .utils import random_targets
+ target = random_targets(gt, nb_classes)[0]
+ else:
+ target = y_target[ind]
+
+ X_adv[ind], _, _ = jsma(sess, x, pred, grads, val, np.argmax(target),
+ theta, gamma, clip_min, clip_max, feed=feed)
+
+ return np.asarray(X_adv, dtype=np.float32)
+
+
+def jacobian_augmentation(sess, x, X_sub_prev, Y_sub, grads, lmbda,
+ keras_phase=None, feed=None):
+ """
+ Augment an adversary's substitute training set using the Jacobian
+ of a substitute model to generate new synthetic inputs.
+ See https://arxiv.org/abs/1602.02697 for more details.
+ See cleverhans_tutorials/mnist_blackbox.py for example use case
+ :param sess: TF session in which the substitute model is defined
+ :param x: input TF placeholder for the substitute model
+ :param X_sub_prev: substitute training data available to the adversary
+ at the previous iteration
+ :param Y_sub: substitute training labels available to the adversary
+ at the previous iteration
+ :param grads: Jacobian symbolic graph for the substitute
+ (should be generated using attacks_tf.jacobian_graph)
+ :param keras_phase: (deprecated) if not None, holds keras learning_phase
+ :return: augmented substitute data (will need to be labeled by oracle)
+ """
+ assert len(x.get_shape()) == len(np.shape(X_sub_prev))
+ assert len(grads) >= np.max(Y_sub) + 1
+ assert len(X_sub_prev) == len(Y_sub)
+
+ if keras_phase is not None:
+ warnings.warn("keras_phase argument is deprecated and will be removed"
+ " on 2017-09-28. Instead, use K.set_learning_phase(0) at"
+ " the start of your script and serve with tensorflow.")
+
+ # Prepare input_shape (outside loop) for feeding dictionary below
+ input_shape = list(x.get_shape())
+ input_shape[0] = 1
+
+ # Create new numpy array for adversary training data
+ # with twice as many components on the first dimension.
+ X_sub = np.vstack([X_sub_prev, X_sub_prev])
+
+ # For each input from the previous substitute training iteration
+ for ind, input in enumerate(X_sub_prev):
+ # Select gradient corresponding to the label predicted by the oracle
+ grad = grads[Y_sub[ind]]
+
+ # Prepare feeding dictionary
+ feed_dict = {x: np.reshape(input, input_shape)}
+ if feed is not None:
+ feed_dict.update(feed)
+
+ # Compute sign matrix
+ grad_val = sess.run([tf.sign(grad)], feed_dict=feed_dict)[0]
+
+ # Create new synthetic point in adversary substitute training set
+ X_sub[2 * ind] = X_sub[ind] + lmbda * grad_val
+
+ # Return augmented training data (needs to be labeled afterwards)
+ return X_sub
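+
+# Usage sketch (assumed names): one black-box augmentation round doubles the
+# substitute training set; the new points still need to be labeled by the
+# oracle afterwards:
+#
+#   grads = jacobian_graph(model.get_probs(x), x, nb_classes)
+#   X_sub = jacobian_augmentation(sess, x, X_sub, Y_sub, grads, lmbda=0.1)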
+
+
+class CarliniWagnerL2(object):
+
+ def __init__(self, sess, model, batch_size, confidence,
+ targeted, learning_rate,
+ binary_search_steps, max_iterations,
+ abort_early, initial_const,
+ clip_min, clip_max, num_labels, shape):
+ """
+ Return a tensor that constructs adversarial examples for the given
+ input. Generate uses tf.py_func in order to operate over tensors.
+
+ :param sess: a TF session.
+ :param model: a cleverhans.model.Model object.
+ :param batch_size: Number of attacks to run simultaneously.
+ :param confidence: Confidence of adversarial examples: higher produces
+ examples with larger l2 distortion, but more
+ strongly classified as adversarial.
+ :param targeted: boolean controlling the behavior of the adversarial
+ examples produced. If set to False, they will be
+ misclassified in any wrong class. If set to True,
+ they will be misclassified in a chosen target class.
+ :param learning_rate: The learning rate for the attack algorithm.
+ Smaller values produce better results but are
+ slower to converge.
+ :param binary_search_steps: The number of times we perform binary
+ search to find the optimal trade-off
+ constant between the norm of the perturbation
+ and the confidence of the classification.
+ :param max_iterations: The maximum number of iterations. Setting this
+ to a larger value will produce lower distortion
+ results. Using only a few iterations requires
+ a larger learning rate, and will produce larger
+ distortion results.
+ :param abort_early: If true, allows early aborts if gradient descent
+ is unable to make progress (i.e., gets stuck in
+ a local minimum).
+ :param initial_const: The initial trade-off constant used to tune the
+ relative importance of the size of the perturbation
+ and the confidence of classification.
+ If binary_search_steps is large, the initial
+ constant is not important. A smaller value of
+ this constant gives lower distortion results.
+ :param clip_min: (optional float) Minimum input component value.
+ :param clip_max: (optional float) Maximum input component value.
+ :param num_labels: the number of classes in the model's output.
+ :param shape: the shape of the model's input tensor.
+ """
+
+ self.sess = sess
+ self.TARGETED = targeted
+ self.LEARNING_RATE = learning_rate
+ self.MAX_ITERATIONS = max_iterations
+ self.BINARY_SEARCH_STEPS = binary_search_steps
+ self.ABORT_EARLY = abort_early
+ self.CONFIDENCE = confidence
+ self.initial_const = initial_const
+ self.batch_size = batch_size
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+ self.model = model
+
+ self.repeat = binary_search_steps >= 10
+
+ self.shape = shape = tuple([batch_size] + list(shape))
+
+ # the variable we're going to optimize over
+ modifier = tf.Variable(np.zeros(shape, dtype=np.float32))
+
+ # these are variables to be more efficient in sending data to tf
+ self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32,
+ name='timg')
+ self.tlab = tf.Variable(np.zeros((batch_size, num_labels)),
+ dtype=tf.float32, name='tlab')
+ self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32,
+ name='const')
+
+ # and here's what we use to assign them
+ self.assign_timg = tf.placeholder(tf.float32, shape,
+ name='assign_timg')
+ self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels),
+ name='assign_tlab')
+ self.assign_const = tf.placeholder(tf.float32, [batch_size],
+ name='assign_const')
+
+ # the resulting instance, tanh'd to keep bounded from clip_min
+ # to clip_max
+ self.newimg = (tf.tanh(modifier + self.timg) + 1) / 2
+ self.newimg = self.newimg * (clip_max - clip_min) + clip_min
+
+ # prediction BEFORE-SOFTMAX of the model
+ self.output = model.get_logits(self.newimg, reuse=True)
+
+ # distance to the input data
+ self.other = (tf.tanh(self.timg) + 1) / \
+ 2 * (clip_max - clip_min) + clip_min
+ self.l2dist = tf.reduce_sum(tf.square(self.newimg - self.other),
+ list(range(1, len(shape))))
+
+ # compute the probability of the label class versus the maximum other
+ real = tf.reduce_sum((self.tlab) * self.output, 1)
+ other = tf.reduce_max(
+ (1 - self.tlab) * self.output - self.tlab * 10000,
+ 1)
+
+ if self.TARGETED:
+ # if targeted, optimize for making the other class most likely
+ loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)
+ else:
+ # if untargeted, optimize for making this class least likely.
+ loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)
+
+ # sum up the losses
+ self.loss2 = tf.reduce_sum(self.l2dist)
+ self.loss1 = tf.reduce_sum(self.const * loss1)
+ self.loss = self.loss1 + self.loss2
+
+ # Setup the adam optimizer and keep track of variables we're creating
+ start_vars = set(x.name for x in tf.global_variables())
+ optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
+ self.train = optimizer.minimize(self.loss, var_list=[modifier])
+ end_vars = tf.global_variables()
+ new_vars = [x for x in end_vars if x.name not in start_vars]
+
+ # these are the variables to initialize when we run
+ self.setup = []
+ self.setup.append(self.timg.assign(self.assign_timg))
+ self.setup.append(self.tlab.assign(self.assign_tlab))
+ self.setup.append(self.const.assign(self.assign_const))
+
+ self.init = tf.variables_initializer(var_list=[modifier] + new_vars)
+
+ def attack(self, imgs, targets, phase):
+ """
+ Perform the L_2 attack on the given instance for the given targets.
+
+ If self.targeted is true, then the targets represent the target labels.
+ If self.targeted is false, then the targets are the original class labels.
+ """
+
+ r = []
+ for i in range(0, len(imgs), self.batch_size):
+ _logger.debug(("Running CWL2 attack on instance " +
+ "{} of {}").format(i, len(imgs)))
+ r.extend(self.attack_batch(imgs[i:i + self.batch_size],
+ targets[i:i + self.batch_size], phase))
+ return np.array(r)
+
+ def attack_batch(self, imgs, labs, phase):
+ """
+ Run the attack on a batch of instances and labels.
+ """
+ def compare(x, y):
+ if not isinstance(x, (float, int, np.int64)):
+ x = np.copy(x)
+ if self.TARGETED:
+ x[y] -= self.CONFIDENCE
+ else:
+ x[y] += self.CONFIDENCE
+ x = np.argmax(x)
+ if self.TARGETED:
+ return x == y
+ else:
+ return x != y
+
+ batch_size = self.batch_size
+
+ oimgs = np.clip(imgs, self.clip_min, self.clip_max)
+
+ # re-scale instances to be within range [0, 1]
+ imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
+ imgs = np.clip(imgs, 0, 1)
+ # now convert to [-1, 1]
+ imgs = (imgs * 2) - 1
+ # convert to tanh-space
+ imgs = np.arctanh(imgs * .999999)
+
+ # set the lower and upper bounds accordingly
+ lower_bound = np.zeros(batch_size)
+ CONST = np.ones(batch_size) * self.initial_const
+ upper_bound = np.ones(batch_size) * 1e10
+
+ # placeholders for the best l2, score, and instance attack found so far
+ o_bestl2 = [1e10] * batch_size
+ o_bestscore = [-1] * batch_size
+ o_bestattack = np.copy(oimgs)
+
+ for outer_step in range(self.BINARY_SEARCH_STEPS):
+ # completely reset adam's internal state.
+ self.sess.run(self.init)
+ batch = imgs[:batch_size]
+ batchlab = labs[:batch_size]
+
+ bestl2 = [1e10] * batch_size
+ bestscore = [-1] * batch_size
+ _logger.debug(" Binary search step {} of {}".
+ format(outer_step, self.BINARY_SEARCH_STEPS))
+
+ # The last iteration (if we run many steps) repeats the search once.
+ if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
+ CONST = upper_bound
+
+ # set the variables so that we don't have to send them over again
+ self.sess.run(self.setup, {self.assign_timg: batch,
+ self.assign_tlab: batchlab,
+ self.assign_const: CONST,
+ phase: False})
+
+ prev = 1e6
+ for iteration in range(self.MAX_ITERATIONS):
+ # perform the attack
+ _, l, l2s, scores, nimg = self.sess.run([self.train,
+ self.loss,
+ self.l2dist,
+ self.output,
+ self.newimg],
+ feed_dict={phase: False})
+
+ if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
+ _logger.debug((" Iteration {} of {}: loss={:.3g} " +
+ "l2={:.3g} f={:.3g}")
+ .format(iteration, self.MAX_ITERATIONS,
+ l, np.mean(l2s), np.mean(scores)))
+
+ # check if we should abort search if we're getting nowhere.
+ if self.ABORT_EARLY and \
+ iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
+ if l > prev * .9999:
+ msg = " Failed to make progress; stop early"
+ _logger.debug(msg)
+ break
+ prev = l
+
+ # adjust the best result found so far
+ for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
+ lab = np.argmax(batchlab[e])
+ if l2 < bestl2[e] and compare(sc, lab):
+ bestl2[e] = l2
+ bestscore[e] = np.argmax(sc)
+ if l2 < o_bestl2[e] and compare(sc, lab):
+ o_bestl2[e] = l2
+ o_bestscore[e] = np.argmax(sc)
+ o_bestattack[e] = ii
+
+ # adjust the constant as needed
+ for e in range(batch_size):
+ if compare(bestscore[e], np.argmax(batchlab[e])) and \
+ bestscore[e] != -1:
+ # success, divide const by two
+ upper_bound[e] = min(upper_bound[e], CONST[e])
+ if upper_bound[e] < 1e9:
+ CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
+ else:
+ # failure, either multiply by 10 if no solution found yet
+ # or do binary search with the known upper bound
+ lower_bound[e] = max(lower_bound[e], CONST[e])
+ if upper_bound[e] < 1e9:
+ CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
+ else:
+ CONST[e] *= 10
+ _logger.debug(" Successfully generated adversarial examples " +
+ "on {} of {} instances.".
+ format(sum(upper_bound < 1e9), batch_size))
+ o_bestl2 = np.array(o_bestl2)
+ mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
+ _logger.debug(" Mean successful distortion: {:.4g}".format(mean))
+
+ # return the best solution found
+ o_bestl2 = np.array(o_bestl2)
+ return o_bestattack
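+
+# Usage sketch (all values illustrative; `sess`, `model`, the learning-phase
+# placeholder `phase` and the MNIST-like input shape are assumptions):
+#
+#   cw = CarliniWagnerL2(sess, model, batch_size=9, confidence=0,
+#                        targeted=False, learning_rate=1e-2,
+#                        binary_search_steps=9, max_iterations=1000,
+#                        abort_early=True, initial_const=1e-3,
+#                        clip_min=0., clip_max=1., num_labels=10,
+#                        shape=(28, 28, 1))
+#   x_adv = cw.attack(x_batch, y_onehot_batch, phase)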
+
+
+class ElasticNetMethod(object):
+
+ def __init__(self, sess, model, beta,
+ batch_size, confidence,
+ targeted, learning_rate,
+ binary_search_steps, max_iterations,
+ abort_early, initial_const,
+ clip_min, clip_max, num_labels, shape):
+ """
+ EAD Attack with the EN Decision Rule
+
+ Return a tensor that constructs adversarial examples for the given
+ input. Generate uses tf.py_func in order to operate over tensors.
+
+ :param sess: a TF session.
+ :param model: a cleverhans.model.Model object.
+ :param beta: Trades off L2 distortion with L1 distortion: higher
+ produces examples with lower L1 distortion, at the
+ cost of higher L2 (and typically Linf) distortion
+ :param batch_size: Number of attacks to run simultaneously.
+ :param confidence: Confidence of adversarial examples: higher produces
+ examples with larger l2 distortion, but more
+ strongly classified as adversarial.
+ :param targeted: boolean controlling the behavior of the adversarial
+ examples produced. If set to False, they will be
+ misclassified in any wrong class. If set to True,
+ they will be misclassified in a chosen target class.
+ :param learning_rate: The learning rate for the attack algorithm.
+ Smaller values produce better results but are
+ slower to converge.
+ :param binary_search_steps: The number of times we perform binary
+ search to find the optimal tradeoff-
+ constant between norm of the perturbation
+ and confidence of the classification.
+ :param max_iterations: The maximum number of iterations. Setting this
+ to a larger value will produce lower distortion
+ results. Using only a few iterations requires
+ a larger learning rate, and will produce larger
+ distortion results.
+ :param abort_early: If true, allows early abort when the total
+ loss starts to increase (greatly speeds up attack,
+ but hurts performance, particularly on ImageNet)
+ :param initial_const: The initial tradeoff-constant to use to tune the
+ relative importance of size of the perturbation
+ and confidence of classification.
+ If binary_search_steps is large, the initial
+ constant is not important. A smaller value of
+ this constant gives lower distortion results.
+ :param clip_min: (optional float) Minimum input component value.
+ :param clip_max: (optional float) Maximum input component value.
+ :param num_labels: the number of classes in the model's output.
+ :param shape: the shape of the model's input tensor.
+ """
+
+ self.sess = sess
+ self.TARGETED = targeted
+ self.LEARNING_RATE = learning_rate
+ self.MAX_ITERATIONS = max_iterations
+ self.BINARY_SEARCH_STEPS = binary_search_steps
+ self.ABORT_EARLY = abort_early
+ self.CONFIDENCE = confidence
+ self.initial_const = initial_const
+ self.batch_size = batch_size
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+ self.model = model
+
+ self.beta = beta
+ self.beta_t = tf.cast(self.beta, tf.float32)
+
+ self.repeat = binary_search_steps >= 10
+
+ self.shape = shape = tuple([batch_size] + list(shape))
+
+ # these are variables to be more efficient in sending data to tf
+ self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32,
+ name='timg')
+ self.newimg = tf.Variable(np.zeros(shape), dtype=tf.float32,
+ name='newimg')
+ self.slack = tf.Variable(np.zeros(shape), dtype=tf.float32,
+ name='slack')
+ self.tlab = tf.Variable(np.zeros((batch_size, num_labels)),
+ dtype=tf.float32, name='tlab')
+ self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32,
+ name='const')
+
+ # and here's what we use to assign them
+ self.assign_timg = tf.placeholder(tf.float32, shape,
+ name='assign_timg')
+ self.assign_newimg = tf.placeholder(tf.float32, shape,
+ name='assign_newimg')
+ self.assign_slack = tf.placeholder(tf.float32, shape,
+ name='assign_slack')
+ self.assign_tlab = tf.placeholder(tf.float32, (batch_size,
+ num_labels),
+ name='assign_tlab')
+ self.assign_const = tf.placeholder(tf.float32, [batch_size],
+ name='assign_const')
+
+ self.global_step = tf.Variable(0, trainable=False)
+ self.global_step_t = tf.cast(self.global_step, tf.float32)
+
+ """Fast Iterative Shrinkage Thresholding"""
+ """--------------------------------"""
+ self.zt = tf.divide(self.global_step_t,
+ self.global_step_t + tf.cast(3, tf.float32))
+
+ cond1 = tf.cast(tf.greater(tf.subtract(self.slack, self.timg),
+ self.beta_t), tf.float32)
+ cond2 = tf.cast(tf.less_equal(tf.abs(tf.subtract(self.slack,
+ self.timg)),
+ self.beta_t), tf.float32)
+ cond3 = tf.cast(tf.less(tf.subtract(self.slack, self.timg),
+ tf.negative(self.beta_t)), tf.float32)
+
+ upper = tf.minimum(tf.subtract(self.slack, self.beta_t),
+ tf.cast(self.clip_max, tf.float32))
+ lower = tf.maximum(tf.add(self.slack, self.beta_t),
+ tf.cast(self.clip_min, tf.float32))
+
+ self.assign_newimg = tf.multiply(cond1, upper)
+ self.assign_newimg += tf.multiply(cond2, self.timg)
+ self.assign_newimg += tf.multiply(cond3, lower)
+
+ self.assign_slack = self.assign_newimg
+ self.assign_slack += tf.multiply(self.zt,
+ self.assign_newimg - self.newimg)
+
+ self.setter = tf.assign(self.newimg, self.assign_newimg)
+ self.setter_y = tf.assign(self.slack, self.assign_slack)
+ """--------------------------------"""
+
+ # prediction BEFORE-SOFTMAX of the model
+ self.output = model.get_logits(self.newimg, reuse=True)
+ self.output_y = model.get_logits(self.slack, reuse=True)
+
+ # distance to the input data
+ self.l2dist = tf.reduce_sum(tf.square(self.newimg - self.timg),
+ list(range(1, len(shape))))
+ self.l2dist_y = tf.reduce_sum(tf.square(self.slack - self.timg),
+ list(range(1, len(shape))))
+ self.l1dist = tf.reduce_sum(tf.abs(self.newimg - self.timg),
+ list(range(1, len(shape))))
+ self.l1dist_y = tf.reduce_sum(tf.abs(self.slack - self.timg),
+ list(range(1, len(shape))))
+ self.elasticdist = self.l2dist + tf.multiply(self.l1dist,
+ self.beta_t)
+ self.elasticdist_y = self.l2dist_y + tf.multiply(self.l1dist_y,
+ self.beta_t)
+
+ # compute the probability of the label class versus the maximum other
+ real = tf.reduce_sum((self.tlab) * self.output, 1)
+ real_y = tf.reduce_sum((self.tlab) * self.output_y, 1)
+ other = tf.reduce_max((1 - self.tlab) * self.output -
+ (self.tlab * 10000), 1)
+ other_y = tf.reduce_max((1 - self.tlab) * self.output_y -
+ (self.tlab * 10000), 1)
+
+ if self.TARGETED:
+ # if targeted, optimize for making the other class most likely
+ loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)
+ loss1_y = tf.maximum(0.0, other_y - real_y + self.CONFIDENCE)
+ else:
+ # if untargeted, optimize for making this class least likely.
+ loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)
+ loss1_y = tf.maximum(0.0, real_y - other_y + self.CONFIDENCE)
+
+ # sum up the losses
+ self.loss21 = tf.reduce_sum(self.l1dist)
+ self.loss21_y = tf.reduce_sum(self.l1dist_y)
+ self.loss2 = tf.reduce_sum(self.l2dist)
+ self.loss2_y = tf.reduce_sum(self.l2dist_y)
+ self.loss1 = tf.reduce_sum(self.const * loss1)
+ self.loss1_y = tf.reduce_sum(self.const * loss1_y)
+
+ self.loss_opt = self.loss1_y + self.loss2_y
+ self.loss = self.loss1 + self.loss2 + \
+ tf.multiply(self.beta_t, self.loss21)
+
+ self.learning_rate = tf.train.polynomial_decay(self.LEARNING_RATE,
+ self.global_step,
+ self.MAX_ITERATIONS,
+ 0, power=0.5)
+
+ # Setup the optimizer and keep track of variables we're creating
+ start_vars = set(x.name for x in tf.global_variables())
+ optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
+ self.train = optimizer.minimize(self.loss_opt,
+ var_list=[self.slack],
+ global_step=self.global_step)
+ end_vars = tf.global_variables()
+ new_vars = [x for x in end_vars if x.name not in start_vars]
+
+ # these are the variables to initialize when we run
+ self.setup = []
+ self.setup.append(self.timg.assign(self.assign_timg))
+ self.setup.append(self.tlab.assign(self.assign_tlab))
+ self.setup.append(self.const.assign(self.assign_const))
+
+ self.init = tf.variables_initializer(var_list=[self.global_step] +
+ [self.slack] + [self.newimg] +
+ new_vars)
+
+ def attack(self, imgs, targets, phase):
+ """
+ Perform the EAD attack on the given instance for the given targets.
+
+ If self.targeted is true, then the targets represent the target labels.
+ If self.targeted is false, then the targets are the original class labels.
+ """
+
+ r = []
+ for i in range(0, len(imgs), self.batch_size):
+ _logger.debug(("Running EAD attack on instance " +
+ "{} of {}").format(i, len(imgs)))
+ r.extend(self.attack_batch(imgs[i:i + self.batch_size],
+ targets[i:i + self.batch_size], phase))
+ return np.array(r)
+
+ def attack_batch(self, imgs, labs, phase):
+ """
+ Run the attack on a batch of instances and labels.
+ """
+ def compare(x, y):
+ if not isinstance(x, (float, int, np.int64)):
+ x = np.copy(x)
+ if self.TARGETED:
+ x[y] -= self.CONFIDENCE
+ else:
+ x[y] += self.CONFIDENCE
+ x = np.argmax(x)
+ if self.TARGETED:
+ return x == y
+ else:
+ return x != y
+
+ batch_size = self.batch_size
+
+ imgs = np.clip(imgs, self.clip_min, self.clip_max)
+
+ # set the lower and upper bounds accordingly
+ lower_bound = np.zeros(batch_size)
+ CONST = np.ones(batch_size) * self.initial_const
+ upper_bound = np.ones(batch_size) * 1e10
+
+ # placeholders for the best en, score, and instance attack found so far
+ o_besten = [1e10] * batch_size
+ o_bestscore = [-1] * batch_size
+ o_bestattack = np.copy(imgs)
+
+ for outer_step in range(self.BINARY_SEARCH_STEPS):
+ # completely reset the optimizer's internal state.
+ self.sess.run(self.init)
+ batch = imgs[:batch_size]
+ batchlab = labs[:batch_size]
+
+ besten = [1e10] * batch_size
+ bestscore = [-1] * batch_size
+ _logger.debug(" Binary search step {} of {}".
+ format(outer_step, self.BINARY_SEARCH_STEPS))
+
+ # The last iteration (if we run many steps) repeats the search once.
+ if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
+ CONST = upper_bound
+
+ # set the variables so that we don't have to send them over again
+ self.sess.run(self.setup, {self.assign_timg: batch,
+ self.assign_tlab: batchlab,
+ self.assign_const: CONST})
+ self.sess.run(self.setter, feed_dict={self.assign_newimg: batch})
+ self.sess.run(self.setter_y, feed_dict={self.assign_slack: batch})
+ prev = 1e6
+ for iteration in range(self.MAX_ITERATIONS):
+ # perform the attack
+ self.sess.run([self.train])
+ self.sess.run([self.setter, self.setter_y])
+ l, l2s, l1s, elastic = self.sess.run([self.loss,
+ self.l2dist,
+ self.l1dist,
+ self.elasticdist])
+ scores, nimg = self.sess.run([self.output, self.newimg])
+
+ if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
+ _logger.debug((" Iteration {} of {}: loss={:.3g} " +
+ "l2={:.3g} l1={:.3g} f={:.3g}")
+ .format(iteration, self.MAX_ITERATIONS,
+ l, np.mean(l2s), np.mean(l1s),
+ np.mean(scores)))
+
+ # check if we should abort search if we're getting nowhere.
+ if self.ABORT_EARLY and \
+ iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
+ if l > prev * .9999:
+ msg = " Failed to make progress; stop early"
+ _logger.debug(msg)
+ break
+ prev = l
+
+ # adjust the best result found so far
+ for e, (en, sc, ii) in enumerate(zip(elastic, scores, nimg)):
+ lab = np.argmax(batchlab[e])
+ if en < besten[e] and compare(sc, lab):
+ besten[e] = en
+ bestscore[e] = np.argmax(sc)
+ if en < o_besten[e] and compare(sc, lab):
+ o_besten[e] = en
+ o_bestscore[e] = np.argmax(sc)
+ o_bestattack[e] = ii
+
+ # adjust the constant as needed
+ for e in range(batch_size):
+ if compare(bestscore[e], np.argmax(batchlab[e])) and \
+ bestscore[e] != -1:
+ # success, divide const by two
+ upper_bound[e] = min(upper_bound[e], CONST[e])
+ if upper_bound[e] < 1e9:
+ CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
+ else:
+ # failure, either multiply by 10 if no solution found yet
+ # or do binary search with the known upper bound
+ lower_bound[e] = max(lower_bound[e], CONST[e])
+ if upper_bound[e] < 1e9:
+ CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
+ else:
+ CONST[e] *= 10
+ _logger.debug(" Successfully generated adversarial examples " +
+ "on {} of {} instances.".
+ format(sum(upper_bound < 1e9), batch_size))
+ o_besten = np.array(o_besten)
+ mean = np.mean(np.sqrt(o_besten[o_besten < 1e9]))
+ _logger.debug(" Elastic Mean successful distortion: {:.4g}".
+ format(mean))
+
+ # return the best solution found
+ o_besten = np.array(o_besten)
+ return o_bestattack
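+
+# Usage sketch (all values illustrative; mirrors the CarliniWagnerL2 sketch
+# above and additionally passes the L1/L2 trade-off `beta`; `phase` is the
+# learning-phase placeholder accepted by `attack`):
+#
+#   ead = ElasticNetMethod(sess, model, beta=1e-2, batch_size=9, confidence=0,
+#                          targeted=False, learning_rate=1e-2,
+#                          binary_search_steps=9, max_iterations=1000,
+#                          abort_early=False, initial_const=1e-3,
+#                          clip_min=0., clip_max=1., num_labels=10,
+#                          shape=(28, 28, 1))
+#   x_adv = ead.attack(x_batch, y_onehot_batch, phase)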
+
+
+def deepfool_batch(sess, x, pred, logits, grads, X, nb_candidate, overshoot,
+ max_iter, clip_min, clip_max, nb_classes, feed=None):
+ """
+ Applies DeepFool to a batch of inputs
+ :param sess: TF session
+ :param x: The input placeholder
+ :param pred: The model's sorted symbolic output of logits, only the top
+ nb_candidate classes are contained
+ :param logits: The model's unnormalized output tensor (the input to
+ the softmax layer)
+ :param grads: Symbolic gradients of the top nb_candidate classes, produced
+ from gradient_graph
+ :param X: Numpy array with sample inputs
+ :param nb_candidate: The number of classes to test against, i.e.,
+ DeepFool only considers nb_candidate classes when
+ attacking (thus accelerating the attack). The
+ nb_candidate classes are chosen according to the
+ prediction confidence during implementation.
+ :param overshoot: A termination criterion to prevent vanishing updates
+ :param max_iter: Maximum number of iteration for DeepFool
+ :param clip_min: Minimum value for components of the example returned
+ :param clip_max: Maximum value for components of the example returned
+ :param nb_classes: Number of model output classes
+ :return: Adversarial examples
+ """
+ X_adv = deepfool_attack(sess, x, pred, logits, grads, X, nb_candidate,
+ overshoot, max_iter, clip_min, clip_max, feed=feed)
+
+ return np.asarray(X_adv, dtype=np.float32)
+
+
+def deepfool_attack(sess, x, predictions, logits, grads, sample, nb_candidate,
+ overshoot, max_iter, clip_min, clip_max, feed=None):
+ """
+ TensorFlow implementation of DeepFool.
+ Paper link: see https://arxiv.org/pdf/1511.04599.pdf
+ :param sess: TF session
+ :param x: The input placeholder
+ :param predictions: The model's sorted symbolic output of logits, only the
+ top nb_candidate classes are contained
+ :param logits: The model's unnormalized output tensor (the input to
+ the softmax layer)
+ :param grads: Symbolic gradients of the top nb_candidate classes, produced
+ from gradient_graph
+ :param sample: Numpy array with sample input
+ :param nb_candidate: The number of classes to test against, i.e.,
+ DeepFool only considers nb_candidate classes when
+ attacking (thus accelerating the attack). The
+ nb_candidate classes are chosen according to the
+ prediction confidence during implementation.
+ :param overshoot: A termination criterion to prevent vanishing updates
+ :param max_iter: Maximum number of iteration for DeepFool
+ :param clip_min: Minimum value for components of the example returned
+ :param clip_max: Maximum value for components of the example returned
+ :return: Adversarial examples
+ """
+ import copy
+
+ adv_x = copy.copy(sample)
+ # Initialize the loop variables
+ iteration = 0
+ current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
+ if current.shape == ():
+ current = np.array([current])
+ w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
+ r_tot = np.zeros(sample.shape)
+ original = current # use original label as the reference
+
+ _logger.debug("Starting DeepFool attack up to {} iterations".
+ format(max_iter))
+ # Repeat this main loop until we have achieved misclassification
+ while (np.any(current == original) and iteration < max_iter):
+
+ if iteration % 5 == 0 and iteration > 0:
+ _logger.info("Attack result at iteration {} is {}".format(
+ iteration,
+ current))
+ gradients = sess.run(grads, feed_dict={x: adv_x})
+ predictions_val = sess.run(predictions, feed_dict={x: adv_x})
+ for idx in range(sample.shape[0]):
+ pert = np.inf
+ if current[idx] != original[idx]:
+ continue
+ for k in range(1, nb_candidate):
+ w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
+ f_k = predictions_val[idx, k] - predictions_val[idx, 0]
+ # adding value 0.00001 to prevent f_k = 0
+ pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
+ if pert_k < pert:
+ pert = pert_k
+ w = w_k
+ r_i = pert * w / np.linalg.norm(w)
+ r_tot[idx, ...] = r_tot[idx, ...] + r_i
+
+ adv_x = np.clip(r_tot + sample, clip_min, clip_max)
+ current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
+ if current.shape == ():
+ current = np.array([current])
+ # Update loop variables
+ iteration = iteration + 1
+
+ # need more revision, including info like how many succeed
+ _logger.info("Attack result at iteration {} is {}".format(iteration,
+ current))
+ _logger.info("{} out of {}".format(sum(current != original),
+ sample.shape[0]) +
+ " become adversarial examples at iteration {}".format(
+ iteration))
+ # need to clip this image into the given range
+ adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
+ return adv_x
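+
+# Usage sketch (assumed names; the sorted predictions `preds_sorted` and the
+# symbolic `grads` for the top nb_candidate classes must be built beforehand,
+# e.g. by the DeepFool attack wrapper):
+#
+#   x_adv = deepfool_batch(sess, x, preds_sorted, logits, grads, X_batch,
+#                          nb_candidate=10, overshoot=0.02, max_iter=50,
+#                          clip_min=0., clip_max=1., nb_classes=10)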
diff --git a/case_studies/empir/modified_cleverhans/attacks_th.py b/case_studies/empir/modified_cleverhans/attacks_th.py
new file mode 100644
index 0000000..5f7057e
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/attacks_th.py
@@ -0,0 +1,114 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+import theano
+import warnings
+from theano import gradient, tensor as T
+from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
+
+from . import utils_th
+
+floatX = theano.config.floatX
+
+
+def fgsm(x, predictions, eps, clip_min=None, clip_max=None):
+ return fgm(x, predictions, y=None, eps=eps, ord=np.inf, clip_min=clip_min,
+ clip_max=clip_max)
+
+
+def fgm(x, predictions, y=None, eps=0.3, ord=np.inf, clip_min=None,
+ clip_max=None):
+ """
+ Theano implementation of the Fast Gradient
+ Sign method.
+ :param x: the input placeholder
+ :param predictions: the model's output tensor
+ :param y: the output placeholder. Use None (the default) to avoid the
+ label leaking effect.
+ :param eps: the epsilon (input variation parameter)
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf (other norms not implemented yet).
+ :param clip_min: optional parameter that can be used to set a minimum
+ value for components of the example returned
+ :param clip_max: optional parameter that can be used to set a maximum
+ value for components of the example returned
+ :return: a tensor for the adversarial example
+ """
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+ assert ord == np.inf, "Theano implementation not available for this norm."
+ eps = np.asarray(eps, dtype=floatX)
+
+ if y is None:
+ # Using model predictions as ground truth to avoid label leaking
+ y = T.eq(predictions, T.max(predictions, axis=1, keepdims=True))
+ y = T.cast(y, utils_th.floatX)
+ y = y / T.sum(y, 1, keepdims=True)
+ # Compute loss
+ loss = utils_th.model_loss(y, predictions, mean=True)
+
+ # Define gradient of loss wrt input
+ grad = T.grad(loss, x)
+
+ # Take sign of gradient
+ signed_grad = T.sgn(grad)
+
+ # Multiply by constant epsilon
+ scaled_signed_grad = eps * signed_grad
+
+ # Add perturbation to original example to obtain adversarial example
+ adv_x = gradient.disconnected_grad(x + scaled_signed_grad)
+
+ # If clipping is needed, reset all values outside of [clip_min, clip_max]
+ if (clip_min is not None) and (clip_max is not None):
+ adv_x = T.clip(adv_x, clip_min, clip_max)
+
+ return adv_x
+
+
+def vatm(model, x, predictions, eps, num_iterations=1, xi=1e-6,
+ clip_min=None, clip_max=None, seed=12345):
+ """
+ Theano implementation of the perturbation method used for virtual
+ adversarial training: https://arxiv.org/abs/1507.00677
+ :param model: the model which returns the network unnormalized logits
+ :param x: the input placeholder
+ :param predictions: the model's unnormalized output tensor
+ :param eps: the epsilon (input variation parameter)
+ :param num_iterations: the number of iterations
+ :param xi: the finite difference parameter
+ :param clip_min: optional parameter that can be used to set a minimum
+ value for components of the example returned
+ :param clip_max: optional parameter that can be used to set a maximum
+ value for components of the example returned
+ :param seed: the seed for random generator
+ :return: a tensor for the adversarial example
+ """
+ eps = np.asarray(eps, dtype=floatX)
+ xi = np.asarray(xi, dtype=floatX)
+ rng = RandomStreams(seed=seed)
+ d = rng.normal(size=x.shape, dtype=x.dtype)
+ for i in range(num_iterations):
+ d = xi * utils_th.l2_batch_normalize(d)
+ logits_d = model(x + d)
+ kl = utils_th.kl_with_logits(predictions, logits_d)
+ Hd = T.grad(kl.sum(), d)
+ d = gradient.disconnected_grad(Hd)
+ d = eps * utils_th.l2_batch_normalize(d)
+ adv_x = gradient.disconnected_grad(x + d)
+ if (clip_min is not None) and (clip_max is not None):
+ adv_x = T.clip(adv_x, clip_min, clip_max)
+ return adv_x
diff --git a/case_studies/empir/modified_cleverhans/devtools/LICENSE.txt b/case_studies/empir/modified_cleverhans/devtools/LICENSE.txt
new file mode 100644
index 0000000..eaac7d7
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/LICENSE.txt
@@ -0,0 +1,30 @@
+The devtools module is a derivative work from the devtools module of pylearn2.
+We reproduce the corresponding license here.
+
+Copyright (c) 2011--2014, Université de Montréal
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/case_studies/empir/modified_cleverhans/devtools/__init__.py b/case_studies/empir/modified_cleverhans/devtools/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/empir/modified_cleverhans/devtools/checks.py b/case_studies/empir/modified_cleverhans/devtools/checks.py
new file mode 100644
index 0000000..085277b
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/checks.py
@@ -0,0 +1,43 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functionality for building tests.
+
+We have to call this file "checks" and not anything with "test" as a
+substring or nosetests will execute it.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import time
+import unittest
+
+
+class CleverHansTest(unittest.TestCase):
+
+ def setUp(self):
+ self.test_start = time.time()
+ # seed the randomness
+ np.random.seed(1234)
+
+ def tearDown(self):
+ print(self.id(), "took", time.time() - self.test_start, "seconds")
+
+ def assertClose(self, x, y, *args, **kwargs):
+ # self.assertTrue(np.allclose(x, y)) doesn't give a useful message
+ # on failure
+ assert np.allclose(x, y, *args, **kwargs), (x, y)
diff --git a/case_studies/empir/modified_cleverhans/devtools/list_files.py b/case_studies/empir/modified_cleverhans/devtools/list_files.py
new file mode 100644
index 0000000..bfcf9b9
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/list_files.py
@@ -0,0 +1,100 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Code for listing files that belong to the library."""
+import logging
+import cleverhans
+import os
+__authors__ = "Ian Goodfellow"
+__copyright__ = "Copyright 2010-2012, Universite de Montreal"
+__credits__ = ["Ian Goodfellow"]
+__license__ = "3-clause BSD"
+__maintainer__ = "LISA Lab"
+__email__ = "pylearn-dev@googlegroups"
+
+logger = logging.getLogger(__name__)
+
+
+def list_files(suffix=""):
+ """
+ Returns a list of all files in CleverHans with the given suffix.
+
+ Parameters
+ ----------
+ suffix : str
+
+ Returns
+ -------
+
+ file_list : list
+ A list of all files in CleverHans whose filepath ends with `suffix`
+ """
+
+ cleverhans_path = os.path.abspath(cleverhans.__path__[0])
+ repo_path = os.path.abspath(os.path.join(cleverhans_path, os.pardir))
+
+ file_list = _list_files(cleverhans_path, suffix)
+
+ tutorials_path = os.path.join(repo_path, "cleverhans_tutorials")
+ tutorials_files = _list_files(tutorials_path, suffix)
+ tutorials_files = [os.path.join(os.pardir, path) for path in
+ tutorials_files]
+ examples_path = os.path.join(repo_path, "examples")
+ examples_files = _list_files(examples_path, suffix)
+ examples_files = [os.path.join(os.pardir, path) for path in
+ examples_files]
+
+ file_list = file_list + tutorials_files + examples_files
+
+ return file_list
+
+
+def _list_files(path, suffix=""):
+ """
+ Returns a list of all files ending in `suffix` contained within `path`.
+
+ Parameters
+ ----------
+ path : str
+ a filepath
+ suffix : str
+
+ Returns
+ -------
+ l : list
+ A list of all files ending in `suffix` contained within `path`.
+ (If `path` is a file rather than a directory, it is considered
+ to "contain" itself)
+ """
+ if os.path.isdir(path):
+ incomplete = os.listdir(path)
+ complete = [os.path.join(path, entry) for entry in incomplete]
+ lists = [_list_files(subpath, suffix) for subpath in complete]
+ flattened = []
+ for l in lists:
+ for elem in l:
+ flattened.append(elem)
+ return flattened
+ else:
+ assert os.path.exists(path), "couldn't find file '%s'" % path
+ if path.endswith(suffix):
+ return [path]
+ return []
+
+
+if __name__ == '__main__':
+ # Print all .py files in the library
+ result = list_files('.py')
+ for path in result:
+ logger.info(path)
diff --git a/case_studies/empir/modified_cleverhans/devtools/mocks.py b/case_studies/empir/modified_cleverhans/devtools/mocks.py
new file mode 100644
index 0000000..2bf75b3
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/mocks.py
@@ -0,0 +1,40 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for mocking up tests.
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+def random_feed_dict(rng, placeholders):
+ """
+ Returns random data to be used with `feed_dict`.
+ :param rng: A numpy.random.RandomState instance
+ :param placeholders: List of tensorflow placeholders
+ :return: A dict mapping placeholders to random numpy values
+ """
+
+ output = {}
+
+ for placeholder in placeholders:
+ if placeholder.dtype != 'float32':
+ raise NotImplementedError()
+ value = rng.randn(*placeholder.shape).astype('float32')
+ output[placeholder] = value
+
+ return output
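+
+
+# Illustrative usage (not part of the original module; assumes a TF1-style
+# graph with placeholders):
+#
+#   import numpy as np
+#   import tensorflow as tf
+#   x = tf.placeholder(tf.float32, shape=(2, 3))
+#   feed = random_feed_dict(np.random.RandomState(0), [x])
+#   # feed maps x to a (2, 3) float32 array of standard-normal samples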
diff --git a/case_studies/empir/modified_cleverhans/devtools/tests/__init__.py b/case_studies/empir/modified_cleverhans/devtools/tests/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/empir/modified_cleverhans/devtools/tests/docscrape.py b/case_studies/empir/modified_cleverhans/devtools/tests/docscrape.py
new file mode 100644
index 0000000..7361a38
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/tests/docscrape.py
@@ -0,0 +1,853 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+from __future__ import print_function
+
+import inspect
+from nose.plugins.skip import SkipTest
+import re
+import sys
+
+import six
+
+
+class Reader(object):
+
+ """A line-based string reader.
+
+ """
+
+ def __init__(self, data):
+ """
+ Parameters
+ ----------
+ data : str
+ String with lines separated by '\n'.
+
+ """
+ if isinstance(data, list):
+ self._str = data
+ else:
+ self._str = data.split('\n') # store string as list of lines
+
+ self.reset()
+
+ def __getitem__(self, n):
+ return self._str[n]
+
+ def reset(self):
+ self._l = 0 # current line nr
+
+ def read(self):
+ if not self.eof():
+ out = self[self._l]
+ self._l += 1
+ return out
+ else:
+ return ''
+
+ def seek_next_non_empty_line(self):
+ for l in self[self._l:]:
+ if l.strip():
+ break
+ else:
+ self._l += 1
+
+ def eof(self):
+ return self._l >= len(self._str)
+
+ def read_to_condition(self, condition_func):
+ start = self._l
+ for line in self[start:]:
+ if condition_func(line):
+ return self[start:self._l]
+ self._l += 1
+ if self.eof():
+ return self[start:self._l + 1]
+ return []
+
+ def read_to_next_empty_line(self):
+ self.seek_next_non_empty_line()
+
+ def is_empty(line):
+ return not line.strip()
+ return self.read_to_condition(is_empty)
+
+ def read_to_next_unindented_line(self):
+ def is_unindented(line):
+ return (line.strip() and (len(line.lstrip()) == len(line)))
+ return self.read_to_condition(is_unindented)
+
+ def peek(self, n=0):
+ if self._l + n < len(self._str):
+ return self[self._l + n]
+ else:
+ return ''
+
+ def is_empty(self):
+ return not ''.join(self._str).strip()
+
+ def __iter__(self):
+ for line in self._str:
+ yield line
+
+
+class NumpyDocString(object):
+
+ def __init__(self, docstring, name=None):
+ if name:
+ self.name = name
+ docstring = docstring.split('\n')
+
+ # De-indent paragraph
+ try:
+ indent = min(len(s) - len(s.lstrip()) for s in docstring
+ if s.strip())
+ except ValueError:
+ indent = 0
+
+ for n, line in enumerate(docstring):
+ docstring[n] = docstring[n][indent:]
+
+ self._doc = Reader(docstring)
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': '',
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Other Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {},
+ 'Attributes': [],
+ 'Methods': [],
+ }
+ self.section_order = []
+
+ self._parse()
+
+ def __getitem__(self, key):
+ return self._parsed_data[key]
+
+ def __setitem__(self, key, val):
+ if key not in self._parsed_data:
+ raise ValueError("Unknown section %s" % key)
+ else:
+ self._parsed_data[key] = val
+
+ def _is_at_section(self):
+ self._doc.seek_next_non_empty_line()
+
+ if self._doc.eof():
+ return False
+
+ l1 = self._doc.peek().strip() # e.g. Parameters
+
+ if l1.startswith('.. index::'):
+ return True
+
+ l2 = self._doc.peek(1).strip() # ----------
+ return (len(l1) == len(l2) and l2 == '-' * len(l1))
+
+ def _strip(self, doc):
+ i = 0
+ j = 0
+ for i, line in enumerate(doc):
+ if line.strip():
+ break
+
+ for j, line in enumerate(doc[::-1]):
+ if line.strip():
+ break
+
+ return doc[i:len(doc) - j]
+
+ def _read_to_next_section(self):
+ section = self._doc.read_to_next_empty_line()
+
+ while not self._is_at_section() and not self._doc.eof():
+ if not self._doc.peek(-1).strip(): # previous line was empty
+ section += ['']
+
+ section += self._doc.read_to_next_empty_line()
+
+ return section
+
+ def _read_sections(self):
+ while not self._doc.eof():
+ data = self._read_to_next_section()
+ name = data[0].strip()
+
+ if name.startswith('..'): # index section
+ yield name, data[1:]
+            elif len(data) < 2:
+                # malformed (empty) section near the end of the docstring
+                return
+ else:
+ yield name, self._strip(data[2:])
+
+ def _parse_param_list(self, content):
+ r = Reader(content)
+ params = []
+ while not r.eof():
+ header = r.read().strip()
+ if ' : ' in header:
+ arg_name, arg_type = header.split(' : ')[:2]
+ else:
+ arg_name, arg_type = header, ''
+
+ desc = r.read_to_next_unindented_line()
+ for n, line in enumerate(desc):
+ desc[n] = line.strip()
+ desc = desc # '\n'.join(desc)
+
+ params.append((arg_name, arg_type, desc))
+
+ return params
+
+ def _parse_see_also(self, content):
+ """
+ func_name : Descriptive text
+ continued text
+ another_func_name : Descriptive text
+ func_name1, func_name2, func_name3
+
+ """
+ functions = []
+ current_func = None
+ rest = []
+ for line in content:
+ if not line.strip():
+ continue
+ if ':' in line:
+ if current_func:
+ functions.append((current_func, rest))
+ r = line.split(':', 1)
+ current_func = r[0].strip()
+ r[1] = r[1].strip()
+ if r[1]:
+ rest = [r[1]]
+ else:
+ rest = []
+ elif not line.startswith(' '):
+ if current_func:
+ functions.append((current_func, rest))
+ current_func = None
+ rest = []
+ if ',' in line:
+ for func in line.split(','):
+ func = func.strip()
+ if func:
+ functions.append((func, []))
+ elif line.strip():
+ current_func = line.strip()
+ elif current_func is not None:
+ rest.append(line.strip())
+ if current_func:
+ functions.append((current_func, rest))
+ return functions
+
+ def _parse_index(self, section, content):
+ """
+ .. index: default
+ :refguide: something, else, and more
+
+ """
+ def strip_each_in(lst):
+ return [s.strip() for s in lst]
+
+ out = {}
+ section = section.split('::')
+ if len(section) > 1:
+ out['default'] = strip_each_in(section[1].split(','))[0]
+ for line in content:
+ line = line.split(':')
+ if len(line) > 2:
+ out[line[1]] = strip_each_in(line[2].split(','))
+ return out
+
+ def _parse_summary(self):
+ """Grab signature (if given) and summary"""
+ summary = self._doc.read_to_next_empty_line()
+ summary_str = "\n".join([s.strip() for s in summary])
+        if re.compile(r'^([\w. ]+=)?[\w\.]+\(.*\)$').match(summary_str):
+ self['Signature'] = summary_str
+ if not self._is_at_section():
+ self['Summary'] = self._doc.read_to_next_empty_line()
+        elif re.compile(r'^[\w]+\n[-]+').match(summary_str):
+ self['Summary'] = ''
+ self._doc.reset()
+ else:
+ self['Summary'] = summary
+
+ if not self._is_at_section():
+ self['Extended Summary'] = self._read_to_next_section()
+
+ def _parse(self):
+ self._doc.reset()
+ self._parse_summary()
+ for (section, content) in self._read_sections():
+ if not section.startswith('..'):
+ section = ' '.join([s.capitalize()
+ for s in section.split(' ')])
+ if section in ('Parameters', 'Other Parameters', 'Returns',
+ 'Raises', 'Warns', 'Attributes', 'Methods'):
+ self[section] = self._parse_param_list(content)
+ self.section_order.append(section)
+ elif section.startswith('.. index::'):
+ self['index'] = self._parse_index(section, content)
+ self.section_order.append('index')
+ elif section.lower() == 'see also':
+ self['See Also'] = self._parse_see_also(content)
+ self.section_order.append('See Also')
+ else:
+ self[section] = content
+ self.section_order.append(section)
+
+ # string conversion routines
+
+ def _str_header(self, name, symbol='-'):
+ return [name, len(name) * symbol]
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' ' * indent + line]
+ return out
+
+ def _str_signature(self):
+ if not self['Signature']:
+ return []
+ return ["*%s*" % self['Signature'].replace('*', '\*')] + ['']
+
+ def _str_summary(self):
+ return self['Summary'] + ['']
+
+ def _str_extended_summary(self):
+ return self['Extended Summary'] + ['']
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ for param, param_type, desc in self[name]:
+ out += ['%s : %s' % (param, param_type)]
+ out += self._str_indent(desc)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += self[name]
+ out += ['']
+ return out
+
+ def _str_see_also(self):
+ if not self['See Also']:
+ return []
+ out = []
+ out += self._str_header("See Also")
+ last_had_desc = True
+ for func, desc in self['See Also']:
+ if desc or last_had_desc:
+ out += ['']
+ out += ["`%s`_" % func]
+ else:
+ out[-1] += ", `%s`_" % func
+ if desc:
+ out += self._str_indent(desc)
+ last_had_desc = True
+ else:
+ last_had_desc = False
+ out += ['']
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ out += ['.. index:: %s' % idx.get('default', '')]
+ for section, references in six.iteritems(idx):
+ if section == 'default':
+ continue
+ out += [' :%s: %s' % (section, ', '.join(references))]
+ return out
+
+ def __str__(self):
+ out = []
+ out += self._str_signature()
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters', 'Other Parameters',
+ 'Returns', 'Raises', 'Warns'):
+ out += self._str_param_list(param_list)
+ out += self._str_see_also()
+ for s in ('Notes', 'References', 'Examples'):
+ out += self._str_section(s)
+ out += self._str_index()
+ return '\n'.join(out)
+
+ # --
+
+ def get_errors(self, check_order=True):
+ errors = []
+ self._doc.reset()
+ for j, line in enumerate(self._doc):
+ if len(line) > 75:
+ if hasattr(self, 'name'):
+ errors.append("%s: Line %d exceeds 75 chars"
+ ": \"%s\"..." % (self.name, j + 1,
+ line[:30]))
+ else:
+ errors.append("Line %d exceeds 75 chars"
+ ": \"%s\"..." % (j + 1, line[:30]))
+
+ if check_order:
+ canonical_order = ['Signature', 'Summary', 'Extended Summary',
+ 'Attributes', 'Methods', 'Parameters',
+ 'Other Parameters', 'Returns', 'Raises',
+ 'Warns',
+ 'See Also', 'Notes', 'References', 'Examples',
+ 'index']
+
+ canonical_order_copy = list(canonical_order)
+
+ for s in self.section_order:
+ while canonical_order_copy and s != canonical_order_copy[0]:
+ canonical_order_copy.pop(0)
+ if not canonical_order_copy:
+ errors.append(
+ "Sections in wrong order (starting at %s). The"
+ " right order is %s" % (s, canonical_order))
+
+ return errors
+
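+# Illustrative example (not part of the original module): parsing a small
+# NumPy-style docstring.
+#
+#   doc = NumpyDocString("Add two ints.\n"
+#                        "\n"
+#                        "Parameters\n"
+#                        "----------\n"
+#                        "a : int\n"
+#                        "b : int\n")
+#   doc['Summary']      # -> ['Add two ints.']
+#   doc['Parameters']   # -> [('a', 'int', []), ('b', 'int', [])]
+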
+
+def indent(str, indent=4):
+ indent_str = ' ' * indent
+ if str is None:
+ return indent_str
+ lines = str.split('\n')
+ return '\n'.join(indent_str + l for l in lines)
+
+
+class NumpyFunctionDocString(NumpyDocString):
+
+ def __init__(self, docstring, function):
+ super(NumpyFunctionDocString, self).__init__(docstring)
+ args, varargs, keywords, defaults = inspect.getargspec(function)
+ if (args and args != ['self']) or varargs or keywords or defaults:
+ self.has_parameters = True
+ else:
+ self.has_parameters = False
+
+ def _parse(self):
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': '',
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Other Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {},
+ }
+ return NumpyDocString._parse(self)
+
+ def get_errors(self):
+ errors = NumpyDocString.get_errors(self)
+
+ if not self['Signature']:
+ # This check is currently too restrictive.
+ # Disabling it for now.
+ # errors.append("No function signature")
+ pass
+
+ if not self['Summary']:
+ errors.append("No function summary line")
+
+ if len(" ".join(self['Summary'])) > 3 * 80:
+ errors.append("Brief function summary is longer than 3 lines")
+
+ if not self['Parameters'] and self.has_parameters:
+ errors.append("No Parameters section")
+
+ return errors
+
+
+class NumpyClassDocString(NumpyDocString):
+
+ def __init__(self, docstring, class_name, class_object):
+ super(NumpyClassDocString, self).__init__(docstring)
+ self.class_name = class_name
+ methods = dict((name, func) for name, func
+ in inspect.getmembers(class_object))
+
+ self.has_parameters = False
+ if '__init__' in methods:
+ # verify if __init__ is a Python function. If it isn't
+ # (e.g. the function is implemented in C), getargspec will fail
+ if not inspect.ismethod(methods['__init__']):
+ return
+ args, varargs, keywords, defaults = inspect.getargspec(
+ methods['__init__'])
+ if (args and args != ['self']) or varargs or keywords or defaults:
+ self.has_parameters = True
+
+ def _parse(self):
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': '',
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Other Parameters': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {},
+ 'Attributes': [],
+ 'Methods': [],
+ }
+ return NumpyDocString._parse(self)
+
+ def __str__(self):
+ out = []
+ out += self._str_signature()
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Attributes', 'Methods', 'Parameters', 'Raises',
+ 'Warns'):
+ out += self._str_param_list(param_list)
+ out += self._str_see_also()
+ for s in ('Notes', 'References', 'Examples'):
+ out += self._str_section(s)
+ out += self._str_index()
+ return '\n'.join(out)
+
+ def get_errors(self):
+ errors = NumpyDocString.get_errors(self)
+ if not self['Parameters'] and self.has_parameters:
+ errors.append("%s class has no Parameters section"
+ % self.class_name)
+ return errors
+
+
+class NumpyModuleDocString(NumpyDocString):
+
+ """
+ Module doc strings: no parsing is done.
+
+ """
+
+ def _parse(self):
+ self.out = []
+
+ def __str__(self):
+ return "\n".join(self._doc._str)
+
+ def get_errors(self):
+ errors = NumpyDocString.get_errors(self, check_order=False)
+ return errors
+
+
+def header(text, style='-'):
+ return text + '\n' + style * len(text) + '\n'
+
+
+class SphinxDocString(NumpyDocString):
+ # string conversion routines
+
+ def _str_header(self, name, symbol='`'):
+ return ['**' + name + '**'] + [symbol * (len(name) + 4)]
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' ' * indent + line]
+ return out
+
+ def _str_signature(self):
+ return ['``%s``' % self['Signature'].replace('*', '\*')] + ['']
+
+ def _str_summary(self):
+ return self['Summary'] + ['']
+
+ def _str_extended_summary(self):
+ return self['Extended Summary'] + ['']
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += ['']
+ for param, param_type, desc in self[name]:
+ out += self._str_indent(['**%s** : %s' % (param, param_type)])
+ out += ['']
+ out += self._str_indent(desc, 8)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += ['']
+ content = self._str_indent(self[name])
+ out += content
+ out += ['']
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ out += ['.. index:: %s' % idx.get('default', '')]
+ for section, references in six.iteritems(idx):
+ if section == 'default':
+ continue
+ out += [' :%s: %s' % (section, ', '.join(references))]
+ return out
+
+ def __str__(self, indent=0):
+ out = []
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters', 'Returns', 'Raises', 'Warns'):
+ out += self._str_param_list(param_list)
+ for s in ('Notes', 'References', 'Examples'):
+ out += self._str_section(s)
+ # out += self._str_index()
+ out = self._str_indent(out, indent)
+ return '\n'.join(out)
+
+
+class FunctionDoc(object):
+
+ def __init__(self, func):
+ self._f = func
+
+ def __str__(self):
+ out = ''
+ doclines = inspect.getdoc(self._f) or ''
+ try:
+ doc = SphinxDocString(doclines)
+ except Exception as e:
+ print('*' * 78)
+ print("ERROR: '%s' while parsing `%s`" % (e, self._f))
+ print('*' * 78)
+ # print "Docstring follows:"
+ # print doclines
+ # print '='*78
+ return out
+
+ if doc['Signature']:
+ out += '%s\n' % header('**%s**' %
+ doc['Signature'].replace('*', '\*'), '-')
+ else:
+ try:
+ # try to read signature
+ argspec = inspect.getargspec(self._f)
+ argspec = inspect.formatargspec(*argspec)
+ argspec = argspec.replace('*', '\*')
+ out += header('%s%s' % (self._f.__name__, argspec), '-')
+ except TypeError as e:
+ out += '%s\n' % header('**%s()**' % self._f.__name__, '-')
+
+ out += str(doc)
+ return out
+
+
+class ClassDoc(object):
+
+ def __init__(self, cls, modulename=''):
+ if not inspect.isclass(cls):
+ raise ValueError("Initialise using an object")
+ self._cls = cls
+
+ if modulename and not modulename.endswith('.'):
+ modulename += '.'
+ self._mod = modulename
+ self._name = cls.__name__
+
+ @property
+ def methods(self):
+ return [name for name, func in inspect.getmembers(self._cls)
+ if not name.startswith('_') and callable(func)]
+
+ def __str__(self):
+ out = ''
+
+ def replace_header(match):
+ return '"' * (match.end() - match.start())
+
+ for m in self.methods:
+ print("Parsing `%s`" % m)
+ out += str(FunctionDoc(getattr(self._cls, m))) + '\n\n'
+ out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
+
+ return out
+
+
+def handle_function(val, name):
+ func_errors = []
+ docstring = inspect.getdoc(val)
+ if docstring is None:
+ func_errors.append((name, '**missing** function-level docstring'))
+ else:
+ func_errors = [
+ (name, e) for e in
+ NumpyFunctionDocString(docstring, val).get_errors()
+ ]
+ return func_errors
+
+
+def handle_module(val, name):
+ module_errors = []
+ docstring = val
+ if docstring is None:
+ module_errors.append((name, '**missing** module-level docstring'))
+ else:
+ module_errors = [
+ (name, e) for e in NumpyModuleDocString(docstring).get_errors()
+ ]
+ return module_errors
+
+
+def handle_method(method, method_name, class_name):
+ method_errors = []
+
+ # Skip out-of-library inherited methods
+ module = inspect.getmodule(method)
+ if module is not None:
+ if not module.__name__.startswith('pylearn2'):
+ return method_errors
+
+ docstring = inspect.getdoc(method)
+ if docstring is None:
+ method_errors.append((class_name, method_name,
+ '**missing** method-level docstring'))
+ else:
+ method_errors = [
+ (class_name, method_name, e) for e in
+ NumpyFunctionDocString(docstring, method).get_errors()
+ ]
+ return method_errors
+
+
+def handle_class(val, class_name):
+ cls_errors = []
+ docstring = inspect.getdoc(val)
+ if docstring is None:
+ cls_errors.append((class_name,
+ '**missing** class-level docstring'))
+ else:
+ cls_errors = [
+ (e,) for e in
+ NumpyClassDocString(docstring, class_name, val).get_errors()
+ ]
+ # Get public methods and parse their docstrings
+ methods = dict(((name, func) for name, func in inspect.getmembers(val)
+ if not name.startswith('_') and callable(func) and
+ type(func) is not type))
+ for m_name, method in six.iteritems(methods):
+ # skip error check if the method was inherited
+ # from a parent class (which means it wasn't
+ # defined in this source file)
+ if inspect.getmodule(method) is not None:
+ continue
+ cls_errors.extend(handle_method(method, m_name, class_name))
+ return cls_errors
+
+
+def docstring_errors(filename, global_dict=None):
+ """
+    Run a Python file, parse the docstrings of all the classes
+    and functions it declares, and return any formatting errors found.
+
+ Parameters
+ ----------
+ filename : str
+ Filename of the module to run.
+
+ global_dict : dict, optional
+ Globals dictionary to pass along to `execfile()`.
+
+ Returns
+ -------
+ all_errors : list
+ Each entry of the list is a tuple, of length 2 or 3, with
+ format either
+
+ (func_or_class_name, docstring_error_description)
+ or
+ (class_name, method_name, docstring_error_description)
+ """
+ if global_dict is None:
+ global_dict = {}
+ if '__file__' not in global_dict:
+ global_dict['__file__'] = filename
+ if '__doc__' not in global_dict:
+ global_dict['__doc__'] = None
+ try:
+ with open(filename) as f:
+ code = compile(f.read(), filename, 'exec')
+ exec(code, global_dict)
+ except SystemExit:
+ pass
+ except SkipTest:
+ raise AssertionError("Couldn't verify format of " + filename +
+ "due to SkipTest")
+ all_errors = []
+ for key, val in six.iteritems(global_dict):
+ if not key.startswith('_'):
+ module_name = ""
+ if hasattr(inspect.getmodule(val), '__name__'):
+ module_name = inspect.getmodule(val).__name__
+ if (inspect.isfunction(val) or inspect.isclass(val)) and\
+ (inspect.getmodule(val) is None
+ or module_name == '__builtin__'):
+ if inspect.isfunction(val):
+ all_errors.extend(handle_function(val, key))
+ elif inspect.isclass(val):
+ all_errors.extend(handle_class(val, key))
+ elif key == '__doc__':
+ all_errors.extend(handle_module(val, key))
+ if all_errors:
+ all_errors.insert(0, ("%s:" % filename,))
+ return all_errors
+
+
+if __name__ == "__main__":
+ all_errors = docstring_errors(sys.argv[1])
+ if len(all_errors) > 0:
+ print("*" * 30, "docstring errors", "*" * 30)
+ for line in all_errors:
+ print(':'.join(line))
+ sys.exit(int(len(all_errors) > 0))
diff --git a/case_studies/empir/modified_cleverhans/devtools/tests/test_format.py b/case_studies/empir/modified_cleverhans/devtools/tests/test_format.py
new file mode 100644
index 0000000..1213400
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/tests/test_format.py
@@ -0,0 +1,130 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit tests for format checking
+"""
+
+from __future__ import print_function
+
+from nose.plugins.skip import SkipTest
+
+import os
+
+import modified_cleverhans
+from modified_cleverhans.devtools.tests.docscrape import docstring_errors
+from modified_cleverhans.devtools.list_files import list_files
+from pycodestyle import StyleGuide
+
+# Enter a manual list of files that are allowed to violate PEP8 here
+whitelist_pep8 = [
+]
+
+# The NIPS 2017 competition code is allowed to violate PEP8 because it
+# follows the Google style guide instead (e.g., 2 spaces instead of 4)
+whitelist_pep8.extend([os.path.relpath(path, modified_cleverhans.__path__[0])
+ for path in list_files()
+ if "nips17_adversarial_competition" in path])
+
+
+whitelist_docstrings = [
+]
+
+
+def test_format_pep8():
+ """
+ Test if pep8 is respected.
+ """
+ pep8_checker = StyleGuide()
+ files_to_check = []
+ for path in list_files(".py"):
+        rel_path = os.path.relpath(path, modified_cleverhans.__path__[0])
+ if rel_path in whitelist_pep8:
+ continue
+ else:
+ files_to_check.append(path)
+ report = pep8_checker.check_files(files_to_check)
+ if report.total_errors > 0:
+ raise AssertionError("PEP8 Format not respected")
+
+
+def print_files_information_pep8():
+ """
+ Print the list of files which can be removed from the whitelist and the
+ list of files which do not respect PEP8 formatting that aren't in the
+ whitelist
+ """
+ infracting_files = []
+ non_infracting_files = []
+ pep8_checker = StyleGuide(quiet=True)
+ for path in list_files(".py"):
+ number_of_infractions = pep8_checker.input_file(path)
+        rel_path = os.path.relpath(path, modified_cleverhans.__path__[0])
+ if number_of_infractions > 0:
+ if rel_path not in whitelist_pep8:
+ infracting_files.append(path)
+ else:
+ if rel_path in whitelist_pep8:
+ non_infracting_files.append(path)
+ print("Files that must be corrected or added to whitelist:")
+ for file in infracting_files:
+ print(file)
+ print("Files that can be removed from whitelist:")
+ for file in non_infracting_files:
+ print(file)
+
+
+def test_format_docstrings():
+ """
+ Test if docstrings are well formatted.
+ """
+ # Disabled for now
+ return True
+
+ try:
+ verify_format_docstrings()
+ except SkipTest as e:
+ import traceback
+        traceback.print_exc()
+ raise AssertionError(
+ "Some file raised SkipTest on import, and inadvertently"
+ " canceled the documentation testing."
+ )
+
+
+def verify_format_docstrings():
+ """
+ Implementation of `test_format_docstrings`. The implementation is
+ factored out so it can be placed inside a guard against SkipTest.
+ """
+ format_infractions = []
+
+ for path in list_files(".py"):
+        rel_path = os.path.relpath(path, modified_cleverhans.__path__[0])
+ if rel_path in whitelist_docstrings:
+ continue
+ try:
+ format_infractions.extend(docstring_errors(path))
+ except Exception as e:
+ format_infractions.append(["%s failed to run so format cannot "
+ "be checked. Error message:\n %s" %
+ (rel_path, e)])
+
+ if len(format_infractions) > 0:
+ msg = "\n".join(':'.join(line) for line in format_infractions)
+ raise AssertionError("Docstring format not respected:\n%s" % msg)
+
+
+if __name__ == "__main__":
+ print_files_information_pep8()
diff --git a/case_studies/empir/modified_cleverhans/devtools/version.py b/case_studies/empir/modified_cleverhans/devtools/version.py
new file mode 100644
index 0000000..3362df5
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/devtools/version.py
@@ -0,0 +1,36 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utility functions for keeping track of the version of CleverHans.
+
+These functions provide a finer level of granularity than the
+manually specified version string attached to each release.
+"""
+import hashlib
+from cleverhans.devtools.list_files import list_files
+
+
+def dev_version():
+ """
+ Returns a hexdigest of all the python files in the module.
+ """
+
+ m = hashlib.md5()
+ py_files = sorted(list_files(suffix=".py"))
+ for filename in py_files:
+ with open(filename, 'rb') as f:
+ content = f.read()
+ m.update(content)
+ return m.hexdigest()
diff --git a/case_studies/empir/modified_cleverhans/model.py b/case_studies/empir/modified_cleverhans/model.py
new file mode 100644
index 0000000..57706e0
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/model.py
@@ -0,0 +1,181 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABCMeta
+
+
+class Model(object):
+
+ """
+ An abstract interface for model wrappers that exposes model symbols
+ needed for making an attack. This abstraction removes the dependency on
+ any specific neural network package (e.g. Keras) from the core
+ code of CleverHans. It can also simplify exposing the hidden features of a
+ model when a specific package does not directly expose them.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self):
+ pass
+
+ def __call__(self, *args, **kwargs):
+ """
+ For compatibility with functions used as model definitions (taking
+ an input tensor and returning the tensor giving the output
+ of the model on that input).
+ """
+ return self.get_probs(*args, **kwargs)
+
+ def get_layer(self, x, reuse, layer):
+ """
+ Expose the hidden features of a model given a layer name.
+ :param x: A symbolic representation of the network input
+ :param layer: The name of the hidden layer to return features at.
+ :return: A symbolic representation of the hidden features
+ :raise: NoSuchLayerError if `layer` is not in the model.
+ """
+ # Return the symbolic representation for this layer.
+ output = self.fprop(x, reuse)
+ try:
+ requested = output[layer]
+ except KeyError:
+ raise NoSuchLayerError()
+ return requested
+
+ def get_logits(self, x, reuse):
+ """
+ :param x: A symbolic representation of the network input
+ :return: A symbolic representation of the output logits (i.e., the
+ values fed as inputs to the softmax layer).
+ """
+ return self.get_layer(x, reuse, 'logits')
+
+ def get_probs(self, x, reuse=True):
+ """
+ :param x: A symbolic representation of the network input
+ :return: A symbolic representation of the output probabilities (i.e.,
+ the output values produced by the softmax layer).
+ """
+ try:
+ return self.get_layer(x, reuse, 'probs')
+ except NoSuchLayerError:
+ import tensorflow as tf
+ return tf.nn.softmax(self.get_logits(x, True))
+
+ def get_layer_names(self):
+ """
+ :return: a list of names for the layers that can be exposed by this
+ model abstraction.
+ """
+
+ if hasattr(self, 'layer_names'):
+ return self.layer_names
+
+ raise NotImplementedError('`get_layer_names` not implemented.')
+
+ def fprop(self, x, reuse):
+ """
+ Exposes all the layers of the model returned by get_layer_names.
+ :param x: A symbolic representation of the network input
+ :return: A dictionary mapping layer names to the symbolic
+ representation of their output.
+ """
+ raise NotImplementedError('`fprop` not implemented.')
+
+ # special call for the ensemble model
+ def ensemble_call(self, *args, **kwargs):
+ """
+ For compatibility with functions used as model definitions (taking
+ an input tensor and returning the tensor giving the output
+ of the model on that input).
+ """
+ return self.get_ensemblepreds(*args, **kwargs)
+
+ def get_ensemblepreds(self, x, reuse=True):
+ """
+ :param x: A symbolic representation of the network input
+ :return: A symbolic representation of the ensemble output predictions
+ """
+ try:
+ return self.get_layer(x, reuse, 'combined')
+ except NoSuchLayerError:
+ raise NotImplementedError('`combinedLayer` not implemented.')
+
+ # Returns the average probability of the models that were finally used in the prediction after max voting
+ def get_combinedAvgCorrectProbs(self, x, reuse=True):
+ """
+ :param x: A symbolic representation of the network input
+ :return: A symbolic representation of the output probabilities (i.e.,
+ the output values produced by the softmax layer).
+ """
+ try:
+ return self.get_layer(x, reuse, 'combinedAvgCorrectProb')
+ except NoSuchLayerError:
+ raise NotImplementedError('`combinedAvgCorrectProbLayer` not implemented.')
+
+ # special functions for the teacher model in training with distillation
+ def get_teacher_logits(self, x, reuse):
+ """
+ :param x: A symbolic representation of the network input
+ :return: A symbolic representation of the output logits (i.e., the
+ values fed as inputs to the softmax layer).
+ """
+ return self.get_layer(x, reuse, 'teacher_logits')
+
+ def get_teacher_probs(self, x, reuse=True):
+ """
+ :param x: A symbolic representation of the network input
+ :return: A symbolic representation of the output probabilities (i.e.,
+ the output values produced by the softmax layer).
+ """
+ try:
+ return self.get_layer(x, reuse, 'teacher_probs')
+ except NoSuchLayerError:
+ import tensorflow as tf
+ return tf.nn.softmax(self.get_teacher_logits(x, True))
+
+ def teacher_call(self, *args, **kwargs):
+ """
+ For compatibility with functions used as model definitions (taking
+ an input tensor and returning the tensor giving the output
+ of the model on that input).
+ """
+ return self.get_teacher_probs(*args, **kwargs)
+
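+# Illustrative sketch (not part of the modified library; assumes a TF1-style
+# graph): a concrete subclass only needs to implement fprop() and expose
+# layer_names.
+#
+#   class TinyModel(Model):
+#       layer_names = ['logits', 'probs']
+#
+#       def fprop(self, x, reuse):
+#           import tensorflow as tf
+#           with tf.variable_scope('tiny', reuse=reuse):
+#               logits = tf.layers.dense(x, 10)
+#           return {'logits': logits, 'probs': tf.nn.softmax(logits)}
+#
+#   # get_logits(x, reuse) and get_probs(x) are then served via get_layer().
+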
+
+class CallableModelWrapper(Model):
+
+ def __init__(self, callable_fn, output_layer):
+ """
+ Wrap a callable function that takes a tensor as input and returns
+ a tensor as output with the given layer name.
+ :param callable_fn: The callable function taking a tensor and
+ returning a given layer as output.
+ :param output_layer: A string of the output layer returned by the
+ function. (Usually either "probs" or "logits".)
+ """
+
+ self.output_layer = output_layer
+ self.callable_fn = callable_fn
+
+ def get_layer_names(self):
+ return [self.output_layer]
+
+ def fprop(self, x, reuse):
+ return {self.output_layer: self.callable_fn(x)}
+
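+# Illustrative usage (not part of the modified library): wrapping a plain
+# function that maps an input tensor to logits, so attack code can treat it
+# like a Model.
+#
+#   def my_net(x):
+#       ...  # build graph, return logits tensor
+#   model = CallableModelWrapper(my_net, 'logits')
+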
+
+class NoSuchLayerError(ValueError):
+
+ """Raised when a layer that does not exist is requested."""
diff --git a/case_studies/empir/modified_cleverhans/utils.py b/case_studies/empir/modified_cleverhans/utils.py
new file mode 100644
index 0000000..1e7fc29
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/utils.py
@@ -0,0 +1,397 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
+import numpy as np
+from collections import OrderedDict
+from six.moves import xrange
+import warnings
+import logging
+
+known_number_types = (int, float, np.float16, np.float32, np.float64,
+                      np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64)
+
+
+class _ArgsWrapper(object):
+
+ """
+ Wrapper that allows attribute access to dictionaries
+ """
+
+ def __init__(self, args):
+ if not isinstance(args, dict):
+ args = vars(args)
+ self.args = args
+
+ def __getattr__(self, name):
+ return self.args.get(name)
+
+
+class AccuracyReport(object):
+
+ """
+ An object summarizing the accuracy results for experiments involving
+ training on clean examples or adversarial examples, then evaluating
+ on clean or adversarial examples.
+ """
+
+ def __init__(self):
+ self.clean_train_clean_eval = 0.
+ self.clean_train_adv_eval = 0.
+ self.adv_train_clean_eval = 0.
+ self.adv_train_adv_eval = 0.
+
+ # Training data accuracy results to be used by tutorials
+ self.train_clean_train_clean_eval = 0.
+ self.train_clean_train_adv_eval = 0.
+ self.train_adv_train_clean_eval = 0.
+ self.train_adv_train_adv_eval = 0.
+
+
+def batch_indices(batch_nb, data_length, batch_size):
+ """
+ This helper function computes a batch start and end index
+ :param batch_nb: the batch number
+ :param data_length: the total length of the data being parsed by batches
+ :param batch_size: the number of inputs in each batch
+ :return: pair of (start, end) indices
+ """
+ # Batch start and end index
+ start = int(batch_nb * batch_size)
+ end = int((batch_nb + 1) * batch_size)
+
+ # When there are not enough inputs left, we reuse some to complete the
+ # batch
+ if end > data_length:
+ shift = end - data_length
+ start -= shift
+ end -= shift
+
+ return start, end
+
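+# Worked example (illustrative): with data_length=10 and batch_size=4 the last
+# batch is shifted back so that it stays full,
+#
+#   batch_indices(0, 10, 4)  # -> (0, 4)
+#   batch_indices(2, 10, 4)  # -> (6, 10), reusing inputs 6 and 7
+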
+
+def other_classes(nb_classes, class_ind):
+ """
+ Returns a list of class indices excluding the class indexed by class_ind
+ :param nb_classes: number of classes in the task
+ :param class_ind: the class index to be omitted
+ :return: list of class indices excluding the class indexed by class_ind
+ """
+ if class_ind < 0 or class_ind >= nb_classes:
+ error_str = "class_ind must be within the range (0, nb_classes - 1)"
+ raise ValueError(error_str)
+
+ other_classes_list = list(range(nb_classes))
+ other_classes_list.remove(class_ind)
+
+ return other_classes_list
+
+
+def to_categorical(y, num_classes=None):
+ """
+ Converts a class vector (integers) to binary class matrix.
+ This is adapted from the Keras function with the same name.
+ :param y: class vector to be converted into a matrix
+ (integers from 0 to num_classes).
+    :param num_classes: total number of classes.
+ :return: A binary matrix representation of the input.
+ """
+ y = np.array(y, dtype='int').ravel()
+ if not num_classes:
+ num_classes = np.max(y) + 1
+ n = y.shape[0]
+ categorical = np.zeros((n, num_classes))
+ categorical[np.arange(n), y] = 1
+ return categorical
+
+
+def random_targets(gt, nb_classes):
+ """
+ Take in an array of correct labels and randomly select a different label
+ for each label in the array. This is typically used to randomly select a
+ target class in targeted adversarial examples attacks (i.e., when the
+ search algorithm takes in both a source class and target class to compute
+ the adversarial example).
+ :param gt: the ground truth (correct) labels. They can be provided as a
+ 1D vector or 2D array of one-hot encoded labels.
+ :param nb_classes: The number of classes for this task. The random class
+ will be chosen between 0 and nb_classes such that it
+ is different from the correct class.
+ :return: A numpy array holding the randomly-selected target classes
+ encoded as one-hot labels.
+ """
+ # If the ground truth labels are encoded as one-hot, convert to labels.
+ if len(gt.shape) == 2:
+ gt = np.argmax(gt, axis=1)
+
+ # This vector will hold the randomly selected labels.
+ result = np.zeros(gt.shape, dtype=np.int32)
+
+ for class_ind in xrange(nb_classes):
+ # Compute all indices in that class.
+ in_cl = gt == class_ind
+ size = np.sum(in_cl)
+
+ # Compute the set of potential targets for this class.
+ potential_targets = other_classes(nb_classes, class_ind)
+
+ # Draw with replacement random targets among the potential targets.
+ result[in_cl] = np.random.choice(potential_targets, size=size)
+
+ # Encode vector of random labels as one-hot labels.
+ result = to_categorical(result, nb_classes)
+ result = result.astype(np.int32)
+
+ return result
+
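+# Illustrative usage (not part of the original module):
+#
+#   gt = to_categorical([0, 1, 2], num_classes=3)
+#   targets = random_targets(gt, nb_classes=3)
+#   # targets is one-hot with shape (3, 3) and
+#   # np.argmax(targets, axis=1) differs from [0, 1, 2] element-wise.
+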
+
+def pair_visual(original, adversarial, figure=None):
+ """
+ This function displays two images: the original and the adversarial sample
+ :param original: the original input
+    :param adversarial: the input after perturbations have been applied
+ :param figure: if we've already displayed images, use the same plot
+ :return: the matplot figure to reuse for future samples
+ """
+ import matplotlib.pyplot as plt
+
+ # Ensure our inputs are of proper shape
+ assert(len(original.shape) == 2 or len(original.shape) == 3)
+
+ # To avoid creating figures per input sample, reuse the sample plot
+ if figure is None:
+ plt.ion()
+ figure = plt.figure()
+ figure.canvas.set_window_title('Cleverhans: Pair Visualization')
+
+ # Add the images to the plot
+    perturbations = adversarial - original
+    for index, image in enumerate((original, perturbations, adversarial)):
+ figure.add_subplot(1, 3, index + 1)
+ plt.axis('off')
+
+ # If the image is 2D, then we have 1 color channel
+ if len(image.shape) == 2:
+ plt.imshow(image, cmap='gray')
+ else:
+ plt.imshow(image)
+
+ # Give the plot some time to update
+ plt.pause(0.01)
+
+ # Draw the plot and return
+ plt.show()
+ return figure
+
+
+def grid_visual(data):
+ """
+ This function displays a grid of images to show full misclassification
+ :param data: grid data of the form;
+ [nb_classes : nb_classes : img_rows : img_cols : nb_channels]
+ :return: if necessary, the matplot figure to reuse
+ """
+ import matplotlib.pyplot as plt
+
+ # Ensure interactive mode is disabled and initialize our graph
+ plt.ioff()
+ figure = plt.figure()
+ figure.canvas.set_window_title('Cleverhans: Grid Visualization')
+
+ # Add the images to the plot
+ num_cols = data.shape[0]
+ num_rows = data.shape[1]
+ num_channels = data.shape[4]
+ current_row = 0
+ for y in xrange(num_rows):
+ for x in xrange(num_cols):
+ figure.add_subplot(num_rows, num_cols, (x + 1) + (y * num_cols))
+ plt.axis('off')
+
+ if num_channels == 1:
+ plt.imshow(data[x, y, :, :, 0], cmap='gray')
+ else:
+ plt.imshow(data[x, y, :, :, :])
+
+ # Draw the plot and return
+ plt.show()
+ return figure
+
+
+def conv_2d(*args, **kwargs):
+ from modified_cleverhans.utils_keras import conv_2d
+ warnings.warn("utils.conv_2d is deprecated and may be removed on or after"
+ " 2018-01-05. Switch to utils_keras.conv_2d.")
+ return conv_2d(*args, **kwargs)
+
+
+def cnn_model(*args, **kwargs):
+ from modified_cleverhans.utils_keras import cnn_model
+ warnings.warn("utils.cnn_model is deprecated and may be removed on or"
+ " after 2018-01-05. Switch to utils_keras.cnn_model.")
+ return cnn_model(*args, **kwargs)
+
+
+def set_log_level(level, name="cleverhans"):
+ """
+ Sets the threshold for the cleverhans logger to level
+ :param level: the logger threshold. You can find values here:
+ https://docs.python.org/2/library/logging.html#levels
+ :param name: the name used for the cleverhans logger
+ """
+ logging.getLogger(name).setLevel(level)
+
+
+def create_logger(name):
+ """
+ Create a logger object with the given name.
+
+ If this is the first time that we call this method, then initialize the
+ formatter.
+ """
+ base = logging.getLogger("cleverhans")
+ if len(base.handlers) == 0:
+ ch = logging.StreamHandler()
+ formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
+ '%(message)s')
+ ch.setFormatter(formatter)
+ base.addHandler(ch)
+
+ return base
+
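+# Illustrative usage (not part of the original module): create_logger attaches
+# a stream handler to the shared "cleverhans" logger and returns it;
+# set_log_level then controls its threshold.
+#
+#   log = create_logger("cleverhans")
+#   set_log_level(logging.DEBUG)
+#   log.debug("visible once the threshold is DEBUG")
+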
+
+def deterministic_dict(normal_dict):
+ """
+ Returns a version of `normal_dict` whose iteration order is always the same
+ """
+ out = OrderedDict()
+ for key in sorted(normal_dict.keys()):
+ out[key] = normal_dict[key]
+ return out
+
+
+def parse_model_settings(model_path):
+
+ tokens = model_path.split('/')
+ precision_list = ['bin', 'binsc', 'fp']
+ precision = ''
+ start_index = 0
+ adv = False
+
+ for p in precision_list:
+ if p in tokens:
+ start_index = tokens.index(p)
+ precision = p
+ try:
+ nb_filters = int(tokens[start_index + 1].split('_')[1])
+ batch_size = int(tokens[start_index + 2].split('_')[1])
+ learning_rate = float(tokens[start_index + 3].split('_')[1])
+ nb_epochs = int(tokens[start_index + 4].split('_')[1])
+
+ adv_index = start_index + 5
+ if adv_index < len(tokens):
+ adv = True if 'adv' in tokens[adv_index] else False
+
+ print("Got %s model" % precision)
+ print("Got %d filters" % nb_filters)
+ print("Got batch_size %d" % batch_size)
+ print("Got batch_size %f" % learning_rate)
+ print("Got %d epochs" % nb_epochs)
+ except:
+ print("Could not parse tokens!")
+ sys.exit(1)
+
+ return nb_filters, batch_size, learning_rate, nb_epochs, adv
+
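+# Worked example (illustrative; assumes the directory layout produced by
+# build_model_save_path below):
+#
+#   parse_model_settings('models/fp/k_64/bs_128/lr_0.001/ep_50/adv_0/1')
+#   # -> (64, 128, 0.001, 50, True)
+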
+
+def build_model_save_path(root_path, batch_size, nb_filters, lr, epochs, adv,
+                          delay, precision=''):
+
+    # `precision` is one of the entries of `precision_list` used in
+    # parse_model_settings (e.g. 'bin', 'binsc' or 'fp')
+    model_path = os.path.join(root_path, precision)
+ model_path += 'k_' + str(nb_filters) + '/'
+ model_path += 'bs_' + str(batch_size) + '/'
+ model_path += 'lr_' + str(lr) + '/'
+ model_path += 'ep_' + str(epochs)
+
+ if adv:
+ model_path += '/adv_%d' % delay
+
+ # optionally create this dir if it does not already exist,
+ # otherwise, increment
+ model_path = create_dir_if_not_exists(model_path)
+
+ return model_path
+
+
+def create_dir_if_not_exists(path):
+ if not os.path.exists(path):
+ path += '/1'
+ os.makedirs(path)
+ else:
+ digits = []
+ sub_dirs = next(os.walk(path))[1]
+ [digits.append(s) for s in sub_dirs if s.isdigit()]
+ sub = '/' + str(int(max(digits)) + 1) if len(digits) > 0 else '/1'
+ path += sub
+ os.makedirs(path)
+ print('Logging to:%s' % path)
+ return path
+
+
+def build_targeted_dataset(X_test, Y_test, indices, nb_classes, img_rows, img_cols, img_channels):
+ """
+    Build a dataset for targeted attacks. Each source image is repeated
+    nb_classes - 1 times, and target labels are assigned so that they do not
+    overlap with the true label.
+ :param X_test: clean source images
+ :param Y_test: true labels for X_test
+ :param indices: indices of source samples to use
+ :param nb_classes: number of classes in classification problem
+ :param img_rows: number of pixels along rows of image
+    :param img_cols: number of pixels along columns of image
+    :param img_channels: number of color channels in each image
+    """
+
+ nb_samples = len(indices)
+ nb_target_classes = nb_classes - 1
+ X = X_test[indices]
+ Y = Y_test[indices]
+
+ adv_inputs = np.array(
+ [[instance] * nb_target_classes for
+ instance in X], dtype=np.float32)
+ adv_inputs = adv_inputs.reshape(
+ (nb_samples * nb_target_classes, img_rows, img_cols, img_channels))
+
+ true_labels = np.array(
+ [[instance] * nb_target_classes for
+ instance in Y], dtype=np.float32)
+ true_labels = true_labels.reshape(
+ nb_samples * nb_target_classes, nb_classes)
+
+ target_labels = np.zeros((nb_samples * nb_target_classes, nb_classes))
+
+ for n in range(nb_samples):
+ one_hot = np.zeros((nb_target_classes, nb_classes))
+ one_hot[np.arange(nb_target_classes), np.arange(nb_classes)
+ != np.argmax(Y[n])] = 1.0
+ start = n * nb_target_classes
+ end = start + nb_target_classes
+ target_labels[start:end] = one_hot
+
+ return adv_inputs, true_labels, target_labels
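+
+
+# Worked example (illustrative; assumes MNIST-shaped arrays): with nb_classes=10
+# and five source indices, every image is repeated once per candidate target.
+#
+#   adv_inputs, true_labels, target_labels = build_targeted_dataset(
+#       X_test, Y_test, indices=np.arange(5), nb_classes=10,
+#       img_rows=28, img_cols=28, img_channels=1)
+#   # adv_inputs.shape == (45, 28, 28, 1); target_labels assigns each of the
+#   # nine non-true classes exactly once per source image.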
diff --git a/case_studies/empir/modified_cleverhans/utils_keras.py b/case_studies/empir/modified_cleverhans/utils_keras.py
new file mode 100644
index 0000000..c6110ce
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/utils_keras.py
@@ -0,0 +1,213 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Model construction utilities based on keras
+"""
+from .model import Model
+
+import keras
+from keras.utils import np_utils
+from keras.models import Sequential
+from keras.layers import Dense, Activation, Flatten
+
+from distutils.version import LooseVersion
+if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
+ from keras.layers import Conv2D
+else:
+ from keras.layers import Convolution2D
+
+
+def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
+ """
+ Defines the right convolutional layer according to the
+ version of Keras that is installed.
+    :param filters: (required integer) the dimensionality of the output
+                    space (i.e. the number of output filters in the
+                    convolution)
+    :param kernel_shape: (required tuple or list of 2 integers) specifies
+                         the width and height of the convolution kernel.
+    :param strides: (required tuple or list of 2 integers) specifies the
+                    strides of the convolution along the width and height.
+ :param padding: (required string) can be either 'valid' (no padding around
+ input or feature map) or 'same' (pad to ensure that the
+ output feature map size is identical to the layer input)
+ :param input_shape: (optional) give input shape if this is the first
+ layer of the model
+ :return: the Keras layer
+ """
+ if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
+ if input_shape is not None:
+ return Conv2D(filters=filters, kernel_size=kernel_shape,
+ strides=strides, padding=padding,
+ input_shape=input_shape)
+ else:
+ return Conv2D(filters=filters, kernel_size=kernel_shape,
+ strides=strides, padding=padding)
+ else:
+ if input_shape is not None:
+ return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
+ subsample=strides, border_mode=padding,
+ input_shape=input_shape)
+ else:
+ return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
+ subsample=strides, border_mode=padding)
+
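+# Illustrative usage (not part of the original module): the first layer of a
+# 28x28x1 model.
+#
+#   layer = conv_2d(64, (8, 8), (2, 2), "same", input_shape=(28, 28, 1))
+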
+
+def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
+ channels=1, nb_filters=64, nb_classes=10):
+ """
+ Defines a CNN model using Keras sequential model
+ :param logits: If set to False, returns a Keras model, otherwise will also
+ return logits tensor
+ :param input_ph: The TensorFlow tensor for the input
+ (needed if returning logits)
+ ("ph" stands for placeholder but it need not actually be a
+ placeholder)
+    :param img_rows: number of rows in the image
+ :param img_cols: number of columns in the image
+ :param channels: number of color channels (e.g., 1 for MNIST)
+ :param nb_filters: number of convolutional filters per layer
+ :param nb_classes: the number of output classes
+ :return:
+ """
+ model = Sequential()
+
+ # Define the layers successively (convolution layers are version dependent)
+ if keras.backend.image_dim_ordering() == 'th':
+ input_shape = (channels, img_rows, img_cols)
+ else:
+ input_shape = (img_rows, img_cols, channels)
+
+ layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
+ input_shape=input_shape),
+ Activation('relu'),
+ conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
+ Activation('relu'),
+ conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
+ Activation('relu'),
+ Flatten(),
+ Dense(nb_classes)]
+
+ for layer in layers:
+ model.add(layer)
+
+ if logits:
+ logits_tensor = model(input_ph)
+ model.add(Activation('softmax'))
+
+ if logits:
+ return model, logits_tensor
+ else:
+ return model
+
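+# Illustrative usage (not part of the original module; assumes a TF1
+# placeholder as input):
+#
+#   import tensorflow as tf
+#   x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
+#   model, logits = cnn_model(logits=True, input_ph=x)
+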
+
+class KerasModelWrapper(Model):
+ """
+ An implementation of `Model` that wraps a Keras model. It
+ specifically exposes the hidden features of a model by creating new models.
+ The symbolic graph is reused and so there is little overhead. Splitting
+ in-place operations can incur an overhead.
+ """
+
+ def __init__(self, model=None):
+ """
+ Create a wrapper for a Keras model
+ :param model: A Keras model
+ """
+ super(KerasModelWrapper, self).__init__()
+
+ if model is None:
+ raise ValueError('model argument must be supplied.')
+
+ self.model = model
+ self.keras_model = None
+
+ def _get_softmax_name(self):
+ """
+ Looks for the name of the softmax layer.
+ :return: Softmax layer name
+ """
+ for i, layer in enumerate(self.model.layers):
+ cfg = layer.get_config()
+ if 'activation' in cfg and cfg['activation'] == 'softmax':
+ return layer.name
+
+ raise Exception("No softmax layers found")
+
+ def _get_logits_name(self):
+ """
+ Looks for the name of the layer producing the logits.
+ :return: name of layer producing the logits
+ """
+ softmax_name = self._get_softmax_name()
+ softmax_layer = self.model.get_layer(softmax_name)
+ node = softmax_layer.inbound_nodes[0]
+ logits_name = node.inbound_layers[0].name
+
+ return logits_name
+
+ def get_logits(self, x):
+ """
+ :param x: A symbolic representation of the network input.
+ :return: A symbolic representation of the logits
+ """
+ logits_name = self._get_logits_name()
+
+ return self.get_layer(x, logits_name)
+
+ def get_probs(self, x):
+ """
+ :param x: A symbolic representation of the network input.
+ :return: A symbolic representation of the probs
+ """
+ name = self._get_softmax_name()
+
+ return self.get_layer(x, name)
+
+ def get_layer_names(self):
+ """
+ :return: Names of all the layers kept by Keras
+ """
+ layer_names = [x.name for x in self.model.layers]
+ return layer_names
+
+ def fprop(self, x):
+ """
+ Exposes all the layers of the model returned by get_layer_names.
+ :param x: A symbolic representation of the network input
+ :return: A dictionary mapping layer names to the symbolic
+ representation of their output.
+ """
+ from keras.models import Model as KerasModel
+
+ if self.keras_model is None:
+ # Get the input layer
+ new_input = self.model.get_input_at(0)
+
+ # Make a new model that returns each of the layers as output
+ out_layers = [x_layer.output for x_layer in self.model.layers]
+ self.keras_model = KerasModel(new_input, out_layers)
+
+ # and get the outputs for that model on the input x
+ outputs = self.keras_model(x)
+
+ # Keras only returns a list when there is more than one output; if the
+ # model has a single layer, wrap its lone output in a list
+ if len(self.model.layers) == 1:
+ outputs = [outputs]
+
+ # compute the dict to return
+ fprop_dict = dict(zip(self.get_layer_names(), outputs))
+
+ return fprop_dict
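+
+
+# A minimal usage sketch (not part of the original code base): wrap a Keras
+# model so attacks can query logits and probabilities symbolically. `x` is
+# assumed to be a TensorFlow placeholder matching the model input.
+#
+#   wrapped = KerasModelWrapper(cnn_model())
+#   logits = wrapped.get_logits(x)  # pre-softmax activations
+#   probs = wrapped.get_probs(x)    # softmax output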
diff --git a/case_studies/empir/modified_cleverhans/utils_mnist.py b/case_studies/empir/modified_cleverhans/utils_mnist.py
new file mode 100644
index 0000000..6f3219a
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/utils_mnist.py
@@ -0,0 +1,87 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy as np
+import sys
+import warnings
+
+from . import utils
+
+
+def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
+ test_end=10000):
+ """
+ Load and preprocess MNIST dataset
+ :param datadir: path to folder where data should be stored
+ :param train_start: index of first training set example
+ :param train_end: index of last training set example
+ :param test_start: index of first test set example
+ :param test_end: index of last test set example
+ :return: tuple of four arrays containing training data, training labels,
+ testing data and testing labels.
+ """
+ assert isinstance(train_start, int)
+ assert isinstance(train_end, int)
+ assert isinstance(test_start, int)
+ assert isinstance(test_end, int)
+
+ if 'tensorflow' in sys.modules:
+ from tensorflow.examples.tutorials.mnist import input_data
+ mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
+ X_train = np.vstack((mnist.train.images, mnist.validation.images))
+ Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
+ X_test = mnist.test.images
+ Y_test = mnist.test.labels
+ else:
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+ import keras
+ from keras.datasets import mnist
+ from keras.utils import np_utils
+
+ # These values are specific to MNIST
+ img_rows = 28
+ img_cols = 28
+ nb_classes = 10
+
+ # the data, shuffled and split between train and test sets
+ (X_train, y_train), (X_test, y_test) = mnist.load_data()
+
+ if keras.backend.image_dim_ordering() == 'th':
+ X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
+ X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
+
+ X_train = X_train.astype('float32')
+ X_test = X_test.astype('float32')
+ X_train /= 255
+ X_test /= 255
+
+ # convert class vectors to binary class matrices
+ Y_train = np_utils.to_categorical(y_train, nb_classes)
+ Y_test = np_utils.to_categorical(y_test, nb_classes)
+
+ X_train = X_train[train_start:train_end]
+ Y_train = Y_train[train_start:train_end]
+ X_test = X_test[test_start:test_end]
+ Y_test = Y_test[test_start:test_end]
+
+ print('X_train shape:', X_train.shape)
+ print('X_test shape:', X_test.shape)
+
+ return X_train, Y_train, X_test, Y_test
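+
+
+# A minimal usage sketch (not part of the original code base): load the full
+# MNIST split with the default index ranges.
+#
+#   X_train, Y_train, X_test, Y_test = data_mnist(datadir='/tmp/')
+#   # X_train/Y_train hold 60000 examples, X_test/Y_test hold 10000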
diff --git a/case_studies/empir/modified_cleverhans/utils_tf.py b/case_studies/empir/modified_cleverhans/utils_tf.py
new file mode 100644
index 0000000..009db6a
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/utils_tf.py
@@ -0,0 +1,1578 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from distutils.version import LooseVersion
+import math
+import numpy as np
+import os
+from six.moves import xrange
+import tensorflow as tf
+import time
+import warnings
+import logging
+
+from .utils import batch_indices, _ArgsWrapper, create_logger, set_log_level
+
+FLAGS = tf.app.flags.FLAGS
+
+_logger = create_logger("cleverhans.utils.tf")
+
+
+class _FlagsWrapper(_ArgsWrapper):
+
+ """
+ Wrapper that tries to find missing parameters in TensorFlow FLAGS
+ for backwards compatibility.
+
+ Plain _ArgsWrapper should be used instead if the support for FLAGS
+ is removed.
+ """
+
+ def __getattr__(self, name):
+ val = self.args.get(name)
+ if val is None:
+ warnings.warn('Setting parameters ({}) from TensorFlow FLAGS is '
+ 'deprecated.'.format(name))
+ val = FLAGS.__getattr__(name)
+ return val
+
+
+def model_loss(y, model, mean=True):
+ """
+ Define loss of TF graph
+ :param y: correct labels
+ :param model: output of the model
+ :param mean: boolean indicating whether to return the mean of the loss
+ or a vector of per-example losses
+ :return: mean loss if `mean` is True, otherwise a vector with the
+ per-example loss
+ """
+ op = model.op
+ if "softmax" in str(op).lower():
+ logits, = op.inputs
+ else:
+ logits = model
+
+ out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
+
+ if mean:
+ out = tf.reduce_mean(out)
+ return out
+
+
+def initialize_uninitialized_global_variables(sess):
+ """
+ Only initializes the variables of a TensorFlow session that were not
+ already initialized.
+ :param sess: the TensorFlow session
+ :return:
+ """
+ # List all global variables
+ global_vars = tf.global_variables()
+
+ # Find initialized status for all variables
+ is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
+ is_initialized = sess.run(is_var_init)
+
+ # List all variables that were not initialized previously
+ not_initialized_vars = [var for (var, init) in
+ zip(global_vars, is_initialized) if not init]
+
+ # Initialize all uninitialized variables found, if any
+ if len(not_initialized_vars):
+ sess.run(tf.variables_initializer(not_initialized_vars))
+
+
+def sign_changes_count_op(prv_k, k):
+
+ print(k)
+ print(prv_k)
+ return tf.cast(np.product(k.shape[:]), tf.int32) - tf.reduce_sum(
+ tf.cast(tf.equal(prv_k, k), tf.int32))
+
+
+def normalized_sign_changes_op(prv_k, k):
+
+ return 1.0 - tf.reduce_sum(tf.cast(tf.equal(
+ prv_k, k), tf.float32)) / tf.cast(np.product(k.shape[:]), tf.float32)
+
+
+def create_kernel_placeholder(model, i):
+
+ return tf.placeholder(
+ tf.float32, [model.layers[i].kernels.shape[0], model.layers[i].kernels.shape[1],
+ model.layers[i].kernels.shape[2], model.layers[i].kernels.shape[3]])
+
+
+def model_train(sess, x, y, predictions, X_train, Y_train, model=None, phase=None,
+ writer=None, save=False, predictions_adv=None, init_all=False,
+ evaluate=None, verbose=True, feed=None, args=None, rng=None):
+ """
+ Train a TF graph
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_train: numpy array with training inputs
+ :param Y_train: numpy array with training outputs
+ :param save: boolean controlling the save operation
+ :param predictions_adv: if set with the adversarial example tensor,
+ will run adversarial training
+ :param init_all: (boolean) If set to true, all TF variables in the session
+ are (re)initialized, otherwise only previously
+ uninitialized variables are initialized before training.
+ :param evaluate: function that is run after each training iteration
+ (typically to display the test/validation accuracy).
+ :param verbose: (boolean) all print statements disabled when set to False.
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `nb_epochs`, `learning_rate`,
+ `batch_size`
+ If save is True, should also contain 'log_dir'
+ and 'filename'
+ :param rng: Instance of numpy.random.RandomState
+ :return: True if model trained
+ """
+ args = _FlagsWrapper(args or {})
+
+ # Check that necessary arguments were given (see doc above)
+ assert args.nb_epochs, "Number of epochs was not given in args dict"
+ assert args.learning_rate, "Learning rate was not given in args dict"
+ assert args.batch_size, "Batch size was not given in args dict"
+
+ if save:
+ assert args.log_dir, "Directory for save was not given in args dict"
+ assert args.filename, "Filename for save was not given in args dict"
+
+ if not verbose:
+ set_log_level(logging.WARNING)
+ warnings.warn("verbose argument is deprecated and will be removed"
+ " on 2018-02-11. Instead, use utils.set_log_level()."
+ " For backward compatibility, log_level was set to"
+ " logging.WARNING (30).")
+
+ if rng is None:
+ rng = np.random.RandomState()
+
+ # Define loss
+ loss = model_loss(y, predictions)
+ if predictions_adv is not None:
+ loss = (loss + model_loss(y, predictions_adv)) / 2
+
+ with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
+ global_step = tf.get_variable(
+ "global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
+
+ train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
+ train_step = train_step.minimize(loss)
+
+ if writer is not None:
+
+ assert args.loss_name, "Name of scalar summary loss"
+ training_summary = tf.summary.scalar(args.loss_name, loss)
+ merge_op = tf.summary.merge_all()
+
+ with sess.as_default():
+ if hasattr(tf, "global_variables_initializer"):
+ if init_all:
+ tf.global_variables_initializer().run()
+ else:
+ initialize_uninitialized_global_variables(sess)
+ else:
+ warnings.warn("Update your copy of tensorflow; future versions of "
+ "CleverHans may drop support for this version.")
+ sess.run(tf.initialize_all_variables())
+
+ init_step = sess.run(global_step)
+
+ for epoch in xrange(args.nb_epochs):
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_train)
+
+ # Indices to shuffle training set
+ index_shuf = list(range(len(X_train)))
+ rng.shuffle(index_shuf)
+
+ prev = time.time()
+ for batch in range(nb_batches):
+
+ step = init_step + (epoch * nb_batches + batch)
+
+ # Compute batch start and end indices
+ start, end = batch_indices(
+ batch, len(X_train), args.batch_size)
+
+ # Perform one training step
+ feed_dict = {x: X_train[index_shuf[start:end]],
+ y: Y_train[index_shuf[start:end]],
+ phase: args.is_training}
+ if feed is not None:
+ feed_dict.update(feed)
+ sess.run(train_step, feed_dict=feed_dict)
+
+ if batch % 100 == 0:
+ if writer is not None:
+ loss_val, merged_summ = sess.run(
+ [loss, merge_op], feed_dict=feed_dict)
+ writer.add_summary(merged_summ, step)
+ writer.flush()
+ else:
+ loss_val = sess.run(loss, feed_dict=feed_dict)
+
+ #print('epoch %d, batch %d, step %d, loss %.4f' %
+ # (epoch, batch, step, loss_val))
+
+ assert end >= len(X_train) # Check that all examples were used
+ cur = time.time()
+ if verbose:
+ _logger.info("Epoch " + str(epoch) + " took " +
+ str(cur - prev) + " seconds")
+ if evaluate is not None:
+ evaluate()
+ #global_step = step
+ if save:
+ save_path = os.path.join(args.log_dir, args.filename)
+ #save_path = args.log_dir
+ saver = tf.train.Saver()
+ if not os.path.exists(args.log_dir):
+ os.makedirs(args.log_dir)
+ saver.save(sess, save_path, global_step=step)
+ _logger.info("Completed model training and saved at: " +
+ str(save_path))
+ else:
+ _logger.info("Completed model training.")
+
+ return True
+
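+# NOTE: `model_loss_temp` is used by the distillation helpers below but is not
+# defined in this file. The following is a hedged sketch of a plausible
+# temperature-scaled loss (softmax cross-entropy computed on logits divided by
+# the temperature); the authors' actual definition may differ.
+def model_loss_temp(y, model, temperature, mean=True):
+  """
+  Sketch (assumption): distillation loss with a softmax temperature.
+  :param y: correct labels or soft teacher labels
+  :param model: output of the model (softmax output or logits)
+  :param temperature: softmax temperature used for distillation
+  :param mean: whether to return the mean loss or per-example losses
+  """
+  op = model.op
+  if "softmax" in str(op).lower():
+    logits, = op.inputs
+  else:
+    logits = model
+
+  out = tf.nn.softmax_cross_entropy_with_logits(logits=logits / temperature,
+                                                labels=y)
+  if mean:
+    out = tf.reduce_mean(out)
+  return out
+
+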
+# Variation of model_train for the teacher model in distillation
+def model_train_teacher(sess, x, y, predictions, logits, temperature, X_train, Y_train, model=None, phase=None,
+ writer=None, save=False, predictions_adv=None, init_all=False,
+ evaluate=None, verbose=True, feed=None, args=None, rng=None):
+ """
+ Train a TF graph
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_train: numpy array with training inputs
+ :param Y_train: numpy array with training outputs
+ :param save: boolean controlling the save operation
+ :param predictions_adv: if set with the adversarial example tensor,
+ will run adversarial training
+ :param init_all: (boolean) If set to true, all TF variables in the session
+ are (re)initialized, otherwise only previously
+ uninitialized variables are initialized before training.
+ :param evaluate: function that is run after each training iteration
+ (typically to display the test/validation accuracy).
+ :param verbose: (boolean) all print statements disabled when set to False.
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `nb_epochs`, `learning_rate`,
+ `batch_size`
+ If save is True, should also contain 'log_dir'
+ and 'filename'
+ :param rng: Instance of numpy.random.RandomState
+ :return: True if model trained
+ """
+ args = _FlagsWrapper(args or {})
+
+ # Check that necessary arguments were given (see doc above)
+ assert args.nb_epochs, "Number of epochs was not given in args dict"
+ assert args.learning_rate, "Learning rate was not given in args dict"
+ assert args.batch_size, "Batch size was not given in args dict"
+
+ if save:
+ assert args.log_dir, "Directory for save was not given in args dict"
+ assert args.filename, "Filename for save was not given in args dict"
+
+ if not verbose:
+ set_log_level(logging.WARNING)
+ warnings.warn("verbose argument is deprecated and will be removed"
+ " on 2018-02-11. Instead, use utils.set_log_level()."
+ " For backward compatibility, log_level was set to"
+ " logging.WARNING (30).")
+
+ if rng is None:
+ rng = np.random.RandomState()
+
+ # Define loss
+ # loss = model_loss(y, predictions)
+ loss = model_loss_temp(y, predictions, temperature)
+ if predictions_adv is not None:
+ loss = (loss + model_loss(y, predictions_adv)) / 2
+
+ with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
+ teacher_global_step = tf.get_variable(
+ "teacher_global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
+
+ train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
+ train_step = train_step.minimize(loss)
+
+ scaled_preds = tf.nn.softmax(logits / temperature)
+ scaled_preds_train = np.zeros([len(X_train), np.size(Y_train, 1)])
+
+ if writer is not None:
+
+ assert args.loss_name, "Name of scalar summary loss"
+ training_summary = tf.summary.scalar(args.loss_name, loss)
+ merge_op = tf.summary.merge_all()
+
+ with sess.as_default():
+ if hasattr(tf, "global_variables_initializer"):
+ if init_all:
+ tf.global_variables_initializer().run()
+ else:
+ initialize_uninitialized_global_variables(sess)
+ else:
+ warnings.warn("Update your copy of tensorflow; future versions of "
+ "CleverHans may drop support for this version.")
+ sess.run(tf.initialize_all_variables())
+
+ init_step = sess.run(teacher_global_step)
+
+ for epoch in xrange(args.nb_epochs):
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_train)
+
+ # Indices to shuffle training set
+ index_shuf = list(range(len(X_train)))
+ rng.shuffle(index_shuf)
+
+ prev = time.time()
+ for batch in range(nb_batches):
+
+ step = init_step + (epoch * nb_batches + batch)
+
+ # Compute batch start and end indices
+ start, end = batch_indices(
+ batch, len(X_train), args.batch_size)
+
+ # Perform one training step
+ feed_dict = {x: X_train[index_shuf[start:end]],
+ y: Y_train[index_shuf[start:end]],
+ phase: args.is_training}
+ if feed is not None:
+ feed_dict.update(feed)
+ sess.run(train_step, feed_dict=feed_dict)
+ if epoch == args.nb_epochs - 1:
+ scaled_predicted = sess.run(scaled_preds, feed_dict=feed_dict)
+ scaled_preds_train[start:end] = scaled_predicted
+
+ if batch % 100 == 0:
+ if writer is not None:
+ loss_val, merged_summ = sess.run(
+ [loss, merge_op], feed_dict=feed_dict)
+ writer.add_summary(merged_summ, step)
+ writer.flush()
+ else:
+ loss_val = sess.run(loss, feed_dict=feed_dict)
+
+ #print('epoch %d, batch %d, step %d, loss %.4f' %
+ # (epoch, batch, step, loss_val))
+
+ assert end >= len(X_train) # Check that all examples were used
+ cur = time.time()
+ if verbose:
+ _logger.info("Epoch " + str(epoch) + " took " +
+ str(cur - prev) + " seconds")
+ if evaluate is not None:
+ evaluate()
+ #teacher_global_step = step
+ if save:
+ save_path = os.path.join(args.log_dir, args.filename)
+ #save_path = args.log_dir
+ saver = tf.train.Saver()
+ if not os.path.exists(args.log_dir):
+ os.makedirs(args.log_dir)
+ saver.save(sess, save_path, global_step=step)
+ _logger.info("Completed model training and saved at: " +
+ str(save_path))
+ else:
+ _logger.info("Completed model training.")
+
+ return scaled_preds_train
+
+# Modified version of model_train for model loss calculation with a different temperature
+def model_train_student(sess, x, y, predictions, temperature, X_train, Y_train, y_teacher=None,
+ teacher_preds=None, alpha=0.5, beta=0.5, model=None, phase=None, writer=None, save=False,
+ predictions_adv=None, init_all=False, evaluate=None, verbose=True, feed=None, args=None, rng=None):
+ """
+ Train a TF graph
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_train: numpy array with training inputs
+ :param Y_train: numpy array with training outputs
+ :param save: boolean controlling the save operation
+ :param predictions_adv: if set with the adversarial example tensor,
+ will run adversarial training
+ :param init_all: (boolean) If set to true, all TF variables in the session
+ are (re)initialized, otherwise only previously
+ uninitialized variables are initialized before training.
+ :param evaluate: function that is run after each training iteration
+ (typically to display the test/validation accuracy).
+ :param verbose: (boolean) all print statements disabled when set to False.
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `nb_epochs`, `learning_rate`,
+ `batch_size`
+ If save is True, should also contain 'log_dir'
+ and 'filename'
+ :param rng: Instance of numpy.random.RandomState
+ :return: True if model trained
+ """
+ args = _FlagsWrapper(args or {})
+
+ # Check that necessary arguments were given (see doc above)
+ assert args.nb_epochs, "Number of epochs was not given in args dict"
+ assert args.learning_rate, "Learning rate was not given in args dict"
+ assert args.batch_size, "Batch size was not given in args dict"
+
+ if save:
+ assert args.log_dir, "Directory for save was not given in args dict"
+ assert args.filename, "Filename for save was not given in args dict"
+
+ if not verbose:
+ set_log_level(logging.WARNING)
+ warnings.warn("verbose argument is deprecated and will be removed"
+ " on 2018-02-11. Instead, use utils.set_log_level()."
+ " For backward compatibility, log_level was set to"
+ " logging.WARNING (30).")
+
+ if rng is None:
+ rng = np.random.RandomState()
+
+ # Define loss
+ # Incorporating both hard and soft labels for training
+ if y_teacher is not None:
+ loss = alpha*model_loss(y, predictions) + beta*model_loss_temp(y_teacher, predictions, temperature)
+ else:
+ loss = model_loss(y, predictions)
+
+ with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
+ global_step = tf.get_variable(
+ "global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
+
+ train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
+ train_step = train_step.minimize(loss)
+
+ if writer is not None:
+ assert args.loss_name, "Name of scalar summary loss"
+ training_summary = tf.summary.scalar(args.loss_name, loss)
+ merge_op = tf.summary.merge_all()
+
+ with sess.as_default():
+ if hasattr(tf, "global_variables_initializer"):
+ if init_all:
+ tf.global_variables_initializer().run()
+ else:
+ initialize_uninitialized_global_variables(sess)
+ else:
+ warnings.warn("Update your copy of tensorflow; future versions of "
+ "CleverHans may drop support for this version.")
+ sess.run(tf.initialize_all_variables())
+
+ init_step = sess.run(global_step)
+
+ for epoch in xrange(args.nb_epochs):
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_train)
+
+ # Indices to shuffle training set
+ index_shuf = list(range(len(X_train)))
+ rng.shuffle(index_shuf)
+
+ prev = time.time()
+ for batch in range(nb_batches):
+
+ step = init_step + (epoch * nb_batches + batch)
+
+ # Compute batch start and end indices
+ start, end = batch_indices(
+ batch, len(X_train), args.batch_size)
+
+ # Perform one training step
+ feed_dict = {x: X_train[index_shuf[start:end]],
+ y: Y_train[index_shuf[start:end]],
+ y_teacher: teacher_preds[index_shuf[start:end]],
+ phase: args.is_training}
+ if feed is not None:
+ feed_dict.update(feed)
+ sess.run(train_step, feed_dict=feed_dict)
+
+ if batch % 100 == 0:
+ if writer is not None:
+ loss_val, merged_summ = sess.run(
+ [loss, merge_op], feed_dict=feed_dict)
+ writer.add_summary(merged_summ, step)
+ writer.flush()
+ else:
+ loss_val = sess.run(loss, feed_dict=feed_dict)
+
+ #print('epoch %d, batch %d, step %d, loss %.4f' %
+ # (epoch, batch, step, loss_val))
+
+ assert end >= len(X_train) # Check that all examples were used
+ cur = time.time()
+ if verbose:
+ _logger.info("Epoch " + str(epoch) + " took " +
+ str(cur - prev) + " seconds")
+ if evaluate is not None:
+ evaluate()
+ #global_step = step
+ if save:
+ save_path = os.path.join(args.log_dir, args.filename)
+ #save_path = args.log_dir
+ saver = tf.train.Saver()
+ if not os.path.exists(args.log_dir):
+ os.makedirs(args.log_dir)
+ saver.save(sess, save_path, global_step=step)
+ _logger.info("Completed model training and saved at: " +
+ str(save_path))
+ else:
+ _logger.info("Completed model training.")
+
+ return True
+
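+# NOTE: `model_loss_inpgrad_reg` is used by `model_train_inpgrad_reg` below but
+# is not defined in this file. The sketch below is an assumption about its
+# semantics (input-gradient regularization in the spirit of double backprop):
+# l2dbl weights the squared norm of the gradient of the cross-entropy loss
+# w.r.t. the input, and l2cs weights the squared norm of the gradient of the
+# summed log-probabilities. The authors' actual definition may differ.
+def model_loss_inpgrad_reg(x, y, model, l2dbl=0, l2cs=0):
+  reduc_ind = list(range(1, len(x.get_shape())))
+
+  # Double-backprop term: gradient of the per-example cross-entropy w.r.t. x
+  xent = model_loss(y, model, mean=False)
+  xent_grad, = tf.gradients(tf.reduce_sum(xent), x)
+  dbl_term = tf.reduce_mean(
+      tf.reduce_sum(tf.square(xent_grad), axis=reduc_ind))
+
+  # Certainty-sensitivity term: gradient of the summed log-probabilities
+  cs_grad, = tf.gradients(tf.reduce_sum(tf.log(model + 1e-12)), x)
+  cs_term = tf.reduce_mean(
+      tf.reduce_sum(tf.square(cs_grad), axis=reduc_ind))
+
+  return l2dbl * dbl_term + l2cs * cs_term
+
+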
+def model_train_inpgrad_reg(sess, x, y, predictions, X_train, Y_train, model=None, phase=None,
+ writer=None, save=False, predictions_adv=None, init_all=False,
+ evaluate=None, l2dbl = 0, l2cs = 0, verbose=True, feed=None, args=None, rng=None):
+ """
+ Train a TF graph
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_train: numpy array with training inputs
+ :param Y_train: numpy array with training outputs
+ :param save: boolean controlling the save operation
+ :param predictions_adv: if set with the adversarial example tensor,
+ will run adversarial training
+ :param init_all: (boolean) If set to true, all TF variables in the session
+ are (re)initialized, otherwise only previously
+ uninitialized variables are initialized before training.
+ :param evaluate: function that is run after each training iteration
+ (typically to display the test/validation accuracy).
+ :param verbose: (boolean) all print statements disabled when set to False.
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `nb_epochs`, `learning_rate`,
+ `batch_size`
+ If save is True, should also contain 'log_dir'
+ and 'filename'
+ :param rng: Instance of numpy.random.RandomState
+ :return: True if model trained
+ """
+ args = _FlagsWrapper(args or {})
+
+ # Check that necessary arguments were given (see doc above)
+ assert args.nb_epochs, "Number of epochs was not given in args dict"
+ assert args.learning_rate, "Learning rate was not given in args dict"
+ assert args.batch_size, "Batch size was not given in args dict"
+
+ if save:
+ assert args.log_dir, "Directory for save was not given in args dict"
+ assert args.filename, "Filename for save was not given in args dict"
+
+ if not verbose:
+ set_log_level(logging.WARNING)
+ warnings.warn("verbose argument is deprecated and will be removed"
+ " on 2018-02-11. Instead, use utils.set_log_level()."
+ " For backward compatibility, log_level was set to"
+ " logging.WARNING (30).")
+
+ if rng is None:
+ rng = np.random.RandomState()
+
+ # Define loss
+ loss = model_loss(y, predictions) + model_loss_inpgrad_reg(x, y, predictions, l2dbl, l2cs)
+ if predictions_adv is not None:
+ loss = (loss + model_loss(x, y, predictions_adv)) / 2
+
+ with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
+ global_step = tf.get_variable(
+ "global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
+
+ train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
+ train_step = train_step.minimize(loss)
+
+ if writer is not None:
+ assert args.loss_name, "Name of scalar summary loss"
+ training_summary = tf.summary.scalar(args.loss_name, loss)
+ merge_op = tf.summary.merge_all()
+
+ with sess.as_default():
+ if hasattr(tf, "global_variables_initializer"):
+ if init_all:
+ tf.global_variables_initializer().run()
+ else:
+ initialize_uninitialized_global_variables(sess)
+ else:
+ warnings.warn("Update your copy of tensorflow; future versions of "
+ "CleverHans may drop support for this version.")
+ sess.run(tf.initialize_all_variables())
+
+ init_step = sess.run(global_step)
+
+ for epoch in xrange(args.nb_epochs):
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_train)
+
+ # Indices to shuffle training set
+ index_shuf = list(range(len(X_train)))
+ rng.shuffle(index_shuf)
+
+ prev = time.time()
+ for batch in range(nb_batches):
+
+ step = init_step + (epoch * nb_batches + batch)
+
+ # Compute batch start and end indices
+ start, end = batch_indices(
+ batch, len(X_train), args.batch_size)
+
+ # Perform one training step
+ feed_dict = {x: X_train[index_shuf[start:end]],
+ y: Y_train[index_shuf[start:end]],
+ phase: args.is_training}
+ if feed is not None:
+ feed_dict.update(feed)
+ sess.run(train_step, feed_dict=feed_dict)
+
+ if batch % 100 == 0:
+ if writer is not None:
+ loss_val, merged_summ = sess.run(
+ [loss, merge_op], feed_dict=feed_dict)
+ writer.add_summary(merged_summ, step)
+ writer.flush()
+ else:
+ loss_val = sess.run(loss, feed_dict=feed_dict)
+
+ #print('epoch %d, batch %d, step %d, loss %.4f' %
+ # (epoch, batch, step, loss_val))
+
+ assert end >= len(X_train) # Check that all examples were used
+ cur = time.time()
+ if verbose:
+ _logger.info("Epoch " + str(epoch) + " took " +
+ str(cur - prev) + " seconds")
+ if evaluate is not None:
+ evaluate()
+ #global_step = step
+ if save:
+ save_path = os.path.join(args.log_dir, args.filename)
+ #save_path = args.log_dir
+ saver = tf.train.Saver()
+ if not os.path.exists(args.log_dir):
+ os.makedirs(args.log_dir)
+ saver.save(sess, save_path, global_step=step)
+ _logger.info("Completed model training and saved at: " +
+ str(save_path))
+ else:
+ _logger.info("Completed model training.")
+
+ return True
+
+# Imagenet training
+def model_train_imagenet(sess, x, y, predictions, train_iterator, X_train, Y_train, model=None, phase=None,
+ writer=None, save=False, predictions_adv=None, init_all=False,
+ evaluate=None, verbose=True, feed=None, args=None, rng=None):
+ """
+ Train a TF graph
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_train: numpy array with training inputs
+ :param Y_train: numpy array with training outputs
+ :param save: boolean controlling the save operation
+ :param predictions_adv: if set with the adversarial example tensor,
+ will run adversarial training
+ :param init_all: (boolean) If set to true, all TF variables in the session
+ are (re)initialized, otherwise only previously
+ uninitialized variables are initialized before training.
+ :param evaluate: function that is run after each training iteration
+ (typically to display the test/validation accuracy).
+ :param verbose: (boolean) all print statements disabled when set to False.
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `nb_epochs`, `learning_rate`,
+ `batch_size`
+ If save is True, should also contain 'log_dir'
+ and 'filename'
+ :param rng: Instance of numpy.random.RandomState
+ :return: True if model trained
+ """
+ args = _FlagsWrapper(args or {})
+
+ # Check that necessary arguments were given (see doc above)
+ assert args.nb_epochs, "Number of epochs was not given in args dict"
+ assert args.learning_rate, "Learning rate was not given in args dict"
+ assert args.batch_size, "Batch size was not given in args dict"
+
+ if save:
+ assert args.log_dir, "Directory for save was not given in args dict"
+ assert args.filename, "Filename for save was not given in args dict"
+
+ if not verbose:
+ set_log_level(logging.WARNING)
+ warnings.warn("verbose argument is deprecated and will be removed"
+ " on 2018-02-11. Instead, use utils.set_log_level()."
+ " For backward compatibility, log_level was set to"
+ " logging.WARNING (30).")
+
+ if rng is None:
+ rng = np.random.RandomState()
+
+ # Define loss
+ loss = model_loss(y, predictions)
+ if predictions_adv is not None:
+ loss = (loss + model_loss(y, predictions_adv)) / 2
+
+ with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
+ global_step = tf.get_variable(
+ "global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
+ learning_rate_tensor = tf.placeholder(tf.float32, shape=[])
+
+ if args.lowprecision:
+ update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+ with tf.control_dependencies(update_ops):
+ train_step = tf.train.AdamOptimizer(learning_rate=learning_rate_tensor, epsilon=1e-5) #Copied epsilon from dorefanet
+ train_step = train_step.minimize(loss)
+ # Find the batch norm variables
+ all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
+ # Moving mean and moving variance are not trainable variables but still
+ # have to be restored, so collect them explicitly
+ batch_vars = [var for var in all_vars if ('batchNorm' in var.name) and (('moving_mean' in var.name) or ('moving_variance' in var.name))]
+ # Save both the batch norm statistics and the trainable variables
+ save_vars = tf.trainable_variables() + batch_vars
+ else:
+ train_step = tf.train.AdamOptimizer(learning_rate=learning_rate_tensor, epsilon=1e-5) #Copied epsilon from dorefanet
+ train_step = train_step.minimize(loss)
+
+ if writer is not None:
+ assert args.loss_name, "Name of scalar summary loss"
+ training_summary = tf.summary.scalar(args.loss_name, loss)
+ merge_op = tf.summary.merge_all()
+
+ with sess.as_default():
+ if hasattr(tf, "global_variables_initializer"):
+ if init_all:
+ tf.global_variables_initializer().run()
+ else:
+ initialize_uninitialized_global_variables(sess)
+ else:
+ warnings.warn("Update your copy of tensorflow; future versions of "
+ "CleverHans may drop support for this version.")
+ sess.run(tf.initialize_all_variables())
+
+ init_step = sess.run(global_step)
+ step = init_step
+
+ for epoch in xrange(args.nb_epochs):
+ prev = time.time()
+
+ # Initialize the iterator
+ sess.run(train_iterator.initializer)
+
+ if args.lowprecision:
+ if (epoch == 60):
+ args.learning_rate = 4e-5
+ if (epoch == 75):
+ args.learning_rate = 8e-6
+ else: # for FP models decreasing lr after different number of epochs based on tensorpack model
+ if (epoch == 30) or (epoch == 60) or (epoch == 80):
+ args.learning_rate = args.learning_rate/10
+
+ # Try feeding new values till end
+ num_batches = 0
+ try:
+ while True:
+ # Perform one training step on the next batch
+ X_array, Y_array = sess.run([X_train, Y_train]) # Get np arrays of X and Y
+ feed_dict = {x: X_array,
+ y: Y_array,
+ phase: args.is_training,
+ learning_rate_tensor: args.learning_rate}
+
+ sess.run(train_step, feed_dict=feed_dict)
+ loss_val = sess.run(loss, feed_dict=feed_dict)
+ num_batches = num_batches+1
+
+ except tf.errors.OutOfRangeError:
+ pass
+ print('epoch %d, loss %.4f' %
+ (epoch, loss_val))
+
+ cur = time.time()
+ if verbose:
+ _logger.info("Epoch " + str(epoch) + " took " +
+ str(cur - prev) + " seconds")
+ if evaluate is not None:
+ evaluate()
+ step = step + num_batches # Training steps in batches
+ # save every 10 epochs
+ if save and (epoch % 10 == 0):
+ save_path = os.path.join(args.log_dir, args.filename)
+ #save_path = args.log_dir
+ if args.lowprecision: # The batch norm variables also need to be saved
+ saver = tf.train.Saver(save_vars)
+ else:
+ saver = tf.train.Saver()
+ if not os.path.exists(args.log_dir):
+ os.makedirs(args.log_dir)
+ saver.save(sess, save_path, global_step=step)
+ _logger.info("Completed model training and saved at: " +
+ str(save_path))
+ # Save at the end as well
+ if save:
+ save_path = os.path.join(args.log_dir, args.filename)
+ #save_path = args.log_dir
+ if args.lowprecision: # The batch norm variables also need to be saved
+ saver = tf.train.Saver(save_vars)
+ else:
+ saver = tf.train.Saver()
+ if not os.path.exists(args.log_dir):
+ os.makedirs(args.log_dir)
+ saver.save(sess, save_path, global_step=step)
+ _logger.info("Completed model training and saved at: " +
+ str(save_path))
+ else:
+ _logger.info("Completed model training.")
+
+ return True
+
+def model_eval(sess, x, y, predictions=None, X_test=None, Y_test=None, phase=None, writer=None,
+ feed=None, args=None, model=None):
+ """
+ Compute the accuracy of a TF model on some data
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_test: numpy array with training inputs
+ :param Y_test: numpy array with training outputs
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :param model: (deprecated) if not None, holds model output predictions
+ :return: a float with the accuracy value
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+ raise ValueError("X_test argument and Y_test argument "
+ "must be supplied.")
+ if model is None and predictions is None:
+ raise ValueError("One of model argument "
+ "or predictions argument must be supplied.")
+ if model is not None:
+ warnings.warn("model argument is deprecated. "
+ "Switch to predictions argument. "
+ "model argument will be removed after 2018-01-05.")
+ if predictions is None:
+ predictions = model
+ else:
+ raise ValueError("Exactly one of model argument"
+ " and predictions argument should be specified.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ tf.argmax(predictions, axis=-1))
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ tf.argmax(predictions,
+ axis=tf.rank(predictions) - 1))
+
+ acc_value = tf.reduce_mean(tf.to_float(correct_preds))
+
+ # Init result var
+ accuracy = 0.0
+
+ if writer is not None:
+ eval_summary = tf.summary.scalar('acc', acc_value)
+
+ with sess.as_default():
+
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ for batch in range(nb_batches):
+ if batch % 100 == 0 and batch > 0:
+ _logger.debug("Batch " + str(batch))
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+ cur_batch_size = end - start
+
+ # The last batch may be smaller than all others, so we need to
+ # account for variable batch size here
+ feed_dict = {x: X_test[start:end],
+ y: Y_test[start:end],
+ phase: False}
+ if feed is not None:
+ feed_dict.update(feed)
+
+ if writer is not None:
+ cur_acc, eval_summ = sess.run(
+ [acc_value, eval_summary], feed_dict=feed_dict)
+ writer.add_summary(eval_summ, batch)
+ writer.flush()
+ else:
+ cur_acc = acc_value.eval(feed_dict=feed_dict)
+ accuracy += (cur_batch_size * cur_acc)
+
+ assert end >= len(X_test)
+
+ # Divide by number of examples to get final value
+ accuracy /= len(X_test)
+
+ return accuracy
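+
+# A minimal usage sketch (not part of the original code base): compute clean
+# test accuracy for a graph that has already been built. `sess`, `x`, `y`,
+# `preds` and `phase` are assumed to exist; only `batch_size` is required in
+# `args` here.
+#
+#   eval_args = {'batch_size': 128}
+#   acc = model_eval(sess, x, y, predictions=preds, X_test=X_test,
+#                    Y_test=Y_test, phase=phase, args=eval_args)
+#   print('Clean test accuracy: %.4f' % acc)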
+
+
+def model_eval_ensemble(sess, x, y, predictions=None, X_test=None, Y_test=None, phase=None, writer=None,
+ feed=None, args=None, model=None):
+ """
+ Compute the accuracy of a TF model on some data
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_test: numpy array with training inputs
+ :param Y_test: numpy array with training outputs
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :param model: (deprecated) if not None, holds model output predictions
+ :return: a float with the accuracy value
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+ raise ValueError("X_test argument and Y_test argument "
+ "must be supplied.")
+ if model is None and (predictions is None):
+ raise ValueError("One of model argument "
+ "or both predictions argument must be supplied.")
+ if model is not None:
+ warnings.warn("model argument is deprecated. "
+ "Switch to predictions argument. "
+ "model argument will be removed after 2018-01-05.")
+ if predictions is None:
+ predictions = model
+ else:
+ raise ValueError("Exactly one of model argument"
+ " and predictions argument should be specified.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ predictions)
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ predictions)
+
+ acc_value = tf.reduce_mean(tf.to_float(correct_preds))
+
+ # Init result var
+ accuracy = 0.0
+
+ if writer is not None:
+ eval_summary = tf.summary.scalar('acc', acc_value)
+
+ with sess.as_default():
+
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ for batch in range(nb_batches):
+ if batch % 100 == 0 and batch > 0:
+ _logger.debug("Batch " + str(batch))
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+ cur_batch_size = end - start
+
+ # The last batch may be smaller than all others, so we need to
+ # account for variable batch size here
+ feed_dict = {x: X_test[start:end],
+ y: Y_test[start:end],
+ phase: False}
+ if feed is not None:
+ feed_dict.update(feed)
+
+ if writer is not None:
+ cur_acc, eval_summ = sess.run(
+ [acc_value, eval_summary], feed_dict=feed_dict)
+ writer.add_summary(eval_summ, batch)
+ writer.flush()
+ else:
+ cur_acc = acc_value.eval(feed_dict=feed_dict)
+ accuracy += (cur_batch_size * cur_acc)
+
+ assert end >= len(X_test)
+
+ # Divide by number of examples to get final value
+ accuracy /= len(X_test)
+
+ return accuracy
+
+def model_eval_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None, phase=None, writer=None,
+ feed=None, args=None, model=None):
+ """
+ Compute the accuracy of a TF model on some data
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_test: numpy array with training inputs
+ :param Y_test: numpy array with training outputs
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :param model: (deprecated) if not None, holds model output predictions
+ :return: a float with the accuracy value
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+ raise ValueError("X_test argument and Y_test argument "
+ "must be supplied.")
+ if model is None and predictions is None:
+ raise ValueError("One of model argument "
+ "or predictions argument must be supplied.")
+ if model is not None:
+ warnings.warn("model argument is deprecated. "
+ "Switch to predictions argument. "
+ "model argument will be removed after 2018-01-05.")
+ if predictions is None:
+ predictions = model
+ else:
+ raise ValueError("Exactly one of model argument"
+ " and predictions argument should be specified.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ tf.argmax(predictions, axis=-1))
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ tf.argmax(predictions,
+ axis=tf.rank(predictions) - 1))
+
+ acc_value = tf.reduce_mean(tf.to_float(correct_preds))
+
+ # Init result var
+ accuracy = 0.0
+
+ if writer is not None:
+ eval_summary = tf.summary.scalar('acc', acc_value)
+
+
+ with sess.as_default():
+
+ # Initialize the iterator
+ sess.run(test_iterator.initializer)
+
+ # Try feeding new values till end
+ num_batches = 0
+ try:
+ while True:
+ X_array, Y_array = sess.run([X_test, Y_test])
+ feed_dict = {x: X_array,
+ y: Y_array,
+ phase: False}
+ if feed is not None:
+ feed_dict.update(feed)
+ num_batches = num_batches + 1
+
+ if writer is not None:
+ cur_acc, eval_summ = sess.run(
+ [acc_value, eval_summary], feed_dict=feed_dict)
+ writer.add_summary(eval_summ, num_batches)
+ writer.flush()
+ else:
+ cur_acc = acc_value.eval(feed_dict=feed_dict)
+ accuracy += cur_acc
+ except tf.errors.OutOfRangeError:
+ pass
+
+ # Divide by number of examples to get final value
+ accuracy = accuracy/num_batches
+
+ return accuracy
+
+def model_eval_adv_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None,
+ phase=None, writer=None, feed=None, attacker=None, args=None, model=None, attack_params=None):
+ """
+ Compute the accuracy of a TF model on some data
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_test: numpy array with training inputs
+ :param Y_test: numpy array with training outputs
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param attacker: attack object whose `generate_np` method is used to craft
+ adversarial examples from each batch
+ :param attack_params: dictionary of parameters passed to `attacker.generate_np`
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :param model: (deprecated) if not None, holds model output predictions
+ :return: a float with the accuracy value
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+ raise ValueError("X_test argument and Y_test argument "
+ "must be supplied.")
+ if model is None and predictions is None:
+ raise ValueError("One of model argument "
+ "or predictions argument must be supplied.")
+ if model is not None:
+ warnings.warn("model argument is deprecated. "
+ "Switch to predictions argument. "
+ "model argument will be removed after 2018-01-05.")
+ if predictions is None:
+ predictions = model
+ else:
+ raise ValueError("Exactly one of model argument"
+ " and predictions argument should be specified.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ tf.argmax(predictions, axis=-1))
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ tf.argmax(predictions,
+ axis=tf.rank(predictions) - 1))
+
+ acc_value = tf.reduce_mean(tf.to_float(correct_preds))
+
+ # Init result var
+ accuracy = 0.0
+
+ if writer is not None:
+ eval_summary = tf.summary.scalar('acc', acc_value)
+
+
+ with sess.as_default():
+
+ # Initialize the iterator
+ sess.run(test_iterator.initializer)
+
+ num_batches = 0
+ try:
+ while True:
+ X_array, Y_array = sess.run([X_test, Y_test])
+ X_shape = X_array.shape
+ if X_array.shape[0] < args.batch_size: # Last batch discarded to avoid error with CW attack
+ break
+
+ # Generate the adversarial examples
+ X_adv_array = attacker.generate_np(X_array, phase, **attack_params)
+
+ feed_dict = {x: X_adv_array,
+ y: Y_array,
+ phase: False}
+ if feed is not None:
+ feed_dict.update(feed)
+ num_batches = num_batches + 1
+
+ if writer is not None:
+ cur_acc, eval_summ = sess.run(
+ [acc_value, eval_summary], feed_dict=feed_dict)
+ writer.add_summary(eval_summ, num_batches)
+ writer.flush()
+ else:
+ cur_acc = acc_value.eval(feed_dict=feed_dict)
+ accuracy += cur_acc
+ except tf.errors.OutOfRangeError:
+ pass
+
+
+ # Divide by number of examples to get final value
+ accuracy = accuracy / num_batches  # accuracy is already averaged within each batch, so divide by the number of batches
+
+ return accuracy
+
+def model_eval_ensemble_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None, phase=None, writer=None,
+ feed=None, args=None, model=None):
+ """
+ Compute the accuracy of a TF model on some data
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_test: numpy array with training inputs
+ :param Y_test: numpy array with training outputs
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :param model: (deprecated) if not None, holds model output predictions
+ :return: a float with the accuracy value
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+ raise ValueError("X_test argument and Y_test argument "
+ "must be supplied.")
+ if model is None and (predictions is None):
+ raise ValueError("One of model argument "
+ "or both predictions argument must be supplied.")
+ if model is not None:
+ warnings.warn("model argument is deprecated. "
+ "Switch to predictions argument. "
+ "model argument will be removed after 2018-01-05.")
+ if predictions is None:
+ predictions = model
+ else:
+ raise ValueError("Exactly one of model argument"
+ " and predictions argument should be specified.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ predictions)
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ predictions)
+
+ acc_value = tf.reduce_mean(tf.to_float(correct_preds))
+
+ # Init result var
+ accuracy = 0.0
+
+ if writer is not None:
+ eval_summary = tf.summary.scalar('acc', acc_value)
+
+ with sess.as_default():
+
+ # Initialize the iterator
+ sess.run(test_iterator.initializer)
+
+ # Try feeding new values till end
+ num_batches = 0
+ try:
+ while True:
+ X_array, Y_array = sess.run([X_test, Y_test])
+ feed_dict = {x: X_array,
+ y: Y_array,
+ phase: False}
+ if feed is not None:
+ feed_dict.update(feed)
+ num_batches = num_batches + 1
+
+ if writer is not None:
+ cur_acc, eval_summ = sess.run(
+ [acc_value, eval_summary], feed_dict=feed_dict)
+ writer.add_summary(eval_summ, num_batches)
+ writer.flush()
+ else:
+ cur_acc = acc_value.eval(feed_dict=feed_dict)
+ accuracy += cur_acc
+ except tf.errors.OutOfRangeError:
+ pass
+
+ # Divide by number of examples to get final value
+ accuracy = accuracy/num_batches
+
+ return accuracy
+
+def model_eval_ensemble_adv_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None, phase=None, writer=None,
+ feed=None, attacker=None, args=None, model=None, attack_params=None):
+ """
+ Compute the accuracy of a TF model on some data
+ :param sess: TF session to use when training the graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param X_test: numpy array with training inputs
+ :param Y_test: numpy array with training outputs
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :param model: (deprecated) if not None, holds model output predictions
+ :return: a float with the accuracy value
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+ raise ValueError("X_test argument and Y_test argument "
+ "must be supplied.")
+ if model is None and (predictions is None):
+ raise ValueError("One of model argument "
+ "or both predictions argument must be supplied.")
+ if model is not None:
+ warnings.warn("model argument is deprecated. "
+ "Switch to predictions argument. "
+ "model argument will be removed after 2018-01-05.")
+ if predictions is None:
+ predictions = model
+ else:
+ raise ValueError("Exactly one of model argument"
+ " and predictions argument should be specified.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ predictions)
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ predictions)
+
+ acc_value = tf.reduce_mean(tf.to_float(correct_preds))
+
+ # Init result var
+ accuracy = 0.0
+
+ if writer is not None:
+ eval_summary = tf.summary.scalar('acc', acc_value)
+
+ with sess.as_default():
+
+ # Initialize the iterator
+ sess.run(test_iterator.initializer)
+
+ num_batches = 0
+ try:
+ while True:
+ X_array, Y_array = sess.run([X_test, Y_test])
+ X_shape = X_array.shape
+ if X_array.shape[0] < args.batch_size:
+ break
+
+ # Generate the adversarial examples
+ X_adv_array = attacker.generate_np(X_array, phase, **attack_params)
+
+ feed_dict = {x: X_adv_array,
+ y: Y_array,
+ phase: False}
+ if feed is not None:
+ feed_dict.update(feed)
+ num_batches = num_batches + 1
+
+ if writer is not None:
+ cur_acc, eval_summ = sess.run(
+ [acc_value, eval_summary], feed_dict=feed_dict)
+ writer.add_summary(eval_summ, num_batches)
+ writer.flush()
+ else:
+ cur_acc = acc_value.eval(feed_dict=feed_dict)
+ accuracy += cur_acc
+ except tf.errors.OutOfRangeError:
+ pass
+
+ # Divide by number of examples to get final value
+ accuracy = accuracy/num_batches
+ return accuracy
+
+def tf_model_load(sess, file_path=None):
+ """
+
+ :param sess: the session object to restore
+ :param file_path: path of the checkpoint to restore; if None, it is
+ built from FLAGS.log_dir and FLAGS.filename
+ :return:
+ """
+ with sess.as_default():
+ saver = tf.train.Saver()
+ if file_path is None:
+ file_path = os.path.join(FLAGS.log_dir, FLAGS.filename)
+ saver.restore(sess, tf.train.latest_checkpoint(file_path))
+
+ return True
+
+
+def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, feed=None,
+ args=None):
+ """
+ A helper function that computes a tensor on numpy inputs by batches.
+
+ :param sess:
+ :param tf_inputs:
+ :param tf_outputs:
+ :param numpy_inputs:
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ """
+ args = _FlagsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+
+ n = len(numpy_inputs)
+ assert n > 0
+ assert n == len(tf_inputs)
+ m = numpy_inputs[0].shape[0]
+ for i in xrange(1, n):
+ assert numpy_inputs[i].shape[0] == m
+ out = []
+ for _ in tf_outputs:
+ out.append([])
+ with sess.as_default():
+ for start in xrange(0, m, args.batch_size):
+ batch = start // args.batch_size
+ if batch % 100 == 0 and batch > 0:
+ _logger.debug("Batch " + str(batch))
+
+ # Compute batch start and end indices
+ start = batch * args.batch_size
+ end = start + args.batch_size
+ numpy_input_batches = [numpy_input[start:end]
+ for numpy_input in numpy_inputs]
+ cur_batch_size = numpy_input_batches[0].shape[0]
+ assert cur_batch_size <= args.batch_size
+ for e in numpy_input_batches:
+ assert e.shape[0] == cur_batch_size
+
+ feed_dict = dict(zip(tf_inputs, numpy_input_batches))
+ if feed is not None:
+ feed_dict.update(feed)
+ numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
+ for e in numpy_output_batches:
+ assert e.shape[0] == cur_batch_size, e.shape
+ for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
+ out_elem.append(numpy_output_batch)
+
+ out = [np.concatenate(x, axis=0) for x in out]
+ for e in out:
+ assert e.shape[0] == m, e.shape
+ return out
+
+
+def model_argmax(sess, x, predictions, samples, feed=None):
+ """
+ Helper function that computes the current class prediction
+ :param sess: TF session
+ :param x: the input placeholder
+ :param predictions: the model's symbolic output
+ :param samples: numpy array with input samples (dims must match x)
+ :param feed: An optional dictionary that is appended to the feeding
+ dictionary before the session runs. Can be used to feed
+ the learning phase of a Keras model for instance.
+ :return: the argmax output of predictions, i.e. the current predicted class
+ """
+ feed_dict = {x: samples}
+ if feed is not None:
+ feed_dict.update(feed)
+ probabilities = sess.run(predictions, feed_dict)
+
+ if samples.shape[0] == 1:
+ return np.argmax(probabilities)
+ else:
+ return np.argmax(probabilities, axis=1)
+
+
+def l2_batch_normalize(x, epsilon=1e-12, scope=None):
+ """
+ Helper function to normalize a batch of vectors.
+ :param x: the input placeholder
+ :param epsilon: stabilizes division
+ :return: the batch of l2-normalized vectors
+ """
+ with tf.name_scope(scope, "l2_batch_normalize") as scope:
+ x_shape = tf.shape(x)
+ x = tf.contrib.layers.flatten(x)
+ x /= (epsilon + tf.reduce_max(tf.abs(x), 1, keep_dims=True))
+ square_sum = tf.reduce_sum(tf.square(x), 1, keep_dims=True)
+ x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
+ x_norm = tf.multiply(x, x_inv_norm)
+ return tf.reshape(x_norm, x_shape, scope)
+
+
+def kl_with_logits(p_logits, q_logits, scope=None,
+ loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES):
+ """Helper function to compute kl-divergence KL(p || q)
+ """
+ with tf.name_scope(scope, "kl_divergence") as name:
+ p = tf.nn.softmax(p_logits)
+ p_log = tf.nn.log_softmax(p_logits)
+ q_log = tf.nn.log_softmax(q_logits)
+ loss = tf.reduce_mean(tf.reduce_sum(p * (p_log - q_log), axis=1),
+ name=name)
+ tf.losses.add_loss(loss, loss_collection)
+ return loss
+
+def clip_eta(eta, ord, eps):
+ """
+ Helper function to clip the perturbation to the epsilon norm ball.
+ :param eta: A tensor with the current perturbation.
+ :param ord: Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param eps: Epsilon, bound of the perturbation.
+ """
+
+ # Clipping perturbation eta to the given ord norm ball
+ if ord not in [np.inf, 1, 2]:
+ raise ValueError('ord must be np.inf, 1, or 2.')
+ if ord == np.inf:
+ eta = tf.clip_by_value(eta, -eps, eps)
+ elif ord in [1, 2]:
+ reduc_ind = list(xrange(1, len(eta.get_shape())))
+ if ord == 1:
+ norm = tf.reduce_sum(tf.abs(eta),
+ reduction_indices=reduc_ind,
+ keep_dims=True)
+ elif ord == 2:
+ norm = tf.sqrt(tf.reduce_sum(tf.square(eta),
+ reduction_indices=reduc_ind,
+ keep_dims=True))
+ eta = eta * eps / norm
+ return eta
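+
+
+# Minimal NumPy sketch of the clipping above (illustrative only, not part of
+# the CleverHans API). For ord=np.inf it is simply
+#   eta = np.clip(eta, -eps, eps)
+# and for ord=2 it rescales each example so that its l2 norm equals eps:
+#   norm = np.sqrt(np.sum(eta ** 2, axis=(1, 2, 3), keepdims=True))
+#   eta = eta * eps / norm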
diff --git a/case_studies/empir/modified_cleverhans/utils_th.py b/case_studies/empir/modified_cleverhans/utils_th.py
new file mode 100644
index 0000000..1c203f1
--- /dev/null
+++ b/case_studies/empir/modified_cleverhans/utils_th.py
@@ -0,0 +1,340 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import math
+import numpy as np
+import six
+import time
+import warnings
+
+from collections import OrderedDict
+
+from .utils import batch_indices, _ArgsWrapper
+
+import theano
+import theano.tensor as T
+
+import keras
+
+floatX = theano.config.floatX
+
+_TEST_PHASE = np.uint8(0)
+_TRAIN_PHASE = np.uint8(1)
+
+
+def get_or_compute_grads(loss_or_grads, params):
+ if isinstance(loss_or_grads, list):
+ return loss_or_grads
+ else:
+ return theano.grad(loss_or_grads, params)
+
+
+def adadelta(loss_or_grads, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
+ """ From Lasagne
+ """
+ grads = get_or_compute_grads(loss_or_grads, params)
+ updates = OrderedDict()
+
+ # Using theano constant to prevent upcasting of float32
+ one = T.constant(1)
+
+ for param, grad in zip(params, grads):
+ value = param.get_value(borrow=True)
+ # accu: accumulate gradient magnitudes
+ accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
+ broadcastable=param.broadcastable)
+ # delta_accu: accumulate update magnitudes (recursively!)
+ delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
+ broadcastable=param.broadcastable)
+
+ # update accu (as in rmsprop)
+ accu_new = rho * accu + (one - rho) * grad ** 2
+ updates[accu] = accu_new
+
+ # compute parameter update, using the 'old' delta_accu
+ update = (grad * T.sqrt(delta_accu + epsilon) /
+ T.sqrt(accu_new + epsilon))
+ updates[param] = param - learning_rate * update
+
+ # update delta_accu (as accu, but accumulating updates)
+ delta_accu_new = rho * delta_accu + (one - rho) * update ** 2
+ updates[delta_accu] = delta_accu_new
+
+ return updates
+
+
+def model_loss(y, model, mean=True):
+ """
+ Define loss of Theano graph
+ :param y: correct labels
+ :param model: output of the model
+ :param mean: if True, return the mean loss over the batch
+ :return: mean of the loss if `mean` is True, otherwise a vector with the
+ per-sample loss
+ """
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+
+ from_logits = "softmax" not in str(model).lower()
+
+ if from_logits:
+ model = T.nnet.softmax(model)
+
+ out = T.nnet.categorical_crossentropy(model, y)
+
+ if mean:
+ out = T.mean(out)
+ return out
+
+
+def th_model_train(x, y, predictions, params, X_train, Y_train, save=False,
+ predictions_adv=None, evaluate=None, args=None):
+ """
+ Train a Theano graph
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param predictions: model output predictions
+ :param params: model trainable weights
+ :param X_train: numpy array with training inputs
+ :param Y_train: numpy array with training outputs
+ :param save: boolean controlling the save operation
+ :param predictions_adv: if set with the adversarial example tensor,
+ will run adversarial training
+ :param args: dict or argparse `Namespace` object.
+ Should contain `nb_epochs`, `learning_rate`,
+ `batch_size`
+ :return: True if model trained
+ """
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+
+ args = _ArgsWrapper(args or {})
+
+ print("Starting model training using Theano.")
+
+ # Define loss
+ loss = model_loss(y, predictions)
+ if predictions_adv is not None:
+ loss = (loss + model_loss(y, predictions_adv)) / 2
+
+ print("Defined optimizer.")
+
+ train_step = theano.function(
+ inputs=[x, y],
+ outputs=[loss],
+ givens={keras.backend.learning_phase(): _TRAIN_PHASE},
+ allow_input_downcast=True,
+ on_unused_input='ignore',
+ updates=adadelta(
+ loss, params, learning_rate=args.learning_rate, rho=0.95,
+ epsilon=1e-08)
+ )
+
+ for epoch in six.moves.xrange(args.nb_epochs):
+ print("Epoch " + str(epoch))
+
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_train)
+
+ prev = time.time()
+ for batch in range(nb_batches):
+
+ # Compute batch start and end indices
+ start, end = batch_indices(batch, len(X_train), args.batch_size)
+
+ # Perform one training step
+ train_step(X_train[start:end], Y_train[start:end])
+ assert end >= len(X_train) # Check that all examples were used
+ cur = time.time()
+ print("\tEpoch took " + str(cur - prev) + " seconds")
+ prev = cur
+ if evaluate is not None:
+ evaluate()
+
+ return True
+
+
+def th_model_eval(x, y, model, X_test, Y_test, args=None):
+ """
+ Compute the accuracy of a Theano model on some data
+ :param x: input placeholder
+ :param y: output placeholder (for labels)
+ :param model: model output predictions
+ :param X_test: numpy array with test inputs
+ :param Y_test: numpy array with test outputs
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ :return: a float with the accuracy value
+ """
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+
+ args = _ArgsWrapper(args or {})
+
+ # Define symbol for accuracy
+ acc_value = keras.metrics.categorical_accuracy(y, model)
+ # Keras 2.0 categorical_accuracy no longer calculates the mean internally
+ # T.mean is called in here and is backward compatible with previous
+ # versions of Keras
+ acc_value = T.mean(acc_value)
+
+ # Init result var
+ accuracy = 0.0
+
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ eval_step = theano.function(
+ inputs=[x, y],
+ outputs=acc_value,
+ givens={keras.backend.learning_phase(): _TEST_PHASE},
+ on_unused_input="ignore",
+ allow_input_downcast=True,
+ updates=None
+ )
+
+ for batch in range(nb_batches):
+ if batch % 100 == 0 and batch > 0:
+ print("Batch " + str(batch))
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+ cur_batch_size = end - start
+
+ # The last batch may be smaller than all others, so we need to
+ # account for variable batch size here
+ accuracy += cur_batch_size * \
+ eval_step(X_test[start:end], Y_test[start:end])
+ assert end >= len(X_test)
+
+ # Divide by number of examples to get final value
+ accuracy /= len(X_test)
+
+ return accuracy
+
+
+def batch_eval(th_inputs, th_outputs, numpy_inputs, args=None):
+ """
+ A helper function that computes a tensor on numpy inputs by batches.
+
+ :param th_inputs: list of Theano symbolic input variables
+ :param th_outputs: list of Theano symbolic output expressions to compute
+ :param numpy_inputs: list of numpy arrays feeding the input variables
+ :param args: dict or argparse `Namespace` object.
+ Should contain `batch_size`
+ """
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+
+ args = _ArgsWrapper(args or {})
+
+ n = len(numpy_inputs)
+ assert n > 0
+ assert n == len(th_inputs)
+ m = numpy_inputs[0].shape[0]
+ for i in six.moves.xrange(1, n):
+ assert numpy_inputs[i].shape[0] == m
+ out = []
+ for _ in th_outputs:
+ out.append([])
+
+ eval_step = theano.function(
+ inputs=th_inputs,
+ outputs=th_outputs,
+ givens={keras.backend.learning_phase(): _TEST_PHASE},
+ allow_input_downcast=True,
+ updates=None
+ )
+
+ for start in six.moves.xrange(0, m, args.batch_size):
+ batch = start // args.batch_size
+ if batch % 100 == 0 and batch > 0:
+ print("Batch " + str(batch))
+
+ # Compute batch start and end indices
+ start = batch * args.batch_size
+ end = start + args.batch_size
+ numpy_input_batches = [numpy_input[start:end]
+ for numpy_input in numpy_inputs]
+ cur_batch_size = numpy_input_batches[0].shape[0]
+ assert cur_batch_size <= args.batch_size
+ for e in numpy_input_batches:
+ assert e.shape[0] == cur_batch_size
+
+ numpy_output_batches = eval_step(*numpy_input_batches)
+ for e in numpy_output_batches:
+ assert e.shape[0] == cur_batch_size, e.shape
+ for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
+ out_elem.append(numpy_output_batch)
+
+ out = [np.concatenate(x, axis=0) for x in out]
+ for e in out:
+ assert e.shape[0] == m, e.shape
+ return out
+
+
+def model_argmax(x, predictions, sample):
+ """
+ Helper function that computes the current class prediction
+ :param x: the input placeholder
+ :param predictions: the model's symbolic output
+ :param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input
+ :return: the argmax output of predictions, i.e. the current predicted class
+ """
+ warnings.warn("CleverHans support for Theano is deprecated and "
+ "will be dropped on 2017-11-08.")
+
+ probabilities = theano.function(
+ inputs=[x],
+ outputs=predictions,
+ givens={keras.backend.learning_phase(): _TEST_PHASE},
+ allow_input_downcast=True,
+ updates=None
+ )(sample)
+
+ return np.argmax(probabilities)
+
+
+def l2_batch_normalize(x, epsilon=1e-12):
+ """
+ Helper function to normalize a batch of vectors.
+ :param x: the input placeholder
+ :param epsilon: stabilizes division
+ :return: the batch of l2 normalized vectors
+ """
+ epsilon = np.asarray(epsilon, dtype=floatX)
+ x_shape = x.shape
+ x = T.reshape(x, (x.shape[0], -1))
+ x /= (epsilon + T.max(T.abs_(x), 1, keepdims=True))
+ square_sum = T.sum(T.sqr(x), 1, keepdims=True)
+ x /= T.sqrt(np.sqrt(epsilon) + square_sum)
+ return x.reshape(x_shape)
+
+
+def kl_with_logits(q_logits, p_logits):
+ """Helper function to compute kl-divergence KL(q || p)
+ """
+ q = T.nnet.softmax(q_logits)
+ q_log = T.nnet.logsoftmax(q_logits)
+ p_log = T.nnet.logsoftmax(p_logits)
+ loss = T.sum(q * (q_log - p_log), axis=1)
+ return loss
diff --git a/case_studies/error_correcting_codes/AttackModel.py b/case_studies/error_correcting_codes/AttackModel.py
new file mode 100644
index 0000000..c9ae723
--- /dev/null
+++ b/case_studies/error_correcting_codes/AttackModel.py
@@ -0,0 +1,219 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Run this to attack a model trained via TrainModel.
+Use the "loadFullModel" method to load an already trained model (trained via TrainModel).
+The main attack function is "runAttacks", which runs the attacks on the trained models.
+"""
+import pdb
+
+from cleverhans.attacks import Noise, CarliniWagnerL2, MaxConfidence, FastGradientMethod, BasicIterativeMethod, DeepFool, MomentumIterativeMethod, ProjectedGradientDescent
+from Model_Implementations import Model_Softmax_Baseline, Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, Model_Tanh_Baseline
+from tensorflow.keras.datasets import mnist, cifar10
+from tensorflow.keras import backend
+import tensorflow as tf; import numpy as np
+import scipy.linalg
+from scipy import stats
+import matplotlib.pyplot as plt
+
+
+
+
+model_path = 'checkpoints/ECOC/tanh32/checkpoints' #path with saved model parameters
+sess = backend.get_session()
+backend.set_learning_phase(0) #need to do this to get CleverHans to work with batchnorm
+
+
+
+#Dataset-specific parameters - should be the same as those used in TrainModel
+DATA_DESC = 'CIFAR10'; (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
+epochs=None; weight_save_freq=None
+num_classes=10 #how many classes (categories) are in this dataset?
+Y_train = np.squeeze(Y_train); Y_test = np.squeeze(Y_test)
+num_filters_std = [32, 64, 128]; num_filters_ens=[32, 64, 128]; num_filters_ens_2=16; dropout_rate_std=0.0; dropout_rate_ens=0.0; weight_decay = 0
+model_rep_baseline=2; model_rep_ens=2; DATA_AUGMENTATION_FLAG=1; BATCH_NORMALIZATION_FLAG=1
+num_channels = 3; inp_shape = (32,32,3); lr=1e-4; batch_size=80;
+noise_stddev = 0.032; blend_factor = .032
+
+#Attack parameters
+eps_val = 8/255.0; PGD_iters = 200; eps_iter=(2/3)*eps_val;
+eps_range = np.linspace(0, 0.33, 10)
+noise_eps=0.1
+
+
+# DATA PRE-PROCESSING
+X_train = (X_train/255).astype(np.float32); X_test = (X_test/255).astype(np.float32)
+#reshape (add third (image) channel)
+X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2],num_channels); X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],num_channels)
+X_valid = X_test[1000:2000]; Y_valid = Y_test[1000:2000]; #validation data, used to attack model
+#X_train = X_train-0.5; X_test = X_test-0.5; X_valid = X_valid-0.5; #map to range (-0.5,0.5)
+data_dict = {'X_train':X_train, 'Y_train_cat':Y_train, 'X_test':X_test, 'Y_test_cat':Y_test}
+X_random = np.random.rand(X_valid.shape[0],X_valid.shape[1],X_valid.shape[2],X_valid.shape[3])-0.5; X_random = X_random.astype(np.float32)
+
+
+
+
+#Definition of the model we want to attack; should be the same as the definition used in TrainModel
+## ENSEMBLE TANH 32 MODEL DEFINITION
+name = 'tanh_32_diverse' + '_' + DATA_DESC;
+seed = 59;
+code_length = 32;
+num_codes = code_length;
+num_chunks = 4;
+base_model = None;
+
+def output_activation(x):
+ return tf.nn.tanh(x)
+
+M = scipy.linalg.hadamard(code_length).astype(np.float32)
+# Replace the first column, which for scipy's Hadamard construction is always 1
+# and hence not a useful classifier; this change still ensures all codewords have
+# dot product <= 0. Since our decoder ignores negative correlations anyway, this
+# has no net effect on probability estimation.
+M[np.arange(0, num_codes, 2), 0] = -1
+np.random.seed(seed)
+np.random.shuffle(M)
+idx = np.random.permutation(code_length)
+M = M[0:num_codes, idx[0:code_length]]
+params_dict = {'BATCH_NORMALIZATION_FLAG': BATCH_NORMALIZATION_FLAG,
+ 'DATA_AUGMENTATION_FLAG': DATA_AUGMENTATION_FLAG, 'M': M,
+ 'base_model': base_model, 'num_chunks': num_chunks,
+ 'model_rep': model_rep_ens,
+ 'output_activation': output_activation,
+ 'num_filters_ens': num_filters_ens,
+ 'num_filters_ens_2': num_filters_ens_2, 'batch_size': batch_size,
+ 'epochs': epochs, 'dropout_rate': dropout_rate_ens, 'lr': lr,
+ 'blend_factor': blend_factor, 'inp_shape': inp_shape,
+ 'noise_stddev': noise_stddev,
+ 'weight_save_freq': weight_save_freq, 'name': name,
+ 'model_path': model_path,
+ 'zero_one_input': True
+ }
+m4 = Model_Tanh_Ensemble({}, params_dict)
+m4.loadFullModel() # load in the saved model, which should have already been trained first via TrainModel
+
+m4.legend = 'TEns32';
+
+m4.X_valid = X_valid; m4.Y_valid = Y_valid;
+m4.X_test = X_test; m4.Y_test = Y_test;
+m4.X_random = X_random;
+#m4.minval = -0.5; m4.maxval = 0.5
+m4.minval = 0; m4.maxval = 1
+
+
+
+def benignAccuracy(model, X, Y):
+
+ acc_vec=[]; probs_benign_list=[]
+ for rep in np.arange(0, X.shape[0], 1000):
+ x = X[rep:rep+1000]
+ probs_benign = sess.run(model.predict(tf.convert_to_tensor(x)))
+ print(probs_benign.shape)
+ acc= np.mean(np.argmax(probs_benign, 1)==Y[rep:rep+1000])
+ acc_vec += [acc]
+ probs_benign_list += list(np.max(probs_benign, 1))
+
+ acc = np.mean(acc_vec)
+ print("Accuracy for model " + model.params_dict['name'] + " : ", acc)
+ return probs_benign_list
+
+
+def wbAttack(model, attack, att_params, X, Y):
+ sess = backend.get_session()
+ modelCH = model.modelCH()
+ adv_model = attack(modelCH, sess=sess)
+
+ acc_vec=[]; probs_adv_list=[]
+ inc=64
+ for rep in np.arange(0, X.shape[0], inc):
+ x = X[rep:rep+inc]
+ y = Y[rep:rep+inc]
+ X_adv = adv_model.generate(tf.convert_to_tensor(x), **att_params).eval(session=sess)
+ temp = sess.run(model.predict(tf.convert_to_tensor(X_adv)))
+ print(temp.shape)
+ preds = np.argmax(temp, 1)
+ acc = np.mean(np.equal(preds, y))
+ probs_adv = np.max(sess.run(model.predict(tf.convert_to_tensor(X_adv))), 1)
+ probs_adv = probs_adv[preds != y]
+ acc= np.mean(np.equal(preds, y))
+ acc_vec += [acc]
+ probs_adv_list += list(probs_adv)
+
+
+ acc = np.mean(acc_vec)
+ print("Adv accuracy for model " + model.params_dict['name'] + " : ", acc)
+ return probs_adv_list, acc, X_adv
+
+
+
+
+
+def runAttacks(models_list):
+ # Run the attack suite on each model: clean accuracy, PGD, random inputs and noise
+ for model in models_list:
+
+ print(""); print(""); print("");
+ print("Running tests on model: ", model.params_dict['name'])
+
+ print("Clean accuracy of model:")
+ probs_benign = benignAccuracy(model, model.X_test, model.Y_test)
+ print("")
+
+ print("Running PGD attack:")
+ att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'eps':eps_val, 'eps_iter':eps_iter, 'nb_iter':PGD_iters,'ord':np.inf}
+ probs_adv, junk, X_adv = wbAttack(model, ProjectedGradientDescent, att_params, model.X_valid, model.Y_valid)
+ print("")
+
+# print("Running CW attack:")
+# att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'binary_search_steps':10, 'learning_rate':1e-3}
+# probs_adv, junk, X_adv = wbAttack(model, CarliniWagnerL2, att_params, model.X_valid[0:100], model.Y_valid[0:100])
+# print("")
+#
+# print("Running Blind Spot attack, alpha=0.8:")
+# att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'binary_search_steps':10, 'learning_rate':1e-3}
+# probs_adv, junk, X_adv = wbAttack(model, CarliniWagnerL2, att_params, 0.8*model.X_valid[0:100], model.Y_valid[0:100])
+# print("")
+
+
+ #Random ATTACK (0 SNR inputs)
+ print("Running random attack:")
+ probs_random = np.max(sess.run(model.predict(tf.convert_to_tensor(model.X_random))), 1)
+ print('Prob. that ', model.params_dict['name'], ' < 0.9 on random data: ', np.mean(probs_random<0.9))
+
+ #Noise ATTACK (low SNR inputs)
+ print("Running Noise attack:")
+ att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'eps':noise_eps}
+ probs_noise, junk, X_adv = wbAttack(model, Noise, att_params, model.X_valid, model.Y_valid)
+ print("")
+
+ return probs_benign, probs_adv, probs_noise
+
+
+
+
+models_list = [m4]
+probs_benign, probs_adv, probs_noise = runAttacks(models_list)
+
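+# KDE plots of the confidence (max probability) on benign inputs (figure 1),
+# adversarial inputs (figure 2) and noisy inputs (figure 3)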
+plt.figure(1)
+kernel = stats.gaussian_kde(probs_benign, bw_method=0.5)
+plt.plot(np.arange(0, 1, .01), kernel.pdf(np.arange(0, 1, .01)), linewidth=4)
+
+plt.figure(2)
+kernel = stats.gaussian_kde(probs_adv, bw_method=0.5)
+plt.plot(np.arange(0, 1, .01), kernel.pdf(np.arange(0, 1, .01)), linewidth=4)
+
+plt.figure(3)
+kernel = stats.gaussian_kde(probs_noise, bw_method=0.5)
+plt.plot(np.arange(0, 1, .01), kernel.pdf(np.arange(0, 1, .01)), linewidth=4)
+
diff --git a/case_studies/error_correcting_codes/ClassBlender.py b/case_studies/error_correcting_codes/ClassBlender.py
new file mode 100644
index 0000000..b67c832
--- /dev/null
+++ b/case_studies/error_correcting_codes/ClassBlender.py
@@ -0,0 +1,68 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+This code blends two classes together as a convex combination; a type of simple data augmentation
+"""
+
+from tensorflow.keras.layers import Layer
+from tensorflow.keras import backend as K
+import tensorflow as tf
+
+import numpy as np
+
+
+class ClassBlender(Layer):
+ """Only active at training time since it is a regularization layer.
+ # Arguments
+ attenuation: how much to attenuate the input
+ # Input shape
+ Arbitrary.
+ # Output shape
+ Same as the input shape.
+ """
+
+ def __init__(self, attenuation, batch_size, **kwargs):
+ super(ClassBlender, self).__init__(**kwargs)
+ self.supports_masking = True
+ self.attenuation = attenuation
+ self.batch_size = batch_size
+
+
+
+
+ def call(self, inputs, training=None):
+ def blended():
+
+ inputs_permuted = tf.random_shuffle(inputs)
+ angles = (180*(2*np.random.rand(self.batch_size)-1))*np.pi/180
+ shifts = 4*(2*np.random.rand(self.batch_size, 2)-1)
+ inputs_permuted_translated = tf.contrib.image.translate(inputs_permuted, shifts)
+ inputs_permuted_translated_rotated = tf.contrib.image.rotate(inputs_permuted_translated,angles)
+ inputs_adjusted = inputs_permuted_translated_rotated
+
+ inputs_adjusted = tf.clip_by_value(inputs_adjusted,-0.5,0.5)
+
+
+ return (1.0-self.attenuation)*inputs + self.attenuation*inputs_adjusted
+
+
+ return K.in_train_phase(blended, inputs, training=training)
+
+ def get_config(self):
+ config = {'attenuation': self.attenuation, 'batch_size':self.batch_size}
+ base_config = super(ClassBlender, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
diff --git a/case_studies/error_correcting_codes/Clipper.py b/case_studies/error_correcting_codes/Clipper.py
new file mode 100644
index 0000000..993bab2
--- /dev/null
+++ b/case_studies/error_correcting_codes/Clipper.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+"""
+
+from tensorflow.keras.layers import Layer
+from tensorflow.keras import backend as K
+import tensorflow as tf
+
+import numpy as np
+
+
+class Clipper(Layer):
+ """clips input to lie wihin valid pixel range
+ Only active at training time since it is a regularization layer.
+ # Arguments
+ attenuation: how much to attenuate the input
+ # Input shape
+ Arbitrary.
+ # Output shape
+ Same as the input shape.
+ """
+
+ def __init__(self, **kwargs):
+ super(Clipper, self).__init__(**kwargs)
+ self.supports_masking = True
+
+
+
+ def call(self, inputs, training=None):
+ def augmented():
+ return tf.clip_by_value(inputs,-0.5,0.5)
+
+ return K.in_train_phase(augmented, augmented, training=training)
+
+
+
+
+ def get_config(self):
+ config = {}
+ base_config = super(Clipper, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
diff --git a/case_studies/error_correcting_codes/DataAugmenter.py b/case_studies/error_correcting_codes/DataAugmenter.py
new file mode 100644
index 0000000..4c62fa8
--- /dev/null
+++ b/case_studies/error_correcting_codes/DataAugmenter.py
@@ -0,0 +1,69 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+"""
+
+from tensorflow.keras.layers import Layer
+from tensorflow.keras import backend as K
+import tensorflow as tf
+
+import numpy as np
+
+
+class DataAugmenter(Layer):
+ """Shifts and scales input
+ Only active at training time since it is a regularization layer.
+ # Arguments
+ attenuation: how much to attenuate the input
+ # Input shape
+ Arbitrary.
+ # Output shape
+ Same as the input shape.
+ """
+
+ def __init__(self, batch_size, **kwargs):
+ super(DataAugmenter, self).__init__(**kwargs)
+ self.supports_masking = True
+ self.batch_size = batch_size
+
+
+
+
+ def call(self, inputs, training=None):
+ def augmented():
+ angles = (15*(2*np.random.rand(self.batch_size)-1))*np.pi/180
+ shifts = 4*(2*np.random.rand(self.batch_size, 2)-1)
+ inputs_shifted = tf.contrib.image.translate(inputs, shifts)
+ inputs_shifted_rotated = tf.contrib.image.rotate(inputs_shifted,angles)
+
+ random_number = tf.random_uniform([self.batch_size])
+ inputs_shifted_rotated_flipped = tf.where(random_number<0.5, tf.image.flip_left_right(inputs_shifted_rotated), inputs_shifted_rotated)
+
+ # modification by zimmerrol to make sure keras shape computation works
+ inputs_shifted_rotated_flipped = tf.ensure_shape(inputs_shifted_rotated_flipped, inputs.shape)
+
+ return inputs_shifted_rotated_flipped
+
+
+
+ return K.in_train_phase(augmented, inputs, training=training)
+
+ def get_config(self):
+ config = {}
+ config['batch_size'] = self.batch_size
+ base_config = super(DataAugmenter, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
diff --git a/case_studies/error_correcting_codes/Grayscaler.py b/case_studies/error_correcting_codes/Grayscaler.py
new file mode 100644
index 0000000..1ef5b54
--- /dev/null
+++ b/case_studies/error_correcting_codes/Grayscaler.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+"""
+
+from tensorflow.keras.layers import Layer
+from tensorflow.keras import backend as K
+import tensorflow as tf
+
+import numpy as np
+
+
+class Grayscaler(Layer):
+ """Converts input to grayscale
+ Only active at training time since it is a regularization layer.
+ # Arguments
+ attenuation: how much to attenuate the input
+ # Input shape
+ Arbitrary.
+ # Output shape
+ Same as the input shape.
+ """
+
+ def __init__(self, **kwargs):
+ super(Grayscaler, self).__init__(**kwargs)
+ self.supports_masking = True
+
+
+
+ def call(self, inputs, training=None):
+ def augmented():
+ return tf.image.rgb_to_grayscale(inputs)
+
+ return K.in_train_phase(augmented, augmented, training=training)
+
+
+
+
+ def get_config(self):
+ config = {}
+ base_config = super(Grayscaler, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
diff --git a/case_studies/error_correcting_codes/LICENSE b/case_studies/error_correcting_codes/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/case_studies/error_correcting_codes/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/case_studies/error_correcting_codes/Model.py b/case_studies/error_correcting_codes/Model.py
new file mode 100644
index 0000000..272f4a6
--- /dev/null
+++ b/case_studies/error_correcting_codes/Model.py
@@ -0,0 +1,407 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+This defines a general "Model", i.e. architecture and decoding operations. It is an abstract base class for all models,
+e.g. the baseline softmax model or the ensemble Tanh model
+"""
+import tensorflow as tf
+from utils_keras import KerasModelWrapper as CleverHansKerasModelWrapper
+
+from tensorflow.keras.layers import BatchNormalization, Dropout, Lambda, Input, Dense, Conv2D, Flatten, Activation, Concatenate, GaussianNoise
+from tensorflow.keras.utils import plot_model
+from tensorflow.keras import regularizers
+from tensorflow.keras.models import load_model
+from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import Callback
+from tensorflow.keras.models import Model as KerasModel
+
+import pickle
+import numpy as np
+from ClassBlender import ClassBlender
+from DataAugmenter import DataAugmenter
+from Clipper import Clipper
+from Grayscaler import Grayscaler
+
+
+
+class WeightsSaver(Callback):
+ def __init__(self, N):
+ self.N = N
+ self.epoch = 0
+
+ def specifyFilePath(self, path):
+ self.full_path = path #full path to file, including file name
+
+ def on_epoch_end(self, epoch, logs={}):
+ if self.epoch % self.N == 0:
+ print("SAVING WEIGHTS")
+ w= self.model.get_weights()
+ pklfile= self.full_path + '_' + str(self.epoch) + '.pkl'
+ fpkl= open(pklfile, 'wb')
+ pickle.dump(w, fpkl)
+ fpkl.close()
+ self.epoch += 1
+
+
+
+#Abstract base class for all model classes
+class Model(object):
+
+ def __init__(self, data_dict, params_dict):
+ self.data_dict = data_dict
+ self.params_dict = params_dict
+ self.input = Input(shape=self.params_dict['inp_shape'], name='input')
+ self.TRAIN_FLAG=1
+ if len(data_dict) > 0:
+ self.encodeData()
+ else:
+ import warnings
+ warnings.warn("no data passed; cannot encode it")
+
+
+ #map categorical class labels (numbers) to encoded (e.g., one hot or ECOC) vectors
+ def encodeData(self):
+ self.Y_train = np.zeros((self.data_dict['X_train'].shape[0], self.params_dict['M'].shape[1]))
+ self.Y_test = np.zeros((self.data_dict['X_test'].shape[0], self.params_dict['M'].shape[1]))
+ for k in np.arange(self.params_dict['M'].shape[1]):
+ self.Y_train[:,k] = self.params_dict['M'][self.data_dict['Y_train_cat'], k]
+ self.Y_test[:,k] = self.params_dict['M'][self.data_dict['Y_test_cat'], k]
+
+
+
+ #define the neural network
+ def defineModel(self):
+
+ outputs=[]
+ self.penultimate = []
+ self.penultimate2 = []
+
+ features = []
+
+ n = int(self.params_dict['M'].shape[1]/self.params_dict['num_chunks'])
+ for k in np.arange(0,self.params_dict['num_chunks']):
+
+ x = self.input
+
+ if self.params_dict.get('zero_one_input', False):
+ x = x - 0.5
+
+ if self.params_dict['inp_shape'][2]>1:
+ x_gs = Grayscaler()(x)
+ else:
+ x_gs = x
+
+ if (self.TRAIN_FLAG==1):
+ x = GaussianNoise(self.params_dict['noise_stddev'], input_shape=self.params_dict['inp_shape'])(x)
+ x_gs = GaussianNoise(self.params_dict['noise_stddev'], input_shape=self.params_dict['inp_shape'])(x_gs)
+
+ if self.params_dict['DATA_AUGMENTATION_FLAG']>0:
+ x = DataAugmenter(self.params_dict['batch_size'])(x)
+ x_gs = DataAugmenter(self.params_dict['batch_size'])(x_gs)
+
+ x = ClassBlender(self.params_dict['blend_factor'], self.params_dict['batch_size'])(x)
+ x_gs = ClassBlender(self.params_dict['blend_factor'], self.params_dict['batch_size'])(x_gs)
+
+
+ #x = Lambda(lambda x: x-0.5)(x)
+
+ x = Clipper()(x)
+ x_gs = Clipper()(x_gs)
+
+      # TODO: verify that this modification makes sense
+
+ # Added trainable=self.TRAIN_FLAG==1 for all batchnorm layers to make
+ # sure they stay fixed during eval (modification by AUTHOR)
+
+ for rep in np.arange(self.params_dict['model_rep']):
+ x = Conv2D(self.params_dict['num_filters_ens'][0], (5,5), activation='elu', padding='same')(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+
+ x = Conv2D(self.params_dict['num_filters_ens'][0], (3,3), strides=(2,2), activation='elu', padding='same')(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+
+ for rep in np.arange(self.params_dict['model_rep']):
+ x = Conv2D(self.params_dict['num_filters_ens'][1], (3, 3), activation='elu', padding='same')(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+ x = Conv2D(self.params_dict['num_filters_ens'][1], (3,3), strides=(2,2), activation='elu', padding='same')(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+ for rep in np.arange(self.params_dict['model_rep']):
+ x = Conv2D(self.params_dict['num_filters_ens'][2], (3, 3), activation='elu', padding='same')(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+
+ x = Conv2D(self.params_dict['num_filters_ens'][2], (3,3), strides=(2,2), activation='elu', padding='same')(x)
+ #x = BatchNormalization()(x)
+
+
+
+ pens = []
+ out=[]
+ for k2 in np.arange(n):
+ x0 = Conv2D(self.params_dict['num_filters_ens_2'], (5, 5), strides=(2,2), activation='elu', padding='same')(x_gs)
+ x0 = Conv2D(self.params_dict['num_filters_ens_2'], (3, 3), strides=(2,2), activation='elu', padding='same')(x0)
+ x0 = Conv2D(self.params_dict['num_filters_ens_2'], (3, 3), strides=(2,2), activation='elu', padding='same')(x0)
+
+ x_= Concatenate()([x0, x])
+
+ x_ = Conv2D(self.params_dict['num_filters_ens_2'], (2, 2), activation='elu', padding='same')(x_)
+
+ x_ = Conv2D(self.params_dict['num_filters_ens_2'], (2, 2), activation='elu', padding='same')(x_)
+
+ x_ = Flatten()(x_)
+
+ features.append(x_)
+
+ x_ = Dense(16, activation='elu')(x_)
+ x_ = Dense(8, activation='elu')(x_)
+ x_ = Dense(4, activation='elu')(x_)
+ x0 = Dense(2, activation='linear')(x_)
+
+ pens += [x0]
+
+ x1 = Dense(1, activation='linear', name='w_'+str(k)+'_'+str(k2)+'_'+self.params_dict['name'], kernel_regularizer=regularizers.l2(0.0))(x0)
+ out += [x1]
+
+ self.penultimate += [pens]
+
+ if len(pens) > 1:
+ self.penultimate2 += [Concatenate()(pens)]
+ else:
+ self.penultimate2 += pens
+
+ if len(out)>1:
+ outputs += [Concatenate()(out)]
+ else:
+ outputs += out
+
+ self.features = features
+
+ self.model = KerasModel(inputs=self.input, outputs=outputs)
+ # print(self.model.summary())
+ #plot_model(self.model, to_file=self.params_dict['model_path'] + '/' + self.params_dict['name'] + '.png')
+
+ return outputs
+
+
+ def defineLoss(self):
+ error = "Sub-classes must implement defineLoss."
+ raise NotImplementedError(error)
+
+
+ def defineMetric(self):
+ error = "Sub-classes must implement defineMetric."
+ raise NotImplementedError(error)
+
+
+ def trainModel(self):
+ opt = Adam(lr=self.params_dict['lr'])
+
+ self.model.compile(optimizer=opt, loss=[self.defineLoss(k) for k in np.arange(self.params_dict['num_chunks'])], metrics=self.defineMetric())
+ WS = WeightsSaver(self.params_dict['weight_save_freq'])
+ WS.specifyFilePath(self.params_dict['model_path'] + self.params_dict['name'])
+
+ Y_train_list=[]
+ Y_test_list=[]
+
+ start = 0
+ for k in np.arange(self.params_dict['num_chunks']):
+ end = start + int(self.params_dict['M'].shape[1]/self.params_dict['num_chunks'])
+ Y_train_list += [self.Y_train[:,start:end]]
+ Y_test_list += [self.Y_test[:,start:end]]
+ start=end
+
+
+ self.model.fit(self.data_dict['X_train'], Y_train_list,
+ epochs=self.params_dict['epochs'],
+ batch_size=self.params_dict['batch_size'],
+ shuffle=True,
+ validation_data=[self.data_dict['X_test'], Y_test_list],
+ callbacks=[WS])
+
+
+
+ self.saveModel()
+
+
+
+
+ def resumeTrainModel(self):
+
+ Y_train_list=[]
+ Y_test_list=[]
+
+ start = 0
+ for k in np.arange(self.params_dict['num_chunks']):
+ end = start + int(self.params_dict['M'].shape[1]/self.params_dict['num_chunks'])
+ Y_train_list += [self.Y_train[:,start:end]]
+ Y_test_list += [self.Y_test[:,start:end]]
+ start=end
+
+ def hinge_loss(y_true, y_pred):
+ loss = tf.reduce_mean(tf.maximum(1.0-y_true*y_pred, 0))
+ return loss
+
+ def hinge_pred(y_true, y_pred):
+ corr = tf.to_float((y_pred*y_true)>0)
+ return tf.reduce_mean(corr)
+
+ self.model = load_model(self.params_dict['model_path'] + self.params_dict['name'] + '_final.h5', custom_objects={'DataAugmenter':DataAugmenter, 'ClassBlender':ClassBlender, 'Clipper':Clipper, 'Grayscaler':Grayscaler, 'hinge_loss':hinge_loss, 'hinge_pred':hinge_pred})
+ WS = WeightsSaver(self.params_dict['weight_save_freq'])
+ WS.specifyFilePath(self.params_dict['model_path'] + self.params_dict['name'])
+
+
+ self.model.fit(self.data_dict['X_train'], Y_train_list,
+ epochs=self.params_dict['epochs'],
+ batch_size=self.params_dict['batch_size'],
+ shuffle=True,
+ validation_data=[self.data_dict['X_test'], Y_test_list],
+ callbacks=[WS])
+
+
+
+ self.saveModel()
+
+
+
+  #this function takes the output of the NN and maps it into logits (which are then passed into softmax to give a prob. dist.)
+  #It effectively does a Hamming-style decoding by taking the inner product of the output with each row of the coding matrix (M)
+  #(each row is a codeword); the better the match, the larger the dot product between the output and that row
+  #the decoding itself is simply a log applied to the ReLU of these correlations
+ def outputDecoder(self, x, M=None):
+ if M is None:
+ M = self.params_dict['M']
+ mat1 = tf.matmul(x, M, transpose_b=True)
+
+ if not self.params_dict['adaptive_attack']:
+ mat1 = tf.maximum(mat1, 0)
+ mat1 = tf.log(mat1+1e-6) #floor negative values
+ return mat1
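+
+  # Worked example (illustrative only, with a toy 2-bit code): if the rows of M
+  # are the +/-1 codewords, e.g. M = [[+1, +1], [+1, -1]], and the network
+  # outputs x = [[0.9, -0.8]], then x @ M^T = [[0.1, 1.7]], so the second
+  # codeword correlates best; in the non-adaptive case, tf.maximum(., 0)
+  # discards negative correlations and tf.log(. + 1e-6) turns the clipped
+  # correlations into logits.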
+
+
+ def defineBinarizedModel(self):
+ assert hasattr(self, "penultimate2"), "model needs to be defined first"
+
+ readouts = []
+ individual_logits = []
+ for k in range(len(self.features)):
+ readout = Dense(1, activation='linear',
+ name='binarized_readout_' + str(k),
+ kernel_regularizer=regularizers.l2(0.0)
+ )
+ logit = readout(self.features[k])
+ logit = Lambda(self.params_dict['output_activation'])(logit)
+ readouts.append(readout)
+ individual_logits.append(logit)
+
+ if len(individual_logits)>1:
+ logits = Concatenate()(individual_logits)
+ else: #if only a single chunk
+ logits = individual_logits[0]
+ M = np.stack([np.ones(logits.shape[-1]), -np.ones(logits.shape[-1])], 0).astype(np.float32)
+ logits = Lambda(
+ lambda x: self.outputDecoder(
+ x,
+ M=M
+ ))(logits)
+
+ probs = Activation('softmax')(logits) #return probs
+
+ self.binarized_logit = logits
+ self.binarized_probs = probs
+ self.binarized_readouts = readouts
+
+ self.model_binarized = KerasModel(inputs=self.input, outputs=self.binarized_probs)
+
+
+ def defineFullModel(self):
+ self.TRAIN_FLAG=0
+ outputs = self.defineModel()
+
+ if len(outputs)>1:
+ self.raw_output = Concatenate()(outputs)
+ else: #if only a single chunk
+ self.raw_output = outputs[0]
+
+ #pass output logits through activation
+ for idx,o in enumerate(outputs):
+ outputs[idx] = Lambda(self.params_dict['output_activation'])(o)
+
+ if len(outputs)>1:
+ x = Concatenate()(outputs)
+ else: #if only a single chunk
+ x = outputs[0]
+
+ x = Lambda(self.outputDecoder)(x) #logits
+ logits = x
+ x = Activation('softmax')(x) #return probs
+
+ self.logits = logits
+ self.probabilities = x
+
+ if self.params_dict['base_model'] == None:
+ self.model_full = KerasModel(inputs=self.input, outputs=x)
+ else:
+ self.model_full = KerasModel(inputs=self.params_dict['base_model'].input, outputs=x)
+
+
+ #CleverHans model wrapper; returns a model that CH can attack
+ def modelCH(self):
+ return CleverHansKerasModelWrapper(self.model_full)
+
+ def modelBinarizedCH(self):
+ return CleverHansKerasModelWrapper(self.model_binarized)
+
+
+ def saveModel(self):
+ w= self.model.get_weights()
+ pklfile= self.params_dict['model_path'] + self.params_dict['name'] + '_final.pkl'
+ fpkl= open(pklfile, 'wb')
+ pickle.dump(w, fpkl)
+ fpkl.close()
+ self.model.save(self.params_dict['model_path'] + self.params_dict['name'] + '_final.h5')
+
+
+
+ def loadModel(self):
+ pklfile= self.params_dict['model_path'] + self.params_dict['name'] + '_final.pkl'
+ f= open(pklfile, 'rb')
+ weigh= pickle.load(f);
+ f.close();
+ self.defineModel()
+ self.model.set_weights(weigh)
+
+
+ def loadFullModel(self):
+ pklfile= self.params_dict['model_path'] + self.params_dict['name'] + '_final.pkl'
+ f= open(pklfile, 'rb')
+ weigh= pickle.load(f);
+ f.close();
+ self.defineFullModel()
+ self.model_full.set_weights(weigh)
+
+
+ def predict(self, X):
+ return self.model_full(X)
diff --git a/case_studies/error_correcting_codes/Model_Implementations.py b/case_studies/error_correcting_codes/Model_Implementations.py
new file mode 100644
index 0000000..cb5f93e
--- /dev/null
+++ b/case_studies/error_correcting_codes/Model_Implementations.py
@@ -0,0 +1,250 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Full implementation of all methods of the abstract class "Model".
+"""
+
+import tensorflow as tf
+import numpy as np
+from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Dropout, Multiply, Lambda, Input, Dense, Conv2D, MaxPooling2D, Flatten, Activation, UpSampling2D, Concatenate, GaussianNoise
+from tensorflow.keras.utils import plot_model
+from tensorflow.keras import metrics, regularizers, optimizers
+from tensorflow.keras.models import Model as KerasModel
+from Model import Model
+from tensorflow.keras import losses, metrics
+from ClassBlender import ClassBlender
+from DataAugmenter import DataAugmenter
+
+
+
+#Full architectural definition for all "baseline" models used in the paper
+def defineModelBaseline(self):
+ outputs=[]
+ self.penultimate = []
+ self.penultimate2 = []
+
+ x = self.input
+
+ x = GaussianNoise(self.params_dict['noise_stddev'], input_shape=self.params_dict['inp_shape'])(x)
+ if (self.TRAIN_FLAG==1):
+ if self.params_dict['DATA_AUGMENTATION_FLAG']>0:
+ x = DataAugmenter(self.params_dict['batch_size'])(x)
+ x = ClassBlender(self.params_dict['blend_factor'], self.params_dict['batch_size'])(x)
+
+ x = Lambda(lambda x: tf.clip_by_value(x,-0.5,0.5))(x)
+
+ for rep in np.arange(self.params_dict['model_rep']):
+ x = Conv2D(self.params_dict['num_filters_std'][0], (5,5), activation='elu', padding='same', kernel_regularizer=regularizers.l2(self.params_dict['weight_decay']))(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+ x = Conv2D(self.params_dict['num_filters_std'][0], (3,3), strides=(2,2), activation='elu', padding='same')(x)
+
+ for rep in np.arange(self.params_dict['model_rep']):
+ x = Conv2D(self.params_dict['num_filters_std'][1], (3, 3), activation='elu', padding='same', kernel_regularizer=regularizers.l2(self.params_dict['weight_decay']))(x)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x = BatchNormalization()(x)
+
+ x = Conv2D(self.params_dict['num_filters_std'][1], (3,3), strides=(2,2), activation='elu', padding='same')(x)
+ x_=x
+
+ for rep in np.arange(self.params_dict['model_rep']):
+ x_ = Conv2D(self.params_dict['num_filters_std'][2], (3, 3), activation='elu', padding='same', kernel_regularizer=regularizers.l2(self.params_dict['weight_decay']))(x_)
+ if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
+ x_ = BatchNormalization()(x_)
+
+ x_ = Conv2D(self.params_dict['num_filters_std'][2], (3,3), strides=(2,2), activation='elu', padding='same')(x_)
+
+ x_ = Flatten()(x_)
+
+ x_ = Dense(128, activation='elu')(x_)
+ x_ = Dense(64, activation='elu')(x_)
+ x0 = Dense(64, activation='linear')(x_)
+ x1 = Dense(self.params_dict['M'].shape[1], activation='linear', kernel_regularizer=regularizers.l2(0.0))(x0)
+
+ outputs = [x1]
+ self.model = KerasModel(inputs=self.input, outputs=outputs)
+ print(self.model.summary())
+ plot_model(self.model, to_file=self.params_dict['model_path'] + '/' + self.params_dict['name'] + '.png')
+
+ return outputs
+
+
+
+
+
+class Model_Softmax_Baseline(Model):
+
+ def __init__(self, data_dict, params_dict):
+ super(Model_Softmax_Baseline, self).__init__(data_dict, params_dict)
+
+
+ def defineModel(self):
+ return defineModelBaseline(self)
+
+
+
+ def defineLoss(self, idx):
+ def loss_fn(y_true, y_pred):
+ loss = tf.keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)
+ return loss
+ return loss_fn
+
+
+ def defineMetric(self):
+ return [metrics.categorical_accuracy]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class Model_Logistic_Baseline(Model):
+
+ def __init__(self, data_dict, params_dict):
+ super(Model_Logistic_Baseline, self).__init__(data_dict, params_dict)
+
+
+ def defineModel(self):
+ return defineModelBaseline(self)
+
+
+
+ def defineLoss(self, idx):
+ def loss_fn(y_true, y_pred):
+ loss = tf.keras.backend.binary_crossentropy(y_true, y_pred, from_logits=True)
+ return loss
+ return loss_fn
+
+
+
+
+ def defineMetric(self):
+ def sigmoid_pred(y_true, y_pred):
+
+ corr = tf.to_float((y_pred*(2*y_true-1))>0)
+ return tf.reduce_mean(corr)
+
+ return [sigmoid_pred]
+
+
+
+
+
+
+
+
+
+
+
+
+class Model_Tanh_Baseline(Model):
+
+ def __init__(self, data_dict, params_dict):
+ super(Model_Tanh_Baseline, self).__init__(data_dict, params_dict)
+
+
+
+ def defineModel(self):
+ return defineModelBaseline(self)
+
+
+
+ def defineLoss(self, idx):
+ def hinge_loss(y_true, y_pred):
+ loss = tf.reduce_mean(tf.maximum(1.0-y_true*y_pred, 0))
+ return loss
+
+ return hinge_loss
+
+
+
+
+ def defineMetric(self):
+ def tanh_pred(y_true, y_pred):
+ corr = tf.to_float((y_pred*y_true)>0)
+ return tf.reduce_mean(corr)
+ return [tanh_pred]
+
+
+
+
+
+
+
+
+
+
+class Model_Logistic_Ensemble(Model):
+
+ def __init__(self, data_dict, params_dict):
+ super(Model_Logistic_Ensemble, self).__init__(data_dict, params_dict)
+
+ def defineLoss(self, idx):
+ def loss_fn(y_true, y_pred):
+ loss = tf.keras.backend.binary_crossentropy(y_true, y_pred, from_logits=True)
+ return loss
+ return loss_fn
+
+
+
+ def defineMetric(self):
+ def sigmoid_pred(y_true, y_pred):
+
+ corr = tf.to_float((y_pred*(2*y_true-1))>0)
+ return tf.reduce_mean(corr)
+
+ return [sigmoid_pred]
+
+
+
+
+
+
+
+class Model_Tanh_Ensemble(Model):
+
+ def __init__(self, data_dict, params_dict):
+ super(Model_Tanh_Ensemble, self).__init__(data_dict, params_dict)
+
+
+
+ def defineLoss(self, idx):
+
+ def hinge_loss(y_true, y_pred):
+ loss = tf.reduce_mean(tf.maximum(1.0-y_true*y_pred, 0))
+ return loss
+
+ return hinge_loss
+
+
+
+ def defineMetric(self):
+ def hinge_pred(y_true, y_pred):
+ corr = tf.to_float((y_pred*y_true)>0)
+ return tf.reduce_mean(corr)
+ return [hinge_pred]
+
+
\ No newline at end of file
diff --git a/case_studies/error_correcting_codes/README.md b/case_studies/error_correcting_codes/README.md
new file mode 100644
index 0000000..bf9516c
--- /dev/null
+++ b/case_studies/error_correcting_codes/README.md
@@ -0,0 +1,13 @@
+# robust-ecoc
+This is the GitHub repository for the paper "Error Correcting Output Codes Improve Probability Estimation and Adversarial Robustness of Deep Neural Networks" by Gunjan Verma and Ananthram Swami.
+
+All code is in Python 3.6 using Keras and TensorFlow. Adversarial attacks are done with CleverHans 3.0.1. The two main files to reproduce the results in the paper are:
+
+1. TrainModel.py. Use this script to train a model.
+2. AttackModel.py. Attack the trained model produced by (1), e.g. with an adversarial PGD attack.
+
+In addition, there are a few notable supporting files if one desires to modify the internal implementation:
+
+3. Model.py. Abstract base class implementing a baseline or ensemble model. Look at the implementation of "defineModel" in this file to see or modify the neural network architecture used by all ensemble models.
+
+4. Model_Implementations.py. Implements model-specific methods of (3). Look at the implementation of "defineModelBaseline" in this file to see or modify the neural network architecture used by all baseline models.
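+
+As a minimal sketch of what these scripts wrap (assuming the `data_dict` and `params_dict` dictionaries constructed in TrainModel.py; the scripts above remain the supported entry points), the model classes are used as follows:
+
+```python
+from Model_Implementations import Model_Tanh_Ensemble
+
+# data_dict / params_dict as constructed in TrainModel.py
+model = Model_Tanh_Ensemble(data_dict, params_dict)
+model.defineModel()   # build the ensemble architecture
+model.trainModel()    # train and save weights under params_dict['model_path']
+
+# for attacks, a trained ensemble is later reloaded and wrapped for CleverHans:
+model.loadFullModel()
+ch_model = model.modelCH()
+```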
diff --git a/case_studies/error_correcting_codes/TrainModel.py b/case_studies/error_correcting_codes/TrainModel.py
new file mode 100644
index 0000000..c0e5de7
--- /dev/null
+++ b/case_studies/error_correcting_codes/TrainModel.py
@@ -0,0 +1,206 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+This script trains a model that uses ECOC coding. It defines many types of models (baseline and ensemble).
+Uncomment the final two lines corresponding to the model of interest from one of the model definition "code blocks" below to train that model.
+Then run "AttackModel" to attack this model.
+"""
+
+# IMPORTS
+import tensorflow as tf;
+import numpy as np
+from tensorflow.keras.datasets import mnist, cifar10
+from Model_Implementations import Model_Softmax_Baseline, \
+ Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, \
+ Model_Tanh_Baseline
+import scipy.linalg
+
+# GENERAL PARAMETERS - SET THESE APPROPRIATELY
+model_path = 'checkpoints' # path to save model weights to
+weight_save_freq = 10 # how frequently (in epochs, e.g. every 10 epochs) to save weights to disk
+tf.set_random_seed(1)
+
+########DATASET-SPECIFIC PARAMETERS: CHOOSE THIS BLOCK FOR MNIST
+# DATA_DESC = 'MNIST'; (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
+# Y_train = np.squeeze(Y_train); Y_test = np.squeeze(Y_test)
+# num_channels = 1; inp_shape = (28,28,1); num_classes=10
+##MODEL-SPECIFIC PARAMETERS: MNIST
+##PARAMETERS RELATED TO SGD OPTIMIZATION
+# epochs=150; batch_size=200; lr=3e-4;
+##MODEL DEFINITION PARAMETERS
+# num_filters_std = [64, 64, 64]; num_filters_ens=[32, 32, 32]; num_filters_ens_2=4;
+# dropout_rate_std=0.0; dropout_rate_ens=0.0; weight_decay = 0
+# noise_stddev = 0.3; blend_factor=0.3;
+# model_rep_baseline=1; model_rep_ens=2;
+# DATA_AUGMENTATION_FLAG=0; BATCH_NORMALIZATION_FLAG=0
+########END: DATASET-SPECIFIC PARAMETERS: MNIST
+
+
+##########DATASET-SPECIFIC PARAMETERS: CHOOSE THIS BLOCK FOR CIFAR10
+DATA_DESC = 'CIFAR10';
+(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
+Y_train = np.squeeze(Y_train);
+Y_test = np.squeeze(Y_test)
+num_channels = 3;
+inp_shape = (32, 32, 3);
+num_classes = 10
+# MODEL-SPECIFIC PARAMETERS: CIFAR10
+# PARAMETERS RELATED TO SGD OPTIMIZATION
+epochs = 400;
+batch_size = 200;
+lr = 2e-4;
+# MODEL DEFINITION PARAMETERS
+num_filters_std = [32, 64, 128];
+num_filters_ens = [32, 64, 128];
+num_filters_ens_2 = 16;
+dropout_rate_std = 0.0;
+dropout_rate_ens = 0.0;
+weight_decay = 0
+noise_stddev = 0.032;
+blend_factor = 0.032;
+model_rep_baseline = 2;
+model_rep_ens = 2;
+DATA_AUGMENTATION_FLAG = 1;
+BATCH_NORMALIZATION_FLAG = 1
+##########END: DATASET-SPECIFIC PARAMETERS: CIFAR10
+
+
+# DATA PRE-PROCESSING
+X_train = (X_train / 255).astype(np.float32);
+X_test = (X_test / 255).astype(np.float32); # scale data to (0,1)
+X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2],
+ num_channels);
+X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],
+ num_channels)
+X_valid = X_test[0:1000];
+Y_valid = Y_test[
+ 0:1000]; # validation data (to monitor accuracy during training)
+X_train = X_train - 0.5;
+X_test = X_test - 0.5;
+X_valid = X_valid - 0.5; # map to range (-0.5,0.5)
+data_dict = {'X_train': X_train, 'Y_train_cat': Y_train, 'X_test': X_test,
+ 'Y_test_cat': Y_test}
+
+### TRAIN MODEL. each block below corresponds to one of the models in Table 1 of the paper. In order to train,
+# uncomment the final two lines of the block of interest and then run this script
+
+"""
+### BASELINE SOFTMAX MODEL DEFINITION
+name = 'softmax_baseline'+'_'+DATA_DESC; num_chunks=1
+M = np.eye(num_classes).astype(np.float32)
+output_activation = 'softmax'; base_model=None
+params_dict = {'weight_decay':weight_decay, 'num_filters_std':num_filters_std, 'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'model_rep':model_rep_baseline, 'base_model':base_model, 'num_chunks':num_chunks, 'output_activation':output_activation, 'batch_size':batch_size, 'epochs':epochs, 'lr':lr, 'dropout_rate':dropout_rate_std, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
+#m0 = Model_Softmax_Baseline(data_dict, params_dict)
+#m0.defineModel(); m0.trainModel()
+
+
+## BASELINE LOGISTIC MODEL DEFINITION
+name = 'logistic_baseline'+'_'+DATA_DESC; num_chunks=1
+M = np.eye(num_classes).astype(np.float32)
+output_activation = 'sigmoid'; base_model=None
+params_dict = {'weight_decay':weight_decay, 'num_filters_std':num_filters_std, 'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'model_rep':model_rep_baseline, 'base_model':base_model, 'num_chunks':num_chunks, 'output_activation':output_activation, 'batch_size':batch_size, 'epochs':epochs, 'lr':lr, 'dropout_rate':dropout_rate_std, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
+#m1 = Model_Logistic_Baseline(data_dict, params_dict)
+#m1.defineModel(); m1.trainModel()
+
+
+## BASELINE TANH MODEL DEFINITION
+name = 'Tanh_baseline_16'+'_'+DATA_DESC; seed = 59; num_chunks=1; code_length=16; num_codes=num_classes; code_length_true=code_length
+M = scipy.linalg.hadamard(code_length).astype(np.float32)
+M[np.arange(0, num_codes,2), 0]= -1#replace first col, which for this Hadamard construction is always 1, hence not a useful bit
+np.random.seed(seed); np.random.shuffle(M)
+idx=np.random.permutation(code_length)
+M = M[0:num_codes, idx[0:code_length_true]]
+base_model=None
+def output_activation(x):
+ return tf.nn.tanh(x)
+params_dict = {'weight_decay':weight_decay, 'num_filters_std':num_filters_std, 'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'model_rep':model_rep_baseline, 'base_model':base_model, 'num_chunks':num_chunks, 'output_activation':output_activation, 'batch_size':batch_size, 'epochs':epochs, 'dropout_rate':dropout_rate_std, 'lr':lr, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
+#m2 = Model_Tanh_Baseline(data_dict, params_dict)
+#m2.defineModel(); m2.trainModel()
+
+## ENSEMBLE LOGISTIC MODEL DEFINITION
+name = 'logistic_diverse'+'_'+DATA_DESC; num_chunks=2
+M = np.eye(num_classes).astype(np.float32)
+base_model=None
+def output_activation(x):
+ return tf.nn.sigmoid(x)
+params_dict = {'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'base_model':base_model, 'num_chunks':num_chunks, 'model_rep': model_rep_ens, 'output_activation':output_activation, 'num_filters_ens':num_filters_ens, 'num_filters_ens_2':num_filters_ens_2,'batch_size':batch_size, 'epochs':epochs, 'dropout_rate':dropout_rate_ens, 'lr':lr, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
+#m3 = Model_Logistic_Ensemble(data_dict, params_dict)
+#m3.defineModel(); m3.trainModel()
+
+
+
+#COMMENTS FOR ALL TANH ENSEMBLE MODELS:
+#1. num_chunks refers to how many models comprise the ensemble (4 is used in the paper); code_length/num_chunks should be an integer
+#2. output_activation is the function to apply to the logits
+# a. one can use anything which gives support to positive and negative values (since output code has +1/-1 elements); tanh or identity maps both work
+# b. in order to alleviate potential concerns of gradient masking with tanh, one can use identity as well
+#3. M is the actual coding matrix (referred to in the paper as H). Each row is a codeword
+#    note that any random shuffle of a Hadamard matrix's rows or columns is still orthogonal
+#4. There is nothing particularly special about the seed (which effectively determines the coding matrix).
+# We tried several seeds from 0-60 and found that all give comparable model performance (e.g. benign and adversarial accuracy).
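+#   Example (illustrative arithmetic only): the ENSEMBLE TANH 32 model below uses
+#   code_length=32 and num_chunks=4, so each of the 4 sub-models predicts
+#   32/4 = 8 bits of every codeword.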
+
+## ENSEMBLE TANH 16 MODEL DEFINITION
+name = 'tanh_16_diverse'+'_'+DATA_DESC; seed = 59; code_length=16; num_codes=code_length; num_chunks=4; base_model=None;
+def output_activation(x):
+ return tf.nn.tanh(x)
+M = scipy.linalg.hadamard(code_length).astype(np.float32)
+M[np.arange(0, num_codes,2), 0]= -1#replace first col, which for scipy's Hadamard construction is always 1, hence not a useful classifier; this change still ensures all codewords have dot product <=0; since our decoder ignores negative correlations anyway, this has no net effect on probability estimation
+np.random.seed(seed)
+np.random.shuffle(M)
+idx=np.random.permutation(code_length)
+M = M[0:num_codes, idx[0:code_length]]
+params_dict = {'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'base_model':base_model, 'num_chunks':num_chunks, 'model_rep': model_rep_ens, 'output_activation':output_activation, 'num_filters_ens':num_filters_ens, 'num_filters_ens_2':num_filters_ens_2,'batch_size':batch_size, 'epochs':epochs, 'dropout_rate':dropout_rate_ens, 'lr':lr, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
+#m4 = Model_Tanh_Ensemble(data_dict, params_dict)
+#m4.defineModel(); m4.trainModel()
+"""
+
+## ENSEMBLE TANH 32 MODEL DEFINITION
+name = 'tanh_32_diverse' + '_' + DATA_DESC;
+seed = 59;
+code_length = 32;
+num_codes = code_length;
+num_chunks = 4;
+base_model = None;
+
+
+def output_activation(x):
+ return tf.nn.tanh(x)
+
+
+M = scipy.linalg.hadamard(code_length).astype(np.float32)
+M[np.arange(0, num_codes,
+ 2), 0] = -1 # replace first col, which for scipy's Hadamard construction is always 1, hence not a useful classifier; this change still ensures all codewords have dot product <=0; since our decoder ignores negative correlations anyway, this has no net effect on probability estimation
+np.random.seed(seed)
+np.random.shuffle(M)
+idx = np.random.permutation(code_length)
+M = M[0:num_codes, idx[0:code_length]]
+params_dict = {'BATCH_NORMALIZATION_FLAG': BATCH_NORMALIZATION_FLAG,
+ 'DATA_AUGMENTATION_FLAG': DATA_AUGMENTATION_FLAG, 'M': M,
+ 'base_model': base_model, 'num_chunks': num_chunks,
+ 'model_rep': model_rep_ens,
+ 'output_activation': output_activation,
+ 'num_filters_ens': num_filters_ens,
+ 'num_filters_ens_2': num_filters_ens_2, 'batch_size': batch_size,
+ 'epochs': epochs, 'dropout_rate': dropout_rate_ens, 'lr': lr,
+ 'blend_factor': blend_factor, 'inp_shape': inp_shape,
+ 'noise_stddev': noise_stddev,
+ 'weight_save_freq': weight_save_freq, 'name': name,
+ 'model_path': model_path}
+m5 = Model_Tanh_Ensemble(data_dict, params_dict)
+m5.defineModel();
+m5.trainModel()
diff --git a/case_studies/error_correcting_codes/adversarial_evaluation.py b/case_studies/error_correcting_codes/adversarial_evaluation.py
new file mode 100644
index 0000000..bd28850
--- /dev/null
+++ b/case_studies/error_correcting_codes/adversarial_evaluation.py
@@ -0,0 +1,285 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Run this to attack a trained model via TrainModel.
+Use the "loadFullModel" submethod to load in an already trained model (trained via TrainModel)
+The main attack function is "runAttacks" which runs attacks on trained models
+"""
+
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+from cleverhans.attacks import ProjectedGradientDescent
+from Model_Implementations import Model_Softmax_Baseline, \
+ Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, \
+ Model_Tanh_Baseline
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras import backend
+import tensorflow as tf;
+import numpy as np
+import scipy.linalg
+
+model_path = 'checkpoints/ECOC/tanh32/checkpoints' #path with saved model parameters
+
+def setup_model_and_data(adaptive_attack=False):
+ print("Modifying model for adaptive attack:", adaptive_attack)
+ # Dataset-specific parameters - should be same as those used in TrainModel
+ DATA_DESC = 'CIFAR10';
+ (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
+ epochs = None;
+ weight_save_freq = None
+ num_classes = 10 # how many classes (categories) are in this dataset?
+ Y_train = np.squeeze(Y_train);
+ Y_test = np.squeeze(Y_test)
+ num_filters_std = [32, 64, 128];
+ num_filters_ens = [32, 64, 128];
+ num_filters_ens_2 = 16;
+ dropout_rate_std = 0.0;
+ dropout_rate_ens = 0.0;
+ weight_decay = 0
+ model_rep_baseline = 2;
+ model_rep_ens = 2;
+ DATA_AUGMENTATION_FLAG = 1;
+ BATCH_NORMALIZATION_FLAG = 1
+ num_channels = 3;
+ inp_shape = (32, 32, 3);
+ lr = 1e-4;
+ batch_size = 80;
+ noise_stddev = 0.032;
+ blend_factor = .032
+
+ # DATA PRE-PROCESSING
+ X_train = (X_train / 255).astype(np.float32);
+ X_test = (X_test / 255).astype(np.float32)
+ # reshape (add third (image) channel)
+ X_train = X_train.reshape(X_train.shape[0], X_train.shape[1],
+ X_train.shape[2],
+ num_channels);
+ X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],
+ num_channels)
+ X_valid = X_test[1000:2000];
+ Y_valid = Y_test[1000:2000]; # validation data, used to attack model
+
+ ## ENSEMBLE TANH 32 MODEL DEFINITION
+ name = 'tanh_32_diverse' + '_' + DATA_DESC;
+ seed = 59;
+ code_length = 32;
+ num_codes = code_length;
+ num_chunks = 4;
+ base_model = None;
+
+ def output_activation(x):
+ if adaptive_attack:
+ return x
+ else:
+ return tf.nn.tanh(x)
+
+ M = scipy.linalg.hadamard(code_length).astype(np.float32)
+ M[np.arange(0, num_codes,
+ 2), 0] = -1 # replace first col, which for scipy's Hadamard construction is always 1, hence not a useful classifier; this change still ensures all codewords have dot product <=0; since our decoder ignores negative correlations anyway, this has no net effect on probability estimation
+ np.random.seed(seed)
+ np.random.shuffle(M)
+ idx = np.random.permutation(code_length)
+ M = M[0:num_codes, idx[0:code_length]]
+ params_dict = {'BATCH_NORMALIZATION_FLAG': BATCH_NORMALIZATION_FLAG,
+ 'DATA_AUGMENTATION_FLAG': DATA_AUGMENTATION_FLAG, 'M': M,
+ 'base_model': base_model, 'num_chunks': num_chunks,
+ 'model_rep': model_rep_ens,
+ 'output_activation': output_activation,
+ 'num_filters_ens': num_filters_ens,
+ 'num_filters_ens_2': num_filters_ens_2, 'batch_size': batch_size,
+ 'epochs': epochs, 'dropout_rate': dropout_rate_ens, 'lr': lr,
+ 'blend_factor': blend_factor, 'inp_shape': inp_shape,
+ 'noise_stddev': noise_stddev,
+ 'weight_save_freq': weight_save_freq, 'name': name,
+ 'model_path': model_path,
+ 'zero_one_input': True,
+ 'adaptive_attack': adaptive_attack
+ }
+ m5 = Model_Tanh_Ensemble({}, params_dict)
+ m5.loadFullModel() # load in the saved model, which should have already been trained first via TrainModel
+
+ m5.legend = 'TEns32';
+
+ model = m5
+
+ return model, (X_valid, Y_valid), (X_test, Y_test)
+
+
+def wbAttack(sess, model, x_ph, x_adv_op, X, Y, batch_size=500, verbose=True):
+ n_correct = 0
+ n_total = 0
+ all_logits = []
+ all_x_adv = []
+ import tqdm
+ pbar = np.arange(0, X.shape[0], batch_size)
+ if verbose:
+ pbar = tqdm.tqdm(pbar)
+
+ for start_idx in pbar:
+ x = X[start_idx:start_idx + batch_size]
+ y = Y[start_idx:start_idx + batch_size]
+ x_adv = sess.run(x_adv_op, {x_ph: x})
+ logits = sess.run(model.logits, {model.input: x_adv})
+ preds = np.argmax(logits, -1)
+ n_correct += np.sum(np.equal(preds, y))
+ n_total += len(x)
+ all_logits.append(logits)
+ all_x_adv.append(x_adv)
+
+ all_x_adv = np.concatenate(all_x_adv, 0)
+ all_logits = np.concatenate(all_logits, 0)
+
+ adv_acc = n_correct / n_total
+ return adv_acc, all_logits, all_x_adv
+
+
+def patch_pgd_loss():
+ import cleverhans
+
+ def fgm(x,
+ logits,
+ y=None,
+ eps=0.3,
+ ord=np.inf,
+ clip_min=None,
+ clip_max=None,
+ targeted=False,
+ sanity_checks=True):
+
+ asserts = []
+
+ # If a data range was specified, check that the input was in that range
+ if clip_min is not None:
+ asserts.append(cleverhans.utils_tf.assert_greater_equal(
+ x, tf.cast(clip_min, x.dtype)))
+
+ if clip_max is not None:
+ asserts.append(cleverhans.utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
+
+ # Make sure the caller has not passed probs by accident
+ assert logits.op.type != 'Softmax'
+
+ if y is None:
+ # Using model predictions as ground truth to avoid label leaking
+ preds_max = tf.reduce_max(logits, 1, keepdims=True)
+ y = tf.to_float(tf.equal(logits, preds_max))
+ y = tf.stop_gradient(y)
+ y = y / tf.reduce_sum(y, 1, keepdims=True)
+
+ # Compute loss
+ loss = loss_fn(labels=y, logits=logits)
+ if targeted:
+ loss = -loss
+
+ # loss = tf.Print(loss, [loss])
+
+ # Define gradient of loss wrt input
+ grad, = tf.gradients(loss, x)
+
+ optimal_perturbation = cleverhans.attacks.optimize_linear(grad, eps, ord)
+
+ # Add perturbation to original example to obtain adversarial example
+ adv_x = x + optimal_perturbation
+
+ # If clipping is needed, reset all values outside of [clip_min, clip_max]
+ if (clip_min is not None) or (clip_max is not None):
+ # We don't currently support one-sided clipping
+ assert clip_min is not None and clip_max is not None
+ adv_x = cleverhans.utils_tf.clip_by_value(adv_x, clip_min, clip_max)
+
+ if sanity_checks:
+ with tf.control_dependencies(asserts):
+ adv_x = tf.identity(adv_x)
+
+ return adv_x
+
+ def loss_fn(sentinel=None,
+ labels=None,
+ logits=None,
+ dim=-1):
+    """
+    Replacement for the softmax cross-entropy loss used by cleverhans' fgm:
+    computes a margin (CW-style) loss between the true-class logit and the
+    largest other logit (see the reference to Carlini's L-inf attack below).
+    """
+ # Make sure that all arguments were passed as named arguments.
+ if sentinel is not None:
+ name = "softmax_cross_entropy_with_logits"
+ raise ValueError("Only call `%s` with "
+ "named arguments (labels=..., logits=..., ...)"
+ % name)
+ if labels is None or logits is None:
+ raise ValueError("Both labels and logits must be provided.")
+
+ labels = tf.stop_gradient(labels)
+ # modified from
+ # https://github.com/carlini/nn_robust_attacks/blob/master/li_attack.py
+ real = tf.reduce_sum(labels * logits, -1)
+ other = tf.reduce_max((1-labels) * logits - (labels*10000), -1)
+
+ loss = other - real
+
+ # loss = tf.Print(loss, [loss])
+
+ return loss
+
+ cleverhans.attacks.fgm = fgm
+
+def main():
+ sess = backend.get_session()
+ backend.set_learning_phase(
+ 0) # need to do this to get CleverHans to work with batchnorm
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--eps", type=float, default=8, help="in 0-255")
+ parser.add_argument("--pgd-n-steps", default=200, type=int)
+ parser.add_argument("--pgd-step-size", type=float, default=2 / 3 * 8, help="in 0-255")
+ parser.add_argument("--n-samples", type=int, default=512)
+ parser.add_argument("--adaptive-attack", action="store_true")
+ args = parser.parse_args()
+
+ model, (X_valid, Y_valid), (X_test, Y_test) = setup_model_and_data(adaptive_attack=args.adaptive_attack)
+
+ test_indices = list(range(len(X_test)))
+ np.random.shuffle(test_indices)
+ X_test, Y_test = X_test[test_indices], Y_test[test_indices]
+
+ X_test, Y_test = X_test[:args.n_samples], Y_test[:args.n_samples]
+
+ model_ch = model.modelCH()
+ attack = ProjectedGradientDescent(model_ch, sess=sess)
+ att_params = {'clip_min': 0.0, 'clip_max': 1.0,
+ 'eps': args.eps / 255.0, 'eps_iter': args.pgd_step_size / 255.0,
+ 'nb_iter': args.pgd_n_steps, 'ord': np.inf,
+ }
+ if args.adaptive_attack:
+ patch_pgd_loss()
+
+ x_ph = tf.placeholder(shape=model.input.shape, dtype=tf.float32)
+ x_adv_op = attack.generate(x_ph, **att_params)
+ adv_acc, all_logits, all_x_adv = wbAttack(sess, model,
+ x_ph, x_adv_op,
+ X_test, Y_test, batch_size=512)
+
+ print("Robust accuracy:", adv_acc)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/error_correcting_codes/binarization_test.py b/case_studies/error_correcting_codes/binarization_test.py
new file mode 100644
index 0000000..efff695
--- /dev/null
+++ b/case_studies/error_correcting_codes/binarization_test.py
@@ -0,0 +1,219 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Run this to attack a trained model via TrainModel.
+Use the "loadFullModel" submethod to load in an already trained model (trained via TrainModel)
+The main attack function is "runAttacks" which runs attacks on trained models
+"""
+import warnings
+
+import torch
+from cleverhans.attacks import ProjectedGradientDescent
+from torch.utils.data import DataLoader
+from torch.utils.data import TensorDataset
+
+from Model_Implementations import Model_Softmax_Baseline, \
+ Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, \
+ Model_Tanh_Baseline
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras import backend
+import tensorflow as tf;
+import numpy as np
+import scipy.linalg
+from adversarial_evaluation import setup_model_and_data, patch_pgd_loss
+from active_tests import decision_boundary_binarization as dbb
+from functools import partial
+
+from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
+from utils import build_dataloader_from_arrays
+
+
+def train_classifier(
+ n_features: int,
+ train_loader: DataLoader,
+ raw_train_loader: DataLoader,
+ logits: torch.Tensor,
+ device: str,
+ rescale_logits: dbb.LogitRescalingType,
+ model,
+ sess,
+):
+ del raw_train_loader
+
+ # fit a linear readout for each of the submodels of the ensemble
+ assert len(train_loader.dataset.tensors[0].shape) == 3
+ assert train_loader.dataset.tensors[0].shape[1] == len(model.binarized_readouts)
+
+ classifier_weights = []
+ classifier_biases = []
+ for i in range(len(model.binarized_readouts)):
+ x_ = train_loader.dataset.tensors[0][:, i]
+ y_ = train_loader.dataset.tensors[1]
+
+ cls = dbb._train_logistic_regression_classifier(
+ n_features,
+ DataLoader(TensorDataset(x_, y_), batch_size=train_loader.batch_size),
+ logits[:, i] if logits is not None else None,
+ "sklearn",
+ 20000,
+ device,
+ n_classes=2,
+ rescale_logits=rescale_logits,
+ solution_goodness="good",
+ class_weight="balanced"
+ )
+ classifier_weights.append(cls.weight.data.cpu().numpy().transpose()[:, [0]])
+ classifier_biases.append(cls.bias.data.cpu().numpy()[0])
+
+ # update weights of the binary models
+ for l, vw, vb in zip(model.binarized_readouts, classifier_weights, classifier_biases):
+ l.set_weights([vw, vb.reshape((1,))])
+
+ return BinarizedModelWrapper(model, sess)
+
+
+class BinarizedModelWrapper:
+ def __init__(self, model, sess):
+ self.model = model
+ self.sess = sess
+
+ def __call__(self, x):
+ x = x.numpy()
+ x = x.transpose((0, 2, 3, 1))
+ p = self.sess.run(self.model.binarized_probs, {self.model.input: x})
+ return torch.tensor(p)
+
+
+def main():
+ sess = backend.get_session()
+ backend.set_learning_phase(
+ 0) # need to do this to get CleverHans to work with batchnorm
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--eps", type=float, default=8, help="in 0-255")
+ parser.add_argument("--pgd-n-steps", default=200, type=int)
+ parser.add_argument("--pgd-step-size", type=float, default=2 / 3 * 8, help="in 0-255")
+
+ parser.add_argument("--n-samples", type=int, default=512)
+ parser.add_argument("--adaptive-attack", action="store_true")
+
+ parser.add_argument("-n-inner-points", type=int, default=999)
+ parser.add_argument("-n-boundary-points", type=int, default=1)
+ parser.add_argument("--sample-from-corners", action="store_true")
+ args = parser.parse_args()
+
+ model, (X_valid, Y_valid), (X_test, Y_test) = setup_model_and_data(adaptive_attack=args.adaptive_attack)
+ model.defineBinarizedModel()
+ binarized_model_ch = model.modelBinarizedCH()
+
+ if args.adaptive_attack:
+ patch_pgd_loss()
+
+ attack = ProjectedGradientDescent(binarized_model_ch, sess=sess)
+ att_params = {'clip_min': 0.0, 'clip_max': 1.0,
+ 'eps': args.eps / 255.0, 'eps_iter': args.pgd_step_size / 255.0,
+ 'nb_iter': args.pgd_n_steps, 'ord': np.inf}
+ x_ph = tf.placeholder(shape=model.input.shape, dtype=tf.float32)
+ x_adv_op = attack.generate(x_ph, **att_params)
+
+ def _model_forward_pass(x_np, features_only=False, features_and_logits=False):
+ x_np = np.transpose(x_np, (0, 2, 3, 1))
+
+ if features_only:
+ f = sess.run(model.features, {model.input : x_np})
+ f = np.stack(f, 1)
+ return f
+ elif features_and_logits:
+ f, l = sess.run((model.features,
+ model.logits), {model.input : x_np})
+ f = np.stack(f, 1)
+ return f, l
+ else:
+ l = sess.run(model.logits, {model.input : x_np})
+ return l
+
+
+ def run_attack(m, l, sess):
+ model = m.model
+ for x, y in l:
+ assert len(x) == 1
+ x, y = x.numpy(), y.numpy()
+ x = x.transpose((0, 2, 3, 1))
+ x_adv = sess.run(x_adv_op, {x_ph: x})
+
+ warnings.warn("ATTENTION: Clipping perturbation just to TEST something. Remove this again!")
+ delta = x_adv - x
+ delta[delta > 0] = args.eps / 255.0
+ delta[delta < 0] = -args.eps / 255.0
+ x_adv = np.clip(x + delta, 0, 1)
+
+ logits, probs = sess.run((model.binarized_logit, model.binarized_probs),
+ {model.input: x_adv})
+ is_adv = np.argmax(probs) != y
+ return is_adv, (torch.tensor(x_adv.transpose((0, 3, 1, 2))), torch.tensor(logits))
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=_model_forward_pass,
+ logit_forward_and_backward_pass=None
+ )
+
+ test_indices = list(range(len(X_test)))
+ np.random.shuffle(test_indices)
+ X_test, Y_test = X_test[test_indices], Y_test[test_indices]
+
+ X_test = np.transpose(X_test, (0, 3, 1, 2))
+ test_loader = build_dataloader_from_arrays(X_test, Y_test, batch_size=32)
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ scores_logit_differences_and_validation_accuracies = \
+ dbb.interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, kwargs: run_attack(m, l, sess),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=args.eps / 255.0,
+ norm="linf",
+ lr=25000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=args.n_samples,
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ train_classifier_fn=partial(
+ train_classifier,
+ model=model,
+ sess=sess
+ ),
+ fail_on_exception=False,
+ # needs to be set to None as logit rescaling introduces a weird behavior
+ # of very high R. ASR (probably due to the log in the logit calculation)
+ rescale_logits=None,
+ decision_boundary_closeness=0.999,
+ sample_training_data_from_corners=args.sample_from_corners
+
+ )
+
+ print(dbb.format_result(scores_logit_differences_and_validation_accuracies,
+ args.n_samples))
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/case_studies/error_correcting_codes/binarization_test.sh b/case_studies/error_correcting_codes/binarization_test.sh
new file mode 100644
index 0000000..013b778
--- /dev/null
+++ b/case_studies/error_correcting_codes/binarization_test.sh
@@ -0,0 +1,28 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+
+kwargs=""
+kwargs="--sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "epsilon: $epsilon"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Original attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/error_correcting_codes/binarization_test.py \
+ --n-samples=$nsamples \
+ --eps=$epsilon \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/error_correcting_codes/binarization_test.py \
+ --n-samples=$nsamples \
+ --adaptive-attack \
+ --pgd-n-steps=200 \
+ --pgd-step-size=0.50 \
+ --eps=$epsilon \
+ $kwargs
diff --git a/case_studies/error_correcting_codes/utils_keras.py b/case_studies/error_correcting_codes/utils_keras.py
new file mode 100644
index 0000000..b5ae70a
--- /dev/null
+++ b/case_studies/error_correcting_codes/utils_keras.py
@@ -0,0 +1,257 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Model construction utilities based on keras
+"""
+
+from distutils.version import LooseVersion
+
+# modified to work with error-correcting codes
+#import keras
+#from keras.models import Sequential
+#from keras.layers import Dense, Activation, Flatten
+
+from tensorflow import keras
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import Dense, Activation, Flatten
+
+from cleverhans.model import Model, NoSuchLayerError
+
+if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
+ from tensorflow.keras.layers import Conv2D
+else:
+ from keras.layers import Convolution2D
+
+
+def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
+ """
+ Defines the right convolutional layer according to the
+ version of Keras that is installed.
+ :param filters: (required integer) the dimensionality of the output
+ space (i.e. the number of output filters in the
+ convolution)
+ :param kernel_shape: (required tuple or list of 2 integers) specifies
+ the width and height of the 2D convolution window
+ :param strides: (required tuple or list of 2 integers) specifies the
+ strides of the convolution along the width and height
+ :param padding: (required string) can be either 'valid' (no padding around
+ input or feature map) or 'same' (pad to ensure that the
+ output feature map size is identical to the layer input)
+ :param input_shape: (optional) give input shape if this is the first
+ layer of the model
+ :return: the Keras layer
+ """
+ if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
+ if input_shape is not None:
+ return Conv2D(filters=filters, kernel_size=kernel_shape,
+ strides=strides, padding=padding,
+ input_shape=input_shape)
+ else:
+ return Conv2D(filters=filters, kernel_size=kernel_shape,
+ strides=strides, padding=padding)
+ else:
+ if input_shape is not None:
+ return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
+ subsample=strides, border_mode=padding,
+ input_shape=input_shape)
+ else:
+ return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
+ subsample=strides, border_mode=padding)
+
+
+def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
+ channels=1, nb_filters=64, nb_classes=10):
+ """
+ Defines a CNN model using Keras sequential model
+ :param logits: If set to False, returns a Keras model, otherwise will also
+ return logits tensor
+ :param input_ph: The TensorFlow tensor for the input
+ (needed if returning logits)
+ ("ph" stands for placeholder but it need not actually be a
+ placeholder)
+ :param img_rows: number of rows in the image
+ :param img_cols: number of columns in the image
+ :param channels: number of color channels (e.g., 1 for MNIST)
+ :param nb_filters: number of convolutional filters per layer
+ :param nb_classes: the number of output classes
+ :return: the Keras model (and additionally the logits tensor if logits is True)
+ """
+ model = Sequential()
+
+ # Define the layers successively (convolution layers are version dependent)
+ if keras.backend.image_dim_ordering() == 'th':
+ input_shape = (channels, img_rows, img_cols)
+ else:
+ input_shape = (img_rows, img_cols, channels)
+
+ layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
+ input_shape=input_shape),
+ Activation('relu'),
+ conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
+ Activation('relu'),
+ conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
+ Activation('relu'),
+ Flatten(),
+ Dense(nb_classes)]
+
+ for layer in layers:
+ model.add(layer)
+
+ if logits:
+ logits_tensor = model(input_ph)
+ model.add(Activation('softmax'))
+
+ if logits:
+ return model, logits_tensor
+ else:
+ return model
+
+
+class KerasModelWrapper(Model):
+ """
+ An implementation of `Model` that wraps a Keras model. It
+ specifically exposes the hidden features of a model by creating new models.
+ The symbolic graph is reused and so there is little overhead. Splitting
+ in-place operations can incur an overhead.
+ """
+
+ def __init__(self, model):
+ """
+ Create a wrapper for a Keras model
+ :param model: A Keras model
+ """
+ super(KerasModelWrapper, self).__init__(None, None, {})
+
+ if model is None:
+ raise ValueError('model argument must be supplied.')
+
+ self.model = model
+ self.keras_model = None
+
+ def _get_softmax_name(self):
+ """
+ Looks for the name of the softmax layer.
+ :return: Softmax layer name
+ """
+ for layer in self.model.layers:
+ cfg = layer.get_config()
+ if 'activation' in cfg and cfg['activation'] == 'softmax':
+ return layer.name
+
+ raise Exception("No softmax layers found")
+
+ def _get_logits_name(self):
+ """
+ Looks for the name of the layer producing the logits.
+ :return: name of layer producing the logits
+ """
+ softmax_name = self._get_softmax_name()
+ softmax_layer = self.model.get_layer(softmax_name)
+
+ if not isinstance(softmax_layer, Activation):
+ # In this case, the activation is part of another layer
+ return softmax_name
+
+ if not hasattr(softmax_layer, '_inbound_nodes'):
+ raise RuntimeError("Please update keras to version >= 2.1.3")
+
+ node = softmax_layer._inbound_nodes[0]
+
+ # modified for error-correcting codes, first line was original
+ if isinstance(node.inbound_layers, (list, tuple)):
+ logits_name = node.inbound_layers[0].name
+ else:
+ logits_name = node.inbound_layers.name
+ return logits_name
+
+ def get_logits(self, x):
+ """
+ :param x: A symbolic representation of the network input.
+ :return: A symbolic representation of the logits
+ """
+ logits_name = self._get_logits_name()
+ logits_layer = self.get_layer(x, logits_name)
+
+ # Need to deal with the case where softmax is part of the
+ # logits layer
+ if logits_name == self._get_softmax_name():
+ softmax_logit_layer = self.get_layer(x, logits_name)
+
+ # The final op is the softmax. Return its input
+ logits_layer = softmax_logit_layer._op.inputs[0]
+
+ return logits_layer
+
+ def get_probs(self, x):
+ """
+ :param x: A symbolic representation of the network input.
+ :return: A symbolic representation of the probs
+ """
+ name = self._get_softmax_name()
+
+ return self.get_layer(x, name)
+
+ def get_layer_names(self):
+ """
+ :return: Names of all the layers kept by Keras
+ """
+ layer_names = [x.name for x in self.model.layers]
+ return layer_names
+
+ def fprop(self, x):
+ """
+ Exposes all the layers of the model returned by get_layer_names.
+ :param x: A symbolic representation of the network input
+ :return: A dictionary mapping layer names to the symbolic
+ representation of their output.
+ """
+ # modified to work with error-correcting codes defense
+ from tensorflow.keras.models import Model as KerasModel
+
+ if self.keras_model is None:
+ # Get the input layer
+ new_input = self.model.get_input_at(0)
+
+ # Make a new model that returns each of the layers as output
+ out_layers = [x_layer.output for x_layer in self.model.layers]
+ self.keras_model = KerasModel(new_input, out_layers)
+
+ # and get the outputs for that model on the input x
+ outputs = self.keras_model(x)
+
+ # Keras returns a single tensor instead of a list when the model has
+ # only one output layer, so wrap it in a list in that case
+ if len(self.model.layers) == 1:
+ outputs = [outputs]
+
+ # compute the dict to return
+ fprop_dict = dict(zip(self.get_layer_names(), outputs))
+
+ return fprop_dict
+
+ def get_layer(self, x, layer):
+ """
+ Expose the hidden features of a model given a layer name.
+ :param x: A symbolic representation of the network input
+ :param layer: The name of the hidden layer to return features at.
+ :return: A symbolic representation of the hidden features
+ :raise: NoSuchLayerError if `layer` is not in the model.
+ """
+ # Return the symbolic representation for this layer.
+ output = self.fprop(x)
+ try:
+ requested = output[layer]
+ except KeyError:
+ raise NoSuchLayerError()
+ return requested
\ No newline at end of file
diff --git a/case_studies/evaluate_classifier.py b/case_studies/evaluate_classifier.py
new file mode 100644
index 0000000..178d6f5
--- /dev/null
+++ b/case_studies/evaluate_classifier.py
@@ -0,0 +1,734 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import copy
+import textwrap
+from typing import List
+from typing import Tuple
+from typing import Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torch.utils.data
+import torchvision
+import tqdm
+from torchvision import transforms
+from typing_extensions import Literal
+import warnings
+
+import active_tests.decision_boundary_binarization as dbl
+import argparse_utils
+import argparse_utils as aut
+import networks
+from attacks import adaptive_kwta_attack
+from attacks import autopgd
+from attacks import fab
+from attacks import pgd
+from attacks import thermometer_ls_pgd
+
+
+LossType = Union[Literal["ce"], Literal["logit-diff"]]
+
+
+def parse_arguments():
+ """Parse arguments."""
+ parser = argparse.ArgumentParser("Evaluation Script")
+ parser.add_argument(
+ "-ds", "--dataset", choices=("cifar10", "imagenet"), default="cifar10"
+ )
+ parser.add_argument("-bs", "--batch-size", default=128, type=int)
+ parser.add_argument("-ns", "--n-samples", default=512, type=int)
+ parser.add_argument("-i", "--input", required=True, type=str)
+ parser.add_argument("-d", "--device", default=None, type=str)
+ parser.add_argument(
+ "-c",
+ "--classifier",
+ default="networks.cifar_resnet18",
+ type=argparse_utils.parse_classifier_argument,
+ )
+ parser.add_argument("-cin", "--classifier-input-noise", default=0.0, type=float)
+ parser.add_argument("-cgn", "--classifier-gradient-noise", default=0.0, type=float)
+ parser.add_argument("-cls", "--classifier-logit-scale", default=1.0, type=float)
+ parser.add_argument(
+ "-cinorm", "--classifier-input-normalization", action="store_true"
+ )
+ parser.add_argument(
+ "-cijq",
+ "--classifier-input-jpeg-quality",
+ default=100,
+ type=int,
+ help="Setting a negative value leads to a differentiable JPEG version",
+ )
+ parser.add_argument(
+ "-cigb", "--classifier-input-gaussian-blur-stddev", default=0.0, type=float
+ )
+
+ parser.add_argument(
+ "-a",
+ "--adversarial-attack",
+ type=aut.parse_adversarial_attack_argument,
+ default=None,
+ )
+
+ parser.add_argument(
+ "-dbl",
+ "--decision-boundary-binarization",
+ type=aut.parse_decision_boundary_binarization_argument,
+ default=None,
+ )
+ parser.add_argument("--dbl-sample-from-corners", action="store_true")
+
+ parser.add_argument("-nfs", "--n-final-softmax", default=1, type=int)
+ parser.add_argument("-ciusvt", "--classifier-input-usvt", action="store_true")
+ parser.add_argument("--no-ce-loss", action="store_true")
+ parser.add_argument("--no-logit-diff-loss", action="store_true")
+ parser.add_argument("--no-clean-evaluation", action="store_true")
+ args = parser.parse_args()
+
+ assert not (
+ args.no_ce_loss and args.no_logit_diff_loss
+ ), "Only one loss can be disabled"
+
+ print("Detected type of tests to run:")
+ if args.adversarial_attack is not None:
+ print("\tadversarial attack:", args.adversarial_attack)
+
+ if args.decision_boundary_binarization is not None:
+ print(
+ "\tinterior-vs-boundary discrimination:",
+ args.decision_boundary_binarization,
+ )
+
+ print()
+
+ return args
+
+
+def setup_dataloader(
+ dataset: Union[Literal["cifar10", "imagenet"]], batch_size: int
+) -> torch.utils.data.DataLoader:
+ if dataset == "cifar10":
+ transform_test = transforms.Compose([transforms.ToTensor()])
+ create_dataset_fn = lambda download: torchvision.datasets.CIFAR10(
+ root="./data/cifar10",
+ train=False,
+ download=download,
+ transform=transform_test,
+ )
+ elif dataset == "imagenet":
+ transform_test = transforms.Compose(
+ [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
+ )
+ create_dataset_fn = lambda _: torchvision.datasets.ImageNet(
+ root="./data/imagenet", split="val", transform=transform_test
+ )
+ else:
+ raise ValueError("Invalid value for dataset.")
+ try:
+ testset = create_dataset_fn(False)
+ except:
+ testset = create_dataset_fn(True)
+
+ testloader = torch.utils.data.DataLoader(
+ testset, batch_size=batch_size, shuffle=True, num_workers=8
+ )
+
+ return testloader
+
+
+def main():
+ args = parse_arguments()
+
+ if args.input == "pretrained":
+ assert args.dataset == "imagenet"
+ classifier = args.classifier(pretrained=True)
+ print("Base Classifier:", args.classifier.__name__)
+ else:
+ classifier = args.classifier(
+ **({"pretrained": False} if args.dataset == "imagenet" else {})
+ )
+ print("Base Classifier:", args.classifier.__name__)
+
+ print("Loading checkpoint:", args.input)
+ state_dict = torch.load(args.input, map_location="cpu")
+ if "classifier" in state_dict:
+ classifier_state_dict = state_dict["classifier"]
+ else:
+ classifier_state_dict = state_dict
+ try:
+ classifier.load_state_dict(classifier_state_dict)
+ except RuntimeError as ex:
+ print(
+ f"Could not load weights due to error: "
+ f"{textwrap.shorten(str(ex), width=50, placeholder='...')}"
+ )
+ print("Trying to remap weights by removing 'module.' namespace")
+ modified_classifier_state_dict = {
+ (k[len("module.") :] if k.startswith("module.") else k): v
+ for k, v in classifier_state_dict.items()
+ }
+ try:
+ classifier.load_state_dict(modified_classifier_state_dict)
+ print("Successfully loaded renamed weights.")
+ except RuntimeError:
+ print("Remapping weights did also not work. Initial error:")
+ raise ex
+ classifier.train(False)
+
+ test_loader = setup_dataloader(args.dataset, args.batch_size)
+ if args.device is None:
+ args.device = "cuda" if torch.cuda.is_available() else "cpu"
+ classifier = classifier.to(args.device)
+
+ if args.classifier_input_normalization:
+ if args.dataset == "cifar10":
+ classifier = networks.InputNormalization(
+ classifier,
+ torch.tensor([0.4914, 0.4822, 0.4465]),
+ torch.tensor([0.2023, 0.1994, 0.2010]),
+ )
+ elif args.dataset == "imagenet":
+ classifier = networks.InputNormalization(
+ classifier,
+ torch.tensor([0.485, 0.456, 0.406]),
+ torch.tensor([0.229, 0.224, 0.225]),
+ )
+ else:
+ raise ValueError("Unknown dataset.")
+
+ if args.classifier_input_noise > 0:
+ classifier = networks.GaussianNoiseInputModule(
+ classifier, args.classifier_input_noise
+ )
+ if args.classifier_gradient_noise > 0:
+ classifier = networks.GaussianNoiseGradientModule(
+ classifier, args.classifier_gradient_noise
+ )
+
+ if args.classifier_input_jpeg_quality != 100:
+ if args.classifier_input_jpeg_quality > 0:
+ classifier = networks.JPEGForwardIdentityBackwardModule(
+ classifier,
+ args.classifier_input_jpeg_quality,
+ size=32 if args.dataset == "cifar10" else 224,
+ legacy=True,
+ )
+ print("Using (slow) legacy JPEG mode")
+ else:
+ classifier = networks.DifferentiableJPEGModule(
+ classifier,
+ args.classifier_input_jpeg_quality,
+ size=32 if args.dataset == "cifar10" else 224,
+ )
+ classifier = classifier.to(args.device)
+
+ if args.classifier_input_gaussian_blur_stddev > 0:
+ classifier = networks.GausianBlurForwardIdentityBackwardModule(
+ classifier, 3, args.classifier_input_gaussian_blur_stddev
+ )
+
+ if args.classifier_input_usvt:
+ classifier = networks.UVSTModule(classifier)
+
+ if args.classifier_logit_scale > 1:
+ classifier = networks.ScaledLogitsModule(
+ classifier, args.classifier_logit_scale
+ )
+
+ if args.n_final_softmax > 1:
+ classifier = torch.nn.Sequential(
+ classifier, *[torch.nn.Softmax() for _ in range(args.n_final_softmax)]
+ )
+
+ classifier = classifier.to(args.device)
+
+ if not args.no_clean_evaluation:
+ print(
+ "clean evaluation, Accuracy: {0}\n\tclass accuracy: {1}\n\tclass histogram: {3}".format(
+ *run_clean_evaluation(classifier, test_loader, args.device)
+ )
+ )
+
+ if args.adversarial_attack is not None:
+ print("adversarial evaluation:")
+ if not args.no_ce_loss:
+ print(
+ "\tadversarial evaluation (ce loss), ASR:",
+ run_adversarial_evaluation(
+ classifier,
+ test_loader,
+ "ce",
+ args.adversarial_attack,
+ args.n_samples,
+ args.device,
+ ),
+ )
+ if not args.no_logit_diff_loss:
+ print(
+ "\tadversarial evaluation (logit-diff loss), ASR:",
+ run_adversarial_evaluation(
+ classifier,
+ test_loader,
+ "logit-diff",
+ args.adversarial_attack,
+ args.n_samples,
+ args.device,
+ ),
+ )
+
+ max_eps_adversarial_attack_settings = copy.deepcopy(args.adversarial_attack)
+ # set epsilon to 0.5 and rescale the step size so that it stays the same
+ # relative to epsilon
+ max_eps_adversarial_attack_settings.epsilon = 0.50
+ max_eps_adversarial_attack_settings.step_size = (
+ args.adversarial_attack.step_size / args.adversarial_attack.epsilon * 0.5
+ )
+ if not args.no_ce_loss:
+ print(
+ "\tadversarial evaluation (ce loss, eps = 0.5), ASR:",
+ run_adversarial_evaluation(
+ classifier,
+ test_loader,
+ "ce",
+ max_eps_adversarial_attack_settings,
+ args.n_samples,
+ args.device,
+ ),
+ )
+ if not args.no_logit_diff_loss:
+ print(
+ "\tadversarial evaluation (logit-diff loss, eps = 0.5), ASR:",
+ run_adversarial_evaluation(
+ classifier,
+ test_loader,
+ "logit-diff",
+ max_eps_adversarial_attack_settings,
+ args.n_samples,
+ args.device,
+ ),
+ )
+
+ if args.decision_boundary_binarization is not None:
+ print("decision boundary binarization:")
+ if not args.no_ce_loss:
+ print(
+ run_decision_boundary_binarization(
+ classifier,
+ test_loader,
+ "ce",
+ args.decision_boundary_binarization,
+ args.n_samples,
+ args.device,
+ args.batch_size,
+ "interior-vs-boundary discrimination (ce loss)",
+ args.dbl_sample_from_corners,
+ )
+ )
+ if not args.no_logit_diff_loss:
+ print(
+ run_decision_boundary_binarization(
+ classifier,
+ test_loader,
+ "logit-diff",
+ args.decision_boundary_binarization,
+ args.n_samples,
+ args.device,
+ args.batch_size,
+ "interior-vs-boundary discrimination (logit-diff loss)",
+ args.dbl_sample_from_corners,
+ )
+ )
+
+
+def run_clean_evaluation(
+ classifier: torch.nn.Module,
+ test_loader: torch.utils.data.DataLoader,
+ device: str,
+ n_classes: int = 10,
+) -> Tuple[float, List[float], np.ndarray, List[int]]:
+ """
+ Perform evaluation of classifier on clean data.
+
+ Args:
+ classifier: Classifier to evaluate.
+ test_loader: Dataloader to perform evaluation on.
+ device: torch device
+ n_classes: Number of classes in the dataset.
+ Returns
+ Accuracy, Accuracy per class, Correctly classified per sample,
+ Histogram of predicted labels
+ """
+ n_correct = 0
+ n_total = 0
+ class_histogram_correct = {}
+ class_histogram_total = {}
+ class_histogram_predicted = {}
+
+ if n_classes is not None:
+ for i in range(n_classes):
+ class_histogram_correct[i] = 0
+ class_histogram_total[i] = 0
+ class_histogram_predicted[i] = 0
+ correctly_classified = []
+ pbar = tqdm.tqdm(test_loader, leave=False)
+ for x, y in pbar:
+ x = x.to(device)
+ y = y.to(device)
+
+ with torch.no_grad():
+ y_pred = classifier(x).argmax(-1)
+ n_correct += (y_pred == y).long().sum().item()
+ n_total += len(x)
+ correctly_classified.append((y_pred == y).detach().cpu())
+
+ for y_, y_pred_ in zip(
+ y.detach().cpu().numpy(), y_pred.detach().cpu().numpy()
+ ):
+ if y_ not in class_histogram_correct:
+ class_histogram_correct[y_] = 0
+ class_histogram_correct[y_] += int(y_ == y_pred_)
+ if y_ not in class_histogram_total:
+ class_histogram_total[y_] = 0
+ class_histogram_total[y_] += 1
+ if y_pred_ not in class_histogram_predicted:
+ class_histogram_predicted[y_pred_] = 0
+ class_histogram_predicted[y_pred_] += 1
+ pbar.set_description(f"Accuracy = {n_correct / n_total:.4f}")
+ correctly_classified = torch.cat(correctly_classified).numpy()
+ class_histogram_correct = [
+ class_histogram_correct[k] for k in sorted(class_histogram_correct.keys())
+ ]
+ class_histogram_total = [
+ class_histogram_total[k] for k in sorted(class_histogram_total.keys())
+ ]
+
+ class_histogram_accuracy = [
+ a / b if b > 0 else np.nan
+ for a, b in zip(class_histogram_correct, class_histogram_total)
+ ]
+
+ class_histogram_predicted = [
+ class_histogram_predicted[k] for k in sorted(class_histogram_predicted.keys())
+ ]
+
+ return (
+ n_correct / n_total,
+ class_histogram_accuracy,
+ correctly_classified,
+ class_histogram_predicted,
+ )
+
+
+def run_decision_boundary_binarization(
+ classifier: torch.nn.Module,
+ test_loader: torch.utils.data.DataLoader,
+ loss: LossType,
+ linearization_settings: aut.DecisionBoundaryBinarizationSettings,
+ n_samples: int,
+ device: str,
+ batch_size: int,
+ title: str = "interior-vs-boundary discrimination",
+ sample_training_data_from_corners: bool = False,
+) -> str:
+ """Perform the binarization test for a classifier.
+
+ Args:
+ classifier: Classifier to evaluate.
+ test_loader: Test dataloader.
+ loss: Loss to use in the adversarial attack during the test.
+ linearization_settings: Settings of the test.
+ n_samples: Number of samples to perform test on.
+ device: Torch device.
+ batch_size: Batch size.
+ title: Name of the experiment that will be shown in log.
+ sample_training_data_from_corners: Sample boundary samples from
+ corners or surfaces.
+ Returns:
+ String summarizing the results of the test.
+ """
+ def attack_fn(
+ model: torch.nn.Module, data_loader: torch.utils.data.DataLoader, attack_kwargs
+ ):
+ result = run_adversarial_evaluation(
+ model,
+ data_loader,
+ loss,
+ linearization_settings.adversarial_attack_settings,
+ n_samples=1,
+ device=device,
+ return_samples=True,
+ n_classes=2,
+ early_stopping=True,
+ )
+ # return ASR, (x_adv, logits(x_adv))
+ return result[0], (result[1][1], result[1][2])
+
+ scores_logit_differences_and_validation_accuracies_and_asr = dbl.interior_boundary_discrimination_attack(
+ classifier,
+ test_loader,
+ attack_fn,
+ linearization_settings,
+ n_samples,
+ device,
+ n_samples_evaluation=200, # was set to n_samples
+ n_samples_asr_evaluation=linearization_settings.adversarial_attack_settings.n_steps,
+ rescale_logits="adaptive",
+ decision_boundary_closeness=0.9999,
+ sample_training_data_from_corners=sample_training_data_from_corners,
+ batch_size=batch_size,
+ )
+
+ return dbl.format_result(
+ scores_logit_differences_and_validation_accuracies_and_asr,
+ n_samples,
+ title=title,
+ )
+
+
+def run_adversarial_evaluation(
+ classifier: torch.nn.Module,
+ test_loader: torch.utils.data.DataLoader,
+ loss: LossType,
+ adversarial_attack_settings: aut.AdversarialAttackSettings,
+ n_samples: int,
+ device: str,
+ randomly_targeted: bool = False,
+ n_classes: int = 10,
+ return_samples: bool = False,
+ early_stopping: bool = True,
+) -> Tuple[float, ...]:
+ """
+ Perform an adversarial evaluation of a classifier.
+
+ Args:
+ classifier: Classifier to evaluate.
+ test_loader: Test dataloader.
+ loss: Loss to use in adversarial attack.
+ adversarial_attack_settings: Settings of adversarial evaluation.
+ n_samples: Number of samples to evaluate robustness on.
+ device: Torch device.
+ randomly_targeted: Whether to use random targets for the attack.
+ n_classes: Number of classes in the dataset (relevant for random targets).
+ return_samples: Whether to also return the clean and perturbed samples.
+ early_stopping: Stop once all samples have been successfully attacked.
+ Returns:
+ Either only Attack Success Rate (ASR) or Tuple containing ASR and
+ clean/perturbed samples as well as their logits.
+ """
+
+ loss_per_sample = adversarial_attack_settings.attack == "kwta"
+
+ if loss == "ce":
+ sign = 1 if randomly_targeted else -1
+ if loss_per_sample:
+ reduction = "none"
+ else:
+ reduction = "sum"
+ loss_fn = lambda x, y: sign * F.cross_entropy(x, y, reduction=reduction)
+ elif loss == "logit-diff":
+ sign = -1 if randomly_targeted else 1
+
+ def loss_fn(logits, y):
+ gt_logits = logits[range(len(y)), y]
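+ # mask out the ground-truth logit by subtracting a large constant, then take the largest remaining logit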
+ other = torch.max(
+ logits - 2 * torch.max(logits) * F.one_hot(y, logits.shape[-1]), -1
+ )[0]
+ value = sign * (gt_logits - other)
+ if not loss_per_sample:
+ value = value.sum()
+ return value
+
+ if adversarial_attack_settings.attack == "kwta":
+ if loss != "logit-diff":
+ warnings.warn(
+ "Adaptive attack for kWTA originally uses logit "
+ "differences and not CE loss",
+ RuntimeWarning,
+ )
+
+ n_attacked = 0
+ attack_successful = []
+ clean_samples = []
+ perturbed_samples = []
+ clean_or_target_labels = []
+ predicted_logits = []
+ for x, y in test_loader:
+ x = x[: max(1, min(len(x), n_samples - n_attacked))]
+ y = y[: max(1, min(len(y), n_samples - n_attacked))]
+
+ x = x.to(device)
+ y = y.to(device)
+
+ if randomly_targeted:
+ y = (y + torch.randint_like(y, 0, n_classes)) % n_classes
+ if adversarial_attack_settings.attack == "pgd":
+ x_adv = pgd.general_pgd(
+ loss_fn=lambda x, y: loss_fn(classifier(x), y),
+ is_adversarial_fn=lambda x, y: classifier(x).argmax(-1) == y
+ if randomly_targeted
+ else classifier(x).argmax(-1) != y,
+ x=x,
+ y=y,
+ n_steps=adversarial_attack_settings.n_steps,
+ step_size=adversarial_attack_settings.step_size,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ early_stopping=early_stopping,
+ n_averaging_steps=adversarial_attack_settings.n_averages,
+ random_start=adversarial_attack_settings.random_start,
+ )[0]
+ elif adversarial_attack_settings.attack == "autopgd":
+ temp = autopgd.auto_pgd(
+ model=classifier,
+ x=x,
+ y=y,
+ n_steps=adversarial_attack_settings.n_steps,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ targeted=randomly_targeted,
+ n_averaging_steps=adversarial_attack_settings.n_averages,
+ )
+ x_adv = temp[0]
+ if randomly_targeted:
+ y = temp[-1]
+ elif adversarial_attack_settings.attack == "autopgd+":
+ temp = autopgd.auto_pgd(
+ model=classifier,
+ x=x,
+ y=y,
+ n_steps=adversarial_attack_settings.n_steps,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ # from https://github.com/fra31/auto-attack/blob/
+ # 6482e4d6fbeeb51ae9585c41b16d50d14576aadc/autoattack/
+ # autoattack.py#L281
+ n_restarts=4,
+ targeted=randomly_targeted,
+ n_averaging_steps=adversarial_attack_settings.n_averages,
+ )
+ x_adv = temp[0]
+ if randomly_targeted:
+ y = temp[-1]
+ elif adversarial_attack_settings.attack == "fab":
+ temp = fab.fab(
+ model=classifier,
+ x=x,
+ y=y,
+ n_steps=adversarial_attack_settings.n_steps,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ targeted=randomly_targeted,
+ n_restarts=5,
+ )
+ x_adv = temp[0]
+ if randomly_targeted:
+ y = temp[-1]
+ elif adversarial_attack_settings.attack == "kwta":
+ x_adv = adaptive_kwta_attack.gradient_estimator_pgd(
+ model=classifier,
+ loss_fn=lambda x, y: loss_fn(classifier(x), y),
+ x=x,
+ y=y,
+ n_steps=adversarial_attack_settings.n_steps,
+ step_size=adversarial_attack_settings.step_size,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ random_start=True,
+ early_stopping=early_stopping,
+ targeted=randomly_targeted,
+ )[0]
+ elif adversarial_attack_settings.attack == "thermometer-lspgd":
+ if hasattr(classifier, "l"):
+ l = classifier.l
+ else:
+ l = 16
+ warnings.warn(
+ "Could not determine thermometer parameter l; "
+ "using default of 16",
+ RuntimeWarning,
+ )
+
+ x_adv = thermometer_ls_pgd.general_thermometer_ls_pgd(
+ loss_fn=lambda x, y: loss_fn(classifier(x, skip_encoder=True), y),
+ is_adversarial_fn=lambda x, y: classifier(x).argmax(-1) == y
+ if randomly_targeted
+ else classifier(x).argmax(-1) != y,
+ x=x,
+ y=y,
+ n_steps=adversarial_attack_settings.n_steps,
+ step_size=adversarial_attack_settings.step_size,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ random_start=True,
+ early_stopping=early_stopping,
+ temperature=1.0,
+ annealing_factor=1.0, # 1.0/1.2,
+ n_restarts=0,
+ l=l,
+ )[0]
+ else:
+ raise ValueError(
+ f"Unknown adversarial attack "
+ f"({adversarial_attack_settings.attack})."
+ )
+
+ with torch.no_grad():
+ logits = classifier(x_adv)
+ if randomly_targeted:
+ correctly_classified = logits.argmax(-1) == y
+ attack_successful += (
+ correctly_classified.cpu().detach().numpy().tolist()
+ )
+ else:
+ incorrectly_classified = logits.argmax(-1) != y
+ attack_successful += (
+ incorrectly_classified.cpu().detach().numpy().tolist()
+ )
+
+ clean_samples.append(x.cpu())
+ perturbed_samples.append(x_adv.cpu())
+ clean_or_target_labels.append(y.cpu())
+ predicted_logits.append(logits.cpu())
+
+ n_attacked += len(x)
+
+ if n_attacked >= n_samples:
+ break
+ attack_successful = np.array(attack_successful)
+ clean_samples = np.concatenate(clean_samples, 0)
+ perturbed_samples = np.concatenate(perturbed_samples, 0)
+ clean_or_target_labels = np.concatenate(clean_or_target_labels, 0)
+ predicted_logits = np.concatenate(predicted_logits, 0)
+
+ attack_successful = attack_successful[:n_samples]
+ clean_samples = clean_samples[:n_samples]
+ perturbed_samples = perturbed_samples[:n_samples]
+ clean_or_target_labels = clean_or_target_labels[:n_samples]
+ predicted_logits = predicted_logits[:n_samples]
+
+ result = [np.mean(attack_successful).astype(np.float32)]
+
+ if return_samples:
+ result += [
+ (clean_samples, perturbed_samples, predicted_logits, clean_or_target_labels)
+ ]
+
+ return tuple(result)
+
+
+if __name__ == "__main__":
+ warnings.filterwarnings("ignore", category=UserWarning, module="torch")
+ main()
diff --git a/case_studies/evaluate_detection_defense.py b/case_studies/evaluate_detection_defense.py
new file mode 100644
index 0000000..f93aa9c
--- /dev/null
+++ b/case_studies/evaluate_detection_defense.py
@@ -0,0 +1,223 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+from typing import Callable
+from typing import List
+
+import numpy as np
+import torch
+import torch.utils.data
+import torchvision
+from torch.nn import functional as F
+from torchvision import transforms
+
+import active_tests.logit_matching
+import argparse_utils as aut
+import networks
+from attacks import pgd
+
+
+def parse_arguments():
+ parser = argparse.ArgumentParser(
+ "CIFAR-10 (Defense w/ Detector) Evaluation Script")
+ parser.add_argument("-bs", "--batch-size", default=128, type=int)
+ parser.add_argument("-ns", "--n-samples", default=512, type=int)
+ parser.add_argument("-i", "--input", required=True, type=str)
+ parser.add_argument("-d", "--device", default=None, type=str)
+
+ parser.add_argument("-a", "--adversarial-attack",
+ type=aut.parse_adversarial_attack_argument,
+ default=None)
+
+ parser.add_argument("-l", "--logit-matching",
+ type=aut.parse_logit_matching_argument,
+ default=None)
+
+ args = parser.parse_args()
+
+ if args.adversarial_attack is not None:
+ print("Performing adversarial attack:", args.adversarial_attack)
+
+ if args.logit_matching is not None:
+ print("Performing logit matching:", args.logit_matching)
+
+ return args
+
+
+def setup_dataloader(batch_size: int) -> torch.utils.data.DataLoader:
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ ])
+
+ testset = torchvision.datasets.CIFAR10(root='./data', train=False,
+ download=True,
+ transform=transform_test)
+ testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
+ shuffle=True, num_workers=8)
+
+ return testloader
+
+
+def main():
+ args = parse_arguments()
+ classifier = networks.cifar_resnet18(num_classes=10)
+ detector = networks.Detector(n_features_classifier=10, classifier=classifier)
+
+ state_dict = torch.load(args.input)
+ classifier.load_state_dict(state_dict["classifier"])
+ detector.load_state_dict(state_dict["detector"])
+ classifier.train(False)
+ detector.train(False)
+
+ test_loader = setup_dataloader(args.batch_size)
+ if args.device is None:
+ args.device = "cuda" if torch.cuda.is_available() else "cpu"
+ classifier = classifier.to(args.device)
+ detector = detector.to(args.device)
+
+ if args.adversarial_attack is not None:
+ print("faulty adversarial evaluation 1, ASR:",
+ run_faulty_adversarial_evaluation(classifier, detector, test_loader,
+ args.adversarial_attack,
+ args.n_samples,
+ args.device))
+ print("correct adversarial evaluation, ASR:",
+ run_correct_adversarial_evaluation(classifier, detector, test_loader,
+ args.adversarial_attack,
+ args.n_samples,
+ args.device))
+ if args.logit_matching is not None:
+ print("logit matching (dataset):",
+ run_logit_matching_evaluation(classifier, detector, test_loader,
+ args.logit_matching,
+ args.n_samples,
+ args.device))
+
+
+def run_faulty_adversarial_evaluation(classifier: torch.nn.Module,
+ detector: torch.nn.Module,
+ test_loader: torch.utils.data.DataLoader,
+ adversarial_attack_settings: aut.AdversarialAttackSettings,
+ n_samples: int, device: str) -> float:
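+ # "faulty" evaluation: the attack optimizes only against the classifier; the detector is only consulted afterwards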
+ def eval_batch(x: torch.Tensor, y: torch.Tensor) -> List[bool]:
+ x_adv = pgd.pgd(classifier, x, y, targeted=False,
+ n_steps=adversarial_attack_settings.n_steps,
+ step_size=adversarial_attack_settings.step_size,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ random_start=True)[0]
+ with torch.no_grad():
+ not_detected = detector(x_adv).argmax(-1) == torch.zeros_like(y)
+ incorrectly_classified = classifier(x_adv).argmax(-1) != y
+ successful_attack = torch.logical_and(not_detected,
+ incorrectly_classified).cpu().detach().numpy().tolist()
+ return successful_attack
+
+ return run_adversarial_evaluation(eval_batch, test_loader, n_samples, device)
+
+
+def run_correct_adversarial_evaluation(classifier: torch.nn.Module,
+ detector: torch.nn.Module,
+ test_loader: torch.utils.data.DataLoader,
+ adversarial_attack_settings: aut.AdversarialAttackSettings,
+ n_samples: int, device: str) -> float:
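+ # correct evaluation: the attack jointly optimizes against the classifier and the detector (see loss_fn below)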
+ def eval_batch(x: torch.Tensor, y: torch.Tensor) -> List[bool]:
+ alpha = 0.5
+
+ def loss_fn(x, y):
+ classifier_logits = classifier(x)
+ detector_logits = detector(x)
+ # only maximize the CE w.r.t. the GT label for correctly classified samples
+ classifier_mask = classifier_logits.argmax(-1).detach() == y
+ # only minimize the CE w.r.t. the 0 label (i.e. not adversarial) for
+ # samples detected as adversarial examples
+ detector_mask = detector_logits.argmax(-1).detach() != 0
+ classifier_loss = -(F.cross_entropy(
+ classifier_logits,
+ y, reduction="none") * classifier_mask).mean()
+ detector_loss = (F.cross_entropy(
+ detector_logits, torch.zeros_like(y),
+ reduction="none") * detector_mask).mean()
+ return (1 - alpha) * classifier_loss + alpha * detector_loss
+
+ x_adv = pgd.general_pgd(x=x, y=y,
+ loss_fn=loss_fn,
+ is_adversarial_fn=lambda x, y: torch.logical_and(
+ classifier(x).argmax(-1) != y,
+ detector(x).argmax(
+ -1) == torch.zeros_like(y)),
+ n_steps=adversarial_attack_settings.n_steps,
+ step_size=adversarial_attack_settings.step_size,
+ epsilon=adversarial_attack_settings.epsilon,
+ norm=adversarial_attack_settings.norm,
+ random_start=True)[0]
+
+ with torch.no_grad():
+ not_detected = detector(x_adv).argmax(-1) == torch.zeros_like(y)
+
+ incorrectly_classified = classifier(x_adv).argmax(-1) != y
+ successful_attack = torch.logical_and(not_detected,
+ incorrectly_classified).cpu().detach().numpy().tolist()
+ return successful_attack
+
+ return run_adversarial_evaluation(eval_batch, test_loader, n_samples, device)
+
+
+def run_adversarial_evaluation(
+ batch_eval_fn: Callable[[torch.tensor, torch.Tensor], List[bool]],
+ test_loader: torch.utils.data.DataLoader, n_samples: int,
+ device: str) -> float:
+ """
+ :param batch_eval_fn:
+ :param test_loader:
+ :param n_samples:
+ :param device: torch device
+ :return: Returns Attack Success Rate
+ """
+
+ results = []
+ for x, y in test_loader:
+ x = x.to(device)
+ y = y.to(device)
+ results += batch_eval_fn(x, y)
+ if len(results) >= n_samples:
+ break
+ results = results[:n_samples]
+
+ return np.mean(np.array(results).astype(np.float32))
+
+
+def run_logit_matching_evaluation(classifier: Callable, detector: Callable,
+ test_loader: torch.utils.data.DataLoader,
+ logit_matching_settings: aut.LogitMatchingSettings, n_samples: int,
+ device: str):
+ merged_logits_fn = lambda x: torch.cat((classifier(x), detector(x)), 1)
+
+ results = []
+ for x, y in test_loader:
+ x = x.to(device)
+ results += active_tests.logit_matching.dataset_samples_logit_matching(
+ merged_logits_fn, x, logit_matching_settings.n_steps,
+ logit_matching_settings.step_size)
+
+ if len(results) >= n_samples:
+ break
+ results = results[:n_samples]
+ results = np.sqrt(np.array(results).sum(-1))
+ print(results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/feature_scatter/README.md b/case_studies/feature_scatter/README.md
new file mode 100644
index 0000000..8710273
--- /dev/null
+++ b/case_studies/feature_scatter/README.md
@@ -0,0 +1,55 @@
+# Feature Scattering Adversarial Training (NeurIPS 2019)
+
+## Introduction
+This is the implementation of the
+["Feature-Scattering Adversarial Training"](https://papers.nips.cc/paper/8459-defense-against-adversarial-attacks-using-feature-scattering-based-adversarial-training.pdf), which is a training method for improving model robustness against adversarial attacks. It advocates the usage of an unsupervised feature-scattering procedure for adversarial perturbation generation, which is effective for overcoming label leaking and improving model robustness.
+More information can be found on the project page: https://sites.google.com/site/hczhang1/projects/feature_scattering
+
+## Usage
+### Installation
+The training environment (PyTorch and dependencies) can be installed as follows:
+```
+git clone https://github.com/Haichao-Zhang/FeatureScatter.git
+cd FeatureScatter
+
+python3 -m venv .venv
+source .venv/bin/activate
+
+python3 setup.py install
+
+(or pip install -e .)
+```
+Tested under Python 3.5.2 and PyTorch 1.2.0.
+
+### Train
+Specify the path for saving the trained models in ```fs_train.sh```, and then run
+```
+sh ./fs_train.sh
+```
+
+### Evaluate
+Specify the path to the trained models to be evaluated in ```fs_eval.sh``` and then run
+```
+sh ./fs_eval.sh
+```
+
+### Reference Model
+A reference model trained on CIFAR10 is [here](https://drive.google.com/open?id=1FXgE7llvQoypf7iCGR680EKQf9cARTSg).
+
+
+## Cite
+
+If you find this work useful, please cite the following:
+
+```
+@inproceedings{feature_scatter,
+ author = {Haichao Zhang and Jianyu Wang},
+ title = {Defense Against Adversarial Attacks Using Feature Scattering-based Adversarial Training},
+ booktitle = {Advances in Neural Information Processing Systems},
+ year = {2019}
+}
+```
+
+## Contact
+
+For questions related to feature-scattering, please send me an email: ```hczhang1@gmail.com```
diff --git a/case_studies/feature_scatter/__init__.py b/case_studies/feature_scatter/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/feature_scatter/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/feature_scatter/attack_methods.py b/case_studies/feature_scatter/attack_methods.py
new file mode 100644
index 0000000..679f799
--- /dev/null
+++ b/case_studies/feature_scatter/attack_methods.py
@@ -0,0 +1,358 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+from attacks import autopgd
+from attacks import pgd
+from models import *
+from torch.autograd import Variable
+import utils
+
+from ftsc_utils import softCrossEntropy
+from ftsc_utils import one_hot_tensor
+import ot
+import pickle
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+
+class Attack_None(nn.Module):
+ def __init__(self, basic_net, config):
+ super(Attack_None, self).__init__()
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+ self.basic_net = basic_net
+
+ def forward(self, inputs, targets, attack=None, batch_idx=-1):
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+ result = self.basic_net(inputs)
+ if isinstance(result, tuple):
+ outputs, _ = self.basic_net(inputs)
+ else:
+ outputs = result
+ return outputs, None, None
+
+
+class Attack_PGD(nn.Module):
+ # Back-propagate
+ def __init__(self, basic_net, config, attack_net=None):
+ super(Attack_PGD, self).__init__()
+ self.basic_net = basic_net
+ self.attack_net = attack_net
+ self.rand = config['random_start']
+ self.step_size = config['step_size']
+ self.epsilon = config['epsilon']
+ self.num_steps = config['num_steps']
+ self.loss_func = torch.nn.CrossEntropyLoss(
+ reduction='none') if 'loss_func' not in config.keys(
+ ) else config['loss_func']
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+
+ self.box_type = 'white' if 'box_type' not in config.keys(
+ ) else config['box_type']
+
+
+ def forward(self,
+ inputs,
+ targets,
+ attack=True,
+ targeted_label=-1,
+ batch_idx=0):
+
+ if not attack:
+ outputs = self.basic_net(inputs)[0]
+ return outputs, None
+
+ if self.box_type == 'white':
+ # aux_net = pickle.loads(pickle.dumps(self.basic_net))
+ aux_net = self.basic_net
+ elif self.box_type == 'black':
+ assert self.attack_net is not None, "should provide an additional net in black-box case"
+ aux_net = pickle.loads(pickle.dumps(self.basic_net))
+ aux_net.eval()
+
+ output = aux_net(inputs)
+ if isinstance(output, tuple):
+ logits_pred_nat = output[0]
+ else:
+ logits_pred_nat = output
+ targets_prob = F.softmax(logits_pred_nat.float(), dim=1)
+
+ num_classes = targets_prob.size(1)
+
+ y_tensor_adv = targets
+ step_sign = 1.0
+
+ x = inputs.detach()
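+ # optionally start from a uniformly random point inside the eps-ball around the input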
+ if self.rand:
+ x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
+ x_org = x.detach()
+ loss_array = np.zeros((inputs.size(0), self.num_steps))
+
+ for i in range(self.num_steps):
+ x.requires_grad_()
+ if x.grad is not None:
+ x.grad.zero_()
+ aux_net.eval()
+
+ output = aux_net(x)
+ if isinstance(output, tuple):
+ logits = output[0]
+ else:
+ logits = output
+
+ loss = self.loss_func(logits, y_tensor_adv)
+ loss = loss.mean()
+ aux_net.zero_grad()
+ loss.backward()
+
+ x_adv = x.data + step_sign * self.step_size * torch.sign(
+ x.grad.data)
+ x_adv = torch.min(torch.max(x_adv, inputs - self.epsilon),
+ inputs + self.epsilon)
+ x_adv = torch.clamp(x_adv, -1.0, 1.0)
+ x = Variable(x_adv)
+
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+
+ output = aux_net(x.detach())
+ if isinstance(output, tuple):
+ logits_pert = output[0]
+ else:
+ logits_pert = output
+
+ return logits_pert, targets_prob.detach(), x.detach()
+
+
+class Attack_BetterPGD(nn.Module):
+ # Back-propagate
+ def __init__(self, basic_net, config, attack_net=None):
+ super(Attack_BetterPGD, self).__init__()
+ self.basic_net = basic_net
+ self.attack_net = attack_net
+ self.rand = config['random_start']
+ self.step_size = config['step_size']
+ self.epsilon = config['epsilon']
+ self.num_steps = config['num_steps']
+ self.loss_func = torch.nn.CrossEntropyLoss(
+ reduction='none') if 'loss_func' not in config.keys(
+ ) else config['loss_func']
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+
+ self.box_type = 'white' if 'box_type' not in config.keys(
+ ) else config['box_type']
+
+
+ def forward(self,
+ inputs,
+ targets,
+ attack=True,
+ targeted_label=-1,
+ batch_idx=0):
+
+ def net(x):
+ output = self.basic_net(x)
+ if isinstance(output, tuple):
+ return output[0]
+ else:
+ return output
+
+ if attack:
+ sign = 1.0 if targeted_label != -1 else -1.0
+ x_adv = pgd.general_pgd(
+ loss_fn=lambda x, y: sign * self.loss_func(net(x), y),
+ is_adversarial_fn=lambda x, y: net(x).argmax(-1) == y
+ if targeted_label != -1 else net(x).argmax(-1) != y,
+ x=inputs, y=targets, n_steps=self.num_steps,
+ step_size=self.step_size,
+ epsilon=self.epsilon,
+ norm="linf",
+ random_start=self.rand
+ )[0]
+ else:
+ x_adv = inputs
+
+ logits_pert = net(x_adv)
+ targets_prob = torch.softmax(logits_pert, -1)
+
+ return logits_pert, targets_prob.detach(), x_adv.detach()
+
+
+class Attack_AutoPGD(nn.Module):
+ # Back-propagate
+ def __init__(self, basic_net, config, attack_net=None):
+ super(Attack_AutoPGD, self).__init__()
+ self.basic_net = basic_net
+ self.attack_net = attack_net
+ self.epsilon = config['epsilon']
+ self.n_restarts = 0 if "num_restarts" not in config else \
+ config["num_restarts"]
+ self.num_steps = config['num_steps']
+ self.loss_func = "ce" if 'loss_func' not in config.keys(
+ ) else config['loss_func']
+
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+ self.box_type = 'white' if 'box_type' not in config.keys(
+ ) else config['box_type']
+
+ self.targeted = False if 'targeted' not in config.keys(
+ ) else config['targeted']
+ self.n_classes = 10 if 'n_classes' not in config.keys(
+ ) else config['n_classes']
+
+ def forward(self,
+ inputs,
+ targets,
+ attack=True,
+ targeted_label=-1,
+ batch_idx=0):
+
+ assert targeted_label == -1
+
+ def net(x):
+ output = self.basic_net(x)
+ if isinstance(output, tuple):
+ return output[0]
+ else:
+ return output
+
+ if attack:
+ temp = autopgd.auto_pgd(
+ model=net,
+ x=inputs, y=targets, n_steps=self.num_steps,
+ loss=self.loss_func,
+ epsilon=self.epsilon,
+ norm="linf",
+ n_restarts=self.n_restarts,
+ targeted=self.targeted,
+ n_averaging_steps=1,
+ n_classes=self.n_classes
+ )
+ x_adv = temp[0]
+ else:
+ x_adv = inputs
+
+ logits_pert = net(x_adv)
+ targets_prob = torch.softmax(logits_pert, -1)
+
+ return logits_pert, targets_prob.detach(), x_adv.detach()
+
+
+class Attack_FeaScatter(nn.Module):
+ def __init__(self, basic_net, config, attack_net=None):
+ super(Attack_FeaScatter, self).__init__()
+ self.basic_net = basic_net
+ self.attack_net = attack_net
+ self.rand = config['random_start']
+ self.step_size = config['step_size']
+ self.epsilon = config['epsilon']
+ self.num_steps = config['num_steps']
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+ self.box_type = 'white' if 'box_type' not in config.keys(
+ ) else config['box_type']
+ self.ls_factor = 0.1 if 'ls_factor' not in config.keys(
+ ) else config['ls_factor']
+
+ def forward(self,
+ inputs,
+ targets,
+ attack=True,
+ targeted_label=-1,
+ batch_idx=0):
+
+ if not attack:
+ outputs, _ = self.basic_net(inputs)
+ return outputs, None
+ if self.box_type == 'white':
+ aux_net = pickle.loads(pickle.dumps(self.basic_net))
+ elif self.box_type == 'black':
+ assert self.attack_net is not None, "should provide an additional net in black-box case"
+ aux_net = pickle.loads(pickle.dumps(self.basic_net))
+
+ aux_net.eval()
+ batch_size = inputs.size(0)
+ m = batch_size
+ n = batch_size
+
+ logits = aux_net(inputs)[0]
+ num_classes = logits.size(1)
+
+ outputs = aux_net(inputs)[0]
+ targets_prob = F.softmax(outputs.float(), dim=1)
+ y_tensor_adv = targets
+ step_sign = 1.0
+
+ x = inputs.detach()
+
+ x_org = x.detach()
+ x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
+
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+
+ logits_pred_nat, fea_nat = aux_net(inputs)
+
+ num_classes = logits_pred_nat.size(1)
+ y_gt = one_hot_tensor(targets, num_classes, device)
+
+ loss_ce = softCrossEntropy()
+
+ iter_num = self.num_steps
+
+ for i in range(iter_num):
+ x.requires_grad_()
+ if x.grad is not None:
+ x.grad.zero_()
+
+ logits_pred, fea = aux_net(x)
+
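+ # feature-scattering step: maximize the OT (Sinkhorn) distance between the predictions on natural and perturbed inputs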
+ ot_loss = ot.sinkhorn_loss_joint_IPOT(1, 0.00, logits_pred_nat,
+ logits_pred, None, None,
+ 0.01, m, n)
+
+ aux_net.zero_grad()
+ adv_loss = ot_loss
+ adv_loss.backward(retain_graph=True)
+ x_adv = x.data + self.step_size * torch.sign(x.grad.data)
+ x_adv = torch.min(torch.max(x_adv, inputs - self.epsilon),
+ inputs + self.epsilon)
+ x_adv = torch.clamp(x_adv, -1.0, 1.0)
+ x = Variable(x_adv)
+
+ logits_pred, fea = self.basic_net(x)
+ self.basic_net.zero_grad()
+
+ y_sm = utils.label_smoothing(y_gt, y_gt.size(1), self.ls_factor)
+
+ adv_loss = loss_ce(logits_pred, y_sm.detach())
+
+ return logits_pred, adv_loss
+
diff --git a/case_studies/feature_scatter/binarization_test.sh b/case_studies/feature_scatter/binarization_test.sh
new file mode 100644
index 0000000..c4409f4
--- /dev/null
+++ b/case_studies/feature_scatter/binarization_test.sh
@@ -0,0 +1,48 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+
+# kwargs=""
+kwargs="--sample-from-corners"
+echo "Epsilon: $epsilon"
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and few steps (20)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+PYTHONPATH=$(pwd) python3 case_studies/feature_scatter/fs_eval.py \
+ --model-path=checkpoints/feature_scattering_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=pgd-autopgddlr \
+ --dataset=cifar10 \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=9999 \
+ --n-boundary-points=1 \
+ --resume \
+ --epsilon=$epsilon \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and more steps (200)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+PYTHONPATH=$(pwd) python3 case_studies/feature_scatter/fs_eval.py \
+ --model-path=checkpoints/feature_scattering_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=pgd-autopgddlr \
+ --dataset=cifar10 \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=9999 \
+ --n-boundary-points=1 \
+ --resume \
+ --epsilon=$epsilon \
+ --more-steps \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/feature_scatter/fs_eval.py b/case_studies/feature_scatter/fs_eval.py
new file mode 100644
index 0000000..62e2146
--- /dev/null
+++ b/case_studies/feature_scatter/fs_eval.py
@@ -0,0 +1,547 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import inspect
+import os
+import sys
+import time
+import warnings
+from functools import partial
+
+import numpy as np
+import torch
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from tqdm import tqdm
+
+import active_tests.decision_boundary_binarization
+import ftsc_utils as utils
+import networks
+from attack_methods import Attack_BetterPGD
+from attack_methods import Attack_None
+from attack_methods import Attack_PGD
+from attack_methods import Attack_AutoPGD
+from ftsc_utils import CWLoss
+from models import *
+
+warnings.simplefilter('once', RuntimeWarning)
+
+currentdir = os.path.dirname(
+ os.path.abspath(inspect.getfile(inspect.currentframe())))
+grandparentdir = os.path.dirname(os.path.dirname(currentdir))
+sys.path.insert(0, grandparentdir)
+
+parser = argparse.ArgumentParser(
+ description='Feature Scattering Adversarial Training')
+
+parser.register('type', 'bool', utils.str2bool)
+
+parser.add_argument('--resume',
+ '-r',
+ action='store_true',
+ help='resume from checkpoint')
+parser.add_argument('--binarization-test', action="store_true")
+parser.add_argument('--attack', default=True, type='bool', help='attack')
+parser.add_argument('--model_dir', type=str, help='model path')
+parser.add_argument('--model-path', type=str, help='model path', default=None)
+parser.add_argument('--init_model_pass',
+ default='-1',
+ type=str,
+ help='init model pass')
+
+parser.add_argument('--attack_method',
+ default='pgd',
+ type=str,
+ help='adv_mode (natural, pgd or cw)')
+parser.add_argument('--attack_method_list', type=str)
+
+parser.add_argument('--log_step', default=7, type=int, help='log_step')
+
+# dataset dependent
+parser.add_argument('--num_classes', default=10, type=int, help='num classes')
+parser.add_argument('--dataset', default='cifar10', type=str,
+ help='dataset') # concat cascade
+parser.add_argument('--batch_size_test',
+ default=100,
+ type=int,
+ help='batch size for testing')
+parser.add_argument('--image_size', default=32, type=int, help='image size')
+
+parser.add_argument('--num_samples_test',
+ default=-1,
+ type=int)
+
+parser.add_argument('--n-inner-points',
+ default=50,
+ type=int)
+
+parser.add_argument('--n-boundary-points',
+ default=10,
+ type=int)
+
+parser.add_argument("--epsilon", type=int, default=8)
+parser.add_argument("--more-steps", action="store_true")
+parser.add_argument("--sample-from-corners", action="store_true")
+
+args = parser.parse_args()
+
+if args.binarization_test:
+ assert args.batch_size_test == 1
+
+if args.dataset == 'cifar10':
+ print('------------cifar10---------')
+ args.num_classes = 10
+ args.image_size = 32
+elif args.dataset == 'cifar100':
+ print('----------cifar100---------')
+ args.num_classes = 100
+ args.image_size = 32
+elif args.dataset == 'svhn':
+  print('------------svhn---------')
+ args.num_classes = 10
+ args.image_size = 32
+elif args.dataset == 'mnist':
+ print('----------mnist---------')
+ args.num_classes = 10
+ args.image_size = 28
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+start_epoch = 0
+
+# Data
+print('==> Preparing data..')
+
+if args.dataset == 'cifar10' or args.dataset == 'cifar100':
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
+ ])
+elif args.dataset == 'svhn':
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
+ ])
+
+if args.dataset == 'cifar10':
+ testset = torchvision.datasets.CIFAR10(root='./data',
+ train=False,
+ download=True,
+ transform=transform_test)
+elif args.dataset == 'cifar100':
+ testset = torchvision.datasets.CIFAR100(root='./data',
+ train=False,
+ download=True,
+ transform=transform_test)
+
+elif args.dataset == 'svhn':
+ testset = torchvision.datasets.SVHN(root='./data',
+ split='test',
+ download=True,
+ transform=transform_test)
+
+testloader = torch.utils.data.DataLoader(testset,
+ batch_size=args.batch_size_test,
+ shuffle=False,
+ num_workers=2)
+
+print('==> Building model..')
+if args.dataset == 'cifar10' or args.dataset == 'cifar100' or args.dataset == 'svhn':
+  print('---wide resnet-----')
+ basic_net = WideResNet(depth=28,
+ num_classes=args.num_classes,
+ widen_factor=10)
+
+basic_net = basic_net.to(device)
+
+
+class ZeroOneOneOneNetwork(nn.Module):
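+  """Rescales inputs from [0, 1] to [-1, 1] before passing them to the wrapped model."""
+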
+ def __init__(self, model):
+ super().__init__()
+ self.model = model
+
+ def forward(self, x, **kwargs):
+ return self.model((x - 0.5) / 0.5, **kwargs)
+
+
+if args.binarization_test:
+ args.num_classes = 2
+
+if args.num_samples_test == -1:
+  args.num_samples_test = len(testset)
+
+# configs
+config_natural = {'train': False}
+
+config_fgsm = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 1,
+ 'step_size': args.epsilon / 255.0,
+ 'random_start': True
+}
+
+config_pgd = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'step_size': args.epsilon / 4.0 / 255.0,
+ 'random_start': True,
+ 'loss_func': torch.nn.CrossEntropyLoss(reduction='none')
+}
+
+config_cw = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'step_size': args.epsilon / 4.0 / 255.0,
+ 'random_start': True,
+ 'loss_func': CWLoss(args.num_classes)
+}
+
+config_auto_pgd_ce = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'loss_func': "ce"
+}
+
+config_auto_pgd_dlr = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'loss_func': "logit-diff"
+}
+
+config_auto_pgd_dlr_t = {
+ **config_auto_pgd_dlr,
+ "targeted": True,
+ "n_classes": 10,
+}
+
+config_auto_pgd_ce_plus = {
+ **config_auto_pgd_ce,
+ "n_restarts": 4
+}
+
+config_auto_pgd_dlr_plus = {
+ **config_auto_pgd_dlr,
+ "n_restarts": 4
+}
+
+class __KwargsSequential(torch.nn.Sequential):
+ """
+ Modification of a torch.nn.Sequential model that allows kwargs in the
+ forward pass. These will be passed to the first module of the network.
+ """
+
+ def forward(self, input, **kwargs):
+ for idx, module in enumerate(self):
+ if idx == 0:
+ input = module(input, **kwargs)
+ else:
+ input = module(input)
+ return input
+
+def train_classifier(n_features,
+ train_loader,
+ raw_train_loader,
+ logits,
+ device,
+ rescale_logits,
+ classifier):
+ del raw_train_loader
+
+ x_ = train_loader.dataset.tensors[0]
+ y_ = train_loader.dataset.tensors[1]
+
+ x_original = x_[0]
+ x_boundary = x_[y_ == 1]
+ assert len(x_boundary) == 1, "Method only works for a single boundary point"
+ x_boundary = x_boundary[0]
+
+ margin = 0.99999999999
+ delta = x_boundary - x_original
+ delta = delta / (torch.dot(delta, delta))
+ w = delta
+ b = -torch.dot(x_original, delta) - margin
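+  # With w = delta / ||delta||^2 this yields dot(w, x_original) + b = -margin < 0 and
+  # dot(w, x_boundary) + b = 1 - margin > 0, i.e. the hyperplane is placed just inside
+  # the boundary point while the original sample stays on the inner-class side.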
+
+ binary_classifier = torch.nn.Linear(n_features, 2)
+
+ binary_classifier.weight.data = torch.stack((-w, w), 0)
+ binary_classifier.bias.data = torch.stack((-b, b), 0)
+
+ binary_classifier = binary_classifier.to(device)
+
+ linearized_model = __KwargsSequential(
+ networks.Lambda(
+ lambda x, **kwargs: classifier(x, features_only=True, **kwargs)),
+ binary_classifier)
+
+ return linearized_model
+
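+# Outside of the binarization test the defense's original evaluation convention is
+# kept: epsilon and step size are doubled to match the [-1, 1] input scaling used
+# during training (see fs_main.py).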
+if not args.binarization_test:
+ config_fgsm["epsilon"] *= 2.0
+ config_pgd["epsilon"] *= 2.0
+ config_cw["epsilon"] *= 2.0
+ config_fgsm["step_size"] *= 2.0
+ config_pgd["step_size"] *= 2.0
+ config_cw["step_size"] *= 2.0
+else:
+ config_auto_pgd_dlr_t["n_classes"] = 2
+
+print(f"Epsilon: {args.epsilon}")
+if args.more_steps:
+ config_pgd["step_size"] /= 5.0
+ config_cw["step_size"] /= 5.0
+ config_pgd["num_steps"] *= 10
+ config_cw["num_steps"] *= 10
+
+ config_auto_pgd_ce["num_steps"] *= 10
+ config_auto_pgd_dlr["num_steps"] *= 10
+ print("More & finer steps")
+
+
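+# Decision boundary binarization test: a binary readout with known inner and boundary
+# points inside the epsilon ball is attached to the defense's feature extractor, and
+# the given attack has to find the adversarial region that provably exists.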
+def test_test(net, feature_extractor, config):
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ print("num_samples_test:", args.num_samples_test)
+ print("test epsilon:", config["epsilon"])
+ scores_logit_differences_and_validation_accuracies = \
+ active_tests.decision_boundary_binarization.interior_boundary_discrimination_attack(
+ feature_extractor,
+ testloader,
+ attack_fn=lambda m, l, kwargs: test(0, create_attack(m), l, verbose=False,
+ inverse_acc=True, return_advs=True, **kwargs),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=config["epsilon"],
+ norm="linf",
+ lr=100000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=args.num_samples_test,
+ device=device,
+ n_samples_evaluation=200,#args.num_samples_test * 10
+ n_samples_asr_evaluation=200,
+ # TODO: use the right arguments here again!
+ # relative_inner_boundary_gap=0.00,
+ rescale_logits="adaptive",
+ decision_boundary_closeness=0.9999,
+ sample_training_data_from_corners=args.sample_from_corners,
+ #train_classifier_fn=partial(train_classifier, classifier=feature_extractor)
+ )
+
+ print(active_tests.decision_boundary_binarization.format_result(
+ scores_logit_differences_and_validation_accuracies,
+ args.num_samples_test))
+
+
+def test(epoch, net, loader, verbose=True, inverse_acc=False,
+ return_advs=False):
+ # net.eval()
+ test_loss = 0
+ correct = 0
+ total = 0
+
+ if verbose:
+ iterator = tqdm(loader, ncols=0, leave=False)
+ else:
+ iterator = loader
+
+ if return_advs:
+ x_adv = []
+ logits_adv = []
+ else:
+ x_adv = None
+ logits_adv = None
+
+ for batch_idx, (inputs, targets) in enumerate(iterator):
+ start_time = time.time()
+ inputs, targets = inputs.to(device), targets.to(device)
+
+ pert_inputs = inputs.detach()
+
+ res = net(pert_inputs, targets)
+ if isinstance(res, tuple):
+ outputs, _, x_adv_it = res
+ else:
+ outputs = res
+
+ if return_advs:
+ x_adv.append(x_adv_it)
+ logits_adv.append(outputs)
+
+ loss = criterion(outputs, targets)
+ test_loss += loss.item()
+
+ duration = time.time() - start_time
+
+ _, predicted = outputs.max(1)
+ batch_size = targets.size(0)
+ total += batch_size
+ correct_num = predicted.eq(targets).sum().item()
+ correct += correct_num
+ if verbose:
+ iterator.set_description(
+ "Accuracy:" + str(predicted.eq(targets).sum().item() / targets.size(0)))
+
+ if batch_idx % args.log_step == 0:
+ print(
+ "step %d, duration %.2f, test acc %.2f, avg-acc %.2f, loss %.2f"
+ % (batch_idx, duration, 100. * correct_num / batch_size,
+ 100. * correct / total, test_loss / total))
+
+ if return_advs:
+ x_adv = torch.cat(x_adv, 0)
+ logits_adv = torch.cat(logits_adv, 0)
+
+ acc = 100. * correct / total
+
+ if inverse_acc:
+ acc = (100 - acc) / 100.0
+
+ if verbose:
+ print("Robust Accuracy:", acc)
+
+ # print('Val acc:', acc)
+ return acc, (x_adv, logits_adv)
+
+
+if args.resume and args.init_model_pass != '-1':
+ # Load checkpoint.
+ print('==> Resuming from checkpoint..')
+ if args.model_dir is not None:
+ f_path_latest = os.path.join(args.model_dir, 'latest')
+ f_path = os.path.join(args.model_dir,
+ ('checkpoint-%s' % args.init_model_pass))
+ if args.model_path is not None:
+ f_path = args.model_path
+ f_path_latest = args.model_path
+ if not os.path.isfile(f_path):
+ print('train from scratch: no checkpoint directory or file found')
+ elif args.init_model_pass == 'latest' and os.path.isfile(
+ f_path_latest):
+ checkpoint = torch.load(f_path_latest, map_location="cpu")
+ basic_net.load_state_dict(
+ {(k[len("module.basic_net."):] if k.startswith(
+ "module.basic_net.") else k): v
+ for k, v in checkpoint['net'].items()})
+ start_epoch = checkpoint['epoch']
+ print('resuming from epoch %s in latest' % start_epoch)
+ elif os.path.isfile(f_path):
+ checkpoint = torch.load(f_path)
+ # net.load_state_dict(checkpoint['net'])
+ basic_net.load_state_dict(
+ {(k[len("module.basic_net."):] if k.startswith(
+ "module.basic_net.") else k): v
+ for k, v in checkpoint['net'].items()})
+ start_epoch = checkpoint['epoch']
+ print('resuming from epoch %s' % start_epoch)
+ elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
+ print('train from scratch: no checkpoint directory or file found')
+
+attack_list = args.attack_method_list.split('-')
+attack_num = len(attack_list)
+
+for attack_idx in range(attack_num):
+
+ args.attack_method = attack_list[attack_idx]
+
+ if args.attack_method == 'natural':
+ print()
+ print('-----natural non-adv mode -----')
+ # config is only dummy, not actually used
+ create_attack = lambda n: Attack_None(n, config_natural)
+ elif args.attack_method.upper() == 'FGSM':
+ print()
+ print('-----FGSM adv mode -----')
+ create_attack = lambda n: Attack_PGD(n, config_fgsm)
+ elif args.attack_method.upper() == 'PGD':
+ print()
+ print('-----PGD adv mode -----')
+ create_attack = lambda n: Attack_PGD(n, config_pgd)
+ elif args.attack_method.upper() == 'CW':
+ print()
+ print('-----CW adv mode -----')
+ create_attack = lambda n: Attack_PGD(n, config_cw)
+ elif args.attack_method.upper() == 'BETTERPGD':
+ print()
+ print('-----Better PGD adv mode -----')
+ create_attack = lambda n: Attack_BetterPGD(n, config_pgd)
+ elif args.attack_method.upper() == 'BETTERCW':
+ print()
+ print('-----Better CW adv mode -----')
+ create_attack = lambda n: Attack_BetterPGD(n, config_cw)
+ elif args.attack_method.upper() == 'AUTOPGDCE':
+ print()
+ print('-----Auto PGD (CE) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce)
+ elif args.attack_method.upper() == 'AUTOPGDDLR':
+ print()
+ print('-----Auto PGD (DLR) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr)
+ elif args.attack_method.upper() == 'AUTOPGDDLRT':
+ print()
+ print('-----Auto PGD (DLR, targeted) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_t)
+ elif args.attack_method.upper() == 'AUTOPGDCE+':
+ print()
+ print('-----Auto PGD+ (CE) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce_plus)
+ elif args.attack_method.upper() == 'AUTOPGDDLR+':
+ print()
+ print('-----Auto PGD+ (DLR) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_plus)
+ else:
+ raise Exception(
+ 'Should be a valid attack method. The specified attack method is: {}'
+ .format(args.attack_method))
+
+ if args.binarization_test or args.attack_method.upper().startswith("AUTOPGD"):
+ specific_net = ZeroOneOneOneNetwork(basic_net)
+ specific_net.eval()
+ net = create_attack(specific_net)
+ else:
+ net = create_attack(basic_net)
+
+ if device == 'cuda':
+ net = torch.nn.DataParallel(net)
+ if "specific_net" in locals():
+ if not isinstance(specific_net, torch.nn.DataParallel):
+ specific_net = torch.nn.DataParallel(specific_net)
+ cudnn.benchmark = True
+
+ criterion = nn.CrossEntropyLoss()
+
+ if args.binarization_test:
+ test_test(net, specific_net, config_pgd)
+ else:
+ test(0, net, testloader)
diff --git a/case_studies/feature_scatter/fs_eval.sh b/case_studies/feature_scatter/fs_eval.sh
new file mode 100644
index 0000000..42fb8a9
--- /dev/null
+++ b/case_studies/feature_scatter/fs_eval.sh
@@ -0,0 +1,15 @@
+epsilon=${1:-8}
+attack=${2:-pgd-autopgddlr+}
+
+export PYTHONPATH=./:$PYTHONPATH
+python3 case_studies/feature_scatter/fs_eval.py \
+ --model-path=checkpoints/feature_scattering_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=$attack \
+ --dataset=cifar10 \
+ --batch_size_test=256 \
+ --resume \
+ --epsilon=$epsilon
+
+#natural-fgsm-pgd-cw
\ No newline at end of file
diff --git a/case_studies/feature_scatter/fs_eval_test.sh b/case_studies/feature_scatter/fs_eval_test.sh
new file mode 100644
index 0000000..84ef3b5
--- /dev/null
+++ b/case_studies/feature_scatter/fs_eval_test.sh
@@ -0,0 +1,47 @@
+epsilon=${1:-8}
+attack=${2:-pgd-autopgddlr+}
+nsamples=${3:-2048}
+#attack=${2-pgd-autopgddlr-autopgddlrt-autopgddlrplus}
+export PYTHONPATH=./:$PYTHONPATH
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and few steps (20)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+python3 case_studies/feature_scatter/fs_eval.py \
+ --model-path=checkpoints/feature_scattering_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=$attack \
+ --dataset=cifar10 \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --resume \
+ --epsilon=$epsilon
+
+exit
+
+# autopgddlr-autopgdce-autopgddlr+-autopgdce+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and more steps (200)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+
+python3 case_studies/feature_scatter/fs_eval.py \
+ --model-path=checkpoints/feature_scattering_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=$attack \
+ --dataset=cifar10 \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=999 \
+ --n-boundary-points=1 \
+ --resume \
+ --epsilon=$epsilon \
+ --more-steps
diff --git a/case_studies/feature_scatter/fs_main.py b/case_studies/feature_scatter/fs_main.py
new file mode 100644
index 0000000..5ecd63e
--- /dev/null
+++ b/case_studies/feature_scatter/fs_main.py
@@ -0,0 +1,338 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Train Adversarially Robust Models with Feature Scattering'''
+from __future__ import print_function
+import time
+import numpy as np
+import random
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torch.nn.functional as F
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+
+from torch.autograd.gradcheck import zero_gradients
+import copy
+from torch.autograd import Variable
+from PIL import Image
+
+import os
+import argparse
+import datetime
+
+from tqdm import tqdm
+from models import *
+
+import utils
+from utils import softCrossEntropy
+from utils import one_hot_tensor
+from attack_methods import Attack_FeaScatter
+
+torch.set_printoptions(threshold=10000)
+np.set_printoptions(threshold=np.inf)
+
+parser = argparse.ArgumentParser(description='Feature Scattering Training')
+
+# add type keyword to registries
+parser.register('type', 'bool', utils.str2bool)
+
+parser.add_argument('--resume',
+ '-r',
+ action='store_true',
+ help='resume from checkpoint')
+parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
+parser.add_argument('--adv_mode',
+ default='feature_scatter',
+ type=str,
+ help='adv_mode (feature_scatter)')
+parser.add_argument('--model_dir', type=str, help='model path')
+parser.add_argument('--init_model_pass',
+ default='-1',
+ type=str,
+ help='init model pass (-1: from scratch; K: checkpoint-K)')
+parser.add_argument('--max_epoch',
+ default=200,
+ type=int,
+ help='max number of epochs')
+parser.add_argument('--save_epochs', default=100, type=int, help='save period')
+parser.add_argument('--decay_epoch1',
+ default=60,
+ type=int,
+ help='learning rate decay epoch one')
+parser.add_argument('--decay_epoch2',
+ default=90,
+ type=int,
+ help='learning rate decay point two')
+parser.add_argument('--decay_rate',
+ default=0.1,
+ type=float,
+ help='learning rate decay rate')
+parser.add_argument('--batch_size_train',
+ default=128,
+ type=int,
+ help='batch size for training')
+parser.add_argument('--momentum',
+ default=0.9,
+ type=float,
+ help='momentum (1-tf.momentum)')
+parser.add_argument('--weight_decay',
+ default=2e-4,
+ type=float,
+ help='weight decay')
+parser.add_argument('--log_step', default=10, type=int, help='log_step')
+
+# number of classes and image size will be updated below based on the dataset
+parser.add_argument('--num_classes', default=10, type=int, help='num classes')
+parser.add_argument('--image_size', default=32, type=int, help='image size')
+parser.add_argument('--dataset', default='cifar10', type=str,
+ help='dataset') # concat cascade
+
+args = parser.parse_args()
+
+if args.dataset == 'cifar10':
+ print('------------cifar10---------')
+ args.num_classes = 10
+ args.image_size = 32
+elif args.dataset == 'cifar100':
+ print('----------cifar100---------')
+ args.num_classes = 100
+ args.image_size = 32
+elif args.dataset == 'svhn':
+  print('------------svhn---------')
+ args.num_classes = 10
+ args.image_size = 32
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+start_epoch = 0
+
+# Data
+print('==> Preparing data..')
+
+if args.dataset == 'cifar10' or args.dataset == 'cifar100':
+ transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
+ ])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
+ ])
+elif args.dataset == 'svhn':
+ transform_train = transforms.Compose([
+ # transforms.RandomCrop(32, padding=4),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
+ ])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
+ ])
+
+if args.dataset == 'cifar10':
+ trainset = torchvision.datasets.CIFAR10(root='./data',
+ train=True,
+ download=True,
+ transform=transform_train)
+ testset = torchvision.datasets.CIFAR10(root='./data',
+ train=False,
+ download=True,
+ transform=transform_test)
+ classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
+ 'ship', 'truck')
+elif args.dataset == 'cifar100':
+ trainset = torchvision.datasets.CIFAR100(root='./data',
+ train=True,
+ download=True,
+ transform=transform_train)
+ testset = torchvision.datasets.CIFAR100(root='./data',
+ train=False,
+ download=True,
+ transform=transform_test)
+
+elif args.dataset == 'svhn':
+ trainset = torchvision.datasets.SVHN(root='./data',
+ split='train',
+ download=True,
+ transform=transform_train)
+ testset = torchvision.datasets.SVHN(root='./data',
+ split='test',
+ download=True,
+ transform=transform_test)
+
+trainloader = torch.utils.data.DataLoader(trainset,
+ batch_size=args.batch_size_train,
+ shuffle=True,
+ num_workers=2)
+
+print('==> Building model..')
+
+if args.dataset == 'cifar10' or args.dataset == 'cifar100' or args.dataset == 'svhn':
+  print('---wide resnet-----')
+ basic_net = WideResNet(depth=28,
+ num_classes=args.num_classes,
+ widen_factor=10)
+
+
+def print_para(net):
+ for name, param in net.named_parameters():
+ if param.requires_grad:
+ print(name)
+ print(param.data)
+ break
+
+
+basic_net = basic_net.to(device)
+
+# config for feature scatter
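+# epsilon and step size are 8/255 * 2 because the inputs are normalized to [-1, 1]
+# (see the Normalize transforms above), which doubles the input range.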
+config_feature_scatter = {
+ 'train': True,
+ 'epsilon': 8.0 / 255 * 2,
+ 'num_steps': 1,
+ 'step_size': 8.0 / 255 * 2,
+ 'random_start': True,
+ 'ls_factor': 0.5,
+}
+
+if args.adv_mode.lower() == 'feature_scatter':
+ print('-----Feature Scatter mode -----')
+ net = Attack_FeaScatter(basic_net, config_feature_scatter)
+else:
+ print('-----OTHER_ALGO mode -----')
+ raise NotImplementedError("Please implement this algorithm first!")
+
+if device == 'cuda':
+ net = torch.nn.DataParallel(net)
+ cudnn.benchmark = True
+
+optimizer = optim.SGD(net.parameters(),
+ lr=args.lr,
+ momentum=args.momentum,
+ weight_decay=args.weight_decay)
+
+if args.resume and args.init_model_pass != '-1':
+ # Load checkpoint.
+ print('==> Resuming from checkpoint..')
+ f_path_latest = os.path.join(args.model_dir, 'latest')
+ f_path = os.path.join(args.model_dir,
+ ('checkpoint-%s' % args.init_model_pass))
+ if not os.path.isdir(args.model_dir):
+ print('train from scratch: no checkpoint directory or file found')
+ elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
+ checkpoint = torch.load(f_path_latest)
+ net.load_state_dict(checkpoint['net'])
+ start_epoch = checkpoint['epoch'] + 1
+ print('resuming from epoch %s in latest' % start_epoch)
+ elif os.path.isfile(f_path):
+ checkpoint = torch.load(f_path)
+ net.load_state_dict(checkpoint['net'])
+ start_epoch = checkpoint['epoch'] + 1
+ print('resuming from epoch %s' % (start_epoch - 1))
+ elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
+ print('train from scratch: no checkpoint directory or file found')
+
+soft_xent_loss = softCrossEntropy()
+
+
+def train_fun(epoch, net):
+ print('\nEpoch: %d' % epoch)
+ net.train()
+
+ train_loss = 0
+ correct = 0
+ total = 0
+
+ # update learning rate
+ if epoch < args.decay_epoch1:
+ lr = args.lr
+ elif epoch < args.decay_epoch2:
+ lr = args.lr * args.decay_rate
+ else:
+ lr = args.lr * args.decay_rate * args.decay_rate
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+
+ def get_acc(outputs, targets):
+ _, predicted = outputs.max(1)
+ total = targets.size(0)
+ correct = predicted.eq(targets).sum().item()
+ acc = 1.0 * correct / total
+ return acc
+
+ iterator = tqdm(trainloader, ncols=0, leave=False)
+ for batch_idx, (inputs, targets) in enumerate(iterator):
+ start_time = time.time()
+ inputs, targets = inputs.to(device), targets.to(device)
+
+ adv_acc = 0
+
+ optimizer.zero_grad()
+
+ # forward
+ outputs, loss_fs = net(inputs.detach(), targets)
+
+ optimizer.zero_grad()
+ loss = loss_fs.mean()
+ loss.backward()
+
+ optimizer.step()
+
+ train_loss = loss.item()
+
+ duration = time.time() - start_time
+ if batch_idx % args.log_step == 0:
+ if adv_acc == 0:
+ adv_acc = get_acc(outputs, targets)
+ iterator.set_description(str(adv_acc))
+
+ nat_outputs, _ = net(inputs, targets, attack=False)
+ nat_acc = get_acc(nat_outputs, targets)
+
+ print(
+ "epoch %d, step %d, lr %.4f, duration %.2f, training nat acc %.2f, training adv acc %.2f, training adv loss %.4f"
+ % (epoch, batch_idx, lr, duration, 100 * nat_acc,
+ 100 * adv_acc, train_loss))
+
+ if epoch % args.save_epochs == 0 or epoch >= args.max_epoch - 2:
+ print('Saving..')
+ f_path = os.path.join(args.model_dir, ('checkpoint-%s' % epoch))
+    state = {
+      'net': net.state_dict(),
+      'epoch': epoch,
+      # 'optimizer': optimizer.state_dict()
+    }
+ if not os.path.isdir(args.model_dir):
+ os.mkdir(args.model_dir)
+ torch.save(state, f_path)
+
+ if epoch >= 0:
+ print('Saving latest @ epoch %s..' % (epoch))
+ f_path = os.path.join(args.model_dir, 'latest')
+ state = {
+ 'net': net.state_dict(),
+ 'epoch': epoch,
+ 'optimizer': optimizer.state_dict()
+ }
+ if not os.path.isdir(args.model_dir):
+ os.mkdir(args.model_dir)
+ torch.save(state, f_path)
+
+
+for epoch in range(start_epoch, args.max_epoch):
+ train_fun(epoch, net)
diff --git a/case_studies/feature_scatter/fs_train.sh b/case_studies/feature_scatter/fs_train.sh
new file mode 100644
index 0000000..dabe228
--- /dev/null
+++ b/case_studies/feature_scatter/fs_train.sh
@@ -0,0 +1,16 @@
+export PYTHONPATH=./:$PYTHONPATH
+model_dir=~/models/feature_scatter_cifar10/
+mkdir -p $model_dir
+CUDA_VISIBLE_DEVICES=0 python3 fs_main.py \
+ --resume \
+ --adv_mode='feature_scatter' \
+ --lr=0.1 \
+ --model_dir=$model_dir \
+ --init_model_pass=latest \
+ --max_epoch=200 \
+ --save_epochs=100 \
+ --decay_epoch1=60 \
+ --decay_epoch2=90 \
+ --batch_size_train=60 \
+ --dataset=cifar10
+
diff --git a/case_studies/feature_scatter/ftsc_utils.py b/case_studies/feature_scatter/ftsc_utils.py
new file mode 100644
index 0000000..a778f13
--- /dev/null
+++ b/case_studies/feature_scatter/ftsc_utils.py
@@ -0,0 +1,97 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Some utility functions
+'''
+import os
+import sys
+import time
+import datetime
+import math
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+import torch.nn.functional as F
+
+import numpy as np
+
+
+def one_hot_tensor(y_batch_tensor, num_classes, device):
+  y_tensor = torch.zeros(y_batch_tensor.size(0), num_classes,
+                         device=device)
+ y_tensor[np.arange(len(y_batch_tensor)), y_batch_tensor] = 1.0
+ return y_tensor
+
+
+def label_smoothing(y_batch_tensor, num_classes, delta):
+ y_batch_smooth = (1 - delta - delta / (num_classes - 1)) * \
+ y_batch_tensor + delta / (num_classes - 1)
+ return y_batch_smooth
+
+
+def str2bool(v):
+ return v.lower() in ("yes", "true", "t", "1")
+
+
+class softCrossEntropy(nn.Module):
+ def __init__(self, reduce=True):
+ super(softCrossEntropy, self).__init__()
+ self.reduce = reduce
+ return
+
+ def forward(self, inputs, targets):
+ """
+ :param inputs: predictions
+ :param targets: target labels in vector form
+ :return: loss
+ """
+ log_likelihood = -F.log_softmax(inputs, dim=1)
+ sample_num, class_num = targets.shape
+ if self.reduce:
+ loss = torch.sum(torch.mul(log_likelihood, targets)) / sample_num
+ else:
+ loss = torch.sum(torch.mul(log_likelihood, targets), 1)
+
+ return loss
+
+
+class CWLoss(nn.Module):
+ def __init__(self, num_classes, margin=50, reduce=True):
+ super(CWLoss, self).__init__()
+ self.num_classes = num_classes
+ self.margin = margin
+ self.reduce = reduce
+ return
+
+ def forward(self, logits, targets):
+ """
+    :param logits: predicted logits
+ :param targets: target labels
+ :return: loss
+ """
+ onehot_targets = one_hot_tensor(targets, self.num_classes,
+ targets.device)
+
+ self_loss = torch.sum(onehot_targets * logits, dim=1)
+ other_loss = torch.max(
+ (1 - onehot_targets) * logits - onehot_targets * 1000, dim=1)[0]
+
+ loss = -torch.sum(torch.clamp(self_loss - other_loss + self.margin, 0))
+
+ if self.reduce:
+ sample_num = onehot_targets.shape[0]
+ loss = loss / sample_num
+
+ return loss
diff --git a/case_studies/feature_scatter/models/__init__.py b/case_studies/feature_scatter/models/__init__.py
new file mode 100644
index 0000000..1a5e1a7
--- /dev/null
+++ b/case_studies/feature_scatter/models/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .wideresnet import *
diff --git a/case_studies/feature_scatter/models/wideresnet.py b/case_studies/feature_scatter/models/wideresnet.py
new file mode 100644
index 0000000..c1e8638
--- /dev/null
+++ b/case_studies/feature_scatter/models/wideresnet.py
@@ -0,0 +1,143 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class BasicBlock(nn.Module):
+ def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
+ super(BasicBlock, self).__init__()
+ self.bn1 = nn.BatchNorm2d(in_planes)
+ self.relu1 = nn.ReLU(inplace=True)
+ self.conv1 = nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False)
+ self.bn2 = nn.BatchNorm2d(out_planes)
+ self.relu2 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(out_planes,
+ out_planes,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False)
+ self.droprate = dropRate
+ self.equalInOut = (in_planes == out_planes)
+ self.convShortcut = (not self.equalInOut) and nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=1,
+ stride=stride,
+ padding=0,
+ bias=False) or None
+
+ def forward(self, x):
+ if not self.equalInOut:
+ x = self.relu1(self.bn1(x))
+ else:
+ out = self.relu1(self.bn1(x))
+ out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
+ if self.droprate > 0:
+ out = F.dropout(out, p=self.droprate, training=self.training)
+ out = self.conv2(out)
+ return torch.add(x if self.equalInOut else self.convShortcut(x), out)
+
+
+class NetworkBlock(nn.Module):
+ def __init__(self,
+ nb_layers,
+ in_planes,
+ out_planes,
+ block,
+ stride,
+ dropRate=0.0):
+ super(NetworkBlock, self).__init__()
+ self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
+ stride, dropRate)
+
+ def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
+ dropRate):
+ layers = []
+ for i in range(int(nb_layers)):
+ layers.append(
+ block(i == 0 and in_planes or out_planes, out_planes,
+ i == 0 and stride or 1, dropRate))
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ return self.layer(x)
+
+
+class WideResNet(nn.Module):
+ def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
+ super(WideResNet, self).__init__()
+ nChannels = [
+ 16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
+ ]
+ assert ((depth - 4) % 6 == 0)
+ n = (depth - 4) / 6
+ block = BasicBlock
+ # 1st conv before any network block
+ self.conv1 = nn.Conv2d(3,
+ nChannels[0],
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False)
+ # 1st block
+ self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
+ dropRate)
+ # 2nd block
+ self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
+ dropRate)
+ # 3rd block
+ self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
+ dropRate)
+ # global average pooling and classifier
+ self.bn1 = nn.BatchNorm2d(nChannels[3])
+ self.relu = nn.ReLU(inplace=True)
+ self.fc = nn.Linear(nChannels[3], num_classes)
+ self.nChannels = nChannels[3]
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+ elif isinstance(m, nn.Linear):
+ m.bias.data.zero_()
+
+ def forward(self, x, features_only=False, features_and_logits=False):
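+    # features_only / features_and_logits expose the penultimate (pooled) features;
+    # the binarization test uses these to attach a fresh binary readout on top.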
+ if features_only: assert not features_and_logits
+ out = self.conv1(x)
+ out = self.block1(out)
+ out = self.block2(out)
+ out = self.block3(out)
+ out = self.relu(self.bn1(out))
+ out = F.avg_pool2d(out, 8)
+ out = out.view(-1, self.nChannels)
+
+ if features_only:
+ return out
+ elif features_and_logits:
+ return out, self.fc(out)
+ else:
+ return self.fc(out), out.view(x.size(0), -1)
diff --git a/case_studies/feature_scatter/ot.py b/case_studies/feature_scatter/ot.py
new file mode 100644
index 0000000..2c6f086
--- /dev/null
+++ b/case_studies/feature_scatter/ot.py
@@ -0,0 +1,140 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+
+OT using IPOT and Sinkhorn algorithm
+
+"""
+
+import torch
+from torch.autograd import Variable
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ftsc_utils import softCrossEntropy
+
+import numpy as np
+
+
+def sinkhorn_loss_joint_IPOT(alpha, beta, x_feature, y_feature, x_label,
+ y_label, epsilon, m, n):
+
+ C_fea = get_cost_matrix(x_feature, y_feature)
+ C = C_fea
+ T = sinkhorn(C, 0.01, 100)
+ # T = IPOT(C, 1)
+ batch_size = C.size(0)
+ cost_ot = torch.sum(T * C)
+ return cost_ot
+
+
+def sinkhorn(C, epsilon, niter=50, device='cuda'):
+ m = C.size(0)
+ n = C.size(1)
+  mu = Variable(1. / m * torch.FloatTensor(m).fill_(1).to(device),
+                requires_grad=False)
+  nu = Variable(1. / n * torch.FloatTensor(n).fill_(1).to(device),
+                requires_grad=False)
+
+ # Parameters of the Sinkhorn algorithm.
+ rho = 1 # (.5) **2 # unbalanced transport
+ tau = -.8 # nesterov-like acceleration
+ lam = rho / (rho + epsilon) # Update exponent
+ thresh = 10**(-1) # stopping criterion
+
+ # Elementary operations .....................................................................
+ def ave(u, u1):
+ "Barycenter subroutine, used by kinetic acceleration through extrapolation."
+ return tau * u + (1 - tau) * u1
+
+ def M(u, v):
+ "Modified cost for logarithmic updates"
+ "$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
+ return (-C + u.unsqueeze(1) + v.unsqueeze(0)) / epsilon
+
+ def lse(A):
+ "log-sum-exp"
+ return torch.log(torch.exp(A).sum(1, keepdim=True) +
+ 1e-6) # add 10^-6 to prevent NaN
+
+ # Actual Sinkhorn loop ......................................................................
+ u, v, err = 0. * mu, 0. * nu, 0.
+ actual_nits = 0 # to check if algorithm terminates because of threshold or max iterations reached
+
+ for i in range(niter):
+ u1 = u # useful to check the update
+ u = epsilon * (torch.log(mu) - lse(M(u, v)).squeeze()) + u
+ v = epsilon * (torch.log(nu) - lse(M(u, v).t()).squeeze()) + v
+ # accelerated unbalanced iterations
+ # u = ave( u, lam * ( epsilon * ( torch.log(mu) - lse(M(u,v)).squeeze() ) + u ) )
+ # v = ave( v, lam * ( epsilon * ( torch.log(nu) - lse(M(u,v).t()).squeeze() ) + v ) )
+ err = (u - u1).abs().sum()
+
+ actual_nits += 1
+ if (err < thresh).cpu().data.numpy():
+ break
+ U, V = u, v
+
+ pi = torch.exp(M(U, V)) # Transport plan pi = diag(a)*K*diag(b)
+  pi = pi.to(device).float()
+ return pi # return the transport
+
+
+def IPOT(cost_matrix, beta=1, device='cuda'):
+ m = cost_matrix.size(0)
+ n = cost_matrix.size(1)
+ sigma = 1.0 / n * torch.ones([n, 1]).to(device)
+
+ T = torch.ones([m, n]).to(device)
+ A = torch.exp(-cost_matrix / beta)
+
+ for t in range(50):
+ # BUG: should be elementwise product, * in numpy
+ #Q = torch.mm(A, T)
+    Q = A * T  # Hadamard (elementwise) product
+ for k in range(1):
+ delta = 1.0 / (m * torch.mm(Q, sigma))
+ sigma = 1.0 / (n * torch.mm(delta.t(), Q)).t()
+ #sigma = 1.0 / (n * torch.mv(Q, delta))
+ tmp = torch.mm(construct_diag(torch.squeeze(delta)), Q)
+ T = torch.mm(tmp, construct_diag(torch.squeeze(sigma)))
+
+ return T
+
+
+def construct_diag(d):
+ n = d.size(0)
+ x = torch.zeros([n, n]).to(d.device)
+ x[range(n), range(n)] = d.view(-1)
+ return x
+
+
+def get_cost_matrix(x_feature, y_feature):
+ C_fea = cost_matrix_cos(x_feature, y_feature) # Wasserstein cost function
+ return C_fea
+
+
+def cost_matrix_cos(x, y, p=2):
+  # return the m*n sized cost matrix
+  "Returns the m*n matrix of cosine distances 1 - cos(x_i, y_j), clamped at 0."
+ # un squeeze differently so that the tensors can broadcast
+ # dim-2 (summed over) is the feature dim
+ x_col = x.unsqueeze(1)
+ y_lin = y.unsqueeze(0)
+
+ cos = nn.CosineSimilarity(dim=2, eps=1e-6)
+ c = torch.clamp(1 - cos(x_col, y_lin), min=0)
+
+ return c
\ No newline at end of file
diff --git a/case_studies/inputtransformations/README.md b/case_studies/inputtransformations/README.md
new file mode 100644
index 0000000..2a07891
--- /dev/null
+++ b/case_studies/inputtransformations/README.md
@@ -0,0 +1,27 @@
+# Countering Adversarial Images using Input Transformations
+
+Paper: [Guo et al. 2018](https://arxiv.org/abs/1711.00117)
+
+## Setup
+
+Run `./setup.sh` to fetch models.
+
+## Breaks
+
+* Bit-depth reduction: `bitdepth.ipynb` (broken with BPDA)
+* JPEG: `jpeg.ipynb` (broken with BPDA)
+* Cropping: `crop.ipynb` (broken with EOT)
+* Quilting: `quilt.ipynb` (broken with EOT+BPDA)
+* Total variation denoising: `tv.ipynb` (broken with EOT+BPDA)
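+
+The breaks above combine BPDA (replacing the non-differentiable preprocessing with the
+identity on the backward pass) and/or EOT (averaging gradients over the defense's
+randomness). A minimal sketch of the BPDA part, assuming `defend_fn` is one of the
+batched numpy preprocessors in `defense.py` (this is an illustration, not the
+repository's `BPDA` class):
+
+```python
+import tensorflow as tf
+
+def bpda_wrap(x, defend_fn):
+  # Forward pass: run the non-differentiable defense as a numpy op.
+  defended = tf.py_func(defend_fn, [x], tf.float32)
+  defended.set_shape(x.get_shape())
+  # Backward pass: treat the defense as the identity (straight-through estimator).
+  return x + tf.stop_gradient(defended - x)
+```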
+
+## [robustml] evaluation
+
+Run with:
+
+```bash
+python robustml_attack.py --imagenet-path <imagenet-path> --defense <defense>
+```
+
+Where `<defense>` is one of `bitdepth`, `jpeg`, `crop`, `quilt`, or `tv`.
+
+[robustml]: https://github.com/robust-ml/robustml
diff --git a/case_studies/inputtransformations/adversarial_evaluation.py b/case_studies/inputtransformations/adversarial_evaluation.py
new file mode 100644
index 0000000..e7b02d5
--- /dev/null
+++ b/case_studies/inputtransformations/adversarial_evaluation.py
@@ -0,0 +1,98 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import tensorflow as tf
+import torch
+import torchvision
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+from robustml_model import InputTransformations
+from robustml_attack import BPDA
+import argparse
+import numpy as np
+import robustml
+import torch.utils.data
+import sys
+import tqdm
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--imagenet-path', type=str, required=True,
+ help='directory containing `val.txt` and `val/` folder')
+ parser.add_argument('--defense', type=str, required=True,
+ help='bitdepth | jpeg | crop | quilt | tv')
+ parser.add_argument('--n-samples', type=int, default=100)
+ parser.add_argument('--batch-size', type=int, default=128)
+ parser.add_argument("--epsilon", default=0.05, type=float)
+ parser.add_argument("--pgd-steps", type=int, default=100)
+ args = parser.parse_args()
+
+ # set up TensorFlow session
+ sess = tf.Session()
+
+ # initialize a model
+ model = InputTransformations(sess, args.defense)
+
+ # initialize an attack (it's a white box attack, and it's allowed to look
+ # at the internals of the model in any way it wants)
+ # XXX restore
+ # TODO: use the distance conversion from original code; I think there is a
+ # factor sqrt(3) missing here
+ attack = BPDA(sess, model, args.epsilon * 299, debug=False, max_steps=args.pgd_steps)
+
+ # initialize a data provider for ImageNet images
+ provider = robustml.provider.ImageNet(args.imagenet_path, model.dataset.shape)
+
+ dataset = torchvision.datasets.ImageFolder(
+ os.path.join(args.imagenet_path, 'val'),
+ torchvision.transforms.Compose([
+ torchvision.transforms.Resize(299),
+ torchvision.transforms.CenterCrop(299),
+ torchvision.transforms.ToTensor(),
+ ]))
+ random_indices = list(range(len(provider)))
+ if args.n_samples == -1:
+ args.n_samples = len(random_indices)
+ np.random.shuffle(random_indices)
+ random_indices = random_indices[:args.n_samples]
+ dataset = torch.utils.data.Subset(dataset, random_indices)
+ data_loader = torch.utils.data.DataLoader(dataset,
+ batch_size=args.batch_size,
+ shuffle=False,
+ pin_memory=False)
+ success = 0
+ total = 0
+
+ for x_batch, y_batch in tqdm.tqdm(data_loader):
+ x_batch = x_batch.numpy().transpose((0, 2, 3, 1))
+ y_batch = y_batch.numpy()
+
+ total += len(x_batch)
+
+ x_batch_adv = attack.run(x_batch, y_batch, None)
+ y_batch_adv = model.classify(x_batch_adv)
+ # adv_acc = (y_batch_adv == y_batch).mean()
+ success += (y_batch_adv != y_batch).sum()
+
+ success_rate = success / total
+
+ print('attack success rate: %.2f%% (over %d data points)' % (success_rate*100, total))
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/case_studies/inputtransformations/adversarial_evaluation.sh b/case_studies/inputtransformations/adversarial_evaluation.sh
new file mode 100644
index 0000000..784255e
--- /dev/null
+++ b/case_studies/inputtransformations/adversarial_evaluation.sh
@@ -0,0 +1,11 @@
+nsamples=128
+
+epsilon=${1:-0.05}
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/inputtransformations/adversarial_evaluation.py \
+ --imagenet-path=/imagenet_dataset/ \
+ --batch-size=128 \
+ --n-samples=$nsamples \
+ --epsilon=$epsilon \
+ --defense=jpeg \
+ --pgd-steps=256
\ No newline at end of file
diff --git a/case_studies/inputtransformations/defense.py b/case_studies/inputtransformations/defense.py
new file mode 100644
index 0000000..a2018a7
--- /dev/null
+++ b/case_studies/inputtransformations/defense.py
@@ -0,0 +1,334 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import PIL
+import PIL.Image
+from io import BytesIO
+import numpy as np
+import tensorflow as tf
+
+def defend_reduce(arr, depth=3):
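+  # Bit-depth reduction: quantize each channel to `depth` bits by clearing the
+  # (8 - depth) low-order bits.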
+ arr = (arr * 255.0).astype(np.uint8)
+ shift = 8 - depth
+ arr = (arr >> shift) << shift
+ arr = arr.astype(np.float32)/255.0
+ return arr
+
+batched_defend_reduce = defend_reduce
+
+def defend_jpeg(input_array):
+ pil_image = PIL.Image.fromarray((input_array*255.0).astype(np.uint8))
+ f = BytesIO()
+ pil_image.save(f, format='jpeg', quality=75) # quality level specified in paper
+ jpeg_image = np.asarray(PIL.Image.open(f)).astype(np.float32)/255.0
+ return jpeg_image
+
+def batched_defend_jpeg(xs):
+ return np.stack([defend_jpeg(x) for x in xs], 0)
+
+# based on https://github.com/scikit-image/scikit-image/blob/master/skimage/restoration/_denoise_cy.pyx
+
+# super slow since this is implemented in pure python :'(
+
+def bregman(image, mask, weight, eps=1e-3, max_iter=100):
+ rows, cols, dims = image.shape
+ rows2 = rows + 2
+ cols2 = cols + 2
+ total = rows * cols * dims
+ shape_ext = (rows2, cols2, dims)
+
+ u = np.zeros(shape_ext)
+ dx = np.zeros(shape_ext)
+ dy = np.zeros(shape_ext)
+ bx = np.zeros(shape_ext)
+ by = np.zeros(shape_ext)
+
+ u[1:-1, 1:-1] = image
+ # reflect image
+ u[0, 1:-1] = image[1, :]
+ u[1:-1, 0] = image[:, 1]
+ u[-1, 1:-1] = image[-2, :]
+ u[1:-1, -1] = image[:, -2]
+
+ i = 0
+ rmse = np.inf
+ lam = 2 * weight
+ norm = (weight + 4 * lam)
+
+ while i < max_iter and rmse > eps:
+ rmse = 0
+
+ for k in range(dims):
+ for r in range(1, rows+1):
+ for c in range(1, cols+1):
+ uprev = u[r, c, k]
+
+ # forward derivatives
+ ux = u[r, c+1, k] - uprev
+ uy = u[r+1, c, k] - uprev
+
+ # Gauss-Seidel method
+ if mask[r-1, c-1]:
+ unew = (lam * (u[r+1, c, k] +
+ u[r-1, c, k] +
+ u[r, c+1, k] +
+ u[r, c-1, k] +
+ dx[r, c-1, k] -
+ dx[r, c, k] +
+ dy[r-1, c, k] -
+ dy[r, c, k] -
+ bx[r, c-1, k] +
+ bx[r, c, k] -
+ by[r-1, c, k] +
+ by[r, c, k]
+ ) + weight * image[r-1, c-1, k]
+ ) / norm
+ else:
+ # similar to the update step above, except we take
+ # lim_{weight->0} of the update step, effectively
+ # ignoring the l2 loss
+ unew = (u[r+1, c, k] +
+ u[r-1, c, k] +
+ u[r, c+1, k] +
+ u[r, c-1, k] +
+ dx[r, c-1, k] -
+ dx[r, c, k] +
+ dy[r-1, c, k] -
+ dy[r, c, k] -
+ bx[r, c-1, k] +
+ bx[r, c, k] -
+ by[r-1, c, k] +
+ by[r, c, k]
+ ) / 4.0
+ u[r, c, k] = unew
+
+ # update rms error
+ rmse += (unew - uprev)**2
+
+ bxx = bx[r, c, k]
+ byy = by[r, c, k]
+
+ # d_subproblem
+ s = ux + bxx
+ if s > 1/lam:
+ dxx = s - 1/lam
+ elif s < -1/lam:
+ dxx = s + 1/lam
+ else:
+ dxx = 0
+ s = uy + byy
+ if s > 1/lam:
+ dyy = s - 1/lam
+ elif s < -1/lam:
+ dyy = s + 1/lam
+ else:
+ dyy = 0
+
+ dx[r, c, k] = dxx
+ dy[r, c, k] = dyy
+
+ bx[r, c, k] += ux - dxx
+ by[r, c, k] += uy - dyy
+
+ rmse = np.sqrt(rmse / total)
+ i += 1
+
+ return np.squeeze(np.asarray(u[1:-1, 1:-1]))
+
+def defend_tv(input_array, keep_prob=0.5, lambda_tv=0.03):
+ mask = np.random.uniform(size=input_array.shape[:2])
+ mask = mask < keep_prob
+ return bregman(input_array, mask, weight=2.0/lambda_tv)
+
+def batched_defend_tv(xs):
+ return np.stack([defend_tv(x) for x in xs], 0)
+
+def make_defend_quilt(sess):
+ # setup for quilting
+ quilt_db = np.load('checkpoints/inputtransformations_inceptionv3/quilt_db.npy')
+ quilt_db_reshaped = quilt_db.reshape(1000000, -1)
+ TILE_SIZE = 5
+ TILE_OVERLAP = 2
+ tile_skip = TILE_SIZE - TILE_OVERLAP
+ K = 10
+ db_tensor = tf.placeholder(tf.float32, quilt_db_reshaped.shape)
+ query_imgs = tf.placeholder(tf.float32, (TILE_SIZE * TILE_SIZE * 3, None))
+ norms = tf.reduce_sum(tf.square(db_tensor), axis=1)[:, tf.newaxis] \
+ - 2*tf.matmul(db_tensor, query_imgs)
+ _, topk_indices = tf.nn.top_k(-tf.transpose(norms), k=K, sorted=False)
+ def min_error_table(arr, direction):
+ assert direction in ('horizontal', 'vertical')
+ y, x = arr.shape
+ cum = np.zeros_like(arr)
+ if direction == 'horizontal':
+ cum[:, -1] = arr[:, -1]
+ for ix in range(x-2, -1, -1):
+ for iy in range(y):
+ m = arr[iy, ix+1]
+ if iy > 0:
+ m = min(m, arr[iy-1, ix+1])
+ if iy < y - 1:
+ m = min(m, arr[iy+1, ix+1])
+ cum[iy, ix] = arr[iy, ix] + m
+ elif direction == 'vertical':
+ cum[-1, :] = arr[-1, :]
+ for iy in range(y-2, -1, -1):
+ for ix in range(x):
+ m = arr[iy+1, ix]
+ if ix > 0:
+ m = min(m, arr[iy+1, ix-1])
+ if ix < x - 1:
+ m = min(m, arr[iy+1, ix+1])
+ cum[iy, ix] = arr[iy, ix] + m
+ return cum
+ def index_exists(arr, index):
+ if arr.ndim != len(index):
+ return False
+ return all(i > 0 for i in index) and all(index[i] < arr.shape[i] for i in range(arr.ndim))
+ def assign_block(ix, iy, tile, synth):
+ posx = tile_skip * ix
+ posy = tile_skip * iy
+
+ if ix == 0 and iy == 0:
+ synth[posy:posy+TILE_SIZE, posx:posx+TILE_SIZE, :] = tile
+ elif iy == 0:
+ # first row, only have horizontal overlap of the block
+ tile_left = tile[:, :TILE_OVERLAP, :]
+ synth_right = synth[:TILE_SIZE, posx:posx+TILE_OVERLAP, :]
+ errors = np.sum(np.square(tile_left - synth_right), axis=2)
+ table = min_error_table(errors, direction='vertical')
+ # copy row by row into synth
+ xoff = np.argmin(table[0, :])
+ synth[posy, posx+xoff:posx+TILE_SIZE] = tile[0, xoff:]
+ for yoff in range(1, TILE_SIZE):
+ # explore nearby xoffs
+ candidates = [(yoff, xoff), (yoff, xoff-1), (yoff, xoff+1)]
+ index = min((i for i in candidates if index_exists(table, i)), key=lambda i: table[i])
+ xoff = index[1]
+ synth[posy+yoff, posx+xoff:posx+TILE_SIZE] = tile[yoff, xoff:]
+ elif ix == 0:
+ # first column, only have vertical overlap of the block
+ tile_up = tile[:TILE_OVERLAP, :, :]
+ synth_bottom = synth[posy:posy+TILE_OVERLAP, :TILE_SIZE, :]
+ errors = np.sum(np.square(tile_up - synth_bottom), axis=2)
+ table = min_error_table(errors, direction='horizontal')
+ # copy column by column into synth
+ yoff = np.argmin(table[:, 0])
+ synth[posy+yoff:posy+TILE_SIZE, posx] = tile[yoff:, 0]
+ for xoff in range(1, TILE_SIZE):
+ # explore nearby yoffs
+ candidates = [(yoff, xoff), (yoff-1, xoff), (yoff+1, xoff)]
+ index = min((i for i in candidates if index_exists(table, i)), key=lambda i: table[i])
+ yoff = index[0]
+ synth[posy+yoff:posy+TILE_SIZE, posx+xoff] = tile[yoff:, xoff]
+ else:
+ # glue cuts along diagonal
+ tile_up = tile[:TILE_OVERLAP, :, :]
+ synth_bottom = synth[posy:posy+TILE_OVERLAP, :TILE_SIZE, :]
+ errors_up = np.sum(np.square(tile_up - synth_bottom), axis=2)
+ table_up = min_error_table(errors_up, direction='horizontal')
+ tile_left = tile[:, :TILE_OVERLAP, :]
+ synth_right = synth[:TILE_SIZE, posx:posx+TILE_OVERLAP, :]
+ errors_left = np.sum(np.square(tile_left - synth_right), axis=2)
+ table_left = min_error_table(errors_left, direction='vertical')
+ glue_index = -1
+ glue_value = np.inf
+ for i in range(TILE_OVERLAP):
+ e = table_up[i, i] + table_left[i, i]
+ if e < glue_value:
+ glue_value = e
+ glue_index = i
+ # copy left part first, up to the overlap column
+ xoff = glue_index
+ synth[posy+glue_index, posx+xoff:posx+TILE_OVERLAP] = tile[glue_index, xoff:TILE_OVERLAP]
+ for yoff in range(glue_index+1, TILE_SIZE):
+ # explore nearby xoffs
+ candidates = [(yoff, xoff), (yoff, xoff-1), (yoff, xoff+1)]
+ index = min((i for i in candidates if index_exists(table_left, i)), key=lambda i: table_left[i])
+ xoff = index[1]
+ synth[posy+yoff, posx+xoff:posx+TILE_OVERLAP] = tile[yoff, xoff:TILE_OVERLAP]
+ # copy right part, down to overlap row
+ yoff = glue_index
+ synth[posy+yoff:posy+TILE_OVERLAP, posx+glue_index] = tile[yoff:TILE_OVERLAP, glue_index]
+ for xoff in range(glue_index+1, TILE_SIZE):
+ # explore nearby yoffs
+ candidates = [(yoff, xoff), (yoff-1, xoff), (yoff+1, xoff)]
+ index = min((i for i in candidates if index_exists(table_up, i)), key=lambda i: table_up[i])
+ yoff = index[0]
+ synth[posy+yoff:posy+TILE_OVERLAP, posx+xoff] = tile[yoff:TILE_OVERLAP, xoff]
+ # copy rest of image
+ synth[posy+TILE_OVERLAP:posy+TILE_SIZE, posx+TILE_OVERLAP:posx+TILE_SIZE] = tile[TILE_OVERLAP:, TILE_OVERLAP:]
+ KNN_MAX_BATCH = 1000
+ def quilt(arr, graphcut=True):
+ h, w, c = arr.shape
+ assert (h - TILE_SIZE) % tile_skip == 0
+ assert (w - TILE_SIZE) % tile_skip == 0
+ horiz_blocks = (w - TILE_SIZE) // tile_skip + 1
+ vert_blocks = (h - TILE_SIZE) // tile_skip + 1
+ num_patches = horiz_blocks * vert_blocks
+ patches = np.zeros((TILE_SIZE * TILE_SIZE * 3, num_patches))
+ idx = 0
+ for iy in range(vert_blocks):
+ for ix in range(horiz_blocks):
+ posx = tile_skip*ix
+ posy = tile_skip*iy
+ patches[:, idx] = arr[posy:posy+TILE_SIZE, posx:posx+TILE_SIZE, :].ravel()
+ idx += 1
+
+ ind = []
+ for chunk in range(num_patches // KNN_MAX_BATCH + (1 if num_patches % KNN_MAX_BATCH != 0 else 0)):
+ start = KNN_MAX_BATCH * chunk
+ end = start + KNN_MAX_BATCH
+ # for some reason, the code below is 10x slower when run in a Jupyter notebook
+ # not sure why...
+ indices_ = sess.run(topk_indices, {db_tensor: quilt_db_reshaped, query_imgs: patches[:, start:end]})
+ for i in indices_:
+ ind.append(np.random.choice(i))
+
+ synth = np.zeros((299, 299, 3))
+
+ idx = 0
+ for iy in range(vert_blocks):
+ for ix in range(horiz_blocks):
+ posx = tile_skip*ix
+ posy = tile_skip*iy
+ tile = quilt_db[ind[idx]]
+ if not graphcut:
+ synth[posy:posy+TILE_SIZE, posx:posx+TILE_SIZE, :] = tile
+ else:
+ assign_block(ix, iy, tile, synth)
+ idx += 1
+ return synth
+
+ return quilt
+
+
+def batched_make_defend_jpeg(sess):
+ quilt = make_defend_quilt(sess)
+ def inner(xs, *args, **kwargs):
+ return np.stack([quilt(x, *args, **kwargs) for x in xs], 0)
+ return inner
+
+# x is a square image (3-tensor)
+def defend_crop(x, crop_size=90, ensemble_size=30):
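+  # Builds an ensemble of `ensemble_size` random crops (each crop_size x crop_size);
+  # predictions are meant to be averaged over this ensemble downstream.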
+ x_size = tf.to_float(x.shape[1])
+ frac = crop_size/x_size
+ start_fraction_max = (x_size - crop_size)/x_size
+ def randomizing_crop(x):
+ start_x = tf.random_uniform((), 0, start_fraction_max)
+ start_y = tf.random_uniform((), 0, start_fraction_max)
+ return tf.image.crop_and_resize([x], boxes=[[start_y, start_x, start_y+frac, start_x+frac]],
+ box_ind=[0], crop_size=[crop_size, crop_size])
+
+ return tf.concat([randomizing_crop(x) for _ in range(ensemble_size)], axis=0)
\ No newline at end of file
diff --git a/case_studies/inputtransformations/imagenet_labels.py b/case_studies/inputtransformations/imagenet_labels.py
new file mode 100644
index 0000000..b824734
--- /dev/null
+++ b/case_studies/inputtransformations/imagenet_labels.py
@@ -0,0 +1,1020 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+_lut = [
+ 'tench, Tinca tinca',
+ 'goldfish, Carassius auratus',
+ 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
+ 'tiger shark, Galeocerdo cuvieri',
+ 'hammerhead, hammerhead shark',
+ 'electric ray, crampfish, numbfish, torpedo',
+ 'stingray',
+ 'cock',
+ 'hen',
+ 'ostrich, Struthio camelus',
+ 'brambling, Fringilla montifringilla',
+ 'goldfinch, Carduelis carduelis',
+ 'house finch, linnet, Carpodacus mexicanus',
+ 'junco, snowbird',
+ 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
+ 'robin, American robin, Turdus migratorius',
+ 'bulbul',
+ 'jay',
+ 'magpie',
+ 'chickadee',
+ 'water ouzel, dipper',
+ 'kite',
+ 'bald eagle, American eagle, Haliaeetus leucocephalus',
+ 'vulture',
+ 'great grey owl, great gray owl, Strix nebulosa',
+ 'European fire salamander, Salamandra salamandra',
+ 'common newt, Triturus vulgaris',
+ 'eft',
+ 'spotted salamander, Ambystoma maculatum',
+ 'axolotl, mud puppy, Ambystoma mexicanum',
+ 'bullfrog, Rana catesbeiana',
+ 'tree frog, tree-frog',
+ 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
+ 'loggerhead, loggerhead turtle, Caretta caretta',
+ 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
+ 'mud turtle',
+ 'terrapin',
+ 'box turtle, box tortoise',
+ 'banded gecko',
+ 'common iguana, iguana, Iguana iguana',
+ 'American chameleon, anole, Anolis carolinensis',
+ 'whiptail, whiptail lizard',
+ 'agama',
+ 'frilled lizard, Chlamydosaurus kingi',
+ 'alligator lizard',
+ 'Gila monster, Heloderma suspectum',
+ 'green lizard, Lacerta viridis',
+ 'African chameleon, Chamaeleo chamaeleon',
+ 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
+ 'African crocodile, Nile crocodile, Crocodylus niloticus',
+ 'American alligator, Alligator mississipiensis',
+ 'triceratops',
+ 'thunder snake, worm snake, Carphophis amoenus',
+ 'ringneck snake, ring-necked snake, ring snake',
+ 'hognose snake, puff adder, sand viper',
+ 'green snake, grass snake',
+ 'king snake, kingsnake',
+ 'garter snake, grass snake',
+ 'water snake',
+ 'vine snake',
+ 'night snake, Hypsiglena torquata',
+ 'boa constrictor, Constrictor constrictor',
+ 'rock python, rock snake, Python sebae',
+ 'Indian cobra, Naja naja',
+ 'green mamba',
+ 'sea snake',
+ 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
+ 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
+ 'sidewinder, horned rattlesnake, Crotalus cerastes',
+ 'trilobite',
+ 'harvestman, daddy longlegs, Phalangium opilio',
+ 'scorpion',
+ 'black and gold garden spider, Argiope aurantia',
+ 'barn spider, Araneus cavaticus',
+ 'garden spider, Aranea diademata',
+ 'black widow, Latrodectus mactans',
+ 'tarantula',
+ 'wolf spider, hunting spider',
+ 'tick',
+ 'centipede',
+ 'black grouse',
+ 'ptarmigan',
+ 'ruffed grouse, partridge, Bonasa umbellus',
+ 'prairie chicken, prairie grouse, prairie fowl',
+ 'peacock',
+ 'quail',
+ 'partridge',
+ 'African grey, African gray, Psittacus erithacus',
+ 'macaw',
+ 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
+ 'lorikeet',
+ 'coucal',
+ 'bee eater',
+ 'hornbill',
+ 'hummingbird',
+ 'jacamar',
+ 'toucan',
+ 'drake',
+ 'red-breasted merganser, Mergus serrator',
+ 'goose',
+ 'black swan, Cygnus atratus',
+ 'tusker',
+ 'echidna, spiny anteater, anteater',
+ 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
+ 'wallaby, brush kangaroo',
+ 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
+ 'wombat',
+ 'jellyfish',
+ 'sea anemone, anemone',
+ 'brain coral',
+ 'flatworm, platyhelminth',
+ 'nematode, nematode worm, roundworm',
+ 'conch',
+ 'snail',
+ 'slug',
+ 'sea slug, nudibranch',
+ 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
+ 'chambered nautilus, pearly nautilus, nautilus',
+ 'Dungeness crab, Cancer magister',
+ 'rock crab, Cancer irroratus',
+ 'fiddler crab',
+ 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
+ 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
+ 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
+ 'crayfish, crawfish, crawdad, crawdaddy',
+ 'hermit crab',
+ 'isopod',
+ 'white stork, Ciconia ciconia',
+ 'black stork, Ciconia nigra',
+ 'spoonbill',
+ 'flamingo',
+ 'little blue heron, Egretta caerulea',
+ 'American egret, great white heron, Egretta albus',
+ 'bittern',
+ 'crane',
+ 'limpkin, Aramus pictus',
+ 'European gallinule, Porphyrio porphyrio',
+ 'American coot, marsh hen, mud hen, water hen, Fulica americana',
+ 'bustard',
+ 'ruddy turnstone, Arenaria interpres',
+ 'red-backed sandpiper, dunlin, Erolia alpina',
+ 'redshank, Tringa totanus',
+ 'dowitcher',
+ 'oystercatcher, oyster catcher',
+ 'pelican',
+ 'king penguin, Aptenodytes patagonica',
+ 'albatross, mollymawk',
+ 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
+ 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
+ 'dugong, Dugong dugon',
+ 'sea lion',
+ 'Chihuahua',
+ 'Japanese spaniel',
+ 'Maltese dog, Maltese terrier, Maltese',
+ 'Pekinese, Pekingese, Peke',
+ 'Shih-Tzu',
+ 'Blenheim spaniel',
+ 'papillon',
+ 'toy terrier',
+ 'Rhodesian ridgeback',
+ 'Afghan hound, Afghan',
+ 'basset, basset hound',
+ 'beagle',
+ 'bloodhound, sleuthhound',
+ 'bluetick',
+ 'black-and-tan coonhound',
+ 'Walker hound, Walker foxhound',
+ 'English foxhound',
+ 'redbone',
+ 'borzoi, Russian wolfhound',
+ 'Irish wolfhound',
+ 'Italian greyhound',
+ 'whippet',
+ 'Ibizan hound, Ibizan Podenco',
+ 'Norwegian elkhound, elkhound',
+ 'otterhound, otter hound',
+ 'Saluki, gazelle hound',
+ 'Scottish deerhound, deerhound',
+ 'Weimaraner',
+ 'Staffordshire bullterrier, Staffordshire bull terrier',
+ 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
+ 'Bedlington terrier',
+ 'Border terrier',
+ 'Kerry blue terrier',
+ 'Irish terrier',
+ 'Norfolk terrier',
+ 'Norwich terrier',
+ 'Yorkshire terrier',
+ 'wire-haired fox terrier',
+ 'Lakeland terrier',
+ 'Sealyham terrier, Sealyham',
+ 'Airedale, Airedale terrier',
+ 'cairn, cairn terrier',
+ 'Australian terrier',
+ 'Dandie Dinmont, Dandie Dinmont terrier',
+ 'Boston bull, Boston terrier',
+ 'miniature schnauzer',
+ 'giant schnauzer',
+ 'standard schnauzer',
+ 'Scotch terrier, Scottish terrier, Scottie',
+ 'Tibetan terrier, chrysanthemum dog',
+ 'silky terrier, Sydney silky',
+ 'soft-coated wheaten terrier',
+ 'West Highland white terrier',
+ 'Lhasa, Lhasa apso',
+ 'flat-coated retriever',
+ 'curly-coated retriever',
+ 'golden retriever',
+ 'Labrador retriever',
+ 'Chesapeake Bay retriever',
+ 'German short-haired pointer',
+ 'vizsla, Hungarian pointer',
+ 'English setter',
+ 'Irish setter, red setter',
+ 'Gordon setter',
+ 'Brittany spaniel',
+ 'clumber, clumber spaniel',
+ 'English springer, English springer spaniel',
+ 'Welsh springer spaniel',
+ 'cocker spaniel, English cocker spaniel, cocker',
+ 'Sussex spaniel',
+ 'Irish water spaniel',
+ 'kuvasz',
+ 'schipperke',
+ 'groenendael',
+ 'malinois',
+ 'briard',
+ 'kelpie',
+ 'komondor',
+ 'Old English sheepdog, bobtail',
+ 'Shetland sheepdog, Shetland sheep dog, Shetland',
+ 'collie',
+ 'Border collie',
+ 'Bouvier des Flandres, Bouviers des Flandres',
+ 'Rottweiler',
+ 'German shepherd, German shepherd dog, German police dog, alsatian',
+ 'Doberman, Doberman pinscher',
+ 'miniature pinscher',
+ 'Greater Swiss Mountain dog',
+ 'Bernese mountain dog',
+ 'Appenzeller',
+ 'EntleBucher',
+ 'boxer',
+ 'bull mastiff',
+ 'Tibetan mastiff',
+ 'French bulldog',
+ 'Great Dane',
+ 'Saint Bernard, St Bernard',
+ 'Eskimo dog, husky',
+ 'malamute, malemute, Alaskan malamute',
+ 'Siberian husky',
+ 'dalmatian, coach dog, carriage dog',
+ 'affenpinscher, monkey pinscher, monkey dog',
+ 'basenji',
+ 'pug, pug-dog',
+ 'Leonberg',
+ 'Newfoundland, Newfoundland dog',
+ 'Great Pyrenees',
+ 'Samoyed, Samoyede',
+ 'Pomeranian',
+ 'chow, chow chow',
+ 'keeshond',
+ 'Brabancon griffon',
+ 'Pembroke, Pembroke Welsh corgi',
+ 'Cardigan, Cardigan Welsh corgi',
+ 'toy poodle',
+ 'miniature poodle',
+ 'standard poodle',
+ 'Mexican hairless',
+ 'timber wolf, grey wolf, gray wolf, Canis lupus',
+ 'white wolf, Arctic wolf, Canis lupus tundrarum',
+ 'red wolf, maned wolf, Canis rufus, Canis niger',
+ 'coyote, prairie wolf, brush wolf, Canis latrans',
+ 'dingo, warrigal, warragal, Canis dingo',
+ 'dhole, Cuon alpinus',
+ 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
+ 'hyena, hyaena',
+ 'red fox, Vulpes vulpes',
+ 'kit fox, Vulpes macrotis',
+ 'Arctic fox, white fox, Alopex lagopus',
+ 'grey fox, gray fox, Urocyon cinereoargenteus',
+ 'tabby, tabby cat',
+ 'tiger cat',
+ 'Persian cat',
+ 'Siamese cat, Siamese',
+ 'Egyptian cat',
+ 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
+ 'lynx, catamount',
+ 'leopard, Panthera pardus',
+ 'snow leopard, ounce, Panthera uncia',
+ 'jaguar, panther, Panthera onca, Felis onca',
+ 'lion, king of beasts, Panthera leo',
+ 'tiger, Panthera tigris',
+ 'cheetah, chetah, Acinonyx jubatus',
+ 'brown bear, bruin, Ursus arctos',
+ 'American black bear, black bear, Ursus americanus, Euarctos americanus',
+ 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
+ 'sloth bear, Melursus ursinus, Ursus ursinus',
+ 'mongoose',
+ 'meerkat, mierkat',
+ 'tiger beetle',
+ 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
+ 'ground beetle, carabid beetle',
+ 'long-horned beetle, longicorn, longicorn beetle',
+ 'leaf beetle, chrysomelid',
+ 'dung beetle',
+ 'rhinoceros beetle',
+ 'weevil',
+ 'fly',
+ 'bee',
+ 'ant, emmet, pismire',
+ 'grasshopper, hopper',
+ 'cricket',
+ 'walking stick, walkingstick, stick insect',
+ 'cockroach, roach',
+ 'mantis, mantid',
+ 'cicada, cicala',
+ 'leafhopper',
+ 'lacewing, lacewing fly',
+ "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
+ 'damselfly',
+ 'admiral',
+ 'ringlet, ringlet butterfly',
+ 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
+ 'cabbage butterfly',
+ 'sulphur butterfly, sulfur butterfly',
+ 'lycaenid, lycaenid butterfly',
+ 'starfish, sea star',
+ 'sea urchin',
+ 'sea cucumber, holothurian',
+ 'wood rabbit, cottontail, cottontail rabbit',
+ 'hare',
+ 'Angora, Angora rabbit',
+ 'hamster',
+ 'porcupine, hedgehog',
+ 'fox squirrel, eastern fox squirrel, Sciurus niger',
+ 'marmot',
+ 'beaver',
+ 'guinea pig, Cavia cobaya',
+ 'sorrel',
+ 'zebra',
+ 'hog, pig, grunter, squealer, Sus scrofa',
+ 'wild boar, boar, Sus scrofa',
+ 'warthog',
+ 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
+ 'ox',
+ 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
+ 'bison',
+ 'ram, tup',
+ 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
+ 'ibex, Capra ibex',
+ 'hartebeest',
+ 'impala, Aepyceros melampus',
+ 'gazelle',
+ 'Arabian camel, dromedary, Camelus dromedarius',
+ 'llama',
+ 'weasel',
+ 'mink',
+ 'polecat, fitch, foulmart, foumart, Mustela putorius',
+ 'black-footed ferret, ferret, Mustela nigripes',
+ 'otter',
+ 'skunk, polecat, wood pussy',
+ 'badger',
+ 'armadillo',
+ 'three-toed sloth, ai, Bradypus tridactylus',
+ 'orangutan, orang, orangutang, Pongo pygmaeus',
+ 'gorilla, Gorilla gorilla',
+ 'chimpanzee, chimp, Pan troglodytes',
+ 'gibbon, Hylobates lar',
+ 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
+ 'guenon, guenon monkey',
+ 'patas, hussar monkey, Erythrocebus patas',
+ 'baboon',
+ 'macaque',
+ 'langur',
+ 'colobus, colobus monkey',
+ 'proboscis monkey, Nasalis larvatus',
+ 'marmoset',
+ 'capuchin, ringtail, Cebus capucinus',
+ 'howler monkey, howler',
+ 'titi, titi monkey',
+ 'spider monkey, Ateles geoffroyi',
+ 'squirrel monkey, Saimiri sciureus',
+ 'Madagascar cat, ring-tailed lemur, Lemur catta',
+ 'indri, indris, Indri indri, Indri brevicaudatus',
+ 'Indian elephant, Elephas maximus',
+ 'African elephant, Loxodonta africana',
+ 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
+ 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
+ 'barracouta, snoek',
+ 'eel',
+ 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
+ 'rock beauty, Holocanthus tricolor',
+ 'anemone fish',
+ 'sturgeon',
+ 'gar, garfish, garpike, billfish, Lepisosteus osseus',
+ 'lionfish',
+ 'puffer, pufferfish, blowfish, globefish',
+ 'abacus',
+ 'abaya',
+ "academic gown, academic robe, judge's robe",
+ 'accordion, piano accordion, squeeze box',
+ 'acoustic guitar',
+ 'aircraft carrier, carrier, flattop, attack aircraft carrier',
+ 'airliner',
+ 'airship, dirigible',
+ 'altar',
+ 'ambulance',
+ 'amphibian, amphibious vehicle',
+ 'analog clock',
+ 'apiary, bee house',
+ 'apron',
+ 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
+ 'assault rifle, assault gun',
+ 'backpack, back pack, knapsack, packsack, rucksack, haversack',
+ 'bakery, bakeshop, bakehouse',
+ 'balance beam, beam',
+ 'balloon',
+ 'ballpoint, ballpoint pen, ballpen, Biro',
+ 'Band Aid',
+ 'banjo',
+ 'bannister, banister, balustrade, balusters, handrail',
+ 'barbell',
+ 'barber chair',
+ 'barbershop',
+ 'barn',
+ 'barometer',
+ 'barrel, cask',
+ 'barrow, garden cart, lawn cart, wheelbarrow',
+ 'baseball',
+ 'basketball',
+ 'bassinet',
+ 'bassoon',
+ 'bathing cap, swimming cap',
+ 'bath towel',
+ 'bathtub, bathing tub, bath, tub',
+ 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
+ 'beacon, lighthouse, beacon light, pharos',
+ 'beaker',
+ 'bearskin, busby, shako',
+ 'beer bottle',
+ 'beer glass',
+ 'bell cote, bell cot',
+ 'bib',
+ 'bicycle-built-for-two, tandem bicycle, tandem',
+ 'bikini, two-piece',
+ 'binder, ring-binder',
+ 'binoculars, field glasses, opera glasses',
+ 'birdhouse',
+ 'boathouse',
+ 'bobsled, bobsleigh, bob',
+ 'bolo tie, bolo, bola tie, bola',
+ 'bonnet, poke bonnet',
+ 'bookcase',
+ 'bookshop, bookstore, bookstall',
+ 'bottlecap',
+ 'bow',
+ 'bow tie, bow-tie, bowtie',
+ 'brass, memorial tablet, plaque',
+ 'brassiere, bra, bandeau',
+ 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
+ 'breastplate, aegis, egis',
+ 'broom',
+ 'bucket, pail',
+ 'buckle',
+ 'bulletproof vest',
+ 'bullet train, bullet',
+ 'butcher shop, meat market',
+ 'cab, hack, taxi, taxicab',
+ 'caldron, cauldron',
+ 'candle, taper, wax light',
+ 'cannon',
+ 'canoe',
+ 'can opener, tin opener',
+ 'cardigan',
+ 'car mirror',
+ 'carousel, carrousel, merry-go-round, roundabout, whirligig',
+ "carpenter's kit, tool kit",
+ 'carton',
+ 'car wheel',
+ 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
+ 'cassette',
+ 'cassette player',
+ 'castle',
+ 'catamaran',
+ 'CD player',
+ 'cello, violoncello',
+ 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
+ 'chain',
+ 'chainlink fence',
+ 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
+ 'chain saw, chainsaw',
+ 'chest',
+ 'chiffonier, commode',
+ 'chime, bell, gong',
+ 'china cabinet, china closet',
+ 'Christmas stocking',
+ 'church, church building',
+ 'cinema, movie theater, movie theatre, movie house, picture palace',
+ 'cleaver, meat cleaver, chopper',
+ 'cliff dwelling',
+ 'cloak',
+ 'clog, geta, patten, sabot',
+ 'cocktail shaker',
+ 'coffee mug',
+ 'coffeepot',
+ 'coil, spiral, volute, whorl, helix',
+ 'combination lock',
+ 'computer keyboard, keypad',
+ 'confectionery, confectionary, candy store',
+ 'container ship, containership, container vessel',
+ 'convertible',
+ 'corkscrew, bottle screw',
+ 'cornet, horn, trumpet, trump',
+ 'cowboy boot',
+ 'cowboy hat, ten-gallon hat',
+ 'cradle',
+ 'crane',
+ 'crash helmet',
+ 'crate',
+ 'crib, cot',
+ 'Crock Pot',
+ 'croquet ball',
+ 'crutch',
+ 'cuirass',
+ 'dam, dike, dyke',
+ 'desk',
+ 'desktop computer',
+ 'dial telephone, dial phone',
+ 'diaper, nappy, napkin',
+ 'digital clock',
+ 'digital watch',
+ 'dining table, board',
+ 'dishrag, dishcloth',
+ 'dishwasher, dish washer, dishwashing machine',
+ 'disk brake, disc brake',
+ 'dock, dockage, docking facility',
+ 'dogsled, dog sled, dog sleigh',
+ 'dome',
+ 'doormat, welcome mat',
+ 'drilling platform, offshore rig',
+ 'drum, membranophone, tympan',
+ 'drumstick',
+ 'dumbbell',
+ 'Dutch oven',
+ 'electric fan, blower',
+ 'electric guitar',
+ 'electric locomotive',
+ 'entertainment center',
+ 'envelope',
+ 'espresso maker',
+ 'face powder',
+ 'feather boa, boa',
+ 'file, file cabinet, filing cabinet',
+ 'fireboat',
+ 'fire engine, fire truck',
+ 'fire screen, fireguard',
+ 'flagpole, flagstaff',
+ 'flute, transverse flute',
+ 'folding chair',
+ 'football helmet',
+ 'forklift',
+ 'fountain',
+ 'fountain pen',
+ 'four-poster',
+ 'freight car',
+ 'French horn, horn',
+ 'frying pan, frypan, skillet',
+ 'fur coat',
+ 'garbage truck, dustcart',
+ 'gasmask, respirator, gas helmet',
+ 'gas pump, gasoline pump, petrol pump, island dispenser',
+ 'goblet',
+ 'go-kart',
+ 'golf ball',
+ 'golfcart, golf cart',
+ 'gondola',
+ 'gong, tam-tam',
+ 'gown',
+ 'grand piano, grand',
+ 'greenhouse, nursery, glasshouse',
+ 'grille, radiator grille',
+ 'grocery store, grocery, food market, market',
+ 'guillotine',
+ 'hair slide',
+ 'hair spray',
+ 'half track',
+ 'hammer',
+ 'hamper',
+ 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
+ 'hand-held computer, hand-held microcomputer',
+ 'handkerchief, hankie, hanky, hankey',
+ 'hard disc, hard disk, fixed disk',
+ 'harmonica, mouth organ, harp, mouth harp',
+ 'harp',
+ 'harvester, reaper',
+ 'hatchet',
+ 'holster',
+ 'home theater, home theatre',
+ 'honeycomb',
+ 'hook, claw',
+ 'hoopskirt, crinoline',
+ 'horizontal bar, high bar',
+ 'horse cart, horse-cart',
+ 'hourglass',
+ 'iPod',
+ 'iron, smoothing iron',
+ "jack-o'-lantern",
+ 'jean, blue jean, denim',
+ 'jeep, landrover',
+ 'jersey, T-shirt, tee shirt',
+ 'jigsaw puzzle',
+ 'jinrikisha, ricksha, rickshaw',
+ 'joystick',
+ 'kimono',
+ 'knee pad',
+ 'knot',
+ 'lab coat, laboratory coat',
+ 'ladle',
+ 'lampshade, lamp shade',
+ 'laptop, laptop computer',
+ 'lawn mower, mower',
+ 'lens cap, lens cover',
+ 'letter opener, paper knife, paperknife',
+ 'library',
+ 'lifeboat',
+ 'lighter, light, igniter, ignitor',
+ 'limousine, limo',
+ 'liner, ocean liner',
+ 'lipstick, lip rouge',
+ 'Loafer',
+ 'lotion',
+ 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
+ "loupe, jeweler's loupe",
+ 'lumbermill, sawmill',
+ 'magnetic compass',
+ 'mailbag, postbag',
+ 'mailbox, letter box',
+ 'maillot',
+ 'maillot, tank suit',
+ 'manhole cover',
+ 'maraca',
+ 'marimba, xylophone',
+ 'mask',
+ 'matchstick',
+ 'maypole',
+ 'maze, labyrinth',
+ 'measuring cup',
+ 'medicine chest, medicine cabinet',
+ 'megalith, megalithic structure',
+ 'microphone, mike',
+ 'microwave, microwave oven',
+ 'military uniform',
+ 'milk can',
+ 'minibus',
+ 'miniskirt, mini',
+ 'minivan',
+ 'missile',
+ 'mitten',
+ 'mixing bowl',
+ 'mobile home, manufactured home',
+ 'Model T',
+ 'modem',
+ 'monastery',
+ 'monitor',
+ 'moped',
+ 'mortar',
+ 'mortarboard',
+ 'mosque',
+ 'mosquito net',
+ 'motor scooter, scooter',
+ 'mountain bike, all-terrain bike, off-roader',
+ 'mountain tent',
+ 'mouse, computer mouse',
+ 'mousetrap',
+ 'moving van',
+ 'muzzle',
+ 'nail',
+ 'neck brace',
+ 'necklace',
+ 'nipple',
+ 'notebook, notebook computer',
+ 'obelisk',
+ 'oboe, hautboy, hautbois',
+ 'ocarina, sweet potato',
+ 'odometer, hodometer, mileometer, milometer',
+ 'oil filter',
+ 'organ, pipe organ',
+ 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
+ 'overskirt',
+ 'oxcart',
+ 'oxygen mask',
+ 'packet',
+ 'paddle, boat paddle',
+ 'paddlewheel, paddle wheel',
+ 'padlock',
+ 'paintbrush',
+ "pajama, pyjama, pj's, jammies",
+ 'palace',
+ 'panpipe, pandean pipe, syrinx',
+ 'paper towel',
+ 'parachute, chute',
+ 'parallel bars, bars',
+ 'park bench',
+ 'parking meter',
+ 'passenger car, coach, carriage',
+ 'patio, terrace',
+ 'pay-phone, pay-station',
+ 'pedestal, plinth, footstall',
+ 'pencil box, pencil case',
+ 'pencil sharpener',
+ 'perfume, essence',
+ 'Petri dish',
+ 'photocopier',
+ 'pick, plectrum, plectron',
+ 'pickelhaube',
+ 'picket fence, paling',
+ 'pickup, pickup truck',
+ 'pier',
+ 'piggy bank, penny bank',
+ 'pill bottle',
+ 'pillow',
+ 'ping-pong ball',
+ 'pinwheel',
+ 'pirate, pirate ship',
+ 'pitcher, ewer',
+ "plane, carpenter's plane, woodworking plane",
+ 'planetarium',
+ 'plastic bag',
+ 'plate rack',
+ 'plow, plough',
+ "plunger, plumber's helper",
+ 'Polaroid camera, Polaroid Land camera',
+ 'pole',
+ 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
+ 'poncho',
+ 'pool table, billiard table, snooker table',
+ 'pop bottle, soda bottle',
+ 'pot, flowerpot',
+ "potter's wheel",
+ 'power drill',
+ 'prayer rug, prayer mat',
+ 'printer',
+ 'prison, prison house',
+ 'projectile, missile',
+ 'projector',
+ 'puck, hockey puck',
+ 'punching bag, punch bag, punching ball, punchball',
+ 'purse',
+ 'quill, quill pen',
+ 'quilt, comforter, comfort, puff',
+ 'racer, race car, racing car',
+ 'racket, racquet',
+ 'radiator',
+ 'radio, wireless',
+ 'radio telescope, radio reflector',
+ 'rain barrel',
+ 'recreational vehicle, RV, R.V.',
+ 'reel',
+ 'reflex camera',
+ 'refrigerator, icebox',
+ 'remote control, remote',
+ 'restaurant, eating house, eating place, eatery',
+ 'revolver, six-gun, six-shooter',
+ 'rifle',
+ 'rocking chair, rocker',
+ 'rotisserie',
+ 'rubber eraser, rubber, pencil eraser',
+ 'rugby ball',
+ 'rule, ruler',
+ 'running shoe',
+ 'safe',
+ 'safety pin',
+ 'saltshaker, salt shaker',
+ 'sandal',
+ 'sarong',
+ 'sax, saxophone',
+ 'scabbard',
+ 'scale, weighing machine',
+ 'school bus',
+ 'schooner',
+ 'scoreboard',
+ 'screen, CRT screen',
+ 'screw',
+ 'screwdriver',
+ 'seat belt, seatbelt',
+ 'sewing machine',
+ 'shield, buckler',
+ 'shoe shop, shoe-shop, shoe store',
+ 'shoji',
+ 'shopping basket',
+ 'shopping cart',
+ 'shovel',
+ 'shower cap',
+ 'shower curtain',
+ 'ski',
+ 'ski mask',
+ 'sleeping bag',
+ 'slide rule, slipstick',
+ 'sliding door',
+ 'slot, one-armed bandit',
+ 'snorkel',
+ 'snowmobile',
+ 'snowplow, snowplough',
+ 'soap dispenser',
+ 'soccer ball',
+ 'sock',
+ 'solar dish, solar collector, solar furnace',
+ 'sombrero',
+ 'soup bowl',
+ 'space bar',
+ 'space heater',
+ 'space shuttle',
+ 'spatula',
+ 'speedboat',
+ "spider web, spider's web",
+ 'spindle',
+ 'sports car, sport car',
+ 'spotlight, spot',
+ 'stage',
+ 'steam locomotive',
+ 'steel arch bridge',
+ 'steel drum',
+ 'stethoscope',
+ 'stole',
+ 'stone wall',
+ 'stopwatch, stop watch',
+ 'stove',
+ 'strainer',
+ 'streetcar, tram, tramcar, trolley, trolley car',
+ 'stretcher',
+ 'studio couch, day bed',
+ 'stupa, tope',
+ 'submarine, pigboat, sub, U-boat',
+ 'suit, suit of clothes',
+ 'sundial',
+ 'sunglass',
+ 'sunglasses, dark glasses, shades',
+ 'sunscreen, sunblock, sun blocker',
+ 'suspension bridge',
+ 'swab, swob, mop',
+ 'sweatshirt',
+ 'swimming trunks, bathing trunks',
+ 'swing',
+ 'switch, electric switch, electrical switch',
+ 'syringe',
+ 'table lamp',
+ 'tank, army tank, armored combat vehicle, armoured combat vehicle',
+ 'tape player',
+ 'teapot',
+ 'teddy, teddy bear',
+ 'television, television system',
+ 'tennis ball',
+ 'thatch, thatched roof',
+ 'theater curtain, theatre curtain',
+ 'thimble',
+ 'thresher, thrasher, threshing machine',
+ 'throne',
+ 'tile roof',
+ 'toaster',
+ 'tobacco shop, tobacconist shop, tobacconist',
+ 'toilet seat',
+ 'torch',
+ 'totem pole',
+ 'tow truck, tow car, wrecker',
+ 'toyshop',
+ 'tractor',
+ 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
+ 'tray',
+ 'trench coat',
+ 'tricycle, trike, velocipede',
+ 'trimaran',
+ 'tripod',
+ 'triumphal arch',
+ 'trolleybus, trolley coach, trackless trolley',
+ 'trombone',
+ 'tub, vat',
+ 'turnstile',
+ 'typewriter keyboard',
+ 'umbrella',
+ 'unicycle, monocycle',
+ 'upright, upright piano',
+ 'vacuum, vacuum cleaner',
+ 'vase',
+ 'vault',
+ 'velvet',
+ 'vending machine',
+ 'vestment',
+ 'viaduct',
+ 'violin, fiddle',
+ 'volleyball',
+ 'waffle iron',
+ 'wall clock',
+ 'wallet, billfold, notecase, pocketbook',
+ 'wardrobe, closet, press',
+ 'warplane, military plane',
+ 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
+ 'washer, automatic washer, washing machine',
+ 'water bottle',
+ 'water jug',
+ 'water tower',
+ 'whiskey jug',
+ 'whistle',
+ 'wig',
+ 'window screen',
+ 'window shade',
+ 'Windsor tie',
+ 'wine bottle',
+ 'wing',
+ 'wok',
+ 'wooden spoon',
+ 'wool, woolen, woollen',
+ 'worm fence, snake fence, snake-rail fence, Virginia fence',
+ 'wreck',
+ 'yawl',
+ 'yurt',
+ 'web site, website, internet site, site',
+ 'comic book',
+ 'crossword puzzle, crossword',
+ 'street sign',
+ 'traffic light, traffic signal, stoplight',
+ 'book jacket, dust cover, dust jacket, dust wrapper',
+ 'menu',
+ 'plate',
+ 'guacamole',
+ 'consomme',
+ 'hot pot, hotpot',
+ 'trifle',
+ 'ice cream, icecream',
+ 'ice lolly, lolly, lollipop, popsicle',
+ 'French loaf',
+ 'bagel, beigel',
+ 'pretzel',
+ 'cheeseburger',
+ 'hotdog, hot dog, red hot',
+ 'mashed potato',
+ 'head cabbage',
+ 'broccoli',
+ 'cauliflower',
+ 'zucchini, courgette',
+ 'spaghetti squash',
+ 'acorn squash',
+ 'butternut squash',
+ 'cucumber, cuke',
+ 'artichoke, globe artichoke',
+ 'bell pepper',
+ 'cardoon',
+ 'mushroom',
+ 'Granny Smith',
+ 'strawberry',
+ 'orange',
+ 'lemon',
+ 'fig',
+ 'pineapple, ananas',
+ 'banana',
+ 'jackfruit, jak, jack',
+ 'custard apple',
+ 'pomegranate',
+ 'hay',
+ 'carbonara',
+ 'chocolate sauce, chocolate syrup',
+ 'dough',
+ 'meat loaf, meatloaf',
+ 'pizza, pizza pie',
+ 'potpie',
+ 'burrito',
+ 'red wine',
+ 'espresso',
+ 'cup',
+ 'eggnog',
+ 'alp',
+ 'bubble',
+ 'cliff, drop, drop-off',
+ 'coral reef',
+ 'geyser',
+ 'lakeside, lakeshore',
+ 'promontory, headland, head, foreland',
+ 'sandbar, sand bar',
+ 'seashore, coast, seacoast, sea-coast',
+ 'valley, vale',
+ 'volcano',
+ 'ballplayer, baseball player',
+ 'groom, bridegroom',
+ 'scuba diver',
+ 'rapeseed',
+ 'daisy',
+ "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
+ 'corn',
+ 'acorn',
+ 'hip, rose hip, rosehip',
+ 'buckeye, horse chestnut, conker',
+ 'coral fungus',
+ 'agaric',
+ 'gyromitra',
+ 'stinkhorn, carrion fungus',
+ 'earthstar',
+ 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
+ 'bolete',
+ 'ear, spike, capitulum',
+ 'toilet tissue, toilet paper, bathroom tissue'
+]
+
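+# Maps an ImageNet class index (0-999) to its human-readable label,
+# e.g. label_to_name(0) == 'tench, Tinca tinca'.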
+def label_to_name(label):
+ global _lut
+ return _lut[label]
diff --git a/case_studies/inputtransformations/inceptionv3.py b/case_studies/inputtransformations/inceptionv3.py
new file mode 100644
index 0000000..448c1eb
--- /dev/null
+++ b/case_studies/inputtransformations/inceptionv3.py
@@ -0,0 +1,61 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from utils import optimistic_restore
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+import tensorflow.contrib.slim.nets as nets
+import functools
+
+INCEPTION_CHECKPOINT_PATH = 'checkpoints/inputtransformations_inceptionv3/inception_v3.ckpt'
+
+
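+# Builds a TF-slim InceptionV3 network_fn with 1001 outputs (index 0 is the
+# background class); the reuse flag controls variable reuse so the graph can
+# be constructed more than once within the same session.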
+def _get_model(reuse):
+ arg_scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)
+ func = nets.inception.inception_v3
+ @functools.wraps(func)
+ def network_fn(images):
+ with slim.arg_scope(arg_scope):
+ return func(images, 1001, is_training=False, reuse=reuse)
+ if hasattr(func, 'default_image_size'):
+ network_fn.default_image_size = func.default_image_size
+ return network_fn
+
+def _preprocess(image, height, width, scope=None):
+ with tf.name_scope(scope, 'eval_image', [image, height, width]):
+ if image.dtype != tf.float32:
+ image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+ image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
+ image = tf.subtract(image, 0.5)
+ image = tf.multiply(image, 2.0)
+ return image
+
+# input is [batch, ?, ?, 3], pixels in [0, 1]
+# it's rescaled to [batch, 299, 299, 3] and shifted to [-1, 1]
+# output is [batch, 1000] (imagenet classes)
+_inception_initialized = False
+def model(sess, image):
+ global _inception_initialized
+ network_fn = _get_model(reuse=_inception_initialized)
+ size = network_fn.default_image_size
+ preprocessed = _preprocess(image, size, size)
+ logits, _ = network_fn(preprocessed)
+ logits = logits[:,1:] # ignore background class
+ predictions = tf.argmax(logits, 1)
+
+ if not _inception_initialized:
+ optimistic_restore(sess, INCEPTION_CHECKPOINT_PATH)
+ _inception_initialized = True
+
+ return logits, predictions
diff --git a/case_studies/inputtransformations/quilt_preprocess.py b/case_studies/inputtransformations/quilt_preprocess.py
new file mode 100644
index 0000000..538b95b
--- /dev/null
+++ b/case_studies/inputtransformations/quilt_preprocess.py
@@ -0,0 +1,74 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import random
+import PIL
+import PIL.Image
+import numpy as np
+import sys
+
+
+SAMPLES = 1000000
+DIM = 5
+RESIZE = True
+RESIZE_DIM = 300
+OUTPUT_FILE = 'quilt_db.npy'
+
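+# Builds the patch database for the quilting defense: samples SAMPLES random
+# DIM x DIM crops from the ImageNet training images under argv[1] and stores
+# them in OUTPUT_FILE.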
+def main(argv):
+ imagenet_train_dir = argv[1]
+
+ assert SAMPLES % 1000 == 0
+
+ db = np.zeros((SAMPLES, DIM, DIM, 3), dtype=np.float32)
+
+ idx = 0
+ files = []
+ for d in os.listdir(imagenet_train_dir):
+ d = os.path.join(imagenet_train_dir, d)
+ files.extend(os.path.join(d, i) for i in os.listdir(d) if i.endswith('.JPEG'))
+ for f in random.sample(files, SAMPLES):
+ img = load_image(f)
+ h, w, _ = img.shape
+ h_start = random.randint(0, h - DIM)
+ w_start = random.randint(0, w - DIM)
+ crop = img[h_start:h_start+DIM, w_start:w_start+DIM, :]
+ db[idx, :, :, :] = crop
+ idx += 1
+
+ if idx % 100 == 0:
+ print('%.2f%% done' % (100 * (float(idx) / SAMPLES)))
+
+ np.save(OUTPUT_FILE, db)
+
+
+def load_image(path):
+ image = PIL.Image.open(path)
+ if RESIZE:
+ if image.height > image.width:
+ image = image.resize((int(float(image.width) / image.height * RESIZE_DIM), RESIZE_DIM))
+ elif image.width > image.height:
+ image = image.resize((RESIZE_DIM, int(float(image.height) / image.width * RESIZE_DIM)))
+ img = np.asarray(image).astype(np.float32) / 255.0
+ if img.ndim == 2:
+ img = np.repeat(img[:,:,np.newaxis], repeats=3, axis=2)
+ if img.shape[2] == 4:
+ # alpha channel
+ img = img[:,:,:3]
+ return img
+
+
+if __name__ == '__main__':
+ main(sys.argv)
+
diff --git a/case_studies/inputtransformations/robustml_attack.py b/case_studies/inputtransformations/robustml_attack.py
new file mode 100644
index 0000000..108c835
--- /dev/null
+++ b/case_studies/inputtransformations/robustml_attack.py
@@ -0,0 +1,83 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import robustml
+import sys
+import tensorflow as tf
+import numpy as np
+
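+# BPDA (Backward Pass Differentiable Approximation) attack: the loss and its
+# gradient are computed on the defended image, the gradient is then applied to
+# the undefended adversarial example, and the L2 distance to the original image
+# is penalized via a hinge term weighted by lam.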
+class BPDA(robustml.attack.Attack):
+ def __init__(self, sess, model, epsilon, max_steps=1000, learning_rate=0.1, lam=1e-6, debug=False):
+ self._sess = sess
+
+ self._model = model
+ self._input = model.input
+ self._l2_input = tf.placeholder(tf.float32, self._input.shape, name="l2_input") # using BPDA, so we want this to pass the original adversarial example
+ self._original = tf.placeholder(tf.float32, self._input.shape, name="original")
+ self._label = tf.placeholder(tf.int32, (None,), name="label")
+ one_hot = tf.one_hot(self._label, 1000)
+ #ensemble_labels = tf.tile(one_hot, (model.logits.shape[0], 1))
+ self._l2 = tf.sqrt(2*tf.nn.l2_loss(self._l2_input - self._original))
+ self._xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model.logits, labels=one_hot))
+ self._loss = lam * tf.maximum(self._l2 - epsilon, 0) + self._xent
+ self._grad, = tf.gradients(self._loss, self._input)
+
+ self._epsilon = epsilon
+ self._max_steps = max_steps
+ self._learning_rate = learning_rate
+ self._debug = debug
+
+ def run(self, x, y, target):
+ if target is not None:
+ raise NotImplementedError
+ adv = np.copy(x)
+ for i in range(self._max_steps):
+ adv_def = self._model.defend(adv)
+ p, ll2, lxent, g = self._sess.run(
+ [self._model.predictions, self._l2, self._xent, self._grad],
+ {self._input: adv_def, self._label: y, self._l2_input: adv, self._original: x}
+ )
+ if self._debug:
+ print(
+ 'attack: step %d/%d, xent loss = %g, l2 loss = %g (max %g), (true %d, predicted %s)' % (
+ i+1,
+ self._max_steps,
+ lxent,
+ ll2,
+ self._epsilon,
+ y,
+ p
+ ),
+ file=sys.stderr
+ )
+ is_adv = np.logical_and(y != p, ll2 < self._epsilon)
+ print(is_adv.sum())
+ if np.all(is_adv):
+ #if y not in p and ll2 < self._epsilon:
+ # we're done
+ #if self._debug:
+ print('returning early', file=sys.stderr)
+ break
+ g *= (~is_adv).astype(int).reshape(-1, 1, 1, 1)
+ adv += self._learning_rate * g
+ adv = np.clip(adv, 0, 1)
+
+ adv_l2 = np.sqrt(((adv - x)**2).sum((1, 2, 3), keepdims=True))
+ factor = self._epsilon / adv_l2
+ factor = np.minimum(factor, np.ones_like(factor))
+ diff = adv - x
+ adv = diff*factor + x
+
+ return adv
+
diff --git a/case_studies/inputtransformations/robustml_model.py b/case_studies/inputtransformations/robustml_model.py
new file mode 100644
index 0000000..530d918
--- /dev/null
+++ b/case_studies/inputtransformations/robustml_model.py
@@ -0,0 +1,87 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import robustml
+from defense import *
+from inceptionv3 import model as inceptionv3_model
+import tensorflow as tf
+
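+# robustml wrapper combining InceptionV3 with one of the input-transformation
+# defenses (bit-depth reduction, JPEG compression, random cropping, quilting,
+# or total-variation minimization).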
+class InputTransformations(robustml.model.Model):
+ def __init__(self, sess, defense):
+ self._sess = sess
+ self._input = tf.placeholder(tf.float32, (None, 299, 299, 3))
+ #self._input_single = tf.placeholder(tf.float32, (299, 299, 3))
+ #input_expanded = tf.expand_dims(self._input, axis=0)
+
+ if defense == 'crop':
+ raise NotImplementedError("crop transformation not properly "
+ "implemented yet")
+ cropped_xs = defend_crop(self._input)
+ self._logits, _ = inceptionv3_model(sess, cropped_xs)
+ self._probs = tf.reduce_mean(tf.nn.softmax(self._logits), axis=0, keepdims=True)
+ else:
+ self._logits, _ = inceptionv3_model(sess, self._input)
+ self._probs = tf.nn.softmax(self._logits)
+
+ self._predictions = tf.argmax(self._probs, 1)
+
+ if defense == 'bitdepth':
+ self._defend = batched_defend_reduce
+ elif defense == 'jpeg':
+ self._defend = batched_defend_jpeg
+ elif defense == 'crop':
+ raise NotImplementedError("crop transformation not properly "
+ "implemented yet")
+ self._defend = lambda x: x # implemented as part of model so it's differentiable
+ elif defense == 'quilt':
+ self._defend = batched_make_defend_jpeg(sess)
+ elif defense == 'tv':
+ self._defend = batched_defend_tv
+ elif defense is None:
+ self._defend = lambda x: x
+ else:
+ raise ValueError('invalid defense: %s' % defense)
+
+ #self._dataset = robustml.dataset.ImageNet((299, 299, 3))
+ #self._threat_model = robustml.threat_model.L2(epsilon=0.05*299) # 0.05 * sqrt(299*299)
+ # TODO: I think there is a factor sqrt(3) missing here
+
+ @property
+ def dataset(self):
+ return self._dataset
+
+ @property
+ def threat_model(self):
+ return self._threat_model
+
+ def classify(self, x):
+ x_defended = self.defend(x)
+ return self._sess.run(self._predictions, {self._input: x_defended})
+
+ # expose internals for white box attacks
+
+ def defend(self, x):
+ return self._defend(x)
+
+ @property
+ def input(self):
+ return self._input
+
+ @property
+ def logits(self):
+ return self._logits
+
+ @property
+ def predictions(self):
+ return self._predictions
diff --git a/case_studies/inputtransformations/setup.sh b/case_studies/inputtransformations/setup.sh
new file mode 100644
index 0000000..a394009
--- /dev/null
+++ b/case_studies/inputtransformations/setup.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+cd "$(dirname "$0")" # cd to directory of this script
+
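+# Downloads the InceptionV3 checkpoint and the quilting patch database into
+# ./data and verifies their SHA1 checksums.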
+# $1 is filename
+# $2 is expected sha
+check_sha1() {
+ computed=$(sha1sum "$1" 2>/dev/null | awk '{print $1}') || return 1
+ if [ "$computed" == "$2" ]; then
+ return 0;
+ else
+ return 1;
+ fi
+}
+
+# $1 is URL
+# $2 is extracted file name
+# $3 is the checksum
+fetch() {
+ if check_sha1 "$2" "$3"; then
+ echo "$2 already downloaded"
+ return
+ fi
+ echo "downloading $1"
+ f=${1##*/}
+ wget -q "$1" -O "$f"
+ tar xf "$f"
+ rm -f "$f"
+ if check_sha1 "$2" "$3"; then
+ echo "downloaded $2"
+ else
+ echo "HASH MISMATCH, SHA1($2) != $3"
+ fi
+}
+
+cd data
+
+fetch \
+ http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz \
+ inception_v3.ckpt 606fd6953c58c817c56fd3bc2f0384fc2ecaba92
+
+fetch \
+ https://github.com/anishathalye/obfuscated-gradients/releases/download/v0/quilt_db.tar.gz \
+ quilt_db.npy ff4b94f45c9e8441b341fd5ffcf2adb0a8049873
diff --git a/case_studies/inputtransformations/utils.py b/case_studies/inputtransformations/utils.py
new file mode 100644
index 0000000..4838934
--- /dev/null
+++ b/case_studies/inputtransformations/utils.py
@@ -0,0 +1,67 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+from tensorflow.python.framework import ops
+import numpy as np
+import PIL.Image
+from imagenet_labels import label_to_name
+import matplotlib.pyplot as plt
+
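+# Example: one_hot(2, 5) -> array([0., 0., 1., 0., 0.])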
+def one_hot(index, total):
+ arr = np.zeros((total))
+ arr[index] = 1.0
+ return arr
+
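+# Restores only those checkpoint variables that also exist in the current
+# graph with a matching shape; all other variables are silently skipped.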
+def optimistic_restore(session, save_file):
+ reader = tf.train.NewCheckpointReader(save_file)
+ saved_shapes = reader.get_variable_to_shape_map()
+ var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
+ if var.name.split(':')[0] in saved_shapes])
+ restore_vars = []
+ with tf.variable_scope('', reuse=True):
+ for var_name, saved_var_name in var_names:
+ curr_var = tf.get_variable(saved_var_name)
+ var_shape = curr_var.get_shape().as_list()
+ if var_shape == saved_shapes[saved_var_name]:
+ restore_vars.append(curr_var)
+ saver = tf.train.Saver(restore_vars)
+ saver.restore(session, save_file)
+
+def load_image(path):
+ return (np.asarray(PIL.Image.open(path).resize((299, 299)))/255.0).astype(np.float32)
+
+def make_classify(sess, input_, probs):
+ def classify(img, correct_class=None, target_class=None):
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
+ fig.sca(ax1)
+ p = sess.run(probs, feed_dict={input_: img})[0]
+ ax1.imshow(img)
+ fig.sca(ax1)
+
+ topk = list(p.argsort()[-10:][::-1])
+ topprobs = p[topk]
+ barlist = ax2.bar(range(10), topprobs)
+ if target_class in topk:
+ barlist[topk.index(target_class)].set_color('r')
+ if correct_class in topk:
+ barlist[topk.index(correct_class)].set_color('g')
+ plt.sca(ax2)
+ plt.ylim([0, 1.1])
+ plt.xticks(range(10),
+ [label_to_name(i)[:15] for i in topk],
+ rotation='vertical')
+ fig.subplots_adjust(bottom=0.2)
+ plt.show()
+ return classify
\ No newline at end of file
diff --git a/case_studies/interpolation_training/__init__.py b/case_studies/interpolation_training/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/interpolation_training/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/interpolation_training/adv_interp.py b/case_studies/interpolation_training/adv_interp.py
new file mode 100644
index 0000000..fd3db17
--- /dev/null
+++ b/case_studies/interpolation_training/adv_interp.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' Adversarial Interpolation '''
+
+from it_utils import cos_dist
+import copy
+import pickle
+import torch
+from torch.autograd.gradcheck import zero_gradients
+from torch.autograd import Variable
+
+
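+# Adversarial interpolation: each image is perturbed within an epsilon ball so
+# that its features move (in cosine distance) towards those of a partner image
+# (the batch in reverse order), while its label is softened towards the
+# complement of the partner's label.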
+def adv_interp(inputs,
+ y,
+ base_net,
+ num_classes,
+ epsilon=8,
+ epsilon_y=0.5,
+ v_min=0,
+ v_max=255):
+ # inputs: image batch with shape [batch_size, c, h, w]
+ # y: one-hot label batch with shape [batch_size, num_classes]
+ net = copy.deepcopy(base_net)
+ x = inputs.clone()
+
+ inv_index = torch.arange(x.size(0) - 1, -1, -1).long()
+ x_prime = x[inv_index, :, :, :].detach()
+ y_prime = y[inv_index, :]
+ x_init = x.detach() + torch.zeros_like(x).uniform_(-epsilon, epsilon)
+
+ x_init.requires_grad_()
+ zero_gradients(x_init)
+ if x_init.grad is not None:
+ x_init.grad.data.fill_(0)
+ net.eval()
+
+ fea_b = net(x_init, mode='feature')
+ fea_t = net(x_prime, mode='feature')
+
+ loss_adv = cos_dist(fea_b, fea_t)
+ net.zero_grad()
+ loss_adv = loss_adv.mean()
+ loss_adv.backward(retain_graph=True)
+
+ x_tilde = x_init.data - epsilon * torch.sign(x_init.grad.data)
+
+ x_tilde = torch.min(torch.max(x_tilde, inputs - epsilon), inputs + epsilon)
+ x_tilde = torch.clamp(x_tilde, v_min, v_max)
+
+ y_bar_prime = (1 - y_prime) / (num_classes - 1.0)
+ y_tilde = (1 - epsilon_y) * y + epsilon_y * y_bar_prime
+
+ return x_tilde.detach(), y_tilde.detach()
\ No newline at end of file
diff --git a/case_studies/interpolation_training/adv_interp_train.sh b/case_studies/interpolation_training/adv_interp_train.sh
new file mode 100644
index 0000000..825237b
--- /dev/null
+++ b/case_studies/interpolation_training/adv_interp_train.sh
@@ -0,0 +1,16 @@
+export PYTHONPATH=./:$PYTHONPATH
+checkpoint_dir=~/models/adv_interp_model/
+mkdir -p $checkpoint_dir
+CUDA_VISIBLE_DEVICES=0 python train_adv_interp.py \
+ --resume \
+ --lr=0.1 \
+ --model_dir=$checkpoint_dir \
+ --init_model_pass=-1 \
+ --max_epoch=200 \
+ --save_epochs=10 \
+ --decay_epoch1=60 \
+ --decay_epoch2=90 \
+ --batch_size_train=128 \
+ --label_adv_delta=0.5 \
+ --save_epochs=100 \
+
diff --git a/case_studies/interpolation_training/binarization_test.sh b/case_studies/interpolation_training/binarization_test.sh
new file mode 100644
index 0000000..d703531
--- /dev/null
+++ b/case_studies/interpolation_training/binarization_test.sh
@@ -0,0 +1,65 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+
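+# Runs the binarization test for the adversarial-interpolation defense with
+# PGD and AutoPGD-DLR, repeating the evaluation with 20, 200 and 400 attack steps.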
+# kwargs=""
+kwargs="--sample-from-corners"
+echo "Epsilon: $epsilon"
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and few steps (20)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+PYTHONPATH=$(pwd) python3 case_studies/interpolation_training/eval.py \
+ --model-path=checkpoints/adversarial_interpolation_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=pgd-autopgddlr \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=999 \
+ --n-boundary-points=10 \
+ --resume \
+ --epsilon=$epsilon \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and more steps (200)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+PYTHONPATH=$(pwd) python3 case_studies/interpolation_training/eval.py \
+ --model-path=checkpoints/adversarial_interpolation_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=pgd-autopgddlr \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=999 \
+ --n-boundary-points=10 \
+ --resume \
+ --epsilon=$epsilon \
+ --more-steps \
+ $kwargs
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Using epsilon = $epsilon and more steps (400)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+PYTHONPATH=$(pwd) python3 case_studies/interpolation_training/eval.py \
+ --model-path=checkpoints/adversarial_interpolation_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=pgd-autopgddlr \
+ --batch_size_test=1 \
+ --binarization-test \
+ --num_samples_test=$nsamples \
+ --n-inner-points=999 \
+ --n-boundary-points=10 \
+ --resume \
+ --epsilon=$epsilon \
+ --more-more-steps \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/interpolation_training/eval.py b/case_studies/interpolation_training/eval.py
new file mode 100644
index 0000000..a27c3b9
--- /dev/null
+++ b/case_studies/interpolation_training/eval.py
@@ -0,0 +1,419 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import inspect
+import os
+import sys
+import time
+import warnings
+
+import numpy as np
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from tqdm import tqdm
+
+import active_tests.decision_boundary_binarization
+import it_utils
+from it_utils import Attack_PGD, Attack_AutoPGD
+from it_utils import CWLoss
+from models import *
+
+warnings.simplefilter('once', RuntimeWarning)
+
+currentdir = os.path.dirname(
+ os.path.abspath(inspect.getfile(inspect.currentframe())))
+grandparentdir = os.path.dirname(os.path.dirname(currentdir))
+sys.path.insert(0, grandparentdir)
+
+parser = argparse.ArgumentParser(
+ description='Adversarial Interpolation Training')
+
+parser.register('type', 'bool', it_utils.str2bool)
+
+parser.add_argument('--resume',
+ '-r',
+ action='store_true',
+ help='resume from checkpoint')
+parser.add_argument('--attack', default=True, type='bool', help='attack')
+parser.add_argument('--model_dir', type=str, help='model path')
+parser.add_argument('--init_model_pass',
+ default='-1',
+ type=str,
+ help='init model pass')
+
+parser.add_argument('--attack_method',
+ default='pgd',
+ type=str,
+ help='adv_mode (natural, pgd or cw)')
+parser.add_argument('--attack_method_list', type=str)
+
+parser.add_argument('--log_step', default=7, type=int, help='log_step')
+
+parser.add_argument('--num_classes', default=10, type=int, help='num classes')
+parser.add_argument('--batch_size_test',
+ default=100,
+ type=int,
+ help='batch size for testing')
+parser.add_argument('--image_size', default=32, type=int, help='image size')
+
+parser.add_argument('--binarization-test', action="store_true")
+parser.add_argument('--model-path', type=str, help='model path', default=None)
+parser.add_argument('--num_samples_test',
+ default=-1,
+ type=int)
+
+parser.add_argument('--n-inner-points',
+ default=50,
+ type=int)
+
+parser.add_argument('--n-boundary-points',
+ default=10,
+ type=int)
+
+parser.add_argument("--epsilon", default=8, type=int)
+parser.add_argument("--more-steps", action="store_true")
+parser.add_argument("--more-more-steps", action="store_true")
+parser.add_argument("--sample-from-corners", action="store_true")
+
+args = parser.parse_args()
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+])
+
+testset = torchvision.datasets.CIFAR10(root='./data',
+ train=False,
+ download=True,
+ transform=transform_test)
+
+testloader = torch.utils.data.DataLoader(testset,
+ batch_size=args.batch_size_test,
+ shuffle=False,
+ num_workers=2)
+
+print('======= WideResNet 28-10 ========')
+basic_net = WideResNet(depth=28, num_classes=args.num_classes, widen_factor=10)
+
+basic_net = basic_net.to(device)
+
+
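+# Adapts a model that expects inputs in [-1, 1] to inputs in [0, 1] by
+# rescaling with (x - 0.5) / 0.5.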
+class ZeroOneOneOneNetwork(nn.Module):
+ def __init__(self, model):
+ super().__init__()
+ self.model = model
+
+ def forward(self, x, *args, **kwargs):
+ return self.model((x - 0.5) / 0.5, *args, **kwargs)
+
+
+if device == 'cuda':
+ basic_net = torch.nn.DataParallel(basic_net)
+ cudnn.benchmark = True
+
+if args.binarization_test:
+ args.num_classes = 2
+
+if args.num_samples_test == -1:
+ args.num_samples_test = len(testset)
+
+# load parameters
+
+if args.resume and args.init_model_pass != '-1':
+ # Load checkpoint.
+ print('==> Resuming from saved checkpoint..')
+ if args.model_dir is not None:
+ f_path_latest = os.path.join(args.model_dir, 'latest')
+ f_path = os.path.join(args.model_dir,
+ ('checkpoint-%s' % args.init_model_pass))
+ elif args.model_path is not None:
+ f_path = args.model_path
+ f_path_latest = args.model_path
+
+ if not os.path.isfile(f_path):
+ print('train from scratch: no checkpoint directory or file found')
+ elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
+ checkpoint = torch.load(f_path_latest)
+ basic_net.load_state_dict(checkpoint['net'])
+ start_epoch = checkpoint['epoch']
+ print('resuming from epoch %s in latest' % start_epoch)
+ elif os.path.isfile(f_path):
+ checkpoint = torch.load(f_path)
+ basic_net.load_state_dict(checkpoint['net'])
+ start_epoch = checkpoint['epoch']
+ print('resuming from epoch %s' % start_epoch)
+ elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
+ print('train from scratch: no checkpoint directory or file found')
+
+# configs
+config_natural = {'train': False, 'attack': False}
+
+config_fgsm = {
+ 'train': False,
+ 'v_min': -1.0,
+ 'v_max': 1.0,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 1,
+ 'step_size': args.epsilon / 255.0,
+ 'random_start': True
+}
+
+config_pgd = {
+ 'train': False,
+ 'v_min': -1.0,
+ 'v_max': 1.0,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'step_size': args.epsilon / 4.0 / 255.0,
+ 'random_start': True,
+ 'loss_func': torch.nn.CrossEntropyLoss(reduction='none')
+}
+
+config_cw = {
+ 'train': False,
+ 'v_min': -1.0,
+ 'v_max': 1.0,
+ 'epsilon': args.epsilon / 255,
+ 'num_steps': 20 * 10,
+ 'step_size': args.epsilon / 4.0 / 255 / 5,
+ 'random_start': True,
+ 'loss_func': CWLoss(args.num_classes)
+}
+
+config_auto_pgd_ce = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'loss_func': "ce"
+}
+
+config_auto_pgd_dlr = {
+ 'train': False,
+ 'targeted': False,
+ 'epsilon': args.epsilon / 255.0,
+ 'num_steps': 20,
+ 'loss_func': "logit-diff"
+}
+
+config_auto_pgd_dlr_t = {
+ **config_auto_pgd_dlr,
+ "targeted": True,
+ "n_classes": 10,
+}
+
+config_auto_pgd_ce_plus = {
+ **config_auto_pgd_ce,
+ "n_restarts": 4
+}
+
+config_auto_pgd_dlr_plus = {
+ **config_auto_pgd_dlr,
+ "n_restarts": 4
+}
+
+if not args.binarization_test:
+ config_fgsm["epsilon"] *= 2.0
+ config_pgd["epsilon"] *= 2.0
+ config_cw["epsilon"] *= 2.0
+ config_fgsm["step_size"] *= 2.0
+ config_pgd["step_size"] *= 2.0
+ config_cw["step_size"] *= 2.0
+else:
+ config_auto_pgd_dlr_t["n_classes"] = 2
+
+if args.more_steps:
+ config_pgd["step_size"] /= 5.0
+ config_cw["step_size"] /= 5.0
+ config_pgd["num_steps"] *= 10
+ config_cw["num_steps"] *= 10
+
+ config_auto_pgd_ce["num_steps"] *= 10
+ config_auto_pgd_dlr["num_steps"] *= 10
+ print("More & finer steps")
+
+if args.more_more_steps:
+ config_pgd["step_size"] /= 5.0
+ config_cw["step_size"] /= 5.0
+ config_pgd["num_steps"] *= 20
+ config_cw["num_steps"] *= 20
+
+ config_auto_pgd_ce["num_steps"] *= 20
+ config_auto_pgd_dlr["num_steps"] *= 20
+ print("More & finer steps")
+
+
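+# Runs the decision-boundary binarization test: for each test sample a binary
+# discrimination problem is constructed around the feature extractor, and the
+# wrapped attack (via DummyModel) is used to find adversarial examples on it.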
+def test_test(net, feature_extractor, config):
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ class DummyModel(nn.Module):
+ def __init__(self, model):
+ super().__init__()
+ self.model = model
+ def forward(self, x, mode=None):
+ del mode
+ return self.model(x)
+
+ scores_logit_differences_and_validation_accuracies = active_tests.decision_boundary_binarization.interior_boundary_discrimination_attack(
+ feature_extractor,
+ testloader,
+ attack_fn=lambda m, l, kwargs: test_main(0, create_attack(DummyModel(m)), l, verbose=False,
+ inverse_acc=True, return_advs=True),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=config["epsilon"],
+ norm="linf",
+ lr=10000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=args.num_samples_test,
+ device=device,
+ n_samples_evaluation=200,#args.num_samples_test * 10,
+ n_samples_asr_evaluation=200,
+ batch_size=args.num_samples_test * 5,
+ rescale_logits="adaptive",
+ decision_boundary_closeness=0.999,
+ sample_training_data_from_corners=args.sample_from_corners
+ )
+
+ print(active_tests.decision_boundary_binarization.format_result(
+ scores_logit_differences_and_validation_accuracies,
+ args.num_samples_test))
+
+
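+# Evaluates the (attacked) network on the given loader; returns the accuracy
+# (converted to a fractional error rate if inverse_acc=True) together with the
+# collected adversarial examples and their logits.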
+def test_main(epoch, net, loader, verbose=False, inverse_acc=False,
+ return_advs=False):
+ net.eval()
+ test_loss = 0.0
+ correct = 0.0
+ total = 0.0
+
+ if verbose:
+ iterator = tqdm(loader, ncols=0, leave=False)
+ else:
+ iterator = loader
+
+ x_adv = []
+ logits_adv = []
+ for batch_idx, (inputs, targets) in enumerate(iterator):
+ start_time = time.time()
+ inputs, targets = inputs.to(device), targets.to(device)
+
+ pert_inputs = inputs.detach()
+
+ res = net(pert_inputs, targets)
+ if isinstance(res, tuple):
+ outputs, _, x_adv_it = res
+ else:
+ outputs = res
+
+ if return_advs:
+ x_adv.append(x_adv_it)
+ else:
+ del x_adv_it
+ logits_adv.append(outputs.detach().cpu())
+
+ duration = time.time() - start_time
+
+ _, predicted = outputs.max(1)
+ batch_size = targets.size(0)
+ total += batch_size
+ correct_num = predicted.eq(targets).sum().item()
+ correct += correct_num
+ if verbose:
+ iterator.set_description(
+ str(predicted.eq(targets).sum().item() / targets.size(0)))
+
+ if batch_idx % args.log_step == 0:
+ print(
+ "Step %d, Duration %.2f, Current-batch-acc %.2f, Avg-acc %.2f"
+ % (batch_idx, duration, 100. * correct_num / batch_size,
+ 100. * correct / total))
+
+ if return_advs:
+ x_adv = torch.cat(x_adv, 0)
+ logits_adv = torch.cat(logits_adv, 0)
+
+ acc = 100. * correct / total
+ if verbose:
+ print('Test accuracy: %.2f' % (acc))
+
+ if inverse_acc:
+ acc = (100 - acc) / 100.0
+
+ return acc, (x_adv, logits_adv)
+
+
+attack_list = args.attack_method_list.split('-')
+attack_num = len(attack_list)
+
+print(f"Epsilon: {args.epsilon}")
+
+for attack_idx in range(attack_num):
+
+ args.attack_method = attack_list[attack_idx]
+
+ if args.attack_method == 'natural':
+ print('======Evaluation using Natural Images======')
+ create_attack = lambda n: Attack_PGD(n, config_natural)
+ elif args.attack_method.upper() == 'FGSM':
+ print('======Evaluation under FGSM Attack======')
+ create_attack = lambda n: Attack_PGD(n, config_fgsm)
+ elif args.attack_method.upper() == 'PGD':
+ print('======Evaluation under PGD Attack======')
+ create_attack = lambda n: Attack_PGD(n, config_pgd)
+ elif args.attack_method.upper() == 'CW':
+ print('======Evaluation under CW Attack======')
+ create_attack = lambda n: Attack_PGD(n, config_cw)
+ elif args.attack_method.upper() == 'AUTOPGDCE':
+ print()
+ print('-----Auto PGD (CE) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce)
+ elif args.attack_method.upper() == 'AUTOPGDDLR':
+ print()
+ print('-----Auto PGD (DLR) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr)
+ elif args.attack_method.upper() == 'AUTOPGDDLRT':
+ print()
+ print('-----Auto PGD (DLR, targeted) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_t)
+ elif args.attack_method.upper() == 'AUTOPGDCE+':
+ print()
+ print('-----Auto PGD+ (CE) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce_plus)
+ elif args.attack_method.upper() == 'AUTOPGDDLR+':
+ print()
+ print('-----Auto PGD+ (DLR) adv mode -----')
+ create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_plus)
+ else:
+ raise Exception(
+        'Invalid attack method specified: {}'
+ .format(args.attack_method))
+
+ if args.binarization_test:
+ specific_net = ZeroOneOneOneNetwork(basic_net)
+ net = create_attack(specific_net)
+
+ test_test(net, basic_net, config_pgd)
+ else:
+ net = create_attack(basic_net)
+
+ test_main(0, net, testloader, verbose=True)
diff --git a/case_studies/interpolation_training/eval.sh b/case_studies/interpolation_training/eval.sh
new file mode 100644
index 0000000..90138bf
--- /dev/null
+++ b/case_studies/interpolation_training/eval.sh
@@ -0,0 +1,12 @@
+epsilon=${1:-8}
+attack=${2:-pgd-cw}
+
+export PYTHONPATH=./:$PYTHONPATH
+python case_studies/interpolation_training/eval.py \
+ --model-path=checkpoints/adversarial_inperpolation_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=$attack \
+ --batch_size_test=128 \
+ --epsilon=$epsilon \
+ --resume
\ No newline at end of file
diff --git a/case_studies/interpolation_training/eval_pretrain.sh b/case_studies/interpolation_training/eval_pretrain.sh
new file mode 100644
index 0000000..7109de4
--- /dev/null
+++ b/case_studies/interpolation_training/eval_pretrain.sh
@@ -0,0 +1,9 @@
+export PYTHONPATH=./:$PYTHONPATH
+checkpoint_dir=./pre_trained_adv_interp_models/
+CUDA_VISIBLE_DEVICES=1 python eval.py \
+ --model_dir=$checkpoint_dir \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=natural-fgsm-pgd-cw \
+ --batch_size_test=80 \
+ --resume
\ No newline at end of file
diff --git a/case_studies/interpolation_training/eval_test_pretrain.sh b/case_studies/interpolation_training/eval_test_pretrain.sh
new file mode 100644
index 0000000..79d269e
--- /dev/null
+++ b/case_studies/interpolation_training/eval_test_pretrain.sh
@@ -0,0 +1,8 @@
+export PYTHONPATH=./:$PYTHONPATH
+python case_studies/interpolation_training/eval.py \
+ --model-path=checkpoints/interpolation_training_linf_200_epochs.pth \
+ --init_model_pass=latest \
+ --attack=True \
+ --attack_method_list=natural-fgsm-pgd-cw \
+ --batch_size_test=80 \
+ --resume
\ No newline at end of file
diff --git a/case_studies/interpolation_training/it_utils.py b/case_studies/interpolation_training/it_utils.py
new file mode 100644
index 0000000..0a325ca
--- /dev/null
+++ b/case_studies/interpolation_training/it_utils.py
@@ -0,0 +1,315 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Utility functions, PGD attacks and Loss functions
+'''
+import math
+import numpy as np
+import random
+import scipy.io
+import copy
+
+from torch.autograd import Variable
+
+from attacks import autopgd
+from attacks import pgd
+from networks import *
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+
+class Attack_AutoPGD(nn.Module):
+  # Back-propagate
+ def __init__(self, basic_net, config, attack_net=None):
+ super(Attack_AutoPGD, self).__init__()
+ self.basic_net = basic_net
+ self.attack_net = attack_net
+ self.epsilon = config['epsilon']
+ self.n_restarts = 0 if "num_restarts" not in config else \
+ config["num_restarts"]
+ self.num_steps = config['num_steps']
+ self.loss_func = "ce" if 'loss_func' not in config.keys(
+ ) else config['loss_func']
+
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+ self.box_type = 'white' if 'box_type' not in config.keys(
+ ) else config['box_type']
+
+ self.targeted = False if 'targeted' not in config.keys(
+ ) else config['targeted']
+ self.n_classes = 10 if 'n_classes' not in config.keys(
+ ) else config['n_classes']
+
+ def forward(self,
+ inputs,
+ targets,
+ attack=True,
+ targeted_label=-1,
+ batch_idx=0):
+
+ assert targeted_label == -1
+
+ def net(x):
+ output = self.basic_net(x)
+ if isinstance(output, tuple):
+ return output[0]
+ else:
+ return output
+
+ if attack:
+ temp = autopgd.auto_pgd(
+ model=net,
+ x=inputs, y=targets, n_steps=self.num_steps,
+ loss=self.loss_func,
+ epsilon=self.epsilon,
+ norm="linf",
+ n_restarts=self.n_restarts,
+ targeted=self.targeted,
+ n_averaging_steps=1,
+ n_classes=self.n_classes
+ )
+ x_adv = temp[0]
+ else:
+ x_adv = inputs
+
+ logits_pert = net(x_adv)
+ targets_prob = torch.softmax(logits_pert, -1)
+
+ return logits_pert, targets_prob.detach(), x_adv.detach()
+
+
+class Attack_PGD(nn.Module):
+ def __init__(self, basic_net, config):
+ super(Attack_PGD, self).__init__()
+ self.basic_net = basic_net
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+
+ self.attack = True if 'attack' not in config.keys(
+ ) else config['attack']
+ if self.attack:
+ self.rand = config['random_start']
+ self.step_size = config['step_size']
+ self.v_min = config['v_min']
+ self.v_max = config['v_max']
+ self.epsilon = config['epsilon']
+ self.num_steps = config['num_steps']
+ self.loss_func = torch.nn.CrossEntropyLoss(
+ reduction='none') if 'loss_func' not in config.keys(
+ ) else config['loss_func']
+
+ # print(config)
+
+ def forward(self, inputs, targets):
+
+ if not self.attack:
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+ outputs = self.basic_net(inputs, mode="logits")
+ return outputs, None
+
+ #aux_net = pickle.loads(pickle.dumps(self.basic_net))
+ aux_net = copy.deepcopy(self.basic_net)
+
+ aux_net.eval()
+ logits_pred_nat = aux_net(inputs, mode="logits")
+ targets_prob = F.softmax(logits_pred_nat.float(), dim=1)
+
+ num_classes = targets_prob.size(1)
+
+ outputs = aux_net(inputs, mode="logits")
+ targets_prob = F.softmax(outputs.float(), dim=1)
+ y_tensor_adv = targets
+
+ x = inputs.detach()
+ if self.rand:
+ x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
+ x_org = x.detach()
+ loss_array = np.zeros((inputs.size(0), self.num_steps))
+
+ for i in range(self.num_steps):
+ x.requires_grad_()
+ if x.grad is not None and x.grad.data is not None:
+ x.grad.data.zero_()
+ if x.grad is not None:
+ x.grad.data.fill_(0)
+ aux_net.eval()
+ logits = aux_net(x, mode="logits")
+ loss = self.loss_func(logits, y_tensor_adv)
+ loss = loss.mean()
+ aux_net.zero_grad()
+ loss.backward()
+
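+      # ascend along the sign of the gradient, then project back onto the
+      # L-inf ball around the clean inputs and clamp to the valid value range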
+ x_adv = x.data + self.step_size * torch.sign(x.grad.data)
+ x_adv = torch.min(torch.max(x_adv, inputs - self.epsilon),
+ inputs + self.epsilon)
+ x_adv = torch.clamp(x_adv, self.v_min, self.v_max)
+ x = Variable(x_adv)
+
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+
+ logits_pert = self.basic_net(x.detach(), mode="logits")
+
+ return logits_pert, targets_prob.detach(), x.detach()
+
+
+class Attack_BetterPGD(nn.Module):
+ def __init__(self, basic_net, config):
+ super(Attack_BetterPGD, self).__init__()
+ self.basic_net = basic_net
+ self.train_flag = True if 'train' not in config.keys(
+ ) else config['train']
+
+ self.attack = True if 'attack' not in config.keys(
+ ) else config['attack']
+ if self.attack:
+ self.rand = config['random_start']
+ self.step_size = config['step_size']
+ self.v_min = config['v_min']
+ self.v_max = config['v_max']
+ self.epsilon = config['epsilon']
+ self.num_steps = config['num_steps']
+ self.loss_func = torch.nn.CrossEntropyLoss(
+ reduction='none') if 'loss_func' not in config.keys(
+ ) else config['loss_func']
+
+ # print(config)
+
+ def forward(self, inputs, targets):
+
+ if not self.attack:
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+ outputs = self.basic_net(inputs, mode="logits")
+ return outputs, None
+
+ #aux_net = pickle.loads(pickle.dumps(self.basic_net))
+ aux_net = copy.deepcopy(self.basic_net)
+
+ def net(x):
+ return aux_net(x, mode="logits")
+
+ aux_net.eval()
+
+ outputs = aux_net(inputs, mode="logits")
+ targets_prob = F.softmax(outputs.float(), dim=1)
+
+ sign = 1.0
+ x_adv = pgd.general_pgd(
+ loss_fn=lambda x, y: sign * self.loss_func(net(x), y),
+ is_adversarial_fn=lambda x, y: net(x).argmax(-1) == y
+ if targets != -1 else net(x).argmax(-1) != y,
+ x=inputs, y=targets, n_steps=self.num_steps,
+ step_size=self.step_size,
+ epsilon=self.epsilon,
+ norm="linf",
+ random_start=self.rand
+ )[0]
+
+ if self.train_flag:
+ self.basic_net.train()
+ else:
+ self.basic_net.eval()
+
+ logits_pert = self.basic_net(x_adv.detach(), mode="logits")
+
+ return logits_pert, targets_prob.detach(), x_adv.detach()
+
+
+class softCrossEntropy(nn.Module):
+ def __init__(self, reduce=True):
+ super(softCrossEntropy, self).__init__()
+ self.reduce = reduce
+ return
+
+ def forward(self, inputs, target):
+ """
+ :param inputs: predictions
+ :param target: target labels in vector form
+ :return: loss
+ """
+ log_likelihood = -F.log_softmax(inputs, dim=1)
+ sample_num, class_num = target.shape
+ if self.reduce:
+ loss = torch.sum(torch.mul(log_likelihood, target)) / sample_num
+ else:
+ loss = torch.sum(torch.mul(log_likelihood, target), 1)
+
+ return loss
+
+
+class CWLoss(nn.Module):
+ def __init__(self, num_classes, margin=50, reduce=True):
+ super(CWLoss, self).__init__()
+ self.num_classes = num_classes
+ self.margin = margin
+ self.reduce = reduce
+ return
+
+ def forward(self, logits, targets):
+ """
+    :param logits: predicted logits
+ :param targets: target labels
+ :return: loss
+ """
+ onehot_targets = one_hot_tensor(targets, self.num_classes,
+ targets.device)
+
+ self_loss = torch.sum(onehot_targets * logits, dim=1)
+ other_loss = torch.max(
+ (1 - onehot_targets) * logits - onehot_targets * 1000, dim=1)[0]
+
+ loss = -torch.sum(torch.clamp(self_loss - other_loss + self.margin, 0))
+
+ if self.reduce:
+ sample_num = onehot_targets.shape[0]
+ loss = loss / sample_num
+
+ return loss
+
+
+def cos_dist(x, y):
+ cos = nn.CosineSimilarity(dim=1, eps=1e-6)
+ batch_size = x.size(0)
+ c = torch.clamp(1 - cos(x.view(batch_size, -1), y.view(batch_size, -1)),
+ min=0)
+ return c.mean()
+
+
+def one_hot_tensor(y_batch_tensor, num_classes, device):
+  # use the passed-in device instead of hard-coding CUDA tensors
+  y_tensor = torch.zeros(y_batch_tensor.size(0), num_classes,
+                         dtype=torch.float32, device=device)
+ y_tensor[np.arange(len(y_batch_tensor)), y_batch_tensor] = 1.0
+ return y_tensor
+
+
+def get_acc(outputs, targets):
+ _, predicted = outputs.max(1)
+ total = targets.size(0)
+ correct = predicted.eq(targets).sum().item()
+ acc = 1.0 * correct / total
+ return acc
+
+
+def str2bool(v):
+ return v.lower() in ("yes", "true", "t", "1")
\ No newline at end of file
diff --git a/case_studies/interpolation_training/models/__init__.py b/case_studies/interpolation_training/models/__init__.py
new file mode 100644
index 0000000..72c479d
--- /dev/null
+++ b/case_studies/interpolation_training/models/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .wideresnet import *
\ No newline at end of file
diff --git a/case_studies/interpolation_training/models/wideresnet.py b/case_studies/interpolation_training/models/wideresnet.py
new file mode 100644
index 0000000..409716b
--- /dev/null
+++ b/case_studies/interpolation_training/models/wideresnet.py
@@ -0,0 +1,145 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class BasicBlock(nn.Module):
+ def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
+ super(BasicBlock, self).__init__()
+ self.bn1 = nn.BatchNorm2d(in_planes)
+ self.relu1 = nn.ReLU(inplace=True)
+ self.conv1 = nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False)
+ self.bn2 = nn.BatchNorm2d(out_planes)
+ self.relu2 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(out_planes,
+ out_planes,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False)
+ self.droprate = dropRate
+ self.equalInOut = (in_planes == out_planes)
+ self.convShortcut = (not self.equalInOut) and nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=1,
+ stride=stride,
+ padding=0,
+ bias=False) or None
+
+ def forward(self, x):
+ if not self.equalInOut:
+ x = self.relu1(self.bn1(x))
+ else:
+ out = self.relu1(self.bn1(x))
+ out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
+ if self.droprate > 0:
+ out = F.dropout(out, p=self.droprate, training=self.training)
+ out = self.conv2(out)
+ return torch.add(x if self.equalInOut else self.convShortcut(x), out)
+
+
+class NetworkBlock(nn.Module):
+ def __init__(self,
+ nb_layers,
+ in_planes,
+ out_planes,
+ block,
+ stride,
+ dropRate=0.0):
+ super(NetworkBlock, self).__init__()
+ self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
+ stride, dropRate)
+
+ def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
+ dropRate):
+ layers = []
+ for i in range(int(nb_layers)):
+ layers.append(
+ block(i == 0 and in_planes or out_planes, out_planes,
+ i == 0 and stride or 1, dropRate))
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ return self.layer(x)
+
+
+class WideResNet(nn.Module):
+ def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
+ super(WideResNet, self).__init__()
+ nChannels = [
+ 16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
+ ]
+ assert ((depth - 4) % 6 == 0)
+ n = (depth - 4) / 6
+ block = BasicBlock
+ self.conv1 = nn.Conv2d(3,
+ nChannels[0],
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False)
+ # block 1
+ self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
+ dropRate)
+ # block 2
+ self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
+ dropRate)
+ # block 3
+ self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
+ dropRate)
+
+ self.bn1 = nn.BatchNorm2d(nChannels[3])
+ self.relu = nn.ReLU(inplace=True)
+ self.fc = nn.Linear(nChannels[3], num_classes)
+ self.nChannels = nChannels[3]
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+ elif isinstance(m, nn.Linear):
+ m.bias.data.zero_()
+
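+  # the forward pass can return logits (default), the penultimate features, or both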
+ def forward(self, x, mode='logits', features_only=False, features_and_logits=False):
+ out = self.conv1(x)
+ out = self.block1(out)
+ out = self.block2(out)
+ out = self.block3(out)
+ out = self.relu(self.bn1(out))
+ out = F.avg_pool2d(out, 8)
+ out = out.view(-1, self.nChannels)
+
+ if features_only:
+ return out.view(x.size(0), -1)
+ if features_and_logits:
+ return out.view(x.size(0), -1), self.fc(out)
+ if mode.lower() == 'logits':
+ return self.fc(out)
+ elif mode.lower() == 'feature':
+ return out.view(x.size(0), -1)
+ else:
+ raise Exception('unsupported mode is specified')
diff --git a/case_studies/interpolation_training/train_adv_interp.py b/case_studies/interpolation_training/train_adv_interp.py
new file mode 100644
index 0000000..a66d58a
--- /dev/null
+++ b/case_studies/interpolation_training/train_adv_interp.py
@@ -0,0 +1,239 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Adversarial Interpolation Training'''
+from __future__ import print_function
+import time
+import numpy as np
+import random
+import copy
+import os
+import argparse
+import datetime
+import pickle
+import it_utils
+
+from it_utils import softCrossEntropy
+from it_utils import one_hot_tensor
+from adv_interp import adv_interp
+from tqdm import tqdm
+from PIL import Image
+from networks import *
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torch.nn.functional as F
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from torch.autograd.gradcheck import zero_gradients
+from torch.autograd import Variable
+
+parser = argparse.ArgumentParser(
+    description='Adversarial Interpolation Training')
+
+parser.register('type', 'bool', it_utils.str2bool)
+
+parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
+parser.add_argument('--label_adv_delta',
+ default=0.5,
+ type=float,
+ help='label_adv_delta')
+parser.add_argument('--resume',
+ '-r',
+ action='store_true',
+ help='resume from checkpoint')
+parser.add_argument('--model_dir', type=str, help='model path')
+parser.add_argument('--init_model_pass',
+ default='-1',
+ type=str,
+ help='init model pass')
+parser.add_argument('--save_epochs', default=10, type=int, help='save period')
+
+parser.add_argument('--max_epoch', default=200, type=int, help='max number of epochs')
+parser.add_argument('--decay_epoch1', default=60, type=int, help='first lr decay epoch')
+parser.add_argument('--decay_epoch2', default=90, type=int, help='second lr decay epoch')
+parser.add_argument('--decay_rate',
+                    default=0.1,
+                    type=float,
+                    help='lr decay rate')
+parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
+parser.add_argument('--weight_decay',
+ default=2e-4,
+ type=float,
+ help='weight decay factor')
+
+parser.add_argument('--log_step', default=10, type=int, help='log_step')
+
+parser.add_argument('--num_classes', default=10, type=int, help='num classes')
+parser.add_argument('--image_size', default=32, type=int, help='image size')
+parser.add_argument('--batch_size_train',
+ default=128,
+ type=int,
+ help='batch size for training')
+
+args = parser.parse_args()
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+start_epoch = 1
+
+transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+])
+
+trainset = torchvision.datasets.CIFAR10(root='./data',
+ train=True,
+ download=True,
+ transform=transform_train)
+
+trainloader = torch.utils.data.DataLoader(trainset,
+ batch_size=args.batch_size_train,
+ shuffle=True,
+ num_workers=2)
+
+print('======= WideResNet 28-10 ========')
+net = WideResNet(depth=28, num_classes=args.num_classes, widen_factor=10)
+
+net = net.to(device)
+
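+# inputs are normalized to [-1, 1] by transform_train, so epsilon and step size
+# below are expressed on that doubled value range (hence the factor of 2)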
+config_adv_interp = {
+ 'v_min': -1.0,
+ 'v_max': 1.0,
+ 'epsilon': 8.0 / 255 * 2,
+ 'num_steps': 1,
+ 'step_size': 8.0 / 255 * 2,
+ 'label_adv_delta': args.label_adv_delta,
+}
+
+if device == 'cuda':
+ net = torch.nn.DataParallel(net)
+ cudnn.benchmark = True
+
+optimizer = optim.SGD(net.parameters(),
+ lr=args.lr,
+ momentum=args.momentum,
+ weight_decay=args.weight_decay)
+
+if args.resume and args.init_model_pass != '-1':
+ print('Resume training from checkpoint..')
+ f_path_latest = os.path.join(args.model_dir, 'latest')
+ f_path = os.path.join(args.model_dir,
+ ('checkpoint-%s' % args.init_model_pass))
+
+ if not os.path.isdir(args.model_dir):
+ print('train from scratch: no checkpoint directory or file found')
+ elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
+ checkpoint = torch.load(f_path_latest)
+ pretrained_dict = checkpoint['net']
+ model_dict = net.state_dict()
+ model_dict.update(pretrained_dict)
+ net.load_state_dict(model_dict, strict=False)
+ #optimizer.load_state_dict(checkpoint['optimizer'])
+ start_epoch = checkpoint['epoch'] + 1
+ print('resuming training from epoch %s in latest' % start_epoch)
+ elif os.path.isfile(f_path):
+ checkpoint = torch.load(f_path)
+ net.load_state_dict(checkpoint['net'])
+ #optimizer.load_state_dict(checkpoint['optimizer'])
+ start_epoch = checkpoint['epoch'] + 1
+ print('resuming training from epoch %s' % (start_epoch - 1))
+ elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
+ print('train from scratch: no checkpoint directory or file found')
+
+soft_xent_loss = softCrossEntropy()
+
+
+def train_one_epoch(epoch, net):
+ print('\n Training for Epoch: %d' % epoch)
+
+ net.train()
+
+ # learning rate schedule
+ if epoch < args.decay_epoch1:
+ lr = args.lr
+ elif epoch < args.decay_epoch2:
+ lr = args.lr * args.decay_rate
+ else:
+ lr = args.lr * args.decay_rate * args.decay_rate
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+
+ iterator = tqdm(trainloader, ncols=0, leave=False)
+ for batch_idx, (inputs, targets) in enumerate(iterator):
+ start_time = time.time()
+ inputs, targets = inputs.to(device), targets.to(device)
+
+ targets_onehot = one_hot_tensor(targets, args.num_classes, device)
+
+ x_tilde, y_tilde = adv_interp(inputs, targets_onehot, net,
+ args.num_classes,
+ config_adv_interp['epsilon'],
+ config_adv_interp['label_adv_delta'],
+ config_adv_interp['v_min'],
+ config_adv_interp['v_max'])
+
+ outputs = net(x_tilde, mode='logits')
+ loss = soft_xent_loss(outputs, y_tilde)
+
+ optimizer.zero_grad()
+ loss.backward()
+
+ optimizer.step()
+
+ train_loss = loss.detach().item()
+
+ duration = time.time() - start_time
+ if batch_idx % args.log_step == 0:
+
+ adv_acc = it_utils.get_acc(outputs, targets)
+ # natural
+ net_cp = copy.deepcopy(net)
+ nat_outputs = net_cp(inputs, mode='logits')
+ nat_acc = it_utils.get_acc(nat_outputs, targets)
+ print(
+ "Epoch %d, Step %d, lr %.4f, Duration %.2f, Training nat acc %.2f, Training adv acc %.2f, Training adv loss %.4f"
+ % (epoch, batch_idx, lr, duration, 100 * nat_acc,
+ 100 * adv_acc, train_loss))
+
+ if epoch % args.save_epochs == 0 or epoch >= args.max_epoch - 2:
+ print('Saving..')
+ f_path = os.path.join(args.model_dir, ('checkpoint-%s' % epoch))
+ state = {
+ 'net': net.state_dict(),
+ 'epoch': epoch,
+ #'optimizer': optimizer.state_dict()
+ }
+ if not os.path.isdir(args.model_dir):
+ os.makedirs(args.model_dir)
+ torch.save(state, f_path)
+
+ if epoch >= 1:
+ print('Saving latest model for epoch %s..' % (epoch))
+ f_path = os.path.join(args.model_dir, 'latest')
+ state = {
+ 'net': net.state_dict(),
+ 'epoch': epoch,
+ #'optimizer': optimizer.state_dict()
+ }
+ if not os.path.isdir(args.model_dir):
+ os.mkdir(args.model_dir)
+ torch.save(state, f_path)
+
+
+for epoch in range(start_epoch, args.max_epoch + 1):
+ train_one_epoch(epoch, net)
diff --git a/case_studies/jarn/README.md b/case_studies/jarn/README.md
new file mode 100644
index 0000000..7aec313
--- /dev/null
+++ b/case_studies/jarn/README.md
@@ -0,0 +1,47 @@
+# JARN_ICLR2020
+This is our TensorFlow implementation of Jacobian Adversarially Regularized Networks (JARN).
+
+**Jacobian Adversarially Regularized Networks for Robustness (ICLR 2020)**
+*Alvin Chan, Yi Tay, Yew Soon Ong, Jie Fu*
+https://arxiv.org/abs/1912.10185
+
+TL;DR: We show that training classifiers to produce salient input Jacobian matrices with a GAN-like regularization can boost adversarial robustness.
+
+
+## Dependencies
+1. TensorFlow 1.14.0
+2. Python 3.7
+
+
+## Usage
+1. Install dependencies with `pip install -r requirements.txt`.
+2. Run JARN training and evaluation with `sh run_train_jarn.sh`. Final evaluation output is saved in `attack_log`.
+
+
+## Code overview
+- `train_jarn.py`: trains the JARN model and subsequently evaluate on adversarial examples.
+- `pgd_attack.py`: generates adversarial examples and save them in `attacks/`.
+- `run_attack.py`: evaluates model on adversarial examples from `attacks/`.
+- `config.py`: training parameters for JARN.
+- `config_attack.py`: parameters for adversarial example evaluation.
+- `model_jarn.py`: contains code for JARN model architectures.
+- `cifar10_input.py`: provides utility functions and classes for loading the CIFAR10 dataset.
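+
+For reference, the snippet below is a condensed sketch of what `adversarial_evaluation.py` does for the PGD attack; the checkpoint directory, batch size, and attack hyperparameters are placeholder values:
+
+```python
+import tensorflow as tf
+
+import cifar10_input
+from model_jarn import Model
+from pgd_attack import LinfPGDAttack
+
+# build the classifier graph and a PGD attack on inputs scaled to [0, 1]
+model = Model(dataset="cifar10", train_batch_size=64, mode="eval")
+attack = LinfPGDAttack(model, 8.0 / 255.0, 20, 2.0 / 255.0, True, "xent",
+                       dataset="cifar10", clip_max=1.0)
+
+saver = tf.train.Saver()
+cifar = cifar10_input.CIFAR10Data("data/cifar-10-batches-py/")
+
+with tf.Session() as sess:
+  saver.restore(sess, tf.train.latest_checkpoint("checkpoints/jarn/<model_dir>"))
+  x = cifar.eval_data.xs[:64] / 255.0
+  y = cifar.eval_data.ys[:64]
+  x_adv = attack.perturb(x, y, sess)
+  logits = sess.run(model.pre_softmax, {model.x_input: x_adv})
+  print("robust accuracy:", (logits.argmax(-1) == y).mean())
+```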
+
+
+## Citation
+If you find our repository useful, please consider citing our paper:
+
+```
+@article{chan2019jacobian,
+ title={Jacobian Adversarially Regularized Networks for Robustness},
+ author={Chan, Alvin and Tay, Yi and Ong, Yew Soon and Fu, Jie},
+ journal={arXiv preprint arXiv:1912.10185},
+ year={2019}
+}
+```
+
+
+## Acknowledgements
+
+Useful code bases we used in our work:
+- https://github.com/MadryLab/cifar10_challenge (for adversarial example generation and evaluation)
\ No newline at end of file
diff --git a/case_studies/jarn/adversarial_evaluation.py b/case_studies/jarn/adversarial_evaluation.py
new file mode 100644
index 0000000..0f3417a
--- /dev/null
+++ b/case_studies/jarn/adversarial_evaluation.py
@@ -0,0 +1,140 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Adapted from the main method in pgd_attack.py.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+
+import torch
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import tensorflow as tf
+import numpy as np
+import sys
+import math
+
+
+import cifar10_input
+
+import config_attack
+from pgd_attack import LinfPGDAttack
+
+def main():
+
+ config = vars(config_attack.get_args())
+
+ tf.set_random_seed(config['tf_seed'])
+ np.random.seed(config['np_seed'])
+
+ model_file = tf.train.latest_checkpoint(config['model_dir'])
+ print("config['model_dir']: ", config['model_dir'])
+ if model_file is None:
+ print('No model found')
+ sys.exit()
+
+ print("JARN MODEL")
+ from model_jarn import Model
+ if "_zeromeaninput" in config['model_dir']:
+ model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True,
+ # added by AUTHOR
+ mode='eval')
+ else:
+ model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'],
+ # added by AUTHOR
+ mode='eval')
+
+ saver = tf.train.Saver()
+
+ data_path = config['data_path']
+
+ print("load cifar10 dataset")
+ cifar = cifar10_input.CIFAR10Data(data_path)
+
+ with tf.Session() as sess:
+ print("Using attack:", config['attack'])
+ if config['attack'] == 'pgd' or config['attack'] == 'pgd-ld':
+ attack = LinfPGDAttack(model,
+ config['epsilon'] / 255.0,
+ config['num_steps'],
+ config['step_size'],
+ config['random_start'],
+ config['loss_func'],
+ dataset=config['dataset'],
+ clip_max=1.0)
+ attack_fn = lambda x, y: attack.perturb(x, y, sess)
+ elif config['attack'] == 'apgd':
+ from autoattack import autopgd_base
+ from autoattack_adapter import ModelAdapter
+ autoattack_model = ModelAdapter(
+ model.pre_softmax, model.x_input,
+ model.y_input, sess, num_classes=10, device="cpu")
+ attack = autopgd_base.APGDAttack(
+ autoattack_model, n_restarts=5, n_iter=100, verbose=True,
+ eps=config["epsilon"] / 255.0, norm="Linf", eot_iter=1, rho=.99,
+ is_tf_model=True, device="cpu", loss='dlr')
+ attack_fn = lambda x, y: attack.perturb(
+ torch.tensor(x.transpose((0, 3, 1, 2)), device="cpu"),
+ torch.tensor(y, device="cpu")
+ ).detach().cpu().numpy().transpose((0, 2, 3, 1))
+ else:
+ raise ValueError("invalid attack")
+
+
+ # Restore the checkpoint
+ saver.restore(sess, model_file)
+ # Iterate over the samples batch-by-batch
+ num_eval_examples = config['num_eval_examples']
+ eval_batch_size = config['eval_batch_size']
+ num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
+
+ preds =[]
+ adv_preds = []
+ ys = []
+ for ibatch in range(num_batches):
+ bstart = ibatch * eval_batch_size
+ bend = min(bstart + eval_batch_size, num_eval_examples)
+
+ x_batch = cifar.eval_data.xs[bstart:bend, :] / 255.0
+ y_batch = cifar.eval_data.ys[bstart:bend]
+
+ x_batch_adv = attack_fn(x_batch, y_batch)
+
+ logits = sess.run(model.pre_softmax, {model.x_input: x_batch})
+ adv_logits = sess.run(model.pre_softmax, {model.x_input: x_batch_adv})
+
+ preds.append(logits.argmax(-1))
+ adv_preds.append(adv_logits.argmax(-1))
+ ys.append(y_batch)
+
+ preds = np.concatenate(preds)
+ adv_preds = np.concatenate(adv_preds)
+ ys = np.concatenate(ys)
+
+ acc = np.mean(preds == ys)
+ adv_acc = np.mean(adv_preds == ys)
+
+ print("Accuracy:", acc)
+ print("Robust Accuracy:", adv_acc)
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/jarn/adversarial_evaluation.sh b/case_studies/jarn/adversarial_evaluation.sh
new file mode 100644
index 0000000..894e2a3
--- /dev/null
+++ b/case_studies/jarn/adversarial_evaluation.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+nsamples=${1:-512}
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ ./case_studies/jarn/adversarial_evaluation.py \
+ --model_dir=checkpoints/jarn/modelJARN_cifar10_b64_beta_1.000_gamma_1.000_disc_update_steps20_l5bc32_imgpert_advdelay140000_tanhencact_zeromeaninput_160000steps \
+ --data_path=data/cifar-10-batches-py/ \
+ --eval_batch_size=512 \
+ --num_steps=1 \
+ --num_eval_examples=$nsamples \
+ --attack=pgd
diff --git a/case_studies/jarn/autoattack_adapter.py b/case_studies/jarn/autoattack_adapter.py
new file mode 100644
index 0000000..16b5d10
--- /dev/null
+++ b/case_studies/jarn/autoattack_adapter.py
@@ -0,0 +1,129 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+import numpy as np
+import torch
+
+class ModelAdapter:
+ def __init__(self, logits, x, y, sess, num_classes=10, device="cuda"):
+ self.logits = logits
+ self.sess = sess
+ self.x_input = x
+ self.y_input = y
+ self.num_classes = num_classes
+ self.device = device
+
+ # gradients of logits
+ if num_classes <= 10:
+ self.grads = [None] * num_classes
+ for cl in range(num_classes):
+ self.grads[cl] = tf.gradients(self.logits[:, cl], self.x_input)[0]
+
+ # cross-entropy loss
+ self.xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.logits, labels=self.y_input)
+ self.grad_xent = tf.gradients(self.xent, self.x_input)[0]
+
+ # dlr loss
+ self.dlr = dlr_loss(self.logits, self.y_input, num_classes=self.num_classes)
+ self.grad_dlr = tf.gradients(self.dlr, self.x_input)[0]
+
+ # targeted dlr loss
+ self.y_target = tf.placeholder(tf.int64, shape=[None])
+ self.dlr_target = dlr_loss_targeted(self.logits, self.y_input, self.y_target, num_classes=self.num_classes)
+ self.grad_target = tf.gradients(self.dlr_target, self.x_input)[0]
+
+ self.la = tf.placeholder(tf.int64, shape=[None])
+ self.la_target = tf.placeholder(tf.int64, shape=[None])
+ la_mask = tf.one_hot(self.la, self.num_classes)
+ la_target_mask = tf.one_hot(self.la_target, self.num_classes)
+ la_logit = tf.reduce_sum(la_mask * self.logits, axis=1)
+ la_target_logit = tf.reduce_sum(la_target_mask * self.logits, axis=1)
+ self.diff_logits = la_target_logit - la_logit
+ self.grad_diff_logits = tf.gradients(self.diff_logits, self.x_input)[0]
+
+ def predict(self, x):
+ x = x.detach()
+ x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
+ y = self.sess.run(self.logits, {self.x_input: x2})
+
+ return torch.from_numpy(y).to(self.device)
+
+ def grad_logits(self, x):
+ x = x.detach()
+ x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
+ logits, g2 = self.sess.run([self.logits, self.grads], {self.x_input: x2})
+ g2 = np.moveaxis(np.array(g2), 0, 1)
+ g2 = np.transpose(g2, (0, 1, 4, 2, 3))
+
+ return torch.from_numpy(logits).cuda(), torch.from_numpy(g2).cuda()
+
+ def get_grad_diff_logits_target(self, x, y=None, y_target=None):
+ x = x.detach()
+ la = y.cpu().numpy()
+ la_target = y_target.cpu().numpy()
+ x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
+ dl, g2 = self.sess.run([self.diff_logits, self.grad_diff_logits], {self.x_input: x2, self.la: la, self.la_target: la_target})
+ g2 = np.transpose(np.array(g2), (0, 3, 1, 2))
+
+ return torch.from_numpy(dl).to(self.device), torch.from_numpy(g2).to(self.device)
+
+ def get_logits_loss_grad_xent(self, x, y):
+ x = x.detach()
+ x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
+ y2 = y.clone().cpu().numpy()
+ logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.xent, self.grad_xent], {self.x_input: x2, self.y_input: y2})
+ grad_val = np.moveaxis(grad_val, 3, 1)
+
+ return torch.from_numpy(logits_val).to(self.device), torch.from_numpy(loss_indiv_val).to(self.device), torch.from_numpy(grad_val).to(self.device)
+
+ def get_logits_loss_grad_dlr(self, x, y):
+ x = x.detach()
+ x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
+ y2 = y.clone().cpu().numpy()
+ logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr, self.grad_dlr], {self.x_input: x2, self.y_input: y2})
+ grad_val = np.moveaxis(grad_val, 3, 1)
+
+ return torch.from_numpy(logits_val).to(self.device), torch.from_numpy(loss_indiv_val).to(self.device), torch.from_numpy(grad_val).to(self.device)
+
+ def get_logits_loss_grad_target(self, x, y, y_target):
+ x = x.detach()
+ x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
+ y2 = y.clone().cpu().numpy()
+ y_targ = y_target.clone().cpu().numpy()
+ logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr_target, self.grad_target], {self.x_input: x2, self.y_input: y2, self.y_target: y_targ})
+ grad_val = np.moveaxis(grad_val, 3, 1)
+
+ return torch.from_numpy(logits_val).to(self.device), torch.from_numpy(loss_indiv_val).to(self.device), torch.from_numpy(grad_val).to(self.device)
+
+
+def dlr_loss(x, y, num_classes=10):
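+  # normalized margin between the two largest logits (a simplified variant of the
+  # AutoPGD DLR loss of Croce & Hein, 2020; see the TODO below regarding points
+  # that are already misclassified)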
+ x_sort = tf.contrib.framework.sort(x, axis=1)
+ y_onehot = tf.one_hot(y, num_classes)
+ if num_classes > 2:
+ ### TODO: adapt to the case when the point is already misclassified
+ loss = -(x_sort[:, -1] - x_sort[:, -2]) / (x_sort[:, -1] - x_sort[:, -3] + 1e-12)
+ else:
+ loss = (tf.reduce_max(x - y_onehot * 1e9) - tf.gather(x, y, axis=-1)) / tf.reduce_max(x - y_onehot * 1e9)
+
+ return loss
+
+def dlr_loss_targeted(x, y, y_target, num_classes=10):
+ x_sort = tf.contrib.framework.sort(x, axis=1)
+ y_onehot = tf.one_hot(y, num_classes)
+ y_target_onehot = tf.one_hot(y_target, num_classes)
+ loss = -(tf.reduce_sum(x * y_onehot, axis=1) - tf.reduce_sum(x * y_target_onehot, axis=1)) / (x_sort[:, -1] - .5 * x_sort[:, -3] - .5 * x_sort[:, -4] + 1e-12)
+
+ return loss
\ No newline at end of file
diff --git a/case_studies/jarn/binarization_test.py b/case_studies/jarn/binarization_test.py
new file mode 100644
index 0000000..dfc449e
--- /dev/null
+++ b/case_studies/jarn/binarization_test.py
@@ -0,0 +1,249 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Adapted from the main method in pgd_attack.py.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+from functools import partial
+
+import torch
+
+import utils
+from argparse_utils import DecisionBoundaryBinarizationSettings
+from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+from active_tests import decision_boundary_binarization as dbb
+
+import tensorflow as tf
+import numpy as np
+import sys
+import math
+
+import cifar10_input
+
+import config_attack
+from pgd_attack import LinfPGDAttack
+
+
+class BinarizedModel:
+ def __init__(self, model, logit_diff_loss=False):
+ self.model = model
+ self.x_input = model.x_input
+ self.y_input = model.y_input
+
+ features = model.neck
+
+ with tf.variable_scope("binarized_readout"):
+ # build linear readout
+ bro_w = tf.get_variable(
+ 'DW', [features.shape[-1], 2],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ bro_b = tf.get_variable('biases', [2],
+ initializer=tf.constant_initializer())
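+      # placeholders and assign ops let the binarization test inject the weights
+      # of the externally fitted binary readout into the TF graph at run time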
+ self.bro_w_pl = tf.placeholder(tf.float32, shape=[features.shape[-1], 2])
+ self.bro_b_pl = tf.placeholder(tf.float32, shape=[2])
+ self.bro_w_set_weight = bro_w.assign(self.bro_w_pl)
+ self.bro_b_set_weight = bro_b.assign(self.bro_b_pl)
+ self.pre_softmax = tf.nn.xw_plus_b(features, bro_w, bro_b)
+
+ if logit_diff_loss:
+ yh = tf.one_hot(self.y_input, 2)
+ self.loss = tf.reduce_max(self.pre_softmax - yh * 1e9) - tf.gather(
+ self.pre_softmax, self.y_input, axis=-1)
+ else:
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.loss = tf.reduce_sum(self.y_xent, name='y_xent')
+
+
+def run_attack(m, l, sess, logits, x_pl, bro_w_pl, bro_b_pl, bro_w_assign,
+ bro_b_assign, attack):
+ linear_layer = m[-1]
+ del m
+
+ sess.run(bro_w_assign, {bro_w_pl: linear_layer.weight.data.numpy().T})
+ sess.run(bro_b_assign, {bro_b_pl: linear_layer.bias.data.numpy()})
+
+ for x, y in l:
+ x, y = x.numpy(), y.numpy()
+ x = x.transpose((0, 2, 3, 1))
+
+ x_adv = attack(x, y)
+
+ clean_logits = sess.run(logits, {x_pl: x})
+ adv_logits = sess.run(logits, {x_pl: x_adv})
+ is_adv = adv_logits.argmax(-1) != y
+
+ print(is_adv, clean_logits, adv_logits)
+
+ return is_adv, (torch.tensor(x_adv.transpose((0, 3, 1, 2))),
+ torch.tensor(adv_logits))
+
+
+def main():
+ config = vars(config_attack.get_args())
+
+ tf.set_random_seed(config['tf_seed'])
+ np.random.seed(config['np_seed'])
+
+ model_file = tf.train.latest_checkpoint(config['model_dir'])
+ print("config['model_dir']: ", config['model_dir'])
+ if model_file is None:
+ print('No model found')
+ sys.exit()
+
+ print("JARN MODEL")
+ from model_jarn import Model
+ if "_zeromeaninput" in config['model_dir']:
+ model = Model(dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ normalize_zero_mean=True,
+ zero_one=True,
+ # added by AUTHOR
+ mode='eval')
+ else:
+ model = Model(dataset=config['dataset'],
+ train_batch_size=config['eval_batch_size'],
+ zero_one=True,
+ # added by AUTHOR
+ mode='eval')
+ print("model eval mode:", model.mode)
+ sess = tf.Session()
+ saver = tf.train.Saver()
+ # Restore the checkpoint
+ saver.restore(sess, model_file)
+
+ binarized_model = BinarizedModel(model,
+ logit_diff_loss=config['attack'] == 'pgd-ld')
+
+ print("Using attack:", config['attack'])
+ if config['attack'] == 'pgd' or config['attack'] == 'pgd-ld':
+ attack = LinfPGDAttack(binarized_model,
+ config['epsilon'] / 255.0,
+ config['num_steps'],
+ config['step_size'] / 255.0,
+ config['random_start'],
+ config['loss_func'],
+ dataset=config['dataset'],
+ clip_max=1.0)
+ attack_fn = lambda x, y: attack.perturb(x, y, sess)
+ elif config['attack'] == 'apgd':
+ from autoattack import autopgd_base
+ from autoattack_adapter import ModelAdapter
+ autoattack_model = ModelAdapter(
+ binarized_model.pre_softmax, binarized_model.x_input,
+ binarized_model.y_input, sess, num_classes=2, device="cpu")
+ attack = autopgd_base.APGDAttack(
+ autoattack_model, n_restarts=5, n_iter=100, verbose=True,
+ eps=config["epsilon"] / 255.0, norm="Linf", eot_iter=1, rho=.99,
+ is_tf_model=True, device="cpu", loss='dlr')
+ attack_fn = lambda x, y: attack.perturb(
+ torch.tensor(x.transpose((0, 3, 1, 2)), device="cpu"),
+ torch.tensor(y, device="cpu")
+ ).detach().cpu().numpy().transpose((0, 2, 3, 1))
+ else:
+ raise ValueError("invalid attack")
+
+ data_path = config['data_path']
+
+ print("load cifar10 dataset")
+ cifar = cifar10_input.CIFAR10Data(data_path)
+
+ # Iterate over the samples batch-by-batch
+ num_eval_examples = config['num_eval_examples']
+ eval_batch_size = config['eval_batch_size']
+
+ x_data = cifar.eval_data.xs[:num_eval_examples]
+ y_data = cifar.eval_data.ys[:num_eval_examples]
+ x_data = x_data.transpose((0, 3, 1, 2)) / 255.0
+ assert x_data.max() <= 1 and x_data.min() >= 0, (x_data.min(), x_data.max())
+
+ test_loader = utils.build_dataloader_from_arrays(x_data, y_data,
+ eval_batch_size)
+
+ def feature_extractor_forward_pass(x, features_and_logits: bool = False,
+ features_only: bool = False):
+ if features_and_logits:
+ assert not features_only, "Only one of the flags must be set."
+ if features_and_logits:
+ return sess.run(
+ (model.neck, model.pre_softmax),
+ feed_dict={model.x_input: x.transpose(0, 2, 3, 1)})
+ elif features_only:
+ return sess.run(
+ model.neck,
+ feed_dict={model.x_input: x.transpose(0, 2, 3, 1)})
+ else:
+ return sess.run(
+ model.pre_softmax,
+ feed_dict={model.x_input: x.transpose(0, 2, 3, 1)})
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=feature_extractor_forward_pass,
+ logit_forward_and_backward_pass=None,
+ )
+
+ attack_fn_partial = partial(
+ run_attack,
+ sess=sess, logits=binarized_model.pre_softmax,
+ x_pl=model.x_input,
+ bro_w_pl=binarized_model.bro_w_pl, bro_b_pl=binarized_model.bro_b_pl,
+ bro_w_assign=binarized_model.bro_w_set_weight,
+ bro_b_assign=binarized_model.bro_b_set_weight,
+ attack=attack_fn)
+
+ scores_logit_differences_and_validation_accuracies = \
+ dbb.interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ # m, l, sess, logits, x_pl, is_train, bro_w_pl, bro_b_pl,
+ # bro_w_assign, bro_b_assign, attack_fn)
+ attack_fn=lambda m, l, kwargs: attack_fn_partial(m, l),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=config["epsilon"] / 255.0,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=config["n_boundary_points"],
+ n_inner_points=config["n_inner_points"],
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=config["num_eval_examples"],
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ #rescale_logits="adaptive",
+ sample_training_data_from_corners=config["sample_from_corners"],
+ #decision_boundary_closeness=0.9999,
+ fail_on_exception=False
+ # args.num_samples_test * 10
+ )
+
+ print(dbb.format_result(scores_logit_differences_and_validation_accuracies,
+ config["num_eval_examples"]))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/jarn/binarization_test.sh b/case_studies/jarn/binarization_test.sh
new file mode 100644
index 0000000..bff3e3e
--- /dev/null
+++ b/case_studies/jarn/binarization_test.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+nsamples=${1:-512}
+echo "#samples: $nsamples"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, 1 boundary, 999 inner points (original PGD)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ ./case_studies/jarn/binarization_test.py \
+ --model_dir=checkpoints/jarn/modelJARN_cifar10_b64_beta_1.000_gamma_1.000_disc_update_steps20_l5bc32_imgpert_advdelay140000_tanhencact_zeromeaninput_160000steps \
+ --data_path=data/cifar-10-batches-py/ \
+ --eval_batch_size=512 \
+ --num_steps=20 \
+ --num_eval_examples=$nsamples \
+ --attack=pgd \
+ --step_size=2.0
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, 1 boundary, 999 inner points (modified PGD)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ ./case_studies/jarn/binarization_test.py \
+ --model_dir=checkpoints/jarn/modelJARN_cifar10_b64_beta_1.000_gamma_1.000_disc_update_steps20_l5bc32_imgpert_advdelay140000_tanhencact_zeromeaninput_160000steps \
+ --data_path=data/cifar-10-batches-py/ \
+ --eval_batch_size=512 \
+ --num_steps=500 \
+ --num_eval_examples=$nsamples \
+ --attack=pgd-ld \
+ --step_size=1.0 \
+ --random_start
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, 1 boundary, 999 inner points (APGD)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python \
+ ./case_studies/jarn/binarization_test.py \
+ --model_dir=checkpoints/jarn/modelJARN_cifar10_b64_beta_1.000_gamma_1.000_disc_update_steps20_l5bc32_imgpert_advdelay140000_tanhencact_zeromeaninput_160000steps \
+ --data_path=data/cifar-10-batches-py/ \
+ --eval_batch_size=512 \
+ --num_steps=500 \
+ --num_eval_examples=$nsamples \
+ --attack=apgd \
+ --step_size=1.0 \
+ --random_start
+
diff --git a/case_studies/jarn/cifar10_input.py b/case_studies/jarn/cifar10_input.py
new file mode 100644
index 0000000..f9c7522
--- /dev/null
+++ b/case_studies/jarn/cifar10_input.py
@@ -0,0 +1,204 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for importing the CIFAR10 dataset.
+Each image in the dataset is a numpy array of shape (32, 32, 3), with the values
+being unsigned integers (i.e., in the range 0,1,...,255).
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import pickle
+import sys
+import tensorflow as tf
+import numpy as np
+import re
+
+version = sys.version_info
+
+
+class CIFAR10Data(object):
+ """
+ Unpickles the CIFAR10 dataset from a specified folder containing a pickled
+ version following the format of Krizhevsky which can be found
+ [here](https://www.cs.toronto.edu/~kriz/cifar.html).
+ Inputs to constructor
+ =====================
+ - path: path to the pickled dataset. The training data must be pickled
+ into five files named data_batch_i for i = 1, ..., 5, containing 10,000
+ examples each, the test data
+ must be pickled into a single file called test_batch containing 10,000
+ examples, and the 10 class names must be
+ pickled into a file called batches.meta. The pickled examples should
+ be stored as a tuple of two objects: an array of 10,000 32x32x3-shaped
+ arrays, and an array of their 10,000 true labels.
+ """
+
+ def __init__(self, path, init_shuffle=True, train_size_ratio=1):
+ num_classes = 10
+ path = CIFAR10Data.rec_search(path)
+ train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]
+ eval_filename = 'test_batch'
+ metadata_filename = 'batches.meta'
+
+ train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')
+ train_labels = np.zeros(50000, dtype='int32')
+ for ii, fname in enumerate(train_filenames):
+ cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))
+ train_images[ii * 10000: (ii + 1) * 10000, ...] = cur_images
+ train_labels[ii * 10000: (ii + 1) * 10000, ...] = cur_labels
+ eval_images, eval_labels = self._load_datafile(
+ os.path.join(path, eval_filename))
+
+ with open(os.path.join(path, metadata_filename), 'rb') as fo:
+ if version.major == 3:
+ data_dict = pickle.load(fo, encoding='bytes')
+ else:
+ data_dict = pickle.load(fo)
+
+ self.label_names = data_dict[b'label_names']
+ for ii in range(len(self.label_names)):
+ self.label_names[ii] = self.label_names[ii].decode('utf-8')
+
+ if train_size_ratio < 1:
+ new_train_images = []
+ new_train_labels = []
+ for class_ind in range(num_classes):
+ current_class_train_images = train_images[train_labels == class_ind]
+ num_train_per_class = int(current_class_train_images.shape[0] * train_size_ratio)
+ new_train_images.append(current_class_train_images[:num_train_per_class])
+ new_train_labels.append(np.full(num_train_per_class, class_ind, dtype='int32'))
+ train_images = np.concatenate(new_train_images, axis=0)
+ train_labels = np.concatenate(new_train_labels)
+
+ self.train_data = DataSubset(train_images, train_labels, init_shuffle=init_shuffle)
+ self.eval_data = DataSubset(eval_images, eval_labels, init_shuffle=init_shuffle)
+
+ @staticmethod
+ def rec_search(original_path):
+ rx = re.compile(r'data_batch_[0-9]+')
+ r = []
+ for path, _, file_names in os.walk(original_path):
+ r.extend([os.path.join(path, x) for x in file_names if rx.search(x)])
+    if len(r) == 0: # TODO: Is this the best way?
+ return original_path
+ return os.path.dirname(r[0])
+
+ @staticmethod
+ def _load_datafile(filename):
+ with open(filename, 'rb') as fo:
+ if version.major == 3:
+ data_dict = pickle.load(fo, encoding='bytes')
+ else:
+ data_dict = pickle.load(fo)
+
+ assert data_dict[b'data'].dtype == np.uint8
+ image_data = data_dict[b'data']
+ image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
+ return image_data, np.array(data_dict[b'labels'])
+
+
+class AugmentedCIFAR10Data(object):
+ """
+ Data augmentation wrapper over a loaded dataset.
+ Inputs to constructor
+ =====================
+ - raw_cifar10data: the loaded CIFAR10 dataset, via the CIFAR10Data class
+ - sess: current tensorflow session
+ - model: current model (needed for input tensor)
+ """
+
+ def __init__(self, raw_cifar10data, sess, model):
+ assert isinstance(raw_cifar10data, CIFAR10Data)
+ self.image_size = 32
+
+ # create augmentation computational graph
+ self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
+ padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
+ img, self.image_size + 4, self.image_size + 4),
+ self.x_input_placeholder)
+ cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,
+ self.image_size,
+ 3]), padded)
+ flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
+ self.augmented = flipped
+
+ self.train_data = AugmentedDataSubset(raw_cifar10data.train_data, sess,
+ self.x_input_placeholder,
+ self.augmented)
+ self.eval_data = AugmentedDataSubset(raw_cifar10data.eval_data, sess,
+ self.x_input_placeholder,
+ self.augmented)
+ self.label_names = raw_cifar10data.label_names
+
+
+class DataSubset(object):
+ def __init__(self, xs, ys, init_shuffle=True):
+ self.xs = xs
+ self.n = xs.shape[0]
+ self.ys = ys
+ self.batch_start = 0
+ if init_shuffle:
+ self.cur_order = np.random.permutation(self.n)
+ else:
+ self.cur_order = np.arange(self.n)
+
+ def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
+ if self.n < batch_size:
+ raise ValueError('Batch size can be at most the dataset size')
+ if not multiple_passes:
+ actual_batch_size = min(batch_size, self.n - self.batch_start)
+ if actual_batch_size <= 0:
+ raise ValueError('Pass through the dataset is complete.')
+ batch_end = self.batch_start + actual_batch_size
+ batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
+ batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
+ self.batch_start += actual_batch_size
+ if actual_batch_size < batch_size:
+ print('actual_batch_size < batch_size, padding with zeros')
+ batch_xs_pad = np.zeros(shape=(batch_size - actual_batch_size, batch_xs.shape[1], batch_xs.shape[2], batch_xs.shape[3]), dtype=batch_xs.dtype)
+ batch_ys_pad = np.zeros(batch_size - actual_batch_size, dtype=batch_ys.dtype)
+ batch_xs = np.concatenate([batch_xs, batch_xs_pad], axis=0)
+ batch_ys = np.concatenate([batch_ys, batch_ys_pad], axis=0)
+ return batch_xs, batch_ys
+ actual_batch_size = min(batch_size, self.n - self.batch_start)
+ if actual_batch_size < batch_size:
+ if reshuffle_after_pass:
+ self.cur_order = np.random.permutation(self.n)
+ self.batch_start = 0
+ batch_end = self.batch_start + batch_size
+ batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
+ batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
+ self.batch_start += actual_batch_size
+ return batch_xs, batch_ys
+
+
+class AugmentedDataSubset(object):
+ def __init__(self, raw_datasubset, sess, x_input_placeholder,
+ augmented):
+ self.sess = sess
+ self.raw_datasubset = raw_datasubset
+ self.x_input_placeholder = x_input_placeholder
+ self.augmented = augmented
+
+ def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
+ raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,
+ reshuffle_after_pass)
+ images = raw_batch[0].astype(np.float32)
+ return self.sess.run(self.augmented, feed_dict={self.x_input_placeholder:
+ raw_batch[0]}), raw_batch[1]
diff --git a/case_studies/jarn/config.py b/case_studies/jarn/config.py
new file mode 100644
index 0000000..29ee600
--- /dev/null
+++ b/case_studies/jarn/config.py
@@ -0,0 +1,101 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import configargparse
+import pdb
+
+def pair(arg):
+ return [float(x) for x in arg.split(',')]
+
+def get_args():
+ parser = configargparse.ArgParser(default_config_files=[])
+ parser.add("--config", type=str, is_config_file=True, help="You can store all the config args in a config file and pass the path here")
+ parser.add("--model_dir", type=str, default="models/model", help="Path to save/load the checkpoints, default=models/model")
+ parser.add("--data_dir", type=str, default="datasets/", help="Path to load datasets from, default=datasets")
+ parser.add("--model_suffix", type=str, default="", help="Suffix to append to model name, default=''")
+ parser.add("--dataset", "-d", type=str, default="cifar10", choices=["cifar10", "cifar100", "svhn"], help="Path to load dataset, default=cifar10")
+ parser.add("--tf_seed", type=int, default=451760341, help="Random seed for initializing tensor-flow variables to rule out the effect of randomness in experiments, default=45160341")
+ parser.add("--np_seed", type=int, default=216105420, help="Random seed for initializing numpy variables to rule out the effect of randomness in experiments, default=216105420")
+ parser.add("--train_steps", type=int, default=80000, help="Maximum number of training steps, default=80000")
+ parser.add("--out_steps", "-o", type=int, default=100, help="Number of output steps, default=100")
+ parser.add("--summary_steps", type=int, default=500, help="Number of summary steps, default=500")
+ parser.add("--checkpoint_steps", "-c", type=int, default=1000, help="Number of checkpoint steps, default=1000")
+ parser.add("--train_batch_size", "-b", type=int, default=128, help="The training batch size, default=128")
+ parser.add("--step_size_schedule", nargs='+', type=pair, default=[[0, 0.1], [40000, 0.01], [60000, 0.001]], help="The step size scheduling, default=[[0, 0.1], [40000, 0.01], [60000, 0.001]], use like: --stepsize 0,0.1 40000,0.01 60000,0.001")
+ parser.add("--weight_decay", "-w", type=float, default=0.0002, help="The weight decay parameter, default=0.0002")
+ parser.add("--momentum", type=float, default=0.9, help="The momentum parameter, default=0.9")
+ parser.add("--replay_m", "-m", type=int, default=8, help="Number of steps to repeat training on the same batch, default=8")
+ parser.add("--eval_examples", type=int, default=10000, help="Number of evaluation examples, default=10000")
+ parser.add("--eval_size", type=int, default=128, help="Evaluation batch size, default=128")
+ parser.add("--eval_cpu", dest='eval_cpu', action='store_true', help="Set True to do evaluation on CPU instead of GPU, default=False")
+ parser.set_defaults(eval_cpu=False)
+ # attack params
+ parser.add("--epsilon", "-e", type=float, default=8.0, help="Epsilon (Lp Norm distance from the original image) for generating adversarial examples, default=8.0")
+ parser.add("--pgd_steps", "-k", type=int, default=20, help="Number of steps to PGD attack, default=20")
+ parser.add("--step_size", "-s", type=float, default=2.0, help="Step size in PGD attack for generating adversarial examples in each step, default=2.0")
+ parser.add("--loss_func", "-f", type=str, default="xent", choices=["xent", "cw"], help="Loss function for the model, choices are [xent, cw], default=xent")
+ parser.add("--num_restarts", type=int, default=1, help="Number of resets for the PGD attack, default=1")
+ parser.add("--random_start", dest="random_start", action="store_true", help="Random start for PGD attack default=True")
+ parser.add("--no-random_start", dest="random_start", action="store_false", help="No random start for PGD attack default=True")
+ parser.set_defaults(random_start=True)
+ # input grad generation param
+ parser.add("--randinit_repeat", type=int, default=1, help="Number of randinit grad to generate, default=1")
+ parser.add("--num_gen_grad", type=int, default=0, help="Number of input grad samples to generate, 0 means all data default=0")
+ parser.add("--num_gen_act", type=int, default=0, help="Number of activation samples to generate, 0 means all data default=0")
+ # input grad reg params
+ parser.add("--beta", type=float, default=1, help="Weight of input gradient regularization, default=1")
+ parser.add("--gamma", type=float, default=1, help="Weight of disc xent term on encoder opt, default=1")
+ parser.add("--alpha", type=float, default=0, help="Weight of image-input gradient l2 norm regularization, default=0")
+ parser.add("--disc_update_steps", type=int, default=5, help="Number of classifier opt steps between each disc opt step, default=5")
+ parser.add("--adv_update_steps_per_iter", type=int, default=1, help="Number of classifier adv opt steps per classification xent opt step, default=1")
+ parser.add("--disc_layers", type=int, default=5, help="Number of conv layers in disc model, default=5")
+ parser.add("--disc_base_channels", type=int, default=16, help="Number of channels in first disc conv layer, default=16")
+ parser.add("--steps_before_adv_opt", type=int, default=0, help="Number of training steps to wait before training on adv loss, default=0")
+ parser.add("--adv_encoder_type", type=str, default='simple', help="Type of input grad encoder for adv training, default=simple")
+ parser.add("--enc_output_activation", type=str, default='tanh', help="Activation function of encoder output default=None")
+ parser.add("--sep_opt_version", type=int, default=1, choices=[0, 1, 2], help="Sep opt version 0: train_jan.py, 1: train_jan_sep_opt-CD.py, 2: train_jan_sep_opt2-CD.py default=1")
+ parser.add("--grad_image_ratio", type=float, default=1, help="Ratio of input grad to mix with image default=1")
+ parser.add("--final_grad_image_ratio", type=float, default=0, help="Final ratio of input grad to mix with image, set to 0 for static ratio default=0")
+ parser.add("--num_grad_image_ratios", type=int, default=5, help="Number of times to adjust grad_image_ratio default=4")
+
+ parser.add("--eval_adv_attack", dest="eval_adv_attack", action="store_true", help="Evaluate trained model on adv attack after training default=True")
+ parser.add("--no-eval_adv_attack", dest="eval_adv_attack", action="store_false", help="Evaluate trained model on adv attack after training default=True")
+ parser.set_defaults(eval_adv_attack=True)
+
+ parser.add("--normalize_zero_mean", dest="normalize_zero_mean", action="store_true", help="Normalize classifier input to zero mean default=True")
+ parser.add("--no-normalize_zero_mean", dest="normalize_zero_mean", action="store_false", help="Normalize classifier input to zero mean default=True")
+ parser.set_defaults(normalize_zero_mean=True)
+
+ parser.add("--same_optimizer", dest="same_optimizer", action="store_true", help="Train classifier and disc with same optimizer configuration default=True")
+ parser.add("--no-same_optimizer", dest="same_optimizer", action="store_false", help="Train classifier and disc with same optimizer configuration default=True")
+ parser.set_defaults(same_optimizer=True)
+
+ parser.add("--only_fully_connected", dest="only_fully_connected", action="store_true", help="Fully connected disc model default=False")
+ parser.add("--no-only_fully_connected", dest="only_fully_connected", action="store_false", help="Fully connected disc model default=False")
+ parser.set_defaults(only_fully_connected=False)
+
+ parser.add("--img_random_pert", dest="img_random_pert", action="store_true", help="Random start image pertubation augmentation default=False")
+ parser.add("--no-img_random_pert", dest="img_random_pert", action="store_false", help="No random start image pertubation augmentation default=False")
+ parser.set_defaults(img_random_pert=False)
+
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == "__main__":
+ print(get_args())
+ pdb.set_trace()
+
+# TODO Default for model_dir
+# TODO Need to update the helps
diff --git a/case_studies/jarn/config_attack.py b/case_studies/jarn/config_attack.py
new file mode 100644
index 0000000..7920039
--- /dev/null
+++ b/case_studies/jarn/config_attack.py
@@ -0,0 +1,60 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import configargparse
+import pdb
+
+def pair(arg):
+ return [float(x) for x in arg.split(',')]
+
+def get_args():
+ parser = configargparse.ArgParser(default_config_files=[])
+ parser.add("--model_dir", type=str, default="models/adv_trained", help="Path to save/load the checkpoints, default=models/model")
+ parser.add("--data_path", type=str, default="datasets/cifar10", help="Path to dataset, default=datasets/cifar10")
+ parser.add("--tf_seed", type=int, default=451760341, help="Random seed for initializing tensor-flow variables to rule out the effect of randomness in experiments, default=45160341")
+ parser.add("--np_seed", type=int, default=216105420, help="Random seed for initializing numpy variables to rule out the effect of randomness in experiments, default=216105420")
+ parser.add("--num_eval_examples", type=int, default=10000, help="Number of eval samples, default=10000")
+ parser.add("--eval_batch_size", type=int, default=100, help="Eval batch size, default=100")
+ parser.add("--epsilon", "-e", type=float, default=8.0, help="Epsilon (Lp Norm distance from the original image) for generating adversarial examples, default=8.0")
+ parser.add("--num_steps", type=int, default=10, help="Number of steps to PGD attack, default=10")
+ parser.add("--step_size", "-s", type=float, default=2.0, help="Step size in PGD attack for generating adversarial examples in each step, default=2.0")
+ parser.add("--random_start", dest="random_start", action="store_true", help="Random start for PGD attack default=True")
+ parser.add("--no-random_start", dest="random_start", action="store_false", help="No random start for PGD attack default=True")
+ parser.set_defaults(random_start=True)
+ parser.add("--loss_func", "-f", type=str, default="xent", choices=["xent", "target_task_xent", "cw"], help="Loss function for the model, choices are [xent, cw], default=xent")
+ parser.add("--attack_norm", type=str, default="inf", choices=["inf", "2"], help="Lp norm type for attacks, choices are [inf, 2], default=inf")
+ parser.add("--dataset", "-d", type=str, default="cifar10", choices=["cifar10", "cifar100", "imagenet"], help="Path to load dataset, default=cifar10")
+ parser.add("--store_adv_path", type=str, default=None, help="Path to save adversarial examples, default=None")
+ parser.add("--attack_name", type=str, default=None, help="Path to save adversarial examples, default=''")
+ parser.add("--save_eval_log", dest="save_eval_log", action="store_true", help="Save txt file for attack eval")
+ parser.add("--no-save_eval_log", dest="save_eval_log", action="store_false", help="Save txt file for attack eval")
+ parser.set_defaults(save_eval_log=False)
+
+ parser.add("--xfer_attack", dest="xfer_attack", action="store_true", help="Adversarial transfer attack")
+ parser.add("--no-xfer_attack", dest="xfer_attack", action="store_false", help="not adversarial transfer attack")
+ parser.set_defaults(xfer_attack=False)
+ parser.add("--custom_output_model_name", type=str, default=None, help="Custom model name, default=None")
+
+ parser.add("--n_inner_points", type=int, default=999, help="")
+ parser.add("--n_boundary_points", type=int, default=1, help="")
+ parser.add("--sample_from_corners", type=bool, default=False, help="")
+ parser.add("--attack", type=str, default="pgd", help="either pgd or apgd")
+
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == "__main__":
+ print(get_args())
+ pdb.set_trace()
diff --git a/case_studies/jarn/model_jarn.py b/case_studies/jarn/model_jarn.py
new file mode 100644
index 0000000..41ea784
--- /dev/null
+++ b/case_studies/jarn/model_jarn.py
@@ -0,0 +1,582 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# based on https://github.com/tensorflow/models/tree/master/resnet
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+import json
+from collections import OrderedDict
+
+
+class Model(object):
+ """ResNet model."""
+
+ def __init__(self, dataset, mode='train', train_batch_size=None, normalize_zero_mean=False, zero_one=False):
+ """
+ ResNet constructor.
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.num_classes = 10
+ self.train_batch_size = train_batch_size
+ self.activations = []
+ self.normalize_zero_mean = normalize_zero_mean
+ self.zero_one = zero_one
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ """Build the core model within the graph."""
+ assert self.mode == 'train' or self.mode == 'eval'
+ with tf.variable_scope('classifier'):
+ with tf.variable_scope('input'):
+
+ self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, 32, 32, 3])
+
+ self.y_input = tf.placeholder(tf.int64, shape=None)
+
+ if self.zero_one:
+ self.final_input = self.x_input * 255.0
+ else:
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,32,32,3])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
+ self.activations.append(x)
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+ # The filter sizes below correspond to the w28-10 wide residual network.
+ # It is more memory efficient than a very deep residual network and has
+ # comparably good performance.
+ # https://arxiv.org/pdf/1605.07146v1.pdf
+ # filters = [16, 16, 32, 64] # for debugging
+ filters = [16, 160, 320, 640]
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ self.activations.append(x)
+ for i in range(1, 5):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+ self.activations.append(x)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+ self.neck = x
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+ self.activations.append(self.pre_softmax)
+ self.softmax = tf.nn.softmax(self.pre_softmax)
+
+ sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
+ sample_indices = tf.expand_dims(sample_indices, axis=-1)
+ target_indices = tf.expand_dims(self.y_input, axis=-1)
+ self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
+ self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
+ self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.loss = self.xent
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.y_xent_dbp = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent_dbp = tf.reduce_sum(self.y_xent_dbp, name='y_xent_dbp')
+ self.mean_xent_dbp = tf.reduce_mean(self.y_xent_dbp)
+ self.weight_decay_loss = self._decay()
+
+ # for top-2 logit diff loss
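+ # (CW-style margin: negative hinge on correct_logit - wrong_logit with a margin
+ # of 50; the 1e4 * label_mask term excludes the true class from the max.)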
+ self.label_mask = tf.one_hot(self.y_input,
+ self.num_classes,
+ on_value=1.0,
+ off_value=0.0,
+ dtype=tf.float32)
+ self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
+ self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
+ self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+class JarnConvDiscriminatorModel(object):
+ """Simple conv discriminator model."""
+ # based on https://github.com/tensorflow/models/blob/d361076952b73706c5c7ddf9c940bf42c27a3213/research/slim/nets/dcgan.py#L41
+
+ def __init__(self, mode, dataset, train_batch_size=None, num_conv_layers=5, base_num_channels=16, x_modelgrad_input_tensor=None,
+ y_modelgrad_input_tensor=None, x_image_input_tensor=None, y_image_input_tensor=None, normalize_zero_mean=False, only_fully_connected=False, num_fc_layers=3, image_size=32, num_input_channels=3):
+ """
+ conv disc constructor.
+ """
+ self.neck = None
+ self.y_pred = None
+ self.mode = mode
+ self.num_classes = 2
+ self.train_batch_size = train_batch_size
+ self.num_conv_layers = num_conv_layers
+ self.num_fc_layers = num_fc_layers
+ self.base_num_channels = base_num_channels
+ self.x_modelgrad_input_tensor = x_modelgrad_input_tensor
+ self.y_modelgrad_input_tensor = y_modelgrad_input_tensor
+ self.x_image_input_tensor = x_image_input_tensor
+ self.y_image_input_tensor = y_image_input_tensor
+ self.normalize_zero_mean = normalize_zero_mean
+ self.only_fully_connected = only_fully_connected
+ self.image_size = image_size
+ self.num_input_channels = num_input_channels
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ """Build the core model within the graph."""
+ assert self.mode == 'train' or self.mode == 'eval'
+ with tf.variable_scope('discriminator'):
+ with tf.variable_scope('input'):
+
+ if self.x_modelgrad_input_tensor == None:
+ self.x_modelgrad_input = tf.get_variable(name='x_modelgrad_input', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
+
+ self.x_image_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.image_size, self.image_size, self.num_input_channels])
+ else:
+ self.x_modelgrad_input = self.x_modelgrad_input_tensor
+ self.x_image_input = self.x_image_input_tensor
+
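+ # The discriminator operates on a concatenated batch: input gradients first,
+ # then real images (labeled 0 and 1 respectively in train_jarn.py).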
+ self.x_input = tf.concat([self.x_modelgrad_input, self.x_image_input], axis=0)
+
+
+ if self.y_modelgrad_input_tensor == None:
+ self.y_modelgrad_input = tf.get_variable(name='y_modelgrad_input', initializer=tf.zeros_initializer,
+ shape=self.train_batch_size, dtype=tf.int64)
+
+ self.y_image_input = tf.placeholder(tf.int64, shape=None)
+ else:
+ self.y_modelgrad_input = self.y_modelgrad_input_tensor
+ self.y_image_input = self.y_image_input_tensor
+
+ self.y_input = tf.concat([self.y_modelgrad_input, self.y_image_input], axis=0)
+
+ self.final_input = self.x_input
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,self.image_size,self.image_size,self.num_input_channels])
+ zero_mean_final_input = self.final_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
+
+ x = self.input_standardized
+ base_num_channels = self.base_num_channels
+ if self.only_fully_connected == False:
+ for i in range(self.num_conv_layers):
+ output_num_channels = base_num_channels * 2**i
+ if i == 0:
+ x = self._conv('conv{}'.format(i), x, 4, self.num_input_channels, output_num_channels, self._stride_arr(2), bias=True)
+ x = self._batch_norm('bn{}'.format(i), x)
+ x = self._relu(x, 0.1)
+ else:
+ x = self._conv('conv{}'.format(i), x, 4, output_num_channels // 2, output_num_channels, self._stride_arr(2), bias=True)
+ x = self._batch_norm('bn{}'.format(i), x)
+ x = self._relu(x, 0.1)
+ else:
+ for i in range(self.num_fc_layers):
+ if i == self.num_fc_layers -1:
+ x = self._fully_connected(x, base_num_channels//2, name='fc{}'.format(i))
+ else:
+ x = self._fully_connected(x, base_num_channels, name='fc{}'.format(i))
+ x = self._batch_norm('bn{}'.format(i), x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, self.num_classes)
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.y_pred = self.predictions
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.loss = self.xent
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+
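+ # Squared l2 distance between the standardized input gradient (first half of
+ # the batch) and the standardized image (second half); intended for the
+ # image/input-gradient l2 regularizer weighted by --alpha in config.py.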
+ self.input_grad_standardized = self.input_standardized[:self.train_batch_size]
+ self.image_standardized = self.input_standardized[self.train_batch_size:]
+ self.ig_img_l2_norm_diff = tf.reduce_mean(tf.reduce_sum(tf.pow(tf.subtract(self.input_grad_standardized, self.image_standardized), 2.0), keepdims=True))
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides, bias=False, padding='SAME'):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ if bias == True:
+ b = tf.get_variable('biases', [out_filters],
+ initializer=tf.constant_initializer())
+ conv_out = tf.nn.conv2d(x, kernel, strides, padding=padding)
+ conv_out_b = tf.nn.bias_add(conv_out, b)
+ return conv_out_b
+ else:
+ return tf.nn.conv2d(x, kernel, strides, padding=padding)
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim, name=None):
+ """FullyConnected layer for final output."""
+ if name == None:
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+ else:
+ with tf.variable_scope(name):
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+class InputGradEncoderModel(object):
+ """3x3 + 1x1 conv model."""
+ # based on https://github.com/tensorflow/models/blob/d361076952b73706c5c7ddf9c940bf42c27a3213/research/slim/nets/dcgan.py#L41
+
+ def __init__(self, mode, train_batch_size=None, encoder_type='simple', output_activation=None, x_modelgrad_input_tensor=None, normalize_zero_mean=False, pix2pix_layers=5, pix2pix_features_root=16, pix2pix_filter_size=4, image_size=32, num_input_channels=3, num_output_channels=None):
+ """conv disc constructor.
+
+ """
+ self.mode = mode
+ self.train_batch_size = train_batch_size
+ self.encoder_type = encoder_type
+ self.output_activation = output_activation
+ self.x_modelgrad_input_tensor = x_modelgrad_input_tensor
+ self.normalize_zero_mean = normalize_zero_mean
+ self.keep_prob = 1
+ self.layers = pix2pix_layers
+ self.features_root = pix2pix_features_root
+ self.filter_size = pix2pix_filter_size
+ self.image_size = image_size
+ self.num_input_channels = num_input_channels
+ if num_output_channels is None:
+ self.num_output_channels = num_input_channels
+ else:
+ self.num_output_channels = num_output_channels
+ self._build_model()
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
+ def _build_model(self):
+ """Build the core model within the graph."""
+ assert self.mode == 'train' or self.mode == 'eval'
+ with tf.variable_scope('encoder'):
+
+ with tf.variable_scope('input'):
+ if self.x_modelgrad_input_tensor == None:
+ self.x_modelgrad_input = tf.get_variable(name='x_modelgrad_input', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
+ else:
+ self.x_modelgrad_input = self.x_modelgrad_input_tensor
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.x_modelgrad_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1,self.image_size,self.image_size,self.num_input_channels])
+ zero_mean_final_input = self.x_modelgrad_input - final_input_mean
+ self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.input_standardized = tf.math.l2_normalize(self.x_modelgrad_input, axis=[1,2,3])
+
+
+ if self.output_activation == 'tanh':
+ x_modelgrad_transformed_preact = self._conv('conv', self.input_standardized, 1, self.num_output_channels, self.num_output_channels, self._stride_arr(1), bias=True)
+ self.x_modelgrad_transformed = tf.tanh(x_modelgrad_transformed_preact)
+ else:
+ self.x_modelgrad_transformed = self._conv('conv', self.input_standardized, 1, self.num_output_channels, self.num_output_channels, self._stride_arr(1), bias=True)
+
+
+ with tf.variable_scope('costs'):
+ self.weight_decay_loss = self._decay()
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
+ updates_collections=None, is_training=(self.mode == 'train'))
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides, bias=False, padding='SAME'):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0 / n)))
+ if bias == True:
+ b = tf.get_variable('biases', [out_filters],
+ initializer=tf.constant_initializer())
+ conv_out = tf.nn.conv2d(x, kernel, strides, padding=padding)
+ conv_out_b = tf.nn.bias_add(conv_out, b)
+ return conv_out_b
+ else:
+ return tf.nn.conv2d(x, kernel, strides, padding=padding)
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+class GradImageMixer(object):
+ """ Model to mix input grad with image."""
+
+ def __init__(self, train_batch_size=None, direct_feed_input=False, grad_input_tensor=None, image_input_tensor=None, normalize_zero_mean=False, image_size=32, num_input_channels=3):
+ """GradImageMixer constructor.
+
+ Args:
+
+ """
+ self.train_batch_size = train_batch_size
+ self.direct_feed_input = direct_feed_input
+ self.grad_input_tensor = grad_input_tensor
+ self.image_input_tensor = image_input_tensor
+ self.normalize_zero_mean = normalize_zero_mean
+ self.image_size = image_size
+ self.num_input_channels = num_input_channels
+ self._build_model()
+
+ def _build_model(self):
+ """Build the core model within the graph."""
+ with tf.variable_scope('mixer'):
+ with tf.variable_scope('input'):
+ if self.direct_feed_input:
+ self.grad_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.image_size, self.image_size, self.num_input_channels])
+
+ self.image_input = tf.placeholder(
+ tf.float32,
+ shape=[None, self.image_size, self.image_size, self.num_input_channels])
+ else:
+ if self.grad_input_tensor == None:
+ self.grad_input = tf.get_variable(name='grad_input', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
+
+ self.image_input = tf.get_variable(name='image_input', initializer=tf.zeros_initializer,
+ shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
+ else:
+ self.grad_input = self.grad_input_tensor
+ self.image_input = self.image_input_tensor
+
+ self.grad_ratio = tf.placeholder(tf.float32, shape=())
+
+ if self.normalize_zero_mean:
+ final_input_mean = tf.reduce_mean(self.grad_input, axis=[1,2,3])
+ for i in range(3):
+ final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
+ final_input_mean = tf.tile(final_input_mean, [1, self.image_size, self.image_size,self.num_input_channels])
+ zero_mean_final_input = self.grad_input - final_input_mean
+ self.grad_input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
+ else:
+ self.grad_input_standardized = tf.math.l2_normalize(self.grad_input, axis=[1,2,3])
+
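+ # Convex combination controlled by grad_ratio: 1.0 feeds the pure (standardized)
+ # input gradient to the encoder, 0.0 feeds the pure image.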
+ self.output = self.grad_input_standardized * self.grad_ratio + self.image_input * (1 - self.grad_ratio)
diff --git a/case_studies/jarn/pgd_attack.py b/case_studies/jarn/pgd_attack.py
new file mode 100644
index 0000000..e16d6ba
--- /dev/null
+++ b/case_studies/jarn/pgd_attack.py
@@ -0,0 +1,199 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Implementation of attack methods. Running this file as a program will
+apply the attack to the model specified by the config file and store
+the examples in an .npy file.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import os
+import tensorflow as tf
+import numpy as np
+
+import cifar10_input
+
+import config_attack
+
+class LinfPGDAttack:
+ def __init__(self, model, epsilon, num_steps, step_size, random_start, loss_func, dataset='cifar10',
+ clip_max=255.0):
+ """Attack parameter initialization. The attack performs k steps of
+ size a, while always staying within epsilon from the initial
+ point."""
+ self.model = model
+ self.epsilon = epsilon
+ self.num_steps = num_steps
+ self.step_size = step_size
+ self.rand = random_start
+ self.clip_max = clip_max
+
+ self.loss = model.loss
+
+ self.logits = model.pre_softmax
+
+ self.grad = tf.gradients(self.loss, model.x_input)[0]
+
+ def perturb(self, x_nat, y, sess):
+ """Given a set of examples (x_nat, y), returns a set of adversarial
+ examples within epsilon of x_nat in l_infinity norm."""
+ if self.rand:
+ x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
+ x = np.clip(x, 0, self.clip_max) # ensure valid pixel range
+ else:
+ x = np.copy(x_nat)
+
+ for i in range(self.num_steps):
+ loss, logits, grad = sess.run((self.loss, self.logits, self.grad), feed_dict={self.model.x_input: x,
+ self.model.y_input: y})
+
+ # added by AUTHOR
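+ # (stop early once every example in the batch is already misclassified)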
+ if np.all(logits.argmax(-1) != y):
+ break
+
+ print(i, loss, logits)
+ # x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
+
+ # changed by AUTHOR
+ grad = np.sign(grad)
+ #grad = grad / (grad.reshape(len(grad), -1)**2).sum(-1)
+ x = x + self.step_size * grad
+
+ x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
+ x = np.clip(x, 0, self.clip_max) # ensure valid pixel range
+
+ return x
+
+ def perturb_l2(self, x_nat, y, sess):
+ """Given a set of examples (x_nat, y), returns a set of adversarial
+ examples within epsilon of x_nat in l_2 norm."""
+ if self.rand:
+ pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
+ pert_norm = np.linalg.norm(pert)
+ pert = pert / max(1, pert_norm)
+ else:
+ pert = np.zeros(x_nat.shape)
+
+ for i in range(self.num_steps):
+ x = x_nat + pert
+ grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
+ self.model.y_input: y})
+
+ normalized_grad = grad / np.linalg.norm(grad)
+ pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
+
+ # project pert to norm ball
+ pert_norm = np.linalg.norm(pert)
+ rescale_factor = pert_norm / self.epsilon
+ pert = pert / max(1, rescale_factor)
+
+ x = x_nat + pert
+ x = np.clip(x, 0, 255)
+
+ return x
+
+
+if __name__ == '__main__':
+ import json
+ import sys
+ import math
+
+ config = vars(config_attack.get_args())
+
+ tf.set_random_seed(config['tf_seed'])
+ np.random.seed(config['np_seed'])
+
+ model_file = tf.train.latest_checkpoint(config['model_dir'])
+ print("config['model_dir']: ", config['model_dir'])
+ if model_file is None:
+ print('No model found')
+ sys.exit()
+
+ print("JARN MODEL")
+ from model_jarn import Model
+ if "_zeromeaninput" in config['model_dir']:
+ model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
+ else:
+ model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
+
+ attack = LinfPGDAttack(model,
+ config['epsilon'],
+ config['num_steps'],
+ config['step_size'],
+ config['random_start'],
+ config['loss_func'],
+ dataset=config['dataset'])
+ saver = tf.train.Saver()
+
+ data_path = config['data_path']
+
+ print("load cifar10 dataset")
+ cifar = cifar10_input.CIFAR10Data(data_path)
+
+ with tf.Session() as sess:
+ # Restore the checkpoint
+ saver.restore(sess, model_file)
+ # Iterate over the samples batch-by-batch
+ num_eval_examples = config['num_eval_examples']
+ eval_batch_size = config['eval_batch_size']
+ num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
+
+ x_adv = [] # adv accumulator
+
+ print('Iterating over {} batches'.format(num_batches))
+
+ for ibatch in range(num_batches):
+ bstart = ibatch * eval_batch_size
+ bend = min(bstart + eval_batch_size, num_eval_examples)
+ print('batch size: {}'.format(bend - bstart))
+
+ x_batch = cifar.eval_data.xs[bstart:bend, :]
+ y_batch = cifar.eval_data.ys[bstart:bend]
+
+ x_batch_adv = attack.perturb(x_batch, y_batch, sess)
+
+ x_adv.append(x_batch_adv)
+
+ print('Storing examples')
+ path = config['store_adv_path']
+ if path == None:
+ model_name = config['model_dir'].split('/')[1]
+ if config['attack_name'] == None:
+ path = "attacks/{}_attack.npy".format(model_name)
+ else:
+ path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
+
+ if not os.path.exists("attacks/"):
+ os.makedirs("attacks/")
+
+ x_adv = np.concatenate(x_adv, axis=0)
+ np.save(path, x_adv)
+ print('Examples stored in {}'.format(path))
+
+ if config['save_eval_log']:
+ if not os.path.exists("attack_log/"):
+ os.makedirs("attack_log/")
+ date_str = datetime.now().strftime("%d_%b")
+ log_dir = "attack_log/" + date_str
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+ log_filename = path.split("/")[-1].replace('.npy', '.txt')
+ log_file_path = os.path.join(log_dir, log_filename)
+ with open(log_file_path, "w") as f:
+ f.write('Saved model name: {} \n'.format(model_file))
+ print('Model name saved at ', log_file_path)
diff --git a/case_studies/jarn/requirements.txt b/case_studies/jarn/requirements.txt
new file mode 100644
index 0000000..83c38be
--- /dev/null
+++ b/case_studies/jarn/requirements.txt
@@ -0,0 +1,5 @@
+ConfigArgParse==0.14.0
+tqdm==4.31.1
+tensorflow-gpu==1.14.0
+numba>=0.43.1
+matplotlib>=3.0.3
\ No newline at end of file
diff --git a/case_studies/jarn/run_attack.py b/case_studies/jarn/run_attack.py
new file mode 100644
index 0000000..65076f0
--- /dev/null
+++ b/case_studies/jarn/run_attack.py
@@ -0,0 +1,242 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Evaluates a model against examples from a .npy file as specified
+ in attack_config.json"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import json
+import math
+import os
+import sys
+import time
+
+import tensorflow as tf
+import numpy as np
+from tqdm import tqdm
+
+# from model import Model
+import cifar10_input
+# import cifar100_input
+
+import config_attack
+
+# with open('attack_config.json') as config_file:
+# config = json.load(config_file)
+
+config = vars(config_attack.get_args())
+
+# if config['model_dir'] in ["models/adv_trained", "models/naturally_trained"]:
+# from free_model_original import Model
+# elif 'DefPert2' in config['model_dir']:
+# from model_jarn import ModelDefPert as Model
+# elif 'JARN':
+# from model_jarn import Model
+# else:
+# from free_model import Model
+
+data_path = config['data_path']
+
+def run_attack(checkpoint, x_adv, epsilon):
+# cifar = cifar10_input.CIFAR10Data(data_path)
+ cifar = cifar10_input.CIFAR10Data(data_path)
+ # if config['dataset'] == 'cifar10':
+ # cifar = cifar10_input.CIFAR10Data(data_path)
+ # else:
+ # cifar = cifar100_input.CIFAR100Data(data_path)
+
+
+ print("JARN MODEL")
+ from model_jarn import Model
+ if "_zeromeaninput" in config['model_dir']:
+ model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
+ else:
+ model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
+
+ saver = tf.train.Saver()
+
+ num_eval_examples = 10000
+ eval_batch_size = 100
+
+ num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
+ total_corr = 0
+
+ x_nat = cifar.eval_data.xs
+ l_inf = np.amax(np.abs(x_nat - x_adv))
+
+ if l_inf > epsilon + 0.0001:
+ print('maximum perturbation found: {}'.format(l_inf))
+ print('maximum perturbation allowed: {}'.format(epsilon))
+ return
+
+ y_pred = [] # label accumulator
+
+ with tf.Session() as sess:
+ # Restore the checkpoint
+ saver.restore(sess, checkpoint)
+ # if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+ # sess.run(tf.global_variables_initializer())
+ # source_model_file = tf.train.latest_checkpoint("models/model_AdvTrain-jrtsource-JRT-tinyimagenet_b16")
+ # source_model_saver.restore(sess, source_model_file)
+ # finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
+ # finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ # elif 'mnist_adv_trained_finetuned_on_cifar10_bwtransform' in config['model_dir']:
+ # sess.run(tf.global_variables_initializer())
+ # source_model_file = tf.train.latest_checkpoint("models/mnist_adv_trained")
+ # source_model_saver.restore(sess, source_model_file)
+ # finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
+ # finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ # elif 'finetuned_on_cifar100' in config['model_dir']:
+ # sess.run(tf.global_variables_initializer())
+ # source_model_file = tf.train.latest_checkpoint("models/adv_trained")
+ # source_model_saver.restore(sess, source_model_file)
+ # finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
+ # finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+
+ # # sess.run(tf.global_variables_initializer())
+ # # source_model_file = tf.train.latest_checkpoint("models/adv_trained")
+ # # source_model_saver.restore(sess, source_model_file)
+ # # finetuned_source_model_file = tf.train.latest_checkpoint("models/adv_trained_finetuned_on_cifar100_b32_20ep")
+ # # finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
+ # else:
+ # saver.restore(sess, checkpoint)
+
+ # Iterate over the samples batch-by-batch
+ for ibatch in range(num_batches):
+ bstart = ibatch * eval_batch_size
+ bend = min(bstart + eval_batch_size, num_eval_examples)
+
+ x_batch = x_adv[bstart:bend, :]
+ y_batch = cifar.eval_data.ys[bstart:bend]
+
+ dict_adv = {model.x_input: x_batch,
+ model.y_input: y_batch}
+
+ cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
+ feed_dict=dict_adv)
+ # if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+ # cur_corr, y_pred_batch = sess.run([model.target_task_num_correct, model.target_task_predictions],
+ # feed_dict=dict_adv)
+ # else:
+ # cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
+ # feed_dict=dict_adv)
+
+ total_corr += cur_corr
+ y_pred.append(y_pred_batch)
+
+ accuracy = total_corr / num_eval_examples
+
+ print('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
+ y_pred = np.concatenate(y_pred, axis=0)
+
+ store_adv_pred_path = "preds/" + adv_examples_path.split("/")[-1]
+ if not os.path.exists("preds/"):
+ os.makedirs("preds/")
+ np.save(store_adv_pred_path, y_pred)
+ print('Output saved at ', store_adv_pred_path)
+
+ if config['save_eval_log']:
+ date_str = datetime.now().strftime("%d_%b")
+ log_dir = "attack_log/" + date_str
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+ log_filename = adv_examples_path.split("/")[-1].replace('.npy', '.txt')
+ model_name = config['model_dir'].split('/')[1]
+ # if model_name not in log_filename or config['xfer_attack']:
+ # print("Transfer Attack!")
+ # if config['custom_output_model_name'] is not None:
+ # new_log_filename = config['custom_output_model_name'] +"-xferattacked_by-"+ log_filename
+ # else:
+ # new_log_filename = model_name +"-xferattacked_by-"+ log_filename
+ # log_filename = new_log_filename
+ log_file_path = os.path.join(log_dir, log_filename)
+ with open(log_file_path, "w") as f:
+ f.write('Model checkpoint: {} \n'.format(checkpoint))
+ f.write('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
+ print('Results saved at ', log_file_path)
+
+ # full test evaluation
+ # raw_data = cifar10_input.CIFAR10Data(data_path)
+ if config['dataset'] == 'cifar10':
+ raw_data = cifar10_input.CIFAR10Data(data_path)
+ else:
+ raw_data = cifar100_input.CIFAR100Data(data_path)
+ data_size = raw_data.eval_data.n
+ if data_size % config['eval_batch_size'] == 0:
+ eval_steps = data_size // config['eval_batch_size']
+ else:
+ eval_steps = data_size // config['eval_batch_size'] + 1
+ total_num_correct = 0
+ for ii in tqdm(range(eval_steps)):
+ x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch(config['eval_batch_size'], multiple_passes=False)
+ eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
+ if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
+ num_correct = sess.run(model.target_task_num_correct, feed_dict=eval_dict)
+ else:
+ num_correct = sess.run(model.num_correct, feed_dict=eval_dict)
+ total_num_correct += num_correct
+ eval_acc = total_num_correct / data_size
+ with open(log_file_path, "a+") as f:
+ f.write('\nClean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
+ print('Clean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
+ print('Results saved at ', log_file_path)
+
+if __name__ == '__main__':
+ import json
+
+ # with open('attack_config.json') as config_file:
+ # config = json.load(config_file)
+
+ model_dir = config['model_dir']
+
+ checkpoint = tf.train.latest_checkpoint(model_dir)
+
+ adv_examples_path = config['store_adv_path']
+ if adv_examples_path == None:
+ model_name = config['model_dir'].split('/')[1]
+ if config['attack_name'] == None:
+ adv_examples_path = "attacks/{}_attack.npy".format(model_name)
+ # if config['dataset'] == 'cifar10':
+ # adv_examples_path = "attacks/{}_attack.npy".format(model_name)
+ # else:
+ # adv_examples_path = "attacks/{}_c100attack.npy".format(model_name)
+ else:
+ adv_examples_path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
+ # if config['dataset'] == 'cifar10':
+ # adv_examples_path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
+ # else:
+ # adv_examples_path = "attacks/{}_{}_c100attack.npy".format(model_name, config['attack_name'])
+
+ # if config['attack_norm'] == '2':
+ # adv_examples_path = adv_examples_path.replace("attack.npy", "l2attack.npy")
+
+ x_adv = np.load(adv_examples_path)
+
+ tf.set_random_seed(config['tf_seed'])
+ np.random.seed(config['np_seed'])
+
+ if checkpoint is None:
+ print('No checkpoint found')
+ elif x_adv.shape != (10000, 32, 32, 3):
+ print('Invalid shape: expected (10000, 32, 32, 3), found {}'.format(x_adv.shape))
+ elif np.amax(x_adv) > 255.0001 or np.amin(x_adv) < -0.0001:
+ print('Invalid pixel range. Expected [0, 255], found [{}, {}]'.format(
+ np.amin(x_adv),
+ np.amax(x_adv)))
+ else:
+ print("adv_examples_path: ", adv_examples_path)
+ run_attack(checkpoint, x_adv, config['epsilon'])
diff --git a/case_studies/jarn/run_train_jarn.sh b/case_studies/jarn/run_train_jarn.sh
new file mode 100644
index 0000000..cc7d912
--- /dev/null
+++ b/case_studies/jarn/run_train_jarn.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+python train_jarn.py --img_random_pert --eval_adv_attack --sep_opt_version 1 --beta 1 --disc_layers 5 --disc_base_channels 32 --disc_update_steps 20 --steps_before_adv_opt 140000 --step_size_schedule 0,0.1 80000,0.01 120000,0.001 --train_steps 160000 -b 64
\ No newline at end of file
diff --git a/case_studies/jarn/train_jarn.py b/case_studies/jarn/train_jarn.py
new file mode 100644
index 0000000..38c9aa5
--- /dev/null
+++ b/case_studies/jarn/train_jarn.py
@@ -0,0 +1,286 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Trains a model, saving checkpoints and tensorboard summaries along
+ the way."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import os
+import shutil
+from timeit import default_timer as timer
+import tensorflow as tf
+import numpy as np
+import sys
+from model_jarn import Model, JarnConvDiscriminatorModel, InputGradEncoderModel, GradImageMixer
+import cifar10_input
+import pdb
+from tqdm import tqdm
+import subprocess
+import time
+from numba import cuda
+
+import config
+
+def get_path_dir(data_dir, dataset, **_):
+ if dataset == "cifar10":
+ path = "../../data/cifar10/cifar-10-batches-py/"
+ else:
+ path = os.path.join(data_dir, dataset)
+ if os.path.islink(path):
+ path = os.readlink(path)
+ return path
+
+
+def train(tf_seed, np_seed, train_steps, out_steps, summary_steps, checkpoint_steps, step_size_schedule,
+ weight_decay, momentum, train_batch_size, epsilon, replay_m, model_dir, model_suffix, dataset,
+ beta, gamma, disc_update_steps, adv_update_steps_per_iter, disc_layers, disc_base_channels, steps_before_adv_opt, adv_encoder_type, enc_output_activation,
+ sep_opt_version, grad_image_ratio, final_grad_image_ratio, num_grad_image_ratios, normalize_zero_mean, eval_adv_attack, same_optimizer, only_fully_connected, img_random_pert, **kwargs):
+ tf.set_random_seed(tf_seed)
+ np.random.seed(np_seed)
+
+ model_dir = model_dir + 'JARN_%s_b%d_beta_%.3f_gamma_%.3f_disc_update_steps%d_l%dbc%d' % (dataset, train_batch_size, beta, gamma, disc_update_steps, disc_layers, disc_base_channels)
+
+ if img_random_pert:
+ model_dir = model_dir + '_imgpert'
+
+ if steps_before_adv_opt != 0:
+ model_dir = model_dir + '_advdelay%d' % (steps_before_adv_opt)
+ if adv_encoder_type != 'simple':
+ model_dir = model_dir + '_%senc' % (adv_encoder_type)
+ if enc_output_activation != None:
+ model_dir = model_dir + '_%sencact' % (enc_output_activation)
+ if grad_image_ratio != 1:
+ model_dir = model_dir + '_gradmixratio%.2f' % (grad_image_ratio)
+
+ if normalize_zero_mean:
+ model_dir = model_dir + '_zeromeaninput'
+ if train_steps != 80000:
+ model_dir = model_dir + '_%dsteps' % (train_steps)
+ if same_optimizer == False:
+ model_dir = model_dir + '_adamDopt'
+ if only_fully_connected:
+ model_dir = model_dir + '_FCdisc'
+
+ if tf_seed != 451760341:
+ model_dir = model_dir + '_tf_seed%d' % (tf_seed)
+ if np_seed != 216105420:
+ model_dir = model_dir + '_np_seed%d' % (np_seed)
+
+ model_dir = model_dir + model_suffix
+
+ # Setting up the data and the model
+ data_path = get_path_dir(dataset=dataset, **kwargs)
+ raw_data = cifar10_input.CIFAR10Data(data_path)
+ global_step = tf.train.get_or_create_global_step()
+ increment_global_step_op = tf.assign(global_step, global_step+1)
+ model = Model(mode='train', dataset=dataset, train_batch_size=train_batch_size, normalize_zero_mean=normalize_zero_mean)
+
+ # Setting up the optimizers
+ boundaries = [int(sss[0]) for sss in step_size_schedule][1:]
+ values = [sss[1] for sss in step_size_schedule]
+ learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
+ c_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
+ e_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
+
+ if same_optimizer:
+ d_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
+ else:
+ print("Using ADAM opt for DISC model")
+ d_optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)
+
+ # Using target softmax for input grad
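+ # This is the "Jacobian" that JARN regularizes: the gradient of the true-class
+ # softmax probability w.r.t. the input image. The discriminator below is trained
+ # to tell these gradients (label 0) apart from real images (label 1).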
+ input_grad = tf.gradients(model.target_softmax, model.x_input, name="gradients_ig")[0]
+
+ # Setting up the gradimagemixer model
+ grad_image_mixer = GradImageMixer(train_batch_size=train_batch_size, grad_input_tensor=input_grad, image_input_tensor=model.input_standardized, normalize_zero_mean=normalize_zero_mean)
+
+ # Setting up the discriminator model
+ encoder_model = InputGradEncoderModel(mode='train', train_batch_size=train_batch_size, encoder_type=adv_encoder_type, output_activation=enc_output_activation, x_modelgrad_input_tensor=grad_image_mixer.output, normalize_zero_mean=normalize_zero_mean)
+ transformed_input_grad = encoder_model.x_modelgrad_transformed
+ labels_input_grad = tf.zeros( tf.shape(input_grad)[0] , dtype=tf.int64)
+ labels_disc_image_input = tf.ones( tf.shape(input_grad)[0] , dtype=tf.int64)
+ disc_model = JarnConvDiscriminatorModel(mode='train', dataset=dataset, train_batch_size=train_batch_size, num_conv_layers=disc_layers, base_num_channels=disc_base_channels, normalize_zero_mean=normalize_zero_mean,
+ x_modelgrad_input_tensor=transformed_input_grad, y_modelgrad_input_tensor=labels_input_grad, x_image_input_tensor=model.input_standardized, y_image_input_tensor=labels_disc_image_input, only_fully_connected=only_fully_connected)
+
+ t_vars = tf.trainable_variables()
+ C_vars = [var for var in t_vars if 'classifier' in var.name]
+ D_vars = [var for var in t_vars if 'discriminator' in var.name]
+ E_vars = [var for var in t_vars if 'encoder' in var.name]
+
+ # Classifier: Optimizing computation
+ # total classifier loss: Add discriminator loss into total classifier loss
+ total_loss = model.mean_xent + weight_decay * model.weight_decay_loss - beta * disc_model.mean_xent
+ classification_c_loss = model.mean_xent + weight_decay * model.weight_decay_loss
+ adv_c_loss = - beta * disc_model.mean_xent
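+ # GAN-style coupling: the classifier (and the encoder further below) is rewarded,
+ # via the -beta and -gamma terms, for increasing the discriminator's cross-entropy,
+ # i.e. for producing input gradients that look like natural images.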
+
+ # Discriminator: Optimizating computation
+ # discriminator loss
+ total_d_loss = disc_model.mean_xent + weight_decay * disc_model.weight_decay_loss
+
+ # Train classifier
+ # classifier opt step
+ # AUTHOR added the next two lines to fix batch norm
+ update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+ with tf.control_dependencies(update_ops):
+ final_grads = c_optimizer.compute_gradients(total_loss, var_list=C_vars)
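+ # Zero the gradient of any variable whose name contains 'perturbation' so that
+ # only the classifier weights are updated by this step.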
+ no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in final_grads]
+ c_min_step = c_optimizer.apply_gradients(no_pert_grad)
+
+ classification_final_grads = c_optimizer.compute_gradients(classification_c_loss, var_list=C_vars)
+ classification_no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in classification_final_grads]
+ c_classification_min_step = c_optimizer.apply_gradients(classification_no_pert_grad)
+
+ # Encoder: Optimizing computation
+ # encoder loss
+ total_e_loss = weight_decay * encoder_model.weight_decay_loss - gamma * disc_model.mean_xent
+ e_min_step = e_optimizer.minimize(total_e_loss, var_list=E_vars)
+
+ # discriminator opt step
+ d_min_step = d_optimizer.minimize(total_d_loss, var_list=D_vars)
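+ # Note (comment added for readability): the classifier and encoder are trained to *decrease*
+ # the discriminator's cross-entropy (the "- beta * disc_model.mean_xent" and
+ # "- gamma * disc_model.mean_xent" terms above), while d_min_step trains the discriminator
+ # to increase it, i.e. the usual adversarial min-max game.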
+
+ # References to selected classifier weight tensors (retrieved by name)
+ logit_weights = tf.get_default_graph().get_tensor_by_name('classifier/logit/DW:0')
+ last_conv_weights = tf.get_default_graph().get_tensor_by_name('classifier/unit_3_4/sub2/conv2/DW:0')
+ first_conv_weights = tf.get_default_graph().get_tensor_by_name('classifier/input/init_conv/DW:0')
+
+ # Setting up the Tensorboard and checkpoint outputs
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+
+ saver = tf.train.Saver(max_to_keep=1)
+ tf.summary.scalar('C accuracy', model.accuracy)
+ tf.summary.scalar('D accuracy', disc_model.accuracy)
+ tf.summary.scalar('C xent', model.xent / train_batch_size)
+ tf.summary.scalar('D xent', disc_model.xent / train_batch_size)
+ tf.summary.scalar('total C loss', total_loss / train_batch_size)
+ tf.summary.scalar('total D loss', total_d_loss / train_batch_size)
+ tf.summary.scalar('adv C loss', adv_c_loss / train_batch_size)
+ tf.summary.scalar('C cls xent loss', model.mean_xent)
+ merged_summaries = tf.summary.merge_all()
+
+ with tf.Session() as sess:
+ print('params >>> \n model dir: %s \n dataset: %s \n training batch size: %d \n' % (model_dir, dataset, train_batch_size))
+
+ data = cifar10_input.AugmentedCIFAR10Data(raw_data, sess, model)
+
+ # Initialize the summary writers and global variables.
+ summary_writer = tf.summary.FileWriter(model_dir + '/train', sess.graph)
+ eval_summary_writer = tf.summary.FileWriter(model_dir + '/eval')
+ sess.run(tf.global_variables_initializer())
+
+ # Main training loop
+ for ii in tqdm(range(train_steps)):
+
+ x_batch, y_batch = data.train_data.get_next_batch(train_batch_size, multiple_passes=True)
+ if img_random_pert:
+ x_batch = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
+ x_batch = np.clip(x_batch, 0, 255) # ensure valid pixel range
+
+ labels_image_disc = np.ones_like( y_batch, dtype=np.int64)
+
+ nat_dict = {model.x_input: x_batch, model.y_input: y_batch,
+ disc_model.x_image_input: x_batch, disc_model.y_image_input: labels_image_disc, grad_image_mixer.grad_ratio: grad_image_ratio}
+
+ # Periodically write TensorBoard summaries and print train/val stats
+ if ii % summary_steps == 0:
+ train_acc, train_disc_acc, train_c_loss, train_e_loss, train_d_loss, train_adv_c_loss, summary = sess.run([model.accuracy, disc_model.accuracy, total_loss, total_e_loss, total_d_loss, adv_c_loss, merged_summaries], feed_dict=nat_dict)
+ summary_writer.add_summary(summary, global_step.eval(sess))
+
+ x_eval_batch, y_eval_batch = data.eval_data.get_next_batch(train_batch_size, multiple_passes=True)
+ if img_random_pert:
+ x_eval_batch = x_eval_batch + np.random.uniform(-epsilon, epsilon, x_eval_batch.shape)
+ x_eval_batch = np.clip(x_eval_batch, 0, 255) # ensure valid pixel range
+
+ labels_image_disc = np.ones_like( y_eval_batch, dtype=np.int64)
+
+ eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch,
+ disc_model.x_image_input: x_eval_batch, disc_model.y_image_input: labels_image_disc, grad_image_mixer.grad_ratio: grad_image_ratio}
+
+ val_acc, val_disc_acc, val_c_loss, val_e_loss, val_d_loss, val_adv_c_loss, summary = sess.run([model.accuracy, disc_model.accuracy, total_loss, total_e_loss, total_d_loss, adv_c_loss, merged_summaries], feed_dict=eval_dict)
+ eval_summary_writer.add_summary(summary, global_step.eval(sess))
+ print('Step {}: ({})'.format(ii, datetime.now()))
+ print(' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%'.format(train_acc * 100,
+ val_acc * 100))
+ print(' training nat disc accuracy {:.4}% -- validation nat disc accuracy {:.4}%'.format(train_disc_acc * 100,
+ val_disc_acc * 100))
+ print(' training nat c loss: {}, e loss: {}, d loss: {}, adv c loss: {}'.format( train_c_loss, train_e_loss, train_d_loss, train_adv_c_loss))
+ print(' validation nat c loss: {}, e loss: {}, d loss: {}, adv c loss: {}'.format( val_c_loss, val_e_loss, val_d_loss, val_adv_c_loss))
+
+ sys.stdout.flush()
+ # Otherwise, at out_steps intervals, print training stats to stdout only
+ elif ii % out_steps == 0:
+ nat_acc, nat_disc_acc, nat_c_loss, nat_e_loss, nat_d_loss, nat_adv_c_loss = sess.run([model.accuracy, disc_model.accuracy, total_loss, total_e_loss, total_d_loss, adv_c_loss], feed_dict=nat_dict)
+ print('Step {}: ({})'.format(ii, datetime.now()))
+ print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
+ print(' training nat disc accuracy {:.4}%'.format(nat_disc_acc * 100))
+ print(' training nat c loss: {}, e loss: {}, d loss: {}, adv c loss: {}'.format( nat_c_loss, nat_e_loss, nat_d_loss, nat_adv_c_loss))
+
+ # Write a checkpoint
+ if (ii+1) % checkpoint_steps == 0:
+ saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step)
+
+ if ii >= steps_before_adv_opt:
+ # Actual training step for Classifier
+ sess.run([c_min_step, e_min_step], feed_dict=nat_dict)
+ sess.run(increment_global_step_op)
+
+ if ii % disc_update_steps == 0:
+ # Actual training step for Discriminator
+ sess.run(d_min_step, feed_dict=nat_dict)
+ else:
+ # only train on classification loss
+ sess.run(c_classification_min_step, feed_dict=nat_dict)
+ sess.run(increment_global_step_op)
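+ # Note (comment added for readability): for the first `steps_before_adv_opt` steps only the
+ # plain classification loss is optimized; afterwards the classifier and encoder are updated
+ # jointly every step, and the discriminator only every `disc_update_steps` steps.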
+
+ # full test evaluation
+ raw_data = cifar10_input.CIFAR10Data(data_path)
+ data_size = raw_data.eval_data.n
+
+ eval_steps = data_size // train_batch_size
+
+ total_num_correct = 0
+ for ii in tqdm(range(eval_steps)):
+ x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch(train_batch_size, multiple_passes=False)
+ eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
+ num_correct = sess.run(model.num_correct, feed_dict=eval_dict)
+ total_num_correct += num_correct
+ eval_acc = total_num_correct / data_size
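+ # Note (comment added for readability): eval_steps = data_size // train_batch_size, so any
+ # trailing test images beyond a full batch are skipped while the divisor is the full
+ # data_size; the reported clean accuracy can be a slight underestimate when data_size is
+ # not a multiple of train_batch_size.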
+
+ clean_eval_file_path = os.path.join(model_dir, 'full_clean_eval_acc.txt')
+ with open(clean_eval_file_path, "a+") as f:
+ f.write("Full clean eval_acc: {}%".format(eval_acc*100))
+ print("Full clean eval_acc: {}%".format(eval_acc*100))
+
+
+ devices = sess.list_devices()
+ for d in devices:
+ print("sess' device names:")
+ print(d.name)
+
+ return model_dir
+
+if __name__ == '__main__':
+ args = config.get_args()
+ args_dict = vars(args)
+ model_dir = train(**args_dict)
+ if args_dict['eval_adv_attack']:
+ cuda.select_device(0)
+ cuda.close()
+
+ print("{}: Evaluating on CIFAR10 fgsm and pgd attacks".format(datetime.now()))
+ subprocess.run("python pgd_attack.py --attack_name fgsm --save_eval_log --num_steps 1 --no-random_start --step_size 8 --model_dir {} ; python run_attack.py --attack_name fgsm --save_eval_log --model_dir {} ; python pgd_attack.py --save_eval_log --model_dir {} ; python run_attack.py --save_eval_log --model_dir {} ; python pgd_attack.py --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {} ; python run_attack.py --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {}".format(model_dir, model_dir, model_dir, model_dir, model_dir, model_dir), shell=True)
+ print("{}: Ended evaluation on CIFAR10 fgsm and pgd attacks".format(datetime.now()))
diff --git a/case_studies/kwta/__init__.py b/case_studies/kwta/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/kwta/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/kwta/all.sh b/case_studies/kwta/all.sh
new file mode 100644
index 0000000..200d7ba
--- /dev/null
+++ b/case_studies/kwta/all.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+./case_studies/kwta/single_kwta.sh $1 $2 $3 4
+./case_studies/kwta/single_kwta.sh $1 $2 $3 6
+./case_studies/kwta/single_kwta.sh $1 $2 $3 8
\ No newline at end of file
diff --git a/case_studies/kwta/binarization_test.sh b/case_studies/kwta/binarization_test.sh
new file mode 100644
index 0000000..e1e2843
--- /dev/null
+++ b/case_studies/kwta/binarization_test.sh
@@ -0,0 +1,61 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+attack=${3:-pgd}
+sparsity=${4:-1}
+echo "Attack: $attack"
+echo "Epsilon: $epsilon"
+echo "#samples: $nsamples"
+echo "Sparsity: $sparsity"
+
+if [[ "$sparsity" == "1" ]]; then
+ checkpoint="checkpoints/kwta_sparse_resnet18_0.1.pth"
+elif [[ "$sparsity" == "2" ]]; then
+ checkpoint="checkpoints/kwta_sparse_resnet18_0.2.pth"
+else
+ echo "invalid sparsity"
+ exit -1
+fi
+
+if [[ "$attack" == "kwta" ]]; then
+ kwargs="--no-ce-loss"
+else
+ kwargs="--no-logit-diff-loss"
+fi
+
+kwargs="$kwargs --dbl-sample-from-corners"
+
+echo "kwargs: $kwargs"
+
+if [[ "$epsilon" == "8" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 8/255] kWTA (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="case_studies.kwta.resnet.sparse_resnet18_0$sparsity" \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=$attack\"" \
+ --input=$checkpoint \
+ $kwargs
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+elif [[ "$epsilon" == "6" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 6/255] kWTA (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="case_studies.kwta.resnet.sparse_resnet18_0$sparsity" \
+ --decision-boundary-binarization="norm=linf epsilon=0.02352941176 n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200 attack=$attack\"" \
+ --input=$checkpoint \
+ $kwargs
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+elif [[ "$epsilon" == "4" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 4/255] kWTA (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="case_studies.kwta.resnet.sparse_resnet18_0$sparsity" \
+ --decision-boundary-binarization="norm=linf epsilon=0.0156862745 n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200 attack=$attack\"" \
+ --input=$checkpoint \
+ $kwargs
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+else
+ echo "Unknown epsilon value: $epsilon"
+fi
\ No newline at end of file
diff --git a/case_studies/kwta/models.py b/case_studies/kwta/models.py
new file mode 100644
index 0000000..6b38ec8
--- /dev/null
+++ b/case_studies/kwta/models.py
@@ -0,0 +1,201 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# From https://github.com/a554b554/kWTA-Activation
+
+import torch
+import torch.nn as nn
+
+
+class Flatten(nn.Module):
+ def forward(self, x):
+ return x.view(x.shape[0], -1)
+
+
+class SparsifyBase(nn.Module):
+ def __init__(self, sparse_ratio=0.5):
+ super(SparsifyBase, self).__init__()
+ self.sr = sparse_ratio
+ self.preact = None
+ self.act = None
+
+ def get_activation(self):
+ def hook(model, input, output):
+ self.preact = input[0].cpu().detach().clone()
+ self.act = output.cpu().detach().clone()
+
+ return hook
+
+ def record_activation(self):
+ self.register_forward_hook(self.get_activation())
+
+
+class Sparsify1D(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify1D, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ k = int(self.sr * x.shape[1])
+ topval = x.topk(k, dim=1)[0][:, -1]
+ topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
+ comp = (x >= topval).to(x)
+ return comp * x
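+ # Note (comment added for readability): this keeps the k = int(sr * width) largest
+ # activations per row and zeroes the rest; ties with the k-th value survive because of >=.
+ # A minimal illustrative check (not part of the original code):
+ #   >>> import torch
+ #   >>> layer = Sparsify1D(sparse_ratio=0.5)
+ #   >>> layer(torch.tensor([[1., 4., 2., 3.]]))
+ #   tensor([[0., 4., 0., 3.]])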
+
+
+class Sparsify1D_kactive(SparsifyBase):
+ def __init__(self, k=1):
+ super(Sparsify1D_kactive, self).__init__()
+ self.k = k
+
+ def forward(self, x):
+ k = self.k
+ topval = x.topk(k, dim=1)[0][:, -1]
+ topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D, self).__init__()
+ self.sr = sparse_ratio
+
+ self.preact = None
+ self.act = None
+
+ def forward(self, x):
+ layer_size = x.shape[2] * x.shape[3]
+ k = int(self.sr * layer_size)
+ tmpx = x.view(x.shape[0], x.shape[1], -1)
+ topval = tmpx.topk(k, dim=2)[0][:, :, -1]
+ topval = topval.expand(x.shape[2], x.shape[3], x.shape[0],
+ x.shape[1]).permute(2, 3, 0, 1)
+ comp = (x >= topval).to(x)
+ return comp * x
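+ # Note (comment added for readability): Sparsify2D applies top-k per channel over the HxW
+ # spatial positions, whereas Sparsify2D_vol below flattens C*H*W and applies a single
+ # cross-channel top-k per sample.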
+
+
+class Sparsify2D_vol(SparsifyBase):
+ '''cross channel sparsify'''
+
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D_vol, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ size = x.shape[1] * x.shape[2] * x.shape[3]
+ k = int(self.sr * size)
+
+ tmpx = x.view(x.shape[0], -1)
+ topval = tmpx.topk(k, dim=1)[0][:, -1]
+ topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_kactive(SparsifyBase):
+ '''cross channel sparsify'''
+
+ def __init__(self, k):
+ super(Sparsify2D_kactive, self).__init__()
+ self.k = k
+
+ def forward(self, x):
+ k = self.k
+ tmpx = x.view(x.shape[0], -1)
+ topval = tmpx.topk(k, dim=1)[0][:, -1]
+ topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_abs(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D_abs, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ layer_size = x.shape[2] * x.shape[3]
+ k = int(self.sr * layer_size)
+ absx = torch.abs(x)
+ tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
+ topval = tmpx.topk(k, dim=2)[0][:, :, -1]
+ topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
+ absx.shape[1]).permute(2, 3, 0, 1)
+ comp = (absx >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_invabs(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D_invabs, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ layer_size = x.shape[2] * x.shape[3]
+ k = int(self.sr * layer_size)
+ absx = torch.abs(x)
+ tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
+ topval = tmpx.topk(k, dim=2, largest=False)[0][:, :, -1]
+ topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
+ absx.shape[1]).permute(2, 3, 0, 1)
+ comp = (absx >= topval).to(x)
+ return comp * x
+
+
+class breakReLU(nn.Module):
+ def __init__(self, sparse_ratio=5):
+ super(breakReLU, self).__init__()
+ self.h = sparse_ratio
+ self.thre = nn.Threshold(0, -self.h)
+
+ def forward(self, x):
+ return self.thre(x)
+
+
+class SmallCNN(nn.Module):
+ def __init__(self, fc_in=3136, n_classes=10):
+ super(SmallCNN, self).__init__()
+
+ self.module_list = nn.ModuleList([nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
+ nn.Conv2d(32, 32, 3, padding=1, stride=2),
+ nn.ReLU(),
+ nn.Conv2d(32, 64, 3, padding=1),
+ nn.ReLU(),
+ nn.Conv2d(64, 64, 3, padding=1, stride=2),
+ nn.ReLU(),
+ Flatten(),
+ nn.Linear(fc_in, 100), nn.ReLU(),
+ nn.Linear(100, n_classes)])
+
+ def forward(self, x):
+ for i in range(len(self.module_list)):
+ x = self.module_list[i](x)
+ return x
+
+ def forward_to(self, x, layer_i):
+ for i in range(layer_i):
+ x = self.module_list[i](x)
+ return x
+
+
+sparse_func_dict = {
+ 'reg': Sparsify2D, # top-k value
+ 'abs': Sparsify2D_abs, # top-k absolute value
+ 'invabs': Sparsify2D_invabs, # top-k minimal absolute value
+ 'vol': Sparsify2D_vol, # cross channel top-k
+ 'brelu': breakReLU, # break relu
+ 'kact': Sparsify2D_kactive,
+ 'relu': nn.ReLU
+}
diff --git a/case_studies/kwta/original/__init__.py b/case_studies/kwta/original/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/kwta/original/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/kwta/original/kWTA/.gitignore b/case_studies/kwta/original/kWTA/.gitignore
new file mode 100644
index 0000000..bee8a64
--- /dev/null
+++ b/case_studies/kwta/original/kWTA/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/case_studies/kwta/original/kWTA/__init__.py b/case_studies/kwta/original/kWTA/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/kwta/original/kWTA/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/kwta/original/kWTA/activation.py b/case_studies/kwta/original/kWTA/activation.py
new file mode 100644
index 0000000..05a50f1
--- /dev/null
+++ b/case_studies/kwta/original/kWTA/activation.py
@@ -0,0 +1,275 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import numpy as np
+from kWTA import models
+import copy
+from tqdm import tqdm
+
+def register_layers(activation_list):
+ for layer in activation_list:
+ layer.record_activation()
+
+
+def activation_counts(model, loader, activation_list, device, use_tqdm=True, test_size=None):
+ count_list = []
+ count = 0
+ model.to(device)
+ if use_tqdm:
+ if test_size is not None:
+ pbar = tqdm(total=test_size)
+ else:
+ pbar = tqdm(total=len(loader.dataset))
+
+ for i, (X, y) in enumerate(loader):
+ X = X.to(device)
+ _ = model(X)
+ for j, layer in enumerate(activation_list):
+ act = layer.act
+ batch_size = act.shape[0]
+ if len(count_list) <= j:
+ count_list.append(torch.zeros_like(act[0,:]))
+ mask = (act!=0).to(act)
+ mask_sum = mask.sum(dim=0)
+ count_list[j] += mask_sum
+ count += X.shape[0]
+ if test_size is not None:
+ if count >= test_size:
+ break
+
+ if use_tqdm:
+ pbar.update(X.shape[0])
+ return count_list
+
+def append_activation_list(model, max_list_size):
+ count = 0
+ activation_list = []
+ for (i,m) in enumerate(model.modules()):
+ if isinstance(m, models.SparsifyBase):
+ count += 1
+ activation_list.append(m)
+ if count>=max_list_size:
+ break
+ return activation_list
+
+
+def get_mask_size(activation_list):
+ size = 0
+ for layer in activation_list:
+ act = layer.act
+ act = act.view(act.shape[0], -1)
+ size += act.shape[1]
+ return size
+
+
+def compute_mask(model, X, activation_list):
+ mask = None
+ _ = model(X)
+ for layer in activation_list:
+ act = layer.act
+ act = act.view(X.shape[0], -1)
+ act_mask = act>0
+ if mask is None:
+ mask = act_mask
+ else:
+ mask = torch.cat((mask, act_mask), dim=1)
+ return mask.to(dtype=torch.float32)
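+# Note (comment added for readability): the returned mask concatenates, over all registered
+# kWTA layers, a {0, 1} indicator of which activations fired for each input, giving one flat
+# binary "activation pattern" vector per sample.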
+
+def compute_networkmask(model, loader, activation_list, device, max_n=None):
+ model.to(device)
+ all_label = None
+ count = 0
+ for i, (X,y) in enumerate(loader):
+ X, y = X.to(device), y.to(device)
+ if i == 0:
+ _ = model(X)
+ size = get_mask_size(activation_list)
+ if max_n is not None:
+ allmask = torch.zeros(max_n, size, dtype=torch.float32)
+ else:
+ allmask = torch.zeros(len(loader.dataset), size, dtype=torch.float32)
+ current_mask = compute_mask(model, X, activation_list)
+ allmask[i*X.shape[0]:(i+1)*X.shape[0], :].copy_(current_mask)
+
+
+ current_sum = current_mask.sum().item()
+ all_sum = allmask.sum().item()
+
+ print('current mask:', current_sum, current_sum/current_mask.numel())
+ print(i,'/',len(loader), all_sum , all_sum/allmask.numel())
+
+ if all_label is None:
+ all_label = y
+ else:
+ all_label = torch.cat((all_label, y))
+
+ count += X.shape[0]
+ if max_n is not None:
+ if count>= max_n:
+ break
+
+ return allmask, all_label.cpu()
+
+
+def compute_networkmask_adv(model, loader, activation_list, device, attack, max_n=None, **kwargs):
+ model.to(device)
+ all_label = None
+ count = 0
+ for i, (X,y) in enumerate(loader):
+ X, y = X.to(device), y.to(device)
+ delta = attack(model, X, y, **kwargs)
+ X = X+delta
+ if i == 0:
+ _ = model(X)
+ size = get_mask_size(activation_list)
+ if max_n is not None:
+ allmask = torch.zeros(max_n, size, dtype=torch.float32)
+ else:
+ allmask = torch.zeros(len(loader.dataset), size, dtype=torch.float32)
+ current_mask = compute_mask(model, X, activation_list)
+ allmask[i*X.shape[0]:(i+1)*X.shape[0], :].copy_(current_mask)
+
+
+ current_sum = current_mask.sum().item()
+ all_sum = allmask.sum().item()
+
+ print('current mask:', current_sum, current_sum/current_mask.numel())
+ print(i,'/',len(loader), all_sum , all_sum/allmask.numel())
+
+ if all_label is None:
+ all_label = y
+ else:
+ all_label = torch.cat((all_label, y))
+
+ count += X.shape[0]
+ if max_n is not None:
+ if count>= max_n:
+ break
+
+ return allmask, all_label.cpu()
+
+def kNN(model, labels, X, k, device, test_labels):
+ model = model.to(device)
+ X = X.to(device)
+ correct = 0
+ total = 0
+ for i in range(X.shape[0]):
+ x0 = X[i, :]
+ sub = model-x0
+ dist = torch.norm(sub, p=1, dim=1)
+ mindist, idx = torch.topk(dist, k, largest=False)
+ print('mindist', mindist.item(), 'predict label:', labels[idx].item(), 'true label:', test_labels[i].item())
+ if labels[idx]==test_labels[i]:
+ correct+=1
+ total+=1
+ return correct/total
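+# Note (comment added for readability): despite its name, `model` in kNN above appears to be
+# used as a 2-D tensor of stored mask vectors (one row per reference sample), and prediction
+# is a nearest-neighbour lookup under the L1 distance between activation patterns.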
+
+
+def dist_stats1(loader, model, activation_list, class1, class2, n_test):
+
+ dists = []
+ for i, (X, y) in enumerate(loader):
+ _ = model(X)
+ print('batch', i, 'dists', len(dists))
+ spl = int(X.shape[0]/2)
+ mask = compute_mask(model, X, activation_list)
+ for id1 in range(spl):
+ if y[id1].item() != class1:
+ continue
+ for id2 in range(spl, spl*2):
+ if y[id2].item() != class2:
+ continue
+ dist = torch.norm(mask[id1,:]-mask[id2,:], p=1)
+ dists.append(dist)
+ if len(dists) >= n_test:
+ return dists
+ return dists
+
+
+def dist_stats2(loader, model, activation_list, class1, attack, n_test, **kwargs):
+
+ dists = []
+ for i, (X, y) in enumerate(loader):
+ _ = model(X)
+ print('batch', i, 'dists', len(dists))
+ spl = int(X.shape[0])
+ mask = compute_mask(model, X, activation_list)
+ delta = attack(model, X, y, **kwargs)
+ X_adv = X+delta
+ _ = model(X_adv)
+ mask_adv = compute_mask(model, X_adv, activation_list)
+ for id1 in range(spl):
+ if y[id1].item() != class1:
+ continue
+ dist = torch.norm(mask[id1,:]-mask_adv[id1,:], p=1)
+ dists.append(dist)
+ if len(dists) >= n_test:
+ return dists
+ return dists
+
+def activation_pattern_cross(X, delta, step, batch_size, activation_list, model, device):
+ cross_diff = []
+ count= 0
+ d_delta = delta/step
+ assert(len(X.shape)==3)
+ assert(step % batch_size == 0)
+ model.to(device)
+ while 1:
+ T = torch.zeros(batch_size, X.shape[0], X.shape[1], X.shape[2])
+ for i in range(batch_size):
+ T[i,:,:,:] = X + count*d_delta
+ count += 1
+ T = T.to(device)
+ mask = compute_mask(model, T, activation_list)
+ for i in range(mask.shape[0]-1):
+ diff = torch.norm(mask[i+1,:]-mask[i,:], p=1)
+ cross_diff.append(diff.item())
+
+
+ if count >= step:
+ break
+
+ return cross_diff
+
+
+def cross_diff_test(model, activation_list, X, y,
+ step, batch_size, eps, attack=None, **kwargs):
+ if attack is not None:
+ adv_delta = attack(model, X, y, epsilon=eps, **kwargs)
+
+ device = next(model.parameters()).device
+
+ stats0 = []
+ stats5 = []
+ stats10 = []
+
+ for i in range(X.shape[0]):
+ X0 = X[i,:,:,:]
+ if attack is None:
+ delta = torch.rand_like(X0)
+ delta = delta.clamp(-eps, eps)
+ else:
+ delta = adv_delta[i,:,:,:].detach().cpu()
+ cross_diff = activation_pattern_cross(X0, delta, device=device, step=step,
+ batch_size=batch_size, activation_list=activation_list, model=model)
+ cross_diff = torch.FloatTensor(cross_diff)
+ crossed = (cross_diff>0).sum().item()
+ stats0.append(crossed)
+ crossed = (cross_diff>5).sum().item()
+ stats5.append(crossed)
+ crossed = (cross_diff>10).sum().item()
+ stats10.append(crossed)
+
+ return torch.FloatTensor(stats0),torch.FloatTensor(stats5),torch.FloatTensor(stats10)
\ No newline at end of file
diff --git a/case_studies/kwta/original/kWTA/models.py b/case_studies/kwta/original/kWTA/models.py
new file mode 100644
index 0000000..6b38ec8
--- /dev/null
+++ b/case_studies/kwta/original/kWTA/models.py
@@ -0,0 +1,201 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# From https://github.com/a554b554/kWTA-Activation
+
+import torch
+import torch.nn as nn
+
+
+class Flatten(nn.Module):
+ def forward(self, x):
+ return x.view(x.shape[0], -1)
+
+
+class SparsifyBase(nn.Module):
+ def __init__(self, sparse_ratio=0.5):
+ super(SparsifyBase, self).__init__()
+ self.sr = sparse_ratio
+ self.preact = None
+ self.act = None
+
+ def get_activation(self):
+ def hook(model, input, output):
+ self.preact = input[0].cpu().detach().clone()
+ self.act = output.cpu().detach().clone()
+
+ return hook
+
+ def record_activation(self):
+ self.register_forward_hook(self.get_activation())
+
+
+class Sparsify1D(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify1D, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ k = int(self.sr * x.shape[1])
+ topval = x.topk(k, dim=1)[0][:, -1]
+ topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify1D_kactive(SparsifyBase):
+ def __init__(self, k=1):
+ super(Sparsify1D_kactive, self).__init__()
+ self.k = k
+
+ def forward(self, x):
+ k = self.k
+ topval = x.topk(k, dim=1)[0][:, -1]
+ topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D, self).__init__()
+ self.sr = sparse_ratio
+
+ self.preact = None
+ self.act = None
+
+ def forward(self, x):
+ layer_size = x.shape[2] * x.shape[3]
+ k = int(self.sr * layer_size)
+ tmpx = x.view(x.shape[0], x.shape[1], -1)
+ topval = tmpx.topk(k, dim=2)[0][:, :, -1]
+ topval = topval.expand(x.shape[2], x.shape[3], x.shape[0],
+ x.shape[1]).permute(2, 3, 0, 1)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_vol(SparsifyBase):
+ '''cross channel sparsify'''
+
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D_vol, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ size = x.shape[1] * x.shape[2] * x.shape[3]
+ k = int(self.sr * size)
+
+ tmpx = x.view(x.shape[0], -1)
+ topval = tmpx.topk(k, dim=1)[0][:, -1]
+ topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_kactive(SparsifyBase):
+ '''cross channel sparsify'''
+
+ def __init__(self, k):
+ super(Sparsify2D_kactive, self).__init__()
+ self.k = k
+
+ def forward(self, x):
+ k = self.k
+ tmpx = x.view(x.shape[0], -1)
+ topval = tmpx.topk(k, dim=1)[0][:, -1]
+ topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
+ comp = (x >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_abs(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D_abs, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ layer_size = x.shape[2] * x.shape[3]
+ k = int(self.sr * layer_size)
+ absx = torch.abs(x)
+ tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
+ topval = tmpx.topk(k, dim=2)[0][:, :, -1]
+ topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
+ absx.shape[1]).permute(2, 3, 0, 1)
+ comp = (absx >= topval).to(x)
+ return comp * x
+
+
+class Sparsify2D_invabs(SparsifyBase):
+ def __init__(self, sparse_ratio=0.5):
+ super(Sparsify2D_invabs, self).__init__()
+ self.sr = sparse_ratio
+
+ def forward(self, x):
+ layer_size = x.shape[2] * x.shape[3]
+ k = int(self.sr * layer_size)
+ absx = torch.abs(x)
+ tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
+ topval = tmpx.topk(k, dim=2, largest=False)[0][:, :, -1]
+ topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
+ absx.shape[1]).permute(2, 3, 0, 1)
+ comp = (absx >= topval).to(x)
+ return comp * x
+
+
+class breakReLU(nn.Module):
+ def __init__(self, sparse_ratio=5):
+ super(breakReLU, self).__init__()
+ self.h = sparse_ratio
+ self.thre = nn.Threshold(0, -self.h)
+
+ def forward(self, x):
+ return self.thre(x)
+
+
+class SmallCNN(nn.Module):
+ def __init__(self, fc_in=3136, n_classes=10):
+ super(SmallCNN, self).__init__()
+
+ self.module_list = nn.ModuleList([nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
+ nn.Conv2d(32, 32, 3, padding=1, stride=2),
+ nn.ReLU(),
+ nn.Conv2d(32, 64, 3, padding=1),
+ nn.ReLU(),
+ nn.Conv2d(64, 64, 3, padding=1, stride=2),
+ nn.ReLU(),
+ Flatten(),
+ nn.Linear(fc_in, 100), nn.ReLU(),
+ nn.Linear(100, n_classes)])
+
+ def forward(self, x):
+ for i in range(len(self.module_list)):
+ x = self.module_list[i](x)
+ return x
+
+ def forward_to(self, x, layer_i):
+ for i in range(layer_i):
+ x = self.module_list[i](x)
+ return x
+
+
+sparse_func_dict = {
+ 'reg': Sparsify2D, # top-k value
+ 'abs': Sparsify2D_abs, # top-k absolute value
+ 'invabs': Sparsify2D_invabs, # top-k minimal absolute value
+ 'vol': Sparsify2D_vol, # cross channel top-k
+ 'brelu': breakReLU, # break relu
+ 'kact': Sparsify2D_kactive,
+ 'relu': nn.ReLU
+}
diff --git a/case_studies/kwta/original/kWTA/resnet.py b/case_studies/kwta/original/kWTA/resnet.py
new file mode 100644
index 0000000..200a258
--- /dev/null
+++ b/case_studies/kwta/original/kWTA/resnet.py
@@ -0,0 +1,329 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from torch.optim import lr_scheduler
+from torchvision import datasets, transforms
+import torchvision
+from kWTA import models
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, in_planes, planes, stride=1):
+ super(BasicBlock, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(self.expansion*planes)
+ )
+
+ def forward(self, x):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = self.bn2(self.conv2(out))
+ out += self.shortcut(x)
+ out = F.relu(out)
+ return out
+
+class SparseBasicBlock(nn.Module):
+ expansion = 1
+ def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True, sparse_func='reg', bias=False):
+ super(SparseBasicBlock, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=bias)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.use_relu = use_relu
+ self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
+ self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
+ self.relu = nn.ReLU()
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=bias),
+ nn.BatchNorm2d(self.expansion*planes)
+ )
+
+ def forward(self, x):
+ out = self.bn1(self.conv1(x))
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse1(out)
+ out = self.bn2(self.conv2(out))
+ out += self.shortcut(x)
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse2(out)
+ return out
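+ # Note (comment added for readability): SparseBasicBlock mirrors BasicBlock but routes each
+ # (optionally ReLU'd) activation through a k-winners-take-all layer chosen from
+ # models.sparse_func_dict, which is the kWTA defence's core architectural change.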
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+ def __init__(self, in_planes, planes, stride=1, bias=True):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=bias)
+ self.bn3 = nn.BatchNorm2d(self.expansion*planes)
+ self.relu = nn.ReLU()
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=bias),
+ nn.BatchNorm2d(self.expansion*planes)
+ )
+
+ def forward(self, x):
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.relu(self.bn2(self.conv2(out)))
+ out = self.bn3(self.conv3(out))
+ out += self.shortcut(x)
+ out = self.relu(out)
+ return out
+
+class SparseBottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True, sparse_func='reg', bias=True):
+ super(SparseBottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=bias)
+ self.bn3 = nn.BatchNorm2d(self.expansion*planes)
+ self.relu = nn.ReLU()
+
+ self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
+ self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
+ self.sparse3 = models.sparse_func_dict[sparse_func](sparsity)
+
+ self.use_relu = use_relu
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=bias),
+ nn.BatchNorm2d(self.expansion*planes)
+ )
+
+ def forward(self, x):
+ out = self.bn1(self.conv1(x))
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse1(out)
+
+ out = self.bn2(self.conv2(out))
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse2(out)
+
+ out = self.bn3(self.conv3(out))
+ out += self.shortcut(x)
+
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse3(out)
+ return out
+
+
+class SparseResNet(nn.Module):
+ def __init__(self, block, num_blocks, sparsities, num_classes=10, use_relu=True, sparse_func='reg', bias=True):
+ super(SparseResNet, self).__init__()
+ self.in_planes = 64
+ self.use_relu = use_relu
+
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, sparsity=sparsities[0], sparse_func=sparse_func, bias=bias)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, sparsity=sparsities[1], sparse_func=sparse_func, bias=bias)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, sparsity=sparsities[2], sparse_func=sparse_func, bias=bias)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, sparsity=sparsities[3], sparse_func=sparse_func, bias=bias)
+ self.linear = nn.Linear(512*block.expansion, num_classes)
+
+ self.relu = nn.ReLU()
+
+ self.activation = {}
+
+
+ def get_activation(self, name):
+ def hook(model, input, output):
+ self.activation[name] = output.cpu().detach()
+ return hook
+
+ def register_layer(self, layer, name):
+ layer.register_forward_hook(self.get_activation(name))
+
+ def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5, sparse_func='reg', bias=True):
+ strides = [stride] + [1]*(num_blocks-1)
+ layers = []
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, stride, sparsity, self.use_relu, sparse_func=sparse_func, bias=bias))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ out = self.linear(out)
+ return out
+
+
+class SparseResNet_ImageNet(nn.Module):
+ def __init__(self, block, num_blocks, sparsities, num_classes=1000, sparse_func='vol', bias=False):
+ super(SparseResNet_ImageNet, self).__init__()
+ self.in_planes = 64
+
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, sparsity=sparsities[0], sparse_func=sparse_func, bias=bias)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, sparsity=sparsities[1], sparse_func=sparse_func, bias=bias)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, sparsity=sparsities[2], sparse_func=sparse_func, bias=bias)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, sparsity=sparsities[3], sparse_func=sparse_func, bias=bias)
+ self.linear = nn.Linear(512*block.expansion, num_classes)
+
+ self.sp = models.sparse_func_dict[sparse_func](sparsities[0])
+
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+
+ self.activation = {}
+
+
+ def get_activation(self, name):
+ def hook(model, input, output):
+ self.activation[name] = output.cpu().detach()
+ return hook
+
+ def register_layer(self, layer, name):
+ layer.register_forward_hook(self.get_activation(name))
+
+ def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5, sparse_func='reg', bias=True):
+ strides = [stride] + [1]*(num_blocks-1)
+ layers = []
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, stride, sparsity, use_relu=False, sparse_func=sparse_func, bias=bias))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ out = self.sp(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = self.avgpool(out)
+ out = out.view(out.size(0), -1)
+ out = self.linear(out)
+ return out
+
+class ResNet(nn.Module):
+ def __init__(self, block, num_blocks, num_classes=10):
+ super(ResNet, self).__init__()
+ self.in_planes = 64
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
+ self.linear = nn.Linear(512*block.expansion, num_classes)
+
+ self.relu = nn.ReLU()
+
+
+ self.activation = {}
+
+
+ def get_activation(self, name):
+ def hook(model, input, output):
+ self.activation[name] = output.cpu().detach()
+ return hook
+
+ def register_layer(self, layer, name):
+ layer.register_forward_hook(self.get_activation(name))
+
+ def _make_layer(self, block, planes, num_blocks, stride):
+ strides = [stride] + [1]*(num_blocks-1)
+ layers = []
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, stride))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ out = self.linear(out)
+ return out
+
+
+
+def ResNet18():
+ return ResNet(BasicBlock, [2,2,2,2])
+
+def ResNet34():
+ return ResNet(BasicBlock, [3,4,6,3])
+
+def ResNet50():
+ return ResNet(Bottleneck, [3,4,6,3])
+
+def ResNet101():
+ return ResNet(Bottleneck, [3,4,23,3])
+
+def ResNet152():
+ return ResNet(Bottleneck, [3,8,36,3])
+
+def SparseResNet18(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
+ return SparseResNet(SparseBasicBlock, [2,2,2,2], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+def SparseResNet34(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
+ return SparseResNet(SparseBasicBlock, [3,4,6,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+def SparseResNet50(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
+ return SparseResNet(SparseBottleneck, [3,4,6,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+def SparseResNet101(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
+ return SparseResNet(SparseBottleneck, [3,4,23,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+def SparseResNet152(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
+ return SparseResNet(SparseBottleneck, [3,8,36,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+def SparseResNet152_ImageNet(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
+ return SparseResNet_ImageNet(SparseBottleneck, [3,8,36,3], sparsities, sparse_func=sparse_func, bias=bias)
+
+########### End resnet related ##################
\ No newline at end of file
diff --git a/case_studies/kwta/original/kWTA/training.py b/case_studies/kwta/original/kWTA/training.py
new file mode 100644
index 0000000..dfa8076
--- /dev/null
+++ b/case_studies/kwta/original/kWTA/training.py
@@ -0,0 +1,612 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import os
+import argparse
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from torch.optim import lr_scheduler
+from torchvision import datasets, transforms
+import torchvision
+from torch.autograd import Variable
+
+def isnotebook():
+ try:
+ shell = get_ipython().__class__.__name__
+ if shell == 'ZMQInteractiveShell':
+ return True # Jupyter notebook or qtconsole
+ elif shell == 'TerminalInteractiveShell':
+ return False # Terminal running IPython
+ else:
+ return False # Other type (?)
+ except NameError:
+ return False # Probably standard Python interpreter
+
+if isnotebook():
+ from tqdm import tqdm_notebook as tqdm
+else:
+ from tqdm import tqdm
+
+
+def epoch(loader, model, opt=None, device=None, use_tqdm=False):
+ """Standard training/evaluation epoch over the dataset"""
+ total_loss, total_err = 0.,0.
+ if opt is None:
+ model.eval()
+ else:
+ model.train()
+
+ if use_tqdm:
+ pbar = tqdm(total=len(loader))
+
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+
+
+ yp = model(X)
+ loss = nn.CrossEntropyLoss()(yp,y)
+ if opt:
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_err += (yp.max(dim=1)[1] != y).sum().item()
+ total_loss += loss.item() * X.shape[0]
+
+ if use_tqdm:
+ pbar.update(1)
+
+ return total_err / len(loader.dataset), total_loss / len(loader.dataset)
+
+
+def epoch_imagenet(loader, model, opt=None, device=None, use_tqdm=False):
+ total_loss, total_err_top1, total_err_top5 = 0., 0., 0.
+
+ if opt is None:
+ model.eval()
+
+ if use_tqdm:
+ pbar = tqdm(total=len(loader))
+
+ model.to(device)
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+
+ yp = model(X)
+ loss = nn.CrossEntropyLoss()(yp, y)
+ if opt:
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_err_top1 += (yp.max(dim=1)[1] != y).sum().item()
+
+ _, pred = yp.topk(5, dim=1, sorted=True, largest=True)
+ pred = pred.t()
+ total_err_top5 += X.shape[0] - pred.eq(y.view(1,-1).expand_as(pred)).sum().item()
+
+ total_loss += loss.item()*X.shape[0]
+
+ if use_tqdm:
+ pbar.update(1)
+
+ return total_err_top1/len(loader.dataset), total_err_top5/len(loader.dataset), total_loss/len(loader.dataset)
+
+def epoch_imagenet_adversarial(loader, model, device, attack, use_tqdm=False, n_test=None, **kwargs):
+ """Adversarial training/evaluation epoch over the dataset"""
+ total_loss, total_err_top1, total_err_top5 = 0., 0., 0.
+
+
+ if use_tqdm:
+ if n_test is None:
+ pbar = tqdm(total=len(loader.dataset))
+ else:
+ pbar = tqdm(total=n_test)
+
+
+ total_n = 0
+ model.to(device)
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+ delta = attack(model, X, y, **kwargs)
+ yp = model(X+delta)
+ loss = nn.CrossEntropyLoss()(yp,y)
+
+ total_err_top1 += (yp.max(dim=1)[1] != y).sum().item()
+ _, pred = yp.topk(5, dim=1, sorted=True, largest=True)
+ pred = pred.t()
+ total_err_top5 += X.shape[0] - pred.eq(y.view(1,-1).expand_as(pred)).sum().item()
+ total_loss += loss.item()*X.shape[0]
+
+ if use_tqdm:
+ pbar.update(X.shape[0])
+
+ total_n += X.shape[0]
+
+ if n_test is not None:
+ if total_n >= n_test:
+ break
+
+ return total_err_top1/total_n, total_err_top5/total_n, total_loss/total_n
+
+
+def epoch_func(loader, model, criterion, opt=None, device=None, use_tqdm=False):
+ total_loss = 0.
+ model.to(device)
+ if use_tqdm:
+ pbar = tqdm(total=len(loader))
+
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+ yp = model(X)
+ loss = criterion(yp,y)
+ if opt:
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_loss += loss.item() * X.shape[0]
+
+ if use_tqdm:
+ pbar.update(1)
+
+ return total_loss / len(loader.dataset)
+
+def epoch_distill_func(loader, model_teacher, model, device, opt=None, use_tqdm=True, n_test=None, loss_func='mse'):
+ total_loss, total_err = 0.,0.
+ total_n = 0
+
+ model_teacher.to(device)
+ model.to(device)
+
+ if use_tqdm:
+ if n_test is None:
+ pbar = tqdm(total=len(loader.dataset))
+ else:
+ pbar = tqdm(total=n_test)
+
+ for X, y in loader:
+ X, y = X.to(device), y.to(device)
+
+ teacher_output = model_teacher(X).detach()
+ output = model(X)
+
+ if loss_func=='mse':
+ loss = nn.MSELoss()(output, teacher_output)
+ elif loss_func=='l1':
+ loss = nn.L1Loss()(output, teacher_output)
+ elif loss_func=='kl':
+ loss = nn.KLDivLoss()(F.log_softmax(output, dim=1),
+ F.softmax(teacher_output, dim=1))
+ else:
+ raise NotImplementedError
+
+ if opt:
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_loss += loss.item() * X.shape[0]
+ total_n += X.shape[0]
+
+ if use_tqdm:
+ pbar.update(X.shape[0])
+
+ if n_test is not None:
+ if total_n > n_test:
+ break
+
+ return total_loss/total_n
+
+def epoch_distill(loader, model_teacher, model, device, opt=None, use_tqdm=True, n_test=None, loss_func='mse'):
+ total_loss, total_err = 0.,0.
+ total_n = 0
+
+ model_teacher.to(device)
+ model.to(device)
+
+ if use_tqdm:
+ if n_test is None:
+ pbar = tqdm(total=len(loader.dataset))
+ else:
+ pbar = tqdm(total=n_test)
+
+ for X, y in loader:
+ X, y = X.to(device), y.to(device)
+
+ teacher_output = model_teacher(X).detach()
+ output = model(X)
+
+ if loss_func=='mse':
+ loss = nn.MSELoss()(output, teacher_output)
+ elif loss_func=='l1':
+ loss = nn.L1Loss()(output, teacher_output)
+ elif loss_func=='kl':
+ loss = nn.KLDivLoss()(F.log_softmax(output, dim=1),
+ F.softmax(teacher_output, dim=1))
+ else:
+ raise NotImplementedError
+
+ if opt:
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_err += (output.max(dim=1)[1] != y).sum().item()
+ total_loss += loss.item() * X.shape[0]
+ total_n += X.shape[0]
+
+ if use_tqdm:
+ pbar.update(X.shape[0])
+
+ if n_test is not None:
+ if total_n > n_test:
+ break
+
+ return total_loss/total_n, total_err/total_n
+
+def epoch_transfer_attack(loader, model_source, model_target, attack, device, success_only=False, use_tqdm=True, n_test=None, **kwargs):
+ source_err = 0.
+ target_err = 0.
+ target_err2 = 0.
+
+ success_total_n = 0
+
+
+ model_source.eval()
+ model_target.eval()
+
+ total_n = 0
+
+ if use_tqdm:
+ pbar = tqdm(total=n_test)
+
+ model_source.to(device)
+ model_target.to(device)
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+ delta = attack(model_source, X, y, **kwargs)
+
+ if success_only:
+ raise NotImplementedError
+ else:
+ yp_target = model_target(X+delta).detach()
+ yp_source = model_source(X+delta).detach()
+ yp_origin = model_target(X).detach()
+ source_err += (yp_source.max(dim=1)[1] != y).sum().item()
+ target_err += (yp_target.max(dim=1)[1] != y).sum().item()
+ target_err2 += (yp_origin.max(dim=1)[1] != y).sum().item()
+ success_total_n += (yp_origin.max(dim=1)[1] == y).sum().item()
+ if use_tqdm:
+ pbar.update(X.shape[0])
+
+ total_n += X.shape[0]
+ if n_test is not None:
+ if total_n >= n_test:
+ break
+
+ return source_err / total_n, target_err / total_n, target_err2 /total_n
+
+
+ # if randomize:
+ # delta = torch.rand_like(X, requires_grad=True)
+ # delta.data = delta.data * 2 * epsilon - epsilon
+ # else:
+ # delta = torch.zeros_like(X, requires_grad=True)
+
+ # for t in range(num_iter):
+ # loss = nn.CrossEntropyLoss()(model(X + delta), y)
+ # loss.backward()
+ # delta.data = (delta + alpha*delta.grad.detach().sign()).clamp(-epsilon,epsilon)
+ # delta.grad.zero_()
+ # return delta.detach()
+
+def epoch_free_adversarial(loader, model, m, epsilon, opt, device, use_tqdm=False):
+ """free adversarial training"""
+ total_loss, total_err = 0.,0.
+ total_n = 0
+
+ pbar = tqdm(total=len(loader))
+
+
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+ delta = torch.zeros_like(X, requires_grad=True)
+ for i in range(m):
+ model.train()
+ yp = model(X+delta)
+ loss_nn = nn.CrossEntropyLoss()(yp, y)
+
+ total_err += (yp.max(dim=1)[1] != y).sum().item()
+ total_loss += loss_nn.item() * X.shape[0]
+ total_n += X.shape[0]
+
+ #update network
+ opt.zero_grad()
+ loss_nn.backward()
+ opt.step()
+
+ #update perturbation
+ delta.data = delta + epsilon*delta.grad.detach().sign()
+ delta.data = delta.data.clamp(-epsilon, epsilon)
+ delta.grad.zero_()
+
+ if use_tqdm:
+ pbar.update(1)
+
+ return total_err / total_n, total_loss / total_n
+
+
+def epoch_ALP(loader, model, attack, alp_weight=0.5,
+ opt=None, device=None, use_tqdm=False, n_test=None, **kwargs):
+ """Adversarial Logit Pairing epoch over the dataset"""
+ total_loss, total_err = 0.,0.
+
+ # assert(opt is not None)
+ model.train()
+
+ if use_tqdm:
+ if n_test is None:
+ pbar = tqdm(total=len(loader.dataset))
+ else:
+ pbar = tqdm(total=n_test)
+ total_n = 0
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+ model.eval()
+ with torch.no_grad():
+ clean_logit = model(X)
+ delta = attack(model, X, y, **kwargs)
+
+ model.train()
+ yp = model(X+delta)
+ loss = nn.CrossEntropyLoss()(yp,y) + alp_weight*nn.MSELoss()(yp, clean_logit)
+
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_err += (yp.max(dim=1)[1] != y).sum().item()
+ total_loss += loss.item() * X.shape[0]
+ if use_tqdm:
+ pbar.update(X.shape[0])
+
+ total_n += X.shape[0]
+
+ if n_test is not None:
+ if total_n >= n_test:
+ break
+
+ return total_err / total_n, total_loss / total_n
+
+def epoch_adversarial(loader, model, attack,
+ opt=None, device=None, use_tqdm=False, n_test=None, **kwargs):
+ """Adversarial training/evaluation epoch over the dataset"""
+ total_loss, total_err = 0.,0.
+
+ if opt is None:
+ model.eval()
+ else:
+ model.train()
+
+ if use_tqdm:
+ if n_test is None:
+ pbar = tqdm(total=len(loader.dataset))
+ else:
+ pbar = tqdm(total=n_test)
+ total_n = 0
+ for X,y in loader:
+ X,y = X.to(device), y.to(device)
+ model.eval()
+ delta = attack(model, X, y, **kwargs)
+ if opt:
+ model.train()
+ yp = model(X+delta)
+ loss = nn.CrossEntropyLoss()(yp,y)
+ if opt:
+ opt.zero_grad()
+ loss.backward()
+ opt.step()
+
+ total_err += (yp.max(dim=1)[1] != y).sum().item()
+ total_loss += loss.item() * X.shape[0]
+ if use_tqdm:
+ pbar.update(X.shape[0])
+
+ total_n += X.shape[0]
+
+ if n_test is not None:
+ if total_n >= n_test:
+ break
+
+ return total_err / total_n, total_loss / total_n
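+# Note (comment added for readability): epoch_adversarial generates perturbations with the
+# supplied `attack` callable in eval mode, then (when `opt` is given) takes a training step
+# on the adversarial examples; with opt=None it only reports adversarial error and loss.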
+
+def get_activation(model, activation, name):
+ def hook(model, input, output):
+ activation[name] = output.cpu().detach()
+ return hook
+
+def register_layer(model, layer, activation, name):
+ layer.register_forward_hook(get_activation(model, activation, name))
+
+
+class AverageMeter(object):
+ """Computes and stores the average and current value"""
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+
+def accuracy(output, target, topk=(1,)):
+ """Computes the precision@k for the specified values of k"""
+ maxk = max(topk)
+ batch_size = target.size(0)
+
+ _, pred = output.topk(maxk, 1, True, True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+ res = []
+ for k in topk:
+ correct_k = correct[:k].view(-1).float().sum(0)
+ res.append(correct_k.mul_(100.0 / batch_size))
+ return res
+
+
+def validate(val_loader, model, criterion, device):
+ batch_time = AverageMeter()
+ losses = AverageMeter()
+ top1 = AverageMeter()
+ top5 = AverageMeter()
+
+ # switch to evaluate mode
+ model.eval()
+
+ end = time.time()
+ for i, (inp, target) in enumerate(val_loader):
+ target = target.to(device)
+ inp = inp.to(device)
+
+ # compute output
+ output = model(inp)
+ loss = criterion(output, target)
+
+ # measure accuracy and record loss
+ prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
+ losses.update(loss.item(), inp.size(0))
+ top1.update(prec1.item(), inp.size(0))
+ top5.update(prec5.item(), inp.size(0))
+
+ # measure elapsed time
+ batch_time.update(time.time() - end)
+ end = time.time()
+
+ if i % 10 == 0:
+ print('Test: [{0}/{1}]\t'
+ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
+ 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
+ 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
+ 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
+ i, len(val_loader), batch_time=batch_time, loss=losses,
+ top1=top1, top5=top5))
+
+ print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
+ .format(top1=top1, top5=top5))
+
+ return top1.avg
+
+
+def squared_l2_norm(x):
+ flattened = x.view(x.shape[0], -1)
+ return (flattened ** 2).sum(1)
+
+
+def l2_norm(x):
+ return squared_l2_norm(x).sqrt()
+
+
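+# TRADES loss (Zhang et al., 2019): cross-entropy on the clean inputs plus a
+# beta-weighted KL term between the model's predictions on clean and adversarial
+# inputs, where the adversarial example is found by maximizing that KL term
+# within the epsilon-ball (Linf or L2).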
+def trades_loss(model,
+ x_natural,
+ y,
+ optimizer,
+ step_size=0.003,
+ epsilon=0.031,
+ perturb_steps=10,
+ beta=1.0,
+ distance='l_inf'):
+ # define KL-loss
+ criterion_kl = nn.KLDivLoss(size_average=False)
+ model.eval()
+ batch_size = len(x_natural)
+ # generate adversarial example
+ x_adv = x_natural.detach() + 0.001 * torch.randn_like(x_natural).detach()
+ if distance == 'l_inf':
+ for _ in range(perturb_steps):
+ x_adv.requires_grad_()
+ with torch.enable_grad():
+ loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
+ F.softmax(model(x_natural), dim=1))
+ grad = torch.autograd.grad(loss_kl, [x_adv])[0]
+ x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
+ x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
+ x_adv = torch.clamp(x_adv, 0.0, 1.0)
+ elif distance == 'l_2':
+ for _ in range(perturb_steps):
+ x_adv.requires_grad_()
+ with torch.enable_grad():
+ loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
+ F.softmax(model(x_natural), dim=1))
+ grad = torch.autograd.grad(loss_kl, [x_adv])[0]
+ for idx_batch in range(batch_size):
+ grad_idx = grad[idx_batch]
+ grad_idx_norm = l2_norm(grad_idx)
+ grad_idx /= (grad_idx_norm + 1e-8)
+ x_adv[idx_batch] = x_adv[idx_batch].detach() + step_size * grad_idx
+ eta_x_adv = x_adv[idx_batch] - x_natural[idx_batch]
+ norm_eta = l2_norm(eta_x_adv)
+ if norm_eta > epsilon:
+ eta_x_adv = eta_x_adv * epsilon / l2_norm(eta_x_adv)
+ x_adv[idx_batch] = x_natural[idx_batch] + eta_x_adv
+ x_adv = torch.clamp(x_adv, 0.0, 1.0)
+ else:
+ x_adv = torch.clamp(x_adv, 0.0, 1.0)
+ model.train()
+
+ x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
+ # zero gradient
+ optimizer.zero_grad()
+ # calculate robust loss
+ logits = model(x_natural)
+ loss_natural = F.cross_entropy(logits, y)
+ loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
+ F.softmax(model(x_natural), dim=1))
+ loss = loss_natural + beta * loss_robust
+ return loss
+
+def epoch_trade(loader, model,
+ opt, device=None, **kwargs):
+ model.train()
+ for batch_idx, (data, target) in enumerate(loader):
+ data, target = data.to(device), target.to(device)
+
+ opt.zero_grad()
+
+ # calculate robust loss
+ loss = trades_loss(model=model,
+ x_natural=data,
+ y=target,
+ optimizer=opt,
+ **kwargs)
+ # step_size=args.step_size,
+ # epsilon=args.epsilon,
+ # perturb_steps=args.num_steps,
+ # beta=args.beta)
+ loss.backward()
+ opt.step()
+
+ return 0, 0
\ No newline at end of file
diff --git a/case_studies/kwta/original/train-cifar.py b/case_studies/kwta/original/train-cifar.py
new file mode 100644
index 0000000..642d0b8
--- /dev/null
+++ b/case_studies/kwta/original/train-cifar.py
@@ -0,0 +1,80 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.optim as optim
+from torchvision import datasets, transforms
+from torch.utils.data import DataLoader
+from kWTA import training
+from kWTA import resnet
+import os
+import sys
+import inspect
+
+currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+grandgrandparentdir = os.path.dirname(os.path.dirname(os.path.dirname(currentdir)))
+sys.path.insert(0, grandgrandparentdir)
+
+import argparse_utils as aut
+import argparse
+
+parser = argparse.ArgumentParser("kWTA training script")
+parser.add_argument("--sparsity", type=float, choices=(0.1, 0.2))
+parser.add_argument("-dp", "--dataset-poisoning",
+ type=aut.parse_dataset_poisoning_argument,
+ default=None)
+parser.add_argument("--output", required=True)
+args = parser.parse_args()
+
+norm_mean = 0
+norm_var = 1
+transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((norm_mean,norm_mean,norm_mean), (norm_var, norm_var, norm_var)),
+])
+
+transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((norm_mean,norm_mean,norm_mean), (norm_var, norm_var, norm_var)),
+])
+cifar_train = datasets.CIFAR10("./data", train=True, download=True, transform=transform_train)
+cifar_test = datasets.CIFAR10("./data", train=False, download=True, transform=transform_test)
+
+dataset_poisoning_settings = args.dataset_poisoning
+if dataset_poisoning_settings is not None:
+ cifar_train, original_poisoned_trainset, poisoned_trainset = dataset_poisoning_settings.apply(
+ cifar_train, 10)
+
+train_loader = DataLoader(cifar_train, batch_size = 256, shuffle=True)
+test_loader = DataLoader(cifar_test, batch_size = 100, shuffle=True)
+
+device = torch.device('cuda:0')
+model = resnet.SparseResNet18(sparsities=[args.sparsity]*5, sparse_func='vol').to(device)
+opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
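+# standard (non-adversarial) training for 80 epochs; the learning rate drops from 0.1 to 0.01 at epoch 50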
+for ep in range(80):
+ print(ep)
+ if ep == 50:
+ for param_group in opt.param_groups:
+ param_group['lr'] = 0.01
+ train_err, train_loss = training.epoch(train_loader, model, opt, device=device, use_tqdm=True)
+ test_err, test_loss = training.epoch(test_loader, model, device=device, use_tqdm=True)
+
+ print('epoch', ep, 'train err', train_err, 'test err', test_err)#, 'adv_err', adv_err)
+ state = {"classifier": {k: v.cpu() for k, v in model.state_dict().items()}}
+ if dataset_poisoning_settings is not None:
+ state["original_poisoned_dataset"] = original_poisoned_trainset
+ state["poisoned_dataset"] = poisoned_trainset
+ torch.save(state, args.output)
diff --git a/case_studies/kwta/resnet.py b/case_studies/kwta/resnet.py
new file mode 100644
index 0000000..2638667
--- /dev/null
+++ b/case_studies/kwta/resnet.py
@@ -0,0 +1,406 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# From https://github.com/a554b554/kWTA-Activation
+
+import torch.nn as nn
+import torch.nn.functional as F
+
+from . import models
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, in_planes, planes, stride=1):
+ super(BasicBlock, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1,
+ bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
+ stride=stride, bias=False),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = self.bn2(self.conv2(out))
+ out += self.shortcut(x)
+ out = F.relu(out)
+ return out
+
+
+class SparseBasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True,
+ sparse_func='reg', bias=False):
+ super(SparseBasicBlock, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1,
+ bias=bias)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.use_relu = use_relu
+ self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
+ self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
+ self.relu = nn.ReLU()
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
+ stride=stride, bias=bias),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x):
+ out = self.bn1(self.conv1(x))
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse1(out)
+ out = self.bn2(self.conv2(out))
+ out += self.shortcut(x)
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse2(out)
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, in_planes, planes, stride=1, bias=True):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=bias)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
+ bias=bias)
+ self.bn3 = nn.BatchNorm2d(self.expansion * planes)
+ self.relu = nn.ReLU()
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
+ stride=stride, bias=bias),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x):
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.relu(self.bn2(self.conv2(out)))
+ out = self.bn3(self.conv3(out))
+ out += self.shortcut(x)
+ out = self.relu(out)
+ return out
+
+
+class SparseBottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True,
+ sparse_func='reg', bias=True):
+ super(SparseBottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=bias)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
+ bias=bias)
+ self.bn3 = nn.BatchNorm2d(self.expansion * planes)
+ self.relu = nn.ReLU()
+
+ self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
+ self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
+ self.sparse3 = models.sparse_func_dict[sparse_func](sparsity)
+
+ self.use_relu = use_relu
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
+ stride=stride, bias=bias),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x):
+ out = self.bn1(self.conv1(x))
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse1(out)
+
+ out = self.bn2(self.conv2(out))
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse2(out)
+
+ out = self.bn3(self.conv3(out))
+ out += self.shortcut(x)
+
+ if self.use_relu:
+ out = self.relu(out)
+ out = self.sparse3(out)
+ return out
+
+
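+# ResNet in which the sparse activation functions from models.sparse_func_dict
+# (the k-winners-take-all functions used by the kWTA defense, keeping only the
+# fraction of units given by `sparsities`) are applied after, or instead of, the
+# usual ReLU nonlinearities in each block.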
+class SparseResNet(nn.Module):
+ def __init__(self, block, num_blocks, sparsities, num_classes=10,
+ use_relu=True, sparse_func='reg', bias=True):
+ super(SparseResNet, self).__init__()
+ self.in_planes = 64
+ self.use_relu = use_relu
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1,
+ sparsity=sparsities[0],
+ sparse_func=sparse_func, bias=bias)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2,
+ sparsity=sparsities[1],
+ sparse_func=sparse_func, bias=bias)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2,
+ sparsity=sparsities[2],
+ sparse_func=sparse_func, bias=bias)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2,
+ sparsity=sparsities[3],
+ sparse_func=sparse_func, bias=bias)
+ self.linear = nn.Linear(512 * block.expansion, num_classes)
+
+ self.relu = nn.ReLU()
+
+ self.activation = {}
+
+ def get_activation(self, name):
+ def hook(model, input, output):
+ self.activation[name] = output.cpu().detach()
+
+ return hook
+
+ def register_layer(self, layer, name):
+ layer.register_forward_hook(self.get_activation(name))
+
+ def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5,
+ sparse_func='reg', bias=True):
+ strides = [stride] + [1] * (num_blocks - 1)
+ layers = []
+ for stride in strides:
+ layers.append(
+ block(self.in_planes, planes, stride, sparsity, self.use_relu,
+ sparse_func=sparse_func, bias=bias))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ if features_only:
+ return out
+ logits = self.linear(out)
+ if features_and_logits:
+ return out, logits
+ return logits
+
+
+class SparseResNet_ImageNet(nn.Module):
+ def __init__(self, block, num_blocks, sparsities, num_classes=1000,
+ sparse_func='vol', bias=False):
+ super(SparseResNet_ImageNet, self).__init__()
+ self.in_planes = 64
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1,
+ sparsity=sparsities[0],
+ sparse_func=sparse_func, bias=bias)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2,
+ sparsity=sparsities[1],
+ sparse_func=sparse_func, bias=bias)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2,
+ sparsity=sparsities[2],
+ sparse_func=sparse_func, bias=bias)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2,
+ sparsity=sparsities[3],
+ sparse_func=sparse_func, bias=bias)
+ self.linear = nn.Linear(512 * block.expansion, num_classes)
+
+ self.sp = models.sparse_func_dict[sparse_func](sparsities[0])
+
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+
+ self.activation = {}
+
+ def get_activation(self, name):
+ def hook(model, input, output):
+ self.activation[name] = output.cpu().detach()
+
+ return hook
+
+ def register_layer(self, layer, name):
+ layer.register_forward_hook(self.get_activation(name))
+
+ def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5,
+ sparse_func='reg', bias=True):
+ strides = [stride] + [1] * (num_blocks - 1)
+ layers = []
+ for stride in strides:
+ layers.append(
+ block(self.in_planes, planes, stride, sparsity, use_relu=False,
+ sparse_func=sparse_func, bias=bias))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ out = self.sp(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = self.avgpool(out)
+ out = out.view(out.size(0), -1)
+ if features_only:
+ return out
+ logits = self.linear(out)
+ if features_and_logits:
+ return out, logits
+ return logits
+
+
+class ResNet(nn.Module):
+ def __init__(self, block, num_blocks, num_classes=10):
+ super(ResNet, self).__init__()
+ self.in_planes = 64
+
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
+ bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
+ self.linear = nn.Linear(512 * block.expansion, num_classes)
+
+ self.relu = nn.ReLU()
+
+ self.activation = {}
+
+ def get_activation(self, name):
+ def hook(model, input, output):
+ self.activation[name] = output.cpu().detach()
+
+ return hook
+
+ def register_layer(self, layer, name):
+ layer.register_forward_hook(self.get_activation(name))
+
+ def _make_layer(self, block, planes, num_blocks, stride):
+ strides = [stride] + [1] * (num_blocks - 1)
+ layers = []
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, stride))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ if features_only:
+ return out
+ logits = self.linear(out)
+ if features_and_logits:
+ return out, logits
+ return logits
+
+
+def ResNet18():
+ return ResNet(BasicBlock, [2, 2, 2, 2])
+
+
+def ResNet34():
+ return ResNet(BasicBlock, [3, 4, 6, 3])
+
+
+def ResNet50():
+ return ResNet(Bottleneck, [3, 4, 6, 3])
+
+
+def ResNet101():
+ return ResNet(Bottleneck, [3, 4, 23, 3])
+
+
+def ResNet152():
+ return ResNet(Bottleneck, [3, 8, 36, 3])
+
+
+def SparseResNet18(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
+ sparse_func='reg', bias=False):
+ return SparseResNet(SparseBasicBlock, [2, 2, 2, 2], sparsities, use_relu=relu,
+ sparse_func=sparse_func, bias=bias)
+
+
+def SparseResNet34(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
+ sparse_func='reg', bias=False):
+ return SparseResNet(SparseBasicBlock, [3, 4, 6, 3], sparsities, use_relu=relu,
+ sparse_func=sparse_func, bias=bias)
+
+
+def SparseResNet50(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
+ sparse_func='reg', bias=False):
+ return SparseResNet(SparseBottleneck, [3, 4, 6, 3], sparsities, use_relu=relu,
+ sparse_func=sparse_func, bias=bias)
+
+
+def SparseResNet101(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
+ sparse_func='reg', bias=False):
+ return SparseResNet(SparseBottleneck, [3, 4, 23, 3], sparsities,
+ use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+
+def SparseResNet152(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
+ sparse_func='reg', bias=False):
+ return SparseResNet(SparseBottleneck, [3, 8, 36, 3], sparsities,
+ use_relu=relu, sparse_func=sparse_func, bias=bias)
+
+
+def SparseResNet152_ImageNet(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
+ sparse_func='reg', bias=False):
+ return SparseResNet_ImageNet(SparseBottleneck, [3, 8, 36, 3], sparsities,
+ sparse_func=sparse_func, bias=bias)
+
+def sparse_resnet18_01():
+ return SparseResNet18(sparsities=[0.1, 0.1, 0.1, 0.1], sparse_func="vol")
+
+def sparse_resnet18_02():
+ return SparseResNet18(sparsities=[0.2, 0.2, 0.2, 0.2], sparse_func="vol")
\ No newline at end of file
diff --git a/case_studies/kwta/single_kwta.sh b/case_studies/kwta/single_kwta.sh
new file mode 100644
index 0000000..a2c37c2
--- /dev/null
+++ b/case_studies/kwta/single_kwta.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
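+# Usage: single_kwta.sh <checkpoint> <sparsity: 1 for 0.1, 2 for 0.2> [device] [epsilon: 8|6|4 (/255)] [attack]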
+checkpoint="$1"
+
+basecommand='
+--n-samples=512
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${3+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$3"; fi
+epsilon=${4:-8}
+attack=${5:-pgd}
+echo "Attack: $attack"
+
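+# epsilon values below are absolute (not /255): 0.031372549 = 8/255, 0.02352941176 = 6/255, 0.0156862745 = 4/255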
+if [[ "$epsilon" == "8" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 8/255] kWTA (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="case_studies.kwta.resnet.sparse_resnet18_0$2" \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=$attack\"" \
+ --no-ce-loss
+ #--no-logit-diff-loss
+ # --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=$attack"
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+elif [[ "$epsilon" == "6" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 6/255] kWTA (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="case_studies.kwta.resnet.sparse_resnet18_0$2" \
+ --decision-boundary-binarization="norm=linf epsilon=0.02352941176 n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200 attack=$attack\"" \
+ --no-ce-loss
+ #--no-logit-diff-loss
+ # --adversarial-attack="norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200 attack=$attack"
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+elif [[ "$epsilon" == "4" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 4/255] kWTA (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="case_studies.kwta.resnet.sparse_resnet18_0$2" \
+ --decision-boundary-binarization="norm=linf epsilon=0.0156862745 n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200 attack=$attack\"" \
+ --no-ce-loss
+ #--no-logit-diff-loss
+ # --adversarial-attack="norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200 attack=$attack"
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+else
+ echo "Unknown epsilon value: $epsilon"
+fi
+
diff --git a/case_studies/mixup/adversarial_evaluation.py b/case_studies/mixup/adversarial_evaluation.py
new file mode 100644
index 0000000..1be3d85
--- /dev/null
+++ b/case_studies/mixup/adversarial_evaluation.py
@@ -0,0 +1,291 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import urllib.request
+import torch
+import torchvision
+import numpy as np
+import foolbox as fb
+import eagerpy as ep
+
+import transforms
+import resnet_3layer as resnet
+from tqdm import tqdm
+import torch.nn as nn
+import argparse
+
+num_sample_MIOL = 15
+lamdaOL = 0.6
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+
+class LambdaWrapper(torch.nn.Module):
+ def __init__(self, lbd, module):
+ super().__init__()
+ self.lbd = lbd
+ self.module = module
+
+ def forward(self, x, *args, **kwargs):
+ return self.module(self.lbd(x), *args, **kwargs)
+
+
+def load_classifier():
+ filename = 'mixup_model_IAT.ckpt'
+ url = f'https://github.com/wielandbrendel/robustness_workshop/releases/download/v0.0.1/{filename}'
+
+ if not os.path.isfile(filename):
+ print('Downloading pretrained weights.')
+ urllib.request.urlretrieve(url, filename)
+
+ CLASSIFIER = resnet.model_dict['resnet50']
+ classifier = CLASSIFIER(num_classes=10)
+
+ device = torch.device("cuda:0")
+ classifier = classifier.to(device)
+
+ classifier.load_state_dict(torch.load('mixup_model_IAT.ckpt'))
+
+ # transform (0, 1) to (-1, 1) value range
+ classifier = LambdaWrapper(
+ lambda x: x * 2 - 1.0,
+ classifier
+ )
+
+ classifier.eval()
+
+ return classifier
+
+
+def onehot(ind):
+ vector = np.zeros([10])
+ vector[ind] = 1
+ return vector.astype(np.float32)
+
+
+def prepare_data():
+ train_trans, test_trans = transforms.cifar_transform()
+ trainset = torchvision.datasets.CIFAR10(root='~/cifar/',
+ train=False,
+ download=True,
+ transform=train_trans,
+ target_transform=onehot)
+ testset = torchvision.datasets.CIFAR10(root='~/cifar/',
+ train=False,
+ download=True,
+ transform=test_trans,
+ target_transform=onehot)
+
+ # note: the assignment below is a no-op; the full test set is used here
+ testset.data = testset.data
+
+ dataloader_train = torch.utils.data.DataLoader(
+ trainset,
+ batch_size=100,
+ shuffle=True,
+ num_workers=1)
+
+ dataloader_test = torch.utils.data.DataLoader(
+ testset,
+ batch_size=50,
+ shuffle=True,
+ num_workers=1)
+
+ return dataloader_test, dataloader_train
+
+
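+# Builds a per-class pool from up to num_pool images of the given dataloader; these
+# pools provide the "other label" samples that CombinedModel mixes into its inputs.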
+def setup_pool(dataloader, num_pool=10000, n_classes=10):
+ mixup_pool_OL = {}
+
+ for i in range(n_classes):
+ mixup_pool_OL.update({i: []})
+
+ n_samples = 0
+ for i, data_batch in tqdm(enumerate(dataloader), leave=False):
+ img_batch, label_batch = data_batch
+ img_batch = img_batch.to(device)
+ if len(label_batch.shape) > 1:
+ _, label_indices = torch.max(label_batch.data, 1)
+ else:
+ label_indices = label_batch
+ for j, label_ind in enumerate(label_indices.cpu().numpy()):
+ mixup_pool_OL[label_ind].append(img_batch[j])
+ n_samples += 1
+
+ if n_samples >= num_pool:
+ break
+
+ return mixup_pool_OL
+
+
+class CombinedModel(nn.Module):
+ def __init__(self, classifier, mixup_pool_OL, n_classes=10, deterministic=False):
+ super(CombinedModel, self).__init__()
+ self.classifier = classifier
+ self.soft_max = nn.Softmax(dim=-1)
+ self.mixup_pool_OL = mixup_pool_OL
+ self.n_classes = n_classes
+ self.deterministic = deterministic
+ self.rng = np.random.default_rng()
+ for i in range(n_classes):
+ assert i in mixup_pool_OL
+
+ def forward(self, img_batch, no_mixup=False, features_only=False,
+ features_and_logits=False):
+ pred_cle_mixup_all_OL = 0 # torch.Tensor([0.]*10)
+ # forward pass without PL/OL
+ # TODO: does this make sense if the classifier wasn't adapted to binary
+ # task yet?
+ pred_cle = self.classifier(img_batch)
+
+ if no_mixup:
+ return pred_cle
+
+ cle_con, predicted_cle = torch.max(self.soft_max(pred_cle.data), 1)
+ predicted_cle = predicted_cle.cpu().numpy()
+
+ all_features = []
+ all_logits = []
+
+ if self.deterministic:
+ self.rng = np.random.default_rng(seed=0)
+
+ # perform MI-OL
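+ # (mixup inference with sampling from other labels: each input is mixed with weight
+ # lamdaOL against a random pool image whose class differs from the clean prediction,
+ # and the softmax outputs of the num_sample_MIOL mixed copies are averaged)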
+ for k in range(num_sample_MIOL):
+ mixup_img_batch = np.empty(img_batch.shape, dtype=np.float32)
+
+ for b in range(img_batch.shape[0]):
+ # CLEAN
+ xs_cle_label = self.rng.integers(self.n_classes)
+ while xs_cle_label == predicted_cle[b]:
+ xs_cle_label = self.rng.integers(self.n_classes)
+ xs_cle_index = self.rng.integers(len(self.mixup_pool_OL[xs_cle_label]))
+ mixup_img_cle = (1 - lamdaOL) * \
+ self.mixup_pool_OL[xs_cle_label][xs_cle_index][0]
+ mixup_img_batch[b] = mixup_img_cle.cpu().detach().numpy()
+
+ mixup_img_batch = ep.from_numpy(ep.astensor(img_batch),
+ mixup_img_batch).raw + lamdaOL * img_batch
+ if features_only:
+ features = self.classifier(mixup_img_batch, features_only=True)
+ all_features.append(features)
+ elif features_and_logits:
+ features, logits = self.classifier(mixup_img_batch, features_and_logits=True)
+ all_features.append(features)
+ all_logits.append(logits)
+ else:
+ pred_cle_mixup = self.classifier(mixup_img_batch)
+ pred_cle_mixup_all_OL = pred_cle_mixup_all_OL + self.soft_max(
+ pred_cle_mixup)
+
+ if features_only:
+ all_features = torch.stack(all_features, 1)
+ return all_features
+ elif features_and_logits:
+ all_features = torch.stack(all_features, 1)
+ all_logits = torch.stack(all_logits, 1)
+ return all_features, all_logits
+ else:
+ pred_cle_mixup_all_OL = pred_cle_mixup_all_OL / num_sample_MIOL
+ return pred_cle_mixup_all_OL
+
+
+def adversarial_evaluate(model, dataloader, attack_fn, attack_mode, epsilon,
+ eval_no_mixup=False, verbose=True, n_samples=-1, kwargs={}):
+ all_attack_successful = []
+ all_x_adv = []
+ all_logits_adv = []
+
+ if verbose:
+ pbar = tqdm(dataloader)
+ else:
+ pbar = dataloader
+
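+ # "pgd" attacks only the underlying deterministic classifier (transfer setting),
+ # while "adaptive-pgd" attacks the full randomized MI-OL ensemble end-to-end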
+ if attack_mode == "adaptive-pgd":
+ attacked_model = fb.models.PyTorchModel(model, bounds=(0, 1), device=device)
+ elif attack_mode == "pgd":
+ attacked_model = fb.models.PyTorchModel(model.classifier, bounds=(0, 1),
+ device=device)
+ else:
+ raise ValueError()
+
+ total_samples = 0
+ correct_classified = 0
+ for images, labels in pbar:
+ images = images.to(device)
+ labels = labels.to(device)
+ if len(labels.shape) == 2:
+ labels = labels.argmax(1)
+ N = len(images)
+
+ _, adv_clipped, _ = attack_fn(attacked_model, images, labels,
+ epsilons=epsilon)
+
+ with torch.no_grad():
+ all_x_adv.append(adv_clipped.detach().cpu().numpy())
+ logits_adv = model(adv_clipped, no_mixup=eval_no_mixup)
+ all_logits_adv.append(logits_adv.cpu().numpy())
+ attack_successful = (
+ logits_adv.argmax(-1) != labels).detach().cpu().numpy()
+ all_attack_successful.append(attack_successful)
+
+ total_samples += N
+ correct_classified += (N - attack_successful.sum())
+
+ if verbose:
+ pbar.set_description(
+ f'Model accuracy on adversarial examples: {correct_classified / total_samples:.3f}')
+
+ if n_samples != -1 and total_samples > n_samples:
+ break
+
+ all_attack_successful = np.concatenate(all_attack_successful, 0)
+ all_x_adv = np.concatenate(all_x_adv, 0)
+ all_logits_adv = np.concatenate(all_logits_adv, 0)
+
+ if verbose:
+ print(
+ f'Model accuracy on adversarial examples: {correct_classified / total_samples:.3f}')
+
+ return all_attack_successful, (torch.tensor(all_x_adv, device="cpu"),
+ torch.tensor(all_logits_adv, device=device))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--attack", choices=("pgd", "adaptive-pgd"),
+ default="pgd")
+ parser.add_argument("--deterministic", action="store_true")
+ parser.add_argument("--epsilon", type=int, default=8)
+ parser.add_argument("--pgd-steps", type=int, default=50)
+ parser.add_argument("--n-samples", type=int, default=-1)
+ args = parser.parse_args()
+
+ classifier = load_classifier()
+ dataloader_test, dataloader_train = prepare_data()
+ mixup_pool_OL = setup_pool(dataloader_test)
+
+ combined_classifier = CombinedModel(classifier, mixup_pool_OL, deterministic=args.deterministic)
+ combined_classifier.eval()
+
+ attack_mode = args.attack
+ epsilon = args.epsilon / 255
+ attack = fb.attacks.LinfPGD(steps=args.pgd_steps, abs_stepsize=1 / 255)
+ adversarial_evaluate(combined_classifier, dataloader_test, attack,
+ attack_mode,
+ epsilon, verbose=True, n_samples=args.n_samples)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/mixup/binarization_test.py b/case_studies/mixup/binarization_test.py
new file mode 100644
index 0000000..e6c60e7
--- /dev/null
+++ b/case_studies/mixup/binarization_test.py
@@ -0,0 +1,132 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import foolbox as fb
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+from functools import partial
+
+from active_tests.decision_boundary_binarization import LogitRescalingType
+from adversarial_evaluation import load_classifier, setup_pool, \
+ prepare_data, adversarial_evaluate, CombinedModel, device, LambdaWrapper
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack, \
+ format_result, _train_logistic_regression_classifier
+from argparse_utils import DecisionBoundaryBinarizationSettings
+
+
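+# Used by the binarization test: trains a logistic-regression readout on the frozen
+# feature extractor (labels are repeated once per mixup draw before flattening),
+# then wraps it, together with a fresh 2-class mixup pool, into a CombinedModel.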
+def train_classifier(
+ n_features: int,
+ train_loader: DataLoader,
+ raw_train_loader: DataLoader,
+ logits: torch.Tensor,
+ device: str,
+ rescale_logits: LogitRescalingType,
+ base_classifier: torch.nn.Module,
+ deterministic: bool) -> torch.nn.Module:
+ data_x, data_y = train_loader.dataset.tensors
+ data_y = data_y.repeat_interleave(data_x.shape[1])
+ data_x = data_x.view(-1, data_x.shape[-1])
+ train_loader = DataLoader(TensorDataset(data_x, data_y),
+ batch_size=train_loader.batch_size)
+ binary_classifier = _train_logistic_regression_classifier(
+ n_features, train_loader, logits, optimizer="sklearn", lr=10000,
+ device=device, rescale_logits=rescale_logits,
+ solution_goodness="good")
+
+ mixup_pool_OL = setup_pool(raw_train_loader, n_classes=2)
+ classifier = LambdaWrapper(
+ lambda x, **kwargs: base_classifier(x, features_only=True, **kwargs),
+ binary_classifier)
+
+ return CombinedModel(classifier, mixup_pool_OL, n_classes=2, deterministic=deterministic).eval()
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--attack", choices=("pgd", "adaptive-pgd"),
+ default="pgd")
+ parser.add_argument("--deterministic", action="store_true")
+ parser.add_argument("--epsilon", type=int, default=8)
+ parser.add_argument("--pgd-steps", type=int, default=50)
+ parser.add_argument("--n-samples", type=int, default=-1)
+ parser.add_argument("--n-boundary-points", type=int, default=1)
+ parser.add_argument("--n-inner-points", type=int, default=999)
+ parser.add_argument("--sample-from-corners", action="store_true")
+ args = parser.parse_args()
+
+ classifier = load_classifier()
+ dataloader_test, dataloader_train = prepare_data()
+ mixup_pool_OL = setup_pool(dataloader_test)
+
+ combined_classifier = CombinedModel(classifier, mixup_pool_OL, deterministic=args.deterministic)
+ combined_classifier.eval()
+
+ attack_mode = args.attack
+ epsilon = args.epsilon / 255
+ attack = fb.attacks.LinfPGD(steps=args.pgd_steps, abs_stepsize=1 / 255)
+
+ def eval_model(m, l, kwargs):
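+ # if the test passes far-off reference points, rebuild the binary mixup pool from
+ # them so that inference-time mixing uses samples from the binarized task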
+ if "reference_points_x" in kwargs:
+ far_off_reference_ds = torch.utils.data.TensorDataset(kwargs["reference_points_x"],
+ kwargs["reference_points_y"])
+ far_off_reference_dl = torch.utils.data.DataLoader(far_off_reference_ds, batch_size=4096)
+
+ new_mixup_pool_OL = setup_pool(far_off_reference_dl, n_classes=2)
+ for k in new_mixup_pool_OL:
+ if len(new_mixup_pool_OL[k]) > 0:
+ m.mixup_pool_OL[k] = new_mixup_pool_OL[k]
+ return adversarial_evaluate(m, l, attack, attack_mode,
+ epsilon, verbose=False)
+
+
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ combined_classifier,
+ dataloader_test,
+ attack_fn=eval_model,
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=args.epsilon / 255.0,
+ norm="linf",
+ lr=45000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ n_far_off_boundary_points=0
+ ),
+ n_samples=args.n_samples,
+ batch_size=4096,
+ device=device,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ rescale_logits="adaptive",
+ train_classifier_fn=partial(train_classifier, base_classifier=classifier,
+ deterministic=args.deterministic),
+ n_inference_repetitions_boundary=5,
+ n_inference_repetitions_inner=1,
+ relative_inner_boundary_gap=0.05,
+ decision_boundary_closeness=0.999,
+ far_off_distance=3,
+ sample_training_data_from_corners=args.sample_from_corners
+
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ args.n_samples))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/mixup/binarization_test.sh b/case_studies/mixup/binarization_test.sh
new file mode 100644
index 0000000..2e54a52
--- /dev/null
+++ b/case_studies/mixup/binarization_test.sh
@@ -0,0 +1,37 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+
+if [[ "$3" == "deterministic" ]]; then
+ deterministic="--deterministic"
+else
+ deterministic=""
+fi
+
+
+# kwargs=""
+kwargs="--sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "Deterministic?: $deterministic"
+echo "Epsilon: $epsilon"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Original attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$(pwd) python case_studies/mixup/binarization_test.py \
+ --epsilon=$epsilon \
+ --n-samples=$nsamples \
+ --attack=pgd \
+ $deterministic \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$(pwd) python case_studies/mixup/binarization_test.py \
+ --epsilon=$epsilon \
+ --n-samples=$nsamples \
+ --attack=adaptive-pgd \
+ $deterministic \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/mixup/resnet_3layer.py b/case_studies/mixup/resnet_3layer.py
new file mode 100644
index 0000000..64c8ce0
--- /dev/null
+++ b/case_studies/mixup/resnet_3layer.py
@@ -0,0 +1,336 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+from torch.hub import load_state_dict_from_url
+
+__all__ = [
+ 'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
+]
+
+
+def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
+ """3x3 convolution with padding"""
+ return nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=dilation,
+ groups=groups,
+ bias=False,
+ dilation=dilation)
+
+
+def conv1x1(in_planes, out_planes, stride=1):
+ """1x1 convolution"""
+ return nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=1,
+ stride=stride,
+ bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ downsample=None,
+ groups=1,
+ base_width=64,
+ dilation=1,
+ norm_layer=None):
+ super(BasicBlock, self).__init__()
+ if norm_layer is None:
+ norm_layer = nn.BatchNorm2d
+ if groups != 1 or base_width != 64:
+ raise ValueError(
+ 'BasicBlock only supports groups=1 and base_width=64')
+ if dilation > 1:
+ raise NotImplementedError(
+ "Dilation > 1 not supported in BasicBlock")
+ # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = norm_layer(planes)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = norm_layer(planes)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ downsample=None,
+ groups=1,
+ base_width=64,
+ dilation=1,
+ norm_layer=None):
+ super(Bottleneck, self).__init__()
+ if norm_layer is None:
+ norm_layer = nn.BatchNorm2d
+ width = int(planes * (base_width / 64.)) * groups
+ # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+ self.conv1 = conv1x1(inplanes, width)
+ self.bn1 = norm_layer(width)
+ self.conv2 = conv3x3(width, width, stride, groups, dilation)
+ self.bn2 = norm_layer(width)
+ self.conv3 = conv1x1(width, planes * self.expansion)
+ self.bn3 = norm_layer(planes * self.expansion)
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+ out = self.relu(out)
+
+ return out
+
+
+class ResNet(nn.Module):
+ def __init__(self,
+ block,
+ layers,
+ num_classes=1000,
+ zero_init_residual=False,
+ groups=1,
+ width_per_group=64,
+ replace_stride_with_dilation=None,
+ norm_layer=None):
+ super(ResNet, self).__init__()
+ if norm_layer is None:
+ norm_layer = nn.BatchNorm2d
+ self._norm_layer = norm_layer
+
+ self.inplanes = 64
+ self.dilation = 1
+ self.final_feature_dimen = 256
+
+ if replace_stride_with_dilation is None:
+ # each element in the tuple indicates if we should replace
+ # the 2x2 stride with a dilated convolution instead
+ replace_stride_with_dilation = [False, False, False]
+ if len(replace_stride_with_dilation) != 3:
+ raise ValueError("replace_stride_with_dilation should be None "
+ "or a 3-element tuple, got {}".format(
+ replace_stride_with_dilation))
+ self.groups = groups
+ self.base_width = width_per_group
+ self.conv1 = nn.Conv2d(3,
+ self.inplanes,
+ kernel_size=5,
+ stride=1,
+ padding=2,
+ bias=False)
+ self.bn1 = norm_layer(self.inplanes)
+ self.relu = nn.ReLU(inplace=True)
+ # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block,
+ 128,
+ layers[1],
+ stride=2,
+ dilate=replace_stride_with_dilation[0])
+ self.layer3 = self._make_layer(block,
+ 256,
+ layers[2],
+ stride=2,
+ dilate=replace_stride_with_dilation[1])
+
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+ self.fc_dense = nn.Linear(256, num_classes)
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight,
+ mode='fan_out',
+ nonlinearity='relu')
+ elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+ nn.init.constant_(m.weight, 1)
+ nn.init.constant_(m.bias, 0)
+
+ # Zero-initialize the last BN in each residual branch,
+ # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+ if zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottleneck):
+ nn.init.constant_(m.bn3.weight, 0)
+ elif isinstance(m, BasicBlock):
+ nn.init.constant_(m.bn2.weight, 0)
+
+ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+ norm_layer = self._norm_layer
+ downsample = None
+ previous_dilation = self.dilation
+ if dilate:
+ self.dilation *= stride
+ stride = 1
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ conv1x1(self.inplanes, planes * block.expansion, stride),
+ norm_layer(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(
+ block(self.inplanes, planes, stride, downsample, self.groups,
+ self.base_width, previous_dilation, norm_layer))
+ self.inplanes = planes * block.expansion
+ for _ in range(1, blocks):
+ layers.append(
+ block(self.inplanes,
+ planes,
+ groups=self.groups,
+ base_width=self.base_width,
+ dilation=self.dilation,
+ norm_layer=norm_layer))
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ # x = self.maxpool(x)
+
+ x = self.layer1(x)
+ x = self.layer2(x)
+ x = self.layer3(x)
+ # x = self.layer4(x)
+
+ x = self.avgpool(x)
+ x = x.reshape(x.size(0), -1)
+
+ if features_only:
+ return x
+
+ logits = self.fc_dense(x)
+
+ if features_and_logits:
+ return x, logits
+
+ return logits
+
+
+def _resnet(arch, block, layers, pretrained, progress, **kwargs):
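+ # note: model_urls is not defined in this file, so pretrained=True is unsupported here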
+ model = ResNet(block, layers, **kwargs)
+ if pretrained:
+ state_dict = load_state_dict_from_url(model_urls[arch],
+ progress=progress)
+ model.load_state_dict(state_dict)
+ return model
+
+
+def resnet18(pretrained=False, progress=True, **kwargs):
+ """Constructs a ResNet-18 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ progress (bool): If True, displays a progress bar of the download to stderr
+ """
+ return _resnet('resnet18', BasicBlock, [3, 3, 3], pretrained, progress,
+ **kwargs)
+
+
+def resnet34(pretrained=False, progress=True, **kwargs):
+ """Constructs a ResNet-34 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ progress (bool): If True, displays a progress bar of the download to stderr
+ """
+ return _resnet('resnet34', BasicBlock, [5, 5, 5], pretrained, progress,
+ **kwargs)
+
+
+def resnet50(pretrained=False, progress=True, **kwargs):
+ """Constructs a ResNet-50 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ progress (bool): If True, displays a progress bar of the download to stderr
+ """
+ return _resnet('resnet50', Bottleneck, [5, 5, 5], pretrained, progress,
+ **kwargs)
+
+
+def resnet101(pretrained=False, progress=True, **kwargs):
+ """Constructs a ResNet-101 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ progress (bool): If True, displays a progress bar of the download to stderr
+ """
+ return _resnet('resnet101', Bottleneck, [11, 11, 11], pretrained, progress,
+ **kwargs)
+
+
+def resnet152(pretrained=False, progress=True, **kwargs):
+ """Constructs a ResNet-152 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ progress (bool): If True, displays a progress bar of the download to stderr
+ """
+ return _resnet('resnet152', Bottleneck, [16, 18, 16], pretrained, progress,
+ **kwargs)
+
+
+model_dict = {
+ 'resnet18': resnet18,
+ 'resnet34': resnet34,
+ 'resnet50': resnet50,
+ 'resnet101': resnet101,
+ 'resnet152': resnet152,
+}
\ No newline at end of file
diff --git a/case_studies/mixup/transforms.py b/case_studies/mixup/transforms.py
new file mode 100644
index 0000000..fe4d073
--- /dev/null
+++ b/case_studies/mixup/transforms.py
@@ -0,0 +1,111 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import torchvision.transforms as transforms
+from PIL import Image
+
+
+def svhn_transform():
+ transform_train = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
+ ])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
+ ])
+ return transform_train, transform_test
+
+
+def cifar_transform():
+ transform_train = transforms.Compose([
+ transforms.RandomHorizontalFlip(),
+ transforms.RandomCrop(size=[32, 32], padding=4),
+ transforms.ToTensor(),
+ #transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
+ ])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ #transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
+ ])
+ return transform_train, transform_test
+
+
+imagenet_pca = {
+ 'eigval':
+ np.asarray([0.2175, 0.0188, 0.0045]),
+ 'eigvec':
+ np.asarray([
+ [-0.5675, 0.7192, 0.4009],
+ [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203],
+ ])
+}
+
+
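+# AlexNet-style PCA colour augmentation ("lighting"): adds noise along the ImageNet
+# RGB principal components, with magnitude controlled by alphastd.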
+class Lighting(object):
+ def __init__(self,
+ alphastd,
+ eigval=imagenet_pca['eigval'],
+ eigvec=imagenet_pca['eigvec']):
+ self.alphastd = alphastd
+ assert eigval.shape == (3, )
+ assert eigvec.shape == (3, 3)
+ self.eigval = eigval
+ self.eigvec = eigvec
+
+ def __call__(self, img):
+ if self.alphastd == 0.:
+ return img
+ rnd = np.random.randn(3) * self.alphastd
+ rnd = rnd.astype('float32')
+ v = rnd
+ old_dtype = np.asarray(img).dtype
+ v = v * self.eigval
+ v = v.reshape((3, 1))
+ inc = np.dot(self.eigvec, v).reshape((3, ))
+ img = np.add(img, inc)
+ if old_dtype == np.uint8:
+ img = np.clip(img, 0, 255)
+ img = Image.fromarray(img.astype(old_dtype), 'RGB')
+ return img
+
+ def __repr__(self):
+ return self.__class__.__name__ + '()'
+
+
+def imagenet_transform(img_size=224):
+ normalize = transforms.Normalize([0.485, 0.456, 0.406],
+ [0.229, 0.224, 0.225])
+ jitter_param = 0.4
+ lighting_param = 0.1
+
+ transform_train = transforms.Compose([
+ transforms.RandomResizedCrop(img_size, scale=(0.25, 1)),
+ transforms.RandomHorizontalFlip(),
+ transforms.ColorJitter(brightness=jitter_param,
+ contrast=jitter_param,
+ saturation=jitter_param),
+ Lighting(lighting_param),
+ transforms.ToTensor(), normalize
+ ])
+ transform_test = transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(img_size),
+ transforms.ToTensor(), normalize
+ ])
+ return transform_train, transform_test
\ No newline at end of file
diff --git a/case_studies/ml_loo/README.md b/case_studies/ml_loo/README.md
new file mode 100644
index 0000000..76043d0
--- /dev/null
+++ b/case_studies/ml_loo/README.md
@@ -0,0 +1,50 @@
+# ML-LOO: Detecting Adversarial Examples with Feature Attribution
+
+Code for ML-LOO on a minimal example.
+
+## Dependencies
+The code runs with Python 2.7 and requires TensorFlow 1.11.0 and Keras 2.2.4. Please `pip install` the following packages:
+- `numpy`
+- `scipy`
+- `tensorflow`
+- `keras`
+- `scikit-learn`
+
+## Preparation stage
+Generate the adversarial examples with the C&W attack; these are used in the training stage of ML-LOO:
+
+```shell
+###############################################
+python generate_attack.py --dataset_name cifar10 --model_name resnet
+###############################################
+```
+
+Extract the ML-LOO features (i.e., the IQR of the LOO attribution maps from different layers of the network), then split the data set of original and adversarial examples into a training set and a test set:
+
+```shell
+###############################################
+python features_extract.py --dataset_name cifar10 --model_name resnet --attack cw --det ml_loo
+###############################################
+```
+
+## Train ML-LOO and evaluate its performance
+Train ML-LOO and evaluate it on the test set.
+```shell
+python train_and_evaluate.py --dataset_name cifar10 --model_name resnet --data_sample x_val200
+```
+The generated AUC plot can be found in `cifar10resnet/figs/`.
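+
+The trained detector can then be evaluated against attacks with
+`adversarial_evaluation.py` from this directory. A minimal sketch (the threshold shown is the
+script's default FPR-5 value for the C&W-trained detector; substitute the value printed by
+`train_and_evaluate.py` for your setting):
+
+```shell
+python adversarial_evaluation.py --dataset_name cifar10 --model_name resnet \
+  --attack cw --detector-attack cw --detector-threshold 0.6151412488088068
+```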
+
diff --git a/case_studies/ml_loo/adversarial_evaluation.py b/case_studies/ml_loo/adversarial_evaluation.py
new file mode 100644
index 0000000..b07e5c0
--- /dev/null
+++ b/case_studies/ml_loo/adversarial_evaluation.py
@@ -0,0 +1,215 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+# disable tf logging
+# some of these might have to be commented out to use verbose=True in the
+# adaptive attack
+import os
+
+from cleverhans.utils_keras import KerasModelWrapper
+
+from ml_loo import collect_layers
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+import warnings
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+try:
+ import cPickle as pickle
+except:
+ import _pickle as pickle
+
+import numpy as np
+from sklearn.linear_model import LogisticRegressionCV
+from tqdm import tqdm
+
+from build_model import ImageModel
+from load_data import ImageData, split_data
+from attack_model import BIM, CW, FMA
+
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--dataset_name', type = str,
+ choices = ['cifar10'],
+ default = 'cifar10')
+ parser.add_argument('--model_name', type = str,
+ choices = ['resnet'],
+ default = 'resnet')
+ parser.add_argument(
+ '--attack',
+ type = str,
+ choices = ['cw', 'bim', 'bim2', 'fma'],
+ default = 'cw'
+ )
+ parser.add_argument("--batch-size", type=int, default=50)
+ parser.add_argument("--detector-attack", choices=['cw', 'bim', 'bim2'], default='cw')
+ parser.add_argument("--n-samples", type=int, default=500)
+
+ # default equals value for FPPR5; obtained from train_and_evaluate.py
+ parser.add_argument("--detector-threshold", type=float, default=0.6151412488088068)
+
+ args = parser.parse_args()
+ dict_a = vars(args)
+ args.data_model = args.dataset_name + args.model_name
+
+ # load detector
+ with open(f"{args.data_model}/models/ml_loo_{args.detector_attack}_lr.pkl", "rb") as f:
+ lr = pickle.load(f)
+
+ print('Loading dataset...')
+ dataset = ImageData(args.dataset_name)
+ model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
+
+ if args.dataset_name == 'cifar10':
+ X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
+ dataset.y_val, model, num_classes = 10,
+ split_rate = 0.8, sample_per_class = 1000)
+ else:
+ raise NotImplementedError()
+
+ if args.n_samples == -1:
+ args.n_samples = len(X_test)
+
+ X_test = X_test[:args.n_samples]
+ Y_test = Y_test[:args.n_samples]
+
+ from ml_loo import get_ml_loo_features
+
+ if args.model_name == 'resnet':
+ interested_layers = [14,24,35,45,56,67,70]
+ else:
+ raise ValueError()
+
+ # only relevant feature used by logistic regression model
+ stat_names = ['quantile']
+ reference = - dataset.x_train_mean
+ get_ml_loo_features_ = lambda x: get_ml_loo_features(model, x, reference, interested_layers, stat_names=stat_names)[:, 0]
+ detector = lambda x: lr.predict_proba(get_ml_loo_features_(x))[:, 1]
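+ # the detector score is the logistic-regression probability that the ML-LOO
+ # (quantile) feature vector of an input belongs to an adversarial example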
+
+ batch_size = args.batch_size
+ detector_threshold = args.detector_threshold
+
+ if args.attack == 'cw':
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = CW(
+ model,
+ source_samples = batch_size,
+ binary_search_steps = 5,
+ cw_learning_rate = 1e-2,
+ confidence = 0,
+ attack_iterations = 100,
+ attack_initial_const = 1e-2,
+ )
+ elif args.attack == "bim":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = BIM(
+ KerasModelWrapper(model.model),
+ model.sess,
+ model.input_ph,
+ model.num_classes,
+ attack_iterations = 100,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 100,
+ random_init=True
+ )
+ elif args.attack == "bim2":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = BIM(
+ KerasModelWrapper(model.model),
+ model.sess,
+ model.input_ph,
+ model.num_classes,
+ attack_iterations = 10,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 10,
+ random_init=True
+ )
+ elif args.attack == "fma":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ target_samples = []
+ for y in range(10):
+ target_samples.append(X_train[np.argmax(Y_train == y)])
+ target_samples = np.array(target_samples)
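+        # one clean training example per class; FMA matches the ML-LOO features
+        # of the adversarial example against those of a sample from a different
+        # class (see FMA.attack)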
+ attack_model = FMA(
+ model,
+ KerasModelWrapper(model.model),
+ model.sess,
+ model.input_ph,
+ model.num_classes,
+ target_samples=target_samples,
+ reference=reference,
+ features=collect_layers(model, interested_layers),
+ attack_iterations = 500,
+ epsilon=0.03,
+ learning_rate=4 * 0.03 / 100,
+ num_random_features=3100,
+ random_init=True
+ )
+
+
+ n_batches = int(np.ceil(len(X_test) / batch_size))
+
+ all_is_adv = []
+ all_is_detected = []
+ all_is_adv_not_detected = []
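+  # Per batch: run the attack, mark a sample as adversarial if the predicted
+  # label changes, and as detected if the detector score exceeds the threshold;
+  # the "w/ detector" attack success rate only counts samples that are both
+  # adversarial and undetected.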
+ pbar = tqdm(range(n_batches))
+ for i in pbar:
+ x = X_test[i * batch_size:(i+1) * batch_size]
+ y = Y_test[i * batch_size:(i+1) * batch_size]
+ # undo one-hot encoding
+ y = y.argmax(-1)
+
+ x_adv = attack_model.attack(x)
+ y_adv = model.predict(x_adv, verbose=False, logits=False).argmax(-1)
+
+ is_adv = y_adv != y
+ is_detected = detector(x_adv) > detector_threshold
+ all_is_adv.append(is_adv)
+ all_is_detected.append(is_detected)
+
+ is_adv_not_detected = np.logical_and(is_adv, ~is_detected)
+ all_is_adv_not_detected.append(is_adv_not_detected)
+
+ pbar.set_description(
+ f"ASR (w/o detector): {np.mean(np.concatenate(all_is_adv))} "
+ f"ASR (w/ detector): {np.mean(np.concatenate(all_is_adv_not_detected))}")
+
+ all_is_adv = np.concatenate(all_is_adv)
+ all_is_detected = np.concatenate(all_is_detected)
+ all_is_adv_not_detected = np.concatenate(all_is_adv_not_detected)
+ print("ASR (w/o detector):", np.mean(all_is_adv))
+ print("ASR (w/ detector):", np.mean(all_is_adv_not_detected))
+
+
+
+
+
+
+
+
diff --git a/case_studies/ml_loo/adversarial_evaluation.sh b/case_studies/ml_loo/adversarial_evaluation.sh
new file mode 100644
index 0000000..fb2b6b8
--- /dev/null
+++ b/case_studies/ml_loo/adversarial_evaluation.sh
@@ -0,0 +1,51 @@
+# strong bim/bim:
+#fpr1="0.2630430452371708"
+#fpr5="0.05742787427778243"
+#fpr10="0.006814145480778491"
+
+# weak bim/bim2:
+fpr1="0.8909015048587877"
+fpr5="0.38117096263305317"
+fpr10="0.05692284412332819"
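+# The values above are detector thresholds at 1%/5%/10% false-positive rate,
+# obtained from train_and_evaluate.py for the respective detector.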
+
+nsamples=${1:-512}
+fpr=${2:-5}
+
+case $fpr in
+ 1)
+ echo "Evaluating at @FPR=1"
+ threshold=${fpr1}
+ ;;
+ 5)
+ echo "Evaluating at @FPR=5"
+ threshold=${fpr5}
+ ;;
+ 10)
+ echo "Evaluating at @FPR=10"
+ threshold=${fpr10}
+ ;;
+esac
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Naive attack (FPR$fpr)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 ../../venv3.8tf/bin/python adversarial_evaluation.py \
+ --dataset_name cifar10 \
+ --model_name resnet \
+ --detector-attack=bim2 \
+ --attack=bim2 \
+ --detector-threshold=${threshold} \
+ --batch-size=1 \
+  --n-samples=$nsamples
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Adaptive attack (FPR$fpr)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 ../../venv3.8tf/bin/python adversarial_evaluation.py \
+ --dataset_name cifar10 \
+ --model_name resnet \
+ --detector-attack=bim2 \
+ --attack=fma \
+ --detector-threshold=${threshold} \
+ --batch-size=1 \
+ --n-samples=$nsamples
diff --git a/case_studies/ml_loo/attack_model.py b/case_studies/ml_loo/attack_model.py
new file mode 100644
index 0000000..ad87842
--- /dev/null
+++ b/case_studies/ml_loo/attack_model.py
@@ -0,0 +1,246 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Attack implementations (C&W, BIM, and the adaptive feature-matching attack FMA)
+used against the ML-LOO detector in this case study.
+The original C&W paper can be found at:
+https://nicholas.carlini.com/papers/2017_sp_nnrobustattacks.pdf
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import logging
+import os
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+from keras.layers import Input
+from cleverhans.attacks import CarliniWagnerL2
+from cleverhans.dataset import MNIST
+from cleverhans.loss import CrossEntropy
+from cleverhans.utils import grid_visual, AccuracyReport
+from cleverhans.utils import set_log_level
+from cleverhans.utils_tf import model_eval, tf_model_load
+from cleverhans.train import train
+from cleverhans.utils_keras import KerasModelWrapper
+import pickle as pkl
+from keras import backend as K
+
+from cleverhans.attacks import CarliniWagnerL2, FastGradientMethod, SaliencyMapMethod, DeepFool, BasicIterativeMethod
+
+class Attack(object):
+ def __init__(self, model, *args):
+ self.model = model
+
+ def attack(self, x, *args):
+ raise NotImplementedError
+
+class CleverhansAttackFeedableRunMixin:
+ def generate_np(self, x_val, feedable_dict={}, **kwargs):
+ """
+ Generate adversarial examples and return them as a NumPy array.
+ Sub-classes *should not* implement this method unless they must
+ perform special handling of arguments.
+ :param x_val: A NumPy array with the original inputs.
+ :param **kwargs: optional parameters used by child classes.
+ :return: A NumPy array holding the adversarial examples.
+ """
+
+ if self.sess is None:
+ raise ValueError("Cannot use `generate_np` when no `sess` was"
+ " provided")
+
+ packed = self.construct_variables(kwargs)
+ fixed, feedable, _, hash_key = packed
+
+ if hash_key not in self.graphs:
+ self.construct_graph(fixed, feedable, x_val, hash_key)
+ else:
+ # remove the None arguments, they are just left blank
+ for k in list(feedable.keys()):
+ if feedable[k] is None:
+ del feedable[k]
+
+ x, new_kwargs, x_adv = self.graphs[hash_key]
+
+ feed_dict = {x: x_val}
+
+ for name in feedable:
+ feed_dict[new_kwargs[name]] = feedable[name]
+
+ return self.sess.run(x_adv, {**feed_dict, **feedable_dict})
+
+class FeedableRunCarliniWagnerL2(CleverhansAttackFeedableRunMixin, CarliniWagnerL2):
+ pass
+
+
+class FeedableRunBasicIterativeMethod(CleverhansAttackFeedableRunMixin, BasicIterativeMethod):
+ pass
+
+
+class CW(Attack):
+ def __init__(self, model, sess, input_ph, num_classes, source_samples = 2, binary_search_steps = 5, cw_learning_rate = 5e-3, confidence = 0, attack_iterations = 1000, attack_initial_const = 1e-2):
+ super(Attack, self).__init__()
+
+ self.model = model
+ self.sess = sess
+
+ self.x = input_ph
+ self.y = Input(shape=(num_classes,), dtype = 'float32')
+
+ abort_early = True
+ self.cw = FeedableRunCarliniWagnerL2(self.model, sess=self.sess)
+ self.cw_params = {
+ 'binary_search_steps': binary_search_steps,
+ "y": None,
+ 'abort_early': True,
+ 'max_iterations': attack_iterations,
+ 'learning_rate': cw_learning_rate ,
+ 'batch_size': source_samples,
+ 'initial_const': attack_initial_const ,
+ 'confidence': confidence,
+ 'clip_min': 0.0,
+ }
+
+ def attack(self, x, y = None, feedable_dict={}):
+ # print(self.cw_params)
+ adv = self.cw.generate_np(x, **self.cw_params, feedable_dict=feedable_dict)
+
+ if y:
+ eval_params = {'batch_size': 100}
+ preds = self.model.get_logits(self.x)
+ acc = model_eval(self.sess, self.x, self.y, preds, adv, y, args=eval_params)
+ adv_success = 1 - acc
+ print('The adversarial success rate is {}.'.format(adv_success))
+
+ return adv
+
+
+# added by AUTHOR according to the description in the paper
+class BIM(Attack):
+ def __init__(self, model, sess, input_ph, num_classes, epsilon=0.03, learning_rate = 5e-3, attack_iterations = 1000, random_init=True):
+ super(Attack, self).__init__()
+
+ self.model = model
+ self.sess = sess
+
+ self.x = input_ph
+ self.y = Input(shape=(num_classes,), dtype = 'float32')
+
+ self.bim = FeedableRunBasicIterativeMethod(self.model, sess=self.sess)
+ self.bim_params = {
+ "y": None,
+ 'nb_iter': attack_iterations,
+ 'eps_iter': learning_rate,
+ 'eps': epsilon,
+ 'rand_init': random_init,
+ 'clip_min': 0.0,
+ 'clip_max': 1.0,
+ }
+
+ def attack(self, x, y = None, feedable_dict={}):
+ # print(self.bim_params)
+ adv = self.bim.generate_np(x, **self.bim_params, feedable_dict=feedable_dict)
+
+ if y:
+ eval_params = {'batch_size': 100}
+ preds = self.model.get_logits(self.x)
+ acc = model_eval(self.sess, self.x, self.y, preds, adv, y, args=eval_params)
+ adv_success = 1 - acc
+ print('The adversarial success rate is {}.'.format(adv_success))
+
+ return adv
+
+
+class FMA(Attack):
+ def __init__(self, raw_model, model, sess, input_ph, num_classes, target_samples,
+ reference,
+ features, epsilon=0.03, num_random_features=1000,
+ learning_rate = 5e-3, attack_iterations = 1000, random_init=True,
+ verbose=False):
+ super(Attack, self).__init__()
+
+ self.raw_model = raw_model
+ self.model = model
+ self.sess = sess
+
+ self.reference = reference
+ self.features = features
+
+ assert len(target_samples) == num_classes, (len(target_samples), num_classes)
+ self.target_samples = target_samples
+
+ self.x = input_ph
+ self.y = Input(shape=(num_classes,), dtype = 'float32')
+
+ self.logits = model.get_logits(input_ph)
+
+ self.epsilon = epsilon
+ self.learning_rate = learning_rate
+ self.attack_iterations = attack_iterations
+ self.random_init = random_init
+
+ self.all_features = tf.concat(features, 1)
+ num_random_features = min(num_random_features, self.all_features.shape[1].value)
+ self.num_random_features = num_random_features
+ self.feature_indices_ph = tf.placeholder(tf.int32, shape=(num_random_features,))
+ self.target_features_ph = tf.placeholder(tf.float32,
+ shape=self.all_features.shape)
+
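+    # Adaptive feature-matching loss: squared L2 distance between a random
+    # subset of the concatenated layer statistics of the current input and
+    # those of a clean sample from another class (fed via placeholders).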
+ self.loss = tf.nn.l2_loss(tf.gather(self.all_features -
+ tf.expand_dims(self.target_features_ph, 0), self.feature_indices_ph, axis=1))
+ self.gradient = tf.gradients(self.loss, self.x)[0]
+
+ self.verbose = verbose
+
+ def attack(self, x, y = None, feedable_dict={}):
+ assert len(x) == 1, "attack can only process a single sample at a time"
+ # print(self.bim_params)
+
+ y = self.sess.run(self.logits, {self.x: x, **feedable_dict}).argmax(-1)[0]
+    x_target = self.target_samples[(y + 1) % len(self.target_samples)]
+
+ from ml_loo import loo_ml_instance
+ target_features = loo_ml_instance(x_target, self.reference, self.raw_model, self.features,
+ batch_size=3100)[:, :-1]
+
+ if not self.random_init:
+ x_adv = x
+ else:
+ x_adv = np.clip(x + np.random.uniform(-self.epsilon, +self.epsilon, x.shape), 0, 1)
+
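+    # Projected signed-gradient descent on the feature-matching loss: every step
+    # draws a fresh random subset of feature dimensions, takes a signed-gradient
+    # step, and projects back into the L_inf ball of radius epsilon around x
+    # (clipped to [0, 1]).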
+ for i in range(self.attack_iterations):
+ feature_indices = np.random.choice(
+ np.arange(self.all_features.shape[-1].value),
+ self.num_random_features)
+ loss_value, logits_value, gradient_value = self.sess.run(
+ (self.loss, self.logits, self.gradient),
+ {
+ self.x: x_adv,
+ self.target_features_ph: target_features,
+ self.feature_indices_ph: feature_indices,
+ **feedable_dict
+ }
+ )
+ gradient_value = np.sign(gradient_value)
+ x_adv -= self.learning_rate * gradient_value
+ delta = np.clip(x_adv - x, -self.epsilon, +self.epsilon)
+ x_adv = np.clip(x + delta, 0, 1)
+
+ if self.verbose:
+ print(loss_value, logits_value)
+
+ return x_adv
diff --git a/case_studies/ml_loo/binarization_test.py b/case_studies/ml_loo/binarization_test.py
new file mode 100644
index 0000000..351947a
--- /dev/null
+++ b/case_studies/ml_loo/binarization_test.py
@@ -0,0 +1,415 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+# disable tf logging
+# some of these might have to be commented out to use verbose=True in the
+# adaptive attack
+import os
+
+import torch
+
+from ml_loo import collect_layers
+from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
+from utils import build_dataloader_from_arrays
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+import warnings
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+try:
+ import cPickle as pickle
+except:
+ import _pickle as pickle
+
+import cleverhans
+from cleverhans.utils_keras import KerasModelWrapper
+
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack
+
+
+import numpy as np
+from sklearn.linear_model import LogisticRegressionCV
+from tqdm import tqdm
+
+from build_model import ImageModel
+from load_data import ImageData, split_data
+from attack_model import BIM, CW, FMA
+
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--dataset_name', type = str,
+ choices = ['cifar10'],
+ default = 'cifar10')
+ parser.add_argument('--model_name', type = str,
+ choices = ['resnet'],
+ default = 'resnet')
+ parser.add_argument(
+ '--attack',
+ type = str,
+ choices = ['cw', 'bim', 'fma'],
+ default = 'cw'
+ )
+ parser.add_argument(
+ '--detector-attack',
+ type = str,
+ choices = ['cw', 'bim', 'bim2'],
+ default = 'cw'
+ )
+ parser.add_argument("--n-samples", type=int, default=500)
+
+ parser.add_argument("--n-boundary-points", type=int, default=1)
+ parser.add_argument("--n-inner-points", type=int, default=999)
+
+  # default equals the detector threshold for FPR5, obtained from train_and_evaluate.py
+ parser.add_argument("--detector-threshold", type=float, default=0.6151412488088068)
+
+ parser.add_argument("--inverted-test", action="store_true")
+ args = parser.parse_args()
+
+ if args.inverted_test:
+ print("Running inverted test")
+ else:
+ print("Running normal/non-inverted test")
+
+ dict_a = vars(args)
+ args.data_model = args.dataset_name + args.model_name
+
+ # load detector
+ with open(f"{args.data_model}/models/ml_loo_{args.detector_attack}_lr.pkl", "rb") as f:
+ lr = pickle.load(f)
+
+ print('Loading dataset...')
+ dataset = ImageData(args.dataset_name)
+ model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
+
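+  # Wraps the Keras ResNet so that its penultimate ("flatten_1") features feed a
+  # binary linear readout whose weight and bias are supplied at run time through
+  # placeholders; this is the surrogate binary classifier attacked during the
+  # binarization test.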
+ class ModelWrapper(cleverhans.model.Model):
+ def __init__(self, model, sess, input_ph, weight_shape, bias_shape):
+ self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
+ self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
+ self.model = model
+ self.sess = sess
+ self.input_ph = input_ph
+ self.num_classes = 2
+ self.first = True
+
+ def fprop(self, x):
+ y = self.model.get_layer(x, "flatten_1")
+ logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
+ return {"logits": logits, "probs": tf.nn.softmax(logits, -1), "predictions": tf.argmax(logits, -1)}
+
+ def get_probs(self, x):
+ return self.fprop(x)["probs"]
+
+ def predict(self, x, weights_feed_dict, logits=True):
+ if self.first:
+ self.targets = self.fprop(self.input_ph)
+ self.first = False
+ targets = self.targets
+ if logits:
+ target = targets["logits"]
+ else:
+ target = targets["probs"]
+ return self.sess.run(target, {self.input_ph: x, **weights_feed_dict})
+
+ keras_model = KerasModelWrapper(model.model)
+ wrapped_model = ModelWrapper(keras_model, model.sess, model.input_ph, (2, 256), (2,))
+
+ features = keras_model.get_layer(model.input_ph, "flatten_1")
+ feature_gradients = tf.gradients(features, model.input_ph)[0]
+ logits = keras_model.get_logits(model.input_ph)
+
+ def run_features(x: np.ndarray, features_only=True, features_and_logits=False):
+ if features_only:
+ assert not features_and_logits
+ targets = features
+ elif features_and_logits:
+ targets = (features, logits)
+ else:
+ targets = logits
+ return model.sess.run(targets,
+ feed_dict={model.input_ph: x.transpose(0, 2, 3, 1)})
+
+ def run_features_and_gradients(x: np.ndarray):
+ return model.sess.run((features, feature_gradients),
+ feed_dict={model.input_ph: x.transpose(0, 2, 3, 1)})
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=lambda x, features_only = False,
+ features_and_logits = False: run_features(x, features_only,
+ features_and_logits),
+ logit_forward_and_backward_pass=lambda x: run_features_and_gradients(x)
+ )
+
+ if args.dataset_name == 'cifar10':
+ X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
+ dataset.y_val, model, num_classes = 10,
+ split_rate = 0.8, sample_per_class = 1000)
+ else:
+ raise NotImplementedError()
+
+ if args.n_samples == -1:
+ args.n_samples = len(X_test)
+
+ X_test = X_test[:args.n_samples]
+ Y_test = Y_test[:args.n_samples]
+
+ from ml_loo import get_ml_loo_features
+
+ if args.model_name == 'resnet':
+ interested_layers = [14,24,35,45,56,67,70]
+ else:
+ raise ValueError()
+
+ # only relevant feature used by logistic regression model
+ stat_names = ['quantile']
+ reference = - dataset.x_train_mean
+ get_ml_loo_features_ = lambda x: get_ml_loo_features(model, x, reference, interested_layers, stat_names=stat_names)[:, 0]
+ detector = lambda x: lr.predict_proba(get_ml_loo_features_(x))[:, 1]
+
+ batch_size = 50
+ detector_threshold = args.detector_threshold
+
+ if args.attack == 'cw':
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = CW(
+ wrapped_model,
+ wrapped_model.sess,
+ wrapped_model.input_ph,
+ wrapped_model.num_classes,
+ source_samples = 1,
+ binary_search_steps = 5,
+ cw_learning_rate = 1e-2,
+ confidence = 0,
+ attack_iterations = 100,
+ attack_initial_const = 1e-2,
+ )
+ original_attack_model = CW(
+ keras_model,
+ wrapped_model.sess,
+ wrapped_model.input_ph,
+ model.num_classes,
+ source_samples = 1,
+ binary_search_steps = 5,
+ cw_learning_rate = 1e-2,
+ confidence = 0,
+ attack_iterations = 100,
+ attack_initial_const = 1e-2,
+ )
+ elif args.attack == "bim":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = BIM(
+ wrapped_model,
+ wrapped_model.sess,
+ wrapped_model.input_ph,
+ wrapped_model.num_classes,
+ attack_iterations = 100,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 100,
+ random_init=True
+ )
+ original_attack_model = BIM(
+ keras_model,
+ wrapped_model.sess,
+ wrapped_model.input_ph,
+ model.num_classes,
+ attack_iterations = 100,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 100,
+ random_init=True
+ )
+ elif args.attack == "fma":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ target_samples = []
+ for y in range(10):
+ target_samples.append(X_train[np.argmax(Y_train == y)])
+ target_samples = np.array(target_samples)
+ attack_model = FMA(
+ model,
+ wrapped_model,
+ wrapped_model.sess,
+ wrapped_model.input_ph,
+ wrapped_model.num_classes,
+ target_samples=target_samples[:2],
+ reference=reference,
+ features=collect_layers(model, interested_layers),
+ attack_iterations = 500,
+ epsilon=0.03,
+ learning_rate=4 * 0.03 / 100,
+ num_random_features=3100,
+ random_init=True
+ )
+ original_attack_model = BIM(
+ keras_model,
+ wrapped_model.sess,
+ wrapped_model.input_ph,
+ model.num_classes,
+ attack_iterations = 100,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 100,
+ random_init=True
+ )
+
+ assert 0 < X_test.max() <= 1.0, (X_test.min(), X_test.max())
+ test_loader = build_dataloader_from_arrays(X_test.transpose((0, 3, 1, 2)), Y_test, batch_size=32)
+
+ def run_attack(m, l, attack_kwargs):
+ # attack_kwargs contains values that might be useful for e.g. constructing logit matching evasion attacks
+ if args.attack == "fma":
+ reference_points = attack_kwargs["reference_points_x"]
+ if len(reference_points) < 2:
+ reference_points = np.concatenate([reference_points, reference_points], 0)
+ reference_points = reference_points.transpose((0, 2, 3, 1))
+ attack_model.target_samples = reference_points
+ else:
+ del attack_kwargs
+ linear_layer = m[-1]
+ del m
+
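+    # m is the binarized model returned by the test (feature extractor followed
+    # by a binary linear layer); only the linear layer's parameters are needed
+    # here, as they are fed into the placeholders of wrapped_model.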
+ weights_feed_dict = {
+ wrapped_model.weight: linear_layer.weight.data.numpy(),
+ wrapped_model.bias: linear_layer.bias.data.numpy()
+ }
+
+ for x, y in l:
+ x = x.numpy()
+ x = x.transpose((0, 2, 3, 1))
+ assert len(x) == 1
+ x_adv = attack_model.attack(x, feedable_dict=weights_feed_dict)
+
+ logits_adv = wrapped_model.predict(
+ x_adv, weights_feed_dict=weights_feed_dict, logits=True)
+ y_adv = logits_adv.argmax(-1)
+
+ is_adv = y_adv != y
+ is_not_detected = verify_input_data_fn(torch.tensor(x_adv.transpose((0, 3, 1, 2))))
+ is_adv_and_not_detected = np.logical_and(is_adv, is_not_detected)
+ is_adv_and_detected = np.logical_and(is_adv, ~is_not_detected)
+
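+      # normal test: success = misclassified and not detected;
+      # inverted test: success = misclassified and detected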
+ if args.inverted_test:
+ test_result = is_adv_and_detected
+ else:
+ test_result = is_adv_and_not_detected
+
+ return test_result, (torch.tensor(x_adv), torch.tensor(logits_adv))
+
+
+ def get_boundary_adversarials(x, y, n_samples, epsilon):
+ """Generate adversarial examples for the base classifier that get
+    rejected by the detector."""
+
+ assert len(x.shape) == 3
+ x = x.unsqueeze(0)
+ x = torch.repeat_interleave(x, n_samples, dim=0)
+
+ x = x.numpy()
+ x = x.transpose((0, 2, 3, 1))
+
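+    # Try the undefended attack up to 25 times; after every trial the
+    # perturbation is rescaled to an L_inf norm of exactly epsilon, and we stop
+    # as soon as all examples in the batch are flagged by the detector.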
+ for _ in range(25):
+ x_adv = original_attack_model.attack(x)
+ diff = x_adv - x
+ diff = diff / np.max(np.abs(diff)) * epsilon
+ x_adv = np.clip(x + diff, 0, 1)
+ is_detected = ~verify_input_data_fn(torch.tensor(x_adv.transpose((0, 3, 1, 2))))
+
+ if np.all(is_detected):
+        # stop once all adversarial examples in this batch are detected
+ break
+ else:
+ warnings.warn("Could not generate adversarial example that gets "
+ "detected after 25 trials.")
+ x_adv = x_adv.transpose((0, 3, 1, 2))
+
+ return torch.tensor(x_adv)
+
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ from active_tests.decision_boundary_binarization import format_result
+
+ if args.inverted_test:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=1,
+ )
+ else:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=args.n_boundary_points - 1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=0,
+ )
+
+ far_off_distance = 1.75
+
+ def verify_input_data_fn(x: torch.Tensor) -> np.ndarray:
+ """Checks if detector does not reject input data as adversarial, i.e.
+ input is clean."""
+ #if args.inverted_test:
+ # return detector(x.numpy().transpose((0, 2, 3, 1))) > detector_threshold
+ #else:
+ return detector(x.numpy().transpose((0, 2, 3, 1))) < detector_threshold
+
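+  # Run the decision-boundary binarization test: for every test sample, a binary
+  # readout is fit on inner/boundary points sampled around it, and run_attack
+  # must find an adversarial example for this induced binary classifier inside
+  # the epsilon ball; format_result aggregates the per-sample outcomes.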
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, attack_kwargs: run_attack(m, l, attack_kwargs),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=0.03,
+ norm="linf",
+ lr=10000,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ n_inner_points=args.n_inner_points,
+ **additional_settings
+ ),
+ n_samples=args.n_samples,
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+
+ verify_valid_boundary_training_data_fn=verify_input_data_fn,
+ get_boundary_adversarials_fn=get_boundary_adversarials,
+ verify_valid_inner_training_data_fn=None,
+ verify_valid_input_validation_data_fn=None,
+ fill_batches_for_verification=False,
+ far_off_distance=far_off_distance,
+ rejection_resampling_max_repetitions=25,
+ rescale_logits="adaptive"
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies, args.n_samples))
+
+
+
+
+
+
diff --git a/case_studies/ml_loo/binarization_test.sh b/case_studies/ml_loo/binarization_test.sh
new file mode 100644
index 0000000..1b977b1
--- /dev/null
+++ b/case_studies/ml_loo/binarization_test.sh
@@ -0,0 +1,60 @@
+# strong bim/bim:
+#fpr1="0.2630430452371708"
+#fpr5="0.05742787427778243"
+#fpr10="0.006814145480778491"
+
+# weak bim/bim2:
+fpr1="0.8909015048587877"
+fpr5="0.38117096263305317"
+fpr10="0.05692284412332819"
+
+nsamples=${1:-512}
+
+echo "Testing for FPR5"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal Test (naive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd)/../../ ../../venv3.8tf/bin/python binarization_test.py \
+ --dataset_name cifar10 \
+ --model_name resnet \
+ --n-samples=$nsamples \
+ --detector-threshold=${fpr5} \
+ --attack=bim \
+ --detector-attack=bim
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Inverted Test (naive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd)/../../ ../../venv3.8tf/bin/python binarization_test.py \
+ --dataset_name cifar10 \
+ --model_name resnet \
+ --n-samples=$nsamples \
+ --detector-threshold=${fpr5} \
+ --attack=bim \
+ --detector-attack=bim \
+ --inverted-test
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal Test (adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd)/../../ ../../venv3.8tf/bin/python binarization_test.py \
+ --dataset_name cifar10 \
+ --model_name resnet \
+ --n-samples=$nsamples \
+ --detector-threshold=${fpr5} \
+ --attack=fma \
+ --detector-attack=bim
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Inverted Test (adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd)/../../ ../../venv3.8tf/bin/python binarization_test.py \
+ --dataset_name cifar10 \
+ --model_name resnet \
+ --n-samples=$nsamples \
+ --detector-threshold=${fpr5} \
+ --attack=fma \
+ --detector-attack=bim \
+ --inverted-test
\ No newline at end of file
diff --git a/case_studies/ml_loo/build_model.py b/case_studies/ml_loo/build_model.py
new file mode 100644
index 0000000..095e28a
--- /dev/null
+++ b/case_studies/ml_loo/build_model.py
@@ -0,0 +1,570 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+import tensorflow as tf
+import numpy as np
+import os
+from keras.layers import Flatten, Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D, Convolution2D, BatchNormalization, Dense, Dropout, Activation, Embedding, Conv1D, Input, GlobalMaxPooling1D, Multiply, Lambda, Permute, GlobalAveragePooling2D
+from keras.preprocessing import sequence
+from keras.datasets import imdb, mnist
+from keras.callbacks import ModelCheckpoint
+from keras.models import Model, Sequential
+from keras.objectives import binary_crossentropy
+from keras.metrics import binary_accuracy as accuracy
+from keras.optimizers import RMSprop
+from keras import backend as K
+from keras import optimizers
+import math
+
+
+def construct_original_network(dataset_name, model_name, train):
+ data_model = dataset_name + model_name
+ if dataset_name == 'mnist':
+ input_size = 28
+ num_classes = 10
+ channel = 1
+
+
+ elif dataset_name == 'cifar10':
+ # Define the model
+ input_size = 32
+ num_classes = 10
+ channel = 3
+
+ elif dataset_name == 'cifar100':
+ # Define the model
+ input_size = 32
+ num_classes = 100
+ channel = 3
+
+ if model_name == 'scnn':
+ image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
+ net = Convolution2D(32, kernel_size=(5, 5),padding = 'same',
+ activation='relu', name = 'conv1')(image_ph)
+ net = MaxPooling2D(pool_size=(2, 2))(net)
+ net = Convolution2D(64, (5, 5),padding = 'same',
+ activation='relu', name = 'conv2')(net)
+ net = MaxPooling2D(pool_size=(2, 2))(net)
+
+ net = Flatten()(net)
+ net = Dense(1024, activation='relu',name='fc1')(net)
+ net = Dense(num_classes, activation='softmax',name='fc2')(net)
+ preds = Activation('softmax')(net)
+ model = Model(image_ph, preds)
+
+ model.compile(loss='categorical_crossentropy',
+ optimizer='adam',
+ metrics=['acc'])
+
+ elif model_name == 'cnn':
+ image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
+ net = Convolution2D(48, (3,3), padding='same', input_shape=(32, 32, 3))(image_ph)
+ net = Activation('relu')(net)
+ net = Convolution2D(48, (3, 3))(net)
+ net = Activation('relu')(net)
+ net = MaxPooling2D(pool_size=(2, 2))(net)
+ net = Dropout(0.25)(net)
+ net = Convolution2D(96, (3,3), padding='same')(net)
+ net = Activation('relu')(net)
+ net = Convolution2D(96, (3, 3))(net)
+ net = Activation('relu')(net)
+ net = MaxPooling2D(pool_size=(2, 2))(net)
+ net = Dropout(0.25)(net)
+ net = Convolution2D(192, (3,3), padding='same')(net)
+ net = Activation('relu')(net)
+ net = Convolution2D(192, (3, 3))(net)
+ net = Activation('relu')(net)
+ net = MaxPooling2D(pool_size=(2, 2))(net)
+ net = Dropout(0.25)(net)
+ net = Flatten()(net)
+ net = Dense(512)(net)
+ net = Activation('relu')(net)
+ net = Dropout(0.5)(net)
+ net = Dense(256)(net)
+ net = Activation('relu')(net)
+ net = Dropout(0.5)(net)
+ net = Dense(num_classes, activation=None)(net)
+ preds = Activation('softmax')(net)
+
+ model = Model(image_ph, preds)
+ sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
+
+ model.compile(loss='categorical_crossentropy',
+ optimizer=sgd,
+ metrics=['acc'])
+
+ # Compile the model
+ elif model_name == 'fc':
+ image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
+ net = Flatten()(image_ph)
+ net = Dense(256)(net)
+ net = Activation('relu')(net)
+
+ net = Dense(256)(net)
+ net = Activation('relu')(net)
+
+ net = Dense(256)(net)
+ net = Activation('relu')(net)
+
+ preds = Dense(num_classes, activation='softmax')(net)
+
+ model = Model(image_ph, preds)
+ sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
+
+ model.compile(loss='categorical_crossentropy',
+ optimizer=sgd,
+ metrics=['acc'])
+
+ elif model_name == 'resnet':
+ from resnet import resnet_v2, lr_schedule, lr_schedule_sgd
+
+ model, image_ph, preds = resnet_v2(input_shape=(input_size, input_size, channel), depth=20, num_classes = num_classes)
+
+ optimizer = optimizers.SGD(lr=0.1, momentum=0.9, nesterov=True)
+
+
+ model.compile(loss='categorical_crossentropy',
+ optimizer=optimizer,
+ metrics=['accuracy'])
+
+ elif model_name == 'densenet':
+ from densenet import DenseNet
+    nb_filter = -1  # 12 if dataset_name == 'cifar100' else -1
+
+ image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
+ model, preds = DenseNet((input_size,input_size,channel),
+ classes=num_classes, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=nb_filter, dropout_rate=0.0, weights=None, input_tensor = image_ph)
+
+ optimizer = optimizers.SGD(lr=0.1, momentum=0.9, nesterov=True)
+
+
+ model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])
+
+ grads = []
+ for c in range(num_classes):
+ grads.append(tf.gradients(preds[:,c], image_ph))
+
+ grads = tf.concat(grads, axis = 0)
+ approxs = grads * tf.expand_dims(image_ph, 0)
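+  # grads: per-class gradients of the predictions w.r.t. the input, stacked
+  # along axis 0; approxs: gradient * input ("taylor") saliency maps. Both are
+  # consumed by ImageModel.compute_saliency below.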
+
+ logits = [layer.output for layer in model.layers][-2]
+ print(logits)
+
+ sess = K.get_session()
+
+ return image_ph, preds, grads, approxs, sess, model, num_classes, logits
+
+
+class ImageModel():
+ def __init__(self, model_name, dataset_name, train = False, load = False, **kwargs):
+ self.model_name = model_name
+ self.dataset_name = dataset_name
+ self.data_model = dataset_name + model_name
+ self.framework = 'keras'
+
+ # if not train:
+ # K.set_learning_phase(0)
+
+ print('Constructing network...')
+ self.input_ph, self.preds, self.grads, self.approxs, self.sess, self.model, self.num_classes, self.logits = construct_original_network(self.dataset_name, self.model_name, train = train)
+
+
+ self.layers = self.model.layers
+ self.last_hidden_layer = self.model.layers[-3]
+
+ self.y_ph = tf.placeholder(tf.float32, shape = [None, self.num_classes])
+ if load:
+ if load == True:
+ print('Loading model weights...')
+ self.model.load_weights('{}/models/original.hdf5'.format(self.data_model),
+ by_name=True)
+ elif load != False:
+ self.model.load_weights('{}/models/{}.hdf5'.format(self.data_model, load),
+ by_name=True)
+
+ self.pred_counter = 0
+
+ def train(self, dataset):
+ print('Training...')
+ if self.dataset_name == 'mnist':
+ assert self.model_name in ['cnn', 'scnn']
+ data_model = self.dataset_name + self.model_name
+ filepath="{}/models/original.hdf5".format(data_model)
+ checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
+ verbose=1, save_best_only=True, mode='max')
+ callbacks_list = [checkpoint]
+ history = self.model.fit(dataset.x_train, dataset.y_train,
+ validation_data=(dataset.x_val, dataset.y_val),
+ callbacks = callbacks_list,
+ epochs=100, batch_size=128)
+ # print(history.history)
+ elif self.dataset_name in ['cifar10', 'cifar100']:
+ from keras.preprocessing.image import ImageDataGenerator
+
+ if self.model_name == 'cnn':
+ datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=True)
+ # zoom 0.2
+ datagen = create_resnet_generator(dataset.x_train)
+ callbacks_list = []
+ batch_size = 128
+ num_epochs = 200
+
+ elif self.model_name in ['resnet', 'densenet']:
+ from resnet import lr_schedule, create_resnet_generator
+ from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
+      # zoom 0.2, horizontal_flip always True; optimizer changed to SGD, batch size to 128.
+ datagen = ImageDataGenerator(rotation_range=15,
+ width_shift_range=5./32,
+ height_shift_range=5./32,
+ horizontal_flip = True,
+ zoom_range = 0.2)
+
+ datagen.fit(dataset.x_train, seed=0)
+
+ from resnet import lr_schedule_sgd
+ from keras.callbacks import LearningRateScheduler
+ lr_scheduler = LearningRateScheduler(lr_schedule_sgd)
+ callbacks_list = [lr_scheduler]
+ batch_size = 128 if self.dataset_name == 'cifar10' else 64
+ num_epochs = 200
+
+ filepath="{}/models/original.hdf5".format(self.data_model)
+ checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
+ verbose=1, save_best_only=True, mode='max')
+ callbacks_list.append(checkpoint)
+
+
+ model_info = self.model.fit_generator(datagen.flow(dataset.x_train,
+ dataset.y_train, batch_size = batch_size),
+ epochs = num_epochs,
+ steps_per_epoch = dataset.x_train.shape[0] // batch_size,
+ callbacks = callbacks_list,
+ validation_data = (dataset.x_val, dataset.y_val),
+ verbose = 2,
+ workers = 4)
+
+ def adv_train(self, dataset, attack_name):
+ from cleverhans.attacks import FastGradientMethod, ProjectedGradientDescent
+ from cleverhans.utils_keras import KerasModelWrapper
+ from cleverhans.loss import CrossEntropy
+ from cleverhans.train import train
+ from cleverhans.utils_tf import model_eval
+ import time, datetime
+
+ if attack_name == 'fgsm' and self.dataset_name == 'mnist':
+ wrap = KerasModelWrapper(self.model)
+ params = {'eps': 0.3,
+ 'clip_min': -1.,
+ 'clip_max': 1.}
+
+ attacker = FastGradientMethod(wrap, sess=self.sess)
+ def attack(x):
+ return attacker.generate(x, **params)
+
+ preds_adv = self.model(attack(self.input_ph))
+ loss = CrossEntropy(wrap, smoothing=0.1, attack=attack)
+
+ y_ph = tf.placeholder(tf.float32, shape = (None, self.num_classes))
+
+ def evaluate():
+ # Accuracy of adversarially trained model on legitimate test inputs
+ eval_params = {'batch_size': 128}
+ accuracy = model_eval(self.sess, self.input_ph, y_ph, self.preds, dataset.x_val, dataset.y_val, args=eval_params)
+ print('Test accuracy on legitimate examples: %0.4f' % accuracy)
+
+ # Accuracy of the adversarially trained model on adversarial examples
+ accuracy = model_eval(self.sess, self.input_ph, y_ph, preds_adv, dataset.x_val, dataset.y_val, args=eval_params)
+ print('Test accuracy on adversarial examples: %0.4f' % accuracy)
+
+ # if self.dataset_name == 'mnist':
+ train_params = {
+ 'nb_epochs': 20,
+ 'batch_size': 128,
+ 'learning_rate': 0.001,
+ 'train_dir': '{}/models'.format(self.data_model),
+ 'filename': 'adv.cpkt'
+ }
+
+ # Perform and evaluate adversarial training
+ train(self.sess, loss, dataset.x_train, dataset.y_train, evaluate=evaluate,
+ args=train_params, rng=np.random.RandomState([2017, 8, 30]))
+
+ self.model.save_weights('{}/models/{}.hdf5'.format(self.data_model, 'adv-{}'.format(attack_name)))
+
+ elif attack_name == 'pgd':
+ if self.dataset_name == 'mnist':
+ params = {'eps': 0.1,
+ # 'clip_min': -1.0,
+ # 'clip_max': 1.0,
+ 'eps_iter': 0.01,
+ 'nb_iter': 20,
+ 'epochs': 100,
+ 'batch_size': 50,
+ }
+ elif self.dataset_name == 'cifar10':
+ params = {'eps': 8.0 / 255 * 2,
+ # 'clip_min': -1.0,
+ # 'clip_max': 1.0,
+ 'eps_iter': 2.0 / 255 * 2,
+ 'nb_iter': 10,#10,#1,
+ 'epochs': 200,
+ 'batch_size': 128,
+ }
+
+ # attacker = ProjectedGradientDescent(wrap, sess=self.sess)
+
+ # import foolbox
+ # from foolbox.attacks import ProjectedGradientDescentAttack
+ from attack_model import LinfPGDAttack
+ # Main training loop
+ # fmodel = foolbox.models.KerasModel(self.model, bounds=(-1, 1), preprocessing=(0, 1))
+ attacker = LinfPGDAttack(self, params['eps'], k = params['nb_iter'], a = params['eps_iter'], clip_min = dataset.clip_min, clip_max = dataset.clip_max,
+ random_start = True, loss_func = 'xent')
+
+ def attack(x, y):
+ # return attacker(x, label=label, unpack=True, binary_search=False, epsilon=params['eps'], stepsize=params['eps_iter'],
+ # iterations=params['nb_iter'],
+ # random_start=False, return_early=True)
+ return attacker.attack(x, np.argmax(y, axis = -1))
+
+ from resnet import lr_schedule, create_resnet_generator, lr_schedule_sgd
+ from keras.preprocessing.image import ImageDataGenerator
+
+ # datagen = create_resnet_generator(dataset.x_train)
+ datagen = ImageDataGenerator(rotation_range=15,
+ width_shift_range=5./32,
+ height_shift_range=5./32,
+ horizontal_flip = True,
+ zoom_range = 0.2)
+
+ datagen.fit(dataset.x_train, seed=0)
+
+ xent = tf.reduce_mean(K.categorical_crossentropy(self.y_ph, self.preds), name='y_xent')
+
+
+ global_step = tf.train.get_or_create_global_step()
+
+ if self.dataset_name == 'cifar10':
+ momentum = 0.9
+ weight_decay = 0.0002
+ costs = []
+ print('number of trainable variables: ',len(tf.trainable_variables()))
+ for var in tf.trainable_variables():
+ if 'kernel' in var.name:
+ costs.append(tf.nn.l2_loss(var))
+ penalty = tf.add_n(costs)
+
+ loss = xent + weight_decay * penalty
+ elif self.dataset_name == 'mnist':
+ loss = xent
+
+
+ if self.dataset_name == 'cifar10':
+ boundaries = [40000,60000]
+ values = [0.1,0.01,0.001]
+ learning_rate = tf.train.piecewise_constant(
+ tf.cast(global_step, tf.int32),
+ boundaries,
+ values)
+ optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
+ elif self.dataset_name == 'mnist':
+ boundaries = [50000]
+ values = [1e-3,1e-4]
+ learning_rate = tf.train.piecewise_constant(
+ tf.cast(global_step, tf.int32),
+ boundaries,
+ values)
+ optimizer = tf.train.AdamOptimizer(learning_rate)
+
+ train_step = optimizer.minimize(loss, global_step=global_step)
+
+
+ accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.preds, 1),
+ tf.argmax(self.y_ph, 1)), tf.float32))
+
+ num_output_steps = 100 # 100
+ num_checkpoint_steps = 1000
+ batch_size = params['batch_size']
+ ii = 0
+ epochs = params['epochs']
+
+ for e in range(epochs):
+ num_batches = 0
+ for x_batch, y_batch in datagen.flow(dataset.x_train, dataset.y_train, batch_size=batch_size):
+
+ # Compute Adversarial Perturbations
+ start = time.time()
+
+ x_batch_adv = attack(x_batch, y_batch)
+
+ nat_dict = {self.input_ph: x_batch,
+ self.y_ph: y_batch}
+
+ adv_dict = {self.input_ph: x_batch_adv,
+ self.y_ph: y_batch}
+ eval_dict = {self.input_ph: dataset.x_train[:1000],
+ self.y_ph: dataset.y_train[:1000]}
+ val_dict = {self.input_ph: dataset.x_val[:1000],
+ self.y_ph: dataset.y_val[:1000]}
+ # Output to stdout
+ if ii % num_output_steps == 0:
+ nat_acc = self.sess.run(accuracy, feed_dict=eval_dict)
+ val_acc = self.sess.run(accuracy, feed_dict=val_dict)
+ adv_acc = self.sess.run(accuracy, feed_dict=adv_dict)
+
+ print('Step {} '.format(ii))
+ print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
+ print(' validation accuracy {:.4}%'.format(val_acc * 100))
+ print(' training adv accuracy {:.4}%'.format(adv_acc * 100))
+
+ if ii != 0:
+ print(' {} examples per second'.format(
+ num_output_steps * batch_size / training_time))
+ training_time = 0.0
+
+
+ # Write a checkpoint
+ if ii % num_checkpoint_steps == 0:
+ self.model.save_weights('{}/models/adv-{}-{}.hdf5'.format(self.data_model, attack_name, ii))
+
+ # Actual training step
+
+ _ = self.sess.run(train_step, feed_dict=adv_dict)
+ # print(step)
+ end = time.time()
+ training_time = end - start
+ ii += 1
+ num_batches += 1
+
+ if num_batches >= len(dataset.x_train) / batch_size:
+ break
+
+ self.model.save_weights('{}/models/adv-{}.hdf5'.format(self.data_model, attack_name))
+
+
+
+ def predict(self, x, verbose=0, batch_size = 500, logits = False):
+ x = np.array(x)
+ if len(x.shape) == 3:
+ _x = np.expand_dims(x, 0)
+ else:
+ _x = x
+
+ self.pred_counter += len(_x)
+
+ if not logits:
+ prob = self.model.predict(_x, batch_size = batch_size,
+ verbose = verbose)
+ else:
+ num_iters = int(math.ceil(len(_x) * 1.0 / batch_size))
+ probs = []
+ for i in range(num_iters):
+ # print('{} samples predicted'.format(i * batch_size))
+ x_batch = _x[i * batch_size: (i+1) * batch_size]
+
+ prob = self.sess.run(self.logits,
+ feed_dict = {self.input_ph: x_batch})
+
+ probs.append(prob)
+
+ prob = np.concatenate(probs, axis = 0)
+
+ if len(x.shape) == 3:
+ prob = prob.reshape(-1)
+
+ return prob
+
+ def compute_saliency(self, x, saliency_type = 'gradient'):
+ x = np.array(x)
+ if self.dataset_name in ['mnist', 'cifar10', 'cifar100']:
+ batchsize = 128 #if self.data in ['imdbcnn','imdblstm'] else 20
+ num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
+ approxs_val = []
+
+ for i in range(num_iters):
+ batch_data = x[i * batchsize: (i+1) * batchsize]
+
+ if saliency_type == 'gradient':
+ approxs = self.grads
+
+ elif saliency_type == 'taylor':
+ approxs = self.approxs
+
+ batch_approxs = self.sess.run(approxs, feed_dict = {self.input_ph: batch_data})
+ # [num_classes, batchsize, h, w, c]
+ approxs_val.append(batch_approxs)
+
+ approxs_val = np.concatenate(approxs_val, axis = 1)
+ # [num_classes, num_data, h, w, c]
+
+ pred_val = self.predict(x)
+
+ class_specific_scores = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
+ # [num_data, h, w, c]
+
+ return class_specific_scores
+
+ def compute_ig(self, x, reference):
+ x = np.array(x)
+ if self.dataset_name in ['mnist', 'cifar10', 'cifar100']:
+ batchsize = 1
+ steps = 50
+
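+      # Integrated gradients: average the per-class input gradients along a
+      # straight-line path from the reference image to the input (`steps`
+      # points) and multiply by (input - reference).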
+ pred_vals = self.predict(x)
+ class_specific_scores = []
+
+ num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
+ for i in range(num_iters):
+ batch_data = x[i * batchsize: (i+1) * batchsize]
+ _, h, w, c = batch_data.shape
+ step_batch = [batch_data * float(s) / steps + reference * (1 - float(s) / steps) for s in range(1, steps+1)]
+ # [steps,batchsize, h, w, c]
+
+ step_batch = np.reshape(step_batch,
+ [-1, h, w, c])
+ # [steps * batchsize, h, w, c]
+
+ batch_grads = self.sess.run(self.grads,
+ feed_dict = {self.input_ph: step_batch})
+ # [num_classes, steps * batchsize, h, w, c]
+ num_classes, _, h, w, c = batch_grads.shape
+ grads_val = np.mean(batch_grads.reshape([num_classes, steps, -1, h, w, c]), axis = 1)
+ approxs_val = grads_val * (batch_data - reference)
+ # [num_classes, batchsize, h, w, c]
+
+ pred_val = pred_vals[i * batchsize: (i+1) * batchsize]
+ class_specific_score = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
+ # [batchsize, h, w, c]
+
+ # [batchsize, maxlen]
+ class_specific_scores.append(class_specific_score)
+
+ # [num_data, length]
+ return np.concatenate(class_specific_scores, axis = 0)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/case_studies/ml_loo/cifar10resnet/models/original.hdf5 b/case_studies/ml_loo/cifar10resnet/models/original.hdf5
new file mode 100644
index 0000000..c4cbbe3
Binary files /dev/null and b/case_studies/ml_loo/cifar10resnet/models/original.hdf5 differ
diff --git a/case_studies/ml_loo/features_extract.py b/case_studies/ml_loo/features_extract.py
new file mode 100644
index 0000000..f055bcb
--- /dev/null
+++ b/case_studies/ml_loo/features_extract.py
@@ -0,0 +1,134 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import time
+import logging
+import os
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+from cleverhans.attacks import CarliniWagnerL2
+from cleverhans.dataset import MNIST
+from cleverhans.loss import CrossEntropy
+from cleverhans.utils import grid_visual, AccuracyReport
+from cleverhans.utils import set_log_level
+from cleverhans.utils_tf import model_eval, tf_model_load
+from cleverhans.train import train
+from cleverhans.utils_keras import KerasModelWrapper
+from build_model import ImageModel
+from load_data import ImageData, split_data
+import pickle as pkl
+from attack_model import Attack, CW
+import scipy
+from ml_loo import generate_ml_loo_features
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('--dataset_name', type = str,
+ choices = ['cifar10'],
+ default = 'cifar10')
+
+ parser.add_argument('--model_name', type = str,
+ choices = ['resnet'],
+ default = 'resnet')
+
+ parser.add_argument('--data_sample', type = str,
+ choices = ['x_train', 'x_val', 'x_val200'],
+ default = 'x_val200')
+
+ parser.add_argument(
+ '--attack',
+ type = str,
+ choices = ['cw', 'bim', 'bim2'],
+ default = 'cw'
+ )
+
+ parser.add_argument("--batch-size", default=500, type=int)
+
+ parser.add_argument(
+ '--det',
+ type = str,
+ choices = ['ml_loo'],
+ default = 'ml_loo'
+ )
+
+ args = parser.parse_args()
+ dict_a = vars(args)
+ data_model = args.dataset_name + args.model_name
+
+ print('Loading dataset...')
+ dataset = ImageData(args.dataset_name)
+ model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
+
+ ###########################################################
+ # Loading original, adversarial and noisy samples
+ ###########################################################
+
+ print('Loading original, adversarial and noisy samples...')
+ X_test = np.load('{}/data/{}_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
+
+ X_test_adv = np.load('{}/data/{}_adv_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
+
+ X_train = np.load('{}/data/{}_train_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
+
+ X_train_adv = np.load('{}/data/{}_train_adv_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
+
+ Y_test = model.predict(X_test)
+ print("X_test_adv: ", X_test_adv.shape)
+
+
+ x = {
+ 'train': {
+ 'original': X_train,
+ 'adv': X_train_adv,
+ },
+ 'test': {
+ 'original': X_test,
+ 'adv': X_test_adv,
+ },
+ }
+ #################################################################
+ # Extracting features for original, adversarial and noisy samples
+ #################################################################
+ cat = {'original':'ori', 'adv':'adv', 'noisy':'noisy'}
+ dt = {'train':'train', 'test':'test'}
+
+ if args.det in ['ml_loo']:
+ if args.model_name == 'resnet':
+ interested_layers = [14,24,35,45,56,67,70]
+
+ print('extracting layers ', interested_layers)
+ reference = - dataset.x_train_mean
+
+ combined_features = generate_ml_loo_features(args, data_model, reference, model, x, interested_layers, batch_size=args.batch_size)
+
+ for data_type in ['test', 'train']:
+ for category in ['original', 'adv']:
+ np.save('{}/data/{}_{}_{}_{}_{}.npy'.format(
+ data_model,
+ args.data_sample,
+ dt[data_type],
+ cat[category],
+ args.attack,
+ args.det),
+ combined_features[data_type][category])
+
+
\ No newline at end of file
diff --git a/case_studies/ml_loo/generate_attack.py b/case_studies/ml_loo/generate_attack.py
new file mode 100644
index 0000000..0efed65
--- /dev/null
+++ b/case_studies/ml_loo/generate_attack.py
@@ -0,0 +1,215 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import logging
+import os
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+from keras.layers import Input
+from cleverhans.attacks import CarliniWagnerL2
+from cleverhans.dataset import MNIST
+from cleverhans.loss import CrossEntropy
+from cleverhans.utils import grid_visual, AccuracyReport
+from cleverhans.utils import set_log_level
+from cleverhans.utils_tf import model_eval, tf_model_load
+from cleverhans.train import train
+from cleverhans.utils_keras import KerasModelWrapper
+from build_model import ImageModel
+from load_data import ImageData, split_data
+import pickle as pkl
+from keras.utils import to_categorical
+from attack_model import Attack, CW, BIM
+import scipy
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('--dataset_name', type = str,
+ choices = ['cifar10'],
+ default = 'cifar10')
+
+ parser.add_argument('--model_name', type = str,
+ choices = ['resnet'],
+ default = 'resnet')
+
+ parser.add_argument('--data_sample', type = str,
+ choices = ['x_train', 'x_val', 'x_val200'],
+ default = 'x_val200')
+
+ parser.add_argument(
+ '--attack',
+ type = str,
+ choices = ['cw', 'bim', 'bim2'],
+ default = 'cw'
+ )
+
+ args = parser.parse_args()
+ dict_a = vars(args)
+ data_model = args.dataset_name + args.model_name
+
+ if data_model not in os.listdir('./'):
+ os.mkdir(data_model)
+ if 'results' not in os.listdir('./{}'.format(data_model)):
+ os.mkdir('{}/results'.format(data_model))
+ if 'models' not in os.listdir(data_model):
+ os.mkdir('{}/models'.format(data_model))
+ if 'data' not in os.listdir(data_model):
+ os.mkdir('{}/data'.format(data_model))
+ if 'figs' not in os.listdir(data_model):
+ os.mkdir('{}/figs'.format(data_model))
+
+ print('Loading dataset...')
+ dataset = ImageData(args.dataset_name)
+ model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
+
+ if args.dataset_name == 'cifar10':
+ X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
+ dataset.y_val, model, num_classes = 10,
+ split_rate = 0.8, sample_per_class = 1000)
+
+
+ print('Sanity checking...')
+ data_sample = X_test
+ print('data_sample.shape', data_sample.shape)
+ print('X_train.shape', X_train.shape)
+
+ pred_test = model.predict(dataset.x_val)
+ def cross_entropy(predictions, targets, epsilon=1e-12):
+ predictions = np.clip(predictions, epsilon, 1. - epsilon)
+ N = predictions.shape[0]
+ ce = -np.sum(targets*np.log(predictions+1e-9))/N
+ return ce
+
+ ce = cross_entropy(pred_test, dataset.y_val, epsilon=1e-12)
+ acc = np.mean(np.argmax(pred_test, axis = 1) == np.argmax(dataset.y_val, axis = 1))
+ print('The accuracy is {}. The cross entropy is {}.'.format(acc, ce))
+
+
+ if args.attack == 'cw':
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = CW(
+          KerasModelWrapper(model.model),
+          model.sess,
+          model.input_ph,
+ model.num_classes,
+ source_samples = 100,
+ binary_search_steps = 5,
+ cw_learning_rate = 1e-2,
+ confidence = 0,
+ attack_iterations = 100,
+ attack_initial_const = 1e-2,
+ )
+ elif args.attack == "bim":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = BIM(
+ KerasModelWrapper(model.model),
+ model.sess,
+ model.input_ph,
+ model.num_classes,
+ attack_iterations = 100,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 100,
+ random_init=True
+ )
+ elif args.attack == "bim2":
+ if args.dataset_name in ['cifar10']:
+ if args.model_name == 'resnet':
+ attack_model = BIM(
+ KerasModelWrapper(model.model),
+ model.sess,
+ model.input_ph,
+ model.num_classes,
+ attack_iterations = 10,
+ epsilon=0.03,
+ learning_rate=2.5 * 0.03 / 10,
+ random_init=True
+ )
+
+
+ ###################################################
+  # keep only samples that the model classifies correctly and on which the attack succeeds
+ ###################################################
+
+ data_types = ['train', 'test']
+ data = {'train': (X_train, Y_train), 'test': (X_test, Y_test)}
+ if args.data_sample == 'x_val200':
+ num_samples = {'train': 800, 'test': 200}
+
+ for data_type in data_types:
+ x, y = data[data_type]
+ print('x.shape', x.shape)
+ print('y.shape', y.shape)
+ num_successes = 0
+ oris = []
+ perturbeds = []
+
+
+ batch_size = int(np.minimum(100, num_samples[data_type]))
+ cur_batch = 0
+ conf = 15
+ epsilon = 0
+ while num_successes < num_samples[data_type]:
+ batch_x, batch_y = x[cur_batch * batch_size:(cur_batch+1) * batch_size], y[cur_batch * batch_size:(cur_batch+1) * batch_size]
+
+ print('batch_x', batch_x.shape)
+ x_adv = attack_model.attack(batch_x)
+
+ print('x_adv', x_adv.shape)
+ if x_adv.shape[0] == 0:
+ continue
+ x_adv_labels = np.argmax(model.predict(x_adv), axis = -1)
+ index_filter = (x_adv_labels != np.argmax(batch_y, axis = 1))
+ ori = batch_x[index_filter]
+ perturbed = x_adv[index_filter]
+ print('Success rate', perturbed.shape[0] / len(x_adv))
+ oris.append(ori)
+ perturbeds.append(perturbed)
+
+ cur_batch += 1
+ num_successes += len(ori)
+      print('Number of successful samples is {}'.format(num_successes))
+
+ oris = np.concatenate(oris, axis = 0)
+ perturbeds = np.concatenate(perturbeds, axis = 0)
+
+ oris = oris[:num_samples[data_type]]
+ perturbeds = perturbeds[:num_samples[data_type]]
+
+ print('oris.shape', oris.shape)
+ print('perturbeds.shape', perturbeds.shape)
+
+ np.save('{}/data/{}{}_{}_{}.npy'.format(
+ data_model,
+ args.data_sample,
+ '' if data_type == 'test' else '_train',
+ args.attack,
+ 'ori'),
+ oris)
+ np.save('{}/data/{}{}_adv_{}_{}.npy'.format(
+ data_model,
+ args.data_sample,
+ '' if data_type == 'test' else '_train',
+ args.attack,
+ 'ori'),
+ perturbeds)
+
diff --git a/case_studies/ml_loo/load_data.py b/case_studies/ml_loo/load_data.py
new file mode 100644
index 0000000..58a6fc3
--- /dev/null
+++ b/case_studies/ml_loo/load_data.py
@@ -0,0 +1,166 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+# from model import *
+import numpy as np
+import tensorflow as tf
+import os
+import time
+import sys
+import tarfile
+import zipfile
+
+import keras
+import math
+from keras.utils import to_categorical
+
+class ImageData():
+ def __init__(self, dataset_name):
+ if dataset_name == 'mnist':
+ from keras.datasets import mnist
+ (x_train, y_train), (x_val, y_val) = mnist.load_data()
+ x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
+
+ x_val = x_val.reshape(x_val.shape[0], 28, 28, 1)
+
+ elif dataset_name == 'cifar100':
+ from keras.datasets import cifar100
+ (x_train, y_train), (x_val, y_val) = cifar100.load_data()
+
+
+ elif dataset_name == 'cifar10':
+ from keras.datasets import cifar10
+ # Load CIFAR10 Dataset
+ (x_train, y_train), (x_val, y_val) = cifar10.load_data()
+
+ x_train = x_train.astype('float32')/255
+ x_val = x_val.astype('float32')/255
+
+ y_train = to_categorical(y_train)
+ y_val = to_categorical(y_val)
+
+ x_train_mean = np.zeros(x_train.shape[1:])
+ x_train -= x_train_mean
+ x_val -= x_train_mean
+ self.clip_min = 0.0
+ self.clip_max = 1.0
+
+ self.x_train = x_train#[idx[:1000]]
+ self.x_val = x_val
+ self.y_train = y_train#[idx[:1000]]
+ self.y_val = y_val
+ self.x_train_mean = x_train_mean
+ print('self.x_val', self.x_val.shape)
+
+
+
+
+def split_data(x, y, model, num_classes = 10, split_rate = 0.8, sample_per_class = 100):
+ # print('x.shape', x.shape)
+ # print('y.shape', y.shape)
+
+ np.random.seed(10086)
+ pred = model.predict(x)
+ label_pred = np.argmax(pred, axis = 1)
+ label_truth = np.argmax(y, axis = 1)
+ correct_idx = label_pred==label_truth
+ print('Accuracy is {}'.format(np.mean(correct_idx)))
+ x, y = x[correct_idx], y[correct_idx]
+ label_pred = label_pred[correct_idx]
+
+ # print('x.shape', x.shape)
+ # print('y.shape', y.shape)
+
+ x_train, x_test, y_train, y_test = [], [], [], []
+ for class_id in range(num_classes):
+ _x = x[label_pred == class_id][:sample_per_class]
+ _y = y[label_pred == class_id][:sample_per_class]
+ l = len(_x)
+ x_train.append(_x[:int(l * split_rate)])
+ x_test.append(_x[int(l * split_rate):])
+
+ y_train.append(_y[:int(l * split_rate)])
+ y_test.append(_y[int(l * split_rate):])
+
+
+
+ x_train = np.concatenate(x_train, axis = 0)
+ x_test = np.concatenate(x_test, axis = 0)
+ y_train = np.concatenate(y_train, axis = 0)
+ y_test = np.concatenate(y_test, axis = 0)
+
+ idx_train = np.random.permutation(len(x_train))
+ idx_test = np.random.permutation(len(x_test))
+
+ x_train = x_train[idx_train]
+ y_train = y_train[idx_train]
+
+ x_test = x_test[idx_test]
+ y_test = y_test[idx_test]
+
+ return x_train, y_train, x_test, y_test
+
+
+if __name__ == '__main__':
+ import argparse
+ from build_model import ImageModel
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('--dataset_name', type = str,
+ choices = ['cifar10'],
+ default = 'cifar10')
+
+ parser.add_argument('--model_name', type = str,
+ choices = ['resnet'],
+ default = 'resnet')
+
+ args = parser.parse_args()
+ dict_a = vars(args)
+
+ data_model = args.dataset_name + args.model_name
+
+ dataset = ImageData(args.dataset_name)
+
+ model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
+
+ x, y = dataset.x_val, dataset.y_val
+
+ x_train, y_train, x_test, y_test = split_data(x, y, model, num_classes = 10, split_rate = 0.8)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/case_studies/ml_loo/ml_loo.py b/case_studies/ml_loo/ml_loo.py
new file mode 100644
index 0000000..4a4bdbf
--- /dev/null
+++ b/case_studies/ml_loo/ml_loo.py
@@ -0,0 +1,334 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import tensorflow as tf
+import os
+from keras.utils import to_categorical
+import math
+import time
+import sys
+from build_model import ImageModel
+from load_data import ImageData, split_data
+import pickle as pkl
+from keras.models import Model
+from scipy.stats import kurtosis, skew
+from scipy.spatial.distance import pdist
+from sklearn.linear_model import LogisticRegressionCV
+from sklearn.metrics import precision_recall_curve, roc_curve, auc, average_precision_score
+import matplotlib
+import matplotlib.pyplot as plt
+
+
+def con(score):
+ # score (n, d)
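+  # Mean absolute deviation of each row around its own mean.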
+ score = score.reshape(len(score), -1)
+ score_mean = np.mean(score, -1, keepdims = True)
+ c_score = score - score_mean
+ c_score = np.abs(c_score)
+ return np.mean(c_score, axis = -1)
+
+
+def mad(score):
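+  # Median absolute deviation of each sample's scores (a robust measure of dispersion).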
+ pd = []
+ for i in range(len(score)):
+ d = score[i]
+ median = np.median(d)
+ abs_dev = np.abs(d - median)
+ med_abs_dev = np.median(abs_dev)
+ pd.append(med_abs_dev)
+ pd = np.array(pd)
+ return pd
+
+
+def med_pdist(score):
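+  # Median pairwise distance between the entries of each sample's score vector.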
+ pd = []
+ for i in range(len(score)):
+ d = score[i]
+ k = np.median(pdist(d.reshape(-1,1)))
+ pd.append(k)
+ pd = np.array(pd)
+ return pd
+
+
+def pd(score):
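+  # Mean pairwise distance between the entries of each sample's score vector.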
+ pd = []
+ for i in range(len(score)):
+ d = score[i]
+ k = np.mean(pdist(d.reshape(-1,1)))
+ pd.append(k)
+ pd = np.array(pd)
+ return pd
+
+
+def neg_kurtosis(score):
+ k = []
+ for i in range(len(score)):
+ di = score[i]
+ ki = kurtosis(di, nan_policy = 'raise')
+ k.append(ki)
+ k = np.array(k)
+ return -k
+
+
+def quantile(score):
+ # score (n, d)
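+  # Interquartile range (75th minus 25th percentile) of each row.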
+ score = score.reshape(len(score), -1)
+ score_75 = np.percentile(score, 75, -1)
+ score_25 = np.percentile(score, 25, -1)
+ score_qt = score_75 - score_25
+ return score_qt
+
+
+def calculate(score, stat_name):
+ if stat_name == 'variance':
+ results = np.var(score, axis = -1)
+ elif stat_name == 'std':
+ results = np.std(score, axis = -1)
+ elif stat_name == 'pdist':
+ results = pd(score)
+ elif stat_name == 'con':
+ results = con(score)
+ elif stat_name == 'med_pdist':
+ results = med_pdist(score)
+ elif stat_name == 'kurtosis':
+ results = neg_kurtosis(score)
+ elif stat_name == 'skewness':
+ results = -skew(score, axis = -1)
+ elif stat_name == 'quantile':
+ results = quantile(score)
+ elif stat_name == 'mad':
+ results = mad(score)
+ #print('results.shape', results.shape)
+ return results
+
+
+def collect_layers(model, interested_layers):
+ if model.framework == 'keras':
+ outputs = [layer.output for layer in model.layers]
+ elif model.framework == 'tensorflow':
+ outputs = model.layers
+
+ outputs = [output for i, output in enumerate(outputs) if i in interested_layers]
+ #print(outputs)
+ features = []
+ for output in outputs:
+ #print(output)
+ if len(output.get_shape())== 4:
+ features.append(
+ tf.reduce_mean(output, axis = (1, 2))
+ )
+ else:
+ features.append(output)
+ return features
+
+
+def evaluate_features(x, model, features, batch_size=500):
+ x = np.array(x)
+ if len(x.shape) == 3:
+ _x = np.expand_dims(x, 0)
+ else:
+ _x = x
+ num_iters = int(math.ceil(len(_x) * 1.0 / batch_size))
+
+ outs = []
+ for i in range(num_iters):
+ x_batch = _x[i * batch_size: (i+1) * batch_size]
+ out = model.sess.run(features,
+ feed_dict = {model.input_ph: x_batch})
+
+ outs.append(out)
+
+ num_layers = len(outs[0])
+ outputs = []
+ for l in range(num_layers):
+ outputs.append(np.concatenate([outs[s][l] for s in range(len(outs))]))
+
+ # (3073, 64)
+ # (3073, 64)
+ # (3073, 128)
+ # (3073, 128)
+ # (3073, 256)
+ # (3073, 256)
+ # (3073, 10)
+ # (3073, 1)
+ outputs = np.concatenate(outputs, axis = 1)
+ prob = outputs[:,-model.num_classes:]
+ label = np.argmax(prob[-1])
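+  # The last row of `prob` corresponds to the unmodified sample; its predicted-class
+  # probability is appended below as an extra feature column.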
+ #print('outputs', outputs.shape)
+ #print('prob[:, label]', np.expand_dims(prob[:, label], axis = 1).shape)
+ outputs = np.concatenate([outputs, np.expand_dims(prob[:, label], axis = 1)], axis = 1)
+
+ return outputs
+
+
+def loo_ml_instance(sample, reference, model, features, batch_size=500):
+ h,w,c = sample.shape
+ sample = sample.reshape(-1)
+ reference = reference.reshape(-1)
+
+ data = []
+ st = time.time()
+  positions = np.ones((h*w*c + 1, h*w*c), dtype = bool)
+ for i in range(h*w*c):
+ positions[i, i] = False
+
+ data = np.where(positions, sample, reference)
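+  # `data` holds h*w*c leave-one-out copies (feature i replaced by the reference value
+  # in row i) plus the unmodified sample in the last row.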
+
+ data = data.reshape((-1, h, w, c))
+ features_val = evaluate_features(data, model, features, batch_size=batch_size) # (3072+1, 906+1)
+ st1 = time.time()
+
+ return features_val
+
+
+def get_ml_loo_features(model, x, reference, interested_layers, batch_size=3100,
+ stat_names=['std', 'variance', 'con', 'kurtosis', 'skewness', 'quantile', 'mad']):
+ # copied from generate_ml_loo_features
+
+ features = collect_layers(model, interested_layers)
+ all_features = []
+ for sample in x:
+ features_val = loo_ml_instance(sample, reference, model, features,
+ batch_size=batch_size)
+ features_val = np.transpose(features_val)[:,:-1]
+ single_feature = []
+ for stat_name in stat_names:
+ single_feature.append(calculate(features_val, stat_name))
+ single_feature = np.array(single_feature)
+ all_features.append(single_feature)
+ all_features = np.array(all_features)
+ return all_features
+
+def generate_ml_loo_features(args, data_model, reference, model, x, interested_layers, batch_size=500):
+ # print(args.attack)
+ # x = load_examples(data_model, attack)
+ features = collect_layers(model, interested_layers)
+
+ cat = {'original':'ori', 'adv':'adv', 'noisy':'noisy'}
+ dt = {'train':'train', 'test':'test'}
+ stat_names = ['std', 'variance', 'con', 'kurtosis', 'skewness', 'quantile', 'mad']
+
+ combined_features = {data_type: {} for data_type in ['test', 'train']}
+ for data_type in ['test', 'train']:
+ print('data_type', data_type)
+ for category in ['original', 'adv']:
+ print('category', category)
+ all_features = []
+ for i, sample in enumerate(x[data_type][category]):
+ print('Generating ML-LOO for {}th sample...'.format(i))
+ features_val = loo_ml_instance(sample, reference, model, features, batch_size=batch_size)
+
+ # (3073, 907)
+ #print('features_val.shape', features_val.shape)
+ features_val = np.transpose(features_val)[:,:-1]
+ #print('features_val.shape', features_val.shape)
+ # (906, 3073)
+ single_feature = []
+ for stat_name in stat_names:
+ #print('stat_name', stat_name)
+ single_feature.append(calculate(features_val, stat_name))
+
+ single_feature = np.array(single_feature)
+ #print('single_feature', single_feature.shape)
+ # (k, 906)
+ all_features.append(single_feature)
+ print('all_features', np.array(all_features).shape)
+ combined_features[data_type][category] = np.array(all_features)
+
+ np.save('{}/data/{}_{}_{}_{}_{}.npy'.format(
+ data_model,
+ args.data_sample,
+ dt[data_type],
+ cat[category],
+ args.attack,
+ args.det),
+ combined_features[data_type][category])
+
+ return combined_features
+
+
+def compute_stat_single_layer(output):
+ # l2dist = pdist(output)
+ # l1dist = pdist(output, 'minkowski', p = 1)
+ # sl2dist = pdist(X, 'seuclidean')
+ variance = np.sum(np.var(output, axis = 0))
+ # on = np.sum(np.linalg.norm(output, ord = 1, axis = 0))
+ con = np.sum(np.linalg.norm(output - np.mean(output, axis = 0), ord = 1, axis = 0))
+
+ return variance, con
+
+
+def load_features(data_model, attacks):
+ def softmax(x, axis):
+ """Compute softmax values for each sets of scores in x."""
+ e_x = np.exp(x - np.max(x, axis = axis, keepdims = True))
+ return e_x / e_x.sum(axis=axis, keepdims = True) # only difference
+
+ cat = {'original':'', 'adv':'_adv', 'noisy':'_noisy'}
+ dt = {'train':'_train', 'test':''}
+ features = {attack: {'train': {}, 'test': {}} for attack in attacks}
+
+ normalizer = {}
+ for attack in attacks:
+ for data_type in ['train', 'test']:
+ for category in ['original', 'adv']:
+ print('Loading data...')
+ feature = np.load('{}/data/{}{}{}_{}_{}.npy'.format(data_model,'x_val200',
+ dt[data_type],
+ cat[category],
+ attack,
+ 'ml_loo')) # [n, 3073, ...]
+ n = len(feature)
+ print('Processing...')
+ nums = [0,64,64,128,128,256,256,10]
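+          # Per-layer feature widths (the leading 0 is the start offset); cumulative sums
+          # give the column boundaries of each layer's features.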
+ splits = np.cumsum(nums) # [0,64,128,...]
+ processed = []
+ for j, s in enumerate(splits):
+ if j < len(splits) - 1:
+ separated = feature[:, :-1, s:splits[j+1]]
+
+ if j == len(splits) - 2:
+ separated = softmax(separated, axis = -1)
+
+ dist = np.var(separated, axis = 1) # [n, ...]
+ if data_type == 'train' and category == 'original' and attack == 'linfpgd':
+ avg_dist = np.mean(dist, axis = 0)
+ normalizer[j] = avg_dist
+
+ # dist /= normalizer[j]
+ dist = np.sqrt(dist)
+
+ # max_dist = np.max(dist, axis = -1)
+ print(np.mean(dist))
+ processed.append(dist.T)
+
+ processed = np.concatenate(processed, axis = 0).T
+ # processed = np.concatenate(processed, axis = )
+
+
+ print(processed.shape)
+
+ features[attack][data_type][category] = processed
+
+ return features
+
diff --git a/case_studies/ml_loo/resnet.py b/case_studies/ml_loo/resnet.py
new file mode 100644
index 0000000..4ce37a5
--- /dev/null
+++ b/case_studies/ml_loo/resnet.py
@@ -0,0 +1,294 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Trains a ResNet on the CIFAR10 dataset.
+
+ResNet v1
+[a] Deep Residual Learning for Image Recognition
+https://arxiv.org/pdf/1512.03385.pdf
+
+ResNet v2
+[b] Identity Mappings in Deep Residual Networks
+https://arxiv.org/pdf/1603.05027.pdf
+"""
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten
+from keras.optimizers import Adam
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import cifar10
+import numpy as np
+import os
+
+
+
+def lr_schedule(epoch):
+ """Learning Rate Schedule
+
+ Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
+ Called automatically every epoch as part of callbacks during training.
+
+ # Arguments
+ epoch (int): The number of epochs
+
+ # Returns
+ lr (float32): learning rate
+ """
+ lr = 1e-3
+ if epoch > 180:
+ lr *= 0.5e-3
+ elif epoch > 160:
+ lr *= 1e-3
+ elif epoch > 120:
+ lr *= 1e-2
+ elif epoch > 80:
+ lr *= 1e-1
+ print('Learning rate: ', lr)
+ return lr
+
+
+def lr_schedule_cifar100(epoch):
+ """Learning Rate Schedule
+
+ Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
+ Called automatically every epoch as part of callbacks during training.
+
+ # Arguments
+ epoch (int): The number of epochs
+
+ # Returns
+ lr (float32): learning rate
+ """
+ lr = 1e-4
+ if epoch > 180:
+ lr *= 0.5e-3
+ elif epoch > 160:
+ lr *= 1e-3
+ elif epoch > 120:
+ lr *= 1e-2
+ elif epoch > 80:
+ lr *= 1e-1
+ print('Learning rate: ', lr)
+ return lr
+
+def lr_schedule_sgd(epoch):
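+  # Step schedule for SGD: start at lr=0.1 and divide by 10 at epochs 81 and 122.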
+  decay = 2 if epoch >= 122 else (1 if epoch >= 81 else 0)
+ lr = 1e-1 * 0.1 ** decay
+ print('Learning rate: ', lr)
+ return lr
+
+def resnet_layer(inputs,
+ num_filters=16,
+ kernel_size=3,
+ strides=1,
+ activation='relu',
+ batch_normalization=True,
+ conv_first=True):
+ """2D Convolution-Batch Normalization-Activation stack builder
+
+ # Arguments
+ inputs (tensor): input tensor from input image or previous layer
+ num_filters (int): Conv2D number of filters
+ kernel_size (int): Conv2D square kernel dimensions
+ strides (int): Conv2D square stride dimensions
+ activation (string): activation name
+ batch_normalization (bool): whether to include batch normalization
+ conv_first (bool): conv-bn-activation (True) or
+ bn-activation-conv (False)
+
+ # Returns
+ x (tensor): tensor as input to the next layer
+ """
+ conv = Conv2D(num_filters,
+ kernel_size=kernel_size,
+ strides=strides,
+ padding='same',
+ kernel_initializer='he_normal',
+ kernel_regularizer=l2(1e-4))
+
+ x = inputs
+ if conv_first:
+ x = conv(x)
+ if batch_normalization:
+ x = BatchNormalization()(x)
+ if activation is not None:
+ x = Activation(activation)(x)
+ else:
+ if batch_normalization:
+ x = BatchNormalization()(x)
+ if activation is not None:
+ x = Activation(activation)(x)
+ x = conv(x)
+ return x
+
+
+def resnet_v2(input_shape, depth, num_classes=10):
+ """ResNet Version 2 Model builder [b]
+
+ Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
+ bottleneck layer
+ First shortcut connection per layer is 1 x 1 Conv2D.
+ Second and onwards shortcut connection is identity.
+ At the beginning of each stage, the feature map size is halved (downsampled)
+ by a convolutional layer with strides=2, while the number of filter maps is
+  doubled. Within each stage, the layers have the same number of filters and the
+ same filter map sizes.
+  Feature map sizes:
+ conv1 : 32x32, 16
+ stage 0: 32x32, 64
+ stage 1: 16x16, 128
+ stage 2: 8x8, 256
+
+ # Arguments
+ input_shape (tensor): shape of input image tensor
+ depth (int): number of core convolutional layers
+ num_classes (int): number of classes (CIFAR10 has 10)
+
+ # Returns
+ model (Model): Keras model instance
+ """
+ if (depth - 2) % 9 != 0:
+ raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
+ # Start model definition.
+ num_filters_in = 16
+ num_res_blocks = int((depth - 2) / 9)
+
+ inputs = Input(shape=input_shape)
+ # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
+ x = resnet_layer(inputs=inputs,
+ num_filters=num_filters_in,
+ conv_first=True)
+
+ # Instantiate the stack of residual units
+ for stage in range(3):
+ for res_block in range(num_res_blocks):
+ activation = 'relu'
+ batch_normalization = True
+ strides = 1
+ if stage == 0:
+ num_filters_out = num_filters_in * 4
+ if res_block == 0: # first layer and first stage
+ activation = None
+ batch_normalization = False
+ else:
+ num_filters_out = num_filters_in * 2
+ if res_block == 0: # first layer but not first stage
+ strides = 2 # downsample
+
+ # bottleneck residual unit
+ y = resnet_layer(inputs=x,
+ num_filters=num_filters_in,
+ kernel_size=1,
+ strides=strides,
+ activation=activation,
+ batch_normalization=batch_normalization,
+ conv_first=False)
+
+ y = resnet_layer(inputs=y,
+ num_filters=num_filters_in,
+ conv_first=False)
+ y = resnet_layer(inputs=y,
+ num_filters=num_filters_out,
+ kernel_size=1,
+ conv_first=False)
+ if res_block == 0:
+ # linear projection residual shortcut connection to match
+ # changed dims
+ x = resnet_layer(inputs=x,
+ num_filters=num_filters_out,
+ kernel_size=1,
+ strides=strides,
+ activation=None,
+ batch_normalization=False)
+
+ x = keras.layers.add([x, y])
+
+ num_filters_in = num_filters_out
+
+ # Add classifier on top.
+ # v2 has BN-ReLU before Pooling
+ x = BatchNormalization()(x)
+ x = Activation('relu')(x)
+ pool_size = int(x.get_shape()[1])
+ x = AveragePooling2D(pool_size=pool_size)(x)
+ y = Flatten()(x)
+ outputs = Dense(num_classes,
+ activation=None,
+ kernel_initializer='he_normal')(y)
+
+ outputs = Activation('softmax')(outputs)
+
+ # Instantiate model.
+ model = Model(inputs=inputs, outputs=outputs)
+ return model, inputs, outputs
+
+
+
+def create_resnet_generator(x_train):
+ # This will do preprocessing and realtime data augmentation:
+ datagen = ImageDataGenerator(
+ # set input mean to 0 over the dataset
+ featurewise_center=False,
+ # set each sample mean to 0
+ samplewise_center=False,
+ # divide inputs by std of dataset
+ featurewise_std_normalization=False,
+ # divide each input by its std
+ samplewise_std_normalization=False,
+ # apply ZCA whitening
+ zca_whitening=False,
+ # epsilon for ZCA whitening
+ zca_epsilon=1e-06,
+ # randomly rotate images in the range (deg 0 to 180)
+ rotation_range=0,
+ # randomly shift images horizontally
+ width_shift_range=0.1,
+ # randomly shift images vertically
+ height_shift_range=0.1,
+ # set range for random shear
+ shear_range=0.,
+ # set range for random zoom
+ zoom_range=0.,
+ # set range for random channel shifts
+ channel_shift_range=0.,
+ # set mode for filling points outside the input boundaries
+ fill_mode='nearest',
+ # value used for fill_mode = "constant"
+ cval=0.,
+ # randomly flip images
+ horizontal_flip=True,
+ # randomly flip images
+ vertical_flip=False,
+ # set rescaling factor (applied before any other transformation)
+ rescale=None,
+ # set function that will be applied on each input
+ preprocessing_function=None,
+ # image data format, either "channels_first" or "channels_last"
+ data_format=None,
+ # fraction of images reserved for validation (strictly between 0 and 1)
+ validation_split=0.0)
+
+ # Compute quantities required for featurewise normalization
+ # (std, mean, and principal components if ZCA whitening is applied).
+ datagen.fit(x_train)
+ return datagen
+
+
diff --git a/case_studies/ml_loo/supp.pdf b/case_studies/ml_loo/supp.pdf
new file mode 100644
index 0000000..35cf90d
Binary files /dev/null and b/case_studies/ml_loo/supp.pdf differ
diff --git a/case_studies/ml_loo/train_and_evaluate.py b/case_studies/ml_loo/train_and_evaluate.py
new file mode 100644
index 0000000..fb15770
--- /dev/null
+++ b/case_studies/ml_loo/train_and_evaluate.py
@@ -0,0 +1,206 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+try:
+ import cPickle as pickle
+except:
+ import _pickle as pickle
+import logging
+import os
+import numpy as np
+import time
+import os
+import math
+import pickle as pkl
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from matplotlib.patches import Rectangle
+from sklearn.preprocessing import scale, MinMaxScaler, StandardScaler
+from sklearn.metrics import accuracy_score, precision_score, recall_score
+from scipy.stats import pearsonr
+from scipy.stats import kurtosis, skew
+from scipy.spatial.distance import pdist
+from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
+from sklearn.metrics import precision_recall_curve, roc_curve, auc, average_precision_score
+
+
+color_dict = {
+ 'ml_loo': 'red',
+}
+
+linestyles = {
+ 'ml_loo': '-',
+}
+
+labels = {
+ 'ml_loo': 'ML-LOO',
+}
+
+labels_attack = {
+ 'cw': 'C&W',
+ 'bim': 'PGD',
+ 'bim2': 'PGD (fewer steps)',
+}
+
+labels_data = {
+ 'cifar10': 'CIFAR-10',
+}
+
+labels_model = {
+ 'resnet': 'ResNet',
+}
+
+
+def load_data(args, attack, det, magnitude = 0.0):
+ x, y = {}, {}
+ for data_type in ['train', 'test']:
+ if det == 'ml_loo':
+ data_ori = np.load('{}/data/{}_{}_{}_{}_{}.npy'.format(
+ args.data_model,
+ args.data_sample,
+ data_type,
+ 'ori',
+ attack,
+ 'ml_loo'))
+
+ data_adv = np.load('{}/data/{}_{}_{}_{}_{}.npy'.format(
+ args.data_model,
+ args.data_sample,
+ data_type,
+ 'adv',
+ attack,
+ 'ml_loo'))
+ d = len(data_ori)
+ print('detect using {}'.format(det))
+ print('using adv only')
+ print('data_ori', data_ori.shape)
+ # Use only IQR features (5th dimension).
+ data_ori = data_ori[:, [5], :]
+ data_adv = data_adv[:, [5], :]
+ data_ori = data_ori.reshape(d, -1)
+ data_adv = data_adv.reshape(d, -1)
+ # (200, 1)
+
+ d = len(data_ori)
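+      # Stack clean (label 0) and adversarial (label 1) feature vectors for the detector.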
+ x[data_type] = np.vstack([data_ori, data_adv])
+ y[data_type] = np.concatenate((np.zeros(d), np.ones(d)))
+
+
+ idx_train = np.random.permutation(len(x['train']))
+ x['train'] = x['train'][idx_train]
+ y['train'] = y['train'][idx_train]
+ return x, y
+
+
+def train_and_evaluate(args, detections, attack, fpr_upper = 1.0):
+ plt.figure(figsize = (10, 8))
+ font = {'weight': 'bold', 'size': 16}
+ matplotlib.rc('font', **font)
+
+ auc_dict = {}
+ tpr1 = {}
+ tpr5 = {}
+ tpr10 = {}
+
+ for det in detections:
+ # Load data
+ x, y = load_data(args, attack, det)
+ x_train, y_train = x['train'], y['train']
+ x_test, y_test = x['test'], y['test']
+
+ x_train = x_train.reshape(len(x_train), -1)
+ x_test = x_test.reshape(len(x_test), -1)
+ # Train
+ # TODO: verify this. max_iter=10000 was added by AUTHOR/zimmerrol
+ lr = LogisticRegressionCV(n_jobs=-1, max_iter=10000).fit(x_train, y_train)
+
+ with open(f"{args.data_model}/models/{det}_{attack}_lr.pkl", "wb") as f:
+ pickle.dump(lr, f)
+
+ # Predict
+        pred = lr.predict_proba(x_test)[:, 1]
+ # Evaluate.
+ fpr, tpr, thresholds = roc_curve(y_test, pred, drop_intermediate=False)
+ def find_nearest(array, value):
+ array = np.asarray(array)
+ idx = (np.abs(array - value)).argmin()
+ return idx
+ auc_dict[det] = auc(fpr, tpr)
+ tpr1[det] = tpr[find_nearest(fpr, 0.01)]
+ tpr5[det] = tpr[find_nearest(fpr, 0.05)]
+ tpr10[det] = tpr[find_nearest(fpr, 0.10)]
+ plt.plot(
+ fpr, tpr,
+ label="{0} (AUC: {1:0.3f})".format(labels[det], auc(fpr, tpr)),
+ color=color_dict[det],
+ linestyle=linestyles[det],
+ linewidth=4)
+ print("Threshold for FPR1:", thresholds[find_nearest(fpr, 0.01)])
+ print("Threshold for FPR5:", thresholds[find_nearest(fpr, 0.05)])
+ print("Threshold for FPR10:", thresholds[find_nearest(fpr, 0.10)])
+
+ # 0.2 was fpr_upper before
+ plt.xlim([0.0, 0.2])
+ plt.ylim([0.0, 1.0])
+ plt.xlabel('False Positive Rate', fontsize = 32)
+ plt.ylabel('True Positive Rate', fontsize = 32)
+ plt.title('{} ({}, {})'.format(labels_attack[attack], labels_data[args.dataset_name], labels_model[args.model_name]), fontsize = 32)
+ plt.legend(loc="lower right", fontsize = 22)
+ plt.show()
+ figure_name = '{}/figs/mad_transfer_roc_{}_{}_{}.pdf'.format(args.data_model, args.data_sample, attack, attack)
+ plt.savefig(figure_name)
+
+
+ return auc_dict, tpr1, tpr5, tpr10
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--dataset_name', type = str,
+ choices = ['cifar10'],
+ default = 'cifar10')
+ parser.add_argument('--model_name', type = str,
+ choices = ['resnet'],
+ default = 'resnet')
+ parser.add_argument('--data_sample', type = str,
+ choices = ['x_val200'],
+ default = 'x_val200')
+ parser.add_argument(
+ '--attack',
+ type = str,
+ choices = ['cw', 'bim', 'bim2'],
+ default = 'cw'
+ )
+
+ args = parser.parse_args()
+ dict_a = vars(args)
+ args.data_model = args.dataset_name + args.model_name
+
+ train_and_evaluate(args, ['ml_loo'], args.attack, fpr_upper = 1.0)
+
+
+
+
+
+
+
diff --git a/case_studies/mmt/LICENSE b/case_studies/mmt/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/case_studies/mmt/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/case_studies/mmt/README.md b/case_studies/mmt/README.md
new file mode 100644
index 0000000..a286a42
--- /dev/null
+++ b/case_studies/mmt/README.md
@@ -0,0 +1,116 @@
+# Max-Mahalanobis Training
+Max-Mahalanobis Training (MMT) is a novel training method that learns more robust models without hurting clean accuracy and with little extra computational cost.
+Technical details are specified in:
+
+[Max-Mahalanobis Linear Discriminant Analysis Networks](http://proceedings.mlr.press/v80/pang18a/pang18a.pdf) (ICML 2018)
+
+Tianyu Pang, Chao Du and Jun Zhu
+
+[Rethinking Softmax Cross-Entropy Loss for Adversarial Robustness](https://arxiv.org/pdf/1905.10626.pdf) (ICLR 2020)
+
+Tianyu Pang, Kun Xu, Yinpeng Dong, Chao Du, Ning Chen and Jun Zhu
+
+## Environment settings and libraries we used in our experiments
+
+This project is tested under the following environment settings:
+- OS: Ubuntu 16.04.3
+- GPU: Geforce 1080 Ti or Tesla P100
+- Cuda: 9.0, Cudnn: v7.03
+- Python: 2.7.12
+- cleverhans: 2.1.0
+- Keras: 2.2.4
+- tensorflow-gpu: 1.9.0
+
+We also thank the authors of [keras-resnet](https://github.com/raghakot/keras-resnet) for providing their code. Our code is largely adapted from their repositories. For convenience, we provide the *requirement.txt* file to set up a virtualenv that is sufficient to run the code.
+
+In the following, we first provide the training code. After that, we provide the evaluation code, e.g., for running the attacks.
+
+## Demo of MM centers (mean_logits in the code)
+
+
+This plot shows the MM centers for different numbers of classes `L`.
+
+## Training codes
+
+### Standard training with the SCE loss
+
+Let `dataset` be `mnist`, `cifar10` or `cifar100`; the command for training models with the SCE loss is
+```shell
+python train.py --batch_size=50 --dataset=[dataset] --optimizer='mom' --lr=0.01 --version=2 --use_MMLDA=False --use_BN=True --use_dense=True --use_leaky=False
+```
+Here the initial learning rate is `0.01`, the optimizer is `mom`, and we use the `Resnet-v2` architecture proposed by [He et al. (2016)](https://arxiv.org/abs/1603.05027). The number of training epochs is set to 40 on MNIST and to 200 on CIFAR-10 and CIFAR-100.
+
+### Standard training with the MMC loss
+Similarly, let `dataset` be `mnist`, `cifar10` or `cifar100`; the command for training models with the MMC loss is
+```shell
+python train.py --batch_size=50 --mean_var=10 --dataset=[dataset] --optimizer='mom' --lr=0.01 --version=2 --use_MMLDA=True --use_ball=True --use_BN=True --use_random=False --use_dense=True --use_leaky=False
+```
+Here the basic training settings, e.g., the learning rate and optimizer, are the same as for the SCE loss. The `mean_var` parameter is the $C_{MM}$ of the MMC loss in the paper. When the bool flag `use_ball` is False, the command runs training with the MMLDA loss.
+
+### Adversarial training with the SCE loss
+For adversarial training, we apply the most widely studied PGD-based method, proposed by [Madry et al. (2017)](https://arxiv.org/abs/1706.06083).
+```shell
+python advtrain.py --batch_size=50 --dataset=[dataset] --optimizer='mom' --lr=0.01 --version=2 --adv_ratio=1.0 --use_MMLDA=False --use_ball=False --use_target=False --attack_method='MadryEtAl' --use_BN=True --use_random=False
+```
+Here `adv_ratio` is set to 1, which means we only use adversarial examples in the training phase, as suggested in previous work. The bool flag `use_target` indicates whether targeted or untargeted attacks are used when crafting adversarial examples for training.
+
+### Adversarial training with the MMC loss
+The adversarial training command is similar for the MMC loss
+```shell
+python advtrain.py --batch_size=50 --mean_var=10 --dataset=[dataset] --optimizer='mom' --lr=0.01 --version=2 --adv_ratio=1.0 --use_MMLDA=True --use_ball=True --use_target=True --attack_method='MadryEtAl' --use_BN=True --use_random=False
+```
+
+## Evaluation codes
+
+The pretrained models are provided below for Resnet110 (n=18):
+
+[MMC (CIFAR-10)](http://ml.cs.tsinghua.edu.cn/~tianyu/MMC/pretrained_models/MMC_mom_cifar10/model.200.h5)
+
+[MMC (CIFAR-100)](http://ml.cs.tsinghua.edu.cn/~tianyu/MMC/pretrained_models/MMC_mom_cifar100/model.200.h5)
+
+[MMC + adv-training (CIFAR-10)](http://ml.cs.tsinghua.edu.cn/~tianyu/MMC/pretrained_models/MMC_mom_advtrain_cifar10/model.180.h5)
+
+[MMC + adv-training (CIFAR-100)](http://ml.cs.tsinghua.edu.cn/~tianyu/MMC/pretrained_models/MMC_mom_advtrain_cifar100/model.180.h5).
+
+### White-box L-infinity attack (PGD)
+In this setting, the attacks are usually iterative. For example, the command for applying targeted PGD-10 to evade models trained with the MMC loss is
+```shell
+python advtest_iterative.py --batch_size=50 --attack_method='MadryEtAl' --attack_method_for_advtrain=None --dataset=[dataset] --target=True --num_iter=10 --use_ball=True --use_MMLDA=True --use_advtrain=False --epoch=[epoch] --use_BN=True --normalize_output_for_ball=False --use_random=False --use_target=False
+```
+Here `attack_method` can be 'MadryEtAl' (PGD), 'FastGradientMethod' (FGSM), 'MomentumIterativeMethod' (MIM) or 'BasicIterativeMethod' (BIM). The `target` flag indicates whether a targeted or untargeted attack is used; `num_iter` is the number of attack iterations; `epoch` is the epoch of the checkpoint to load; `normalize_output_for_ball` is a bool flag deciding whether a softmax is applied to the predictions in the inference phase.
+
+**Note that our evaluation is based on cleverhans 2.1.0. To perform an adaptive attack, please manually modify the function** ```model_loss``` **in the file** ```utils_tf.py``` **by substituting the softmax cross-entropy loss with an adaptive objective, e.g.,** ```out=-tf.reduce_sum(logits * y, axis=-1)```.
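+
+As a rough sketch of this substitution (the helper name `adaptive_model_loss` below is ours and not part of cleverhans or this code base), the adaptive objective could look like:
+```python
+import tensorflow as tf
+
+def adaptive_model_loss(y, logits, mean=True):
+    # Hypothetical stand-in for the softmax cross-entropy term inside cleverhans'
+    # utils_tf.model_loss: the adaptive objective suggested above,
+    # out = -sum(logits * y) over the class axis (y is one-hot).
+    out = -tf.reduce_sum(logits * y, axis=-1)
+    return tf.reduce_mean(out) if mean else out
+```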
+
+When attacking the adversarially trained models, we set `use_advtrain` to True and `attack_method_for_advtrain` to 'MadryEtAl', since we use PGD-based adversarial training. `use_target` is set as in the training commands. For example, the command for applying untargeted PGD to evade models adversarially trained with the MMC loss is
+```shell
+python advtest_iterative.py --mean_var=10 --batch_size=50 --attack_method='MadryEtAl' --attack_method_for_advtrain='MadryEtAl' --dataset=[dataset] --target=False --num_iter=10 --use_ball=True --use_MMLDA=True --use_advtrain=True --epoch=[epoch] --use_BN=True --normalize_output_for_ball=False --use_random=False --adv_ratio=1.0 --use_target=False
+```
+Note that here we set `normalize_output_for_ball` to False to perform an adaptive attack.
+
+### White-box L-2 attack (C&W)
+In this setting, the attacks are usually optimization-based. The C&W method performs a binary search over its constant parameter to find successful adversarial examples with minimal distortion. The command below gives an example of applying a targeted C&W attack to models trained with the MMC loss.
+```shell
+python advtest_others.py --mean_var=10 --batch_size=50 --attack_method='CarliniWagnerL2' --attack_method_for_advtrain=None --dataset=[dataset] --target=True --use_ball=True --use_MMLDA=True --use_advtrain=False --adv_ratio=1.0 --use_target=False --epoch=[epoch] --use_BN=True --normalize_output_for_ball=False --use_random=False --use_dense=True --use_leaky=False --CW_confidence=0.
+```
+The specific parameter settings of the C&W attack can be found in the code. The `attack_method` can also be 'ElasticNetMethod' to perform the EAD attack.
+
+### Black-box transfer-based attack (MIM & PGD)
+For the black-box transfer-based setting, we apply the MIM and PGD attacks. An example command using the untargeted PGD-10 attack is shown below
+```shell
+python advtest_iterative_blackbox.py --batch_size=50 --optimizer='Adam' --attack_method='MadryEtAl' --dataset=[dataset] --target=False --num_iter=10 --use_random=False --use_dense=True --use_leaky=False --epoch=[epoch] --use_BN=True --model_1='AT-MMC-100' --model_2='SCE'
+```
+Here `model_1` is the substitute model used to craft adversarial examples, and `model_2` is the original model used to classify these adversarial examples. These two parameters can be `SCE`, `MMC-10`, `MMC-100`, `AT-SCE`, `AT-MMC-10`, `AT-MMC-100`. The `epoch` here is the training epoch of the checkpoint for both `model_1` and `model_2`.
+
+### Black-box gradient-free attack (SPSA)
+For the black-box gradient-free setting, we apply the SPSA attack. This attack is based on numerical approximation of the model gradients and can evade defenses based on gradient masking. An example command for an untargeted SPSA-10 attack on models trained with the MMC loss is given below
+```shell
+python advtest_others.py --mean_var=10 --batch_size=50 --attack_method='SPSA' --attack_method_for_advtrain=None --dataset=[dataset] --target=False --use_ball=True --use_MMLDA=True --use_advtrain=False --adv_ratio=1.0 --use_target=False --epoch=[epoch] --use_BN=True --normalize_output_for_ball=False --use_random=False --use_dense=True --use_leaky=False --SPSA_epsilon=8
+```
+More details of the parameter settings can be found in the code.
+
+### General-purpose attack
+To further test the robustness of our method, we investigate general-purpose attacks: we perturb the input images with Gaussian random noise or random rotations. An example command is
+```shell
+python advtest_simple_transform.py --mean_var=10 --batch_size=50 --attack_method='Rotation' --attack_method_for_advtrain='MadryEtAl' --dataset=[dataset] --use_ball=True --use_MMLDA=True --use_advtrain=True --epoch=[epoch] --adv_ratio=1.0 --use_target=False --normalize_output_for_ball=False
+```
+The `attack_method` could be 'Rotation' for rotation transformation or 'Gaussian' for Gaussian noise. Detailed parameter settings are provided in the code.
diff --git a/case_studies/mmt/adaptive_attack.py b/case_studies/mmt/adaptive_attack.py
new file mode 100644
index 0000000..9f6ca0c
--- /dev/null
+++ b/case_studies/mmt/adaptive_attack.py
@@ -0,0 +1,378 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cleverhans.attacks import Attack
+import tensorflow as tf
+import warnings
+import numpy as np
+
+from cleverhans import utils_tf
+from cleverhans.utils_tf import clip_eta
+from cleverhans.attacks import optimize_linear
+from six.moves import xrange
+
+
+def fgm(x,
+ features,
+ logit_means,
+ y=None,
+ eps=0.3,
+ ord=np.inf,
+ clip_min=None,
+ clip_max=None,
+ targeted=False,
+ sanity_checks=True,
+ projection="linf"):
+ asserts = []
+
+ # If a data range was specified, check that the input was in that range
+ if clip_min is not None:
+ asserts.append(utils_tf.assert_greater_equal(
+ x, tf.cast(clip_min, x.dtype)))
+
+ if clip_max is not None:
+ asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
+
+ if y is None:
+ raise NotImplementedError("labels must be supplied")
+
+ # Compute loss
+ loss, loss_diff = loss_fn(logit_means=logit_means, labels=y,
+ features=features)
+ if targeted:
+ loss = -loss
+
+ # Define gradient of loss wrt input
+ grad, = tf.gradients(loss, x)
+
+ # optimal_perturbation = optimize_linear(grad, eps, ord)
+
+ if projection == "l2":
+ square = tf.maximum(1e-12,
+ tf.reduce_sum(tf.square(grad),
+ reduction_indices=list(
+ xrange(1, len(grad.get_shape()))),
+ keepdims=True))
+ optimal_perturbation = grad / tf.sqrt(square)
+
+ # Scale perturbation to be the solution for the norm=eps rather than
+ # norm=1 problem
+ scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
+ else:
+ optimal_perturbation = tf.sign(grad)
+ scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
+
+ # Add perturbation to original example to obtain adversarial example
+ adv_x = x + scaled_perturbation
+ adv_x = x + utils_tf.clip_eta(adv_x - x, ord, eps)
+
+ # If clipping is needed, reset all values outside of [clip_min, clip_max]
+ if (clip_min is not None) or (clip_max is not None):
+ # We don't currently support one-sided clipping
+ assert clip_min is not None and clip_max is not None
+ adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
+
+ if sanity_checks:
+ with tf.control_dependencies(asserts):
+ adv_x = tf.identity(adv_x)
+
+ return adv_x, loss_diff
+
+
+def loss_fn(logit_means,
+ sentinel=None,
+ labels=None,
+ features=None,
+ dim=-1, ):
+ """
+  Feature-space loss for the adaptive attack: the target class is the one whose logit
+  mean is closest to the features (excluding the true class). Returns the negated summed
+  squared distance to that target mean, plus the per-sample difference between this
+  distance and the distance to the true class's mean.
+ """
+ # Make sure that all arguments were passed as named arguments.
+ if sentinel is not None:
+ name = "softmax_cross_entropy_with_logits"
+ raise ValueError("Only call `%s` with "
+ "named arguments (labels=..., logits=..., ...)"
+ % name)
+ if labels is None or features is None:
+ raise ValueError("Both labels and features must be provided.")
+
+ labels_oh = tf.stop_gradient(labels)
+ labels = tf.argmax(labels_oh, -1)
+
+ # find target labels
+ # ignore logit means for classes we are not considering (only relevant for
+ # binarization test)
+ logit_means = logit_means[:labels_oh.shape[-1]]
+ distances = tf.reduce_mean((tf.expand_dims(features, 1) - tf.expand_dims(logit_means, 0)) ** 2, -1)
+ distances = distances + 1e9 * labels_oh
+ target_labels = tf.argmin(distances, -1)
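+  # i.e. the class whose logit mean is closest to the features, excluding the true class
+  # (masked out above).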
+
+
+ # target_labels = (labels + 1) % 2
+
+ target_logit_means = tf.gather(logit_means, target_labels)
+ source_logit_means = tf.gather(logit_means, labels)
+
+ dist = tf.reduce_mean((features - target_logit_means) ** 2, -1)
+ dist_other = tf.reduce_mean((features - source_logit_means) ** 2, -1)
+
+ dist_diff = dist - dist_other
+
+ # invert sign so that we perform gradient ascent instead of descent
+ return -tf.reduce_sum(dist), dist_diff
+
+
+class FeatureSpaceProjectedGradientDescent(Attack):
+ """
+  This class implements a feature-space variant of the Basic Iterative Method
+  (Kurakin et al. 2016) when rand_init is set to 0, or of the Madry et al. (2017)
+  method when rand_minmax is larger than 0: instead of a cross-entropy loss, it
+  moves the input's features towards the closest other class's logit mean.
+ Paper link (Kurakin et al. 2016): https://arxiv.org/pdf/1607.02533.pdf
+ Paper link (Madry et al. 2017): https://arxiv.org/pdf/1706.06083.pdf
+ :param model: cleverhans.model.Model
+ :param sess: optional tf.Session
+ :param dtypestr: dtype of the data
+ :param default_rand_init: whether to use random initialization by default
+ :param kwargs: passed through to super constructor
+ """
+
+ def __init__(self, model, logit_means, sess=None, dtypestr='float32',
+ default_rand_init=True, max_steps=99999, projection='linf', **kwargs):
+ """
+ Create a ProjectedGradientDescent instance.
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+
+ super(FeatureSpaceProjectedGradientDescent, self).__init__(model, sess=sess,
+ dtypestr=dtypestr,
+ **kwargs)
+ self.feedable_kwargs = ('eps', 'eps_iter', 'y', 'y_target', 'clip_min',
+ 'clip_max')
+ self.structural_kwargs = ['ord', 'nb_iter', 'rand_init', 'sanity_checks']
+ self.logit_means = logit_means
+ self.default_rand_init = default_rand_init
+ self.max_steps = max_steps
+ self.projection = projection
+
+ def generate(self, x, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+ :param kwargs: See `parse_params`
+ """
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ asserts = []
+
+ # If a data range was specified, check that the input was in that range
+ if self.clip_min is not None:
+ asserts.append(utils_tf.assert_greater_equal(x,
+ tf.cast(self.clip_min,
+ x.dtype)))
+
+ if self.clip_max is not None:
+ asserts.append(utils_tf.assert_less_equal(x,
+ tf.cast(self.clip_max,
+ x.dtype)))
+
+ # Initialize loop variables
+ if self.rand_init:
+ eta = tf.random_uniform(tf.shape(x),
+ tf.cast(-self.rand_minmax, x.dtype),
+ tf.cast(self.rand_minmax, x.dtype),
+ dtype=x.dtype)
+ else:
+ eta = tf.zeros(tf.shape(x))
+
+ # Clip eta
+ eta = clip_eta(eta, self.ord, self.eps)
+ adv_x = x + eta
+ if self.clip_min is not None or self.clip_max is not None:
+ adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+
+ if self.y_target is not None:
+ raise NotImplementedError("Targeted mode not fully implemented yet")
+ elif self.y is not None:
+ y = self.y
+ targeted = False
+ else:
+ raise NotImplementedError("labels must be supplied")
+
+ y_kwarg = 'y_target' if targeted else 'y'
+ fgm_params = {
+ 'eps': self.eps_iter,
+ y_kwarg: y,
+ 'ord': self.ord,
+ 'clip_min': self.clip_min,
+ 'clip_max': self.clip_max,
+ "logit_means": self.logit_means
+ }
+ if self.ord == 1:
+ raise NotImplementedError("It's not clear that FGM is a good inner loop"
+ " step for PGD when ord=1, because ord=1 FGM "
+ " changes only one pixel at a time. We need "
+ " to rigorously test a strong ord=1 PGD "
+ "before enabling this feature.")
+
+ # Use getattr() to avoid errors in eager execution attacks
+
+ def cond(i, _, _2, loss_diff, first_idx_done):
+ return tf.reduce_any(
+ tf.logical_or(
+ tf.less(i, self.nb_iter),
+ tf.logical_and(
+ tf.greater(loss_diff, tf.zeros([])),
+ tf.less(i, self.max_steps)
+ )
+ # tf.logical_or(
+ # tf.less_equal(first_idx_done, tf.zeros([])),
+ # tf.logical_and(
+ # i < 2000,
+ # tf.logical_not(
+ # tf.logical_and(
+ # tf.less(loss_diff, tf.zeros([])),
+ # tf.less(first_idx_done + 10, i)
+ # ))
+ # )
+ # )
+ )
+ )
+
+ def body(i, adv_x, _, _2, first_idx_done):
+ adv_x_before = adv_x
+ adv_x, loss_diff = fgm(adv_x, features=self.model.get_mmd_features(adv_x),
+ **fgm_params, projection=self.projection)
+
+ # adv_x = tf.Print(adv_x, [i, first_idx_done, loss_diff])
+
+ # Clipping perturbation eta to self.ord norm ball
+ eta = adv_x - x
+ eta = clip_eta(eta, self.ord, self.eps)
+ adv_x = x + eta
+
+ # Redo the clipping.
+ # FGM already did it, but subtracting and re-adding eta can add some
+ # small numerical error.
+ if self.clip_min is not None or self.clip_max is not None:
+ adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+
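+      # Bookkeeping: per example, remember the iteration at which the loss most
+      # recently stopped improving (loss_diff < 0). Only the commented-out stopping
+      # rule in `cond` would consume this; the active rule uses loss_diff directly.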
+ first_idx_done = tf.where(
+ tf.logical_and(first_idx_done > 0, loss_diff < 0),
+ first_idx_done,
+ i * tf.where(loss_diff < 0, tf.ones(tf.shape(adv_x)[0]), tf.zeros(tf.shape(adv_x)[0]))
+ )
+
+ return i + 1, adv_x, adv_x_before, loss_diff, first_idx_done
+
+ _, _, adv_x, _, _ = tf.while_loop(cond, body,
+ [tf.zeros([]), adv_x, adv_x,
+ tf.ones(tf.shape(adv_x)[0]),
+ -1 * tf.ones(tf.shape(adv_x)[0])],
+ back_prop=True)
+
+ # Asserts run only on CPU.
+ # When multi-GPU eval code tries to force all PGD ops onto GPU, this
+ # can cause an error.
+ common_dtype = tf.float64
+ asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps_iter,
+ dtype=common_dtype),
+ tf.cast(self.eps,
+ dtype=common_dtype)))
+ if self.ord == np.inf and self.clip_min is not None:
+ # The 1e-6 is needed to compensate for numerical error.
+ # Without the 1e-6 this fails when e.g. eps=.2, clip_min=.5,
+ # clip_max=.7
+ asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps, x.dtype),
+ 1e-6 + tf.cast(self.clip_max,
+ x.dtype)
+ - tf.cast(self.clip_min,
+ x.dtype)))
+
+ if self.sanity_checks:
+ with tf.control_dependencies(asserts):
+ adv_x = tf.identity(adv_x)
+
+ return adv_x
+
+ def parse_params(self,
+ eps=0.3,
+ eps_iter=0.05,
+ nb_iter=10,
+ y=None,
+ ord=np.inf,
+ clip_min=None,
+ clip_max=None,
+ y_target=None,
+ rand_init=None,
+ rand_minmax=0.3,
+ sanity_checks=True,
+ **kwargs):
+ """
+    Takes in a dictionary of parameters and applies attack-specific checks
+    before saving them as attributes.
+ Attack-specific parameters:
+ :param eps: (optional float) maximum distortion of adversarial example
+ compared to original input
+ :param eps_iter: (optional float) step size for each attack iteration
+ :param nb_iter: (optional int) Number of attack iterations.
+ :param y: (optional) A tensor with the true labels.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ :param sanity_checks: bool Insert tf asserts checking values
+ (Some tests need to run with no sanity checks because the
+ tests intentionally configure the attack strangely)
+ """
+
+ # Save attack-specific parameters
+ self.eps = eps
+ if rand_init is None:
+ rand_init = self.default_rand_init
+ self.rand_init = rand_init
+ if self.rand_init:
+ self.rand_minmax = eps
+ else:
+ self.rand_minmax = 0.
+ self.eps_iter = eps_iter
+ self.nb_iter = nb_iter
+ self.y = y
+ self.y_target = y_target
+ self.ord = ord
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+ if isinstance(eps, float) and isinstance(eps_iter, float):
+ # If these are both known at compile time, we can check before anything
+ # is run. If they are tf, we can't check them yet.
+ assert eps_iter <= eps, (eps_iter, eps)
+
+ if self.y is not None and self.y_target is not None:
+ raise ValueError("Must not set both y and y_target")
+ # Check if order of the norm is acceptable given current implementation
+ if self.ord not in [np.inf, 1, 2]:
+ raise ValueError("Norm order must be either np.inf, 1, or 2.")
+ self.sanity_checks = sanity_checks
+
+ if len(kwargs.keys()) > 0:
+ warnings.warn("kwargs is unused and will be removed on or after "
+ "2019-04-26.")
+
+ return True
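
For reference, the termination rule implemented by `cond`/`body` above can be summarized as: run at least `nb_iter` feature-space FGM steps, then keep iterating as long as the loss difference reported for any example in the batch is still positive, but never beyond `max_steps`. The following is a minimal single-example sketch of that control flow (the real code runs it as a batched `tf.while_loop`); `attack_step` is a hypothetical stand-in for the feature-space FGM update and is not part of this repository.

```python
def run_adaptive_pgd(x, attack_step, nb_iter=10, max_steps=600):
    """Sketch of the adaptive stopping rule, assuming attack_step(x) -> (x_new, loss_diff)."""
    adv_x, loss_diff, i = x, float("inf"), 0
    # Run at least nb_iter steps; afterwards keep going while the attack loss is
    # still improving (positive loss difference), but never exceed max_steps.
    while i < nb_iter or (loss_diff > 0 and i < max_steps):
        adv_x, loss_diff = attack_step(adv_x)
        i += 1
    return adv_x
```
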
diff --git a/case_studies/mmt/adversarial_evaluation.sh b/case_studies/mmt/adversarial_evaluation.sh
new file mode 100644
index 0000000..b582819
--- /dev/null
+++ b/case_studies/mmt/adversarial_evaluation.sh
@@ -0,0 +1,21 @@
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$(pwd) venv3.8tf/bin/python case_studies/mmt/advtest_iterative.py --mean_var=10 --batch_size=50 \
+ --attack_method_for_advtrain='MadryEtAl' \
+ --dataset=cifar10 --target=False --use_ball=True \
+ --use_MMLDA=True --use_advtrain=False --epoch=200 \
+ --use_BN=True --normalize_output_for_ball=False --use_random=False \
+ --adv_ratio=1.0 --use_target=False \
+ --checkpoint=checkpoints/mmt_mmc_rn110.h5 \
+ --n_samples=512 \
+ --attack_method='MadryEtAl' \
+ --num_iter=50
+
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$(pwd) venv3.8tf/bin/python case_studies/mmt/advtest_iterative.py --mean_var=10 --batch_size=50 \
+ --attack_method_for_advtrain='MadryEtAl' \
+ --dataset=cifar10 --target=False --use_ball=True \
+ --use_MMLDA=True --use_advtrain=False --epoch=200 \
+ --use_BN=True --normalize_output_for_ball=False --use_random=False \
+ --adv_ratio=1.0 --use_target=False \
+ --checkpoint=checkpoints/mmt_mmc_rn110.h5 \
+ --n_samples=512 \
+ --attack_method='Adaptive' \
+ --num_iter=50
\ No newline at end of file
diff --git a/case_studies/mmt/advtest_iterative.py b/case_studies/mmt/advtest_iterative.py
new file mode 100644
index 0000000..3179558
--- /dev/null
+++ b/case_studies/mmt/advtest_iterative.py
@@ -0,0 +1,402 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import logging
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+from mmt_utils.model import resnet_v1, resnet_v2
+import cleverhans.attacks as attacks
+from cleverhans.utils_tf import model_eval
+from mmt_utils.keras_wraper_ensemble import KerasModelWrapper
+from mmt_utils.utils_model_eval import model_eval_targetacc
+from sklearn.metrics import roc_auc_score
+from fgm_patched import fgm_patched
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
+tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_integer('version', 2, '')
+tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
+tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
+tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
+tf.app.flags.DEFINE_integer('num_iter', 10, '')
+tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
+tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_advtrain', True, 'whether use advtraining or normal training')
+tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
+tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
+tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
+tf.app.flags.DEFINE_string('dataset', 'mnist', '')
+tf.app.flags.DEFINE_bool('normalize_output_for_ball', True, 'whether apply softmax in the inference phase')
+tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
+tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
+tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
+
+tf.app.flags.DEFINE_integer('n_samples', 512, '')
+tf.app.flags.DEFINE_string('checkpoint', None, '')
+# For calculating AUC scores
+tf.app.flags.DEFINE_bool('is_calculate_auc', False, 'whether to calculate auc scores')
+tf.app.flags.DEFINE_bool('is_auc_metric_softmax_for_MMC', False, 'whether use softmax to calculate auc metrics for MMC')
+
+# Load the dataset
+if FLAGS.dataset=='mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.expand_dims(x_train, axis=3)
+ x_test = np.expand_dims(x_test, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30,40]
+ x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
+
+elif FLAGS.dataset=='cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+elif FLAGS.dataset=='cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+else:
+ print('Unknown dataset')
+
+
+# These parameters are usually fixed
+subtract_pixel_mean = True
+version = FLAGS.version # Model version
+n = 18 # n=5 for resnet-32 v1, n=18 for Resnet110 (according to README.md)
+
+# Computed depth from supplied model parameter n
+if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = 256
+
+
+if FLAGS.use_random==True:
+ name_random = '_random'
+else:
+ name_random = ''
+
+if FLAGS.use_leaky==True:
+ name_leaky = '_withleaky'
+else:
+ name_leaky = ''
+
+if FLAGS.use_dense==True:
+ name_dense = ''
+else:
+ name_dense = '_nodense'
+
+
+#Load means in MMLDA
+kernel_dict = loadmat('case_studies/mmt/kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
+mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
+mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
+
+
+#MMLDA prediction function
+def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ if FLAGS.normalize_output_for_ball==False:
+ return logits
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+
+
+# Craft random target labels (each different from the true label) for targeted attacks.
+y_test_target = np.zeros_like(y_test)
+for i in range(y_test.shape[0]):
+ l = np.random.randint(num_class)
+ while l == y_test[i][0]:
+ l = np.random.randint(num_class)
+ y_test_target[i][0] = l
+print('Finish crafting y_test_target!!!!!!!!!!!')
+
+# Input image dimensions.
+input_shape = x_train.shape[1:]
+
+# Normalize data.
+x_train = x_train.astype('float32') / 255
+x_test = x_test.astype('float32') / 255
+
+clip_min = 0.0
+clip_max = 1.0
+# If subtract pixel mean is enabled
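+# (Shifting the bounds by the max/min of the per-pixel training mean gives the
+# loosest scalar clip range that still contains every per-pixel valid range after
+# mean subtraction, so no legitimate pixel value gets clipped away.)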
+if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0)
+ x_train -= x_train_mean
+ x_test -= x_train_mean
+ clip_min -= np.max(x_train_mean)
+ clip_max -= np.min(x_train_mean)
+
+# Convert class vectors to binary class matrices.
+y_train = keras.utils.to_categorical(y_train, num_class)
+y_test = keras.utils.to_categorical(y_test, num_class)
+y_test_target = keras.utils.to_categorical(y_test_target, num_class)
+
+
+# Define input TF placeholder
+y_place = tf.placeholder(tf.float32, shape=(None, num_class))
+y_target = tf.placeholder(tf.float32, shape=(None, num_class))
+sess = tf.Session()
+keras.backend.set_session(sess)
+
+
+model_input = Input(shape=input_shape)
+
+# dim of logits is batch_size x dim_means
+if version == 2:
+ original_model,_,_,_,final_features = resnet_v2(immediate_input=model_input, input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+else:
+ original_model,_,_,_,final_features = resnet_v1(immediate_input=model_input, input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+
+print("depth", depth)
+print("#layers", len(original_model.layers))
+
+if FLAGS.use_BN==True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+
+#Whether use target attack for adversarial training
+if FLAGS.use_target==False:
+ is_target = ''
+else:
+ is_target = 'target'
+
+
+if FLAGS.use_advtrain==True:
+ dirr = 'advtrained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain = '_'+is_target+FLAGS.attack_method_for_advtrain
+ adv_ratio_name = '_advratio'+str(FLAGS.adv_ratio)
+ mean_var = int(FLAGS.mean_var)
+else:
+ dirr = 'trained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain = ''
+ adv_ratio_name = ''
+ mean_var = FLAGS.mean_var
+
+
+if FLAGS.use_MMLDA==True:
+ print('Using MMLDA')
+ new_layer = Lambda(MMLDA_layer)
+ predictions = new_layer(final_features)
+ model = Model(input=model_input, output=predictions)
+ use_ball_=''
+ if FLAGS.use_ball==False:
+ print('Using softmax function')
+ use_ball_='_softmax'
+ filepath_dir = dirr+'resnet32v'+str(version)+'_meanvar'+str(mean_var) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+name_dense+name_random+use_ball_+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+else:
+ print('Using softmax loss')
+ model = original_model
+ filepath_dir = dirr+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+
+
+print(filepath_dir)
+model.load_weights(FLAGS.checkpoint)
+
+# TODO: ch_compatibility_mode needs to be set to True since the authors of the
+# model mixed up logits and predictions, which accidentally worked with ch2.1.0
+# but not with later versions: setting this flag modifies the model so that one
+# can reproduce their results with later ch versions.
+wrap_ensemble = KerasModelWrapper(model, num_class=num_class, binarized_model=False,
+ ch_compatibility_mode=True)
+
+#model.load_weights(filepath_dir)
+
+
+# Initialize the attack method
+if FLAGS.attack_method == 'MadryEtAl':
+ att = attacks.MadryEtAl(wrap_ensemble)
+elif FLAGS.attack_method == 'FastGradientMethod':
+ att = attacks.FastGradientMethod(wrap_ensemble)
+elif FLAGS.attack_method == 'MomentumIterativeMethod':
+ att = attacks.MomentumIterativeMethod(wrap_ensemble)
+elif FLAGS.attack_method == 'BasicIterativeMethod':
+ att = attacks.BasicIterativeMethod(wrap_ensemble)
+elif FLAGS.attack_method == "Adaptive":
+ from adaptive_attack import FeatureSpaceProjectedGradientDescent
+ att = FeatureSpaceProjectedGradientDescent(wrap_ensemble, logit_means=mean_logits,
+ max_steps=600, projection="linf")
+
+
+# Consider the attack to be constant
+eval_par = {'batch_size': FLAGS.batch_size}
+
+if FLAGS.n_samples != -1:
+ print(f"Using {FLAGS.n_samples} random test samples")
+ inds = np.arange(len(x_test))
+ np.random.shuffle(inds)
+ inds = inds[:FLAGS.n_samples]
+ x_test, y_test = x_test[inds], y_test[inds]
+
+
+if FLAGS.is_calculate_auc:
+ # Calculate model preds for clean inputs
+ avg_score_nor = np.array([])
+ nor_indicator = np.ones((1000,), dtype=int)
+ for i in range(10):
+ avg_score_nor_batch = sess.run(tf.reduce_max(model(x_place),axis=-1), feed_dict={x_place:x_test[i*100:(i+1)*100]})
+ avg_score_nor = np.concatenate((avg_score_nor, avg_score_nor_batch), axis=0)
+ print('Calculate score for nor images with batch', i)
+
+ # Calculate model preds for adv inputs
+ eps_ = 8 / 256.0
+ if FLAGS.target==False:
+ y_target = None
+ if FLAGS.attack_method == 'FastGradientMethod':
+ att_params = {'eps': eps_,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'y_target': y_target}
+ else:
+ att_params = {'eps': eps_,
+ #'eps_iter': eps_*1.0/FLAGS.num_iter,
+ #'eps_iter': 3.*eps_/FLAGS.num_iter,
+ 'eps_iter': 2. / 256.,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'nb_iter': FLAGS.num_iter,
+ 'y_target': y_target}
+ if FLAGS.attack_method == "Adaptive":
+ att_params["y"] = y_place
+ att_params['eps_iter'] = 1 / 256.
+ print("clip_min", clip_min)
+ print("clip_max", clip_max)
+ adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
+ preds = tf.reduce_max(model(adv_x),axis=-1)
+
+ if FLAGS.is_auc_metric_softmax_for_MMC==True:
+ preds = tf.reduce_max(tf.nn.softmax(model(adv_x)),axis=-1)
+
+ avg_score_adv = np.array([])
+ adv_indicator = np.zeros((1000,), dtype=int)
+ if FLAGS.target==True:
+ for i in range(10):
+ avg_score_adv_batch = sess.run(preds, feed_dict={x_place:x_test[i*100:(i+1)*100], y_target:y_test_target[i*100:(i+1)*100]})
+ avg_score_adv = np.concatenate((avg_score_adv, avg_score_adv_batch), axis=0)
+ print('Calculate score for target attack images with batch', i)
+ else:
+ for i in range(10):
+ avg_score_adv_batch = sess.run(preds, feed_dict={x_place:x_test[i*100:(i+1)*100]})
+ avg_score_adv = np.concatenate((avg_score_adv, avg_score_adv_batch), axis=0)
+ print('Calculate score for untarget attack images with batch', i)
+
+ score_all = np.concatenate((avg_score_nor,avg_score_adv), axis=0)
+ indicator_all = np.concatenate((nor_indicator,adv_indicator), axis=0)
+ print('AUC score is', roc_auc_score(indicator_all, score_all))
+
+else:
+ clip_min = clip_min.item()
+ clip_max = clip_max.item()
+ for eps in range(4):
+ eps_ = (eps+1) * 8
+ print('eps is %d'%eps_)
+ eps_ = eps_ / 256.0
+ if FLAGS.target==False:
+ y_target = None
+ if FLAGS.attack_method == 'FastGradientMethod':
+ att_params = {'eps': eps_,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'y_target': y_target}
+ else:
+ att_params = {'eps': eps_,
+ #'eps_iter': eps_*1.0/FLAGS.num_iter,
+ #'eps_iter': 3.*eps_/FLAGS.num_iter,
+ 'eps_iter': 2. / 256.,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'nb_iter': FLAGS.num_iter,
+ 'y_target': y_target,
+ 'y': y_place,
+ }
+ if FLAGS.attack_method == "Adaptive":
+ att_params["y"] = y_place
+ att_params['eps_iter'] = 0.5 / 256.
+
+ # debugging statements
+ print("att_params", att_params)
+ if FLAGS.attack_method != "Adaptive":
+ import cleverhans.attacks
+ cleverhans.attacks.fgm = fgm_patched
+ print("patched fgm function")
+
+ #clean_preds = model(x_place)
+ #ll = sess.run(clean_preds, {x_place: x_test[:16]})
+ #import pdb; pdb.set_trace()
+
+ adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
+ preds = model(adv_x)
+
+ if FLAGS.target==False:
+ acc = model_eval(sess, x_place, y_place, preds, x_test, y_test, args=eval_par)
+ print('adv_acc: %.3f' %acc)
+ else:
+ acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test, y_test, y_test_target, args=eval_par)
+ print('adv_acc_target: %.3f' %acc)
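
Throughout these scripts the MMC/MMLDA head (`MMLDA_layer`) classifies an input by the negative squared Euclidean distance between its final features and fixed, pre-computed class means. The NumPy sketch below mirrors that computation for clarity; `features` and `means` are illustrative names, not objects defined in this file.

```python
import numpy as np

def mmc_logits(features, means):
    """Negative squared distance to every class center.

    features: (batch, num_dense) final-layer features
    means:    (num_class, num_dense) fixed MMC centers (scaled by mean_var)
    """
    diff = features[:, None, :] - means[None, :, :]   # (batch, num_class, num_dense)
    return -np.sum(diff ** 2, axis=-1)                # (batch, num_class)

def mmc_predict(features, means, normalize=True):
    logits = mmc_logits(features, means)
    if not normalize:                                  # normalize_output_for_ball=False
        return logits
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)           # softmax over classes
```
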
diff --git a/case_studies/mmt/advtest_iterative_blackbox.py b/case_studies/mmt/advtest_iterative_blackbox.py
new file mode 100644
index 0000000..8d30bfd
--- /dev/null
+++ b/case_studies/mmt/advtest_iterative_blackbox.py
@@ -0,0 +1,404 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+from utils.model import resnet_v1, resnet_v2
+import cleverhans.attacks as attacks
+from cleverhans.utils_tf import model_eval
+from utils.keras_wraper_ensemble import KerasModelWrapper
+from utils.utils_model_eval import model_eval_targetacc
+
+FLAGS = tf.app.flags.FLAGS
+
+#Common Flags for two models
+tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
+tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_integer('version', 2, '')
+tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
+tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
+tf.app.flags.DEFINE_integer('num_iter', 10, '')
+tf.app.flags.DEFINE_string('dataset', 'cifar10', '')
+tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
+tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
+tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
+tf.app.flags.DEFINE_integer('epoch', 180, 'the epoch of model to load')
+tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
+
+# SCE, MMC-10, MMC-100, AT-SCE, AT-MMC-10, AT-MMC-100
+tf.app.flags.DEFINE_string('model_1', 'SCE', '')
+tf.app.flags.DEFINE_string('model_2', 'MMC-10', '')
+
+#Specific Flags for model 1
+tf.app.flags.DEFINE_float('mean_var_1', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('attack_method_for_advtrain_1', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_bool('use_target_1', False, 'whether use target attack or untarget attack for adversarial training')
+tf.app.flags.DEFINE_bool('use_ball_1', True, 'whether use ball loss or softmax')
+tf.app.flags.DEFINE_bool('use_MMLDA_1', True, 'whether use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_advtrain_1', True, 'whether use advtraining or normal training')
+tf.app.flags.DEFINE_float('adv_ratio_1', 1.0, 'the ratio of adversarial examples in each mini-batch')
+tf.app.flags.DEFINE_bool('normalize_output_for_ball_1', True, 'whether apply softmax in the inference phase')
+
+#Specific Flags for model 2
+tf.app.flags.DEFINE_float('mean_var_2', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('attack_method_for_advtrain_2', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_bool('use_target_2', False, 'whether use target attack or untarget attack for adversarial training')
+tf.app.flags.DEFINE_bool('use_ball_2', True, 'whether use ball loss or softmax')
+tf.app.flags.DEFINE_bool('use_MMLDA_2', True, 'whether use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_advtrain_2', True, 'whether use advtraining or normal training')
+tf.app.flags.DEFINE_float('adv_ratio_2', 1.0, 'the ratio of adversarial examples in each mini-batch')
+tf.app.flags.DEFINE_bool('normalize_output_for_ball_2', True, 'whether apply softmax in the inference phase')
+
+##### model 1 is the substitute model used to craft adversarial examples, model 2 is the original model used to classify these adversarial examples.
+
+def return_paras(model_name):
+ if model_name == 'SCE':
+ return 0, None, False, False, False, False, 0.0, True
+ elif model_name == 'MMC-10':
+ return 10.0, None, False, True, True, False, 0.0, False
+ elif model_name == 'MMC-100':
+ return 100.0, None, False, True, True, False, 0.0, False
+ elif model_name == 'AT-SCE':
+ return 0, 'MadryEtAl', True, False, False, True, 1.0, True
+ elif model_name == 'AT-MMC-10':
+ return 10, 'MadryEtAl', True, True, True, True, 1.0, False
+ elif model_name == 'AT-MMC-100':
+ return 100, 'MadryEtAl', True, True, True, True, 1.0, False
+ else:
+ return None
+
+
+FLAGS.mean_var_1, FLAGS.attack_method_for_advtrain_1, FLAGS.use_target_1, FLAGS.use_ball_1, \
+FLAGS.use_MMLDA_1, FLAGS.use_advtrain_1, FLAGS.adv_ratio_1, FLAGS.normalize_output_for_ball_1 = return_paras(FLAGS.model_1)
+
+
+FLAGS.mean_var_2, FLAGS.attack_method_for_advtrain_2, FLAGS.use_target_2, FLAGS.use_ball_2, \
+FLAGS.use_MMLDA_2, FLAGS.use_advtrain_2, FLAGS.adv_ratio_2, FLAGS.normalize_output_for_ball_2 = return_paras(FLAGS.model_2)
+
+
+
+# Load the dataset
+if FLAGS.dataset=='mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.expand_dims(x_train, axis=3)
+ x_test = np.expand_dims(x_test, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30,40]
+ x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
+
+elif FLAGS.dataset=='cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+elif FLAGS.dataset=='cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+else:
+ print('Unknown dataset')
+
+
+# These parameters are usually fixed
+subtract_pixel_mean = True
+version = FLAGS.version # Model version
+n = 5 # n=5 for resnet-32 v1
+
+# Computed depth from supplied model parameter n
+if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = 256
+
+
+if FLAGS.use_BN==True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+if FLAGS.use_random==True:
+ name_random = '_random'
+else:
+ name_random = ''
+
+if FLAGS.use_leaky==True:
+ name_leaky = '_withleaky'
+else:
+ name_leaky = ''
+
+if FLAGS.use_dense==True:
+ name_dense = ''
+else:
+ name_dense = '_nodense'
+
+
+#Load means in MMLDA
+kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
+mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
+mean_logits_1 = FLAGS.mean_var_1 * tf.constant(mean_logits,dtype=tf.float32)
+mean_logits_2 = FLAGS.mean_var_2 * tf.constant(mean_logits,dtype=tf.float32)
+
+
+#MMLDA prediction function
+def MMLDA_layer_1(x, means=mean_logits_1, num_class=num_class, use_ball=FLAGS.use_ball_1):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ if FLAGS.normalize_output_for_ball_1==False:
+ return logits
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+
+
+def MMLDA_layer_2(x, means=mean_logits_2, num_class=num_class, use_ball=FLAGS.use_ball_2):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ if FLAGS.normalize_output_for_ball_2==False:
+ return logits
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+
+
+# Craft random target labels (each different from the true label) for targeted attacks.
+y_test_target = np.zeros_like(y_test)
+for i in range(y_test.shape[0]):
+ l = np.random.randint(num_class)
+ while l == y_test[i][0]:
+ l = np.random.randint(num_class)
+ y_test_target[i][0] = l
+print('Finish crafting y_test_target!!!!!!!!!!!')
+
+# Input image dimensions.
+input_shape = x_train.shape[1:]
+
+# Normalize data.
+x_train = x_train.astype('float32') / 255
+x_test = x_test.astype('float32') / 255
+
+clip_min = 0.0
+clip_max = 1.0
+# If subtract pixel mean is enabled
+if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0)
+ x_train -= x_train_mean
+ x_test -= x_train_mean
+ clip_min -= x_train_mean
+ clip_max -= x_train_mean
+
+# Convert class vectors to binary class matrices.
+y_train = keras.utils.to_categorical(y_train, num_class)
+y_test = keras.utils.to_categorical(y_test, num_class)
+y_test_target = keras.utils.to_categorical(y_test_target, num_class)
+
+
+# Define input TF placeholder
+y_place = tf.placeholder(tf.float32, shape=(None, num_class))
+y_target = tf.placeholder(tf.float32, shape=(None, num_class))
+sess = tf.Session()
+keras.backend.set_session(sess)
+
+
+model_input_1 = Input(shape=input_shape)
+model_input_2 = Input(shape=input_shape)
+
+# dim of logits is batch_size x dim_means
+if version == 2:
+ original_model_1,_,_,_,final_features_1 = resnet_v2(input=model_input_1, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+else:
+ original_model_1,_,_,_,final_features_1 = resnet_v1(input=model_input_1, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+
+if version == 2:
+ original_model_2,_,_,_,final_features_2 = resnet_v2(input=model_input_2, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+else:
+ original_model_2,_,_,_,final_features_2 = resnet_v1(input=model_input_2, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+
+
+
+
+
+##### Load model 1 #####
+#Whether use target attack for adversarial training
+if FLAGS.use_target_1==False:
+ is_target_1 = ''
+else:
+ is_target_1 = 'target'
+
+if FLAGS.use_advtrain_1==True:
+ dirr_1 = 'advtrained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain_1 = '_'+is_target_1+FLAGS.attack_method_for_advtrain_1
+ adv_ratio_name_1 = '_advratio'+str(FLAGS.adv_ratio_1)
+ mean_var_1 = int(FLAGS.mean_var_1)
+else:
+ dirr_1 = 'trained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain_1 = ''
+ adv_ratio_name_1 = ''
+ mean_var_1 = FLAGS.mean_var_1
+
+if FLAGS.use_MMLDA_1==True:
+ print('Using MMLDA for model 1, the substitute model')
+ new_layer_1 = Lambda(MMLDA_layer_1)
+ predictions_1 = new_layer_1(final_features_1)
+ model_1 = Model(input=model_input_1, output=predictions_1)
+ use_ball_1=''
+ if FLAGS.use_ball_1==False:
+ print('Using softmax function for model 1')
+ use_ball_1='_softmax'
+ filepath_dir_1 = dirr_1+'resnet32v'+str(version)+'_meanvar'+str(mean_var_1) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +attack_method_for_advtrain_1+adv_ratio_name_1+BN_name+name_leaky+name_dense+name_random+use_ball_1+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+else:
+ print('Using softmax loss for model 1')
+ model_1 = original_model_1
+ filepath_dir_1 = dirr_1+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain_1+adv_ratio_name_1+BN_name+name_leaky+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+wrap_ensemble_1 = KerasModelWrapper(model_1, num_class=num_class)
+model_1.load_weights(filepath_dir_1)
+
+
+
+
+##### Load model 2 #####
+#Whether use target attack for adversarial training
+if FLAGS.use_target_2==False:
+ is_target_2 = ''
+else:
+ is_target_2 = 'target'
+
+if FLAGS.use_advtrain_2==True:
+ dirr_2 = 'advtrained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain_2 = '_'+is_target_2+FLAGS.attack_method_for_advtrain_2
+ adv_ratio_name_2 = '_advratio'+str(FLAGS.adv_ratio_2)
+ mean_var_2 = int(FLAGS.mean_var_2)
+else:
+ dirr_2 = 'trained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain_2 = ''
+ adv_ratio_name_2 = ''
+ mean_var_2 = FLAGS.mean_var_2
+
+if FLAGS.use_MMLDA_2==True:
+ print('Using MMLDA for model 2, the original model')
+ new_layer_2 = Lambda(MMLDA_layer_2)
+ predictions_2 = new_layer_2(final_features_2)
+ model_2 = Model(input=model_input_2, output=predictions_2)
+ use_ball_2=''
+ if FLAGS.use_ball_2==False:
+ print('Using softmax function for model 2')
+ use_ball_2='_softmax'
+ filepath_dir_2 = dirr_2+'resnet32v'+str(version)+'_meanvar'+str(mean_var_2) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +attack_method_for_advtrain_2+adv_ratio_name_2+BN_name+name_leaky+name_dense+name_random+use_ball_2+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+else:
+ print('Using softmax loss for model 2')
+ model_2 = original_model_2
+ filepath_dir_2 = dirr_2+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain_2+adv_ratio_name_2+BN_name+name_leaky+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+wrap_ensemble_2 = KerasModelWrapper(model_2, num_class=num_class)
+model_2.load_weights(filepath_dir_2)
+
+
+
+
+
+
+# Initialize the attack method
+if FLAGS.attack_method == 'MadryEtAl':
+ att = attacks.MadryEtAl(wrap_ensemble_1)
+elif FLAGS.attack_method == 'FastGradientMethod':
+ att = attacks.FastGradientMethod(wrap_ensemble_1)
+elif FLAGS.attack_method == 'MomentumIterativeMethod':
+ att = attacks.MomentumIterativeMethod(wrap_ensemble_1)
+elif FLAGS.attack_method == 'BasicIterativeMethod':
+ att = attacks.BasicIterativeMethod(wrap_ensemble_1)
+
+
+# Consider the attack to be constant
+eval_par = {'batch_size': FLAGS.batch_size}
+
+for eps in range(2):
+ eps_ = (eps+1) * 8
+ print('eps is %d'%eps_)
+ eps_ = eps_ / 256.0
+ if FLAGS.target==False:
+ y_target = None
+ if FLAGS.attack_method == 'FastGradientMethod':
+ att_params = {'eps': eps_,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'y_target': y_target}
+ else:
+ att_params = {'eps': eps_,
+ #'eps_iter': eps_*1.0/FLAGS.num_iter,
+ #'eps_iter': 3.*eps_/FLAGS.num_iter,
+ 'eps_iter': 2. / 256.,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'nb_iter': FLAGS.num_iter,
+ 'y_target': y_target}
+ adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
+ preds = model_2(adv_x)
+ if FLAGS.target==False:
+ acc = model_eval(sess, x_place, y_place, preds, x_test, y_test, args=eval_par)
+ print('adv_acc of model 1 transfer to model 2 is: %.3f' %acc)
+ else:
+ acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test, y_test, y_test_target, args=eval_par)
+ print('adv_acc_target of model 1 transfer to model 2 is: %.3f' %acc)
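
The black-box script above crafts adversarial examples against the substitute `model_1` (via `wrap_ensemble_1`) and then measures how often the original `model_2` still classifies them correctly. Below is a minimal sketch of that transfer protocol, assuming generic `craft_adversarial` and `target_predict` callables (both placeholders, not functions from this file).

```python
import numpy as np

def transfer_accuracy(craft_adversarial, target_predict, x, y_onehot):
    # craft_adversarial: runs the attack graph built on the substitute model (model_1)
    # target_predict:    forward pass of the original model (model_2)
    x_adv = craft_adversarial(x)
    preds = target_predict(x_adv).argmax(axis=-1)
    return float((preds == y_onehot.argmax(axis=-1)).mean())
```
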
diff --git a/case_studies/mmt/advtest_others.py b/case_studies/mmt/advtest_others.py
new file mode 100644
index 0000000..d2cc485
--- /dev/null
+++ b/case_studies/mmt/advtest_others.py
@@ -0,0 +1,422 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+from utils.model import resnet_v1, resnet_v2
+import cleverhans.attacks as attacks
+from cleverhans.utils_tf import model_eval
+from utils.keras_wraper_ensemble import KerasModelWrapper
+from utils.utils_model_eval import model_eval_targetacc, model_eval_for_SPSA, model_eval_for_SPSA_targetacc
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
+tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_integer('version', 2, '')
+tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
+tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
+tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
+tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
+tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_advtrain', True, 'whether use advtraining or normal training')
+tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
+tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
+tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
+tf.app.flags.DEFINE_string('dataset', 'mnist', '')
+tf.app.flags.DEFINE_bool('normalize_output_for_ball', True, 'whether apply softmax in the inference phase')
+tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
+tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
+tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
+
+tf.app.flags.DEFINE_float('CW_confidence', 1.0, 'the confidence for CW-L2 attacks')
+tf.app.flags.DEFINE_float('SPSA_epsilon', 8, 'the eps for SPSA attacks in 256 pixel values')
+
+# Load the dataset
+if FLAGS.dataset=='mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.expand_dims(x_train, axis=3)
+ x_test = np.expand_dims(x_test, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30,40]
+ x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
+
+elif FLAGS.dataset=='cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+elif FLAGS.dataset=='cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+else:
+ print('Unknown dataset')
+
+
+# These parameters are usually fixed
+subtract_pixel_mean = True
+version = FLAGS.version # Model version
+n = 5 # n=5 for resnet-32 v1
+
+# Computed depth from supplied model parameter n
+if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = 256
+
+
+if FLAGS.use_random==True:
+ name_random = '_random'
+else:
+ name_random = ''
+
+if FLAGS.use_leaky==True:
+ name_leaky = '_withleaky'
+else:
+ name_leaky = ''
+
+if FLAGS.use_dense==True:
+ name_dense = ''
+else:
+ name_dense = '_nodense'
+
+
+#Load means in MMLDA
+kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
+mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
+mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
+
+
+#MMLDA prediction function
+def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ if FLAGS.normalize_output_for_ball==False:
+ return logits
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+
+
+# Craft random target labels (each different from the true label) for targeted attacks.
+y_test_target = np.zeros_like(y_test)
+for i in range(y_test.shape[0]):
+ l = np.random.randint(num_class)
+ while l == y_test[i][0]:
+ l = np.random.randint(num_class)
+ y_test_target[i][0] = l
+print('Finish crafting y_test_target!!!!!!!!!!!')
+
+# Input image dimensions.
+input_shape = x_train.shape[1:]
+
+# Normalize data.
+x_train = x_train.astype('float32') / 255
+x_test = x_test.astype('float32') / 255
+
+clip_min = 0.0
+clip_max = 1.0
+# If subtract pixel mean is enabled
+if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0)
+ x_train -= x_train_mean
+ x_test -= x_train_mean
+ clip_min -= x_train_mean
+ clip_max -= x_train_mean
+print (np.min(x_train_mean))
+print (np.max(x_train_mean))
+
+# Convert class vectors to binary class matrices.
+y_train = keras.utils.to_categorical(y_train, num_class)
+y_test_index = np.squeeze(np.copy(y_test).astype('int32'))
+y_test = keras.utils.to_categorical(y_test, num_class)
+y_test_target_index = np.squeeze(np.copy(y_test_target).astype('int32'))
+y_test_target = keras.utils.to_categorical(y_test_target, num_class)
+
+
+# Define input TF placeholder
+y_place = tf.placeholder(tf.float32, shape=(None, num_class))
+y_target = tf.placeholder(tf.float32, shape=(None, num_class))
+sess = tf.Session()
+keras.backend.set_session(sess)
+
+
+model_input = Input(shape=input_shape)
+
+# dim of logits is batch_size x dim_means
+if version == 2:
+ original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+else:
+ original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+
+if FLAGS.use_BN==True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+
+#Whether use target attack for adversarial training
+if FLAGS.use_target==False:
+ is_target = ''
+else:
+ is_target = 'target'
+
+
+if FLAGS.use_advtrain==True:
+ dirr = 'advtrained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain = '_'+is_target+FLAGS.attack_method_for_advtrain
+ adv_ratio_name = '_advratio'+str(FLAGS.adv_ratio)
+ mean_var = int(FLAGS.mean_var)
+else:
+ dirr = 'trained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain = ''
+ adv_ratio_name = ''
+ mean_var = FLAGS.mean_var
+
+
+if FLAGS.use_MMLDA==True:
+ print('Using MMLDA')
+ new_layer = Lambda(MMLDA_layer)
+ predictions = new_layer(final_features)
+ model = Model(input=model_input, output=predictions)
+ use_ball_=''
+ if FLAGS.use_ball==False:
+ print('Using softmax function')
+ use_ball_='_softmax'
+ filepath_dir = dirr+'resnet32v'+str(version)+'_meanvar'+str(mean_var) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+name_dense+name_random+use_ball_+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+else:
+ print('Using softmax loss')
+ model = original_model
+ filepath_dir = dirr+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+
+
+wrap_ensemble = KerasModelWrapper(model, num_class=num_class)
+
+
+model.load_weights(filepath_dir)
+
+# Initialize the attack method
+if FLAGS.attack_method == 'SaliencyMapMethod':
+ num_samples = 100
+ eval_par = {'batch_size': 1}
+ att = attacks.SaliencyMapMethod(wrap_ensemble, sess=sess)
+ att_params = {
+ 'theta': 1.,
+ 'gamma': 0.1,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ }
+ adv_x = att.generate(x_place, **att_params)
+elif FLAGS.attack_method == 'CarliniWagnerL2': #Update on 2019.3.29
+ num_samples = 500
+ eval_par = {'batch_size': 10}
+ att = attacks.CarliniWagnerL2(wrap_ensemble, sess=sess)
+ if FLAGS.target==False:
+ att_params = {
+ 'batch_size': 10,
+ 'confidence': FLAGS.CW_confidence,
+ 'learning_rate': 5e-3,
+ 'binary_search_steps': 9,
+ 'max_iterations': 1000,
+ 'initial_const': 0.01,
+ 'abort_early': True,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max
+ }
+ else:
+ att_params = {
+ 'batch_size': 10,
+ 'confidence': FLAGS.CW_confidence,
+ 'y_target': y_target,
+ 'learning_rate': 5e-3,
+ 'binary_search_steps': 9,
+ 'max_iterations': 1000,
+ 'initial_const': 0.01,
+ 'abort_early': True,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max
+ }
+ if FLAGS.use_MMLDA == True and FLAGS.use_ball == True:
+ is_MMC = True
+ else:
+ is_MMC = False
+ adv_x = att.generate(x_place, is_MMC=is_MMC, **att_params)
+elif FLAGS.attack_method == 'ElasticNetMethod':
+ num_samples = 1000
+ eval_par = {'batch_size': 100}
+ att = attacks.ElasticNetMethod(wrap_ensemble, sess=sess)
+ att_params = {
+ 'batch_size': 100,
+ 'confidence': 0.1,
+ 'learning_rate': 0.01,
+ 'binary_search_steps': 1,
+ 'max_iterations': 1000,
+ 'initial_const': 1.0,
+ 'beta': 1e-2,
+ 'fista': True,
+ 'decision_rule': 'EN',
+ 'clip_min': clip_min,
+ 'clip_max': clip_max
+ }
+ adv_x = att.generate(x_place, **att_params)
+elif FLAGS.attack_method == 'DeepFool':
+ num_samples = 1000
+ eval_par = {'batch_size': 1}
+ att = attacks.DeepFool(wrap_ensemble, sess=sess)
+ att_params = {
+ 'max_iter': 10,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'nb_candidate': 1
+ }
+ adv_x = att.generate(x_place, **att_params)
+elif FLAGS.attack_method == 'LBFGS':
+ num_samples = 1000
+ eval_par = {'batch_size': 1}
+ att = attacks.LBFGS(wrap_ensemble, sess=sess)
+ clip_min = np.mean(clip_min)
+ clip_max = np.mean(clip_max)
+ att_params = {
+ 'y_target': y_target,
+ 'batch_size': 1,
+ 'binary_search_steps': 1,
+ 'max_iterations': 100,
+ 'initial_const': 1.0,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max
+ }
+ adv_x = att.generate(x_place, **att_params)
+elif FLAGS.attack_method == 'SPSA': #Update on 2019.3.29
+ num_samples = 1000
+ eval_par = {'batch_size': 1}
+ x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))
+ y_index = tf.placeholder(tf.uint8, shape=())
+ if FLAGS.target:
+ y_target_index = tf.placeholder(tf.uint8, shape=())
+ else:
+ y_target_index = None
+ att = attacks.SPSA(wrap_ensemble, sess=sess)
+ if FLAGS.use_MMLDA == True and FLAGS.use_ball == True:
+ is_MMC = True
+ else:
+ is_MMC = False
+ adv_x = att.generate(x, y_index, y_target=y_target_index, epsilon=FLAGS.SPSA_epsilon / 256., num_steps=10,
+ is_targeted=FLAGS.target, early_stop_loss_threshold=None,
+ learning_rate=0.01, delta=0.01, batch_size=128, spsa_iters=1,
+ is_debug=False, is_MMC=is_MMC)
+
+
+
+preds = wrap_ensemble.get_probs(adv_x)
+if FLAGS.attack_method == 'LBFGS':
+ print(model_eval_targetacc(
+ sess,
+ x_place,
+ y_place,
+ y_target,
+ preds,
+ x_test[:num_samples],
+ y_test[:num_samples],
+ y_test_target[:num_samples],
+ args=eval_par))
+elif FLAGS.attack_method == 'SPSA':
+ if FLAGS.target==False:
+ acc = model_eval_for_SPSA(
+ sess,
+ x,
+ y_place,
+ y_index,
+ preds,
+ x_test[:num_samples],
+ y_test_index[:num_samples],
+ y_test[:num_samples],
+ args=eval_par)
+ print('adv_acc: %.3f' %acc)
+ else:
+ acc = model_eval_for_SPSA_targetacc(
+ sess,
+ x,
+ y_place,
+ y_index,
+ y_target_index,
+ preds,
+ x_test[:num_samples],
+ y_test_index[:num_samples],
+ y_test[:num_samples],
+ y_test_target_index[:num_samples],
+ args=eval_par)
+ print('adv_acc_target: %.3f' %acc)
+elif FLAGS.attack_method == 'CarliniWagnerL2':
+ l2dis_test = np.zeros((num_samples,))
+ reshape_dis = tf.reshape(x_place - adv_x, shape = [-1, 3072])
+ if FLAGS.target==False:
+ for i in range(int(num_samples/10)):
+ l2dis_test[i*10:(i+1)*10]=sess.run(tf.norm(reshape_dis, ord=2, axis=-1), feed_dict={x_place: x_test[i*10:(i+1)*10], \
+ y_place: y_test[i*10:(i+1)*10]})
+ print('Predict batch for test ', i, ', l2dis_mean is ', np.mean(l2dis_test[i*10:(i+1)*10]))
+ print('Total l2dismean is ',np.mean(l2dis_test))
+ acc = model_eval(sess, x_place, y_place, preds, x_test[:num_samples], y_test[:num_samples], args=eval_par)
+ print('adv_acc: %.3f' %acc)
+ else:
+ for i in range(int(num_samples/10)):
+ l2dis_test[i*10:(i+1)*10]=sess.run(tf.norm(reshape_dis, ord=2, axis=-1), feed_dict={x_place: x_test[i*10:(i+1)*10], \
+ y_place: y_test[i*10:(i+1)*10], y_target: y_test_target[i*10:(i+1)*10]})
+ print('Predict batch for test ', i, ', l2dis_mean is ', np.mean(l2dis_test[i*10:(i+1)*10]))
+ print('Total l2dismean is ',np.mean(l2dis_test))
+ acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test[:num_samples], y_test[:num_samples], y_test_target[:num_samples], args=eval_par)
+ print('adv_acc_target: %.3f' %acc)
+
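
For the CarliniWagnerL2 branch above, the script also reports the mean L2 distortion by flattening the 32x32x3 difference to a 3072-dimensional vector per example. A NumPy equivalent of that per-batch computation is sketched here (names are illustrative only).

```python
import numpy as np

def mean_l2_distortion(x_clean, x_adv):
    diff = (x_clean - x_adv).reshape(len(x_clean), -1)     # (batch, 3072) for CIFAR-10
    return float(np.linalg.norm(diff, ord=2, axis=-1).mean())
```
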
diff --git a/case_studies/mmt/advtest_simple_transform.py b/case_studies/mmt/advtest_simple_transform.py
new file mode 100644
index 0000000..e843333
--- /dev/null
+++ b/case_studies/mmt/advtest_simple_transform.py
@@ -0,0 +1,280 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+from utils.model import resnet_v1, resnet_v2
+import cleverhans.attacks as attacks
+from cleverhans.utils_tf import model_eval
+from utils.keras_wraper_ensemble import KerasModelWrapper
+from utils.utils_model_eval import model_eval_targetacc
+from sklearn.metrics import roc_auc_score
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
+tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('attack_method', 'gaussian', '')
+tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod', '')
+tf.app.flags.DEFINE_integer('version', 2, '')
+tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
+tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
+tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
+tf.app.flags.DEFINE_integer('num_iter', 10, '')
+tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
+tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_advtrain', True, 'whether use advtraining or normal training')
+tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
+tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
+tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
+tf.app.flags.DEFINE_string('dataset', 'mnist', '')
+tf.app.flags.DEFINE_bool('normalize_output_for_ball', True, 'whether apply softmax in the inference phase')
+tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
+tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
+tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
+
+
+
+# Load the dataset
+if FLAGS.dataset=='mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.expand_dims(x_train, axis=3)
+ x_test = np.expand_dims(x_test, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30,40]
+ x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
+
+elif FLAGS.dataset=='cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+elif FLAGS.dataset=='cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100,150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+else:
+ print('Unknown dataset')
+
+
+# These parameters are usually fixed
+subtract_pixel_mean = True
+version = FLAGS.version # Model version
+n = 5 # n=5 for resnet-32 v1
+
+# Computed depth from supplied model parameter n
+if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = 256
+
+
+if FLAGS.use_random==True:
+ name_random = '_random'
+else:
+ name_random = ''
+
+if FLAGS.use_leaky==True:
+ name_leaky = '_withleaky'
+else:
+ name_leaky = ''
+
+if FLAGS.use_dense==True:
+ name_dense = ''
+else:
+ name_dense = '_nodense'
+
+
+#Load means in MMLDA
+kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
+mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
+mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
+
+
+#MMLDA prediction function
+def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ if FLAGS.normalize_output_for_ball==False:
+ return logits
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+
+
+# Craft random target labels (each different from the true label) for targeted attacks.
+y_test_target = np.zeros_like(y_test)
+for i in range(y_test.shape[0]):
+ l = np.random.randint(num_class)
+ while l == y_test[i][0]:
+ l = np.random.randint(num_class)
+ y_test_target[i][0] = l
+print('Finish crafting y_test_target!!!!!!!!!!!')
+
+# Input image dimensions.
+input_shape = x_train.shape[1:]
+
+# Normalize data.
+x_train = x_train.astype('float32') / 255
+x_test = x_test.astype('float32') / 255
+
+clip_min = 0.0
+clip_max = 1.0
+# If subtract pixel mean is enabled
+if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0)
+ x_train -= x_train_mean
+ x_test -= x_train_mean
+ clip_min -= np.max(x_train_mean)
+ clip_max -= np.min(x_train_mean)
+
+# Convert class vectors to binary class matrices.
+y_train = keras.utils.to_categorical(y_train, num_class)
+y_test = keras.utils.to_categorical(y_test, num_class)
+y_test_target = keras.utils.to_categorical(y_test_target, num_class)
+
+
+# Define input TF placeholder
+y_place = tf.placeholder(tf.float32, shape=(None, num_class))
+y_target = tf.placeholder(tf.float32, shape=(None, num_class))
+sess = tf.Session()
+keras.backend.set_session(sess)
+
+
+model_input = Input(shape=input_shape)
+
+# dim of logits is batch_size x dim_means
+if version == 2:
+ original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+else:
+ original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+
+if FLAGS.use_BN==True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+
+# Whether to use a targeted attack for adversarial training
+if FLAGS.use_target==False:
+ is_target = ''
+else:
+ is_target = 'target'
+
+
+if FLAGS.use_advtrain==True:
+ dirr = 'advtrained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain = '_'+is_target+FLAGS.attack_method_for_advtrain
+ adv_ratio_name = '_advratio'+str(FLAGS.adv_ratio)
+ mean_var = int(FLAGS.mean_var)
+else:
+ dirr = 'trained_models/'+FLAGS.dataset+'/'
+ attack_method_for_advtrain = ''
+ adv_ratio_name = ''
+ mean_var = FLAGS.mean_var
+
+
+if FLAGS.use_MMLDA==True:
+ print('Using MMT Training Scheme')
+ new_layer = Lambda(MMLDA_layer)
+ predictions = new_layer(final_features)
+ model = Model(input=model_input, output=predictions)
+ use_ball_=''
+ if FLAGS.use_ball==False:
+ print('Using softmax function (MMLDA)')
+ use_ball_='_softmax'
+ filepath_dir = dirr+'resnet32v'+str(version)+'_meanvar'+str(mean_var) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+name_dense+name_random+use_ball_+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+else:
+ print('Using softmax loss')
+ model = original_model
+ filepath_dir = dirr+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+'/' \
+ +'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
+
+
+wrap_ensemble = KerasModelWrapper(model, num_class=num_class)
+
+
+model.load_weights(filepath_dir)
+
+
+if FLAGS.attack_method == 'Rotation':
+ datagen = ImageDataGenerator(
+ rotation_range=30)
+
+ data_generate=datagen.flow(x_test, y_test, batch_size=100)
+
+ accuracy = 0
+ with sess.as_default():
+ for i in range(10):
+ test_batch = data_generate.next()
+ test_batch_data = test_batch[0]
+ test_batch_label = test_batch[1]
+ correct_preds = tf.equal(tf.argmax(y_place, axis=-1),
+ tf.argmax(model(x_place), axis=-1))
+ cur_corr_preds = correct_preds.eval(feed_dict={x_place: test_batch_data, y_place: test_batch_label})
+ accuracy += cur_corr_preds.sum()
+ print (accuracy)
+ accuracy /= 10.
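+ # 1,000 test samples evaluated in 10 batches of 100; dividing the summed correct
+ # count by 10 therefore reports the accuracy in percent.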
+ print ('Accuracy is: ', accuracy)
+
+elif FLAGS.attack_method == 'Gaussian':
+ accuracy = 0
+ with sess.as_default():
+ for i in range(10):
+ correct_preds = tf.equal(tf.argmax(y_place, axis=-1),
+ tf.argmax(model(x_place+tf.random_normal([100,32,32,3],mean=0.0,stddev=0.05)), axis=-1))
+ cur_corr_preds = correct_preds.eval(feed_dict={x_place: x_test[i*100:(i+1)*100], y_place: y_test[i*100:(i+1)*100]})
+ accuracy += cur_corr_preds.sum()
+ print (accuracy)
+ accuracy /= 10.
+ print ('Accuracy is: ', accuracy)
diff --git a/case_studies/mmt/advtrain.py b/case_studies/mmt/advtrain.py
new file mode 100644
index 0000000..949132b
--- /dev/null
+++ b/case_studies/mmt/advtrain.py
@@ -0,0 +1,303 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+from utils.model import resnet_v1, resnet_v2
+
+from utils.keras_wraper_ensemble import KerasModelWrapper
+import cleverhans.attacks as attacks
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 50, '')
+tf.app.flags.DEFINE_integer('mean_var', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+tf.app.flags.DEFINE_integer('version', 2, '')
+tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
+tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether to use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_ball', True, 'whether to use ball loss or softmax loss')
+tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
+tf.app.flags.DEFINE_string('attack_method', 'MadryEtAl', 'the attack used to craft adversarial examples for adversarial training')
+tf.app.flags.DEFINE_bool('use_target', False, 'whether to use a targeted or untargeted attack for adversarial training')
+tf.app.flags.DEFINE_bool('use_BN', True, 'whether to use batch normalization in the network')
+tf.app.flags.DEFINE_bool('use_random', False, 'whether to use random centers or MMLDA centers in the network')
+tf.app.flags.DEFINE_string('dataset', 'mnist', '')
+
+# Load the dataset
+if FLAGS.dataset=='mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.expand_dims(x_train, axis=3)
+ x_test = np.expand_dims(x_test, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30,40]
+elif FLAGS.dataset=='cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100,150]
+elif FLAGS.dataset=='cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100,150]
+else:
+ print('Unknown dataset')
+
+# These parameters are usually fixed
+subtract_pixel_mean = True
+version = FLAGS.version # Model version
+n = 5 # n=5 for resnet-32 v1
+
+
+# Computed depth from supplied model parameter n
+if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = 256
+
+if FLAGS.use_random==True:
+ name_random = '_random'
+else:
+ name_random = ''
+
+#Load means in MMLDA
+kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
+mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
+mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
+
+
+# Input image dimensions.
+input_shape = x_train.shape[1:]
+
+# Normalize data.
+x_train = x_train.astype('float32') / 255
+x_test = x_test.astype('float32') / 255
+
+# If subtract pixel mean is enabled
+clip_min = 0.0
+clip_max = 1.0
+if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0)
+ x_train -= x_train_mean
+ x_test -= x_train_mean
+ clip_min -= x_train_mean
+ clip_max -= x_train_mean
+
+# Convert class vectors to binary class matrices.
+y_train = keras.utils.to_categorical(y_train, num_class)
+y_test = keras.utils.to_categorical(y_test, num_class)
+
+
+def dot_loss(y_true, y_pred):
+ return - tf.reduce_sum(y_pred * y_true, axis=-1) #batch_size X 1
+
+#MMLDA prediction function
+def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ return logits
+ else:
+ logits = logits - tf.reduce_max(logits, axis=-1, keepdims=True) # subtract the max for numerical stability
+ logits = logits - tf.log(tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)) # log-softmax via log-sum-exp
+ return logits
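+# Note (added comment): with use_ball=True, dot_loss applied to these logits equals the squared
+# distance between the features and the true-class mean (the MMC center loss); with
+# use_ball=False the outputs are log-softmax values, so dot_loss reduces to cross-entropy.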
+
+
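+# Step learning-rate schedule: the base lr is scaled by 0.1 after epochs_inter[0] epochs
+# and by 0.01 after epochs_inter[1] epochs.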
+def lr_schedule(epoch):
+ lr = FLAGS.lr
+ if epoch > epochs_inter[1]:
+ lr *= 1e-2
+ elif epoch > epochs_inter[0]:
+ lr *= 1e-1
+ print('Learning rate: ', lr)
+ return lr
+
+
+model_input = Input(shape=input_shape)
+
+#dim of logits is batch_size x dim_means
+if version == 2:
+ original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, use_BN=FLAGS.use_BN)
+else:
+ original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, use_BN=FLAGS.use_BN)
+
+if FLAGS.use_BN==True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+# Whether to use a targeted attack for adversarial training
+if FLAGS.use_target==False:
+ is_target = ''
+ y_target = None
+else:
+ is_target = 'target'
+ y_target = tf.multinomial(tf.ones((FLAGS.batch_size,num_class)),1) #batch_size x 1
+ y_target = tf.one_hot(tf.reshape(y_target,(FLAGS.batch_size,)),num_class) #batch_size x num_class
+
+
+if FLAGS.use_MMLDA==True:
+ print('Using MMT Training Scheme')
+ new_layer = Lambda(MMLDA_layer)
+ predictions = new_layer(final_features)
+ model = Model(input=model_input, output=predictions)
+ use_ball_=''
+ train_loss = dot_loss
+ if FLAGS.use_ball==False:
+ print('Using softmax function (MMLDA)')
+ use_ball_='_softmax'
+ filepath_dir = 'advtrained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_meanvar'+str(FLAGS.mean_var) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +'_'+is_target+FLAGS.attack_method \
+ +'_advratio'+str(FLAGS.adv_ratio)+BN_name+name_random \
+ +use_ball_
+else:
+ print('Using softmax loss')
+ model = original_model
+ train_loss = keras.losses.categorical_crossentropy
+ filepath_dir = 'advtrained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+'_'+is_target+FLAGS.attack_method+'_advratio'+str(FLAGS.adv_ratio)+BN_name
+
+wrap_ensemble = KerasModelWrapper(model, num_class=num_class)
+
+
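+# L-infinity budget for adversarial training on [0, 1]-scaled inputs; 8/256 is (up to the
+# 255-vs-256 convention) the common 8/255 CIFAR-10 perturbation budget.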
+eps = 8. / 256.
+if FLAGS.attack_method == 'MadryEtAl':
+ print('apply '+is_target+'PGD'+' for advtrain')
+ att = attacks.MadryEtAl(wrap_ensemble)
+ att_params = {
+ 'eps': eps,
+ #'eps_iter': 3.*eps/10.,
+ 'eps_iter': 2. / 256.,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'nb_iter': 10,
+ 'y_target': y_target
+ }
+elif FLAGS.attack_method == 'MomentumIterativeMethod':
+ print('apply '+is_target+'MIM'+' for advtrain')
+ att = attacks.MomentumIterativeMethod(wrap_ensemble)
+ att_params = {
+ 'eps': eps,
+ #'eps_iter': 3.*eps/10.,
+ 'eps_iter': 2. / 256.,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'nb_iter': 10,
+ 'y_target': y_target
+ }
+elif FLAGS.attack_method == 'FastGradientMethod':
+ print('apply '+is_target+'FGSM'+' for advtrain')
+ att = attacks.FastGradientMethod(wrap_ensemble)
+ att_params = {'eps': eps,
+ 'clip_min': clip_min,
+ 'clip_max': clip_max,
+ 'y_target': y_target}
+
+adv_x = tf.stop_gradient(att.generate(model_input, **att_params))
+adv_output = model(adv_x)
+normal_output = model(model_input)
+
+
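+# Mixed adversarial training objective: a convex combination of the loss on clean inputs and
+# the loss on adversarial examples, weighted by adv_ratio.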
+def adv_train_loss(_y_true, _y_pred):
+ return (1-FLAGS.adv_ratio) * train_loss(_y_true, normal_output) + FLAGS.adv_ratio * train_loss(_y_true, adv_output)
+
+
+if FLAGS.optimizer=='Adam':
+ model.compile(
+ loss=adv_train_loss,
+ optimizer=Adam(lr=lr_schedule(0)),
+ metrics=['accuracy'])
+ print('Using Adam optimizer')
+elif FLAGS.optimizer=='mom':
+ model.compile(
+ loss=adv_train_loss,
+ optimizer=SGD(lr=lr_schedule(0), momentum=0.9),
+ metrics=['accuracy'])
+ print('Using momentum optimizer')
+model.summary()
+
+
+# Prepare the model saving directory.
+save_dir = os.path.join(os.getcwd(), filepath_dir)
+model_name = 'model.{epoch:03d}.h5'
+if not os.path.isdir(save_dir):
+ os.makedirs(save_dir)
+filepath = os.path.join(save_dir, model_name)
+
+# Prepare callbacks for model saving and for learning rate adjustment.
+checkpoint = ModelCheckpoint(
+ filepath=filepath, monitor='val_loss', mode='min', verbose=2, save_best_only=False, save_weights_only=True, period=5)
+
+lr_scheduler = LearningRateScheduler(lr_schedule)
+
+
+callbacks = [checkpoint, lr_scheduler]
+
+
+
+
+# Run training, with data augmentation.
+
+print('Using real-time data augmentation.')
+# This will do preprocessing and realtime data augmentation:
+datagen = ImageDataGenerator(
+ # epsilon for ZCA whitening
+ zca_epsilon=1e-06,
+ # randomly shift images horizontally
+ width_shift_range=0.1,
+ # randomly shift images vertically
+ height_shift_range=0.1,
+ # set mode for filling points outside the input boundaries
+ fill_mode='nearest',
+ # randomly flip images
+ horizontal_flip=True)
+
+# Compute quantities required for featurewise normalization
+datagen.fit(x_train)
+
+# Fit the model on the batches generated by datagen.flow().
+model.fit_generator(
+ datagen.flow(x_train, y_train, batch_size=FLAGS.batch_size),
+ validation_data=(x_test, y_test),
+ epochs=epochs,
+ verbose=2,
+ workers=4,
+ callbacks=callbacks)
diff --git a/case_studies/mmt/binarization_test.sh b/case_studies/mmt/binarization_test.sh
new file mode 100644
index 0000000..1f4195c
--- /dev/null
+++ b/case_studies/mmt/binarization_test.sh
@@ -0,0 +1,43 @@
+nsamples=${1:-512}
+epsilon=${2:-8}
+
+kwargs=""
+kwargs="--sample_from_corners=True"
+
+echo "#samples: $nsamples"
+echo "epsilon: $epsilon"
+echo "kwargs: $kwargs"
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Original attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$(pwd) venv3.8tf/bin/python case_studies/mmt/binarization_test_iterative.py --mean_var=10 --batch_size=50 \
+ --attack_method_for_advtrain='MadryEtAl' \
+ --dataset=cifar10 --use_ball=True \
+ --use_MMLDA=True --use_advtrain=False --epoch=200 \
+ --use_BN=True --normalize_output_for_ball=False --use_random=False \
+ --adv_ratio=1.0 --use_target=False \
+ --checkpoint=checkpoints/mmt_mmc_rn110.h5 \
+ --n_samples=$nsamples \
+ --attack_method='MadryEtAl' \
+ --num_iter=50 \
+ --epsilon=$epsilon \
+ $kwargs
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$(pwd) venv3.8tf/bin/python case_studies/mmt/binarization_test_iterative.py --mean_var=10 --batch_size=50 \
+ --attack_method_for_advtrain='MadryEtAl' \
+ --dataset=cifar10 --use_ball=True \
+ --use_MMLDA=True --use_advtrain=False --epoch=200 \
+ --use_BN=True --normalize_output_for_ball=False --use_random=False \
+ --adv_ratio=1.0 --use_target=False \
+ --checkpoint=checkpoints/mmt_mmc_rn110.h5 \
+ --n_samples=$nsamples \
+ --attack_method='Adaptive' \
+ --num_iter=50 \
+ --epsilon=$epsilon \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/mmt/binarization_test_iterative.py b/case_studies/mmt/binarization_test_iterative.py
new file mode 100644
index 0000000..9a134a3
--- /dev/null
+++ b/case_studies/mmt/binarization_test_iterative.py
@@ -0,0 +1,566 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import logging
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import pdb
+from functools import partial
+
+import keras
+import torch
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, \
+ ReduceLROnPlateau
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+
+from torch.utils.data import DataLoader
+
+from active_tests.decision_boundary_binarization import LogitRescalingType
+from active_tests.decision_boundary_binarization import \
+ _train_logistic_regression_classifier
+from active_tests.decision_boundary_binarization import format_result
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack
+from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
+from utils import build_dataloader_from_arrays
+from mmt_utils.model import resnet_v1, resnet_v2
+import cleverhans.attacks as attacks
+from cleverhans.utils_tf import model_eval
+from mmt_utils.keras_wraper_ensemble import KerasModelWrapper
+from mmt_utils.utils_model_eval import model_eval_targetacc
+from sklearn.metrics import roc_auc_score
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def main():
+ tf.app.flags.DEFINE_integer('epsilon', 8, 'attack radius')
+ tf.app.flags.DEFINE_integer('n_inner_points', 999, '')
+ tf.app.flags.DEFINE_integer('n_boundary_points', 1, '')
+ tf.app.flags.DEFINE_integer('n_samples', 512, '')
+
+ tf.app.flags.DEFINE_integer('batch_size', 512, 'batch_size for attack')
+ tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+ tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
+ tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
+ tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod',
+ '')
+ tf.app.flags.DEFINE_integer('version', 2, '')
+ tf.app.flags.DEFINE_bool('use_target', False,
+ 'whether to use a targeted or untargeted attack for adversarial training')
+ tf.app.flags.DEFINE_integer('num_iter', 10, '')
+ tf.app.flags.DEFINE_bool('use_ball', True, 'whether to use ball loss or softmax')
+ tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether to use MMLDA or softmax')
+ tf.app.flags.DEFINE_bool('use_advtrain', True,
+ 'whether to use adversarial training or normal training')
+ tf.app.flags.DEFINE_float('adv_ratio', 1.0,
+ 'the ratio of adversarial examples in each mini-batch')
+ tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of the model to load')
+ tf.app.flags.DEFINE_bool('use_BN', True,
+ 'whether to use batch normalization in the network')
+ tf.app.flags.DEFINE_string('dataset', 'mnist', '')
+ tf.app.flags.DEFINE_bool('normalize_output_for_ball', True,
+ 'whether to apply softmax in the inference phase')
+ tf.app.flags.DEFINE_bool('use_random', False,
+ 'whether to use random centers or MMLDA centers in the network')
+ tf.app.flags.DEFINE_bool('use_dense', True,
+ 'whether to use an extra dense layer in the network')
+ tf.app.flags.DEFINE_bool('use_leaky', False,
+ 'whether to use leaky ReLU in the network')
+
+ tf.app.flags.DEFINE_string('checkpoint', None, '')
+
+ # For calculating AUC scores
+ tf.app.flags.DEFINE_bool('is_calculate_auc', False,
+ 'whether to calculate AUC scores')
+ tf.app.flags.DEFINE_bool('is_auc_metric_softmax_for_MMC', False,
+ 'whether to use softmax to calculate AUC metrics for MMC')
+
+ tf.app.flags.DEFINE_bool('sample_from_corners', False, '')
+
+ run_test()
+
+
+# MMLDA prediction function
+def MMLDA_layer(x, means, num_class, use_ball,
+ normalize_output_for_ball=None):
+ if normalize_output_for_ball is None:
+ normalize_output_for_ball = FLAGS.normalize_output_for_ball
+
+ # x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x, axis=1),
+ [1, num_class, 1]) # batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means, axis=0) # 1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand),
+ axis=-1) # batch_size X num_class
+ if use_ball == True:
+ if normalize_output_for_ball == False:
+ return logits
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+ else:
+ return tf.nn.softmax(logits, axis=-1)
+
+
+def setup_model_and_load_data():
+ # Load the dataset
+ if FLAGS.dataset == 'mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.expand_dims(x_train, axis=3)
+ x_test = np.expand_dims(x_test, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30, 40]
+ x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
+
+ elif FLAGS.dataset == 'cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100, 150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+ elif FLAGS.dataset == 'cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100, 150]
+ x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
+
+ else:
+ print('Unknown dataset')
+
+ # These parameters are usually fixed
+ subtract_pixel_mean = True
+ version = FLAGS.version # Model version
+ n = 18 # n=5 for resnet-32 v1, n=18 for Resnet110 (according to README.md)
+
+ # Computed depth from supplied model parameter n
+ if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+ elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = 256
+
+ if FLAGS.use_random == True:
+ name_random = '_random'
+ else:
+ name_random = ''
+
+ if FLAGS.use_leaky == True:
+ name_leaky = '_withleaky'
+ else:
+ name_leaky = ''
+
+ if FLAGS.use_dense == True:
+ name_dense = ''
+ else:
+ name_dense = '_nodense'
+
+ # Load means in MMLDA
+ kernel_dict = loadmat(
+ 'case_studies/mmt/kernel_paras/meanvar1_featuredim' + str(
+ feature_dim) + '_class' + str(
+ num_class) + name_random + '.mat')
+ mean_logits = kernel_dict['mean_logits'] # num_class X num_dense
+ mean_logits = FLAGS.mean_var * tf.constant(mean_logits, dtype=tf.float32)
+
+
+ # Input image dimensions.
+ input_shape = x_train.shape[1:]
+
+ # Normalize data.
+ x_train = x_train.astype('float32') / 255
+ x_test = x_test.astype('float32') / 255
+
+ # clip_min = 0.0
+ # clip_max = 1.0
+ # If subtract pixel mean is enabled
+ if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0, keepdims=True)
+ # x_train -= x_train_mean
+ # x_test -= x_train_mean
+ # clip_min -= np.max(x_train_mean)
+ # clip_max -= np.min(x_train_mean)
+
+ # Convert class vectors to binary class matrices.
+ y_train = keras.utils.to_categorical(y_train, num_class)
+ y_test = keras.utils.to_categorical(y_test, num_class)
+
+ # Define input TF placeholder
+ y_place = tf.placeholder(tf.float32, shape=(None, num_class))
+ sess = tf.Session()
+ keras.backend.set_session(sess)
+
+ model_input = Input(shape=input_shape)
+
+ if subtract_pixel_mean:
+ normalized_model_input = Lambda(lambda x: x - x_train_mean)(model_input)
+ else:
+ normalized_model_input = model_input
+
+ # preprocessed_input =
+
+ # dim of logits is batch_size x dim_means
+ if version == 2:
+ original_model, _, _, _, final_features = resnet_v2(
+ immediate_input=normalized_model_input, input=model_input, depth=depth,
+ num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense,
+ use_leaky=FLAGS.use_leaky)
+ else:
+ original_model, _, _, _, final_features = resnet_v1(
+ immediate_input=normalized_model_input, input=model_input, depth=depth,
+ num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense,
+ use_leaky=FLAGS.use_leaky)
+
+ if FLAGS.use_BN == True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+ else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+ # Whether to use a targeted attack for adversarial training
+ if FLAGS.use_target == False:
+ is_target = ''
+ else:
+ is_target = 'target'
+
+ if FLAGS.use_advtrain == True:
+ dirr = 'advtrained_models/' + FLAGS.dataset + '/'
+ attack_method_for_advtrain = '_' + is_target + FLAGS.attack_method_for_advtrain
+ adv_ratio_name = '_advratio' + str(FLAGS.adv_ratio)
+ mean_var = int(FLAGS.mean_var)
+ else:
+ dirr = 'trained_models/' + FLAGS.dataset + '/'
+ attack_method_for_advtrain = ''
+ adv_ratio_name = ''
+ mean_var = FLAGS.mean_var
+
+ if FLAGS.use_MMLDA == True:
+ print('Using MMLDA')
+ new_layer = Lambda(partial(MMLDA_layer, means=mean_logits,
+ num_class=num_class, use_ball=FLAGS.use_ball))
+ predictions = new_layer(final_features)
+ model = Model(input=model_input, output=predictions)
+ else:
+ print('Using softmax loss')
+ model = original_model
+
+ model.load_weights(FLAGS.checkpoint)
+
+ return (model, model_input, final_features, mean_logits), \
+ x_place, y_place, sess, (x_test, y_test)
+
+
+def run_attack(m, l, kwargs, preds, x_adv, x_ph, y_ph, sess):
+ del kwargs
+ del m
+
+ for x, y in l:
+ x = x.numpy().transpose(0, 2, 3, 1)
+ y = y.numpy()
+
+ y_oh = keras.utils.to_categorical(y, 2)
+
+ x_adv_np, logits = sess.run((x_adv, preds), {x_ph: x, y_ph: y_oh})
+ y_pred = logits.argmax(-1)
+
+ print(logits)
+
+ is_adv = y_pred != y
+
+ x_adv_np = x_adv_np.transpose((0, 3, 1, 2))
+
+ return is_adv, (torch.tensor(x_adv_np, dtype=torch.float32),
+ torch.tensor(logits, dtype=torch.float32)
+ )
+
+
+def train_classifier(
+ n_features: int,
+ train_loader: DataLoader,
+ raw_train_loader: DataLoader,
+ logits: torch.Tensor,
+ device: str,
+ rescale_logits: LogitRescalingType,
+ linear_layer,
+ clean_preds,
+ x_ph,
+ sess,
+ binarized_model_wrapper
+):
+ # del raw_train_loader
+ assert rescale_logits is None
+
+ cls = _train_logistic_regression_classifier(
+ n_features,
+ train_loader,
+ logits if logits is not None else None,
+ "sklearn",
+ 20000,
+ device,
+ n_classes=2,
+ rescale_logits=rescale_logits,
+ solution_goodness="perfect",
+ class_weight={0: 1, 1:5}
+ )
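+ # class_weight puts 5x more weight on the boundary class (label 1), matching the check
+ # further below that every boundary sample must be classified correctly.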
+
+ clw = cls.weight.data.detach().numpy()
+ clb = cls.bias.data.detach().numpy()
+
+ # since the first two MMT class means (rows of mean_logits) look roughly like this:
+ # 1: (10, 0, ..., 0)
+ # 2: (-1, 9.9, 0, ..., 0)
+ # we can easily construct a weight matrix that remaps the feature space to
+ # these two vectors
+ nw = np.zeros((256, 256))
+ nb = np.zeros(256)
+ nw[:2] = clw
+ nb[:2] = clb
+ linear_layer.set_weights((nw.T, nb))
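+ # With these weights, the Dense(256) layer maps the features so that only the first two
+ # output dimensions (which the two retained means span) carry the logistic-regression
+ # weights; the 2-class MMLDA head should then approximately reproduce the decision of the
+ # logistic regression classifier trained above.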
+
+ # now test
+ n_correct_inner = 0
+ n_correct_outer = 0
+ n_total_inner = 0
+ n_total_outer = 0
+ for x, y in raw_train_loader:
+ x = x.numpy().transpose((0, 2, 3, 1))
+ y = y.numpy()
+
+ logits = sess.run(clean_preds, {x_ph: x})
+ y_pred = logits.argmax(-1)
+ is_correct = y_pred == y
+
+ n_correct_inner += is_correct[y == 0].sum()
+ n_correct_outer += is_correct[y == 1].sum()
+ n_total_inner += (y == 0).sum()
+ n_total_outer += (y == 1).sum()
+
+ accuracy_inner = n_correct_inner / n_total_inner
+ accuracy_outer = n_correct_outer / n_total_outer
+ if accuracy_outer != 1.0:
+ raise RuntimeError(f"Solver failed to find solution that perfectly detects boundary samples {accuracy_outer}")
+ if accuracy_inner == 0:
+ raise RuntimeError(f"Solver failed to find solution that detects (at least some) inner samples {accuracy_inner}")
+
+ return binarized_model_wrapper
+
+
+def setup_binarized_model(sess, model_input, final_features, mean_logits):
+ assert FLAGS.use_ball
+
+ # means_ph = tf.placeholder(tf.float32, shape=[2, mean_logits.shape[1]])
+ # means = tf.Variable(np.zeros([2, mean_logits.shape[1]], dtype=np.float32),
+ # name="binarized_model_means")
+ # set_means = means.assign(means_ph)
+
+ new_layer = Lambda(partial(MMLDA_layer, means=mean_logits[:2],
+ num_class=2, use_ball=FLAGS.use_ball,
+ normalize_output_for_ball=False))
+ linear_layer = Dense(256)
+ transformed_features = linear_layer(final_features)
+ predictions = new_layer(transformed_features)
+ model = Model(input=model_input, output=predictions)
+
+ # will be used by binarization test to eval the model
+ binarized_model_wrapper = BinarizedModelWrapper(model_input, predictions, sess)
+
+ return model, linear_layer, binarized_model_wrapper
+
+
+class BinarizedModelWrapper:
+ def __init__(self, input, output, sess):
+ self.input = input
+ self.output = output
+ self.sess = sess
+
+ def __call__(self, x):
+ return_torch = False
+ if isinstance(x, torch.Tensor):
+ x = x.cpu().numpy()
+ return_torch = True
+ if isinstance(x, np.ndarray):
+ if x.shape[1] == 3:
+ x = x.transpose(0, 2, 3, 1)
+
+ out = self.sess.run(self.output, {self.input: x})
+ if return_torch:
+ out = torch.tensor(out, dtype=torch.float32)
+ return out
+
+
+def run_test():
+ (model, model_input, final_features, mean_logits), \
+ x_place, y_place, sess, (x_test, y_test) = \
+ setup_model_and_load_data()
+
+ del y_place
+ y_place = tf.placeholder(tf.float32, shape=(None, 2))
+
+ binarized_model, linear_layer, binarized_model_wrapper = \
+ setup_binarized_model(
+ sess,
+ model_input,
+ final_features,
+ mean_logits)
+
+
+ bin_clean_preds = binarized_model(x_place)
+ clean_preds = model(x_place)
+ wrap_ensemble = KerasModelWrapper(binarized_model, num_class=2, binarized_model=True)
+
+ # Initialize the attack method
+ if FLAGS.attack_method == 'MadryEtAl':
+ att = attacks.MadryEtAl(wrap_ensemble)
+ elif FLAGS.attack_method == 'FastGradientMethod':
+ att = attacks.FastGradientMethod(wrap_ensemble)
+ elif FLAGS.attack_method == 'MomentumIterativeMethod':
+ att = attacks.MomentumIterativeMethod(wrap_ensemble)
+ elif FLAGS.attack_method == 'BasicIterativeMethod':
+ att = attacks.BasicIterativeMethod(wrap_ensemble)
+ elif FLAGS.attack_method == "Adaptive":
+ from adaptive_attack import FeatureSpaceProjectedGradientDescent
+ att = FeatureSpaceProjectedGradientDescent(wrap_ensemble, logit_means=mean_logits,
+ projection="l2")
+
+ # Consider the attack to be constant
+ eval_par = {'batch_size': FLAGS.batch_size}
+
+ # TODO: shouldn't this be a 255?
+ eps_ = FLAGS.epsilon / 256.0
+ print("Epsilon:", eps_)
+
+ y_target = None
+ if FLAGS.attack_method == 'FastGradientMethod':
+ att_params = {
+ 'eps': eps_,
+ 'clip_min': 0,
+ 'clip_max': 1,
+ 'y_target': y_target
+ }
+ else:
+ att_params = {
+ 'eps': eps_,
+ # 'eps_iter': eps_*1.0/FLAGS.num_iter,
+ # 'eps_iter': 3.*eps_/FLAGS.num_iter,
+ 'eps_iter': 2. / 256.,
+ 'clip_min': 0,
+ 'clip_max': 1,
+ 'nb_iter': FLAGS.num_iter,
+ 'y_target': y_target
+ }
+ if FLAGS.attack_method == "Adaptive":
+ att_params["y"] = y_place
+ att_params['eps_iter'] = 0.03 / 256.
+
+ print("att_params", att_params)
+ if FLAGS.attack_method != "Adaptive":
+ import cleverhans.attacks
+ from fgm_patched import fgm_patched
+ cleverhans.attacks.fgm = fgm_patched
+ print("patched fgm function")
+
+ adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
+ bin_adv_preds = binarized_model(adv_x)
+
+ def _model_forward_pass(x_np, features_only=False, features_and_logits=False):
+ x_np = np.transpose(x_np, (0, 2, 3, 1))
+
+ if features_only:
+ f = sess.run(final_features, {model_input: x_np})
+
+ return f
+ elif features_and_logits:
+ f, l = sess.run((final_features,
+ clean_preds), {model_input: x_np})
+ f = np.stack(f, 1)
+ return f, l
+ else:
+ l = sess.run(clean_preds, {model_input: x_np})
+ return l
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=_model_forward_pass,
+ logit_forward_and_backward_pass=None
+ )
+
+ test_loader = build_dataloader_from_arrays(x_test.transpose((0, 3, 1, 2)),
+ y_test,
+ batch_size=FLAGS.batch_size)
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
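+ # The binarization test replaces the original multi-class problem with many binary
+ # discrimination problems around individual test samples; the attack defined above is then
+ # asked to flip the prediction of each induced binary classifier
+ # (see active_tests/decision_boundary_binarization.py).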
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=partial(run_attack, preds=bin_adv_preds, sess=sess, x_ph=x_place,
+ y_ph=y_place, x_adv=adv_x),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=eps_,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=FLAGS.n_boundary_points,
+ n_inner_points=FLAGS.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=FLAGS.n_samples,
+ device="cpu",
+ batch_size=FLAGS.batch_size,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ train_classifier_fn=partial(
+ train_classifier,
+ linear_layer=linear_layer,
+ clean_preds=bin_clean_preds,
+ x_ph=x_place,
+ sess=sess,
+ binarized_model_wrapper=binarized_model_wrapper
+ ),
+ fail_on_exception=False,
+ rescale_logits=None,
+ sample_training_data_from_corners=FLAGS.sample_from_corners,
+ # decision_boundary_closeness=0.9999,
+ )
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ FLAGS.n_samples))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/mmt/fgm_patched.py b/case_studies/mmt/fgm_patched.py
new file mode 100644
index 0000000..0f2c130
--- /dev/null
+++ b/case_studies/mmt/fgm_patched.py
@@ -0,0 +1,102 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cleverhans.attacks import optimize_linear
+from cleverhans import utils_tf
+import numpy as np
+import tensorflow as tf
+
+
+def fgm_patched(x,
+ logits,
+ y=None,
+ eps=0.3,
+ ord=np.inf,
+ clip_min=None,
+ clip_max=None,
+ targeted=False,
+ sanity_checks=True):
+ """
+ TensorFlow implementation of the Fast Gradient Method.
+ :param x: the input placeholder
+ :param logits: output of model.get_logits
+ :param y: (optional) A placeholder for the true labels. If targeted
+ is true, then provide the target label. Otherwise, only provide
+ this parameter if you'd like to use true labels when crafting
+ adversarial samples. Otherwise, model predictions are used as
+ labels to avoid the "label leaking" effect (explained in this
+ paper: https://arxiv.org/abs/1611.01236). Default is None.
+ Labels should be one-hot-encoded.
+ :param eps: the epsilon (input variation parameter)
+ :param ord: (optional) Order of the norm (mimics NumPy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: Minimum float value for adversarial example components
+ :param clip_max: Maximum float value for adversarial example components
+ :param targeted: Is the attack targeted or untargeted? Untargeted, the
+ default, will try to make the label incorrect. Targeted
+ will instead try to move in the direction of being more
+ like y.
+ :return: a tensor for the adversarial example
+ """
+
+ asserts = []
+
+ # If a data range was specified, check that the input was in that range
+ if clip_min is not None:
+ asserts.append(utils_tf.assert_greater_equal(
+ x, tf.cast(clip_min, x.dtype)))
+
+ if clip_max is not None:
+ asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
+
+ # Make sure the caller has not passed probs by accident
+ assert logits.op.type != 'Softmax'
+
+ if y is None:
+ # Using model predictions as ground truth to avoid label leaking
+ preds_max = tf.reduce_max(logits, 1, keepdims=True)
+ y = tf.to_float(tf.equal(logits, preds_max))
+ y = tf.stop_gradient(y)
+ y = y / tf.reduce_sum(y, 1, keepdims=True)
+
+ # Compute loss
+ from cleverhans.compat import softmax_cross_entropy_with_logits
+ #loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
+
+ loss = -tf.reduce_sum(logits * y, axis=-1)
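+ # Patched relative to cleverhans' fgm: the softmax cross-entropy above is replaced by the
+ # negative true-class "logit". For the wrapped MMC/binarized models the logits passed in are
+ # log-probabilities (or negative squared distances), so this presumably gives a more direct,
+ # less saturated gradient signal than stacking another softmax on top.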
+ if targeted:
+ loss = -loss
+
+ # Define gradient of loss wrt input
+ grad, = tf.gradients(loss, x)
+
+ optimal_perturbation = optimize_linear(grad, eps, ord)
+
+ # Add perturbation to original example to obtain adversarial example
+ adv_x = x + optimal_perturbation
+
+ # If clipping is needed, reset all values outside of [clip_min, clip_max]
+ if (clip_min is not None) or (clip_max is not None):
+ # We don't currently support one-sided clipping
+ assert clip_min is not None and clip_max is not None
+ adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
+
+ if sanity_checks:
+ with tf.control_dependencies(asserts):
+ adv_x = tf.identity(adv_x)
+
+ # adv_x = tf.Print(adv_x, [loss, logits, y, grad])
+
+ return adv_x
+
diff --git a/case_studies/mmt/kernel_paras/craft_MMcenters.m b/case_studies/mmt/kernel_paras/craft_MMcenters.m
new file mode 100644
index 0000000..d99e10a
--- /dev/null
+++ b/case_studies/mmt/kernel_paras/craft_MMcenters.m
@@ -0,0 +1,18 @@
+clc
+clear all
+L=10;
+D=256;
+mean_var=1;
+
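+% Construct L unit-norm class means in R^D whose pairwise inner products are all -1/(L-1)
+% (the Max-Mahalanobis / regular-simplex configuration) via a Gram-Schmidt-like recursion:
+% each new mean is chosen to have the prescribed dot product with all previous means and
+% then its next coordinate is set so that it has unit norm.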
+mean_logits=zeros(L,D);
+mean_logits(1,1)=1;
+for k=2:L
+ for j =1:k-1
+ mean_logits(k,j)=-(1/(L-1)+dot(mean_logits(k,:),mean_logits(j,:)))/mean_logits(j,j);
+ end
+ mean_logits(k,k)=sqrt(abs(1-norm(mean_logits(k,:))^2));
+end
+mean_logits=mean_logits*mean_var;
+
+
+save(['meanvar1_featuredim',num2str(D),'_class',num2str(L),'.mat'],'mean_logits')
diff --git a/case_studies/mmt/kernel_paras/meanvar1_featuredim256_class10.mat b/case_studies/mmt/kernel_paras/meanvar1_featuredim256_class10.mat
new file mode 100644
index 0000000..92870e6
Binary files /dev/null and b/case_studies/mmt/kernel_paras/meanvar1_featuredim256_class10.mat differ
diff --git a/case_studies/mmt/kernel_paras/meanvar1_featuredim256_class100.mat b/case_studies/mmt/kernel_paras/meanvar1_featuredim256_class100.mat
new file mode 100644
index 0000000..1e9c24c
Binary files /dev/null and b/case_studies/mmt/kernel_paras/meanvar1_featuredim256_class100.mat differ
diff --git a/case_studies/mmt/kernel_paras/meanvar1_featuredim64_class10.mat b/case_studies/mmt/kernel_paras/meanvar1_featuredim64_class10.mat
new file mode 100644
index 0000000..fb34d00
Binary files /dev/null and b/case_studies/mmt/kernel_paras/meanvar1_featuredim64_class10.mat differ
diff --git a/case_studies/mmt/kernel_paras/meanvar1_featuredim64_class100.mat b/case_studies/mmt/kernel_paras/meanvar1_featuredim64_class100.mat
new file mode 100644
index 0000000..5f8b000
Binary files /dev/null and b/case_studies/mmt/kernel_paras/meanvar1_featuredim64_class100.mat differ
diff --git a/case_studies/mmt/mmt_utils/__init__.py b/case_studies/mmt/mmt_utils/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/mmt/mmt_utils/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/mmt/mmt_utils/craft_MMLDA_means_whenDlessthanL.py b/case_studies/mmt/mmt_utils/craft_MMLDA_means_whenDlessthanL.py
new file mode 100644
index 0000000..14a2f54
--- /dev/null
+++ b/case_studies/mmt/mmt_utils/craft_MMLDA_means_whenDlessthanL.py
@@ -0,0 +1,40 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+
+L = 10 #Number of classes
+d = 2 #Dimension of features
+lr = 0.0001 #Learning rate
+mean_var = 1
+steps = 10000 #optimization steps
+
+z = tf.get_variable("auxiliary_variable", [d, L]) #dxL
+x = z / tf.norm(z, axis=0, keepdims=True) #dxL, normalized in each column
+XTX = tf.matmul(x, x, transpose_a=True) - 2 * tf.eye(L)#LxL, each element is the dot-product of two means, the diag elements are -1
+cost = tf.reduce_max(XTX) #single element
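+# When D < L-1 the exact simplex configuration (all pairwise inner products equal to -1/(L-1))
+# does not exist, so the centers are instead found by minimizing the largest pairwise inner
+# product, which pushes the L unit vectors as far apart as possible on the (D-1)-sphere.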
+opt = tf.train.AdamOptimizer(learning_rate=lr)
+opt_op = opt.minimize(cost)
+with tf.Session() as sess:
+ sess.run(tf.initializers.global_variables())
+ for i in range(steps):
+ _, loss = sess.run([opt_op, cost])
+ min_distance2 = loss
+ print('Step %d, min_distance2: %f'%(i, min_distance2))
+
+ mean_logits = sess.run(x)
+
+mean_logits = mean_var * mean_logits.T
+import scipy.io as sio
+sio.savemat('/MMC/kernel_paras/meanvar1_featuredim'+str(d)+'_class'+str(L)+'.mat', {'mean_logits': mean_logits})
diff --git a/case_studies/mmt/mmt_utils/keras_wraper_ensemble.py b/case_studies/mmt/mmt_utils/keras_wraper_ensemble.py
new file mode 100644
index 0000000..c05f000
--- /dev/null
+++ b/case_studies/mmt/mmt_utils/keras_wraper_ensemble.py
@@ -0,0 +1,181 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Model construction utilities based on keras
+"""
+import warnings
+from distutils.version import LooseVersion
+import keras
+from keras.models import Sequential
+from keras.layers import Dense, Activation, Flatten
+
+from cleverhans.model import Model, NoSuchLayerError
+import tensorflow as tf
+
+
+class KerasModelWrapper(Model):
+ """
+ An implementation of `Model` that wraps a Keras model. It
+ specifically exposes the hidden features of a model by creating new models.
+ The symbolic graph is reused and so there is little overhead. Splitting
+ in-place operations can incur an overhead.
+ """
+
+ # set ch_compatibility_mode=True to use this class with ch3.0.1 (added by AUTHOR)
+ def __init__(self, model, num_class=10, binarized_model=False,
+ ch_compatibility_mode=False):
+ """
+ Create a wrapper for a Keras model
+ :param model: A Keras model
+ """
+ super(KerasModelWrapper, self).__init__()
+
+ if model is None:
+ raise ValueError('model argument must be supplied.')
+
+ self.model = model
+ self.keras_model = None
+ self.num_classes = num_class
+ self.binarized_model=binarized_model
+
+ self.ch_compatibility_mode = ch_compatibility_mode
+
+ def _get_softmax_name(self):
+ """
+ Looks for the name of the softmax layer.
+ :return: Softmax layer name
+ """
+ for layer in self.model.layers:
+ cfg = layer.get_config()
+ if cfg['name'] == 'average_1':
+ return layer.name
+
+ raise Exception("No softmax layers found")
+
+ def _get_logits_name(self):
+ """
+ Looks for the name of the layer producing the logits.
+ :return: name of layer producing the logits
+ """
+ softmax_name = self._get_softmax_name()
+ softmax_layer = self.model.get_layer(softmax_name)
+
+ if not isinstance(softmax_layer, Activation):
+ # In this case, the activation is part of another layer
+ return softmax_name
+
+ if hasattr(softmax_layer, 'inbound_nodes'):
+ warnings.warn(
+ "Please update your version to keras >= 2.1.3; "
+ "support for earlier keras versions will be dropped on "
+ "2018-07-22")
+ node = softmax_layer.inbound_nodes[0]
+ else:
+ node = softmax_layer._inbound_nodes[0]
+
+ logits_name = node.inbound_layers[0].name
+
+ return logits_name
+
+ def get_mmd_features(self, x):
+ outs = self.fprop(x)
+ if self.binarized_model:
+ return outs["dense_3"]
+ else:
+ return outs["dense_1"]
+
+
+ def get_logits(self, x):
+ """
+ :param x: A symbolic representation of the network input.
+ :return: A symbolic representation of the logits
+ """
+ # logits_name = self._get_logits_name()
+ # logits_layer = self.get_layer(x, logits_name)
+
+ # # Need to deal with the case where softmax is part of the
+ # # logits layer
+ # if logits_name == self._get_softmax_name():
+ # softmax_logit_layer = self.get_layer(x, logits_name)
+
+ # # The final op is the softmax. Return its input
+ # logits_layer = softmax_logit_layer._op.inputs[0]
+ prob = self.get_probs(x)
+ if self.ch_compatibility_mode:
+ return prob
+ logits = tf.log(prob)
+ #logits = prob
+ return logits
+
+ def get_probs(self, x):
+ """
+ :param x: A symbolic representation of the network input.
+ :return: A symbolic representation of the probs
+ """
+
+ return self.model(x)
+
+ def get_layer_names(self):
+ """
+ :return: Names of all the layers kept by Keras
+ """
+ layer_names = [x.name for x in self.model.layers]
+ return layer_names
+
+ def fprop(self, x):
+ """
+ Exposes all the layers of the model returned by get_layer_names.
+ :param x: A symbolic representation of the network input
+ :return: A dictionary mapping layer names to the symbolic
+ representation of their output.
+ """
+ from keras.models import Model as KerasModel
+
+ if self.keras_model is None:
+ # Get the input layer
+ new_input = self.model.get_input_at(0)
+
+ # Make a new model that returns each of the layers as output
+ out_layers = [x_layer.output for x_layer in self.model.layers]
+ self.keras_model = KerasModel(new_input, out_layers)
+
+ # and get the outputs for that model on the input x
+ outputs = self.keras_model(x)
+
+ # Keras only returns a list for outputs of length >= 1, if the model
+ # is only one layer, wrap a list
+ if len(self.model.layers) == 1:
+ outputs = [outputs]
+
+ # compute the dict to return
+ fprop_dict = dict(zip(self.get_layer_names(), outputs))
+
+ return fprop_dict
+
+ def get_layer(self, x, layer):
+ """
+ Expose the hidden features of a model given a layer name.
+ :param x: A symbolic representation of the network input
+ :param layer: The name of the hidden layer to return features at.
+ :return: A symbolic representation of the hidden features
+ :raise: NoSuchLayerError if `layer` is not in the model.
+ """
+ # Return the symbolic representation for this layer.
+ output = self.fprop(x)
+ try:
+ requested = output[layer]
+ except KeyError:
+ raise NoSuchLayerError()
+ return requested
\ No newline at end of file
diff --git a/case_studies/mmt/mmt_utils/model.py b/case_studies/mmt/mmt_utils/model.py
new file mode 100644
index 0000000..6868e4f
--- /dev/null
+++ b/case_studies/mmt/mmt_utils/model.py
@@ -0,0 +1,245 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, GlobalAveragePooling2D
+from keras.regularizers import l2
+from keras.models import Model
+
+
+
+
+def resnet_layer(inputs,
+ num_filters=16,
+ kernel_size=3,
+ strides=1,
+ activation='relu',
+ batch_normalization=True,
+ conv_first=True):
+ """2D Convolution-Batch Normalization-Activation stack builder
+ # Arguments
+ inputs (tensor): input tensor from input image or previous layer
+ num_filters (int): Conv2D number of filters
+ kernel_size (int): Conv2D square kernel dimensions
+ strides (int): Conv2D square stride dimensions
+ activation (string): activation name
+ batch_normalization (bool): whether to include batch normalization
+ conv_first (bool): conv-bn-activation (True) or
+ bn-activation-conv (False)
+ # Returns
+ x (tensor): tensor as input to the next layer
+ """
+ conv = Conv2D(
+ num_filters,
+ kernel_size=kernel_size,
+ strides=strides,
+ padding='same',
+ kernel_initializer='he_normal',
+ kernel_regularizer=l2(1e-4))
+
+ x = inputs
+ if conv_first:
+ x = conv(x)
+ if batch_normalization:
+ x = BatchNormalization()(x)
+ if activation is not None:
+ x = Activation(activation)(x)
+ else:
+ if batch_normalization:
+ x = BatchNormalization()(x)
+ if activation is not None:
+ x = Activation(activation)(x)
+ x = conv(x)
+ return x
+
+
+def resnet_v1(immediate_input, input, depth, num_classes=10, num_dims=64, use_BN=True, use_dense=True, use_leaky=False):
+ """ResNet Version 1 Model builder [a]
+ Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
+ Last ReLU is after the shortcut connection.
+ At the beginning of each stage, the feature map size is halved (downsampled)
+ by a convolutional layer with strides=2, while the number of filters is
+ doubled. Within each stage, the layers have the same number of filters and
+ the same feature map sizes.
+ Feature map sizes:
+ stage 0: 32x32, 16
+ stage 1: 16x16, 32
+ stage 2: 8x8, 64
+ The number of parameters is approximately the same as in Table 6 of [a]:
+ ResNet20 0.27M
+ ResNet32 0.46M
+ ResNet44 0.66M
+ ResNet56 0.85M
+ ResNet110 1.7M
+ # Arguments
+ immediate_input (tensor): (possibly preprocessed) tensor fed to the first layer
+ input (tensor): original model input tensor
+ depth (int): number of core convolutional layers
+ num_classes (int): number of classes (CIFAR10 has 10)
+ # Returns
+ model, inputs, outputs, logits, final_features
+ """
+ if (depth - 2) % 6 != 0:
+ raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
+ # Start model definition.
+ num_filters = 16
+ num_res_blocks = int((depth - 2) / 6)
+
+ inputs = immediate_input
+ x = resnet_layer(inputs=inputs, batch_normalization=use_BN)
+ # Instantiate the stack of residual units
+ for stack in range(3):
+ for res_block in range(num_res_blocks):
+ strides = 1
+ if stack > 0 and res_block == 0: # first layer but not first stack
+ strides = 2 # downsample
+ y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides, batch_normalization=use_BN)
+ y = resnet_layer(inputs=y, num_filters=num_filters, activation=None, batch_normalization=use_BN)
+ if stack > 0 and res_block == 0: # first layer but not first stack
+ # linear projection residual shortcut connection to match
+ # changed dims
+ x = resnet_layer(
+ inputs=x,
+ num_filters=num_filters,
+ kernel_size=1,
+ strides=strides,
+ activation=None,
+ batch_normalization=False)
+ x = keras.layers.add([x, y])
+ if use_leaky==True:
+ x = keras.layers.LeakyReLU(alpha=0.1)(x)
+ else:
+ x = Activation('relu')(x)
+ num_filters *= 2
+
+ # Add classifier on top.
+ # v1 does not use BN after last shortcut connection-ReLU
+ x = GlobalAveragePooling2D()(x)
+ #final_features = Flatten()(x)
+ if use_dense==True:
+ final_features = Dense(
+ num_dims, kernel_initializer='he_normal')(x)
+ else:
+ final_features = x
+ logits = Dense(
+ num_classes, kernel_initializer='he_normal')(final_features)
+ outputs = Activation('softmax')(logits)
+
+ # Instantiate model.
+ model = Model(inputs=inputs, outputs=outputs)
+ return model, inputs, outputs, logits, final_features
+
+
+def resnet_v2(immediate_input, input, depth, num_classes=10, num_dims=256, use_BN=True, use_dense=True, use_leaky=False):
+ """ResNet Version 2 Model builder [b]
+ Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
+ bottleneck layer
+ First shortcut connection per layer is 1 x 1 Conv2D.
+ Second and onwards shortcut connection is identity.
+ At the beginning of each stage, the feature map size is halved (downsampled)
+ by a convolutional layer with strides=2, while the number of filter maps is
+ doubled. Within each stage, the layers have the same number of filters and the
+ same feature map sizes.
+ Feature map sizes:
+ conv1 : 32x32, 16
+ stage 0: 32x32, 64
+ stage 1: 16x16, 128
+ stage 2: 8x8, 256
+ # Arguments
+ immediate_input (tensor): (possibly preprocessed) tensor fed to the first layer
+ input (tensor): original model input tensor
+ depth (int): number of core convolutional layers
+ num_classes (int): number of classes (CIFAR10 has 10)
+ # Returns
+ model, input, outputs, logits, final_features
+ """
+ if (depth - 2) % 9 != 0:
+ raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
+ # Start model definition.
+ num_filters_in = 16
+ num_res_blocks = int((depth - 2) / 9)
+
+ inputs = immediate_input
+ # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
+ x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True, batch_normalization=use_BN)
+
+ # Instantiate the stack of residual units
+ for stage in range(3):
+ for res_block in range(num_res_blocks):
+ activation = 'relu'
+ batch_normalization = use_BN
+ strides = 1
+ if stage == 0:
+ num_filters_out = num_filters_in * 4
+ if res_block == 0: # first layer and first stage
+ activation = None
+ batch_normalization = False
+ else:
+ num_filters_out = num_filters_in * 2
+ if res_block == 0: # first layer but not first stage
+ strides = 2 # downsample
+
+ # bottleneck residual unit
+ y = resnet_layer(
+ inputs=x,
+ num_filters=num_filters_in,
+ kernel_size=1,
+ strides=strides,
+ activation=activation,
+ batch_normalization=batch_normalization,
+ conv_first=False)
+ y = resnet_layer(
+ inputs=y, num_filters=num_filters_in, conv_first=False, batch_normalization=use_BN)
+ y = resnet_layer(
+ inputs=y,
+ num_filters=num_filters_out,
+ kernel_size=1,
+ conv_first=False, batch_normalization=use_BN)
+ if res_block == 0:
+ # linear projection residual shortcut connection to match
+ # changed dims
+ x = resnet_layer(
+ inputs=x,
+ num_filters=num_filters_out,
+ kernel_size=1,
+ strides=strides,
+ activation=None,
+ batch_normalization=False)
+ x = keras.layers.add([x, y])
+
+ num_filters_in = num_filters_out
+
+ # Add classifier on top.
+ # v2 has BN-ReLU before Pooling
+ if use_BN:
+ x = BatchNormalization()(x)
+
+ if use_leaky==True:
+ x = keras.layers.LeakyReLU(alpha=0.1)(x)
+ else:
+ x = Activation('relu')(x)
+
+ x = GlobalAveragePooling2D()(x)
+ #final_features = Flatten()(x)
+ if use_dense==True:
+ final_features = Dense(
+ num_dims, kernel_initializer='he_normal')(x)
+ else:
+ final_features = x
+ logits = Dense(
+ num_classes, kernel_initializer='he_normal')(final_features)
+ outputs = Activation('softmax')(logits)
+ # Instantiate model.
+ model = Model(inputs=input, outputs=outputs)
+ return model, input, outputs, logits, final_features
\ No newline at end of file
diff --git a/case_studies/mmt/mmt_utils/utils_model_eval.py b/case_studies/mmt/mmt_utils/utils_model_eval.py
new file mode 100644
index 0000000..a5488de
--- /dev/null
+++ b/case_studies/mmt/mmt_utils/utils_model_eval.py
@@ -0,0 +1,334 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from distutils.version import LooseVersion
+import logging
+import math
+
+
+import numpy as np
+
+import tensorflow as tf
+from cleverhans.utils import batch_indices, _ArgsWrapper, create_logger
+
+_logger = create_logger("cleverhans.utils.tf")
+_logger.setLevel(logging.INFO)
+
+zero = tf.constant(0, dtype=tf.float32)
+num_classes = 10
+log_offset = 1e-20
+det_offset = 1e-6
+
+def ensemble_diversity(y_true, y_pred, num_model):
+ bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero) # batch_size X (num_class X num_models), 2-D
+ mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true) # batch_size X (num_class-1) X num_models, 1-D
+ mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1]) # batch_size X num_model X (num_class-1), 3-D
+ mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True) # batch_size X num_model X (num_class-1), 3-D
+ matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1])) # batch_size X num_model X num_model, 3-D
+ all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0)) # batch_size X 1, 1-D
+ return all_log_det
+
+def model_eval_targetacc(sess, x, y, y_target, predictions, X_test=None, Y_test=None, Y_test_target=None,
+ feed=None, args=None):
+  """
+  Compute the accuracy of a TF model on some data while also feeding the
+  attack target labels (used when evaluating targeted attacks).
+  :param sess: TF session to use
+  :param x: input placeholder
+  :param y: output placeholder (for the true labels)
+  :param y_target: placeholder for the attack target labels
+  :param predictions: model output predictions
+  :param X_test: numpy array with test inputs
+  :param Y_test: numpy array with test labels
+  :param Y_test_target: numpy array with attack target labels
+  :param feed: An optional dictionary that is appended to the feeding
+               dictionary before the session runs. Can be used to feed
+               the learning phase of a Keras model for instance.
+  :param args: dict or argparse `Namespace` object.
+               Should contain `batch_size`
+  :return: a float with the accuracy value
+  """
+ args = _ArgsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test_target is None or Y_test is None:
+    raise ValueError("X_test, Y_test and Y_test_target arguments "
+                     "must be supplied.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ tf.argmax(predictions, axis=-1))
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ tf.argmax(predictions,
+ axis=tf.rank(predictions) - 1))
+
+ # Init result var
+ accuracy = 0.0
+
+ with sess.as_default():
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
+ dtype=X_test.dtype)
+ Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
+ dtype=Y_test.dtype)
+ Y_cur_target = np.zeros((args.batch_size,) + Y_test_target.shape[1:],
+ dtype=Y_test_target.dtype)
+ for batch in range(nb_batches):
+ if batch % 100 == 0 and batch > 0:
+ _logger.debug("Batch " + str(batch))
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+
+ # The last batch may be smaller than all others. This should not
+      # affect the accuracy disproportionately.
+ cur_batch_size = end - start
+ X_cur[:cur_batch_size] = X_test[start:end]
+ Y_cur[:cur_batch_size] = Y_test[start:end]
+ Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
+ feed_dict = {x: X_cur, y: Y_cur, y_target: Y_cur_target}
+ if feed is not None:
+ feed_dict.update(feed)
+ cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
+
+ accuracy += cur_corr_preds[:cur_batch_size].sum()
+
+ assert end >= len(X_test)
+
+ # Divide by number of examples to get final value
+ accuracy /= len(X_test)
+
+ return accuracy
+
+
+def model_eval_for_SPSA_targetacc(sess, x, y, y_index, y_target, predictions, X_test=None, Y_test_index=None, Y_test=None, Y_test_target=None,
+ feed=None, args=None):
+  """
+  Compute the accuracy of a TF model on some data for a SPSA-style attack with
+  target labels. The index/target placeholders are fed a single value per
+  batch, so this helper is typically used with batch_size=1.
+  :param sess: TF session to use
+  :param x: input placeholder
+  :param y: output placeholder (for the true labels)
+  :param y_index: placeholder for the label index of the current example
+  :param y_target: placeholder for the attack target label
+  :param predictions: model output predictions
+  :param X_test: numpy array with test inputs
+  :param Y_test_index: numpy array with test label indices
+  :param Y_test: numpy array with test labels
+  :param Y_test_target: numpy array with attack target labels
+  :param feed: An optional dictionary that is appended to the feeding
+               dictionary before the session runs. Can be used to feed
+               the learning phase of a Keras model for instance.
+  :param args: dict or argparse `Namespace` object.
+               Should contain `batch_size`
+  :return: a float with the accuracy value
+  """
+ args = _ArgsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+  if (X_test is None or Y_test is None or Y_test_index is None
+      or Y_test_target is None):
+    raise ValueError("X_test, Y_test, Y_test_index and Y_test_target arguments "
+                     "must be supplied.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ tf.argmax(predictions, axis=-1))
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ tf.argmax(predictions,
+ axis=tf.rank(predictions) - 1))
+
+ # Init result var
+ accuracy = 0.0
+
+ with sess.as_default():
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
+ dtype=X_test.dtype)
+ Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
+ dtype=Y_test.dtype)
+ Y_cur_target = np.zeros((args.batch_size,) + Y_test_target.shape[1:],
+ dtype=Y_test_target.dtype)
+ for batch in range(nb_batches):
+ print('Sample %d finished'%batch)
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+
+ # The last batch may be smaller than all others. This should not
+      # affect the accuracy disproportionately.
+ cur_batch_size = end - start
+ X_cur[:cur_batch_size] = X_test[start:end]
+ Y_cur[:cur_batch_size] = Y_test[start:end]
+ #Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
+ feed_dict = {x: X_cur, y: Y_cur, y_index: Y_test_index[start], y_target: Y_test_target[start]}
+ if feed is not None:
+ feed_dict.update(feed)
+ cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
+
+ accuracy += cur_corr_preds[:cur_batch_size].sum()
+
+ assert end >= len(X_test)
+
+ # Divide by number of examples to get final value
+ accuracy /= len(X_test)
+
+ return accuracy
+
+def model_eval_for_SPSA(sess, x, y, y_index, predictions, X_test=None, Y_test_index=None, Y_test=None,
+ feed=None, args=None):
+  """
+  Compute the accuracy of a TF model on some data for a SPSA-style attack.
+  The index placeholder is fed a single value per batch, so this helper is
+  typically used with batch_size=1.
+  :param sess: TF session to use
+  :param x: input placeholder
+  :param y: output placeholder (for the true labels)
+  :param y_index: placeholder for the label index of the current example
+  :param predictions: model output predictions
+  :param X_test: numpy array with test inputs
+  :param Y_test_index: numpy array with test label indices
+  :param Y_test: numpy array with test labels
+  :param feed: An optional dictionary that is appended to the feeding
+               dictionary before the session runs. Can be used to feed
+               the learning phase of a Keras model for instance.
+  :param args: dict or argparse `Namespace` object.
+               Should contain `batch_size`
+  :return: a float with the accuracy value
+  """
+ args = _ArgsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None or Y_test_index is None:
+ raise ValueError("X_test argument and Y_test and Y_test_index argument "
+ "must be supplied.")
+
+ # Define accuracy symbolically
+ if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
+ correct_preds = tf.equal(tf.argmax(y, axis=-1),
+ tf.argmax(predictions, axis=-1))
+ else:
+ correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
+ tf.argmax(predictions,
+ axis=tf.rank(predictions) - 1))
+
+ # Init result var
+ accuracy = 0.0
+
+ with sess.as_default():
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
+ dtype=X_test.dtype)
+ Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
+ dtype=Y_test.dtype)
+ for batch in range(nb_batches):
+ print('Sample %d finished'%batch)
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+
+ # The last batch may be smaller than all others. This should not
+      # affect the accuracy disproportionately.
+ cur_batch_size = end - start
+ X_cur[:cur_batch_size] = X_test[start:end]
+ Y_cur[:cur_batch_size] = Y_test[start:end]
+ #Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
+ feed_dict = {x: X_cur, y: Y_cur, y_index: Y_test_index[start]}
+ if feed is not None:
+ feed_dict.update(feed)
+ cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
+
+ accuracy += cur_corr_preds[:cur_batch_size].sum()
+
+ assert end >= len(X_test)
+
+ # Divide by number of examples to get final value
+ accuracy /= len(X_test)
+
+ return accuracy
+
+def get_ensemble_diversity_values(sess, x, y, predictions, number_model, X_test=None, Y_test=None,
+ feed=None, args=None):
+  """
+  Compute the ensemble-diversity (log-determinant) values of a TF model on
+  some data.
+  :param sess: TF session to use
+  :param x: input placeholder
+  :param y: output placeholder (for labels)
+  :param predictions: model output predictions
+  :param number_model: number of ensemble members
+  :param X_test: numpy array with test inputs
+  :param Y_test: numpy array with test labels
+  :param feed: An optional dictionary that is appended to the feeding
+               dictionary before the session runs. Can be used to feed
+               the learning phase of a Keras model for instance.
+  :param args: dict or argparse `Namespace` object.
+               Should contain `batch_size`
+  :return: a numpy array with the per-example ensemble-diversity values
+  """
+ args = _ArgsWrapper(args or {})
+
+ assert args.batch_size, "Batch size was not given in args dict"
+ if X_test is None or Y_test is None:
+    raise ValueError("X_test and Y_test arguments must be supplied.")
+
+ ensemble_diversity_records = np.array([])
+ get_batch_ensemble_diversity = ensemble_diversity(y, predictions, number_model)
+ with sess.as_default():
+ # Compute number of batches
+ nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
+ assert nb_batches * args.batch_size >= len(X_test)
+
+ X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
+ dtype=X_test.dtype)
+ Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
+ dtype=Y_test.dtype)
+ for batch in range(nb_batches):
+ if batch % 100 == 0 and batch > 0:
+ _logger.debug("Batch " + str(batch))
+
+ # Must not use the `batch_indices` function here, because it
+ # repeats some examples.
+ # It's acceptable to repeat during training, but not eval.
+ start = batch * args.batch_size
+ end = min(len(X_test), start + args.batch_size)
+
+ # The last batch may be smaller than all others. This should not
+      # affect the accuracy disproportionately.
+ cur_batch_size = end - start
+ X_cur[:cur_batch_size] = X_test[start:end]
+ Y_cur[:cur_batch_size] = Y_test[start:end]
+ feed_dict = {x: X_cur, y: Y_cur}
+ if feed is not None:
+ feed_dict.update(feed)
+ ensemble_diversity_records_batch = get_batch_ensemble_diversity.eval(feed_dict=feed_dict)
+
+ ensemble_diversity_records = np.concatenate((ensemble_diversity_records, ensemble_diversity_records_batch), axis=0)
+
+ assert end >= len(X_test)
+
+ return ensemble_diversity_records #len(X_test) X 1
\ No newline at end of file
diff --git a/case_studies/mmt/requirements.txt b/case_studies/mmt/requirements.txt
new file mode 100644
index 0000000..fea6967
--- /dev/null
+++ b/case_studies/mmt/requirements.txt
@@ -0,0 +1,153 @@
+absl-py==0.2.0
+ansible==2.0.0.2
+astor==0.6.2
+attrs==15.2.0
+backports-abc==0.5
+backports.functools-lru-cache==1.5
+backports.shutil-get-terminal-size==1.0.0
+backports.weakref==1.0.post1
+bleach==1.5.0
+blessings==1.6.1
+certifi==2018.10.15
+chardet==3.0.4
+cleverhans==2.1.0
+Click==7.0
+cloudpickle==0.6.1
+configobj==5.0.6
+configparser==3.5.0
+cryptography==1.2.3
+cycler==0.10.0
+Cython==0.29.3
+dask==1.1.0
+decorator==4.3.0
+defusedxml==0.5.0
+ecdsa==0.13
+entrypoints==0.3
+enum34==1.1.6
+Flask==1.0.2
+funcsigs==1.0.2
+functools32==3.2.3.post2
+futures==3.2.0
+gast==0.2.0
+gpustat==0.4.1
+grpcio==1.11.0
+gyp==0.1
+h5py==2.8.0
+html5lib==0.9999999
+httplib2==0.9.1
+idna==2.7
+ImageHash==4.0
+imageio==2.4.1
+imgaug==0.2.7
+iotop==0.6
+ipaddress==1.0.16
+ipykernel==4.10.0
+ipython==5.8.0
+ipython-genutils==0.2.0
+ipywidgets==7.4.2
+itsdangerous==1.1.0
+Jinja2==2.10
+jsonschema==2.6.0
+jupyter==1.0.0
+jupyter-client==5.2.4
+jupyter-console==5.2.0
+jupyter-core==4.4.0
+kaggle==1.5.1.1
+Keras==2.2.4
+Keras-Applications==1.0.6
+Keras-Preprocessing==1.0.5
+keras-tqdm==2.0.1
+kiwisolver==1.0.1
+Landscape-Client==16.3+bzr834
+lap==0.4.0
+Mako==1.0.3
+Markdown==2.6.11
+MarkupSafe==1.1.0
+matplotlib==2.2.3
+mistune==0.8.4
+mnist==0.2.2
+mock==2.0.0
+nbconvert==5.4.0
+nbformat==4.4.0
+netaddr==0.7.18
+networkx==2.2
+nose==1.3.7
+notebook==5.7.4
+numexpr==2.6.9
+numpy==1.14.3
+nvidia-ml-py==375.53.1
+ofed-le-utils==1.0.3
+opencv-python==4.0.0.21
+PAM==0.4.2
+pandas==0.23.4
+pandocfilters==1.4.2
+paramiko==1.16.0
+pathlib2==2.3.3
+pbr==4.0.2
+pexpect==4.0.1
+pickleshare==0.7.5
+Pillow==5.4.1
+pkg-resources==0.0.0
+prometheus-client==0.5.0
+prompt-toolkit==1.0.15
+protobuf==3.6.1
+psutil==5.4.3
+ptyprocess==0.5
+PuLP==1.6.8
+pyasn1==0.1.9
+pyasn1-modules==0.0.7
+pycodestyle==2.4.0
+pycrypto==2.6.1
+pycups==1.9.73
+pycurl==7.43.0
+Pygments==2.3.1
+pygobject==3.20.0
+PyMySQL==0.7.2
+PyOpenGL==3.0.2
+pyOpenSSL==0.15.1
+pyparsing==2.3.1
+pyserial==3.0.1
+pysmbc==1.0.15.5
+Pyste==0.9.10
+python-apt==1.1.0b1+ubuntu0.16.4.2
+python-dateutil==2.7.3
+python-slugify==2.0.1
+pytz==2018.7
+PyWavelets==1.0.1
+PyYAML==3.13
+pyzmq==17.1.2
+qtconsole==4.4.3
+requests==2.20.0
+scandir==1.9.0
+scikit-image==0.13.0
+scikit-learn==0.20.2
+scipy==0.17.0
+seaborn==0.9.0
+Send2Trash==1.5.0
+service-identity==16.0.0
+Shapely==1.6.4.post2
+simplegeneric==0.8.1
+singledispatch==3.4.0.3
+six==1.10.0
+sklearn==0.0
+subprocess32==3.5.3
+tables==3.4.4
+tensorboard==1.9.0
+tensorflow-gpu==1.9.0
+tensorflow-hub==0.2.0
+termcolor==1.1.0
+terminado==0.8.1
+testpath==0.4.2
+toolz==0.9.0
+tornado==5.1.1
+tqdm==4.29.1
+traitlets==4.3.2
+trash-cli==0.12.9.14
+Twisted==16.0.0
+Unidecode==1.0.23
+urllib3==1.22
+virtualenv==15.0.1
+wcwidth==0.1.7
+Werkzeug==0.14.1
+widgetsnbextension==3.4.2
+zope.interface==4.1.3
diff --git a/case_studies/mmt/train.py b/case_studies/mmt/train.py
new file mode 100644
index 0000000..f42169f
--- /dev/null
+++ b/case_studies/mmt/train.py
@@ -0,0 +1,255 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import keras
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import AveragePooling2D, Input, Flatten, Lambda
+from keras.optimizers import Adam, SGD
+from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from keras.regularizers import l2
+from keras import backend as K
+from keras.models import Model
+from keras.datasets import mnist, cifar10, cifar100
+import tensorflow as tf
+import numpy as np
+import os
+from scipy.io import loadmat
+import math
+from utils.model import resnet_v1, resnet_v2
+
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 64, '')
+tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
+tf.app.flags.DEFINE_string('optimizer', 'mom', '')
+tf.app.flags.DEFINE_integer('version', 1, '')
+tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
+tf.app.flags.DEFINE_integer('feature_dim', 256, '')
+tf.app.flags.DEFINE_bool('is_2d_demo', False, 'whether is a 2d demo on MNIST')
+tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
+tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
+tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
+tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
+tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
+tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
+tf.app.flags.DEFINE_string('dataset', 'mnist', '')
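+
+# Example invocation (illustrative flag values; everything else uses defaults):
+#   python case_studies/mmt/train.py --dataset=cifar10 --version=2 \
+#     --use_MMLDA=True --use_ball=True --mean_var=10 --optimizer=mom --lr=0.01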
+
+
+
+random_seed = '' # '' or '2' or '3'
+# Load the dataset
+if FLAGS.dataset=='mnist':
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train = np.repeat(np.expand_dims(x_train, axis=3), 3, axis=3)
+ x_test = np.repeat(np.expand_dims(x_test, axis=3), 3, axis=3)
+ epochs = 50
+ num_class = 10
+ epochs_inter = [30,40]
+elif FLAGS.dataset=='cifar10':
+ (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+ epochs = 200
+ num_class = 10
+ epochs_inter = [100,150]
+elif FLAGS.dataset=='cifar100':
+ (x_train, y_train), (x_test, y_test) = cifar100.load_data()
+ epochs = 200
+ num_class = 100
+ epochs_inter = [100,150]
+else:
+  raise ValueError('Unknown dataset: ' + FLAGS.dataset)
+
+
+# Training parameters
+subtract_pixel_mean = True
+version = FLAGS.version # Model version
+n = 5 # n=5 for resnet-32 v1
+
+# Computed depth from supplied model parameter n
+if version == 1:
+ depth = n * 6 + 2
+ feature_dim = 64
+elif version == 2:
+ depth = n * 9 + 2
+ feature_dim = FLAGS.feature_dim
+
+if FLAGS.use_random==True:
+ name_random = '_random'
+ random_seed = '2'
+else:
+ name_random = ''
+
+if FLAGS.use_leaky==True:
+ name_leaky = '_withleaky'
+else:
+ name_leaky = ''
+
+if FLAGS.use_dense==True:
+ name_dense = ''
+else:
+ name_dense = '_nodense'
+
+if FLAGS.is_2d_demo==True:
+ is_2d_demo = '_demoMNIST'
+else:
+ is_2d_demo = ''
+
+
+#Load centers in MMC
+kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+'.mat')
+mean_logits_np = kernel_dict['mean_logits'] #num_class X num_dense
+mean_logits = FLAGS.mean_var * tf.constant(mean_logits_np,dtype=tf.float32)
+
+# Input image dimensions.
+input_shape = x_train.shape[1:]
+
+# Normalize data.
+x_train = x_train.astype('float32') / 255
+x_test = x_test.astype('float32') / 255
+
+# If subtract pixel mean is enabled
+if subtract_pixel_mean:
+ x_train_mean = np.mean(x_train, axis=0)
+ x_train -= x_train_mean
+ x_test -= x_train_mean
+
+# Convert class vectors to binary class matrices.
+y_train = keras.utils.to_categorical(y_train, num_class)
+y_test = keras.utils.to_categorical(y_test, num_class)
+
+
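+# With one-hot y_true this loss is simply the negative true-class entry of
+# y_pred: for the MMC setting (use_ball=True) that is the squared distance to
+# the true-class center, and for use_ball=False it reduces to the usual
+# cross-entropy on the distance-based softmax defined below.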
+def dot_loss(y_true, y_pred):
+ return - tf.reduce_sum(y_pred * y_true, axis=-1) #batch_size X 1
+
+#MMLDA prediction function
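+# Shape sketch (e.g. feature_dim=256, num_class=10): final_features of shape
+# (batch, 256) are mapped to logits of shape (batch, 10) with
+# logits[i, c] = -||features[i] - mean_var * mu_c||^2; if use_ball is False the
+# logits are additionally turned into log-probabilities.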
+def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
+ #x_shape = batch_size X num_dense
+ x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
+ mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
+ logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
+ if use_ball==True:
+ return logits
+ else:
+    logits = logits - tf.reduce_max(logits, axis=-1, keepdims=True) # subtract max for numerical stability
+    logits = logits - tf.log(tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)) # numerically stable log-softmax
+ return logits
+
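+# For example, with the cifar10 settings above (epochs_inter=[100, 150]) and the
+# default lr of 0.01, lr_schedule yields 0.01 for epochs 0-100, 0.001 for
+# epochs 101-150, and 0.0001 afterwards.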
+def lr_schedule(epoch):
+ lr = FLAGS.lr
+ if epoch > epochs_inter[1]:
+ lr *= 1e-2
+ elif epoch > epochs_inter[0]:
+ lr *= 1e-1
+ print('Learning rate: ', lr)
+ return lr
+
+model_input = Input(shape=input_shape)
+
+# dim of logits is batch_size x dim_means
+if version == 2:
+ original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, num_dims=feature_dim, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+else:
+ original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, \
+ use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
+
+if FLAGS.use_BN==True:
+ BN_name = '_withBN'
+ print('Use BN in the model')
+else:
+ BN_name = '_noBN'
+ print('Do not use BN in the model')
+
+if FLAGS.use_MMLDA==True:
+ print('Using MM Training Scheme')
+ new_layer = Lambda(MMLDA_layer)
+ predictions = new_layer(final_features)
+  model = Model(inputs=model_input, outputs=predictions)
+ use_ball_=''
+ train_loss = dot_loss
+ if FLAGS.use_ball==False:
+ print('Using softmax function (MMLDA)')
+ use_ball_='_softmax'
+ filepath_dir = 'trained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_meanvar'+str(FLAGS.mean_var) \
+ +'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size) \
+ +BN_name+name_leaky+name_dense+name_random+random_seed+use_ball_+is_2d_demo
+else:
+ print('Using softmax loss')
+ model = original_model
+ train_loss = keras.losses.categorical_crossentropy
+ filepath_dir = 'trained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_'+FLAGS.optimizer \
+ +'_lr'+str(FLAGS.lr) \
+ +'_batchsize'+str(FLAGS.batch_size)+BN_name+name_leaky
+
+if FLAGS.optimizer=='Adam':
+ model.compile(
+ loss=train_loss,
+ optimizer=Adam(lr=lr_schedule(0)),
+ metrics=['accuracy'])
+elif FLAGS.optimizer=='mom':
+ model.compile(
+ loss=train_loss,
+ optimizer=SGD(lr=lr_schedule(0), momentum=0.9),
+ metrics=['accuracy'])
+model.summary()
+
+
+# Prepare the model saving directory.
+save_dir = os.path.join(os.getcwd(), filepath_dir)
+model_name = 'model.{epoch:03d}.h5'
+if not os.path.isdir(save_dir):
+ os.makedirs(save_dir)
+filepath = os.path.join(save_dir, model_name)
+
+# Prepare callbacks for model saving and for learning rate adjustment.
+checkpoint = ModelCheckpoint(
+ filepath=filepath, monitor='val_loss', mode='min', verbose=2, save_best_only=False, save_weights_only=True, period=5)
+
+lr_scheduler = LearningRateScheduler(lr_schedule)
+
+
+callbacks = [checkpoint, lr_scheduler]
+
+
+# Run training, with data augmentation.
+print('Using real-time data augmentation.')
+# This will do preprocessing and realtime data augmentation:
+datagen = ImageDataGenerator(
+ # epsilon for ZCA whitening
+ zca_epsilon=1e-06,
+ # randomly shift images horizontally
+ width_shift_range=0.1,
+ # randomly shift images vertically
+ height_shift_range=0.1,
+ # set mode for filling points outside the input boundaries
+ fill_mode='nearest',
+ # randomly flip images
+ horizontal_flip=True)
+
+# Compute quantities required for featurewise normalization
+datagen.fit(x_train)
+
+# Fit the model on the batches generated by datagen.flow().
+model.fit_generator(
+ datagen.flow(x_train, y_train, batch_size=FLAGS.batch_size),
+ validation_data=(x_test, y_test),
+ epochs=epochs,
+ verbose=2,
+ workers=4,
+ callbacks=callbacks)
diff --git a/case_studies/odds/original/README.txt b/case_studies/odds/original/README.txt
new file mode 100644
index 0000000..e603fab
--- /dev/null
+++ b/case_studies/odds/original/README.txt
@@ -0,0 +1,7 @@
+Note: The default parameter settings in this repository, specifically the number of samples and types of noise used to generate statistics, are meant as a quick proof-of-concept to run on minimal hardware. To achieve higher robustness to stronger attacks, it is necessary to increase these parameters.
+
+The code of our method is in tf_robustify; it works with both PyTorch and TensorFlow.
+Two example scripts are provided: tensorflow_example.py and torch_example.py.
+The PyTorch example is the easiest to run: it fetches both the data and the model by itself.
+For the TensorFlow example, use fetch_Madry_ResNet.py to fetch the models and place them in the appropriate ckpt_dir (a parameter of tensorflow_example.py).
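+For example (illustrative; run from this directory):
+    python torch_example.py
+    python fetch_Madry_ResNet.py    # then run tensorflow_example.py with ckpt_dir pointing at the downloaded checkpoints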
+
diff --git a/case_studies/odds/original/__init__.py b/case_studies/odds/original/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/odds/original/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/odds/original/adversarial_evaluation.py b/case_studies/odds/original/adversarial_evaluation.py
new file mode 100644
index 0000000..58689ad
--- /dev/null
+++ b/case_studies/odds/original/adversarial_evaluation.py
@@ -0,0 +1,359 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import functools
+
+import cleverhans.model
+import torch
+from cleverhans import utils_tf
+from cleverhans.attacks import Attack
+import cleverhans.attacks
+from cleverhans.utils_tf import clip_eta
+
+# disable tf logging
+# some of these might have to be commented out to use verbose=True in the
+# adaptive attack
+import warnings
+import logging
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import os
+import math
+import numpy as np
+import tensorflow as tf
+
+from cleverhans.attacks import MadryEtAl
+from cleverhans.dataset import CIFAR10
+from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import \
+ make_wresnet as ResNet
+from cleverhans.utils_tf import initialize_uninitialized_global_variables
+
+import tf_robustify
+from cleverhans.augmentation import random_horizontal_flip, random_shift
+
+from logit_matching_attack import \
+ ProjectedGradientDescentWithDetectorLogitMatching
+
+
+def init_defense(sess, x, preds, batch_size, multi_noise=False):
+ data = CIFAR10()
+
+ if multi_noise:
+ defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
+ "defense_alignment_data_multi_noise")
+ else:
+ defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
+ "defense_alignment_data")
+
+ if os.path.exists(defense_data_path):
+ print("Trying to load defense statistics")
+ load_alignments_dir = defense_data_path
+ save_alignments_dir = None
+ else:
+ print("Defense statistics not found; generating and saving them now.")
+ load_alignments_dir = None
+ save_alignments_dir = defense_data_path
+
+ dataset_size = data.x_train.shape[0]
+ dataset_train = data.to_tensorflow()[0]
+ dataset_train = dataset_train.map(
+ lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
+ dataset_train = dataset_train.batch(batch_size)
+ dataset_train = dataset_train.prefetch(16)
+ x_train, y_train = data.get_set('train')
+ x_train *= 255
+
+ nb_classes = y_train.shape[1]
+
+ n_collect = 10000 # TODO: for debugging set to 100, otherwise to 10000
+ p_ratio_cutoff = .999
+ just_detect = True
+ clip_alignments = True
+ fit_classifier = True
+ noise_eps = 'n30.0'
+ num_noise_samples = 256
+
+ if multi_noise:
+ noises = 'n0.003,s0.003,u0.003,n0.005,s0.005,u0.005,s0.008,n0.008,u0.008'.split(
+ ',')
+ noise_eps_detect = []
+ for n in noises:
+ new_noise = n[0] + str(float(n[1:]) * 255)
+ noise_eps_detect.append(new_noise)
+ else:
+ noise_eps_detect = 'n30.0'
+
+ # these attack parameters are just for initializing the defense
+ eps = 8.0
+ pgd_params = {
+ 'eps': eps,
+ 'eps_iter': (eps / 5),
+ 'nb_iter': 10,
+ 'clip_min': 0,
+ 'clip_max': 255
+ }
+
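+  # Walk the graph backwards from the prediction tensor to the final MatMul to
+  # recover the penultimate features and the classifier weight matrix, which
+  # tf_robustify.collect_statistics needs in addition to the logits.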
+ logits_op = preds.op
+ while logits_op.type != 'MatMul':
+ logits_op = logits_op.inputs[0].op
+ latent_x_tensor, weights = logits_op.inputs
+ logits_tensor = preds
+
+ predictor = tf_robustify.collect_statistics(
+ x_train[:n_collect], y_train[:n_collect], x, sess,
+ logits_tensor=logits_tensor,
+ latent_x_tensor=latent_x_tensor,
+ weights=weights,
+ nb_classes=nb_classes,
+ p_ratio_cutoff=p_ratio_cutoff,
+ noise_eps=noise_eps,
+ noise_eps_detect=noise_eps_detect,
+ pgd_eps=pgd_params['eps'],
+ pgd_lr=pgd_params['eps_iter'] / pgd_params['eps'],
+ pgd_iters=pgd_params['nb_iter'],
+ save_alignments_dir=save_alignments_dir,
+ load_alignments_dir=load_alignments_dir,
+ clip_min=pgd_params['clip_min'],
+ clip_max=pgd_params['clip_max'],
+ batch_size=batch_size,
+ num_noise_samples=num_noise_samples,
+ debug_dict=None,
+ debug=False,
+ targeted=False,
+ pgd_train=None,
+ fit_classifier=fit_classifier,
+ clip_alignments=clip_alignments,
+ just_detect=just_detect,
+ )
+
+ next(predictor)
+ return predictor
+
+
+def do_eval(sess, x, x_adv, logits, preds, x_set,
+ y_set, predictor, batch_size, attack_kwargs={}):
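+  """Generates adversarial examples for x_set, feeds them through the detector,
+  and returns per-sample flags: model fooled (misclassified and undetected),
+  correctly classified, and detected as adversarial."""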
+ n_batches = math.ceil(x_set.shape[0] / batch_size)
+
+  # first, generate adversarial examples
+ x_adv_set, logits_set, p_set = [], [], []
+ for b in range(n_batches):
+ values = sess.run((x_adv, logits, preds),
+ {**attack_kwargs,
+ x: x_set[b * batch_size:(b + 1) * batch_size]})
+ x_adv_set.append(values[0])
+ logits_set.append(values[1])
+ p_set.append(values[2])
+ x_adv_set = np.concatenate(x_adv_set)
+ logits_set = np.concatenate(logits_set)
+ p_set = np.concatenate(p_set)
+
+ del x_set
+
+ # now run test
+ p_set, p_det = np.concatenate(
+ [predictor.send(x_adv_set[b * batch_size:(b + 1) * batch_size]) for b in
+ range(n_batches)]).T
+
+ correctly_classified = np.equal(p_set,
+ y_set[:len(p_set)].argmax(-1))
+
+ adversarial_example_detected = np.equal(p_det, True)
+ # model_fooled = np.logical_or(
+ # np.logical_and(~correctly_classified, ~adversarial_example_detected), # fooled classifier & evaded detector
+ # np.logical_and(correctly_classified, adversarial_example_detected) # did not fool classifier but triggered detector (false positive)
+ # )
+ model_fooled = np.logical_and(~correctly_classified,
+ ~adversarial_example_detected) # fooled classifier & evaded detector
+
+ correctly_classified_not_detected = np.logical_and(correctly_classified,
+ ~adversarial_example_detected)
+
+ # print(len(adversarial_example_detected), np.sum(~correctly_classified),
+ # np.sum(adversarial_example_detected))
+
+ # asr = model_fooled.mean()
+ # acc = correctly_classified.mean()
+ # print('Accuracy of base model: %0.4f' % acc)
+ # print('ASR (w/ detection defense): %0.4f' % asr)
+
+ return model_fooled, correctly_classified, adversarial_example_detected
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--debug', action='store_true')
+ parser.add_argument('--multi-noise', action='store_true')
+ parser.add_argument("--n-samples", default=512, type=int)
+ parser.add_argument("--batch-size", default=512, type=int)
+ parser.add_argument("--epsilon", default=8, type=int)
+ parser.add_argument("--attack", choices=("clean", "original", "adaptive",
+ "adaptive-eot"),
+ default="original")
+ args = parser.parse_args()
+
+ # load data
+ data = CIFAR10()
+ x_test, y_test = data.get_set('test')
+
+ sess = tf.Session()
+
+ img_rows, img_cols, nchannels = x_test.shape[1:4]
+ nb_classes = y_test.shape[1]
+
+ # define model & restore weights
+ # Define input TF placeholder
+ x_placeholder = tf.placeholder(
+ tf.float32, shape=(None, img_rows, img_cols, nchannels))
+ # needed for adaptive attack
+ x_reference_placeholder = tf.placeholder(
+ tf.float32, shape=(None, img_rows, img_cols, nchannels))
+ cifar_model = ResNet(scope='ResNet')
+
+ ckpt = tf.train.get_checkpoint_state("checkpoints/tf_madry_wrn_vanilla")
+ saver = tf.train.Saver(var_list=dict(
+ (v.name.split('/', 1)[1].split(':')[0], v) for v in
+ tf.global_variables()))
+ saver.restore(sess, ckpt.model_checkpoint_path)
+ initialize_uninitialized_global_variables(sess)
+
+ logits = cifar_model.get_logits(x_placeholder)
+
+ # setup defense
+ # if multi_noise = True, instantiate the defense with 9 types of noise.
+ # if multi_noise = False, instantiate the defense with a single type of high-magnitude noise.
+ print("multi noise:", args.multi_noise)
+ defense_predictor = init_defense(sess, x_placeholder, logits, args.batch_size,
+ multi_noise=args.multi_noise)
+
+ # prepare dataloader
+ random_indices = list(range(len(x_test)))
+ np.random.shuffle(random_indices)
+ x_batch = []
+ y_batch = []
+ for j in range(args.n_samples):
+ x_, y_ = x_test[random_indices[j]], y_test[random_indices[j]]
+ x_batch.append(x_)
+ y_batch.append(y_)
+ x_batch = np.array(x_batch).transpose((0, 3, 1, 2))
+ y_batch = np.array(y_batch)
+
+ from utils import build_dataloader_from_arrays
+
+ test_loader = build_dataloader_from_arrays(x_batch, y_batch,
+ batch_size=args.batch_size)
+
+ original_pgd_params = {
+ # ord: ,
+ 'eps': args.epsilon,
+ 'eps_iter': (args.epsilon / 5.0),
+ 'nb_iter': 10,
+ 'clip_min': 0,
+ 'clip_max': 255
+ }
+ adaptive_pgd_params = {
+ # ord: ,
+ 'eps': args.epsilon,
+ 'eps_iter': args.epsilon / 100.0,
+ 'nb_iter': 100,
+ 'clip_min': 0,
+ 'clip_max': 255,
+ 'x_reference': x_reference_placeholder,
+ }
+
+ if args.attack == "clean":
+ adv_x = x_placeholder
+ else:
+ if args.attack == "original":
+ pgd = MadryEtAl(cifar_model, sess=sess)
+ print("Using MadryEtAl attack")
+ elif args.attack == "adaptive":
+ pgd = ProjectedGradientDescentWithDetectorLogitMatching(
+ cifar_model,
+ lambda x: cifar_model.get_logits(x),
+ sess=sess,
+ verbose=False)
+ print("Using logit-matching attack")
+ elif args.attack == "adaptive-eot":
+ pgd = ProjectedGradientDescentWithDetectorLogitMatching(
+ cifar_model,
+ lambda x: cifar_model.get_logits(x),
+ sess=sess,
+ eot_ensemble_size=20,
+ verbose=False)
+ print("Using logit-matching attack w/ EOT")
+ else:
+ raise ValueError("invalid attack")
+
+ pgd_params = original_pgd_params if args.attack == "original" else adaptive_pgd_params
+ adv_x = tf.stop_gradient(pgd.generate(x_placeholder, **pgd_params))
+
+ adv_logits = cifar_model.get_logits(adv_x)
+ adv_predictions = tf.argmax(adv_logits, 1)
+
+ def run_eval(l):
+ # should_be_rejected = ~verify_valid_input_data(kwargs["reference_points_x"])
+ # print("should_be_rejected", should_be_rejected)
+
+ is_advs = []
+ correctly_classifieds = []
+ adv_detecteds = []
+ model_fooleds = []
+ for x, y in l:
+ x = x.numpy().transpose((0, 2, 3, 1)) * 255.0
+ y = y.numpy()
+
+ # pick targets. We'll keep it simple and just target the logits
+ # of the first clean example, except for inputs that have the
+ # same class as that example. For those, we target the logits
+ # of the first clean example w/ different class.
+ y_cls = np.argmax(y, -1)
+ reference_x = x.copy()
+ reference_x[:] = x[0]
+ # get first element that has different class than first sample
+ idx = np.argmax(y_cls != y_cls[0])
+ reference_x[y_cls == y_cls[0]] = x[idx]
+
+ print(x.shape, reference_x.shape)
+
+ model_fooled_np, correctly_classified_np, adv_detected_np = do_eval(
+ sess=sess, x=x_placeholder, x_adv=adv_x,
+ batch_size=args.batch_size,
+ logits=adv_logits,
+ preds=adv_predictions, x_set=x, y_set=y,
+ predictor=defense_predictor,
+ attack_kwargs={x_reference_placeholder: reference_x}
+ )
+
+ # print(is_adv_np, y, logits_np)
+ adv_detecteds.append(adv_detected_np)
+ correctly_classifieds.append(correctly_classified_np)
+ model_fooleds.append(model_fooled_np)
+ adv_detecteds = np.concatenate(adv_detecteds)
+ correctly_classifieds = np.concatenate(correctly_classifieds)
+ model_fooleds = np.concatenate(model_fooleds)
+
+ print("ASR:", np.mean(model_fooleds))
+ print("correctly_classifieds", np.mean(correctly_classifieds),
+ "adversarial detected", np.mean(adv_detecteds))
+
+ run_eval(test_loader)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/odds/original/adversarial_evaluation.sh b/case_studies/odds/original/adversarial_evaluation.sh
new file mode 100644
index 0000000..9f66e34
--- /dev/null
+++ b/case_studies/odds/original/adversarial_evaluation.sh
@@ -0,0 +1,19 @@
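+# Usage: sh case_studies/odds/original/adversarial_evaluation.sh [n_samples]   (default: 512)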
+nsamples=${1:-512}
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Original attack"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) venv3.8tf/bin/python case_studies/odds/original/adversarial_evaluation.py \
+ --n-samples=$nsamples --attack=original
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Adaptive attack"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) venv3.8tf/bin/python case_studies/odds/original/adversarial_evaluation.py \
+ --n-samples=$nsamples --attack=adaptive
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Adaptive attack w/ EOT attack"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) venv3.8tf/bin/python case_studies/odds/original/adversarial_evaluation.py \
+ --n-samples=$nsamples --attack=adaptive-eot --batch-size=128
\ No newline at end of file
diff --git a/case_studies/odds/original/binarization_test.py b/case_studies/odds/original/binarization_test.py
new file mode 100644
index 0000000..c7cc8cd
--- /dev/null
+++ b/case_studies/odds/original/binarization_test.py
@@ -0,0 +1,550 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import functools
+
+import cleverhans.model
+import torch
+from cleverhans import utils_tf
+from cleverhans.attacks import Attack
+import cleverhans.attacks
+from cleverhans.utils_tf import clip_eta
+
+# disable tf logging
+# some of these might have to be commented out to use verbose=True in the
+# adaptive attack
+import warnings
+import logging
+
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+import tensorflow as tf
+
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import os
+import math
+import numpy as np
+import tensorflow as tf
+
+from cleverhans.attacks import MadryEtAl
+from cleverhans.dataset import CIFAR10
+from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import \
+ make_wresnet as ResNet
+from cleverhans.utils_tf import initialize_uninitialized_global_variables
+
+import tf_robustify
+from cleverhans.augmentation import random_horizontal_flip, random_shift
+
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack, format_result
+from argparse_utils import DecisionBoundaryBinarizationSettings
+from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
+
+from logit_matching_attack import \
+ ProjectedGradientDescentWithDetectorLogitMatching
+
+
+def init_defense(sess, x, preds, batch_size, multi_noise=False):
+ data = CIFAR10()
+
+ if multi_noise:
+ defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
+ "defense_alignment_data_multi_noise")
+ else:
+ defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
+ "defense_alignment_data")
+
+ if os.path.exists(defense_data_path):
+ print("Trying to load defense statistics")
+ load_alignments_dir = defense_data_path
+ save_alignments_dir = None
+ else:
+ print("Defense statistics not found; generating and saving them now.")
+ load_alignments_dir = None
+ save_alignments_dir = defense_data_path
+
+ dataset_size = data.x_train.shape[0]
+ dataset_train = data.to_tensorflow()[0]
+ dataset_train = dataset_train.map(
+ lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
+ dataset_train = dataset_train.batch(batch_size)
+ dataset_train = dataset_train.prefetch(16)
+ x_train, y_train = data.get_set('train')
+ x_train *= 255
+
+ nb_classes = y_train.shape[1]
+
+ n_collect = 10000 # TODO: for debugging set to 100, otherwise to 10000
+ p_ratio_cutoff = .999
+ just_detect = True
+ clip_alignments = True
+ fit_classifier = True
+ noise_eps = 'n30.0'
+ num_noise_samples = 256
+
+ if multi_noise:
+ noises = 'n0.003,s0.003,u0.003,n0.005,s0.005,u0.005,s0.008,n0.008,u0.008'.split(
+ ',')
+ noise_eps_detect = []
+ for n in noises:
+ new_noise = n[0] + str(float(n[1:]) * 255)
+ noise_eps_detect.append(new_noise)
+ else:
+ noise_eps_detect = 'n30.0'
+
+ # these attack parameters are just for initializing the defense
+ eps = 8.0
+ pgd_params = {
+ 'eps': eps,
+ 'eps_iter': (eps / 5),
+ 'nb_iter': 10,
+ 'clip_min': 0,
+ 'clip_max': 255
+ }
+
+ logits_op = preds.op
+ while logits_op.type != 'MatMul':
+ logits_op = logits_op.inputs[0].op
+ latent_x_tensor, weights = logits_op.inputs
+ logits_tensor = preds
+
+ predictor = tf_robustify.collect_statistics(
+ x_train[:n_collect], y_train[:n_collect], x, sess,
+ logits_tensor=logits_tensor,
+ latent_x_tensor=latent_x_tensor,
+ weights=weights,
+ nb_classes=nb_classes,
+ p_ratio_cutoff=p_ratio_cutoff,
+ noise_eps=noise_eps,
+ noise_eps_detect=noise_eps_detect,
+ pgd_eps=pgd_params['eps'],
+ pgd_lr=pgd_params['eps_iter'] / pgd_params['eps'],
+ pgd_iters=pgd_params['nb_iter'],
+ save_alignments_dir=save_alignments_dir,
+ load_alignments_dir=load_alignments_dir,
+ clip_min=pgd_params['clip_min'],
+ clip_max=pgd_params['clip_max'],
+ batch_size=batch_size,
+ num_noise_samples=num_noise_samples,
+ debug_dict=None,
+ debug=False,
+ targeted=False,
+ pgd_train=None,
+ fit_classifier=fit_classifier,
+ clip_alignments=clip_alignments,
+ just_detect=just_detect,
+ )
+
+ next(predictor)
+ return predictor
+
+
+def do_binarized_eval(sess, x, y, x_adv, binarized_logits, binarized_preds, x_set,
+ y_set, predictor, batch_size, binarized_sess_kwargs={},
+ inverted_detector_test=False):
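+  """Runs the attack against the binarized (two-class) readout, feeds the
+  resulting adversarial examples through the detector, and returns a per-sample
+  success flag together with the adversarial inputs and their binarized logits."""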
+ n_batches = math.ceil(x_set.shape[0] / batch_size)
+
+  # first, generate adversarial examples
+ x_adv_set, binarized_logits_set, binarized_p_set = [], [], []
+ for b in range(n_batches):
+ values = sess.run((x_adv, binarized_logits, binarized_preds),
+ {**binarized_sess_kwargs,
+ x: x_set[b * batch_size:(b + 1) * batch_size],
+ y: y_set[b * batch_size:(b + 1) * batch_size]})
+ x_adv_set.append(values[0])
+ binarized_logits_set.append(values[1])
+ binarized_p_set.append(values[2])
+ x_adv_set = np.concatenate(x_adv_set)
+ binarized_logits_set = np.concatenate(binarized_logits_set)
+ binarized_p_set = np.concatenate(binarized_p_set)
+
+ # now run test
+ p_set, p_det = np.concatenate(
+ [predictor.send(x_adv_set[b * batch_size:(b + 1) * batch_size]) for b in
+ range(n_batches)]).T
+
+ correctly_classified = np.equal(binarized_p_set,
+ y_set[:len(p_set)].argmax(-1))
+ if inverted_detector_test:
+ adversarial_example_detected = np.equal(p_det, False)
+ else:
+ adversarial_example_detected = np.equal(p_det, True)
+ # model_fooled = np.logical_or(
+ # np.logical_and(~correctly_classified, ~adversarial_example_detected), # fooled classifier & evaded detector
+ # np.logical_and(correctly_classified, adversarial_example_detected) # did not fool classifier but triggered detector (false positive)
+ # )
+ model_fooled = np.logical_and(~correctly_classified,
+ ~adversarial_example_detected) # fooled classifier & evaded detector
+
+ # print(len(adversarial_example_detected), np.sum(~correctly_classified),
+ # np.sum(adversarial_example_detected))
+
+ # asr = model_fooled.mean()
+ # acc = correctly_classified.mean()
+ # print('Accuracy of base model: %0.4f' % acc)
+ # print('ASR (w/ detection defense): %0.4f' % asr)
+ #print(model_fooled, ~correctly_classified, ~adversarial_example_detected)
+ #print(binarized_logits_set)
+
+ return model_fooled, (x_adv_set, binarized_logits_set)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--debug', action='store_true')
+ parser.add_argument('--multi-noise', action='store_true')
+ parser.add_argument("--n-samples", default=512, type=int)
+ parser.add_argument("--n-boundary-points", default=49, type=int)
+ parser.add_argument("--n-inner-points", default=10, type=int)
+ parser.add_argument("--batch-size", default=512, type=int)
+ parser.add_argument("--attack", choices=("original", "adaptive",
+ "adaptive-eot"),
+ default="original")
+ parser.add_argument("--dont-verify-training-data", action="store_true")
+  parser.add_argument("--use-boundary-adversarials", action="store_true")
+ parser.add_argument("--inverted-test", action="store_true")
+ args = parser.parse_args()
+
+ if args.inverted_test:
+ print("Running inverted test")
+ else:
+ print("Running normal/non-inverted test")
+
+ # load data
+ data = CIFAR10()
+ x_test, y_test = data.get_set('test')
+
+ sess = tf.Session()
+
+ img_rows, img_cols, nchannels = x_test.shape[1:4]
+ nb_classes = y_test.shape[1]
+
+ # define model & restore weights
+ # Define input TF placeholder
+ x_placeholder = tf.placeholder(
+ tf.float32, shape=(None, img_rows, img_cols, nchannels))
+ y_placeholder = tf.placeholder(tf.int32, shape=(None,))
+ # needed for adaptive attack
+ x_reference_placeholder = tf.placeholder(
+ tf.float32, shape=(None, img_rows, img_cols, nchannels))
+ cifar_model = ResNet(scope='ResNet')
+
+ ckpt = tf.train.get_checkpoint_state("checkpoints/tf_madry_wrn_vanilla")
+ saver = tf.train.Saver(var_list=dict(
+ (v.name.split('/', 1)[1].split(':')[0], v) for v in
+ tf.global_variables()))
+ saver.restore(sess, ckpt.model_checkpoint_path)
+ initialize_uninitialized_global_variables(sess)
+
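+  # The binarization test operates on inputs in [0, 1], whereas the cleverhans
+  # WideResNet expects pixels in [0, 255]; the wrappers below rescale their
+  # inputs accordingly and expose the penultimate "Flatten2" features.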
+ class Model:
+ def __init__(self, model):
+ self.model = model
+
+ def __call__(self, x, features_only=True):
+ assert features_only
+ return self.get_features(x)
+
+ def get_features(self, x):
+ return self.model.fprop(x * 255.0)["Flatten2"]
+
+ def get_features_and_gradients(self, x):
+ features = self.model.fprop(x * 255.0)["Flatten2"]
+ grad = tf.gradients(features, x)[0]
+ return features, grad
+
+ def get_features_logits_and_gradients(self, x):
+ values = self.model.fprop(x * 255.0)
+ features = values["Flatten2"]
+ predictions = values["logits"]
+ grad = tf.gradients(features, x)[0]
+ return features, grad, predictions
+
+ model = Model(cifar_model)
+
+ features, feature_gradients, logits = model.get_features_logits_and_gradients(
+ x_placeholder)
+
+ # setup defense
+ # if multi_noise = True, instantiate the defense with 9 types of noise.
+ # if multi_noise = False, instantiate the defense with a single type of high-magnitude noise.
+ print("multi noise:", args.multi_noise)
+ defense_predictor = init_defense(sess, x_placeholder, logits, args.batch_size,
+ multi_noise=args.multi_noise)
+
+ class ModelWrapper(cleverhans.model.Model):
+ def __init__(self, model, weight_shape, bias_shape):
+ self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
+ self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
+ self.model = model
+ self.first = True
+
+ def fprop(self, x, **kwargs):
+      y = self.model.get_features(x, **kwargs)
+ logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
+ return {"logits": logits}
+
+ def logits_and_predictions(self, x=None):
+      if x is None: assert not self.first
+ if self.first:
+ self.logits = self(x)
+ self.predictions = tf.argmax(self.logits, 1)
+ self.first = False
+ return self.logits, self.predictions
+
+ def run_features(x: np.ndarray, features_only=True,
+ features_and_logits=False):
+ if features_only:
+ assert not features_and_logits
+ targets = features
+ elif features_and_logits:
+ targets = (features, logits)
+ else:
+ targets = logits
+ x = x.transpose(0, 2, 3, 1) * 255.0
+ return sess.run(targets,
+ feed_dict={x_placeholder: x})
+
+ def run_features_and_gradients(x: np.ndarray):
+ x = x.transpose(0, 2, 3, 1) * 255.0
+ return sess.run((features, feature_gradients),
+ feed_dict={x_placeholder: x})
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=lambda x, features_only=False,
+ features_and_logits=False: run_features(x, features_only,
+ features_and_logits),
+ logit_forward_and_backward_pass=lambda x: run_features_and_gradients(x)
+ )
+
+ # prepare dataloader
+ random_indices = list(range(len(x_test)))
+ np.random.shuffle(random_indices)
+ x_batch = []
+ y_batch = []
+ for j in range(args.n_samples):
+ x_, y_ = x_test[random_indices[j]], y_test[random_indices[j]]
+ x_batch.append(x_)
+ y_batch.append(y_)
+ x_batch = np.array(x_batch).transpose((0, 3, 1, 2))
+ y_batch = np.array(y_batch)
+
+ from utils import build_dataloader_from_arrays
+
+ test_loader = build_dataloader_from_arrays(x_batch, y_batch, batch_size=32)
+
+ # TODO: update shapes? apparently not necessary...
+ wrapped_model = ModelWrapper(model, (2, 640), (2,))
+
+ baseline_cifar_pgd = MadryEtAl(cifar_model, sess=sess)
+ original_pgd_params = {
+ # ord: ,
+ 'eps': 8,
+ 'eps_iter': (8 / 5),
+ 'nb_iter': 10,
+ 'clip_min': 0,
+ 'clip_max': 255
+ }
+ adaptive_pgd_params = {
+ # ord: ,
+ 'eps': 8,
+ 'eps_iter': 8.0 / 300,
+ 'nb_iter': 300,
+ 'clip_min': 0,
+ 'clip_max': 255,
+ 'x_reference': x_reference_placeholder,
+ 'y': y_placeholder
+ }
+
+ if args.attack == "original":
+ pgd = MadryEtAl(wrapped_model, sess=sess)
+ print("Using MadryEtAl attack")
+ elif args.attack == "adaptive":
+ pgd = ProjectedGradientDescentWithDetectorLogitMatching(
+ wrapped_model,
+ lambda x: model.model.get_logits(x),
+ sess=sess,
+ verbose=False)
+ print("Using logit-matching attack")
+ elif args.attack == "adaptive-eot":
+ pgd = ProjectedGradientDescentWithDetectorLogitMatching(
+ wrapped_model,
+ lambda x: model.model.get_logits(x),
+ sess=sess,
+ eot_ensemble_size=20,
+ verbose=False)
+ print("Using logit-matching attack w/ EOT")
+ else:
+ raise ValueError("invalid attack")
+
+ # was 1.75
+ far_off_distance = 1.75 # TODO, was 1.01
+
+ larger_pgd_params = {**original_pgd_params}
+ larger_pgd_params["eps"] *= far_off_distance
+
+ pgd_params = original_pgd_params if args.attack == "original" else adaptive_pgd_params
+
+ adv_x = tf.stop_gradient(pgd.generate(x_placeholder, **pgd_params))
+
+ cifar_adv_x = tf.stop_gradient(
+ baseline_cifar_pgd.generate(x_placeholder, **original_pgd_params))
+ larger_cifar_adv_x = tf.stop_gradient(
+      baseline_cifar_pgd.generate(x_placeholder, **larger_pgd_params))
+
+ adv_binarized_logits = wrapped_model.get_logits(adv_x)
+ adv_binarized_predictions = tf.argmax(adv_binarized_logits, 1)
+
+ def run_attack(m, l, kwargs, inverted_detector_test=False):
+ linear_layer = m[-1]
+ del m
+
+ weights_feed_dict = {
+ wrapped_model.weight: linear_layer.weight.data.numpy(),
+ wrapped_model.bias: linear_layer.bias.data.numpy()
+ }
+
+ if "reference_points_x" in kwargs:
+ weights_feed_dict[x_reference_placeholder] = \
+ kwargs["reference_points_x"].numpy().transpose((0, 2, 3, 1)) * 255.0
+
+ # should_be_rejected = ~verify_valid_input_data(kwargs["reference_points_x"])
+ # print("should_be_rejected", should_be_rejected)
+
+ for x, y in l:
+ x = x.numpy().transpose((0, 2, 3, 1)) * 255.0
+ y = y.numpy()
+
+ is_adv_np, (x_adv_np, logits_np) = do_binarized_eval(
+ sess=sess, x=x_placeholder, y=y_placeholder, x_adv=adv_x,
+ batch_size=args.batch_size,
+ binarized_logits=adv_binarized_logits,
+ binarized_preds=adv_binarized_predictions, x_set=x, y_set=y,
+ predictor=defense_predictor, binarized_sess_kwargs=weights_feed_dict,
+ inverted_detector_test=inverted_detector_test
+ )
+
+ # print(is_adv_np, y, logits_np)
+
+ return is_adv_np, (torch.Tensor(x_adv_np), torch.Tensor(logits_np))
+
+ def verify_valid_input_data(x_set):
+ """Returns True if something is not detected as an adversarial example."""
+ x_set = x_set.numpy().transpose((0, 2, 3, 1)) * 255.0
+ n_batches = math.ceil(x_set.shape[0] / args.batch_size)
+ _, p_det = np.concatenate(
+ [defense_predictor.send(
+ x_set[b * args.batch_size:(b + 1) * args.batch_size]
+ ) for b in range(n_batches)]
+ ).T
+    # p_det is True if a possible adversarial example has been detected
+ valid_sample = np.equal(p_det, False)
+
+ return valid_sample
+
+ def get_boundary_adversarials(x, y, n_samples, epsilon):
+ """Generate adversarial examples for the base classifier."""
+ assert len(x.shape) == 3
+ del y
+ device = x.device
+ x = x.unsqueeze(0).numpy()
+ x = x.transpose((0, 2, 3, 1)) * 255.0
+ x = np.repeat(x, n_samples, axis=0)
+
+ # select correct tf placeholder depending on the epsilon ball
+ if epsilon == pgd_params["eps"] / 255.0:
+ x_adv_ph = cifar_adv_x
+ elif epsilon == larger_pgd_params["eps"] / 255.0:
+ x_adv_ph = larger_cifar_adv_x
+ else:
+ raise ValueError("Cannot generate adversarials at eps =", epsilon)
+
+ for _ in range(10):
+ x_advs = []
+ for x_ in np.array_split(x, int(np.ceil(len(x) / args.batch_size))):
+ x_advs.append(sess.run(x_adv_ph, feed_dict={x_placeholder: x_}))
+ x_adv = np.concatenate(x_advs, 0)
+ x_adv = x_adv.transpose((0, 3, 1, 2)) / 255.0
+
+ x_adv = torch.Tensor(x_adv, device=device)
+
+ # make sure adversarial examples are really detected as adversarial examples
+ is_valid = verify_valid_input_data(x_adv)
+ is_invalid = ~is_valid
+ if np.all(is_invalid):
+        # keep generating until we finally find an adversarial example that
+        # gets detected
+ break
+ else:
+ warnings.warn("Could not generate adversarial example that gets "
+ "detected after 10 trials.")
+
+ return x_adv
+
+ if args.inverted_test:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=1,
+ )
+ else:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=args.n_boundary_points - 1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=0,
+ )
+
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=functools.partial(
+ run_attack,
+ inverted_detector_test=args.inverted_test
+ ),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=8 / 255.0,
+ norm="linf",
+ lr=10000,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ **additional_settings,
+ ),
+ rescale_logits="adaptive",
+ n_samples=args.n_samples,
+ device="cpu",
+ batch_size=args.batch_size,
+ # decision_boundary_closeness=0.999,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ # verify_valid_boundary_training_data_fn=None if args.dont_verify_training_data else verify_valid_input_data,
+ verify_valid_boundary_training_data_fn=verify_valid_input_data,
+ get_boundary_adversarials_fn=get_boundary_adversarials,
+ verify_valid_inner_training_data_fn=None,
+ verify_valid_input_validation_data_fn=None,
+        # verify_valid_input_data if args.use_boundary_adversarials else None,
+        # get_boundary_adversarials_fn=get_boundary_adversarials if args.use_boundary_adversarials else None,
+ fill_batches_for_verification=False,
+ far_off_distance=far_off_distance
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ args.n_samples))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/odds/original/binarization_test.sh b/case_studies/odds/original/binarization_test.sh
new file mode 100644
index 0000000..fc42e48
--- /dev/null
+++ b/case_studies/odds/original/binarization_test.sh
@@ -0,0 +1,95 @@
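+# Usage: sh case_studies/odds/original/binarization_test.sh [n_samples] [normal|inverted] [--multi-noise]
+# (defaults: 512 samples, normal test, single-noise defense)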
+nsamples=${1:-512}
+direction=${2:-normal}
+echo "Evaluating on $nsamples samples"
+echo "Direction: $direction"
+multinoise=${3:-}
+
+
+if [[ "$direction" == "normal" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner"
+ echo "Original attack"
+ echo "Normal test"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/odds/original/binarization_test.py \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --dont-verify-training-data \
+ --attack=original \
+ $multinoise
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner"
+ echo "Adaptive attack"
+ echo "Normal test"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/odds/original/binarization_test.py \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --dont-verify-training-data \
+ --attack=adaptive \
+ $multinoise
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner"
+ echo "Adaptive attack w/ EOT"
+ echo "Normal test"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/odds/original/binarization_test.py \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --dont-verify-training-data \
+ --attack=adaptive-eot \
+ $multinoise
+else
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner"
+ echo "Original attack"
+ echo "Inverted test"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/odds/original/binarization_test.py \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --dont-verify-training-data \
+ --attack=original \
+ --inverted-test \
+ $multinoise
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner"
+ echo "Adaptive attack"
+ echo "Inverted test"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/odds/original/binarization_test.py \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --dont-verify-training-data \
+ --attack=adaptive \
+ --inverted-test \
+ $multinoise
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner"
+ echo "Adaptive attack w/ EOT"
+ echo "Inverted test"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/odds/original/binarization_test.py \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --dont-verify-training-data \
+ --attack=adaptive-eot \
+ --inverted-test \
+ $multinoise
+fi
\ No newline at end of file
diff --git a/case_studies/odds/original/carlini_wagner_attack.py b/case_studies/odds/original/carlini_wagner_attack.py
new file mode 100644
index 0000000..d33f58a
--- /dev/null
+++ b/case_studies/odds/original/carlini_wagner_attack.py
@@ -0,0 +1,250 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# from https://github.com/rwightman/pytorch-nips2017-attack-example
+"""PyTorch Carlini and Wagner L2 attack algorithm.
+
+Based on paper by Carlini & Wagner, https://arxiv.org/abs/1608.04644 and a reference implementation at
+https://github.com/tensorflow/cleverhans/blob/master/cleverhans/attacks_tf.py
+"""
+import os
+import sys
+import torch
+import numpy as np
+from torch import optim
+from torch import autograd
+from carlini_wagner_helpers import *
+
+
+class AttackCarliniWagnerL2:
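+ # Carlini & Wagner L2 attack: Adam-optimized perturbation with an outer binary search
+ # over the trade-off constant (scale_const).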
+
+ def __init__(self, targeted=False, search_steps=5, max_steps=1000, cuda=True, debug=False, num_classes=10, clip_min=-1., clip_max=1., confidence=20, initial_const=.1, learning_rate=1e-4):
+ self.debug = debug
+ self.targeted = targeted
+ self.num_classes = num_classes
+ self.confidence = confidence # FIXME: need to find a good value for this; the value of 0 used in the paper does not do much here...
+ self.initial_const = initial_const # bumped up from the default of .01 in the reference code (10 in the MNIST tutorial)
+ self.binary_search_steps = search_steps
+ # self.repeat = self.binary_search_steps >= 10
+ self.repeat = False
+ self.max_steps = max_steps
+ self.abort_early = True
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+ self.cuda = cuda
+ # self.clamp_fn = 'tanh' # set to anything other than 'tanh' to perform a simple clamp instead of the tanh rescaling
+ self.clamp_fn = '' # set to anything other than 'tanh' to perform a simple clamp instead of the tanh rescaling
+ self.init_rand = False # an experiment, does a random starting point help?
+ self.learning_rate = learning_rate
+
+ def _compare(self, output, target):
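+ # Decide whether an output counts as a successful attack for the given target,
+ # shifting the target logit by the confidence margin first.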
+ if not isinstance(output, (float, int, np.int64)):
+ output = np.copy(output)
+ if self.targeted:
+ output[target] -= self.confidence
+ else:
+ output[target] += self.confidence
+ output = np.argmax(output)
+ if isinstance(output, np.int64):
+ output = output.item()
+ if self.targeted:
+ return output == target
+ else:
+ return output != target
+
+ def _loss(self, output, target, dist, scale_const):
+ # compare the logit of the label class against the maximum logit of any other class
+ real = (target * output).sum(1)
+ other = ((1. - target) * output - target * 10000.).max(1)[0]
+ if self.targeted:
+ # if targeted, optimize for making the other class most likely
+ loss1 = torch.clamp(other - real + self.confidence, min=0.) # equiv to max(..., 0.)
+ else:
+ # if non-targeted, optimize for making this class least likely.
+ loss1 = torch.clamp(real - other + self.confidence, min=0.) # equiv to max(..., 0.)
+ loss1 = torch.sum(scale_const * loss1)
+
+ loss2 = dist.sum()
+
+ loss = loss1 + loss2
+ return loss
+
+ def _optimize(self, optimizer, model, input_var, modifier_var, target_var, scale_const_var, input_orig=None):
+ # apply modifier and clamp resulting image to keep bounded from clip_min to clip_max
+ if self.clamp_fn == 'tanh':
+ input_adv = tanh_rescale(modifier_var + input_var, self.clip_min, self.clip_max)
+ else:
+ input_adv = torch.clamp(modifier_var + input_var, self.clip_min, self.clip_max)
+
+ output = model(input_adv)
+
+ # distance to the original input data
+ if input_orig is None:
+ dist = l2_dist(input_adv, input_var, keepdim=False)
+ else:
+ dist = l2_dist(input_adv, input_orig, keepdim=False)
+
+ loss = self._loss(output, target_var, dist, scale_const_var)
+
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+
+ loss_np = loss.data.item()
+ dist_np = dist.data.cpu().numpy()
+ output_np = output.data.cpu().numpy()
+ input_adv_np = input_adv.data.cpu().numpy() # back to numpy for downstream consumption
+ return loss_np, dist_np, output_np, input_adv_np
+
+ def run(self, model, input, target, batch_idx=0):
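+ # Attack one batch: the outer loop binary-searches scale_const, the inner loop optimizes the
+ # perturbation with Adam; returns the best adversarial examples found (numpy array).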
+ batch_size = input.size(0)
+
+ # set the lower and upper bounds accordingly
+ lower_bound = np.zeros(batch_size)
+ scale_const = np.ones(batch_size) * self.initial_const
+ upper_bound = np.ones(batch_size) * 1e10
+
+ # python/numpy placeholders for the overall best l2, label score, and adversarial image
+ o_best_l2 = [1e10] * batch_size
+ o_best_score = [-1] * batch_size
+ o_best_attack = input.cpu().numpy()
+
+ # setup input (image) variable, clamp/scale as necessary
+ if self.clamp_fn == 'tanh':
+ # convert to tanh-space; the input is already in the -1 to 1 range, so does it make sense to do
+ # this as per the reference implementation, or can we skip the arctanh?
+ input_var = autograd.Variable(torch_arctanh(input), requires_grad=False)
+ input_orig = tanh_rescale(input_var, self.clip_min, self.clip_max)
+ else:
+ input_var = autograd.Variable(input, requires_grad=False)
+ input_orig = None
+
+ # setup the target variable, we need it to be in one-hot form for the loss function
+ target_onehot = torch.zeros(target.size() + (self.num_classes,))
+ if self.cuda:
+ target_onehot = target_onehot.cuda()
+ target_onehot.scatter_(1, target.unsqueeze(1), 1.)
+ target_var = autograd.Variable(target_onehot, requires_grad=False)
+
+ # setup the modifier variable, this is the variable we are optimizing over
+ modifier = torch.zeros(input_var.size()).float()
+ if self.init_rand:
+ # Experiment with a non-zero starting point...
+ modifier = torch.normal(means=modifier, std=0.001)
+ if self.cuda:
+ modifier = modifier.cuda()
+ modifier_var = autograd.Variable(modifier, requires_grad=True)
+
+ optimizer = optim.Adam([modifier_var], lr=self.learning_rate)
+
+ for search_step in range(self.binary_search_steps):
+ print('Batch: {0:>3}, search step: {1}'.format(batch_idx, search_step))
+ if self.debug:
+ print('Const:')
+ for i, x in enumerate(scale_const):
+ print(i, x)
+ best_l2 = [1e10] * batch_size
+ best_score = [-1] * batch_size
+
+ # In the last search step (if we run many steps), repeat the search once with the upper bound as the constant.
+ if self.repeat and search_step == self.binary_search_steps - 1:
+ scale_const = upper_bound
+
+ scale_const_tensor = torch.from_numpy(scale_const).float()
+ if self.cuda:
+ scale_const_tensor = scale_const_tensor.cuda()
+ scale_const_var = autograd.Variable(scale_const_tensor, requires_grad=False)
+
+ prev_loss = 1e6
+ for step in range(self.max_steps):
+ # perform the attack
+ loss, dist, output, adv_img = self._optimize(
+ optimizer,
+ model,
+ input_var,
+ modifier_var,
+ target_var,
+ scale_const_var,
+ input_orig)
+
+ if step % 100 == 0 or step == self.max_steps - 1:
+ print('Step: {0:>4}, loss: {1:6.4f}, dist: {2:8.5f}, modifier mean: {3:.5e}'.format(
+ step, loss, dist.mean(), modifier_var.data.mean()))
+
+ if self.abort_early and step % (self.max_steps // 10) == 0:
+ if loss > prev_loss * .9999:
+ print('Aborting early...')
+ break
+ prev_loss = loss
+
+ # update best result found
+ for i in range(batch_size):
+ target_label = target[i]
+ output_logits = output[i]
+ output_label = np.argmax(output_logits)
+ di = dist[i]
+ if self.debug:
+ if step % 100 == 0:
+ print('{0:>2} dist: {1:.5f}, output: {2:>3}, {3:5.3}, target {4:>3}'.format(
+ i, di, output_label, output_logits[output_label], target_label))
+ if di < best_l2[i] and self._compare(output_logits, target_label):
+ if self.debug:
+ print('{0:>2} best step, prev dist: {1:.5f}, new dist: {2:.5f}'.format(
+ i, best_l2[i], di))
+ best_l2[i] = di
+ best_score[i] = output_label
+ if di < o_best_l2[i] and self._compare(output_logits, target_label):
+ if self.debug:
+ print('{0:>2} best total, prev dist: {1:.5f}, new dist: {2:.5f}'.format(
+ i, o_best_l2[i], di))
+ o_best_l2[i] = di
+ o_best_score[i] = output_label
+ o_best_attack[i] = adv_img[i]
+
+ sys.stdout.flush()
+ # end inner step loop
+
+ # adjust the constants
+ batch_failure = 0
+ batch_success = 0
+ for i in range(batch_size):
+ if self._compare(best_score[i], target[i]) and best_score[i] != -1:
+ # successful, do binary search and divide const by two
+ upper_bound[i] = min(upper_bound[i], scale_const[i])
+ if upper_bound[i] < 1e9:
+ scale_const[i] = (lower_bound[i] + upper_bound[i]) / 2
+ if self.debug:
+ print('{0:>2} successful attack, lowering const to {1:.3f}'.format(
+ i, scale_const[i]))
+ else:
+ # failure, multiply by 10 if no solution found
+ # or do binary search with the known upper bound
+ lower_bound[i] = max(lower_bound[i], scale_const[i])
+ if upper_bound[i] < 1e9:
+ scale_const[i] = (lower_bound[i] + upper_bound[i]) / 2
+ else:
+ scale_const[i] *= 10
+ if self.debug:
+ print('{0:>2} failed attack, raising const to {1:.3f}'.format(
+ i, scale_const[i]))
+ if self._compare(o_best_score[i], target[i]) and o_best_score[i] != -1:
+ batch_success += 1
+ else:
+ batch_failure += 1
+
+ print('Num failures: {0:2d}, num successes: {1:2d}\n'.format(batch_failure, batch_success))
+ sys.stdout.flush()
+ # end outer search loop
+
+ return o_best_attack
diff --git a/case_studies/odds/original/carlini_wagner_helpers.py b/case_studies/odds/original/carlini_wagner_helpers.py
new file mode 100644
index 0000000..7f4857e
--- /dev/null
+++ b/case_studies/odds/original/carlini_wagner_helpers.py
@@ -0,0 +1,89 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# from https://github.com/rwightman/pytorch-nips2017-attack-example
+import torch
+import operator as op
+import functools as ft
+
+
+'''reduce_* helper functions reduce tensors on all dimensions but the first.
+They are intended to be used on batched tensors where dim 0 is the batch dim.
+'''
+
+
+def reduce_sum(x, keepdim=True):
+ # silly PyTorch, when will you get proper reducing sums/means?
+ for a in reversed(range(1, x.dim())):
+ x = x.sum(a, keepdim=keepdim)
+ return x
+
+
+def reduce_mean(x, keepdim=True):
+ numel = ft.reduce(op.mul, x.size()[1:])
+ x = reduce_sum(x, keepdim=keepdim)
+ return x / numel
+
+
+def reduce_min(x, keepdim=True):
+ for a in reversed(range(1, x.dim())):
+ x = x.min(a, keepdim=keepdim)[0]
+ return x
+
+
+def reduce_max(x, keepdim=True):
+ for a in reversed(range(1, x.dim())):
+ x = x.max(a, keepdim=keepdim)[0]
+ return x
+
+
+def torch_arctanh(x, eps=1e-6):
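+ # Shrink x slightly towards 0 so that arctanh stays finite at the interval boundaries +/-1.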
+ x = x * (1. - eps)
+ return (torch.log((1 + x) / (1 - x))) * 0.5
+
+
+def l2r_dist(x, y, keepdim=True, eps=1e-8):
+ d = (x - y)**2
+ d = reduce_sum(d, keepdim=keepdim)
+ d += eps # to prevent infinite gradient at 0
+ return d.sqrt()
+
+
+def l2_dist(x, y, keepdim=True):
+ d = (x - y)**2
+ return reduce_sum(d, keepdim=keepdim)
+ # d = torch.abs(x - y)
+ # return reduce_max(d, keepdim=keepdim)
+
+
+def l1_dist(x, y, keepdim=True):
+ d = torch.abs(x - y)
+ return reduce_sum(d, keepdim=keepdim)
+
+
+def l2_norm(x, keepdim=True):
+ norm = reduce_sum(x*x, keepdim=keepdim)
+ return norm.sqrt()
+
+
+def l1_norm(x, keepdim=True):
+ return reduce_sum(x.abs(), keepdim=keepdim)
+
+
+def rescale(x, x_min=-1., x_max=1.):
+ return x * (x_max - x_min) + x_min
+
+
+def tanh_rescale(x, x_min=-1., x_max=1.):
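+ # Map an unconstrained tensor through tanh into the interval [x_min, x_max].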
+ return (torch.tanh(x) + 1) * 0.5 * (x_max - x_min) + x_min
diff --git a/case_studies/odds/original/cifar_model.py b/case_studies/odds/original/cifar_model.py
new file mode 100644
index 0000000..101f2ad
--- /dev/null
+++ b/case_studies/odds/original/cifar_model.py
@@ -0,0 +1,140 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import torch as th
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+from collections import OrderedDict
+
+model_urls = {
+ 'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
+ 'cifar10_tiny': os.path.expanduser('~/models/advhyp/cifar10_tiny/cifar10_tiny-38058c52.pth'),
+ 'cifar10_tinyb': os.path.expanduser('~/models/advhyp/cifar10_tinyb/cifar10_tinyb-7ab86c47.pth'),
+ 'carlini': os.path.expanduser('~/models/advhyp/carlini/carlini-caa52d4e.pth'),
+ 'cifar10_tinyb_adv': os.path.expanduser('~/models/advhyp/cifar10_tinyb_adv/cifar10_tinyb_adv-ac4936dc.pth'),
+ 'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
+}
+
+class CIFAR(nn.Module):
+ def __init__(self, features, n_channel, num_classes):
+ super(CIFAR, self).__init__()
+ assert isinstance(features, nn.Sequential), type(features)
+ self.features = features
+ self.classifier = nn.Sequential(
+ nn.Linear(n_channel, num_classes)
+ )
+
+ def forward(self, x):
+ x = self.features(x)
+ x = x.view(x.size(0), -1)
+ x = self.classifier(x)
+ return x
+
+
+class Carlini(nn.Module):
+ def __init__(self, features, n_channel):
+ super(Carlini, self).__init__()
+ assert isinstance(features, nn.Sequential), type(features)
+ self.features = features
+ self.classifier = nn.Sequential(
+ nn.Linear(n_channel, 256),
+ nn.ReLU(),
+ nn.Dropout(),
+ nn.Linear(256, 256),
+ nn.ReLU(),
+ nn.Linear(256, 10)
+ )
+
+ def forward(self, x):
+ x = self.features(x)
+ x = x.view(x.size(0), -1)
+ x = self.classifier(x)
+ return x
+
+
+def make_layers(cfg, batch_norm=False):
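+ # cfg entries: 'M' adds a 2x2 max-pool; an int v adds a 3x3 conv with v output channels (padding 1);
+ # a tuple (v, p) adds a 3x3 conv with v channels and padding p.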
+ layers = []
+ in_channels = 3
+ for i, v in enumerate(cfg):
+ if v == 'M':
+ layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+ else:
+ padding = v[1] if isinstance(v, tuple) else 1
+ out_channels = v[0] if isinstance(v, tuple) else v
+ conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
+ if batch_norm:
+ layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
+ else:
+ layers += [conv2d, nn.ReLU()]
+ in_channels = out_channels
+ return nn.Sequential(*layers)
+
+def cifar10_tiny(n_channel, pretrained=False, map_location=None, padding=1, trained_adv=False):
+ if padding == 1:
+ cfg = [(n_channel, padding), 'M', (n_channel, padding), 'M', (2*n_channel, padding), 'M', (2*n_channel, 0), 'M']
+ elif padding == 0:
+ cfg = [(n_channel, padding), (n_channel, padding), 'M', (2*n_channel, padding), 'M', (2*n_channel, 0), 'M']
+ layers = make_layers(cfg, batch_norm=False)
+ model = CIFAR(layers, n_channel=2*n_channel if padding == 1 else 4*2*n_channel, num_classes=10)
+ if pretrained or trained_adv:
+ if padding == 1:
+ state_dict = th.load(model_urls['cifar10_tiny'], map_location=map_location)
+ elif padding == 0:
+ if trained_adv and pretrained:
+ state_dict = th.load(model_urls['cifar10_tinyb_adv'], map_location=map_location)
+ else:
+ state_dict = th.load(model_urls['cifar10_tinyb'], map_location=map_location)
+ assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
+ model.load_state_dict(state_dict)
+ return model
+
+def cifar10(n_channel, pretrained=False, map_location=None, trained_adv=False):
+ cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
+ layers = make_layers(cfg, batch_norm=True)
+ model = CIFAR(layers, n_channel=8*n_channel, num_classes=10)
+ if pretrained or trained_adv:
+ if trained_adv and pretrained:
+ m = th.load(model_urls['cifar10_adv'], map_location=map_location)
+ else:
+ m = model_zoo.load_url(model_urls['cifar10'], map_location=map_location)
+ state_dict = m.state_dict() if isinstance(m, nn.Module) else m
+ assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
+ model.load_state_dict(state_dict)
+ return model
+
+def carlini(pretrained=False, map_location=None, trained_adv=False):
+ cfg = [(64, 0), (64, 0), 'M', (128, 0), (128, 0), 'M']
+ layers = make_layers(cfg, batch_norm=False)
+ model = Carlini(layers, n_channel=128*5*5)
+ if pretrained or trained_adv:
+ if trained_adv and pretrained:
+ state_dict = th.load(model_urls['carlini_adv'], map_location=map_location)
+ else:
+ state_dict = th.load(model_urls['carlini'], map_location=map_location)
+ assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
+ model.load_state_dict(state_dict)
+ return model
+
+def cifar100(n_channel, pretrained=None):
+ cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
+ layers = make_layers(cfg, batch_norm=True)
+ model = CIFAR(layers, n_channel=8*n_channel, num_classes=100)
+ if pretrained is not None:
+ m = model_zoo.load_url(model_urls['cifar100'])
+ state_dict = m.state_dict() if isinstance(m, nn.Module) else m
+ assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
+ model.load_state_dict(state_dict)
+ return model
+
diff --git a/case_studies/odds/original/fetch_Madry_ResNet.py b/case_studies/odds/original/fetch_Madry_ResNet.py
new file mode 100644
index 0000000..496c3f5
--- /dev/null
+++ b/case_studies/odds/original/fetch_Madry_ResNet.py
@@ -0,0 +1,60 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Downloads a model, computes its SHA256 hash and unzips it
+ at the proper location."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import sys
+import zipfile
+import hashlib
+
+if len(sys.argv) == 1 or sys.argv[1] not in ['natural',
+ 'adv_trained',
+ 'secret']:
+ print('Usage: python fetch_Madry_ResNet.py [natural, adv_trained, secret]')
+ sys.exit(1)
+
+if sys.argv[1] == 'natural':
+ url = 'https://www.dropbox.com/s/cgzd5odqoojvxzk/natural.zip?dl=1'
+elif sys.argv[1] == 'adv_trained':
+ url = 'https://www.dropbox.com/s/g4b6ntrp8zrudbz/adv_trained.zip?dl=1'
+else: # fetch secret model
+ url = 'https://www.dropbox.com/s/ywc0hg8lr5ba8zd/secret.zip?dl=1'
+
+fname = url.split('/')[-1].split('?')[0] # get the name of the file
+
+# model download
+print('Downloading models')
+if sys.version_info >= (3,):
+ import urllib.request
+ urllib.request.urlretrieve(url, fname)
+else:
+ import urllib
+ urllib.urlretrieve(url, fname)
+
+# computing model hash
+sha256 = hashlib.sha256()
+with open(fname, 'rb') as f:
+ data = f.read()
+ sha256.update(data)
+print('SHA256 hash: {}'.format(sha256.hexdigest()))
+
+# extracting model
+print('Extracting model')
+with zipfile.ZipFile(fname, 'r') as model_zip:
+ model_zip.extractall()
+ print('Extracted model in {}'.format(model_zip.namelist()[0]))
diff --git a/case_studies/odds/original/logit_matching_attack.py b/case_studies/odds/original/logit_matching_attack.py
new file mode 100644
index 0000000..4b9bc16
--- /dev/null
+++ b/case_studies/odds/original/logit_matching_attack.py
@@ -0,0 +1,325 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import functools
+
+import cleverhans.model
+import torch
+from cleverhans import utils_tf
+from cleverhans.attacks import Attack
+import cleverhans.attacks
+from cleverhans.utils_tf import clip_eta
+
+import os
+import math
+import numpy as np
+import tensorflow as tf
+
+
+class ProjectedGradientDescentWithDetectorLogitMatching(Attack):
+ def __init__(self, model, get_features_for_detector,
+ sess=None, dtypestr='float32',
+ default_rand_init=True, verbose=False, eot_ensemble_size=None,
+ eot_multinoise=False, **kwargs):
+ """
+ Create a ProjectedGradientDescentWithDetectorLogitMatching instance.
+ Note: the model parameter should be an instance of the
+ cleverhans.model.Model abstraction provided by CleverHans.
+ """
+
+ super(ProjectedGradientDescentWithDetectorLogitMatching, self).__init__(model, sess=sess,
+ dtypestr=dtypestr, **kwargs)
+ self.feedable_kwargs = ('eps', 'eps_iter', 'clip_min',
+ 'clip_max', 'loss_lambda')
+ self.structural_kwargs = ['ord', 'nb_iter', 'rand_init', 'sanity_checks']
+ self.default_rand_init = default_rand_init
+ self.get_features_for_detector = get_features_for_detector
+ self.verbose = verbose
+
+ self.eot_ensemble_size = eot_ensemble_size
+ assert eot_ensemble_size is None or eot_ensemble_size > 0
+ self.eot_multinoise = eot_multinoise
+
+ def generate(self, x, x_reference, **kwargs):
+ """
+ Generate symbolic graph for adversarial examples and return.
+ :param x: The model's symbolic inputs.
+ :param kwargs: See `parse_params`
+ """
+ # Parse and save attack-specific parameters
+ assert self.parse_params(**kwargs)
+
+ asserts = []
+
+ # If a data range was specified, check that the input was in that range
+ if self.clip_min is not None:
+ asserts.append(utils_tf.assert_greater_equal(x,
+ tf.cast(self.clip_min,
+ x.dtype)))
+
+ if self.clip_max is not None:
+ asserts.append(utils_tf.assert_less_equal(x,
+ tf.cast(self.clip_max,
+ x.dtype)))
+
+ # Initialize loop variables
+ if self.rand_init:
+ eta = tf.random_uniform(tf.shape(x),
+ tf.cast(-self.rand_minmax, x.dtype),
+ tf.cast(self.rand_minmax, x.dtype),
+ dtype=x.dtype)
+ else:
+ eta = tf.zeros(tf.shape(x))
+
+ # Clip eta
+ eta = clip_eta(eta, self.ord, self.eps)
+ adv_x = x + eta
+ if self.clip_min is not None or self.clip_max is not None:
+ adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+
+ fgm_params = {
+ 'eps': self.eps_iter,
+ 'ord': self.ord,
+ 'clip_min': self.clip_min,
+ 'clip_max': self.clip_max,
+ "eot_ensemble_size": self.eot_ensemble_size,
+ "eot_multinoise": self.eot_multinoise,
+ }
+ if self.ord == 1:
+ raise NotImplementedError("It's not clear that FGM is a good inner loop"
+ " step for PGD when ord=1, because ord=1 FGM "
+ " changes only one pixel at a time. We need "
+ " to rigorously test a strong ord=1 PGD "
+ "before enabling this feature.")
+
+ def cond(i, _):
+ return tf.less(i, self.nb_iter)
+
+ def body(i, adv_x):
+ adv_x = self.fgm_generate(x_adv=adv_x,
+ x_reference=x_reference,
+ **fgm_params, step=i)
+
+ # Clipping perturbation eta to self.ord norm ball
+ eta = adv_x - x
+ eta = clip_eta(eta, self.ord, self.eps)
+ adv_x = x + eta
+
+ # Redo the clipping.
+ # FGM already did it, but subtracting and re-adding eta can add some
+ # small numerical error.
+ if self.clip_min is not None or self.clip_max is not None:
+ adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
+
+ return i + 1, adv_x
+
+ _, adv_x = tf.while_loop(cond, body, [tf.zeros([]), adv_x], back_prop=True)
+
+
+ # Asserts run only on CPU.
+ # When multi-GPU eval code tries to force all PGD ops onto GPU, this
+ # can cause an error.
+ #asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps_iter,
+ # dtype=self.eps.dtype),
+ # self.eps))
+ if self.ord == np.inf and self.clip_min is not None:
+ # The 1e-6 is needed to compensate for numerical error.
+ # Without the 1e-6 this fails when e.g. eps=.2, clip_min=.5,
+ # clip_max=.7
+ asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps, x.dtype),
+ 1e-6 + tf.cast(self.clip_max,
+ x.dtype)
+ - tf.cast(self.clip_min,
+ x.dtype)))
+
+ if self.sanity_checks:
+ with tf.control_dependencies(asserts):
+ adv_x = tf.identity(adv_x)
+
+ return adv_x
+
+ def fgm_generate(self,
+ x_adv,
+ x_reference,
+ step,
+ eps=0.3,
+ ord=np.inf,
+ clip_min=None,
+ clip_max=None,
+ targeted=False,
+ sanity_checks=True,
+ eot_ensemble_size=None,
+ eot_multinoise=False):
+ asserts = []
+
+ # If a data range was specified, check that the input was in that range
+ if clip_min is not None:
+ asserts.append(utils_tf.assert_greater_equal(
+ x_adv, tf.cast(clip_min, x_adv.dtype)))
+
+ if clip_max is not None:
+ asserts.append(utils_tf.assert_less_equal(x_adv, tf.cast(clip_max, x_adv.dtype)))
+
+ if targeted:
+ raise ValueError("targeted mode not supported")
+
+ # While this check looks good in theory, in practice it does not make sense,
+ # since the softmax op might not add an additional AddV2 operation at the end.
+ # (It was meant to make sure the caller has not passed probs by accident.)
+ #assert logits.op.type != 'Softmax'
+ #assert target_logits.op.type != 'Softmax'
+
+ target_detector_features = tf.stop_gradient(
+ self.get_features_for_detector(x_reference))
+ labels = tf.one_hot(self.y, 2)
+ if eot_ensemble_size is None:
+ # no EOT
+ detector_features = self.get_features_for_detector(x_adv)
+ classifier_logits = self.model.get_logits(x_adv)
+ real = tf.reduce_sum(labels * classifier_logits, -1)
+ other = tf.reduce_max((1-labels) * classifier_logits - (labels*10000), -1)
+ classifier_loss = -tf.clip_by_value(real - other, -1e-2, 1e9)
+ detector_features_matching_loss = -tf.reduce_mean(
+ tf.reduce_sum((detector_features - target_detector_features)**2,-1),
+ 0)
+ loss = self.loss_lambda * classifier_loss + (1.0 - self.loss_lambda) * detector_features_matching_loss
+
+ # Define gradient of loss wrt input
+ grad, = tf.gradients(loss, x_adv)
+ grad = tf.stop_gradient(grad)
+ else:
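+ # EOT: average the gradient over eot_ensemble_size noisy copies of the input.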
+ grads = []
+ for i in range(eot_ensemble_size):
+ if i == 0:
+ # don't add noise to the first forward pass
+ x_adv_noisy = x_adv
+ else:
+ if eot_multinoise:
+ if i % 2 == 0:
+ noise = tf.random.normal(tf.shape(x_adv), 0.0, 1.0)
+ elif i % 2 == 1:
+ noise = tf.random.uniform(tf.shape(x_adv), -1.0, 1.0)
+ else:
+ # defined in https://github.com/wielandbrendel/adaptive_attacks_paper/blob/master/02_odds/Attack.ipynb
+ # but doesn't make sense to me since this branch never gets reached (i % 2 is always 0 or 1)
+ noise = tf.sign(tf.random.uniform(tf.shape(x_adv), -1.0, 1.0))
+ noise *= 0.01 * 255.0
+ else:
+ noise = tf.random.normal(tf.shape(x_adv), 0.0, 1.0)
+ noise *= 2.0
+ x_adv_noisy = tf.clip_by_value(x_adv + noise, 0, 255.0)
+ detector_features = self.get_features_for_detector(x_adv_noisy)
+ classifier_logits = self.model.get_logits(x_adv_noisy)
+ real = tf.reduce_sum(labels * classifier_logits, -1)
+ other = tf.reduce_max((1-labels) * classifier_logits - (labels*10000), -1)
+ classifier_loss = -tf.clip_by_value(real - other, -1e-2, 1e9)
+ detector_features_matching_loss = -tf.reduce_mean(
+ tf.reduce_sum((detector_features - target_detector_features)**2,-1),
+ 0)
+ loss = self.loss_lambda * classifier_loss + (1.0 - self.loss_lambda) * detector_features_matching_loss
+
+ # Define gradient of loss wrt input
+ grad, = tf.gradients(loss, x_adv_noisy)
+ grad = tf.stop_gradient(grad)
+ grads.append(grad)
+ grad = tf.reduce_mean(grads, axis=0)
+
+ optimal_perturbation = cleverhans.attacks.optimize_linear(grad, eps, ord)
+
+ # Add perturbation to original example to obtain adversarial example
+ adv_x = x_adv + optimal_perturbation
+
+ # If clipping is needed, reset all values outside of [clip_min, clip_max]
+ if (clip_min is not None) or (clip_max is not None):
+ # We don't currently support one-sided clipping
+ assert clip_min is not None and clip_max is not None
+ adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
+
+ if sanity_checks:
+ with tf.control_dependencies(asserts):
+ adv_x = tf.identity(adv_x)
+
+ if self.verbose:
+ adv_x = tf.Print(adv_x, [step, loss, classifier_loss, detector_features_matching_loss])
+
+ return adv_x
+
+ def parse_params(self,
+ eps=0.3,
+ eps_iter=0.05,
+ nb_iter=10,
+ y=None,
+ ord=np.inf,
+ clip_min=None,
+ clip_max=None,
+ y_target=None,
+ rand_init=None,
+ rand_minmax=0.3,
+ sanity_checks=True,
+ loss_lambda=0.5,
+ **kwargs):
+ """
+ Takes in a dictionary of parameters and applies attack-specific checks
+ before saving them as attributes.
+ Attack-specific parameters:
+ :param eps: (optional float) maximum distortion of adversarial example
+ compared to original input
+ :param eps_iter: (optional float) step size for each attack iteration
+ :param nb_iter: (optional int) Number of attack iterations.
+ :param y: (optional) A tensor with the true labels.
+ :param y_target: (optional) A tensor with the labels to target. Leave
+ y_target=None if y is also set. Labels should be
+ one-hot-encoded.
+ :param ord: (optional) Order of the norm (mimics Numpy).
+ Possible values: np.inf, 1 or 2.
+ :param clip_min: (optional float) Minimum input component value
+ :param clip_max: (optional float) Maximum input component value
+ :param sanity_checks: bool Insert tf asserts checking values
+ (Some tests need to run with no sanity checks because the
+ tests intentionally configure the attack strangely)
+ """
+
+ # Save attack-specific parameters
+ self.eps = eps
+ if rand_init is None:
+ rand_init = self.default_rand_init
+ self.rand_init = rand_init
+ if self.rand_init:
+ self.rand_minmax = eps
+ else:
+ self.rand_minmax = 0.
+ self.eps_iter = eps_iter
+ self.nb_iter = nb_iter
+ self.y = y
+ self.y_target = y_target
+ self.ord = ord
+ self.clip_min = clip_min
+ self.clip_max = clip_max
+
+ self.loss_lambda = loss_lambda
+
+ if isinstance(eps, float) and isinstance(eps_iter, float):
+ # If these are both known at compile time, we can check before anything
+ # is run. If they are tf, we can't check them yet.
+ assert eps_iter <= eps, (eps_iter, eps)
+
+ if self.y is not None and self.y_target is not None:
+ raise ValueError("Must not set both y and y_target")
+ # Check if order of the norm is acceptable given current implementation
+ if self.ord not in [np.inf, 1, 2]:
+ raise ValueError("Norm order must be either np.inf, 1, or 2.")
+ self.sanity_checks = sanity_checks
+
+ return True
diff --git a/case_studies/odds/original/tensorflow_example.py b/case_studies/odds/original/tensorflow_example.py
new file mode 100644
index 0000000..fd7a829
--- /dev/null
+++ b/case_studies/odds/original/tensorflow_example.py
@@ -0,0 +1,499 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+based on cleverhans' cifar10_tutorial_tf except that it uses Madry's wide-ResNet
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import logging
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.platform import flags
+import time
+
+from cleverhans.attacks import MadryEtAl, CarliniWagnerL2
+from cleverhans.augmentation import random_horizontal_flip, random_shift
+from cleverhans.dataset import CIFAR10
+from cleverhans.loss import CrossEntropy
+from cleverhans.model_zoo.all_convolutional import ModelAllConvolutional
+from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import make_wresnet as ResNet
+from cleverhans.utils_tf import initialize_uninitialized_global_variables
+from cleverhans.train import train
+from cleverhans.utils import AccuracyReport, set_log_level
+from cleverhans.utils_tf import tf_model_load, model_eval
+
+from cleverhans.evaluation import batch_eval
+import math
+import tqdm
+import os
+import tf_robustify
+from tensorboardX import SummaryWriter
+import pickle
+
+FLAGS = flags.FLAGS
+
+NB_EPOCHS = 6
+BATCH_SIZE = 128
+LEARNING_RATE = 0.001
+CLEAN_TRAIN = True
+BACKPROP_THROUGH_ATTACK = False
+NB_FILTERS = 64
+
+ARCHITECTURE = 'ResNet'
+LOAD_MODEL = True
+
+os.makedirs('logs', exist_ok=True)
+swriter = SummaryWriter('logs')
+
+def cifar10_tutorial(
+ train_start=0,
+ train_end=60000,
+ test_start=0,
+ test_end=10000,
+ nb_epochs=NB_EPOCHS,
+ batch_size=BATCH_SIZE,
+ architecture=ARCHITECTURE,
+ load_model=LOAD_MODEL,
+ ckpt_dir='None',
+ learning_rate=LEARNING_RATE,
+ clean_train=CLEAN_TRAIN,
+ backprop_through_attack=BACKPROP_THROUGH_ATTACK,
+ nb_filters=NB_FILTERS,
+ num_threads=None,
+ label_smoothing=0.):
+ """
+ CIFAR10 cleverhans tutorial
+ :param train_start: index of first training set example
+ :param train_end: index of last training set example
+ :param test_start: index of first test set example
+ :param test_end: index of last test set example
+ :param nb_epochs: number of epochs to train model
+ :param batch_size: size of training batches
+ :param learning_rate: learning rate for training
+ :param clean_train: perform normal training on clean examples only
+ before performing adversarial training.
+ :param backprop_through_attack: If True, backprop through adversarial
+ example construction process during
+ adversarial training.
+ :param label_smoothing: float, amount of label smoothing for cross entropy
+ :return: an AccuracyReport object
+ """
+
+ # Object used to keep track of (and return) key accuracies
+ report = AccuracyReport()
+
+ # Set TF random seed to improve reproducibility
+ tf.set_random_seed(int(time.time() * 1000) % 2**31)
+ np.random.seed(int(time.time() * 1001) % 2**31)
+
+ # Set logging level to see debug information
+ set_log_level(logging.DEBUG)
+
+ # Create TF session
+ if num_threads:
+ config_args = dict(intra_op_parallelism_threads=1)
+ else:
+ config_args = {}
+ sess = tf.Session(config=tf.ConfigProto(**config_args))
+
+ # Get CIFAR10 data
+ data = CIFAR10(train_start=train_start, train_end=train_end,
+ test_start=test_start, test_end=test_end)
+ dataset_size = data.x_train.shape[0]
+ dataset_train = data.to_tensorflow()[0]
+ dataset_train = dataset_train.map(
+ lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
+ dataset_train = dataset_train.batch(batch_size)
+ dataset_train = dataset_train.prefetch(16)
+ x_train, y_train = data.get_set('train')
+
+ pgd_train = None
+ if FLAGS.load_pgd_train_samples:
+ pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(FLAGS.load_pgd_train_samples))
+ x_train = np.load(os.path.join(pgd_path, 'train_clean.npy'))
+ y_train = np.load(os.path.join(pgd_path, 'train_y.npy'))
+ pgd_train = np.load(os.path.join(pgd_path, 'train_pgd.npy'))
+ if x_train.shape[1] == 3:
+ x_train = x_train.transpose((0, 2, 3, 1))
+ pgd_train = pgd_train.transpose((0, 2, 3, 1))
+ if len(y_train.shape) == 1:
+ y_tmp = np.zeros((len(y_train), np.max(y_train)+1), y_train.dtype)
+ y_tmp[np.arange(len(y_tmp)), y_train] = 1.
+ y_train = y_tmp
+
+ x_test, y_test = data.get_set('test')
+ pgd_test = None
+ if FLAGS.load_pgd_test_samples:
+ pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(FLAGS.load_pgd_test_samples))
+ x_test = np.load(os.path.join(pgd_path, 'test_clean.npy'))
+ y_test = np.load(os.path.join(pgd_path, 'test_y.npy'))
+ pgd_test = np.load(os.path.join(pgd_path, 'test_pgd.npy'))
+ if x_test.shape[1] == 3:
+ x_test = x_test.transpose((0, 2, 3, 1))
+ pgd_test = pgd_test.transpose((0, 2, 3, 1))
+ if len(y_test.shape) == 1:
+ y_tmp = np.zeros((len(y_test), np.max(y_test)+1), y_test.dtype)
+ y_tmp[np.arange(len(y_tmp)), y_test] = 1.
+ y_test = y_tmp
+
+ train_idcs = np.arange(len(x_train))
+ np.random.shuffle(train_idcs)
+ x_train, y_train = x_train[train_idcs], y_train[train_idcs]
+ if pgd_train is not None:
+ pgd_train = pgd_train[train_idcs]
+ test_idcs = np.arange(len(x_test))[:FLAGS.test_size]
+ np.random.shuffle(test_idcs)
+ x_test, y_test = x_test[test_idcs], y_test[test_idcs]
+ if pgd_test is not None:
+ pgd_test = pgd_test[test_idcs]
+
+ # Use Image Parameters
+ img_rows, img_cols, nchannels = x_test.shape[1:4]
+ nb_classes = y_test.shape[1]
+
+ # Define input TF placeholder
+ x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
+ nchannels))
+ y = tf.placeholder(tf.float32, shape=(None, nb_classes))
+
+ # Training, evaluation and attack parameters
+ train_params = {
+ 'nb_epochs': nb_epochs,
+ 'batch_size': batch_size,
+ 'learning_rate': learning_rate
+ }
+ eval_params = {'batch_size': batch_size}
+ pgd_params = {
+ # ord: ,
+ 'eps': FLAGS.eps,
+ 'eps_iter': (FLAGS.eps / 5),
+ 'nb_iter': 10,
+ 'clip_min': 0,
+ 'clip_max': 255
+ }
+ cw_params = {
+ 'binary_search_steps': FLAGS.cw_search_steps,
+ 'max_iterations': FLAGS.cw_steps, #1000
+ 'abort_early': True,
+ 'learning_rate': FLAGS.cw_lr,
+ 'batch_size': batch_size,
+ 'confidence': 0,
+ 'initial_const': FLAGS.cw_c,
+ 'clip_min': 0,
+ 'clip_max': 255
+ }
+
+ # Madry doesn't divide by 255
+ x_train *= 255
+ x_test *= 255
+ if pgd_train is not None:
+ pgd_train *= 255
+ if pgd_test is not None:
+ pgd_test *= 255
+
+ print('x_train amin={} amax={}'.format(np.amin(x_train), np.amax(x_train)))
+ print('x_test amin={} amax={}'.format(np.amin(x_test), np.amax(x_test)))
+
+ print('clip_min : {}, clip_max : {} >> CHECK WITH WHICH VALUES THE CLASSIFIER WAS PRETRAINED !!! <<'
+ .format(pgd_params['clip_min'], pgd_params['clip_max']))
+
+ rng = np.random.RandomState() # [2017, 8, 30]
+ debug_dict = dict() if FLAGS.save_debug_dict else None
+
+ def do_eval(preds, x_set, y_set, report_key, is_adv=None, predictor=None, x_adv=None):
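+ # Evaluate accuracy on x_set; when a predictor (the detector coroutine) is given, also report the
+ # detection rate and detector-corrected accuracy; if x_adv is given, the inputs are first replaced
+ # by adversarial examples computed from that tensor.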
+ if predictor is None:
+ acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
+ else:
+ do_eval(preds, x_set, y_set, report_key, is_adv=is_adv)
+ if x_adv is not None:
+ x_set_adv, = batch_eval(sess, [x], [x_adv], [x_set], batch_size=batch_size)
+ assert x_set.shape == x_set_adv.shape
+ x_set = x_set_adv
+ n_batches = math.ceil(x_set.shape[0] / batch_size)
+ p_set, p_det = np.concatenate([predictor.send(x_set[b*batch_size:(b+1)*batch_size]) for b in tqdm.trange(n_batches)]).T
+ acc = np.equal(p_set, y_set[:len(p_set)].argmax(-1)).mean()
+ # if is_adv:
+ # import IPython ; IPython.embed() ; exit(1)
+ if FLAGS.save_debug_dict:
+ debug_dict['x_set'] = x_set
+ debug_dict['y_set'] = y_set
+ ddfn = 'logs/debug_dict_{}.pkl'.format('adv' if is_adv else 'clean')
+ if not os.path.exists(ddfn):
+ with open(ddfn, 'wb') as f:
+ pickle.dump(debug_dict, f)
+ debug_dict.clear()
+ if is_adv is None:
+ report_text = None
+ elif is_adv:
+ report_text = 'adversarial'
+ else:
+ report_text = 'legitimate'
+ if report_text:
+ print('Test accuracy on %s examples %s: %0.4f' % (report_text, 'with correction' if predictor is not None else 'without correction', acc))
+ if is_adv is not None:
+ label = 'test_acc_{}_{}'.format(report_text, 'corrected' if predictor else 'uncorrected')
+ swriter.add_scalar(label, acc)
+ if predictor is not None:
+ detect = np.equal(p_det, is_adv).mean()
+ label = 'test_det_{}_{}'.format(report_text, 'corrected' if predictor else 'uncorrected')
+ print(label, detect)
+ swriter.add_scalar(label, detect)
+ label = 'test_dac_{}_{}'.format(report_text, 'corrected' if predictor else 'uncorrected')
+ swriter.add_scalar(label, np.equal(p_set, y_set[:len(p_set)].argmax(-1))[np.equal(p_det, is_adv)].mean())
+
+ return acc
+
+ if clean_train:
+ if architecture == 'ConvNet':
+ model = ModelAllConvolutional('model1', nb_classes, nb_filters,
+ input_shape=[32, 32, 3])
+ elif architecture == 'ResNet':
+ model = ResNet(scope='ResNet')
+ else:
+ raise Exception('Specify valid classifier architecture!')
+
+ preds = model.get_logits(x)
+ loss = CrossEntropy(model, smoothing=label_smoothing)
+
+ if load_model:
+ model_name = 'naturally_trained'
+ if FLAGS.load_adv_trained:
+ model_name = 'adv_trained'
+ if ckpt_dir != 'None':
+ ckpt = tf.train.get_checkpoint_state(os.path.join(
+ os.path.expanduser(ckpt_dir), model_name))
+ else:
+ ckpt = tf.train.get_checkpoint_state(
+ './models/' + model_name)
+ ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
+
+ saver = tf.train.Saver(var_list=dict((v.name.split('/', 1)[1].split(':')[0], v) for v in tf.global_variables()))
+ saver.restore(sess, ckpt_path)
+ print('\nMODEL SUCCESSFULLY LOADED from : {}'.format(ckpt_path))
+
+ initialize_uninitialized_global_variables(sess)
+
+ else:
+ def evaluate():
+ do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False)
+
+ train(sess, loss, None, None,
+ dataset_train=dataset_train, dataset_size=dataset_size,
+ evaluate=evaluate, args=train_params, rng=rng,
+ var_list=model.get_params())
+
+ logits_op = preds.op
+ while logits_op.type != 'MatMul':
+ logits_op = logits_op.inputs[0].op
+ latent_x_tensor, weights = logits_op.inputs
+ logits_tensor = preds
+
+ nb_classes = weights.shape[-1].value
+
+ if not FLAGS.save_pgd_samples:
+ noise_eps = FLAGS.noise_eps.split(',')
+ if FLAGS.noise_eps_detect is None:
+ FLAGS.noise_eps_detect = FLAGS.noise_eps
+ noise_eps_detect = FLAGS.noise_eps_detect.split(',')
+ if pgd_train is not None:
+ pgd_train = pgd_train[:FLAGS.n_collect]
+ if not FLAGS.passthrough:
+ predictor = tf_robustify.collect_statistics(x_train[:FLAGS.n_collect], y_train[:FLAGS.n_collect], x, sess, logits_tensor=logits_tensor, latent_x_tensor=latent_x_tensor, weights=weights, nb_classes=nb_classes, p_ratio_cutoff=FLAGS.p_ratio_cutoff, noise_eps=noise_eps, noise_eps_detect=noise_eps_detect, pgd_eps=pgd_params['eps'], pgd_lr=pgd_params['eps_iter'] / pgd_params['eps'], pgd_iters=pgd_params['nb_iter'], save_alignments_dir='logs/stats' if FLAGS.save_alignments else None, load_alignments_dir=os.path.expanduser('~/data/advhyp/madry/stats') if FLAGS.load_alignments else None, clip_min=pgd_params['clip_min'], clip_max=pgd_params['clip_max'], batch_size=batch_size, num_noise_samples=FLAGS.num_noise_samples, debug_dict=debug_dict, debug=FLAGS.debug, targeted=False, pgd_train=pgd_train, fit_classifier=FLAGS.fit_classifier, clip_alignments=FLAGS.clip_alignments, just_detect=FLAGS.just_detect)
+ else:
+ def _predictor():
+ _x = yield
+ while _x is not None:
+ _y = sess.run(preds, {x: _x}).argmax(-1)
+ _x = yield np.stack((_y, np.zeros_like(_y)), -1)
+ predictor = _predictor()
+ next(predictor)
+ if FLAGS.save_alignments:
+ exit(0)
+
+ # Evaluate the accuracy of the model on clean examples
+ acc_clean = do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False, predictor=predictor)
+
+ # Initialize the PGD attack object and graph
+ if FLAGS.attack == 'pgd':
+ pgd = MadryEtAl(model, sess=sess)
+ adv_x = pgd.generate(x, **pgd_params)
+ elif FLAGS.attack == 'cw':
+ cw = CarliniWagnerL2(model, sess=sess)
+ adv_x = cw.generate(x, **cw_params)
+ elif FLAGS.attack == 'mean':
+ pgd = MadryEtAl(model, sess=sess)
+ mean_eps = FLAGS.mean_eps * FLAGS.eps
+ def _attack_mean(x):
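+ # Run PGD from several noisy copies of x and clip back into the eps-ball around x;
+ # the resulting perturbed copies are averaged below.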
+ x_many = tf.tile(x[None], (FLAGS.mean_samples, 1, 1, 1))
+ x_noisy = x_many + tf.random_uniform(x_many.shape, -mean_eps, mean_eps)
+ x_noisy = tf.clip_by_value(x_noisy, 0, 255)
+ x_pgd = pgd.generate(x_noisy, **pgd_params)
+ x_clip = tf.minimum(x_pgd, x_many + FLAGS.eps)
+ x_clip = tf.maximum(x_clip, x_many - FLAGS.eps)
+ x_clip = tf.clip_by_value(x_clip, 0, 255)
+ return x_clip
+ adv_x = tf.map_fn(_attack_mean, x)
+ adv_x = tf.reduce_mean(adv_x, 1)
+
+
+ preds_adv = model.get_logits(adv_x)
+
+ if FLAGS.save_pgd_samples:
+ for ds, y, name in ((x_train, y_train, 'train'), (x_test, y_test, 'test')):
+ train_batches = math.ceil(len(ds) / FLAGS.batch_size)
+ train_pgd = np.concatenate([sess.run(adv_x, {x: ds[b*FLAGS.batch_size:(b+1)*FLAGS.batch_size]}) for b in tqdm.trange(train_batches)])
+ np.save('logs/{}_clean.npy'.format(name), ds / 255.)
+ np.save('logs/{}_y.npy'.format(name), y)
+ train_pgd /= 255.
+ np.save('logs/{}_pgd.npy'.format(name), train_pgd)
+ exit(0)
+
+ # Evaluate the accuracy of the model on adversarial examples
+ if not FLAGS.load_pgd_test_samples:
+ acc_pgd = do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True, predictor=predictor, x_adv=adv_x)
+ else:
+ acc_pgd = do_eval(preds, pgd_test, y_test, 'clean_train_adv_eval', True, predictor=predictor)
+ swriter.add_scalar('test_acc_mean', (acc_clean + acc_pgd) / 2., 0)
+
+ print('Repeating the process, using adversarial training')
+
+ exit(0)
+ # Create a new model and train it to be robust to MadryEtAl
+ if architecture == 'ConvNet':
+ model2 = ModelAllConvolutional('model2', nb_classes, nb_filters,
+ input_shape=[32, 32, 3])
+ elif architecture == 'ResNet':
+ model2 = ResNet(scope='ResNet2')
+ else:
+ raise Exception('Specify valid classifier architecture!')
+
+ pgd2 = MadryEtAl(model2, sess=sess)
+
+ def attack(x):
+ return pgd2.generate(x, **pgd_params)
+
+ loss2 = CrossEntropy(model2, smoothing=label_smoothing, attack=attack)
+ preds2 = model2.get_logits(x)
+ adv_x2 = attack(x)
+
+ if not backprop_through_attack:
+ # For some attacks, enabling this flag increases the cost of
+ # training, but gives the defender the ability to anticipate how
+ # the attacker will change their strategy in response to updates to
+ # the defender's parameters.
+ adv_x2 = tf.stop_gradient(adv_x2)
+ preds2_adv = model2.get_logits(adv_x2)
+
+ if load_model:
+ if ckpt_dir != 'None':
+ ckpt = tf.train.get_checkpoint_state(os.path.join(
+ os.path.expanduser(ckpt_dir), 'adv_trained'))
+ else:
+ ckpt = tf.train.get_checkpoint_state('./models/adv_trained')
+ ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
+
+ assert ckpt_path and tf_model_load(
+ sess, file_path=ckpt_path), '\nMODEL LOADING FAILED'
+ print('\nMODEL SUCCESSFULLY LOADED from : {}'.format(ckpt_path))
+
+ initialize_uninitialized_global_variables(sess)
+
+ else:
+
+ def evaluate2():
+ # Accuracy of adversarially trained model on legitimate test inputs
+ do_eval(preds2, x_test, y_test, 'adv_train_clean_eval', False)
+ # Accuracy of the adversarially trained model on adversarial
+ # examples
+ do_eval(preds2_adv, x_test, y_test, 'adv_train_adv_eval', True)
+
+ # Perform and evaluate adversarial training
+ train(sess, loss2, None, None,
+ dataset_train=dataset_train, dataset_size=dataset_size,
+ evaluate=evaluate2, args=train_params, rng=rng,
+ var_list=model2.get_params())
+
+ # Evaluate model
+ do_eval(preds2, x_test, y_test, 'adv_train_clean_eval', False)
+ do_eval(preds2_adv, x_test, y_test, 'adv_train_adv_eval', True)
+
+ return report
+
+
+def main(argv=None):
+ from cleverhans_tutorials import check_installation
+ check_installation(__file__)
+
+ cifar10_tutorial(nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
+ learning_rate=FLAGS.learning_rate,
+ clean_train=FLAGS.clean_train,
+ architecture=FLAGS.architecture,
+ load_model=FLAGS.load_model,
+ ckpt_dir=FLAGS.ckpt_dir,
+ backprop_through_attack=FLAGS.backprop_through_attack,
+ nb_filters=FLAGS.nb_filters,
+ test_end=FLAGS.test_size)
+
+
+if __name__ == '__main__':
+ flags.DEFINE_integer('nb_filters', NB_FILTERS,
+ 'Model size multiplier')
+ flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
+ 'Number of epochs to train model')
+ flags.DEFINE_integer('batch_size', BATCH_SIZE,
+ 'Size of training batches')
+ flags.DEFINE_float('learning_rate', LEARNING_RATE,
+ 'Learning rate for training')
+ flags.DEFINE_string('architecture', ARCHITECTURE,
+ 'Architecture [ResNet, ConvNet]')
+ flags.DEFINE_bool('load_model', LOAD_MODEL, 'Load Pretrained Model')
+ flags.DEFINE_string('ckpt_dir', '~/models/advhyp/madry/models',
+ 'ckpt_dir [path_to_checkpoint_dir]')
+ flags.DEFINE_bool('clean_train', CLEAN_TRAIN, 'Train on clean examples')
+ flags.DEFINE_bool('backprop_through_attack', BACKPROP_THROUGH_ATTACK,
+ ('If True, backprop through adversarial example '
+ 'construction process during adversarial training'))
+ flags.DEFINE_integer('n_collect', 10000, '')
+ flags.DEFINE_float('p_ratio_cutoff', .999, '')
+ flags.DEFINE_float('eps', 8., '')
+ flags.DEFINE_string('noise_eps', 'n18.0,n24.0,n30.0', '')
+ flags.DEFINE_string('noise_eps_detect', 'n30.0', '')
+ flags.DEFINE_bool('debug', False, 'for debugging')
+ flags.DEFINE_integer('test_size', 10000, '')
+ flags.DEFINE_bool('save_alignments', False, '')
+ flags.DEFINE_bool('load_alignments', False, '')
+ flags.DEFINE_integer('num_noise_samples', 256, '')
+ flags.DEFINE_integer('rep', 0, '')
+ flags.DEFINE_bool('save_debug_dict', False, '')
+ flags.DEFINE_bool('save_pgd_samples', False, '')
+ flags.DEFINE_string('load_pgd_train_samples', None, '')
+ flags.DEFINE_string('load_pgd_test_samples', None, '')
+ flags.DEFINE_bool('fit_classifier', True, '')
+ flags.DEFINE_bool('clip_alignments', True, '')
+ flags.DEFINE_string('attack', 'pgd', '')
+ flags.DEFINE_bool('passthrough', False, '')
+ flags.DEFINE_integer('cw_steps', 300, '')
+ flags.DEFINE_integer('cw_search_steps', 20, '')
+ flags.DEFINE_float('cw_lr', 1e-1, '')
+ flags.DEFINE_float('cw_c', 1e-4, '')
+ flags.DEFINE_bool('just_detect', False, '')
+ flags.DEFINE_integer('mean_samples', 16, '')
+ flags.DEFINE_float('mean_eps', .1, '')
+ flags.DEFINE_bool('load_adv_trained', False, '')
+
+ tf.app.run()
diff --git a/case_studies/odds/original/tf_robustify/__init__.py b/case_studies/odds/original/tf_robustify/__init__.py
new file mode 100644
index 0000000..6e7faf8
--- /dev/null
+++ b/case_studies/odds/original/tf_robustify/__init__.py
@@ -0,0 +1,522 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+
+import os
+import numpy as np
+import tqdm
+import math
+import scipy.stats
+from absl import logging
+from collections import OrderedDict
+from sklearn.linear_model import LogisticRegression
+import itertools as itt
+from types import SimpleNamespace
+
+
+logging.set_verbosity(logging.INFO)
+
+
+def collect_statistics(x_train, y_train, x_ph=None, sess=None,
+ latent_and_logits_fn_th=None, latent_x_tensor=None, logits_tensor=None,
+ nb_classes=None, weights=None, cuda=True, targeted=False, noise_eps=8e-3,
+ noise_eps_detect=None, num_noise_samples=256, batch_size=256,
+ pgd_eps=8/255, pgd_lr=1/4, pgd_iters=10, clip_min=-1., clip_max=1.,
+ p_ratio_cutoff=20., save_alignments_dir=None, load_alignments_dir=None,
+ debug_dict=None, debug=False, clip_alignments=True, pgd_train=None,
+ fit_classifier=False, just_detect=False):
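+ # Collect the noise/weight-alignment statistics used by the "Odds are odd" test on (x_train, y_train);
+ # judging from its use in tensorflow_example.py, it returns a coroutine that, for each batch sent to it,
+ # yields (corrected prediction, detection flag) pairs.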
+ assert len(x_train) == len(y_train)
+ if pgd_train is not None:
+ assert len(pgd_train) == len(x_train)
+
+ if x_ph is not None:
+ import tensorflow as tf
+ backend = 'tf'
+ assert sess is not None
+ assert latent_and_logits_fn_th is None
+ assert latent_x_tensor is not None
+ assert logits_tensor is not None
+ assert nb_classes is not None
+ assert weights is not None
+ else:
+ import torch as th
+ backend = 'th'
+ assert x_ph is None
+ assert sess is None
+ assert latent_and_logits_fn_th is not None
+ assert latent_x_tensor is None
+ assert logits_tensor is None
+ assert nb_classes is not None
+ assert weights is not None
+ cuda = th.cuda.is_available() and cuda
+
+ def latent_fn_th(x):
+ return to_np(latent_and_logits_fn_th(to_th(x))[0])
+
+ def logits_fn_th(x):
+ return latent_and_logits_fn_th(x)[1]
+
+ def to_th(x, dtype=np.float32):
+ x = th.from_numpy(x.astype(dtype))
+ if cuda:
+ x = x.cuda()
+ return x
+
+ def to_np(x):
+ return x.detach().cpu().numpy()
+
+ if debug:
+ logging.set_verbosity(logging.DEBUG)
+
+ try:
+ len(noise_eps)
+ if isinstance(noise_eps, str):
+ raise TypeError()
+ except TypeError:
+ noise_eps = [noise_eps]
+
+ if noise_eps_detect is None:
+ noise_eps_detect = noise_eps
+
+ try:
+ len(noise_eps_detect)
+ if isinstance(noise_eps_detect, str):
+ raise TypeError()
+ except TypeError:
+ noise_eps_detect = [noise_eps_detect]
+
+ noise_eps_all = set(noise_eps + noise_eps_detect)
+
+ pgd_lr = pgd_eps * pgd_lr
+ n_batches = math.ceil(x_train.shape[0] / batch_size)
+
+ if len(y_train.shape) == 2:
+ y_train = y_train.argmax(-1)
+
+ if backend == 'tf':
+ y_ph = tf.placeholder(tf.int64, [None])
+ loss_tensor = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_tensor, labels=y_ph))
+ pgd_gradients = tf.gradients(loss_tensor, x_ph)[0]
+ preds_tensor = tf.arg_max(logits_tensor, -1)
+ else:
+ loss_fn = th.nn.CrossEntropyLoss(reduction='sum')
+ if cuda:
+ loss_fn = loss_fn.cuda()
+
+ def get_noise_samples(x, num_samples, noise_eps, clip=False):
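+ # noise_eps is either a float (uniform noise with that magnitude) or a string such as 'n30.0',
+ # 'u8.0' or 's8.0' selecting normal, uniform or sign noise with the given magnitude.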
+ if isinstance(noise_eps, float):
+ kind = 'u'
+ eps = noise_eps
+ else:
+ kind, eps = noise_eps[:1], float(noise_eps[1:])
+
+ if isinstance(x, np.ndarray):
+ if kind == 'u':
+ noise = np.random.uniform(-1., 1., size=(num_samples,) + x.shape[1:])
+ elif kind == 'n':
+ noise = np.random.normal(0., 1., size=(num_samples,) + x.shape[1:])
+ elif kind == 's':
+ noise = np.random.uniform(-1., 1., size=(num_samples,) + x.shape[1:])
+ noise = np.sign(noise)
+ x_noisy = x + noise * eps
+ if clip:
+ x_noisy = x_noisy.clip(clip_min, clip_max)
+ elif backend == 'tf':
+ shape = (num_samples,) + tuple(s.value for s in x.shape[1:])
+ if kind == 'u':
+ noise = tf.random_uniform(shape=shape, minval=-1., maxval=1.)
+ elif kind == 'n':
+ noise = tf.random_normal(shape=shape, mean=0., stddev=1.)
+ elif kind == 's':
+ noise = tf.random_uniform(shape=shape, minval=-1., maxval=1.)
+ noise = tf.sign(noise)
+ x_noisy = x + noise * eps
+ if clip:
+ x_noisy = tf.clip_by_value(x_noisy, clip_min, clip_max)
+ elif backend == 'th':
+ if kind == 'u':
+ noise = x.new_zeros((num_samples,) + x.shape[1:]).uniform_(-1., 1.)
+ elif kind == 'n':
+ noise = x.new_zeros((num_samples,) + x.shape[1:]).normal_(0., 1.)
+ elif kind == 's':
+ noise = x.new_zeros((num_samples,) + x.shape[1:]).uniform_(-1., 1.)
+ noise.sign_()
+ x_noisy = x + noise * eps
+ if clip:
+ x_noisy.clamp_(clip_min, clip_max)
+ return x_noisy
+
+ def attack_pgd(x, x_pred, targeted=False):
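+ # PGD from a randomly perturbed starting point: signed-gradient steps that increase the loss
+ # w.r.t. the labels in x_pred (or decrease it when targeted), clipped to the eps-ball around x.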
+ x_pgd = get_noise_samples(x, x.shape[0], pgd_eps, clip=True)
+
+ for _ in range(pgd_iters):
+ if backend == 'tf':
+ x_grads = sess.run(pgd_gradients, {x_ph: x_pgd, y_ph: x_pred})
+ else:
+ x_th = to_th(x_pgd).requires_grad_(True)
+ x_grads = to_np(th.autograd.grad(loss_fn(logits_fn_th(x_th), to_th(x_pred, np.int64)), [x_th])[0])
+
+ x_pgd += pgd_lr * np.sign(x_grads) * (-2. * (targeted - 1/2))
+ x_pgd = x_pgd.clip(x - pgd_eps, x + pgd_eps)
+ x_pgd = x_pgd.clip(clip_min, clip_max)
+ if debug:
+ break
+ return x_pgd
+
+ def get_latent_and_pred(x):
+ if backend == 'tf':
+ return sess.run([latent_x_tensor, preds_tensor], {x_ph: x})
+ else:
+ l, p = map(to_np, latent_and_logits_fn_th(to_th(x)))
+ return l, p.argmax(-1)
+
+ x_preds_clean = []
+ x_train_pgd = []
+ x_preds_pgd = []
+ latent_clean = []
+ latent_pgd = []
+
+ if not load_alignments_dir:
+ for b in tqdm.trange(n_batches, desc='creating adversarial samples'):
+ x_batch = x_train[b*batch_size:(b+1)*batch_size]
+ lc, pc = get_latent_and_pred(x_batch)
+ x_preds_clean.append(pc)
+ latent_clean.append(lc)
+
+ if not just_detect:
+ if pgd_train is not None:
+ x_pgd = pgd_train[b*batch_size:(b+1)*batch_size]
+ else:
+ if targeted:
+ x_pgd = np.stack([attack_pgd(x_batch, np.ones_like(pc) * i, targeted=True) for i in range(nb_classes)], 1)
+ else:
+ x_pgd = attack_pgd(x_batch, pc, targeted=False)
+ x_train_pgd.append(x_pgd)
+
+ if targeted:
+ pps, lps = [], []
+ for i in range(x_pgd.shape[1]):
+ lp, pp = get_latent_and_pred(x_pgd[:, i])
+ pps.append(pp)
+ lps.append(lp)
+ x_preds_pgd.append(np.stack(pps, 1))
+ latent_pgd.append(np.stack(lps, 1))
+ else:
+ lp, pp = get_latent_and_pred(x_pgd)
+ x_preds_pgd.append(pp)
+ latent_pgd.append(lp)
+
+ x_preds_clean, latent_clean = map(np.concatenate, (x_preds_clean, latent_clean))
+ if not just_detect:
+ x_train_pgd, x_preds_pgd, latent_pgd = map(np.concatenate, (x_train_pgd, x_preds_pgd, latent_pgd))
+
+ valid_idcs = []
+ if not just_detect:
+ for i, (pc, pp, y) in enumerate(zip(x_preds_clean, x_preds_pgd, y_train)):
+ if y == pc and pc != pp:
+ # if y == pc:
+ valid_idcs.append(i)
+ else:
+ valid_idcs = list(range(len(x_preds_clean)))
+
+ logging.info('valid idcs ratio: {}'.format(len(valid_idcs) / len(y_train)))
+ if targeted:
+ for i, xpp in enumerate(x_preds_pgd.T):
+ logging.info('pgd success class {}: {}'.format(i, (xpp == i).mean()))
+
+ x_train, y_train, x_preds_clean, latent_clean = (a[valid_idcs] for a in (x_train, y_train, x_preds_clean, latent_clean))
+ if not just_detect:
+ x_train_pgd, x_preds_pgd, latent_pgd = (a[valid_idcs] for a in (x_train_pgd, x_preds_pgd, latent_pgd))
+
+ if backend == 'tf':
+ weights = tf.transpose(weights, (1, 0))
+ weights_np = sess.run(weights)
+ else:
+ weights_np = weights.cpu().numpy()
+ big_memory = weights.shape[0] > 20
+ logging.info('BIG MEMORY: {}'.format(big_memory))
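+ # wdiffs holds the pairwise differences between the rows of the final-layer weight
+ # matrix (wdiffs[i, j] = w_j - w_i); these directions determine how the logit gap between
+ # two classes changes and serve as projection directions for the alignment statistics
+ # below. For very wide output layers ('big memory') only the rows relevant to the current
+ # prediction are formed on the fly instead of precomputing the full tensor.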
+ if not big_memory:
+ wdiffs = weights[None, :, :] - weights[:, None, :]
+ wdiffs_np = weights_np[None, :, :] - weights_np[:, None, :]
+
+ if backend == 'tf':
+ # lat_ph = tf.placeholder(tf.float32, [weights.shape[-1]])
+ # pred_ph = tf.placeholder(tf.int64)
+ # if big_memory:
+ # wdiffs_relevant = weights[pred_ph, None] - weights
+ # else:
+ # wdiffs_relevant = wdiffs[:, pred_ph]
+ # lat_diff_tensor = lat_ph[None] - latent_x_tensor
+ # alignments_tensor = tf.matmul(lat_diff_tensor, wdiffs_relevant, transpose_b=True)
+
+ # def _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps):
+ # x_noisy = get_noise_samples(x[None], num_noise_samples, noise_eps=neps, clip=clip_alignments)
+ # return sess.run(alignments_tensor, {x_ph: x_noisy, lat_ph: lat, pred_ph: pred})[:, idx_wo_pc]
+ lat_ph = tf.placeholder(tf.float32, [weights.shape[-1]])
+ wdiffs_relevant_ph = tf.placeholder(tf.float32, [weights.shape[-1], nb_classes])
+ lat_diff_tensor = lat_ph[None] - latent_x_tensor
+ alignments_tensor = tf.matmul(lat_diff_tensor, wdiffs_relevant_ph)
+
+ def _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps):
+ if big_memory:
+ wdiffs_relevant = weights_np[pred, None] - weights_np
+ else:
+ wdiffs_relevant = wdiffs_np[:, pred]
+ x_noisy = get_noise_samples(x[None], num_noise_samples, noise_eps=neps, clip=clip_alignments)
+ # return sess.run(alignments_tensor, {x_ph: x_noisy, lat_ph: lat, wdiffs_relevant_ph:wdiffs_relevant.T})[:, idx_wo_pc]
+ lat_x = sess.run(latent_x_tensor, {x_ph: x_noisy})
+ lat_diffs = lat[None] - lat_x
+ return np.matmul(lat_diffs, wdiffs_relevant.T)[:, idx_wo_pc]
+
+ else:
+
+ def _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps):
+ x, lat = map(to_th, (x, lat))
+ if big_memory:
+ wdiffs_relevant = weights[pred, None] - weights
+ else:
+ wdiffs_relevant = wdiffs[:, pred]
+ x_noisy = get_noise_samples(x[None], num_noise_samples, noise_eps=neps, clip=clip_alignments)
+ lat_noisy, _ = latent_and_logits_fn_th(x_noisy)
+ lat_diffs = lat[None] - lat_noisy
+ return to_np(th.matmul(lat_diffs, wdiffs_relevant.transpose(1, 0)))[:, idx_wo_pc]
+
+ if debug_dict is not None:
+ debug_dict['weights'] = weights_np
+ debug_dict['wdiffs'] = wdiffs_np
+
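+ # An 'alignment' measures how the latent representation moves along the decision-boundary
+ # directions when the input is perturbed with noise: noisy copies of x are fed through the
+ # network, and the change of the latent code is projected onto the weight differences
+ # between the predicted class and every other class.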
+ def _compute_alignments(x, lat, pred, source=None, noise_eps=noise_eps_all):
+ if source is None:
+ idx_wo_pc = [i for i in range(nb_classes) if i != pred]
+ assert len(idx_wo_pc) == nb_classes - 1
+ else:
+ idx_wo_pc = source
+
+ alignments = OrderedDict()
+ for neps in noise_eps:
+ alignments[neps] = _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps)
+ # if debug_dict is not None:
+ # debug_dict.setdefault('lat', []).append(lat)
+ # debug_dict.setdefault('lat_noisy', []).append(lat_noisy)
+ # debug_dict['weights'] = weights
+ # debug_dict['wdiffs'] = wdiffs
+ return alignments, idx_wo_pc
+
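+ # Collect alignment statistics keyed by (source class, predicted class, noise eps).
+ # Clean data only fills the diagonal keys (c, c, eps); adversarial data fills the
+ # off-diagonal pairs. Depending on fit_classifier, either (mean, std) summaries or the
+ # raw values are kept, and results can be cached via save/load_alignments_dir.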
+ def _collect_wdiff_stats(x_set, latent_set, x_preds_set, clean, save_alignments_dir=None, load_alignments_dir=None):
+ if clean:
+ wdiff_stats = {(tc, tc, e): [] for tc in range(nb_classes) for e in noise_eps_all}
+ name = 'clean'
+ else:
+ wdiff_stats = {(sc, tc, e): [] for sc in range(nb_classes) for tc in range(nb_classes) for e in noise_eps_all if sc != tc}
+ name = 'adv'
+
+ def _compute_stats_from_values(v, raw=False):
+ if not v.shape:
+ return None
+ v = v.mean(1)
+ if debug:
+ v = np.concatenate([v, v*.5, v*1.5])
+ if clean or not fit_classifier:
+ if v.shape[0] < 3:
+ return None
+ return v.mean(0), v.std(0)
+ else:
+ return v
+
+ for neps in noise_eps_all:
+ neps_keys = {k for k in wdiff_stats.keys() if k[-1] == neps}
+ loading = load_alignments_dir
+ if loading:
+ for k in neps_keys:
+ fn = 'alignments_{}_{}.npy'.format(name, str(k))
+ load_fn = os.path.join(load_alignments_dir, fn)
+ if not os.path.exists(load_fn):
+ loading = False
+ break
+ v = np.load(load_fn)
+ wdiff_stats[k] = _compute_stats_from_values(v)
+ logging.info('loading alignments from {} for {}'.format(load_alignments_dir, neps))
+ if not loading:
+ for x, lc, pc, pcc in tqdm.tqdm(zip(x_set, latent_set, x_preds_set, x_preds_clean), total=len(x_set), desc='collecting stats for {}'.format(neps)):
+ if len(lc.shape) == 2:
+ alignments = []
+ for i, (xi, lci, pci) in enumerate(zip(x, lc, pc)):
+ if i == pcc:
+ continue
+ alignments_i, _ = _compute_alignments(xi, lci, i, source=pcc, noise_eps=[neps])
+ for e, a in alignments_i.items():
+ wdiff_stats[(pcc, i, e)].append(a)
+ else:
+ alignments, idx_wo_pc = _compute_alignments(x, lc, pc, noise_eps=[neps])
+ for e, a in alignments.items():
+ wdiff_stats[(pcc, pc, e)].append(a)
+
+ saving = save_alignments_dir and not loading
+ if saving:
+ logging.info('saving alignments to {} for {}'.format(save_alignments_dir, neps))
+ if debug:
+ some_v = None
+ for k in neps_keys:
+ some_v = some_v or wdiff_stats[k]
+ for k in neps_keys:
+ wdiff_stats[k] = wdiff_stats[k] or some_v
+
+ for k in neps_keys:
+ wdsk = wdiff_stats[k]
+ if len(wdsk):
+ wdiff_stats[k] = np.stack(wdsk)
+ else:
+ wdiff_stats[k] = np.array(None)
+ if saving:
+ fn = 'alignments_{}_{}.npy'.format(name, str(k))
+ save_fn = os.path.join(save_alignments_dir, fn)
+ os.makedirs(os.path.dirname(save_fn), exist_ok=True)
+ wds = wdiff_stats[k]
+ np.save(save_fn, wds)
+ wdiff_stats[k] = _compute_stats_from_values(wdiff_stats[k])
+ return wdiff_stats
+
+ save_alignments_dir_clean = os.path.join(save_alignments_dir, 'clean') if save_alignments_dir else None
+ save_alignments_dir_pgd = os.path.join(save_alignments_dir, 'pgd') if save_alignments_dir else None
+ load_alignments_dir_clean = os.path.join(load_alignments_dir, 'clean') if load_alignments_dir else None
+ load_alignments_dir_pgd = os.path.join(load_alignments_dir, 'pgd') if load_alignments_dir else None
+ if load_alignments_dir:
+ load_alignments_dir_clean, load_alignments_dir_pgd = map(lambda s: '{}_{}'.format(s, 'clip' if clip_alignments else 'noclip'), (load_alignments_dir_clean, load_alignments_dir_pgd))
+ if save_alignments_dir:
+ save_alignments_dir_clean, save_alignments_dir_pgd = map(lambda s: '{}_{}'.format(s, 'clip' if clip_alignments else 'noclip'), (save_alignments_dir_clean, save_alignments_dir_pgd))
+ wdiff_stats_clean = _collect_wdiff_stats(x_train, latent_clean, x_preds_clean, clean=True, save_alignments_dir=save_alignments_dir_clean, load_alignments_dir=load_alignments_dir_clean)
+ if not just_detect:
+ wdiff_stats_pgd = _collect_wdiff_stats(x_train_pgd, latent_pgd, x_preds_pgd, clean=False, save_alignments_dir=save_alignments_dir_pgd, load_alignments_dir=load_alignments_dir_pgd)
+
+ if debug_dict is not None and False:
+ esizes = OrderedDict((k, []) for k in noise_eps_all)
+ for k, (mc, sc) in wdiff_stats_clean.items():
+ mp, sp = wdiff_stats_pgd[k]
+ esizes[k[-1]].append(np.abs(mp - mc) / ((sp + sc) / 2.))
+ debug_dict['effect_sizes'] = OrderedDict((k, np.array(v)) for k, v in esizes.items())
+
+ wdiff_stats_clean_detect = [np.stack([wdiff_stats_clean[(p, p, eps)] for eps in noise_eps_detect]) for p in range(nb_classes)]
+ wdiff_stats_clean_detect = [s.transpose((1, 0, 2)) if len(s.shape) == 3 else None for s in wdiff_stats_clean_detect]
+ wdiff_stats_pgd_classify = []
+ if not just_detect:
+ for tc in range(nb_classes):
+ tc_stats = []
+ for sc in range(nb_classes):
+ if sc == tc:
+ continue
+ sc_stats = [wdiff_stats_pgd[(sc, tc, eps)] for eps in noise_eps]
+ if sc_stats[0] is None:
+ tc_stats.append(None)
+ else:
+ tc_stats.append(np.stack(sc_stats, 1))
+ wdiff_stats_pgd_classify.append(tc_stats)
+
+ if fit_classifier:
+ logging.info('fitting classifier')
+ for tc in tqdm.trange(nb_classes):
+ tc_X = []
+ tc_Y = []
+ idx_wo_tc = [sc for sc in range(nb_classes) if sc != tc]
+ for i, sc in enumerate(idx_wo_tc):
+ sc_data = wdiff_stats_pgd_classify[tc][i]
+ if sc_data is not None:
+ sc_data = sc_data.reshape(sc_data.shape[0], -1)
+ for d in sc_data:
+ tc_X.append(d.ravel())
+ tc_Y.append(sc)
+ Y_unq = np.unique(tc_Y)
+ if len(Y_unq) == 0:
+ lr = SimpleNamespace(predict=lambda x, tc=tc: np.array(tc))  # bind tc at definition time to avoid late binding to the last loop value
+ elif len(Y_unq) == 1:
+ lr = SimpleNamespace(predict=lambda x, y=tc_Y[0]: np.array(y))  # likewise, bind the single observed label now
+ else:
+ tc_X = np.stack(tc_X)
+ tc_Y = np.array(tc_Y)
+ lr = LogisticRegression(solver='lbfgs', multi_class='multinomial', max_iter=1000)
+ lr.fit(tc_X, tc_Y)
+ wdiff_stats_pgd_classify[tc] = lr
+
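+ # From here on the function behaves as a coroutine: the caller primes it once with next()
+ # and then repeatedly send()s a batch of inputs, receiving back an array of shape
+ # (batch, 2) with the (possibly corrected) prediction and a detection flag per sample.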
+ batch = yield
+
+ while batch is not None:
+ batch_latent, batch_pred = get_latent_and_pred(batch)
+ if debug_dict is not None:
+ debug_dict.setdefault('batch_pred', []).append(batch_pred)
+ corrected_pred = []
+ detection = []
+ for b, lb, pb in zip(batch, batch_latent, batch_pred):
+ b_align, idx_wo_pb = _compute_alignments(b, lb, pb)
+ b_align_det = np.stack([b_align[eps] for eps in noise_eps_detect])
+ b_align = np.stack([b_align[eps] for eps in noise_eps])
+
+ wdsc_det_pb = wdiff_stats_clean_detect[pb]
+ if wdsc_det_pb is None:
+ z_hit = False
+ else:
+ wdm_det, wds_det = wdsc_det_pb
+ z_clean = (b_align_det - wdm_det[:, None]) / wds_det[:, None]
+ z_clean_mean = z_clean.mean(1)
+ z_cutoff = scipy.stats.norm.ppf(p_ratio_cutoff)
+ z_hit = z_clean_mean.mean(0).max(-1) > z_cutoff
+
+ if not just_detect:
+ if fit_classifier:
+ lr = wdiff_stats_pgd_classify[pb]
+ b_align = b_align.mean(1).reshape((1, -1))
+ lr_pred = lr.predict(b_align)
+ else:
+ wdp = wdiff_stats_pgd_classify[pb]
+ if wdp is None:
+ z_pgd_mode = None
+ else:
+ wdp_not_none_idcs = [i for i, w in enumerate(wdp) if w is not None]
+ if len(wdp_not_none_idcs) == 0:
+ z_pgd_mode = None
+ else:
+ wdp = np.stack([wdp[i] for i in wdp_not_none_idcs], 2)
+ idx_wo_pb_wdp = [idx_wo_pb[i] for i in wdp_not_none_idcs]
+ ssidx = np.arange(wdp.shape[-2])
+ wdp = wdp[:, :, ssidx, ssidx]
+ wdmp, wdsp = wdp
+ b_align = b_align[:, :, wdp_not_none_idcs]
+ z_pgd = (b_align - wdmp[:, None]) / wdsp[:, None]
+ z_pgd_mean = z_pgd.mean(1)
+ z_pgd_mode = scipy.stats.mode(z_pgd_mean.argmax(-1)).mode[0]
+ if z_hit:
+ if not just_detect:
+ if fit_classifier:
+ print(lr_pred)
+ pb = lr_pred.item()
+ else:
+ if z_pgd_mode is not None:
+ pb = idx_wo_pb_wdp[z_pgd_mode]
+ detection.append(True)
+ else:
+ detection.append(False)
+ if debug_dict is not None:
+ debug_dict.setdefault('b_align', []).append(b_align)
+ # debug_dict.setdefault('stats', []).append((wdm_det, wds_det, wdmp, wdsp))
+ # debug_dict.setdefault('p_ratio', []).append(p_ratio)
+ # debug_dict.setdefault('p_clean', []).append(p_clean)
+ # debug_dict.setdefault('p_pgd', []).append(p_pgd)
+ debug_dict.setdefault('z_clean', []).append(z_clean)
+ # debug_dict.setdefault('z_conf', []).append(z_conf)
+ # debug_dict.setdefault('z_pgdm', []).append(z_pgdm)
+ # debug_dict.setdefault('z_pgd', []).append(z_pgd)
+ corrected_pred.append(pb)
+ if debug_dict is not None:
+ debug_dict.setdefault('detection', []).append(detection)
+ debug_dict.setdefault('corrected_pred', []).append(corrected_pred)
+ batch = yield np.stack((corrected_pred, detection), -1)
diff --git a/case_studies/odds/original/torch_example.py b/case_studies/odds/original/torch_example.py
new file mode 100644
index 0000000..8b96322
--- /dev/null
+++ b/case_studies/odds/original/torch_example.py
@@ -0,0 +1,789 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python3
+
+import matplotlib
+matplotlib.use('Agg')
+
+import io
+from PIL import Image
+from matplotlib import pyplot as plt
+import os
+import sys
+import torch as th
+import torchvision as tv
+import torch.nn.functional as F
+from torch.autograd import Variable
+import math
+import tqdm
+from filelock import FileLock
+import threading
+import time
+import signal
+import numpy as np
+import itertools as itt
+import scipy.linalg
+import scipy.stats
+from scipy.spatial.distance import pdist, squareform
+import cifar_model
+from sklearn.decomposition import PCA
+from sklearn.metrics import confusion_matrix
+import tf_robustify
+import vgg
+import carlini_wagner_attack
+
+
+os.system("taskset -p 0xffffffff %d" % os.getpid())
+
+import sh
+sh.rm('-rf', 'logs')
+
+import logging
+logging.basicConfig(level=logging.INFO, stream=sys.stdout)
+
+from tensorboardX.writer import SummaryWriter
+swriter = SummaryWriter('logs')
+add_scalar_old = swriter.add_scalar
+
+def add_scalar_and_log(key, value, global_step=0):
+ logging.info('{}:{}: {}'.format(global_step, key, value))
+ add_scalar_old(key, value, global_step)
+
+swriter.add_scalar = add_scalar_and_log
+
+def str2bool(x):
+ return x.lower() == 'true'
+
+def new_inception_conv2d_forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ return F.relu(x, inplace=False)
+
+tv.models.inception.BasicConv2d.forward = new_inception_conv2d_forward
+
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--ds', default='cifar10')
+parser.add_argument('--model', default='cifar10')
+parser.add_argument('--batch_size', default=32, type=int)
+parser.add_argument('--eval_bs', default=256, type=int)
+parser.add_argument('--eval_batches', default=None, type=int)
+parser.add_argument('--epochs', default=50, type=int)
+parser.add_argument('--num_evals', default=20, type=int)
+parser.add_argument('--train_log_after', default=0, type=int)
+parser.add_argument('--stop_after', default=-1, type=int)
+parser.add_argument('--cuda', default=True, type=str2bool)
+parser.add_argument('--optim', default='sgd', type=str)
+parser.add_argument('--lr', default=1e-4, type=float)
+parser.add_argument('--attack_lr', default=.25, type=float)
+parser.add_argument('--eps', default=8/255, type=float)
+parser.add_argument('--eps_rand', default=None, type=float)
+parser.add_argument('--eps_eval', default=None, type=float)
+parser.add_argument('--rep', default=0, type=int)
+parser.add_argument('--img_size', default=32, type=int)
+parser.add_argument('--iters', default=10, type=int)
+parser.add_argument('--noise_eps', default='n0.01,s0.01,u0.01,n0.02,s0.02,u0.02,s0.03,n0.03,u0.03', type=str)
+parser.add_argument('--noise_eps_detect', default='n0.003,s0.003,u0.003,n0.005,s0.005,u0.005,s0.008,n0.008,u0.008', type=str)
+parser.add_argument('--clip_alignments', default=True, type=str2bool)
+parser.add_argument('--pgd_strength', default=1., type=float)
+
+parser.add_argument('--debug', default=False, type=str2bool)
+parser.add_argument('--mode', default='eval', type=str)
+parser.add_argument('--constrained', default=True, type=str2bool)
+parser.add_argument('--clamp_attack', default=False, type=str2bool)
+parser.add_argument('--clamp_uniform', default=False, type=str2bool)
+parser.add_argument('--train_adv', default=False, type=str2bool)
+parser.add_argument('--wdiff_samples', default=256, type=int)
+parser.add_argument('--maxp_cutoff', default=.999, type=float)
+parser.add_argument('--collect_targeted', default=False, type=str2bool)
+parser.add_argument('--n_collect', default=10000, type=int)
+parser.add_argument('--save_alignments', default=False, type=str2bool)
+parser.add_argument('--load_alignments', default=False, type=str2bool)
+parser.add_argument('--save_pgd_samples', default=False, type=str2bool)
+parser.add_argument('--load_pgd_train_samples', default=None, type=str)
+parser.add_argument('--load_pgd_test_samples', default=None, type=str)
+parser.add_argument('--fit_classifier', default=True, type=str2bool)
+parser.add_argument('--just_detect', default=False, type=str2bool)
+parser.add_argument('--attack', default='pgd', type=str)
+parser.add_argument('--cw_confidence', default=0, type=float)
+parser.add_argument('--cw_c', default=1e-4, type=float)
+parser.add_argument('--cw_lr', default=1e-4, type=float)
+parser.add_argument('--cw_steps', default=300, type=int)
+parser.add_argument('--cw_search_steps', default=10, type=int)
+parser.add_argument('--mean_samples', default=16, type=int)
+parser.add_argument('--mean_eps', default=.1, type=float)
+
+args = parser.parse_args()
+
+args.cuda = args.cuda and th.cuda.is_available()
+
+args.eps_rand = args.eps_rand or args.eps
+
+args.eps_eval = args.eps_eval or args.eps
+
+args.mean_eps = args.mean_eps * args.eps_eval
+
+
+def check_pid():
+ while os.getppid() != 1:
+ time.sleep(.1)
+ os.kill(os.getpid(), signal.SIGKILL)
+
+
+def init_worker(worker_id):
+ thread = threading.Thread(target=check_pid)
+ thread.daemon = True
+ thread.start()
+
+
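+# Gini coefficient per row: with the entries sorted ascending, computes
+# sum_i (2*i - n - 1) * a_i / (n * sum_i a_i) for i = 1..n.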
+def gini_coef(a):
+ a = th.sort(a, dim=-1)[0]
+ n = a.shape[1]
+ index = th.arange(1, n+1)[None, :].float()
+ return (th.sum((2 * index - n - 1) * a, -1) / (n * th.sum(a, -1)))
+
+
+def main():
+ nrms = dict(imagenet=([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), imagenet64=([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), cifar10=([.5, .5, .5], [.5, .5, .5]))[args.ds]
+ if args.ds == 'cifar10' and args.model.startswith('vgg'):
+ nrms = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
+
+ if args.ds == 'imagenet':
+ if args.model.startswith('inception'):
+ transforms = [
+ tv.transforms.Resize((299, 299)),
+ ]
+ else:
+ transforms = [
+ tv.transforms.Resize((256, 256)),
+ tv.transforms.CenterCrop(224)
+ ]
+ else:
+ transforms = [tv.transforms.Resize((args.img_size, args.img_size))]
+ transforms = tv.transforms.Compose(
+ transforms + [
+ tv.transforms.ToTensor(),
+ ])
+ clip_min = 0.
+ clip_max = 1.
+ nrms_mean, nrms_std = [th.FloatTensor(n)[None, :, None, None] for n in nrms]
+ if args.cuda:
+ nrms_mean, nrms_std = map(th.Tensor.cuda, (nrms_mean, nrms_std))
+
+ if args.ds == 'cifar10':
+ data_dir = os.path.expanduser('~/data/cifar10')
+ os.makedirs(data_dir, exist_ok=True)
+ with FileLock(os.path.join(data_dir, 'lock')):
+ train_ds = tv.datasets.CIFAR10(data_dir, train=True, transform=transforms, download=True)
+ test_ds = tv.datasets.CIFAR10(data_dir, train=False, transform=transforms, download=True)
+ elif args.ds == 'imagenet':
+ train_folder = os.path.expanduser('~/../stuff/imagenet/train')
+ test_folder = os.path.expanduser('~/../stuff/imagenet/val')
+
+ with FileLock(os.path.join(os.path.dirname(train_folder), 'lock')):
+ train_ds = tv.datasets.ImageFolder(train_folder, transform=transforms)
+ test_ds = tv.datasets.ImageFolder(test_folder, transform=transforms)
+
+ if args.load_pgd_test_samples:
+ pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(args.load_pgd_test_samples))
+ x_test = np.load(os.path.join(pgd_path, 'test_clean.npy'))
+ y_test = np.load(os.path.join(pgd_path, 'test_y.npy'))
+ pgd_test = np.load(os.path.join(pgd_path, 'test_pgd.npy'))
+ if x_test.shape[-1] == 3:
+ x_test = x_test.transpose((0, 3, 1, 2))
+ pgd_test = pgd_test.transpose((0, 3, 1, 2))
+ if len(y_test.shape) == 2:
+ y_test = y_test.argmax(-1)
+ test_ds = th.utils.data.TensorDataset(*map(th.from_numpy, (x_test, y_test, pgd_test)))
+
+ train_loader = th.utils.data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True, pin_memory=True, worker_init_fn=init_worker)
+ test_loader = th.utils.data.DataLoader(test_ds, batch_size=args.eval_bs, shuffle=True, num_workers=1, drop_last=False, pin_memory=True, worker_init_fn=init_worker)
+
+ if args.ds == 'imagenet64' or args.ds == 'imagenet':
+ with FileLock(os.path.join(os.path.dirname(train_folder), 'lock')):
+ if args.model in tv.models.__dict__:
+ if args.model.startswith('inception'):
+ net = tv.models.__dict__[args.model](pretrained=True, transform_input=False)
+ else:
+ net = tv.models.__dict__[args.model](pretrained=True)
+ else:
+ raise ValueError('Unknown model: {}'.format(args.model))
+ elif args.ds == 'cifar10':
+ if args.model == 'tiny':
+ net = cifar_model.cifar10_tiny(32, pretrained=args.mode == 'eval' , map_location=None if args.cuda else 'cpu')
+ elif args.model == 'tinyb':
+ net = cifar_model.cifar10_tiny(32, pretrained=args.mode == 'eval' , map_location=None if args.cuda else 'cpu', padding=0, trained_adv=args.train_adv)
+ elif args.model.startswith('vgg'):
+ net = vgg.__dict__[args.model]()
+ cp_path = os.path.expanduser('~/models/advhyp/vgg/{}/checkpoint.tar'.format(args.model))
+ checkpoint = th.load(cp_path, map_location='cpu')
+ state_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
+ net.load_state_dict(state_dict)
+ elif args.model == 'carlini':
+ net = cifar_model.carlini(pretrained=args.mode == 'eval' , map_location=None if args.cuda else 'cpu', trained_adv=args.train_adv)
+ else:
+ net = cifar_model.cifar10(128, pretrained=args.mode == 'eval' , map_location=None if args.cuda else 'cpu', trained_adv=args.train_adv)
+ print(net)
+
+ def get_layers():
+ return itt.chain(net.features.children(), net.classifier.children())
+
+ def get_layer_names():
+ return [l.__class__.__name__ for l in get_layers()]
+
+ if args.cuda:
+ net.cuda()
+
+ def net_forward(x, layer_by_layer=False, from_layer=0):
+ x = x - nrms_mean # cannot be inplace
+ x.div_(nrms_std)
+ if not layer_by_layer:
+ return net(x)
+ cldr = list(net.children())
+ if args.model.startswith('resnet'):
+ x = net.conv1(x)
+ x = net.bn1(x)
+ x = net.relu(x)
+ x = net.maxpool(x)
+
+ x = net.layer1(x)
+ x = net.layer2(x)
+ x = net.layer3(x)
+ x = net.layer4(x)
+
+ outputs = [net.avgpool(x)]
+ flat_features = outputs[-1].view(x.size(0), -1)
+ outputs.append(net.fc(flat_features))
+ elif args.model.startswith('inception'):
+ # 299 x 299 x 3
+ x = net.Conv2d_1a_3x3(x)
+ # 149 x 149 x 32
+ x = net.Conv2d_2a_3x3(x)
+ # 147 x 147 x 32
+ x = net.Conv2d_2b_3x3(x)
+ # 147 x 147 x 64
+ x = F.max_pool2d(x, kernel_size=3, stride=2)
+ # 73 x 73 x 64
+ x = net.Conv2d_3b_1x1(x)
+ # 73 x 73 x 80
+ x = net.Conv2d_4a_3x3(x)
+ # 71 x 71 x 192
+ x = F.max_pool2d(x, kernel_size=3, stride=2)
+ # 35 x 35 x 192
+ x = net.Mixed_5b(x)
+ # 35 x 35 x 256
+ x = net.Mixed_5c(x)
+ # 35 x 35 x 288
+ x = net.Mixed_5d(x)
+ # 35 x 35 x 288
+ x = net.Mixed_6a(x)
+ # 17 x 17 x 768
+ x = net.Mixed_6b(x)
+ # 17 x 17 x 768
+ x = net.Mixed_6c(x)
+ # 17 x 17 x 768
+ x = net.Mixed_6d(x)
+ # 17 x 17 x 768
+ x = net.Mixed_6e(x)
+ # 17 x 17 x 768
+ x = net.Mixed_7a(x)
+ # 8 x 8 x 1280
+ x = net.Mixed_7b(x)
+ # 8 x 8 x 2048
+ x = net.Mixed_7c(x)
+ # 8 x 8 x 2048
+ x = F.avg_pool2d(x, kernel_size=8)
+ # 1 x 1 x 2048
+ outputs = [F.dropout(x, training=net.training)]
+ # 1 x 1 x 2048
+ flat_features = outputs[-1].view(x.size(0), -1)
+ # 2048
+ outputs.append(net.fc(flat_features))
+ # 1000 (num_classes)
+ else:
+ outputs = [net.features(x)]
+ for cidx, c in enumerate(net.classifier.children()):
+ flat_features = outputs[-1].view(x.size(0), -1)
+ outputs.append(c(flat_features))
+ return outputs
+
+ loss_fn = th.nn.CrossEntropyLoss(reduce=False)
+ loss_fn_adv = th.nn.CrossEntropyLoss(reduce=False)
+ if args.cuda:
+ loss_fn.cuda()
+ loss_fn_adv.cuda()
+
+ def get_outputs(x, y, from_layer=0):
+ outputs = net_forward(x, layer_by_layer=True, from_layer=from_layer)
+ logits = outputs[-1]
+ loss = loss_fn(logits, y)
+ _, preds = th.max(logits, 1)
+ return outputs, loss, preds
+
+ def get_loss_and_preds(x, y):
+ logits = net_forward(x, layer_by_layer=False)
+ loss = loss_fn(logits, y)
+ _, preds = th.max(logits, 1)
+ return loss, preds
+
+ def clip(x, cmin, cmax):
+ return th.min(th.max(x, cmin), cmax)
+
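+ # project(): rescale the perturbation x - x_orig so that its L2 norm equals eps;
+ # used by the L2 variant of attack_pgd below.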
+ def project(x, x_orig, eps):
+ dx = x - x_orig
+ dx = dx.flatten(1)
+ dx /= th.norm(dx, p=2, dim=1, keepdim=True) + 1e-9
+ dx *= eps
+ return x_orig + dx.view(x.shape)
+
+
+ if args.attack == 'cw':
+ cw_attack = carlini_wagner_attack.AttackCarliniWagnerL2(cuda=args.cuda, clip_min=clip_min, clip_max=clip_max, confidence=args.cw_confidence, initial_const=args.cw_c / (255**2.), max_steps=args.cw_steps, search_steps=args.cw_search_steps, learning_rate=args.cw_lr)
+ def attack_cw(x, y):
+ x_adv = cw_attack.run(net_forward, x, y)
+ x_adv = th.from_numpy(x_adv)
+ if args.cuda:
+ x_adv = x_adv.cuda()
+ return x_adv
+
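+ # 'mean' attack: average the PGD adversarials obtained from x itself and from several
+ # uniformly jittered copies of x, then clip the average back into the eps ball and the
+ # valid pixel range.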
+ def attack_mean(x, y, eps=args.eps):
+ x_advs = attack_pgd(x, y, eps)
+ for _ in tqdm.trange(args.mean_samples, desc='mean attack samples'):
+ x_noisy = x + th.empty_like(x).uniform_(-args.mean_eps, args.mean_eps)
+ x_advs += attack_pgd(x_noisy, y, eps)
+ x_advs = x_advs / (args.mean_samples + 1)
+ x_advs.clamp_(clip_min, clip_max)
+ x_advs = clip(x_advs, x-eps, x+eps)
+ return x_advs
+
+ def attack_anti(x, y, eps=args.eps):
+ pass
+
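+ # Standard PGD with a random start; with l2=True the step and the projection use an L2
+ # constraint instead of L_inf. If pgd_strength < 1, a random fraction (1 - pgd_strength)
+ # of the batch is replaced by the unperturbed inputs after the attack.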
+ def attack_pgd(x, y, eps=args.eps, l2=False):
+ if l2:
+ eps = np.sqrt(np.prod(x.shape[1:])) * eps
+ x_orig = x
+ x = th.empty_like(x).copy_(x)
+ x.requires_grad_(True)
+ x.data.add_(th.empty_like(x).uniform_(-eps, eps))
+ x.data.clamp_(clip_min, clip_max)
+ for i in range(args.iters):
+ if x.grad is not None:
+ x.grad.zero_()
+
+ logits = net_forward(x)
+ loss = th.sum(loss_fn_adv(logits, y))
+ loss.backward()
+ if args.constrained:
+ if l2:
+ gx = x.grad.flatten(1)
+ gx /= th.norm(gx, p=2, dim=-1, keepdim=True) + 1e-9
+ gx = gx.view(x.shape)
+ x.data.add_(args.attack_lr * eps * gx)
+ x.data = project(x.data, x_orig, eps)
+ else:
+ x.data.add_(args.attack_lr * eps * th.sign(x.grad))
+ x.data = clip(x.data, x_orig-eps, x_orig+eps)
+ x.data.clamp_(clip_min, clip_max)
+ else:
+ x.data += args.attack_lr * eps * x.grad
+ if args.debug:
+ break
+
+ if args.pgd_strength < 1.:
+ mask = (x.data.new_zeros(len(x)).uniform_() <= args.pgd_strength).float()
+ for _ in x.shape[1:]:
+ mask = mask[:, None]
+ x.data = x.data * mask + x_orig.data * (1. - mask)
+
+ x = x.detach()
+ inf_norm = (x - x_orig).abs().max().cpu().numpy().item()
+ if args.clamp_attack:
+ with th.no_grad():
+ diff = th.sign(x - x_orig) * inf_norm
+ x = x_orig + diff
+ x = clip(x, clip_min, clip_max)
+ # if args.constrained:
+ # assert inf_norm < eps * (1.001), 'inf norm {} > {}'.format(inf_norm, eps)
+ return x
+
+ eval_after = math.floor(args.epochs * len(train_ds) / args.batch_size / args.num_evals)
+
+ global_step = 0
+
+ def run_train():
+ nonlocal global_step # noqa: E999
+ if args.model == 'carlini':
+ optim = th.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, nesterov=True)
+ elif args.model == 'cifar10':
+ optim = th.optim.Adam(net.parameters(), lr=args.lr)
+ else:
+ optim = th.optim.RMSprop(net.parameters(), lr=args.lr)
+
+ logging.info('train')
+ for epoch in tqdm.trange(args.epochs):
+ for batch in tqdm.tqdm(train_loader):
+ x, y = batch
+ if args.cuda:
+ x, y = x.cuda(), y.cuda()
+
+ if global_step % eval_after == 0:
+ run_eval_basic(True)
+
+ if args.train_adv:
+ x_adv = attack_pgd(x, y)
+ x = th.cat((x, x_adv))
+ y = th.cat((y, y))
+
+ net.zero_grad()
+ loss, _ = get_loss_and_preds(x, y)
+ loss = loss.mean()
+
+ net.zero_grad()
+ loss.backward()
+ optim.step()
+ if args.model == 'carlini' and not args.train_adv:
+ for pg in optim.param_groups:
+ pg['lr'] = args.lr * ((1. - 1e-6)**global_step)
+
+ global_step += 1
+
+ if args.model == 'cifar10' and not args.train_adv:
+ if epoch == 80 or epoch == 120:
+ for pg in optim.param_groups:
+ pg['lr'] *= .1
+
+ with open('logs/model.ckpt', 'wb') as f:
+ th.save(net.state_dict(), f)
+
+ def run_eval_basic(with_attack=True):
+ logging.info('eval')
+ eval_loss_clean = []
+ eval_acc_clean = []
+ eval_loss_rand = []
+ eval_acc_rand = []
+ eval_loss_pgd = []
+ eval_acc_pgd = []
+ eval_loss_pand = []
+ eval_acc_pand = []
+ all_outputs = []
+ diffs_rand, diffs_pgd, diffs_pand = [], [], []
+ eval_preds_clean, eval_preds_rand, eval_preds_pgd, eval_preds_pand = [], [], [], []
+ norms_clean, norms_pgd, norms_rand, norms_pand = [], [], [], []
+ norms_dpgd, norms_drand, norms_dpand = [], [], []
+ eval_important_valid = []
+ eval_loss_incr = []
+ eval_conf_pgd = []
+ wdiff_corrs = []
+ udiff_corrs = []
+ grad_corrs = []
+ minps_clean = []
+ minps_pgd = []
+ acc_clean_after_corr = []
+ acc_pgd_after_corr = []
+ eval_det_clean = []
+ eval_det_pgd = []
+
+ net.train(False)
+ for eval_batch in tqdm.tqdm(itt.islice(test_loader, args.eval_batches)):
+ x, y = eval_batch
+ if args.cuda:
+ x, y = x.cuda(), y.cuda()
+
+ loss_clean, preds_clean = get_loss_and_preds(x, y)
+
+ eval_loss_clean.append((loss_clean.data).cpu().numpy())
+ eval_acc_clean.append((th.eq(preds_clean, y).float()).cpu().numpy())
+ eval_preds_clean.extend(preds_clean)
+
+ if with_attack:
+ if args.clamp_uniform:
+ x_rand = x + th.sign(th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)) * args.eps_rand
+ else:
+ x_rand = x + th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)
+ loss_rand, preds_rand = get_loss_and_preds(x_rand, y)
+
+ eval_loss_rand.append((loss_rand.data).cpu().numpy())
+ eval_acc_rand.append((th.eq(preds_rand, y).float()).cpu().numpy())
+ eval_preds_rand.extend(preds_rand)
+
+ if not args.load_pgd_test_samples:
+ x_pgd = attack_pgd(x, preds_clean, eps=args.eps_eval)
+ loss_pgd, preds_pgd = get_loss_and_preds(x_pgd, y)
+
+ eval_loss_pgd.append((loss_pgd.data).cpu().numpy())
+ eval_acc_pgd.append((th.eq(preds_pgd, y).float()).cpu().numpy())
+ eval_preds_pgd.extend(preds_pgd)
+
+ loss_incr = loss_pgd - loss_clean
+ eval_loss_incr.append(loss_incr.detach().cpu())
+
+ x_pand = x_pgd + th.empty_like(x_pgd).uniform_(-args.eps_rand, args.eps_rand)
+ loss_pand, preds_pand = get_loss_and_preds(x_pand, y)
+
+ eval_loss_pand.append((loss_pand.data).cpu().numpy())
+ eval_acc_pand.append((th.eq(preds_pand, y).float()).cpu().numpy())
+ eval_preds_pand.extend(preds_pand)
+
+
+ if args.debug:
+ break
+
+ swriter.add_scalar('eval_loss_clean', np.concatenate(eval_loss_clean).mean(), global_step)
+ swriter.add_scalar('eval_acc_clean', np.concatenate(eval_acc_clean).mean(), global_step)
+
+ swriter.add_scalar('eval_loss_rand', np.concatenate(eval_loss_rand).mean(), global_step)
+ swriter.add_scalar('eval_acc_rand', np.concatenate(eval_acc_rand).mean(), global_step)
+
+ swriter.add_scalar('eval_loss_pgd', np.concatenate(eval_loss_pgd).mean(), global_step)
+ swriter.add_scalar('eval_acc_pgd', np.concatenate(eval_acc_pgd).mean(), global_step)
+ swriter.add_scalar('eval_loss_incr', th.cat(eval_loss_incr).mean(), global_step)
+
+ swriter.add_scalar('eval_loss_pand', np.concatenate(eval_loss_pand).mean(), global_step)
+ swriter.add_scalar('eval_acc_pand', np.concatenate(eval_acc_pand).mean(), global_step)
+ net.train(False)
+
+
+ def run_eval(with_attack=True):
+ logging.info('eval')
+ net.train(False)
+ eval_loss_clean = []
+ eval_acc_clean = []
+ eval_loss_rand = []
+ eval_acc_rand = []
+ eval_loss_pgd = []
+ eval_acc_pgd = []
+ eval_loss_pand = []
+ eval_acc_pand = []
+ all_outputs = []
+ diffs_rand, diffs_pgd, diffs_pand = [], [], []
+ eval_preds_clean, eval_preds_rand, eval_preds_pgd, eval_preds_pand = [], [], [], []
+ norms_clean, norms_pgd, norms_rand, norms_pand = [], [], [], []
+ norms_dpgd, norms_drand, norms_dpand = [], [], []
+ eval_important_valid = []
+ eval_loss_incr = []
+ eval_conf_pgd = []
+ wdiff_corrs = []
+ udiff_corrs = []
+ grad_corrs = []
+ minps_clean = []
+ minps_pgd = []
+ acc_clean_after_corr = []
+ acc_pgd_after_corr = []
+ eval_det_clean = []
+ eval_det_pgd = []
+ eval_x_pgd_l0 = []
+ eval_x_pgd_l2 = []
+
+ all_eval_important_pixels = []
+ all_eval_important_single_pixels = []
+ all_eval_losses_per_pixel = []
+
+ if args.save_pgd_samples:
+ for loader, name in ((train_loader, 'train'), (test_loader, 'test')):
+ train_x = []
+ train_y = []
+ train_pgd = []
+ for eval_batch in tqdm.tqdm(loader):
+ x, y = eval_batch
+ if args.cuda:
+ x, y = x.cuda(), y.cuda()
+ _, p = get_loss_and_preds(x, y)
+ train_pgd.append(attack_pgd(x, p, eps=args.eps_eval).cpu().numpy())
+ train_x.append(x.cpu().numpy())
+ train_y.append(y.cpu().numpy())
+ train_pgd = np.concatenate(train_pgd)
+ train_x = np.concatenate(train_x)
+ train_y = np.concatenate(train_y)
+ np.save('logs/{}_pgd.npy'.format(name), train_pgd)
+ np.save('logs/{}_clean.npy'.format(name), train_x)
+ np.save('logs/{}_y.npy'.format(name), train_y)
+ exit(0)
+
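+ # Collect at least n_collect clean training samples (and, for ImageNet, keep going until
+ # each of the 1000 classes has been seen at least five times) to fit the detector's
+ # alignment statistics on.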
+ X, Y = [], []
+ with th.no_grad():
+ for eval_batch in tqdm.tqdm(train_loader):
+ x, y = eval_batch
+ X.append(x.cpu().numpy()), Y.append(y.cpu().numpy())
+ if args.n_collect > 0 and sum(len(x) for x in X) > args.n_collect:
+ if args.ds.startswith('imagenet'):
+ break
+ y_nc, y_cts = np.unique(Y, return_counts=True)
+ if y_nc.size == 1000:
+ if np.all(y_cts >= 5):
+ break
+ else:
+ break
+ logging.debug('need more samples, have {} classes with min size {}...'.format(y_nc.size, np.min(y_cts)))
+ if args.debug:
+ break
+ X, Y = map(np.concatenate, (X, Y))
+
+ pgd_train = None
+ if args.load_pgd_train_samples:
+ pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(args.load_pgd_train_samples))
+ X = np.load(os.path.join(pgd_path, 'train_clean.npy'))
+ Y = np.load(os.path.join(pgd_path, 'train_y.npy'))
+ pgd_train = np.load(os.path.join(pgd_path, 'train_pgd.npy'))
+ if X.shape[-1] == 3:
+ X = X.transpose((0, 3, 1, 2))
+ pgd_train = pgd_train.transpose((0, 3, 1, 2))
+ if len(Y.shape) == 2:
+ Y = Y.argmax(-1)
+
+ if args.model.startswith('resnet') or args.model.startswith('inception'):
+ w_cls = net.fc.weight
+ else:
+ w_cls = list(net.classifier.children())[-1].weight
+ nb_classes = w_cls.shape[0]
+
+ if args.n_collect > 0 and args.load_pgd_train_samples:
+ all_idcs = np.arange(len(X))
+ while True:
+ np.random.shuffle(all_idcs)
+ idcs = all_idcs[:args.n_collect]
+ Y_partial = Y[idcs]
+ y_nc = np.unique(Y_partial).size
+ if y_nc == nb_classes:
+ break
+ logging.debug('only have {} classes, reshuffling...'.format(y_nc))
+ X, Y = X[idcs], Y[idcs]
+ if pgd_train is not None:
+ pgd_train = pgd_train[idcs]
+
+ def latent_and_logits_fn(x):
+ lat, log = net_forward(x, True)[-2:]
+ lat = lat.reshape(lat.shape[0], -1)
+ return lat, log
+
+ noise_eps_detect = args.noise_eps_detect
+ if noise_eps_detect is None:
+ noise_eps_detect = args.noise_eps
+
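+ # collect_statistics is a coroutine: the call below gathers the clean and adversarial
+ # alignment statistics, next() primes the generator, and predictor.send(batch) later
+ # returns per-sample (corrected prediction, detection flag) pairs.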
+ predictor = tf_robustify.collect_statistics(X, Y, latent_and_logits_fn_th=latent_and_logits_fn, nb_classes=nb_classes, weights=w_cls, cuda=args.cuda, debug=args.debug, targeted=args.collect_targeted, noise_eps=args.noise_eps.split(','), noise_eps_detect=noise_eps_detect.split(','), num_noise_samples=args.wdiff_samples, batch_size=args.eval_bs, pgd_eps=args.eps, pgd_lr=args.attack_lr, pgd_iters=args.iters, clip_min=clip_min, clip_max=clip_max, p_ratio_cutoff=args.maxp_cutoff, save_alignments_dir='logs/stats' if args.save_alignments else None, load_alignments_dir=os.path.expanduser('~/data/advhyp/{}/stats'.format(args.model)) if args.load_alignments else None, clip_alignments=args.clip_alignments, pgd_train=pgd_train, fit_classifier=args.fit_classifier, just_detect=args.just_detect)
+ next(predictor)
+ if args.save_alignments:
+ exit(0)
+
+ for eval_batch in tqdm.tqdm(itt.islice(test_loader, args.eval_batches)):
+ if args.load_pgd_test_samples:
+ x, y, x_pgd = eval_batch
+ else:
+ x, y = eval_batch
+ if args.cuda:
+ x, y = x.cuda(), y.cuda()
+ if args.load_pgd_test_samples:
+ x_pgd = x_pgd.cuda()
+
+ loss_clean, preds_clean = get_loss_and_preds(x, y)
+
+ eval_loss_clean.append((loss_clean.data).cpu().numpy())
+ eval_acc_clean.append((th.eq(preds_clean, y).float()).cpu().numpy())
+ eval_preds_clean.extend(preds_clean)
+
+ if with_attack:
+ if args.clamp_uniform:
+ x_rand = x + th.sign(th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)) * args.eps_rand
+ else:
+ x_rand = x + th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)
+ loss_rand, preds_rand = get_loss_and_preds(x_rand, y)
+
+ eval_loss_rand.append((loss_rand.data).cpu().numpy())
+ eval_acc_rand.append((th.eq(preds_rand, y).float()).cpu().numpy())
+ eval_preds_rand.extend(preds_rand)
+
+ if args.attack == 'pgd':
+ if not args.load_pgd_test_samples:
+ x_pgd = attack_pgd(x, preds_clean, eps=args.eps_eval)
+ elif args.attack == 'pgdl2':
+ x_pgd = attack_pgd(x, preds_clean, eps=args.eps_eval, l2=True)
+ elif args.attack == 'cw':
+ x_pgd = attack_cw(x, preds_clean)
+
+ elif args.attack == 'mean':
+ x_pgd = attack_mean(x, preds_clean, eps=args.eps_eval)
+ eval_x_pgd_l0.append(th.max(th.abs((x - x_pgd).view(x.size(0), -1)), -1)[0].detach().cpu().numpy())
+ eval_x_pgd_l2.append(th.norm((x - x_pgd).view(x.size(0), -1), p=2, dim=-1).detach().cpu().numpy())
+
+ loss_pgd, preds_pgd = get_loss_and_preds(x_pgd, y)
+
+ eval_loss_pgd.append((loss_pgd.data).cpu().numpy())
+ eval_acc_pgd.append((th.eq(preds_pgd, y).float()).cpu().numpy())
+ conf_pgd = confusion_matrix(preds_clean.cpu(), preds_pgd.cpu(), labels=np.arange(nb_classes))
+ conf_pgd -= np.diag(np.diag(conf_pgd))
+ eval_conf_pgd.append(conf_pgd)
+ eval_preds_pgd.extend(preds_pgd)
+
+ loss_incr = loss_pgd - loss_clean
+ eval_loss_incr.append(loss_incr.detach().cpu())
+
+ x_pand = x_pgd + th.empty_like(x_pgd).uniform_(-args.eps_rand, args.eps_rand)
+ loss_pand, preds_pand = get_loss_and_preds(x_pand, y)
+
+ eval_loss_pand.append((loss_pand.data).cpu().numpy())
+ eval_acc_pand.append((th.eq(preds_pand, y).float()).cpu().numpy())
+ eval_preds_pand.extend(preds_pand)
+
+ preds_clean_after_corr, det_clean = predictor.send(x.cpu().numpy()).T
+ preds_pgd_after_corr, det_pgd = predictor.send(x_pgd.cpu().numpy()).T
+
+ acc_clean_after_corr.append(preds_clean_after_corr == y.cpu().numpy())
+ acc_pgd_after_corr.append(preds_pgd_after_corr == y.cpu().numpy())
+
+ eval_det_clean.append(det_clean)
+ eval_det_pgd.append(det_pgd)
+
+ if args.debug:
+ break
+
+ swriter.add_scalar('eval_loss_clean', np.concatenate(eval_loss_clean).mean(), global_step)
+ swriter.add_scalar('eval_acc_clean', np.concatenate(eval_acc_clean).mean(), global_step)
+
+ swriter.add_scalar('eval_loss_rand', np.concatenate(eval_loss_rand).mean(), global_step)
+ swriter.add_scalar('eval_acc_rand', np.concatenate(eval_acc_rand).mean(), global_step)
+
+ swriter.add_scalar('eval_loss_pgd', np.concatenate(eval_loss_pgd).mean(), global_step)
+ swriter.add_scalar('eval_acc_pgd', np.concatenate(eval_acc_pgd).mean(), global_step)
+ swriter.add_scalar('eval_loss_incr', th.cat(eval_loss_incr).mean(), global_step)
+
+ swriter.add_scalar('eval_loss_pand', np.concatenate(eval_loss_pand).mean(), global_step)
+ swriter.add_scalar('eval_acc_pand', np.concatenate(eval_acc_pand).mean(), global_step)
+
+ swriter.add_histogram('class_dist_clean', th.stack(eval_preds_clean), global_step)
+ swriter.add_histogram('class_dist_rand', th.stack(eval_preds_rand), global_step)
+ swriter.add_histogram('class_dist_pgd', th.stack(eval_preds_pgd), global_step)
+ swriter.add_histogram('class_dist_pand', th.stack(eval_preds_pand), global_step)
+
+ swriter.add_scalar('acc_clean_after_corr', np.concatenate(acc_clean_after_corr).mean(), global_step)
+ swriter.add_scalar('acc_pgd_after_corr', np.concatenate(acc_pgd_after_corr).mean(), global_step)
+ swriter.add_scalar('det_clean', np.concatenate(eval_det_clean).mean(), global_step)
+ swriter.add_scalar('det_pgd', np.concatenate(eval_det_pgd).mean(), global_step)
+
+ swriter.add_scalar('x_pgd_l0', np.concatenate(eval_x_pgd_l0).mean(), global_step)
+ swriter.add_scalar('x_pgd_l2', np.concatenate(eval_x_pgd_l2).mean(), global_step)
+
+ net.train(True)
+
+ if args.mode == 'eval':
+ for p in net.parameters():
+ p.requires_grad_(False)
+ run_eval()
+ elif args.mode == 'train':
+ run_train()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/case_studies/odds/original/vgg.py b/case_studies/odds/original/vgg.py
new file mode 100644
index 0000000..aef147e
--- /dev/null
+++ b/case_studies/odds/original/vgg.py
@@ -0,0 +1,177 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Modified from https://github.com/pytorch/vision.git
+'''
+import math
+
+import torch.nn as nn
+import torch.nn.init as init
+
+__all__ = [
+ 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
+ 'vgg19_bn', 'vgg19', 'vgg11_nd', 'vgg11_nd_s', 'vgg13_nd', 'vgg13_nd_s', 'vgg16_nd', 'vgg16_nd_s', 'vgg19_nd', 'vgg19_nd_s',
+ 'vgg11_nd_ss', 'vgg13_nd_ss', 'vgg16_nd_ss', 'vgg19_nd_ss',
+]
+
+
+class VGG(nn.Module):
+ '''
+ VGG model
+ '''
+ def __init__(self, features, dropout=True, small=False, supersmall=False):
+ super(VGG, self).__init__()
+ self.features = features
+ cls_layers = []
+ if dropout or supersmall:
+ cls_layers.append(nn.Dropout())
+ if not (small or supersmall):
+ cls_layers.append(nn.Linear(512, 512))
+ cls_layers.append(nn.ReLU())
+ if dropout:
+ cls_layers.append(nn.Dropout())
+ if not supersmall:
+ cls_layers.append(nn.Linear(512, 512))
+ cls_layers.append(nn.ReLU())
+ cls_layers.append(nn.Linear(512, 10))
+
+ self.classifier = nn.Sequential(*cls_layers)
+ # Initialize weights
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ m.bias.data.zero_()
+
+
+ def forward(self, x):
+ x = self.features(x)
+ x = x.view(x.size(0), -1)
+ x = self.classifier(x)
+ return x
+
+
+def make_layers(cfg, batch_norm=False):
+ layers = []
+ in_channels = 3
+ for v in cfg:
+ if v == 'M':
+ layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+ else:
+ conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
+ if batch_norm:
+ layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
+ else:
+ layers += [conv2d, nn.ReLU(inplace=False)]
+ in_channels = v
+ return nn.Sequential(*layers)
+
+
+cfg = {
+ 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+ 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+ 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
+ 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
+ 512, 512, 512, 512, 'M'],
+}
+
+
+def vgg11():
+ """VGG 11-layer model (configuration "A")"""
+ return VGG(make_layers(cfg['A']))
+
+def vgg11_nd():
+ """VGG 11-layer model (configuration "A")"""
+ return VGG(make_layers(cfg['A']), dropout=False)
+
+def vgg11_nd_s():
+ """VGG 11-layer model (configuration "A")"""
+ return VGG(make_layers(cfg['A']), dropout=False, small=True)
+
+def vgg11_nd_ss():
+ """VGG 11-layer model (configuration "A")"""
+ return VGG(make_layers(cfg['A']), dropout=False, small=True, supersmall=True)
+
+
+def vgg11_bn():
+ """VGG 11-layer model (configuration "A") with batch normalization"""
+ return VGG(make_layers(cfg['A'], batch_norm=True))
+
+
+def vgg13():
+ """VGG 13-layer model (configuration "B")"""
+ return VGG(make_layers(cfg['B']))
+
+def vgg13_nd():
+ """VGG 13-layer model (configuration "B")"""
+ return VGG(make_layers(cfg['B']), dropout=False)
+
+def vgg13_nd_s():
+ """VGG 13-layer model (configuration "B")"""
+ return VGG(make_layers(cfg['B']), dropout=False, small=True)
+
+def vgg13_nd_ss():
+ """VGG 13-layer model (configuration "B")"""
+ return VGG(make_layers(cfg['B']), dropout=False, small=True, supersmall=True)
+
+
+def vgg13_bn():
+ """VGG 13-layer model (configuration "B") with batch normalization"""
+ return VGG(make_layers(cfg['B'], batch_norm=True))
+
+
+def vgg16():
+ """VGG 16-layer model (configuration "D")"""
+ return VGG(make_layers(cfg['D']))
+
+def vgg16_nd():
+ """VGG 16-layer model (configuration "D")"""
+ return VGG(make_layers(cfg['D']), dropout=False)
+
+def vgg16_nd_s():
+ """VGG 16-layer model (configuration "D")"""
+ return VGG(make_layers(cfg['D']), dropout=False, small=True)
+
+def vgg16_nd_ss():
+ """VGG 16-layer model (configuration "D")"""
+ return VGG(make_layers(cfg['D']), dropout=False, small=True, supersmall=True)
+
+
+def vgg16_bn():
+ """VGG 16-layer model (configuration "D") with batch normalization"""
+ return VGG(make_layers(cfg['D'], batch_norm=True))
+
+
+def vgg19():
+ """VGG 19-layer model (configuration "E")"""
+ return VGG(make_layers(cfg['E']))
+
+def vgg19_nd():
+ """VGG 19-layer model (configuration "E")"""
+ return VGG(make_layers(cfg['E']), dropout=False)
+
+def vgg19_nd_s():
+ """VGG 19-layer model (configuration "E")"""
+ return VGG(make_layers(cfg['E']), dropout=False, small=True)
+
+def vgg19_nd_ss():
+ """VGG 19-layer model (configuration "E")"""
+ return VGG(make_layers(cfg['E']), dropout=False, small=True, supersmall=True)
+
+
+
+def vgg19_bn():
+ """VGG 19-layer model (configuration 'E') with batch normalization"""
+ return VGG(make_layers(cfg['E'], batch_norm=True))
diff --git a/case_studies/odds/setup_venv.sh b/case_studies/odds/setup_venv.sh
new file mode 100644
index 0000000..987999f
--- /dev/null
+++ b/case_studies/odds/setup_venv.sh
@@ -0,0 +1,11 @@
+set -e
+apt-get update
+apt-get install -y python3.8-venv
+python -m venv /projects/active-adversarial-tests/venv3.8tf --system-site-packages
+source /projects/active-adversarial-tests/venv3.8tf/bin/activate
+pip install robustml
+pip install cleverhans==3.0.1 # updating causes package inconsistencies
+pip install keras==2.2.4 # updating causes package inconsistencies
+pip install line_profiler
+/projects/active-adversarial-tests/install_requirements.sh
+
diff --git a/case_studies/pcl_defense/README.md b/case_studies/pcl_defense/README.md
new file mode 100644
index 0000000..9fe851c
--- /dev/null
+++ b/case_studies/pcl_defense/README.md
@@ -0,0 +1,68 @@
+
+# Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks (ICCV'19)
+
+
+
+This repository is a PyTorch implementation of the ICCV'19 paper [Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks](https://arxiv.org/abs/1904.00887).
+
+To counter adversarial attacks, we propose a Prototype Conformity Loss that disentangles the intermediate features of a deep network class-wise. From the figure, it can be observed that the main reason for the existence of such adversarial samples is the close proximity of the learnt features in the latent feature space.
+
+We provide scripts for reproducing the results from our paper.
+
+
+## Clone the repository
+Clone this repository into any place you want.
+```bash
+git clone https://github.com/aamir-mustafa/pcl-adversarial-defense
+cd pcl-adversarial-defense
+```
+## Softmax (Cross-Entropy) Training
+To expedite the process of forming clusters for our proposed loss, we initially train the model using cross-entropy loss.
+
+``softmax_training.py`` -- ( For initial softmax training).
+
+* The trained checkpoints will be saved in ``Models_Softmax`` folder.
+
+
+## Prototype Conformity Loss
+The deep features for the prototype conformity loss are extracted from different intermediate layers using auxiliary branches, which map the features to a lower-dimensional output, as shown in the following figure.
+
+
+
+
+
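+For intuition, the following minimal sketch shows the two ingredients of the objective: a proximity term that pulls features towards their own class prototype and a contrastive term computed against all other prototypes. It is illustrative only and not the implementation used in this repository (see ``proximity.py`` and ``contrastive_proximity.py`` for the actual losses):
+
+```python
+import torch
+
+def pc_loss_sketch(features, labels, centers):
+    """features: (B, D), labels: (B,), centers: (C, D) learnable class prototypes."""
+    # proximity: mean squared distance of each sample to its own class prototype
+    pull = ((features - centers[labels]) ** 2).sum(dim=1).mean()
+    # contrastive proximity: mean squared distance to every *other* prototype
+    # (encouraged to be large, e.g. by entering the total objective with a negative weight)
+    dists = torch.cdist(features, centers) ** 2          # (B, C)
+    mask = torch.ones_like(dists, dtype=torch.bool)
+    mask[torch.arange(features.size(0)), labels] = False
+    push = dists[mask].mean()
+    return pull, push
+```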
+``pcl_training.py`` -- ( Joint supervision with cross-entropy and our loss).
+
+* The trained checkpoints will be saved in ``Models_PCL`` folder.
+
+## Adversarial Training
+``pcl_training_adversarial_fgsm.py`` -- ( Adversarial Training using FGSM Attack).
+
+``pcl_training_adversarial_pgd.py`` -- ( Adversarial Training using PGD Attack).
+
+
+
+## Testing Model's Robustness against White-Box Attacks
+
+``robustness.py`` -- (Evaluate trained model's robustness against various types of attacks).
+
+## Comparison of Softmax Trained Model and Our Model
+Retained classification accuracy of the models under various types of adversarial attacks:
+
+| Training Scheme | No Attack | FGSM | BIM | MIM | PGD |
+| :------- | :---------- | :----- |:------ |:------ |:------ |
+| Softmax | 92.15 | 21.48 | 0.01 | 0.02 | 0.00 |
+| Ours | 89.55 | 55.76 | 39.75 | 36.44 | 31.10 |
+
+
+## Citation
+```
+@InProceedings{Mustafa_2019_ICCV,
+author = {Mustafa, Aamir and Khan, Salman and Hayat, Munawar and Goecke, Roland and Shen, Jianbing and Shao, Ling},
+title = {Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks},
+booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
+month = {October},
+year = {2019}
+}
+```
+
diff --git a/case_studies/pcl_defense/binarization_test.sh b/case_studies/pcl_defense/binarization_test.sh
new file mode 100644
index 0000000..2fa03fe
--- /dev/null
+++ b/case_studies/pcl_defense/binarization_test.sh
@@ -0,0 +1,20 @@
+nsamples=${1:-512}
+attack=${2:-pgd}
+epsilon=${3:-8}
+
+# kwargs=""
+kwargs="--sample-from-corners"
+
+echo "Attack: $attack"
+echo "Epsilon: $epsilon"
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+ --binarization-test \
+ --epsilon=$epsilon \
+ --n-boundary-points=1 \
+ --n-inner-points=999 \
+ --attack=$attack \
+ --num-samples-test=$nsamples \
+ $kwargs
diff --git a/case_studies/pcl_defense/binarization_test_increased_hardness.sh b/case_studies/pcl_defense/binarization_test_increased_hardness.sh
new file mode 100644
index 0000000..fa4fa3a
--- /dev/null
+++ b/case_studies/pcl_defense/binarization_test_increased_hardness.sh
@@ -0,0 +1,42 @@
+attack=${1:-pgd}
+nsamples=${2:-2048}
+mode=$3
+epsilon=${4:-8}
+
+if [ -z ${mode+x} ]; then
+ echo "No hardness mode specified. Choose from: ninner, gap"
+ exit -1
+fi
+
+echo "Attack: $attack, #Samples: $nsamples, epsilon: $epsilon"
+echo ""
+
+if [[ "$mode" == "ninner" ]]; then
+ for ninner in 49 99 199 299 399 499 599 699 799 899 999 1999; do
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, $ninner inner"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+ --binarization-test \
+ --epsilon=$epsilon \
+ --attack=$attack \
+ --num-samples-test=$nsamples \
+ --n-boundary-points=1 \
+ --n-inner-points=999
+ done
+elif [[ "$mode" == "gap" ]]; then
+ for closeness in 0.9999 0.999 0.95 0.90 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1; do
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 1999 inner, closeness of $closeness"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+ --binarization-test \
+ --epsilon=$epsilon \
+ --attack=$attack \
+ --num-samples-test=$nsamples \
+ --n-boundary-points=1 \
+ --n-inner-points=1999 \
+ --decision-boundary-closeness=$closeness
+ # -n-inner-points was 999 before 25.02.2022
+ done
+fi
\ No newline at end of file
diff --git a/case_studies/pcl_defense/contrastive_proximity.py b/case_studies/pcl_defense/contrastive_proximity.py
new file mode 100644
index 0000000..a46f5ab
--- /dev/null
+++ b/case_studies/pcl_defense/contrastive_proximity.py
@@ -0,0 +1,60 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+
+class Con_Proximity(nn.Module):
+
+ def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True):
+ super(Con_Proximity, self).__init__()
+ self.num_classes = num_classes
+ self.feat_dim = feat_dim
+ self.use_gpu = use_gpu
+
+ if self.use_gpu:
+ self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())#100 x feats- for 100 centers
+ else:
+ self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
+
+ def forward(self, x, labels):
+
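+ # Squared Euclidean distances between every sample and every class center, computed via
+ # the expansion ||x||^2 + ||c||^2 - 2*x.c; the loss below is the mean distance to the
+ # centers of all classes other than the sample's own label.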
+ batch_size = x.size(0)
+ distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
+ torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
+ distmat.addmm_(1, -2, x, self.centers.t())
+
+ classes = torch.arange(self.num_classes).long()
+ if self.use_gpu: classes = classes.cuda()
+ labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
+ mask = labels.eq(classes.expand(batch_size, self.num_classes))
+
+ dist = []
+ for i in range(batch_size):
+
+ k = mask[i].clone().to(dtype=torch.int8)
+ k = -1 * k + 1  # invert the one-hot mask: 1 for every class except the sample's own label
+ kk = k.clone().to(dtype=torch.uint8)
+
+ value = distmat[i][kk]
+
+ value = value.clamp(min=1e-12, max=1e+12) # for numerical stability
+
+ dist.append(value)
+ dist = torch.cat(dist)
+ loss = dist.mean()
+
+ return loss
diff --git a/case_studies/pcl_defense/eval.sh b/case_studies/pcl_defense/eval.sh
new file mode 100644
index 0000000..b16ad9f
--- /dev/null
+++ b/case_studies/pcl_defense/eval.sh
@@ -0,0 +1,25 @@
+
+echo "Baseline:"
+#python3 case_studies/pcl_defense/robustness.py --baseline
+#python3 case_studies/pcl_defense/robustness.py --baseline --binarization-test --epsilon=4
+
+echo "Model w/ their defense"
+#python3 case_studies/pcl_defense/robustness.py
+#python3 case_studies/pcl_defense/robustness.py --binarization-test --epsilon=8 --n-boundary-points=1 --n-inner-points=999
+#python3 case_studies/pcl_defense/robustness.py --binarization-test --epsilon=6
+#python3 case_studies/pcl_defense/robustness.py --binarization-test --epsilon=4
+
+
+PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+ --binarization-test --epsilon=8 --n-boundary-points=1 \
+ --n-inner-points=999
+PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+ --binarization-test --epsilon=8 --n-boundary-points=1 --n-inner-points=999 \
+ --attack=autopgd
+
+#PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+# --binarization-test --epsilon=8 --n-boundary-points=1 \
+# --n-inner-points=999 --use-autopgd-boundary-adversarials
+#PYTHONPATH=$(pwd) python3 case_studies/pcl_defense/robustness.py \
+# --binarization-test --epsilon=8 --n-boundary-points=1 --n-inner-points=999 \
+# --use-autopgd-boundary-adversarials --attack=autopgd
\ No newline at end of file
diff --git a/case_studies/pcl_defense/pcl_training.py b/case_studies/pcl_defense/pcl_training.py
new file mode 100644
index 0000000..c068a93
--- /dev/null
+++ b/case_studies/pcl_defense/pcl_training.py
@@ -0,0 +1,297 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Created on Wed Jan 23 10:15:27 2019
+
+@author: aamir-mustafa
+Implementation of Part 2 of the paper:
+ "Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
+
+Here it is not necessary to save the best-performing model (in terms of accuracy);
+the model with the highest robustness against adversarial attacks is chosen instead.
+
+"""
+
+#Essential Imports
+import os
+import sys
+import argparse
+import datetime
+import time
+import os.path as osp
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from pcl_utils import AverageMeter, Logger
+from proximity import Proximity
+from contrastive_proximity import Con_Proximity
+from resnet_model import * # Imports the ResNet Model
+
+
+parser = argparse.ArgumentParser("Prototype Conformity Loss Implementation")
+parser.add_argument('-j', '--workers', default=4, type=int,
+ help="number of data loading workers (default: 4)")
+parser.add_argument('--train-batch', default=128, type=int, metavar='N',
+ help='train batchsize')
+parser.add_argument('--test-batch', default=100, type=int, metavar='N',
+ help='test batchsize')
+parser.add_argument('--schedule', type=int, nargs='+', default=[142, 230, 360],
+ help='Decrease learning rate at these epochs.')
+parser.add_argument('--lr_model', type=float, default=0.01, help="learning rate for CE Loss")
+parser.add_argument('--lr_prox', type=float, default=0.5, help="learning rate for Proximity Loss") # as per paper
+parser.add_argument('--weight-prox', type=float, default=1, help="weight for Proximity Loss") # as per paper
+parser.add_argument('--lr_conprox', type=float, default=0.0001, help="learning rate for Con-Proximity Loss") # as per paper
+parser.add_argument('--weight-conprox', type=float, default=0.0001, help="weight for Con-Proximity Loss") # as per paper
+parser.add_argument('--max-epoch', type=int, default=400)
+parser.add_argument('--gamma', type=float, default=0.1, help="learning rate decay")
+parser.add_argument('--eval-freq', type=int, default=10)
+parser.add_argument('--print-freq', type=int, default=50)
+parser.add_argument('--gpu', type=str, default='0')
+parser.add_argument('--seed', type=int, default=1)
+parser.add_argument('--use-cpu', action='store_true')
+parser.add_argument('--save-dir', type=str, default='log')
+
+args = parser.parse_args()
+state = {k: v for k, v in args._get_kwargs()}
+
+
+def main():
+ torch.manual_seed(args.seed)
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+ use_gpu = torch.cuda.is_available()
+ if args.use_cpu: use_gpu = False
+
+ sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_PC_Loss' + '.txt'))
+
+ if use_gpu:
+ print("Currently using GPU: {}".format(args.gpu))
+ cudnn.benchmark = True
+ torch.cuda.manual_seed_all(args.seed)
+ else:
+ print("Currently using CPU")
+
+ # Data Load
+ num_classes=10
+ print('==> Preparing dataset')
+ transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
+ download=True, transform=transform_train)
+ trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
+ shuffle=True, num_workers=args.workers)
+
+ testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
+ download=True, transform=transform_test)
+ testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
+ shuffle=False, num_workers=args.workers)
+
+# Loading the Model
+ model = resnet(num_classes=num_classes,depth=110)
+
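+    # note: the reference implementation assumes a GPU (DataParallel + .cuda())
+    # regardless of the --use-cpu flag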
+ if True:
+ model = nn.DataParallel(model).cuda()
+
+ criterion_xent = nn.CrossEntropyLoss()
+ criterion_prox_1024 = Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
+ criterion_prox_256 = Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
+
+ criterion_conprox_1024 = Con_Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
+ criterion_conprox_256 = Con_Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
+
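+    # separate SGD optimizers: one for the network weights and one per auxiliary
+    # loss, since the class centers of Proximity/Con_Proximity are learnable parameters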
+ optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=1e-04, momentum=0.9)
+
+ optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(), lr=args.lr_prox)
+ optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(), lr=args.lr_prox)
+
+ optimizer_conprox_1024 = torch.optim.SGD(criterion_conprox_1024.parameters(), lr=args.lr_conprox)
+ optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(), lr=args.lr_conprox)
+
+
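+  # warm-start from the softmax-only checkpoint trained in Part 1 (softmax_training.py)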
+ filename= 'Models_Softmax/CIFAR10_Softmax.pth.tar'
+ checkpoint = torch.load(filename)
+
+ model.load_state_dict(checkpoint['state_dict'])
+  optimizer_model.load_state_dict(checkpoint['optimizer_model'])
+
+ start_time = time.time()
+
+ for epoch in range(args.max_epoch):
+
+ adjust_learning_rate(optimizer_model, epoch)
+ adjust_learning_rate_prox(optimizer_prox_1024, epoch)
+ adjust_learning_rate_prox(optimizer_prox_256, epoch)
+
+ adjust_learning_rate_conprox(optimizer_conprox_1024, epoch)
+ adjust_learning_rate_conprox(optimizer_conprox_256, epoch)
+
+ print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
+ train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
+ criterion_conprox_1024, criterion_conprox_256,
+ optimizer_model, optimizer_prox_1024, optimizer_prox_256,
+ optimizer_conprox_1024, optimizer_conprox_256,
+ trainloader, use_gpu, num_classes, epoch)
+
+ if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
+ print("==> Test") #Tests after every 10 epochs
+ acc, err = test(model, testloader, use_gpu, num_classes, epoch)
+ print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
+
+ state_ = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
+ 'optimizer_model': optimizer_model.state_dict(), 'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
+ 'optimizer_prox_256': optimizer_prox_256.state_dict(), 'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
+ 'optimizer_conprox_256': optimizer_conprox_256.state_dict(),}
+
+ torch.save(state_, 'Models_PCL/CIFAR10_PCL.pth.tar')
+
+ elapsed = round(time.time() - start_time)
+ elapsed = str(datetime.timedelta(seconds=elapsed))
+ print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
+
+def train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
+ criterion_conprox_1024, criterion_conprox_256,
+ optimizer_model, optimizer_prox_1024, optimizer_prox_256,
+ optimizer_conprox_1024, optimizer_conprox_256,
+ trainloader, use_gpu, num_classes, epoch):
+
+ model.train()
+ xent_losses = AverageMeter() #Computes and stores the average and current value
+ prox_losses_1024 = AverageMeter()
+ prox_losses_256= AverageMeter()
+
+ conprox_losses_1024 = AverageMeter()
+ conprox_losses_256= AverageMeter()
+ losses = AverageMeter()
+
+ #Batchwise training
+ for batch_idx, (data, labels) in enumerate(trainloader):
+ if use_gpu:
+ data, labels = data.cuda(), labels.cuda()
+ feats128, feats256, feats1024, outputs = model(data)
+ loss_xent = criterion_xent(outputs, labels)
+
+ loss_prox_1024 = criterion_prox_1024(feats1024, labels)
+ loss_prox_256= criterion_prox_256(feats256, labels)
+
+ loss_conprox_1024 = criterion_conprox_1024(feats1024, labels)
+ loss_conprox_256= criterion_conprox_256(feats256, labels)
+
+ loss_prox_1024 *= args.weight_prox
+ loss_prox_256 *= args.weight_prox
+
+ loss_conprox_1024 *= args.weight_conprox
+ loss_conprox_256 *= args.weight_conprox
+
+ loss = loss_xent + loss_prox_1024 + loss_prox_256 - loss_conprox_1024 - loss_conprox_256 # total loss
+ optimizer_model.zero_grad()
+
+ optimizer_prox_1024.zero_grad()
+ optimizer_prox_256.zero_grad()
+
+ optimizer_conprox_1024.zero_grad()
+ optimizer_conprox_256.zero_grad()
+
+ loss.backward()
+ optimizer_model.step()
+
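+        # undo the loss weighting so the center parameters are updated with
+        # unweighted gradients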
+ for param in criterion_prox_1024.parameters():
+ param.grad.data *= (1. / args.weight_prox)
+ optimizer_prox_1024.step()
+
+ for param in criterion_prox_256.parameters():
+ param.grad.data *= (1. / args.weight_prox)
+ optimizer_prox_256.step()
+
+
+ for param in criterion_conprox_1024.parameters():
+ param.grad.data *= (1. / args.weight_conprox)
+ optimizer_conprox_1024.step()
+
+ for param in criterion_conprox_256.parameters():
+ param.grad.data *= (1. / args.weight_conprox)
+ optimizer_conprox_256.step()
+
+ losses.update(loss.item(), labels.size(0))
+ xent_losses.update(loss_xent.item(), labels.size(0))
+ prox_losses_1024.update(loss_prox_1024.item(), labels.size(0))
+ prox_losses_256.update(loss_prox_256.item(), labels.size(0))
+
+ conprox_losses_1024.update(loss_conprox_1024.item(), labels.size(0))
+ conprox_losses_256.update(loss_conprox_256.item(), labels.size(0))
+
+ if (batch_idx+1) % args.print_freq == 0:
+ print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) ProxLoss_1024 {:.6f} ({:.6f}) ProxLoss_256 {:.6f} ({:.6f}) \n ConProxLoss_1024 {:.6f} ({:.6f}) ConProxLoss_256 {:.6f} ({:.6f}) " \
+ .format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg,
+ prox_losses_1024.val, prox_losses_1024.avg, prox_losses_256.val, prox_losses_256.avg ,
+ conprox_losses_1024.val, conprox_losses_1024.avg, conprox_losses_256.val,
+ conprox_losses_256.avg ))
+
+
+def test(model, testloader, use_gpu, num_classes, epoch):
+ model.eval()
+ correct, total = 0, 0
+
+ with torch.no_grad():
+ for data, labels in testloader:
+ if True:
+ data, labels = data.cuda(), labels.cuda()
+ feats128, feats256, feats1024, outputs = model(data)
+ predictions = outputs.data.max(1)[1]
+ total += labels.size(0)
+ correct += (predictions == labels.data).sum()
+
+
+ acc = correct * 100. / total
+ err = 100. - acc
+ return acc, err
+
+def adjust_learning_rate(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_model'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_model']
+
+def adjust_learning_rate_prox(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_prox'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_prox']
+
+def adjust_learning_rate_conprox(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_conprox'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_conprox']
+if __name__ == '__main__':
+ main()
+
+
+
+
+
diff --git a/case_studies/pcl_defense/pcl_training_adversarial_fgsm.py b/case_studies/pcl_defense/pcl_training_adversarial_fgsm.py
new file mode 100644
index 0000000..50026b8
--- /dev/null
+++ b/case_studies/pcl_defense/pcl_training_adversarial_fgsm.py
@@ -0,0 +1,335 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Created on Wed Jan 23 10:15:27 2019
+
+@author: aamir-mustafa
+Implementation of Part 2 of the paper:
+ "Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
+
+Here it is not necessary to save the best-performing model (in terms of accuracy);
+the model with the highest robustness against adversarial attacks is chosen instead.
+This code implements adversarial training with the FGSM attack.
+"""
+
+#Essential Imports
+import os
+import sys
+import argparse
+import datetime
+import time
+import os.path as osp
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from pcl_utils import AverageMeter, Logger
+from proximity import Proximity
+from contrastive_proximity import Con_Proximity
+from resnet_model import * # Imports the ResNet Model
+
+
+parser = argparse.ArgumentParser("Prototype Conformity Loss Implementation")
+parser.add_argument('-j', '--workers', default=4, type=int,
+ help="number of data loading workers (default: 4)")
+parser.add_argument('--train-batch', default=64, type=int, metavar='N',
+ help='train batchsize')
+parser.add_argument('--test-batch', default=100, type=int, metavar='N',
+ help='test batchsize')
+parser.add_argument('--schedule', type=int, nargs='+', default=[142, 230, 360],
+ help='Decrease learning rate at these epochs.')
+parser.add_argument('--lr_model', type=float, default=0.01, help="learning rate for model")
+parser.add_argument('--lr_prox', type=float, default=0.5, help="learning rate for Proximity Loss") # as per paper
+parser.add_argument('--weight-prox', type=float, default=1, help="weight for Proximity Loss") # as per paper
+parser.add_argument('--lr_conprox', type=float, default=0.00001, help="learning rate for Con-Proximity Loss") # as per paper
+parser.add_argument('--weight-conprox', type=float, default=0.00001, help="weight for Con-Proximity Loss") # as per paper
+parser.add_argument('--max-epoch', type=int, default=500)
+parser.add_argument('--gamma', type=float, default=0.1, help="learning rate decay")
+parser.add_argument('--eval-freq', type=int, default=10)
+parser.add_argument('--print-freq', type=int, default=50)
+parser.add_argument('--gpu', type=str, default='0')
+parser.add_argument('--seed', type=int, default=1)
+parser.add_argument('--use-cpu', action='store_true')
+parser.add_argument('--save-dir', type=str, default='log')
+
+args = parser.parse_args()
+state = {k: v for k, v in args._get_kwargs()}
+
+mean = [0.4914, 0.4822, 0.4465]
+std = [0.2023, 0.1994, 0.2010]
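+# channel-wise (de-)normalization with the CIFAR-10 statistics; both functions
+# modify the tensor in place and also return it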
+def normalize(t):
+ t[:, 0, :, :] = (t[:, 0, :, :] - mean[0])/std[0]
+ t[:, 1, :, :] = (t[:, 1, :, :] - mean[1])/std[1]
+ t[:, 2, :, :] = (t[:, 2, :, :] - mean[2])/std[2]
+
+ return t
+
+def un_normalize(t):
+ t[:, 0, :, :] = (t[:, 0, :, :] * std[0]) + mean[0]
+ t[:, 1, :, :] = (t[:, 1, :, :] * std[1]) + mean[1]
+ t[:, 2, :, :] = (t[:, 2, :, :] * std[2]) + mean[2]
+
+ return t
+
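+# single-step FGSM: the gradient is taken w.r.t. the normalized input, the step
+# is applied in [0, 1] pixel space, and the caller re-normalizes the result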
+def FGSM(model, criterion, img, label, eps):
+ adv = img.clone()
+ adv.requires_grad = True
+ _,_,_, out= model(adv)
+ loss = criterion(out, label)
+ loss.backward()
+ adv.data = un_normalize(adv.data) + eps * adv.grad.sign()
+ adv.data.clamp_(0.0, 1.0)
+ adv.grad.data.zero_()
+ return adv.detach()
+
+def main():
+ torch.manual_seed(args.seed)
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+ use_gpu = torch.cuda.is_available()
+ if args.use_cpu: use_gpu = False
+
+ sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_PC_Loss_FGSM_AdvTrain' + '.txt'))
+
+ if use_gpu:
+ print("Currently using GPU: {}".format(args.gpu))
+ cudnn.benchmark = True
+ torch.cuda.manual_seed_all(args.seed)
+ else:
+ print("Currently using CPU")
+
+ # Data Load
+ num_classes=10
+ print('==> Preparing dataset')
+ transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
+ download=True, transform=transform_train)
+ trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
+ shuffle=True, num_workers=args.workers)
+
+ testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
+ download=True, transform=transform_test)
+ testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
+ shuffle=False, num_workers=args.workers)
+
+# Loading the Model
+ model = resnet(num_classes=num_classes,depth=110)
+
+ if True:
+ model = nn.DataParallel(model).cuda()
+
+ criterion_xent = nn.CrossEntropyLoss()
+ criterion_prox_1024 = Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
+ criterion_prox_256 = Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
+
+ criterion_conprox_1024 = Con_Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
+ criterion_conprox_256 = Con_Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
+
+ optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=1e-04, momentum=0.9)
+
+ optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(), lr=args.lr_prox)
+ optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(), lr=args.lr_prox)
+
+ optimizer_conprox_1024 = torch.optim.SGD(criterion_conprox_1024.parameters(), lr=args.lr_conprox)
+ optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(), lr=args.lr_conprox)
+
+
+ filename= 'Models_Softmax/CIFAR10_Softmax.pth.tar'
+ checkpoint = torch.load(filename)
+
+ model.load_state_dict(checkpoint['state_dict'])
+  optimizer_model.load_state_dict(checkpoint['optimizer_model'])
+
+ start_time = time.time()
+
+ for epoch in range(args.max_epoch):
+
+ adjust_learning_rate(optimizer_model, epoch)
+ adjust_learning_rate_prox(optimizer_prox_1024, epoch)
+ adjust_learning_rate_prox(optimizer_prox_256, epoch)
+
+ adjust_learning_rate_conprox(optimizer_conprox_1024, epoch)
+ adjust_learning_rate_conprox(optimizer_conprox_256, epoch)
+
+ print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
+ train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
+ criterion_conprox_1024, criterion_conprox_256,
+ optimizer_model, optimizer_prox_1024, optimizer_prox_256,
+ optimizer_conprox_1024, optimizer_conprox_256,
+ trainloader, use_gpu, num_classes, epoch)
+
+ if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
+ print("==> Test") #Tests after every 10 epochs
+ acc, err = test(model, testloader, use_gpu, num_classes, epoch)
+ print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
+
+ state_ = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
+ 'optimizer_model': optimizer_model.state_dict(), 'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
+ 'optimizer_prox_256': optimizer_prox_256.state_dict(), 'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
+ 'optimizer_conprox_256': optimizer_conprox_256.state_dict(),}
+
+ torch.save(state_, 'Models_PCL_AdvTrain_FGSM/CIFAR10_PCL_AdvTrain_FGSM.pth.tar')
+
+ elapsed = round(time.time() - start_time)
+ elapsed = str(datetime.timedelta(seconds=elapsed))
+ print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
+
+def train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
+ criterion_conprox_1024, criterion_conprox_256,
+ optimizer_model, optimizer_prox_1024, optimizer_prox_256,
+ optimizer_conprox_1024, optimizer_conprox_256,
+ trainloader, use_gpu, num_classes, epoch):
+
+# model.train()
+ xent_losses = AverageMeter() #Computes and stores the average and current value
+ prox_losses_1024 = AverageMeter()
+ prox_losses_256= AverageMeter()
+
+ conprox_losses_1024 = AverageMeter()
+ conprox_losses_256= AverageMeter()
+ losses = AverageMeter()
+
+ #Batchwise training
+ for batch_idx, (data, labels) in enumerate(trainloader):
+ if use_gpu:
+ data, labels = data.cuda(), labels.cuda()
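+        # craft adversarial examples in eval mode with a per-batch random eps in
+        # [0.02, 0.05], then train on the concatenation of clean and adversarial data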
+ model.eval()
+ eps= np.random.uniform(0.02,0.05)
+ adv = FGSM(model, criterion_xent, data, labels, eps=eps) # Generates Batch-wise Adv Images
+ adv.requires_grad= False
+
+ adv= normalize(adv)
+ adv= adv.cuda()
+ true_labels_adv= labels
+ data= torch.cat((data, adv),0)
+ labels= torch.cat((labels, true_labels_adv))
+ model.train()
+
+ feats128, feats256, feats1024, outputs = model(data)
+ loss_xent = criterion_xent(outputs, labels)
+
+ loss_prox_1024 = criterion_prox_1024(feats1024, labels)
+ loss_prox_256= criterion_prox_256(feats256, labels)
+
+ loss_conprox_1024 = criterion_conprox_1024(feats1024, labels)
+ loss_conprox_256= criterion_conprox_256(feats256, labels)
+
+ loss_prox_1024 *= args.weight_prox
+ loss_prox_256 *= args.weight_prox
+
+ loss_conprox_1024 *= args.weight_conprox
+ loss_conprox_256 *= args.weight_conprox
+
+ loss = loss_xent + loss_prox_1024 + loss_prox_256 - loss_conprox_1024 - loss_conprox_256 # total loss
+ optimizer_model.zero_grad()
+
+ optimizer_prox_1024.zero_grad()
+ optimizer_prox_256.zero_grad()
+
+ optimizer_conprox_1024.zero_grad()
+ optimizer_conprox_256.zero_grad()
+
+ loss.backward()
+ optimizer_model.step()
+
+ for param in criterion_prox_1024.parameters():
+ param.grad.data *= (1. / args.weight_prox)
+ optimizer_prox_1024.step()
+
+ for param in criterion_prox_256.parameters():
+ param.grad.data *= (1. / args.weight_prox)
+ optimizer_prox_256.step()
+
+
+ for param in criterion_conprox_1024.parameters():
+ param.grad.data *= (1. / args.weight_conprox)
+ optimizer_conprox_1024.step()
+
+ for param in criterion_conprox_256.parameters():
+ param.grad.data *= (1. / args.weight_conprox)
+ optimizer_conprox_256.step()
+
+ losses.update(loss.item(), labels.size(0))
+ xent_losses.update(loss_xent.item(), labels.size(0))
+ prox_losses_1024.update(loss_prox_1024.item(), labels.size(0))
+ prox_losses_256.update(loss_prox_256.item(), labels.size(0))
+
+ conprox_losses_1024.update(loss_conprox_1024.item(), labels.size(0))
+ conprox_losses_256.update(loss_conprox_256.item(), labels.size(0))
+
+ if (batch_idx+1) % args.print_freq == 0:
+ print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) ProxLoss_1024 {:.6f} ({:.6f}) ProxLoss_256 {:.6f} ({:.6f}) \n ConProxLoss_1024 {:.6f} ({:.6f}) ConProxLoss_256 {:.6f} ({:.6f}) " \
+ .format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg,
+ prox_losses_1024.val, prox_losses_1024.avg, prox_losses_256.val, prox_losses_256.avg ,
+ conprox_losses_1024.val, conprox_losses_1024.avg, conprox_losses_256.val,
+ conprox_losses_256.avg ))
+
+
+def test(model, testloader, use_gpu, num_classes, epoch):
+ model.eval()
+ correct, total = 0, 0
+
+ with torch.no_grad():
+ for data, labels in testloader:
+ if True:
+ data, labels = data.cuda(), labels.cuda()
+ feats128, feats256, feats1024, outputs = model(data)
+ predictions = outputs.data.max(1)[1]
+ total += labels.size(0)
+ correct += (predictions == labels.data).sum()
+
+
+ acc = correct * 100. / total
+ err = 100. - acc
+ return acc, err
+
+def adjust_learning_rate(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_model'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_model']
+
+def adjust_learning_rate_prox(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_prox'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_prox']
+
+def adjust_learning_rate_conprox(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_conprox'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_conprox']
+if __name__ == '__main__':
+ main()
+
+
+
+
+
diff --git a/case_studies/pcl_defense/pcl_training_adversarial_pgd.py b/case_studies/pcl_defense/pcl_training_adversarial_pgd.py
new file mode 100644
index 0000000..c14f468
--- /dev/null
+++ b/case_studies/pcl_defense/pcl_training_adversarial_pgd.py
@@ -0,0 +1,367 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Created on Wed Jan 23 10:15:27 2019
+
+@author: aamir-mustafa
+Implementation of Part 2 of the paper:
+ "Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
+
+Here it is not necessary to save the best-performing model (in terms of accuracy);
+the model with the highest robustness against adversarial attacks is chosen instead.
+This code implements adversarial training with the PGD attack.
+"""
+
+#Essential Imports
+import os
+import sys
+import argparse
+import datetime
+import time
+import os.path as osp
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from pcl_utils import AverageMeter, Logger
+from proximity import Proximity
+from contrastive_proximity import Con_Proximity
+from resnet_model import * # Imports the ResNet Model
+
+
+parser = argparse.ArgumentParser("Prototype Conformity Loss Implementation")
+parser.add_argument('-j', '--workers', default=4, type=int,
+ help="number of data loading workers (default: 4)")
+parser.add_argument('--train-batch', default=64, type=int, metavar='N',
+ help='train batchsize')
+parser.add_argument('--test-batch', default=100, type=int, metavar='N',
+ help='test batchsize')
+parser.add_argument('--schedule', type=int, nargs='+', default=[142, 230, 360],
+ help='Decrease learning rate at these epochs.')
+parser.add_argument('--lr_model', type=float, default=0.01, help="learning rate for model")
+parser.add_argument('--lr_prox', type=float, default=0.5, help="learning rate for Proximity Loss") # as per paper
+parser.add_argument('--weight-prox', type=float, default=1, help="weight for Proximity Loss") # as per paper
+parser.add_argument('--lr_conprox', type=float, default=0.00001, help="learning rate for Con-Proximity Loss") # as per paper
+parser.add_argument('--weight-conprox', type=float, default=0.00001, help="weight for Con-Proximity Loss") # as per paper
+parser.add_argument('--max-epoch', type=int, default=500)
+parser.add_argument('--gamma', type=float, default=0.1, help="learning rate decay")
+parser.add_argument('--eval-freq', type=int, default=10)
+parser.add_argument('--print-freq', type=int, default=50)
+parser.add_argument('--gpu', type=str, default='0')
+parser.add_argument('--seed', type=int, default=1)
+parser.add_argument('--use-cpu', action='store_true')
+parser.add_argument('--save-dir', type=str, default='log')
+
+args = parser.parse_args()
+state = {k: v for k, v in args._get_kwargs()}
+
+mean = [0.4914, 0.4822, 0.4465]
+std = [0.2023, 0.1994, 0.2010]
+def normalize(t):
+ t[:, 0, :, :] = (t[:, 0, :, :] - mean[0])/std[0]
+ t[:, 1, :, :] = (t[:, 1, :, :] - mean[1])/std[1]
+ t[:, 2, :, :] = (t[:, 2, :, :] - mean[2])/std[2]
+
+ return t
+
+def un_normalize(t):
+ t[:, 0, :, :] = (t[:, 0, :, :] * std[0]) + mean[0]
+ t[:, 1, :, :] = (t[:, 1, :, :] * std[1]) + mean[1]
+ t[:, 2, :, :] = (t[:, 2, :, :] * std[2]) + mean[2]
+
+ return t
+
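+# iterative attack used for adversarial training: 'fgsm' takes a single step,
+# 'bim' and 'mim' take eps/iters steps (MIM adds gradient momentum), 'pgd' takes
+# fixed 2/255 steps and projects back onto the eps-ball around the input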
+def attack(model, criterion, img, label, eps, attack_type, iters):
+ adv = img.detach()
+ adv.requires_grad = True
+
+ if attack_type == 'fgsm':
+ iterations = 1
+ else:
+ iterations = iters
+
+ if attack_type == 'pgd':
+ step = 2 / 255
+ else:
+ step = eps / iterations
+
+ noise = 0
+
+ for j in range(iterations):
+ _,_,_,out_adv = model(adv.clone())
+ loss = criterion(out_adv, label)
+ loss.backward()
+
+ if attack_type == 'mim':
+ adv_mean= torch.mean(torch.abs(adv.grad), dim=1, keepdim=True)
+ adv_mean= torch.mean(torch.abs(adv_mean), dim=2, keepdim=True)
+ adv_mean= torch.mean(torch.abs(adv_mean), dim=3, keepdim=True)
+ adv.grad = adv.grad / adv_mean
+ noise = noise + adv.grad
+ else:
+ noise = adv.grad
+
+ # Optimization step
+ adv.data = un_normalize(adv.data) + step * noise.sign()
+# adv.data = adv.data + step * adv.grad.sign()
+
+ if attack_type == 'pgd':
+ adv.data = torch.where(adv.data > img.data + eps, img.data + eps, adv.data)
+ adv.data = torch.where(adv.data < img.data - eps, img.data - eps, adv.data)
+ adv.data.clamp_(0.0, 1.0)
+
+ adv.grad.data.zero_()
+
+ return adv.detach()
+
+def main():
+ torch.manual_seed(args.seed)
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+ use_gpu = torch.cuda.is_available()
+ if args.use_cpu: use_gpu = False
+
+ sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_PC_Loss_PGD_AdvTrain' + '.txt'))
+
+ if use_gpu:
+ print("Currently using GPU: {}".format(args.gpu))
+ cudnn.benchmark = True
+ torch.cuda.manual_seed_all(args.seed)
+ else:
+ print("Currently using CPU")
+
+ # Data Load
+ num_classes=10
+ print('==> Preparing dataset')
+ transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
+ download=True, transform=transform_train)
+ trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
+ shuffle=True, num_workers=args.workers)
+
+ testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
+ download=True, transform=transform_test)
+ testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
+ shuffle=False, num_workers=args.workers)
+
+# Loading the Model
+ model = resnet(num_classes=num_classes,depth=110)
+
+ if True:
+ model = nn.DataParallel(model).cuda()
+
+ criterion_xent = nn.CrossEntropyLoss()
+ criterion_prox_1024 = Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
+ criterion_prox_256 = Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
+
+ criterion_conprox_1024 = Con_Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
+ criterion_conprox_256 = Con_Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
+
+ optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=1e-04, momentum=0.9)
+
+ optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(), lr=args.lr_prox)
+ optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(), lr=args.lr_prox)
+
+ optimizer_conprox_1024 = torch.optim.SGD(criterion_conprox_1024.parameters(), lr=args.lr_conprox)
+ optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(), lr=args.lr_conprox)
+
+
+ filename= 'Models_Softmax/CIFAR10_Softmax.pth.tar'
+ checkpoint = torch.load(filename)
+
+ model.load_state_dict(checkpoint['state_dict'])
+  optimizer_model.load_state_dict(checkpoint['optimizer_model'])
+
+ start_time = time.time()
+
+ for epoch in range(args.max_epoch):
+
+ adjust_learning_rate(optimizer_model, epoch)
+ adjust_learning_rate_prox(optimizer_prox_1024, epoch)
+ adjust_learning_rate_prox(optimizer_prox_256, epoch)
+
+ adjust_learning_rate_conprox(optimizer_conprox_1024, epoch)
+ adjust_learning_rate_conprox(optimizer_conprox_256, epoch)
+
+ print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
+ train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
+ criterion_conprox_1024, criterion_conprox_256,
+ optimizer_model, optimizer_prox_1024, optimizer_prox_256,
+ optimizer_conprox_1024, optimizer_conprox_256,
+ trainloader, use_gpu, num_classes, epoch)
+
+ if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
+ print("==> Test") #Tests after every 10 epochs
+ acc, err = test(model, testloader, use_gpu, num_classes, epoch)
+ print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
+
+ state_ = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
+ 'optimizer_model': optimizer_model.state_dict(), 'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
+ 'optimizer_prox_256': optimizer_prox_256.state_dict(), 'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
+ 'optimizer_conprox_256': optimizer_conprox_256.state_dict(),}
+
+ torch.save(state_, 'Models_PCL_AdvTrain_PGD/CIFAR10_PCL_AdvTrain_PGD.pth.tar')
+
+ elapsed = round(time.time() - start_time)
+ elapsed = str(datetime.timedelta(seconds=elapsed))
+ print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
+
+def train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
+ criterion_conprox_1024, criterion_conprox_256,
+ optimizer_model, optimizer_prox_1024, optimizer_prox_256,
+ optimizer_conprox_1024, optimizer_conprox_256,
+ trainloader, use_gpu, num_classes, epoch):
+
+# model.train()
+ xent_losses = AverageMeter() #Computes and stores the average and current value
+ prox_losses_1024 = AverageMeter()
+ prox_losses_256= AverageMeter()
+
+ conprox_losses_1024 = AverageMeter()
+ conprox_losses_256= AverageMeter()
+ losses = AverageMeter()
+
+ #Batchwise training
+ for batch_idx, (data, labels) in enumerate(trainloader):
+ if use_gpu:
+ data, labels = data.cuda(), labels.cuda()
+ model.eval()
+ eps= np.random.uniform(0.02,0.05)
+ adv = attack(model, criterion_xent, data, labels, eps=eps, attack_type='pgd', iters= 10) # Generates Batch-wise Adv Images
+ adv.requires_grad= False
+
+ adv= normalize(adv)
+ adv= adv.cuda()
+ true_labels_adv= labels
+ data= torch.cat((data, adv),0)
+ labels= torch.cat((labels, true_labels_adv))
+ model.train()
+
+ feats128, feats256, feats1024, outputs = model(data)
+ loss_xent = criterion_xent(outputs, labels)
+
+ loss_prox_1024 = criterion_prox_1024(feats1024, labels)
+ loss_prox_256= criterion_prox_256(feats256, labels)
+
+ loss_conprox_1024 = criterion_conprox_1024(feats1024, labels)
+ loss_conprox_256= criterion_conprox_256(feats256, labels)
+
+ loss_prox_1024 *= args.weight_prox
+ loss_prox_256 *= args.weight_prox
+
+ loss_conprox_1024 *= args.weight_conprox
+ loss_conprox_256 *= args.weight_conprox
+
+ loss = loss_xent + loss_prox_1024 + loss_prox_256 - loss_conprox_1024 - loss_conprox_256 # total loss
+ optimizer_model.zero_grad()
+
+ optimizer_prox_1024.zero_grad()
+ optimizer_prox_256.zero_grad()
+
+ optimizer_conprox_1024.zero_grad()
+ optimizer_conprox_256.zero_grad()
+
+ loss.backward()
+ optimizer_model.step()
+
+ for param in criterion_prox_1024.parameters():
+ param.grad.data *= (1. / args.weight_prox)
+ optimizer_prox_1024.step()
+
+ for param in criterion_prox_256.parameters():
+ param.grad.data *= (1. / args.weight_prox)
+ optimizer_prox_256.step()
+
+
+ for param in criterion_conprox_1024.parameters():
+ param.grad.data *= (1. / args.weight_conprox)
+ optimizer_conprox_1024.step()
+
+ for param in criterion_conprox_256.parameters():
+ param.grad.data *= (1. / args.weight_conprox)
+ optimizer_conprox_256.step()
+
+ losses.update(loss.item(), labels.size(0))
+ xent_losses.update(loss_xent.item(), labels.size(0))
+ prox_losses_1024.update(loss_prox_1024.item(), labels.size(0))
+ prox_losses_256.update(loss_prox_256.item(), labels.size(0))
+
+ conprox_losses_1024.update(loss_conprox_1024.item(), labels.size(0))
+ conprox_losses_256.update(loss_conprox_256.item(), labels.size(0))
+
+ if (batch_idx+1) % args.print_freq == 0:
+ print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) ProxLoss_1024 {:.6f} ({:.6f}) ProxLoss_256 {:.6f} ({:.6f}) \n ConProxLoss_1024 {:.6f} ({:.6f}) ConProxLoss_256 {:.6f} ({:.6f}) " \
+ .format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg,
+ prox_losses_1024.val, prox_losses_1024.avg, prox_losses_256.val, prox_losses_256.avg ,
+ conprox_losses_1024.val, conprox_losses_1024.avg, conprox_losses_256.val,
+ conprox_losses_256.avg ))
+
+
+def test(model, testloader, use_gpu, num_classes, epoch):
+ model.eval()
+ correct, total = 0, 0
+
+ with torch.no_grad():
+ for data, labels in testloader:
+ if True:
+ data, labels = data.cuda(), labels.cuda()
+ feats128, feats256, feats1024, outputs = model(data)
+ predictions = outputs.data.max(1)[1]
+ total += labels.size(0)
+ correct += (predictions == labels.data).sum()
+
+
+ acc = correct * 100. / total
+ err = 100. - acc
+ return acc, err
+
+def adjust_learning_rate(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_model'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_model']
+
+def adjust_learning_rate_prox(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_prox'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_prox']
+
+def adjust_learning_rate_conprox(optimizer, epoch):
+ global state
+ if epoch in args.schedule:
+ state['lr_conprox'] *= args.gamma
+ for param_group in optimizer.param_groups:
+      param_group['lr'] = state['lr_conprox']
+if __name__ == '__main__':
+ main()
+
+
+
+
+
diff --git a/case_studies/pcl_defense/pcl_utils.py b/case_studies/pcl_defense/pcl_utils.py
new file mode 100644
index 0000000..a46c62d
--- /dev/null
+++ b/case_studies/pcl_defense/pcl_utils.py
@@ -0,0 +1,93 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import errno
+import shutil
+import os.path as osp
+
+import torch
+
+def mkdir_if_missing(directory):
+ if not osp.exists(directory):
+ try:
+ os.makedirs(directory)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+class AverageMeter(object):
+ """Computes and stores the average and current value.
+
+ Code imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
+ """
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
+ mkdir_if_missing(osp.dirname(fpath))
+ torch.save(state, fpath)
+ if is_best:
+ shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))
+
+class Logger(object):
+ """
+ Write console output to external text file.
+
+ Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
+ """
+ def __init__(self, fpath=None):
+ self.console = sys.stdout
+ self.file = None
+ if fpath is not None:
+ mkdir_if_missing(os.path.dirname(fpath))
+ self.file = open(fpath, 'w')
+
+ def __del__(self):
+ self.close()
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args):
+ self.close()
+
+ def write(self, msg):
+ self.console.write(msg)
+ if self.file is not None:
+ self.file.write(msg)
+
+ def flush(self):
+ self.console.flush()
+ if self.file is not None:
+ self.file.flush()
+ os.fsync(self.file.fileno())
+
+ def close(self):
+ self.console.close()
+ if self.file is not None:
+ self.file.close()
\ No newline at end of file
diff --git a/case_studies/pcl_defense/proximity.py b/case_studies/pcl_defense/proximity.py
new file mode 100644
index 0000000..d051bdc
--- /dev/null
+++ b/case_studies/pcl_defense/proximity.py
@@ -0,0 +1,51 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+
+class Proximity(nn.Module):
+
+ def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True):
+ super(Proximity, self).__init__()
+ self.num_classes = num_classes
+ self.feat_dim = feat_dim
+ self.use_gpu = use_gpu
+
+ if self.use_gpu:
+ self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
+ else:
+ self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
+
+ def forward(self, x, labels):
+
+ batch_size = x.size(0)
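+    # same center-distance computation as Con_Proximity, but the mask keeps each
+    # sample's distance to its *own* class center, pulling features towards it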
+ distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
+ torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
+ distmat.addmm_(1, -2, x, self.centers.t())
+
+ classes = torch.arange(self.num_classes).long()
+ if self.use_gpu: classes = classes.cuda()
+ labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
+ mask = labels.eq(classes.expand(batch_size, self.num_classes))
+
+ dist = []
+ for i in range(batch_size):
+ value = distmat[i][mask[i]]
+ value = value.clamp(min=1e-12, max=1e+12)
+ dist.append(value)
+ dist = torch.cat(dist)
+ loss = dist.mean()
+
+ return loss
diff --git a/case_studies/pcl_defense/resnet_model.py b/case_studies/pcl_defense/resnet_model.py
new file mode 100644
index 0000000..34e0d99
--- /dev/null
+++ b/case_studies/pcl_defense/resnet_model.py
@@ -0,0 +1,181 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Created on Tue Apr 2 14:21:30 2019
+
+@author: aamir-mustafa
+"""
+
+import torch.nn as nn
+import math
+
+def conv3x3(in_planes, out_planes, stride=1):
+ "3x3 convolution with padding"
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(BasicBlock, self).__init__()
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * 4)
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x): # (conv-bn-relu) x 3 times
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual # in our case is none
+ out = self.relu(out)
+
+ return out
+
+
+class ResNet(nn.Module):
+
+ def __init__(self, depth, num_classes=10):
+ super(ResNet, self).__init__()
+ # Model type specifies number of layers for CIFAR-10 model
+ assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
+ n = (depth - 2) // 6
+
+ block = Bottleneck if depth >=44 else BasicBlock
+
+ self.inplanes = 16
+ self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
+ bias=False)
+ self.bn1 = nn.BatchNorm2d(16)
+ self.relu = nn.ReLU(inplace=True)
+ self.layer1 = self._make_layer(block, 16, n)
+
+ self.layer2 = self._make_layer(block, 32, n, stride=2)
+ self.layer3 = self._make_layer(block, 64, n, stride=2)
+ self.avgpool = nn.AvgPool2d(8)
+
+ self.maxpool2= nn.MaxPool2d(16)
+ self.fc = nn.Linear(64 * block.expansion, 1024)
+ self.fcf = nn.Linear(1024,num_classes)
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes, planes * block.expansion,
+ kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
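+  # returns (feats128, feats256, feats1024, logits) by default; features_only
+  # yields only the 256-d features, features_and_logits the pair (feats256, logits)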
+ def forward(self, x, features_only=False, features_and_logits=False):
+ x = self.conv1(x)
+
+
+ x = self.bn1(x)
+ x = self.relu(x)
+
+ x = self.layer1(x)
+ x = self.layer2(x)
+
+ m = self.maxpool2(x)
+ m = m.view(m.size(0), -1) # 128 dimensional
+
+ x = self.layer3(x)
+
+
+ x = self.avgpool(x)
+ z = x.view(x.size(0), -1) # 256 dimensional
+ x = self.fc(z) # 1024 dimensional
+ y = self.fcf(x) # num_classes dimensional
+
+ if features_only:
+ return z
+ elif features_and_logits:
+ return z, y
+ else:
+ return m, z, x, y
+
+
+def resnet(**kwargs):
+ """
+ Constructs a ResNet model.
+ """
+ return ResNet(**kwargs)
\ No newline at end of file
diff --git a/case_studies/pcl_defense/robustness.py b/case_studies/pcl_defense/robustness.py
new file mode 100644
index 0000000..ce49d85
--- /dev/null
+++ b/case_studies/pcl_defense/robustness.py
@@ -0,0 +1,321 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Created on Sun Mar 24 17:51:08 2019
+
+@author: aamir-mustafa
+"""
+import inspect
+import os
+import sys
+import warnings
+
+import numpy as np
+import torch
+import torchvision
+import torchvision.transforms as transforms
+
+import utils
+from resnet_model import * # Imports the ResNet Model
+
+"""
+Adversarial Attack Options: fgsm, bim, mim, pgd
+"""
+
+warnings.simplefilter('once', RuntimeWarning)
+
+currentdir = os.path.dirname(
+ os.path.abspath(inspect.getfile(inspect.currentframe())))
+grandparentdir = os.path.dirname(os.path.dirname(currentdir))
+sys.path.insert(0, grandparentdir)
+
+import active_tests.decision_boundary_binarization
+
+from attacks.autopgd import auto_pgd
+from attacks.fab import fab
+from functools import partial
+import argparse
+import utils
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--attack", choices=("autopgd", "autopgd2", "fab", "fgsm", "bim", "mim", "pgd"), default="pgd")
+parser.add_argument("--baseline", action="store_true")
+parser.add_argument("--binarization-test", action="store_true")
+parser.add_argument("--num-samples-test", type=int, default=512)
+parser.add_argument('--n-inner-points',
+ default=49,
+ type=int)
+
+parser.add_argument('--n-boundary-points',
+ default=10,
+ type=int)
+parser.add_argument("--epsilon", type=int, default=8)
+parser.add_argument("--use-autopgd-boundary-adversarials", action="store_true")
+parser.add_argument("--use-autoattack", action="store_true")
+parser.add_argument("--sample-from-corners", action="store_true")
+parser.add_argument("--decision-boundary-closeness", type=float, default=0.999)
+args = parser.parse_args()
+
+num_classes = 10
+
+model = resnet(num_classes=num_classes, depth=110)
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+model = nn.DataParallel(model).to(device)
+
+# Loading Trained Model
+
+if args.baseline:
+ filename = 'checkpoints/pcl_defense_rn110_softmax_baseline.pth.tar'
+else:
+ filename = 'checkpoints/pcl_defense_rn110.pth.tar'
+print(f"Loading checkpoint from: {filename}")
+
+checkpoint = torch.load(filename)
+model.load_state_dict(checkpoint['state_dict'])
+model.eval()
+
+
+# Loading Test Data (Un-normalized)
+transform_test = transforms.Compose([transforms.ToTensor(), ])
+
+unfiltered_testset = torchvision.datasets.CIFAR10(root='./data/', train=False,
+ download=True, transform=transform_test)
+unfiltered_test_loader = torch.utils.data.DataLoader(unfiltered_testset, batch_size=256,
+ pin_memory=True,
+ shuffle=False)
+
+# create test subset where model has perfect accuracy
+xs, ys = [], []
+n_checked = 0
+for x, y in unfiltered_test_loader:
+ x, y = x, y
+ with torch.no_grad():
+ y_pred = model(x.to(device))[3].argmax(-1).to("cpu")
+ x = x[y_pred == y]
+ y = y[y_pred == y]
+ xs.append(x)
+ ys.append(y)
+ n_checked += len(x)
+
+ if n_checked >= args.num_samples_test:
+ break
+
+xs = torch.cat(xs, 0)
+ys = torch.cat(ys, 0)
+filtered_testset = torch.utils.data.TensorDataset(xs, ys)
+test_loader = torch.utils.data.DataLoader(filtered_testset, batch_size=256,
+ pin_memory=True,
+ shuffle=False)
+
+# Mean and Standard Deviation of the Dataset
+mean = torch.tensor([0.4914, 0.4822, 0.4465]).view((1, 3, 1, 1)).to(device)
+std = torch.tensor( [0.2023, 0.1994, 0.2010]).view((1, 3, 1, 1)).to(device)
+
+
+def normalize(t):
+ return (t - mean)/std
+ #t[:, 0, :, :] = (t[:, 0, :, :] - mean[0]) / std[0]
+ #t[:, 1, :, :] = (t[:, 1, :, :] - mean[1]) / std[1]
+ #t[:, 2, :, :] = (t[:, 2, :, :] - mean[2]) / std[2]
+
+ #return t
+
+
+def un_normalize(t):
+ return (t*std) + mean
+ #t[:, 0, :, :] = (t[:, 0, :, :] * std[0]) + mean[0]
+ #t[:, 1, :, :] = (t[:, 1, :, :] * std[1]) + mean[1]
+ #t[:, 2, :, :] = (t[:, 2, :, :] * std[2]) + mean[2]
+
+ #return t
+
+
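+# wrapper that accepts inputs in [0, 1] and applies the dataset normalization
+# inside the forward pass, so attacks can operate directly in pixel space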
+class ZeroOneStandardizedNetwork(nn.Module):
+ def __init__(self, model):
+ super().__init__()
+ self.model = model
+
+ def forward(self, x, **kwargs):
+ return self.model(normalize(x), **kwargs)
+
+
+model = ZeroOneStandardizedNetwork(model)
+
+
+# Attacking Images batch-wise
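+# Unlike the training-time attacks, this operates directly on [0, 1] inputs since
+# normalization happens inside ZeroOneStandardizedNetwork; the model passed in may
+# return either plain logits or the (feats128, feats256, feats1024, logits) tuple.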
+def attack(model, criterion, img, label, eps, attack_type, iters):
+ adv = img.detach()
+ adv.requires_grad = True
+
+ if attack_type == 'fgsm':
+ iterations = 1
+ else:
+ iterations = iters
+
+ if attack_type == 'pgd':
+ step = 2 / 255
+ else:
+ step = eps / iterations
+
+ noise = 0
+
+ for j in range(iterations):
+ output = model(adv.clone())
+ if isinstance(output, tuple):
+ _, _, _, out_adv = output
+ else:
+ out_adv = output
+ loss = criterion(out_adv, label)
+ loss.backward()
+
+ if attack_type == 'mim':
+ adv_mean = torch.mean(torch.abs(adv.grad), dim=1, keepdim=True)
+ adv_mean = torch.mean(torch.abs(adv_mean), dim=2, keepdim=True)
+ adv_mean = torch.mean(torch.abs(adv_mean), dim=3, keepdim=True)
+ adv.grad = adv.grad / adv_mean
+ noise = noise + adv.grad
+ else:
+ noise = adv.grad
+
+ # Optimization step
+ adv.data = adv.data + step * noise.sign()
+ # adv.data = adv.data + step * adv.grad.sign()
+
+ if attack_type == 'pgd':
+ adv.data = torch.where(adv.data > img.data + eps, img.data + eps,
+ adv.data)
+ adv.data = torch.where(adv.data < img.data - eps, img.data - eps,
+ adv.data)
+ adv.data.clamp_(0.0, 1.0)
+
+ adv.grad.data.zero_()
+
+ return adv.detach()
+
+def get_boundary_adversarials(x, y, n_samples, epsilon, model):
+  """Generate adversarial examples for the base classifier using AutoPGD."""
+ x_advs = []
+ for _ in range(n_samples):
+ x_adv = auto_pgd(model, x, y, 100, epsilon, "linf", n_classes=10,
+ n_restarts=5)[0]
+ x_advs.append(x_adv)
+
+ x_advs = torch.cat(x_advs, 0)
+
+ # replace examples for which no adversarials could be found with rnd. noise
+ is_identical = torch.max(torch.abs(x_advs.flatten(1) - x.flatten(1))) < 1e-6
+ random_noise = 2 * torch.rand_like(x) - 1.0
+ x_advs[is_identical] = random_noise[is_identical]
+
+ x_advs = utils.clipping_aware_rescaling(x, x_advs - x, epsilon, "linf")
+
+ return x_advs
+
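+# wraps the chosen attack in the decision-boundary binarization test
+# (interior_boundary_discrimination_attack) from active_tests and prints the
+# formatted result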
+def binarization_test(feature_extractor, attack_type, epsilon):
+ def run_attack(model, loader):
+ adv_acc = 0
+ n_total_samples = 0
+ x_adv = []
+ logits_adv = []
+ for i, (img, label) in enumerate(loader):
+ img, label = img.to(device), label.to(device)
+ if attack_type == "autopgd":
+ adv = auto_pgd(model, img, label, 200, epsilon, "linf",
+ n_restarts=5, n_classes=2)[0]
+ elif attack_type == "autopgd2":
+ adv = auto_pgd(model, img, label, 400, epsilon, "linf",
+ n_restarts=10, n_classes=2)[0]
+ elif attack_type == "fab":
+ adv = fab(model, img, label, 200, epsilon, "linf",
+ n_restarts=5, n_classes=2)[0]
+ else:
+ adv = attack(model, criterion, img, label, eps=epsilon, attack_type=attack_type,
+ iters=10)
+ with torch.no_grad():
+ outputs = model(adv.clone().detach())
+ adv_acc += torch.sum(
+ outputs.argmax(dim=-1) == label).item()
+ n_total_samples += len(img)
+
+ x_adv.append(adv.detach().cpu())
+ logits_adv.append(outputs.detach().cpu())
+
+ x_adv = torch.cat(x_adv, 0)
+ logits_adv = torch.cat(logits_adv, 0)
+
+ asr = 1.0 - adv_acc / n_total_samples
+
+ return asr, (x_adv, logits_adv)
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ scores_logit_differences_and_validation_accuracies = active_tests.decision_boundary_binarization.interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, kwargs: run_attack(m, l, **kwargs),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=epsilon,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ n_boundary_adversarial_points=1 if args.use_autopgd_boundary_adversarials else 0
+ ),
+ n_samples=args.num_samples_test,
+ device=device,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ #args.num_samples_test * 10
+ decision_boundary_closeness=args.decision_boundary_closeness,
+ # TODO: activate this again
+ rescale_logits="adaptive",
+ get_boundary_adversarials_fn=partial(get_boundary_adversarials, model=lambda x: model(x)[3]) \
+ if args.use_autopgd_boundary_adversarials else None,
+ sample_training_data_from_corners=args.sample_from_corners
+ )
+
+ print(active_tests.decision_boundary_binarization.format_result(
+ scores_logit_differences_and_validation_accuracies,
+ args.num_samples_test
+ ))
+
+def adversarial_test():
+ adv_acc = 0
+ clean_acc = 0
+ for i, (img, label) in enumerate(test_loader):
+ img, label = img.to(device), label.to(device)
+
+ clean_acc += torch.sum(
+ model(img.clone().detach())[3].argmax(dim=-1) == label).item()
+ adv = attack(model, criterion, img, label, eps=eps, attack_type=attack_type,
+ iters=10)
+ adv_acc += torch.sum(
+ model(adv.clone().detach())[3].argmax(dim=-1) == label).item()
+ # print('Batch: {0}'.format(i))
+ print('Clean accuracy:{0:.3%}\t Adversarial ({2}) accuracy:{1:.3%}'.format(
+ clean_acc / len(testset), adv_acc / len(testset), attack_type))
+
+# Loss Criteria
+criterion = nn.CrossEntropyLoss()
+eps = args.epsilon / 255 # Epsilon for Adversarial Attack
+print("eps:", eps)
+
+for attack_type in (args.attack,): # ("fgsm", "bim", "mim", "pgd"):
+ if args.binarization_test:
+ binarization_test(model, attack_type, eps)
+ else:
+ adversarial_test()
diff --git a/case_studies/pcl_defense/softmax_training.py b/case_studies/pcl_defense/softmax_training.py
new file mode 100644
index 0000000..8bc8ff7
--- /dev/null
+++ b/case_studies/pcl_defense/softmax_training.py
@@ -0,0 +1,217 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Created on Wed Jan 23 10:15:27 2019
+
+@author: aamir-mustafa
+This is the Part 1 file for replicating the results of the paper
+ "Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks".
+Here a ResNet model is trained with softmax (cross-entropy) loss for 164 epochs.
+"""
+
+# Essential Imports
+import os
+import sys
+import argparse
+import datetime
+import time
+import os.path as osp
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+import torchvision
+import torchvision.transforms as transforms
+from pcl_utils import AverageMeter, Logger
+from resnet_model import * # Imports the ResNet Model
+
+parser = argparse.ArgumentParser("Softmax Training for CIFAR-10 Dataset")
+parser.add_argument('-j', '--workers', default=4, type=int,
+ help="number of data loading workers (default: 4)")
+parser.add_argument('--train-batch', default=128, type=int, metavar='N',
+ help='train batchsize')
+parser.add_argument('--test-batch', default=100, type=int, metavar='N',
+ help='test batchsize')
+parser.add_argument('--lr', type=float, default=0.1, help="learning rate for model")
+parser.add_argument('--schedule', type=int, nargs='+', default=[81, 122, 140],
+ help='Decrease learning rate at these epochs.')
+parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
+parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
+ help='momentum')
+parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
+ metavar='W', help='weight decay (default: 1e-4)')
+parser.add_argument('--max-epoch', type=int, default=164)
+parser.add_argument('--eval-freq', type=int, default=10)
+parser.add_argument('--print-freq', type=int, default=50)
+parser.add_argument('--gpu', type=str, default='0') #gpu to be used
+parser.add_argument('--seed', type=int, default=1)
+parser.add_argument('--use-cpu', action='store_true')
+parser.add_argument('--save-dir', type=str, default='log')
+
+args = parser.parse_args()
+state = {k: v for k, v in args._get_kwargs()}
+
+#%%
+
+def main():
+ torch.manual_seed(args.seed)
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+ use_gpu = torch.cuda.is_available()
+ if args.use_cpu: use_gpu = False
+
+ sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_OnlySoftmax' + '.txt'))
+
+ if use_gpu:
+ print("Currently using GPU: {}".format(args.gpu))
+ cudnn.benchmark = True
+ torch.cuda.manual_seed_all(args.seed)
+ else:
+ print("Currently using CPU")
+
+# Data Loading
+ num_classes=10
+ print('==> Preparing dataset ')
+ transform_train = transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+ transform_test = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
+
+
+ trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
+ download=True, transform=transform_train)
+ trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
+ shuffle=True, num_workers=args.workers)
+
+ testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
+ download=True, transform=transform_test)
+ testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
+ shuffle=False, num_workers=args.workers)
+
+# Loading the Model
+
+ model = resnet(num_classes=num_classes,depth=110)
+
+ if use_gpu:
+ model = nn.DataParallel(model).cuda()
+
+ criterion = nn.CrossEntropyLoss()
+ optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
+ weight_decay=args.weight_decay)
+
+ start_time = time.time()
+
+ for epoch in range(args.max_epoch):
+ adjust_learning_rate(optimizer, epoch)
+
+ print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
+ print('LR: %f' % (state['lr']))
+
+ train(trainloader, model, criterion, optimizer, epoch, use_gpu, num_classes)
+
+        if (args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0) or (epoch+1) == args.max_epoch:
+            print("==> Test")  # evaluate every args.eval_freq epochs and after the final epoch
+ acc, err = test(model, testloader, use_gpu, num_classes, epoch)
+ print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
+
+ checkpoint = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
+ 'optimizer_model': optimizer.state_dict(), }
+ torch.save(checkpoint, 'Models_Softmax/CIFAR10_Softmax.pth.tar')
+
+ elapsed = round(time.time() - start_time)
+ elapsed = str(datetime.timedelta(seconds=elapsed))
+ print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
+
+
+def train(trainloader, model, criterion, optimizer, epoch, use_gpu, num_classes):
+
+ model.train()
+ losses = AverageMeter()
+
+#Batch-wise Training
+ for batch_idx, (data, labels) in enumerate(trainloader):
+ if use_gpu:
+ data, labels = data.cuda(), labels.cuda()
+ feats_128, feats_256, feats_1024, outputs = model(data)
+ loss_xent = criterion(outputs, labels) # cross-entropy loss calculation
+
+ optimizer.zero_grad()
+ loss_xent.backward()
+ optimizer.step()
+
+ losses.update(loss_xent.item(), labels.size(0)) # AverageMeter() has this param
+
+ if (batch_idx+1) % args.print_freq == 0:
+ print("Batch {}/{}\t Loss {:.6f} ({:.6f}) " \
+ .format(batch_idx+1, len(trainloader), losses.val, losses.avg))
+
+def test(model, testloader, use_gpu, num_classes, epoch):
+ model.eval()
+ correct, total = 0, 0
+
+ with torch.no_grad():
+ for data, labels in testloader:
+ if use_gpu:
+ data, labels = data.cuda(), labels.cuda()
+ feats_128, feats_256, feats_1024, outputs = model(data)
+ predictions = outputs.data.max(1)[1]
+ total += labels.size(0)
+ correct += (predictions == labels.data).sum()
+
+ acc = correct * 100. / total
+ err = 100. - acc
+ return acc, err
+
+def adjust_learning_rate(optimizer, epoch):
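+    # Step decay: multiply the learning rate by --gamma at every epoch listed in --schedule.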
+ global state
+ if epoch in args.schedule:
+ state['lr'] *= args.gamma
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = state['lr']
+
+if __name__ == '__main__':
+ main()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/case_studies/robust_models/gowal_adversarial_evaluation.sh b/case_studies/robust_models/gowal_adversarial_evaluation.sh
new file mode 100644
index 0000000..c06e649
--- /dev/null
+++ b/case_studies/robust_models/gowal_adversarial_evaluation.sh
@@ -0,0 +1,65 @@
+nsamples=${1:-512}
+ninner=9999
+nboundary=1
+
+#kwargs=""
+kwargs="--dbl-sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=pgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd+"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points FAB"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=fab"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/robust_models/gowal_binarization_test.sh b/case_studies/robust_models/gowal_binarization_test.sh
new file mode 100644
index 0000000..39793f1
--- /dev/null
+++ b/case_studies/robust_models/gowal_binarization_test.sh
@@ -0,0 +1,61 @@
+nsamples=${1:-512}
+ninner=9999
+nboundary=1
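+
+# Unlike gowal_adversarial_evaluation.sh, this script omits --adversarial-attack,
+# so only the decision-boundary binarization test is run here.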
+
+#kwargs=""
+kwargs="--dbl-sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=pgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd+"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points FAB"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=fab"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/gowal_wrn28_10_linf_with_extra.pt \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/robust_models/rade_badversarial_evaluation.sh b/case_studies/robust_models/rade_badversarial_evaluation.sh
new file mode 100644
index 0000000..4439f81
--- /dev/null
+++ b/case_studies/robust_models/rade_badversarial_evaluation.sh
@@ -0,0 +1,65 @@
+nsamples=${1:-512}
+ninner=9999
+nboundary=1
+
+#kwargs=""
+kwargs="--dbl-sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=pgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd+"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points FAB"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=fab"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/robust_models/rade_binarization_test.sh b/case_studies/robust_models/rade_binarization_test.sh
new file mode 100644
index 0000000..1bf1c04
--- /dev/null
+++ b/case_studies/robust_models/rade_binarization_test.sh
@@ -0,0 +1,61 @@
+nsamples=${1:-512}
+ninner=9999
+nboundary=1
+
+#kwargs=""
+kwargs="--dbl-sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=pgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd+"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points FAB"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=fab"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=512 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.preactresnet_18 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rade_helper_rn18_linf.pt \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/robust_models/rebuffi_adversarial_evaluation.sh b/case_studies/robust_models/rebuffi_adversarial_evaluation.sh
new file mode 100644
index 0000000..29f4252
--- /dev/null
+++ b/case_studies/robust_models/rebuffi_adversarial_evaluation.sh
@@ -0,0 +1,66 @@
+nsamples=${1:-512}
+ninner=9999
+nboundary=1
+
+#kwargs=""
+kwargs="--dbl-sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=pgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd+"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points FAB"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=fab"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ --adversarial-attack="$advattack" \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/robust_models/rebuffi_binarization_test.sh b/case_studies/robust_models/rebuffi_binarization_test.sh
new file mode 100644
index 0000000..fd81b88
--- /dev/null
+++ b/case_studies/robust_models/rebuffi_binarization_test.sh
@@ -0,0 +1,62 @@
+nsamples=${1:-512}
+ninner=9999
+nboundary=1
+
+#kwargs=""
+kwargs="--dbl-sample-from-corners"
+
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points PGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=pgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points APGD+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=autopgd+"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ $kwargs
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "eps=8/255, $nboundary boundary, $ninner inner points FAB"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+advattack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 attack=fab"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=$nsamples \
+ --batch-size=128 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=$ninner n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --classifier=dm_networks.wideresnet_28_10 \
+ --no-logit-diff-loss \
+ --input=checkpoints/rebuffi_wrn28_10_linf.pt \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/thermometer/eval.sh b/case_studies/thermometer/eval.sh
new file mode 100644
index 0000000..975a24e
--- /dev/null
+++ b/case_studies/thermometer/eval.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+checkpoint="$1"
+
+basecommand='
+--n-samples=2048
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+epsilon=${3:-8}
+attack=${4:-pgd}
+echo "Attack: $attack"
+
+if [[ "$attack" == "thermometer-lspgd" ]]; then
+ nsteps=100
+ stepsize=0.01
+else
+ nsteps=10 # 200
+ stepsize=0.005 # 0.0011372549
+fi
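+# Epsilon literals below: 0.031372549 = 8/255, 0.02352941176 = 6/255, 0.0156862745 = 4/255.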
+if [[ "$epsilon" == "8" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 8/255] Thermometer encoding (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="networks.differentiable_16_thermometer_encoding_cifar_wideresnet344" \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=$stepsize n_steps=$nsteps attack=$attack\"" \
+ --no-logit-diff-loss \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=$stepsize n_steps=$nsteps attack=$attack"
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+elif [[ "$epsilon" == "6" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 6/255] Thermometer encoding (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="networks.differentiable_16_thermometer_encoding_cifar_resnet18" \
+ --decision-boundary-binarization="norm=linf epsilon=0.02352941176 n_inner_points=49 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.02352941176 step_size=$stepsize n_steps=$nsteps attack=$attack\"" \
+ --no-logit-diff-loss \
+ --adversarial-attack="norm=linf epsilon=0.02352941176 step_size=$stepsize n_steps=$nsteps attack=$attack"
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+elif [[ "$epsilon" == "4" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "[Linf, 4/255] Thermometer encoding (${checkpoint}) evaluation"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier="networks.differentiable_16_thermometer_encoding_cifar_resnet18" \
+ --decision-boundary-binarization="norm=linf epsilon=0.0156862745 n_inner_points=49 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.0156862745 step_size=$stepsize n_steps=$nsteps attack=$attack\"" \
+ --no-logit-diff-loss \
+ --adversarial-attack="norm=linf epsilon=0.0156862745 step_size=$stepsize n_steps=$nsteps attack=$attack"
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+else
+ echo "Unknown epsilon value: $epsilon"
+fi
+
diff --git a/case_studies/thermometer/original/README.md b/case_studies/thermometer/original/README.md
new file mode 100644
index 0000000..81a3094
--- /dev/null
+++ b/case_studies/thermometer/original/README.md
@@ -0,0 +1,25 @@
+# Thermometer Encoding: One Hot Way To Resist Adversarial Examples
+
+Paper: [Buckman et al. 2018](https://openreview.net/forum?id=S18Su--CW)
+
+## Setup
+
+Run `./setup.sh` to fetch models.
+
+The included model is the thermometer-encoded model trained with adversarial
+training, which has 30% accuracy under the specified linf perturbation bound of
+0.031 (while the model trained without adversarial training has 0% accuracy).
+
+## Breaks
+
+* Thermometer Encoding: broken via BPDA (Backward Pass Differentiable Approximation); see the sketch below.
+
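+Below is a minimal sketch of the idea behind a BPDA-style attack on this
+defense, assuming a generic `thermometer_encode` quantization step (the names
+and the PyTorch framing are illustrative only, not this repository's API): the
+non-differentiable encoding is kept in the forward pass, while the backward
+pass approximates it, so gradient-based attacks such as PGD can be run end to
+end.
+
+```python
+import torch
+
+LEVELS = 16
+
+
+def thermometer_encode(x: torch.Tensor) -> torch.Tensor:
+    # x in [0, 1]; adds a trailing dimension of size LEVELS containing a 1 for
+    # every level threshold that lies below the pixel value.
+    thresholds = torch.linspace(0.0, 1.0, LEVELS, device=x.device)
+    return (x.unsqueeze(-1) > thresholds).float()
+
+
+class BPDAThermometer(torch.autograd.Function):
+    """Forward: the real, non-differentiable encoding.
+    Backward: treat the encoding as (roughly) the identity by summing the
+    incoming gradient over the level dimension."""
+
+    @staticmethod
+    def forward(ctx, x):
+        return thermometer_encode(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return grad_output.sum(dim=-1)
+
+
+# Usage inside an attack step: feed BPDAThermometer.apply(images) to the
+# classifier; gradients with respect to `images` are then well-defined.
+```
+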
+## [robustml] evaluation
+
+Run with:
+
+```bash
+python robustml_attack.py --cifar-path=<path to the CIFAR-10 test_batch file>
+```
+
+[robustml]: https://github.com/robust-ml/robustml
diff --git a/case_studies/thermometer/original/adversarial_evaluation.sh b/case_studies/thermometer/original/adversarial_evaluation.sh
new file mode 100644
index 0000000..936af3b
--- /dev/null
+++ b/case_studies/thermometer/original/adversarial_evaluation.sh
@@ -0,0 +1,19 @@
+nsamples=2048
+
+epsilon=${1:-8}
+
+echo "Original attack"
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/robustness_evaluation.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --end=-1 \
+ --batch-size=512 \
+ --epsilon=$epsilon \
+ --attack=original
+
+echo "Adaptive attack"
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/robustness_evaluation.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --end=-1 \
+ --batch-size=256 \
+ --epsilon=$epsilon \
+ --attack=adaptive
\ No newline at end of file
diff --git a/case_studies/thermometer/original/binarization_test.py b/case_studies/thermometer/original/binarization_test.py
new file mode 100644
index 0000000..8e56742
--- /dev/null
+++ b/case_studies/thermometer/original/binarization_test.py
@@ -0,0 +1,186 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
+
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import torch
+
+import robustml
+from robustml_model import Thermometer
+import sys
+import argparse
+
+import numpy as np
+from robustml_attack import LSPGDAttack, Attack
+from active_tests.decision_boundary_binarization import interior_boundary_discrimination_attack, format_result
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cifar-path', type=str, required=True,
+ help='path to the test_batch file from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
+ parser.add_argument('--debug', action='store_true')
+ parser.add_argument("--attack", default="adaptive", choices=("original", "adaptive", "modified", "modified2"))
+ parser.add_argument("--n-samples", default=512, type=int)
+ parser.add_argument("--n-boundary-points", default=49, type=int)
+ parser.add_argument("--n-inner-points", default=10, type=int)
+ parser.add_argument("--epsilon", default=8, type=int)
+ parser.add_argument("--decision-boundary-closeness", type=float, default=None)
+ parser.add_argument("--sample-from-corners", action="store_true")
+ args = parser.parse_args()
+
+ # set up TensorFlow session
+ sess = tf.Session()
+
+ # initialize a model
+ model = Thermometer(sess, epsilon=args.epsilon)
+
+ # initialize a data provider for CIFAR-10 images
+ provider = robustml.provider.CIFAR10(args.cifar_path)
+
+ random_indices = list(range(len(provider)))
+ np.random.shuffle(random_indices)
+
+ x_batch = []
+ y_batch = []
+ for j in range(args.n_samples):
+ x_, y_ = provider[random_indices[j]]
+ x_batch.append(x_)
+ y_batch.append(y_)
+ x_batch = np.array(x_batch).transpose((0, 3, 1, 2))
+ y_batch = np.array(y_batch)
+
+ from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper, PyTorchToTensorFlow1Wrapper
+ from utils import build_dataloader_from_arrays
+
+ test_loader = build_dataloader_from_arrays(x_batch, y_batch, batch_size=32)
+
+  def _model_forward_pass(x, features_and_logits: bool = False,
+                          features_only: bool = False):
+    if features_and_logits and features_only:
+      raise ValueError("Only one of the flags may be set.")
+    if features_and_logits:
+      return model.get_features_and_logits(x.transpose(0, 2, 3, 1))
+    elif features_only:
+      return model.get_features(x.transpose(0, 2, 3, 1))
+    else:
+      raise ValueError("Either features_and_logits or features_only must be set.")
+
+ feature_extractor = TensorFlow1ToPyTorchWrapper(
+ logit_forward_pass=_model_forward_pass,
+ logit_forward_and_backward_pass=lambda x: model.get_features_and_gradients(x.transpose(0, 2, 3, 1))
+ )
+
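+  # ModelWrapper grafts the binarization test's two-class linear readout onto
+  # the frozen feature extractor: the readout's weight and bias are fed via
+  # placeholders, so the TF attack graph is built once and reused for every
+  # binarized classifier.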
+ class ModelWrapper:
+ def __init__(self, model, weight_shape, bias_shape):
+ self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
+ self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
+ self.model = model
+ self.first = True
+
+ def __call__(self, x, **kwargs):
+ y = self.model(x, features_only=True, **kwargs)
+ logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
+ return logits
+
+    def logits_and_predictions(self, x=None):
+      if x is None:
+        assert not self.first
+ if self.first:
+ self.logits = self(x)
+ self.predictions = tf.argmax(self.logits, 1)
+ self.first = False
+ return self.logits, self.predictions
+
+ wrapped_model = ModelWrapper(model.model, (2, 640), (2,))
+ if args.attack == "adaptive":
+ attack = Attack(sess, wrapped_model, epsilon=model.threat_model.epsilon, batch_size=1, n_classes=2)
+ elif args.attack == "original":
+ attack = LSPGDAttack(sess, wrapped_model, epsilon=model.threat_model.epsilon, n_classes=2)
+ elif args.attack == "modified":
+ attack = LSPGDAttack(sess, wrapped_model, epsilon=model.threat_model.epsilon, num_steps=50, step_size=0.25, n_classes=2)
+ elif args.attack == "modified2":
+ attack = LSPGDAttack(sess, wrapped_model, epsilon=model.threat_model.epsilon, num_steps=100, step_size=0.1, n_classes=2)
+ else:
+ raise ValueError("invalid attack mode")
+
+ def run_attack(m, l, epsilon):
+ linear_layer = m[-1]
+ del m
+
+
+ weights_feed_dict = {
+ wrapped_model.weight: linear_layer.weight.data.numpy(),
+ wrapped_model.bias: linear_layer.bias.data.numpy()
+ }
+
+ for x, y in l:
+ x = x.numpy().transpose((0, 2, 3, 1))
+ y = y.numpy()
+ x_adv = attack.run(x, y, None, weights_feed_dict)
+
+ x_adv = x_adv * 255.0
+      if args.attack not in ("original", "modified", "modified2"):
+ # first encode the input, then classify it
+ x_adv = model.encode(x_adv)
+ logits, y_adv = model._sess.run(
+ wrapped_model.logits_and_predictions(model._model.x_input),
+ {
+ model._model.x_input: x_adv,
+ **weights_feed_dict
+ }
+ )
+ is_adv = (y_adv != y).mean()
+ return is_adv, (torch.Tensor(x_adv), torch.Tensor(logits))
+
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ feature_extractor,
+ test_loader,
+ attack_fn=lambda m, l, kw: run_attack(m, l, args.epsilon/255.0),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=args.epsilon/255.0,
+ norm="linf",
+ lr=10000,
+ n_boundary_points=args.n_boundary_points,
+ n_inner_points=args.n_inner_points,
+ adversarial_attack_settings=None,
+ optimizer="sklearn"
+ ),
+ n_samples=args.n_samples,
+ device="cpu",
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+ decision_boundary_closeness=args.decision_boundary_closeness,
+ rescale_logits="adaptive",
+ sample_training_data_from_corners=args.sample_from_corners
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ args.n_samples))
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/case_studies/thermometer/original/binarization_test.sh b/case_studies/thermometer/original/binarization_test.sh
new file mode 100644
index 0000000..cc13224
--- /dev/null
+++ b/case_studies/thermometer/original/binarization_test.sh
@@ -0,0 +1,49 @@
+nsamples=${1:-2048}
+epsilon=${2:-8}
+
+# kwargs=""
+kwargs="--sample-from-corners"
+
+
+echo "Epsilon: $epsilon"
+echo "#samples: $nsamples"
+echo "kwargs: $kwargs"
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Original attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/binarization_test.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --decision-boundary-closeness=0.999 \
+ --epsilon=$epsilon \
+ --attack=original \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Modified attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/binarization_test.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --decision-boundary-closeness=0.999 \
+ --epsilon=$epsilon \
+ --attack=modified \
+ $kwargs
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary point, 999 inner (Adaptive attack)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/binarization_test.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --decision-boundary-closeness=0.999 \
+ --epsilon=$epsilon \
+ --attack=adaptive \
+ $kwargs
\ No newline at end of file
diff --git a/case_studies/thermometer/original/binarization_test_increased_hardness.sh b/case_studies/thermometer/original/binarization_test_increased_hardness.sh
new file mode 100644
index 0000000..23e8c6d
--- /dev/null
+++ b/case_studies/thermometer/original/binarization_test_increased_hardness.sh
@@ -0,0 +1,39 @@
+attack=${1:-original}
+nsamples=${2:-2048}
+mode=$3
+
+if [ -z ${mode+x} ]; then
+ echo "No hardness mode specified. Choose from: ninner, gap"
+ exit -1
+fi
+
+echo "Attack: $attack, #Samples: $nsamples"
+echo ""
+
+if [[ "$mode" == "ninner" ]]; then
+ for ninner in 49 99 199 299 399 499 599 699 799 899 999 1999; do
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, $ninner inner"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/binarization_test.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=$ninner \
+ --decision-boundary-closeness=0.999 \
+ --attack=$attack
+ done
+elif [[ "$mode" == "gap" ]]; then
+ for closeness in 0.999 0.95 0.90 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1; do
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "1 boundary point, 999 inner, closeness of $closeness"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ TF_CPP_MIN_LOG_LEVEL=3 PYTHONPATH=$PYTHONPATH:$(pwd) ./venv3.8tf/bin/python case_studies/thermometer/original/binarization_test.py \
+ --cifar-path=data/cifar-10-batches-py/test_batch \
+ --n-samples=$nsamples \
+ --n-boundary=1 \
+ --n-inner=999 \
+ --attack=$attack \
+ --decision-boundary-closeness=$closeness
+ done
+fi
\ No newline at end of file
diff --git a/case_studies/thermometer/original/cifar10_input.py b/case_studies/thermometer/original/cifar10_input.py
new file mode 100644
index 0000000..40991ad
--- /dev/null
+++ b/case_studies/thermometer/original/cifar10_input.py
@@ -0,0 +1,174 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for importing the CIFAR10 dataset.
+
+Each image in the dataset is a numpy array of shape (32, 32, 3), with the values
+being unsigned integers (i.e., in the range 0,1,...,255).
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import pickle
+import sys
+import tensorflow as tf
+version = sys.version_info
+
+import numpy as np
+
+class CIFAR10Data(object):
+ """
+ Unpickles the CIFAR10 dataset from a specified folder containing a pickled
+ version following the format of Krizhevsky which can be found
+ [here](https://www.cs.toronto.edu/~kriz/cifar.html).
+
+ Inputs to constructor
+ =====================
+
+ - path: path to the pickled dataset. The training data must be pickled
+ into five files named data_batch_i for i = 1, ..., 5, containing 10,000
+ examples each, the test data
+ must be pickled into a single file called test_batch containing 10,000
+ examples, and the 10 class names must be
+ pickled into a file called batches.meta. The pickled examples should
+ be stored as a tuple of two objects: an array of 10,000 32x32x3-shaped
+ arrays, and an array of their 10,000 true labels.
+
+ """
+ def __init__(self, path="../cifar10_data"):
+ train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]
+ eval_filename = 'test_batch'
+ metadata_filename = 'batches.meta'
+
+ train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')
+ train_labels = np.zeros(50000, dtype='int32')
+ for ii, fname in enumerate(train_filenames):
+ cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))
+ train_images[ii * 10000 : (ii+1) * 10000, ...] = cur_images
+ train_labels[ii * 10000 : (ii+1) * 10000, ...] = cur_labels
+ eval_images, eval_labels = self._load_datafile(
+ os.path.join(path, eval_filename))
+
+ with open(os.path.join(path, metadata_filename), 'rb') as fo:
+ if version.major == 3:
+ data_dict = pickle.load(fo, encoding='bytes')
+ else:
+ data_dict = pickle.load(fo)
+
+ self.label_names = data_dict[b'label_names']
+ for ii in range(len(self.label_names)):
+ self.label_names[ii] = self.label_names[ii].decode('utf-8')
+
+ self.train_data = DataSubset(train_images, train_labels)
+ self.eval_data = DataSubset(eval_images, eval_labels)
+
+ @staticmethod
+ def _load_datafile(filename):
+ with open(filename, 'rb') as fo:
+ if version.major == 3:
+ data_dict = pickle.load(fo, encoding='bytes')
+ else:
+ data_dict = pickle.load(fo)
+
+ assert data_dict[b'data'].dtype == np.uint8
+ image_data = data_dict[b'data']
+ image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
+ return image_data, np.array(data_dict[b'labels'])
+
+class AugmentedCIFAR10Data(object):
+ """
+ Data augmentation wrapper over a loaded dataset.
+
+ Inputs to constructor
+ =====================
+ - raw_cifar10data: the loaded CIFAR10 dataset, via the CIFAR10Data class
+ - sess: current tensorflow session
+ - model: current model (needed for input tensor)
+ """
+ def __init__(self, raw_cifar10data, sess, model):
+ assert isinstance(raw_cifar10data, CIFAR10Data)
+ self.image_size = 32
+
+ # create augmentation computational graph
+ self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
+ padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
+ img, self.image_size + 4, self.image_size + 4),
+ self.x_input_placeholder)
+ cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,
+ self.image_size,
+ 3]), padded)
+ flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
+ self.augmented = flipped
+
+ self.train_data = AugmentedDataSubset(raw_cifar10data.train_data, sess,
+ self.x_input_placeholder,
+ self.augmented)
+ self.eval_data = AugmentedDataSubset(raw_cifar10data.eval_data, sess,
+ self.x_input_placeholder,
+ self.augmented)
+ self.label_names = raw_cifar10data.label_names
+
+
+class DataSubset(object):
+ def __init__(self, xs, ys):
+ self.xs = xs
+ self.n = xs.shape[0]
+ self.ys = ys
+ self.batch_start = 0
+ self.cur_order = np.random.permutation(self.n)
+
+ def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
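+    # Single-pass mode raises once the data is exhausted; multi-pass mode wraps
+    # around and (optionally) reshuffles the order after each full pass.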
+ if self.n < batch_size:
+ raise ValueError('Batch size can be at most the dataset size')
+ if not multiple_passes:
+ actual_batch_size = min(batch_size, self.n - self.batch_start)
+ if actual_batch_size <= 0:
+ raise ValueError('Pass through the dataset is complete.')
+ batch_end = self.batch_start + actual_batch_size
+ batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
+ batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
+ self.batch_start += actual_batch_size
+ return batch_xs, batch_ys
+ actual_batch_size = min(batch_size, self.n - self.batch_start)
+ if actual_batch_size < batch_size:
+ if reshuffle_after_pass:
+ self.cur_order = np.random.permutation(self.n)
+ self.batch_start = 0
+ batch_end = self.batch_start + batch_size
+ batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
+ batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
+    self.batch_start += batch_size  # the batch above always spans batch_size samples
+ return batch_xs, batch_ys
+
+
+class AugmentedDataSubset(object):
+ def __init__(self, raw_datasubset, sess, x_input_placeholder,
+ augmented):
+ self.sess = sess
+ self.raw_datasubset = raw_datasubset
+ self.x_input_placeholder = x_input_placeholder
+ self.augmented = augmented
+
+ def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
+ raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,
+ reshuffle_after_pass)
+ images = raw_batch[0].astype(np.float32)
+ return self.sess.run(self.augmented, feed_dict={self.x_input_placeholder:
+ raw_batch[0]}), raw_batch[1]
+
diff --git a/case_studies/thermometer/original/cifar_model.py b/case_studies/thermometer/original/cifar_model.py
new file mode 100644
index 0000000..daa310e
--- /dev/null
+++ b/case_studies/thermometer/original/cifar_model.py
@@ -0,0 +1,244 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# based on https://github.com/tensorflow/models/tree/master/resnet
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+class Model(object):
+ """ResNet model."""
+
+ def __init__(self, restore=None, sess=None, tiny=True,
+ thermometer=True, levels=8, mode='eval'):
+ """ResNet constructor.
+
+ Args:
+ mode: One of 'train' and 'eval'.
+ """
+ self.mode = mode
+ self.tiny = tiny
+ self.thermometer = thermometer
+ self.levels = levels
+ with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
+ # print("Called")
+ self.first = True
+ self._build_model()
+ self.first = False
+ if restore:
+ path = tf.train.latest_checkpoint(restore)
+ saver = tf.train.Saver()
+ saver.restore(sess, path)
+ # print("restored")
+
+ def __call__(self, xs, **kwargs):
+ with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
+ return self._build_model(xs, **kwargs)
+
+ def add_internal_summaries(self):
+ pass
+
+ def _stride_arr(self, stride):
+ """Map a stride scalar to the stride array for tf.nn.conv2d."""
+ return [1, stride, stride, 1]
+
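+  # The first call (from the constructor) creates the input placeholders and
+  # the logit/loss/accuracy ops; later calls via __call__ reuse the same
+  # variables (AUTO_REUSE) to build a forward pass on a caller-supplied tensor,
+  # optionally returning only the pooled features (features_only=True).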
+  def _build_model(self, x_input=None, features_only=False, **kwargs):
+    """Build the core model within the graph."""
+    assert self.mode == 'train' or self.mode == 'eval'
+    with tf.variable_scope('input'):
+
+      if x_input is None:
+ assert self.first
+ ch = 3
+ if self.thermometer:
+ ch = self.levels*3
+ x_input = self.x_input = tf.placeholder(
+ tf.float32,
+ shape=[None, 32, 32, ch], name='x_input_model')
+ else:
+ assert not self.first
+
+
+ if self.first:
+ self.y_input = tf.placeholder(tf.int64, shape=None, name='y_input_model')
+
+
+ input_standardized = tf.map_fn(lambda img: tf.image.per_image_standardization(img),
+ x_input)
+ ch = x_input.get_shape().as_list()[3]
+ x = self._conv('init_conv', input_standardized, 3, ch, 16, self._stride_arr(1))
+
+
+
+ strides = [1, 2, 2]
+ activate_before_residual = [True, False, False]
+ res_func = self._residual
+
+    # The non-tiny configuration below corresponds to the w28-10 wide residual
+    # network, which is more memory efficient than a very deep residual network
+    # and has comparably good performance.
+ # https://arxiv.org/pdf/1605.07146v1.pdf
+ if self.tiny:
+ filters = [16, 16, 32, 64]
+ layers = 2
+ else:
+ filters = [16, 160, 320, 640]
+ layers = 5
+
+ # Update hps.num_residual_units to 9
+
+ with tf.variable_scope('unit_1_0'):
+ x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
+ activate_before_residual[0])
+ for i in range(1, layers):
+ with tf.variable_scope('unit_1_%d' % i):
+ x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
+
+ with tf.variable_scope('unit_2_0'):
+ x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
+ activate_before_residual[1])
+ for i in range(1, layers):
+ with tf.variable_scope('unit_2_%d' % i):
+ x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
+
+ with tf.variable_scope('unit_3_0'):
+ x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
+ activate_before_residual[2])
+ for i in range(1, layers):
+ with tf.variable_scope('unit_3_%d' % i):
+ x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
+
+ with tf.variable_scope('unit_last'):
+ x = self._batch_norm('final_bn', x)
+ x = self._relu(x, 0.1)
+ x = self._global_avg_pool(x)
+
+ if features_only:
+ return x
+
+ if self.first:
+ self.features = x
+
+ if self.first:
+ with tf.variable_scope('logit'):
+ self.pre_softmax = self._fully_connected(x, 10)
+
+ self.predictions = tf.argmax(self.pre_softmax, 1)
+ self.correct_prediction = tf.equal(self.predictions, self.y_input)
+ self.num_correct = tf.reduce_sum(
+ tf.cast(self.correct_prediction, tf.int64))
+ self.accuracy = tf.reduce_mean(
+ tf.cast(self.correct_prediction, tf.float32))
+
+ with tf.variable_scope('costs'):
+ self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=self.pre_softmax, labels=self.y_input)
+ self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
+ self.mean_xent = tf.reduce_mean(self.y_xent)
+ self.weight_decay_loss = self._decay()
+ return self.pre_softmax
+ else:
+ with tf.variable_scope('logit'):
+ return self._fully_connected(x, 10)
+
+ def _batch_norm(self, name, x):
+ """Batch normalization."""
+ with tf.name_scope(name):
+ return tf.contrib.layers.batch_norm(
+ inputs=x,
+ decay=.9,
+ center=True,
+ scale=True,
+ activation_fn=None,
+ updates_collections=None,
+ is_training=(self.mode == 'train'))
+
+ def _residual(self, x, in_filter, out_filter, stride,
+ activate_before_residual=False):
+ """Residual unit with 2 sub layers."""
+ if activate_before_residual:
+ with tf.variable_scope('shared_activation'):
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+ orig_x = x
+ else:
+ with tf.variable_scope('residual_only_activation'):
+ orig_x = x
+ x = self._batch_norm('init_bn', x)
+ x = self._relu(x, 0.1)
+
+ with tf.variable_scope('sub1'):
+ x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
+
+ with tf.variable_scope('sub2'):
+ x = self._batch_norm('bn2', x)
+ x = self._relu(x, 0.1)
+ x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
+
+ with tf.variable_scope('sub_add'):
+ if in_filter != out_filter:
+ orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
+ orig_x = tf.pad(
+ orig_x, [[0, 0], [0, 0], [0, 0],
+ [(out_filter-in_filter)//2, (out_filter-in_filter)//2]])
+ x += orig_x
+
+ tf.logging.debug('image after unit %s', x.get_shape())
+ return x
+
+ def _decay(self):
+ """L2 weight decay loss."""
+ costs = []
+ for var in tf.trainable_variables():
+ if var.op.name.find('DW') > 0:
+ costs.append(tf.nn.l2_loss(var))
+ return tf.add_n(costs)
+
+ def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
+ """Convolution."""
+ with tf.variable_scope(name):
+ n = filter_size * filter_size * out_filters
+ kernel = tf.get_variable(
+ 'DW', [filter_size, filter_size, in_filters, out_filters],
+ tf.float32, initializer=tf.random_normal_initializer(
+ stddev=np.sqrt(2.0/n)))
+ return tf.nn.conv2d(x, kernel, strides, padding='SAME')
+
+ def _relu(self, x, leakiness=0.0):
+ """Relu, with optional leaky support."""
+ return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
+
+ def _fully_connected(self, x, out_dim):
+ """FullyConnected layer for final output."""
+ num_non_batch_dimensions = len(x.shape)
+ prod_non_batch_dimensions = 1
+ for ii in range(num_non_batch_dimensions - 1):
+ prod_non_batch_dimensions *= int(x.shape[ii + 1])
+ x = tf.reshape(x, [tf.shape(x)[0], -1])
+ w = tf.get_variable(
+ 'DW', [prod_non_batch_dimensions, out_dim],
+ initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
+ b = tf.get_variable('biases', [out_dim],
+ initializer=tf.constant_initializer())
+ return tf.nn.xw_plus_b(x, w, b)
+
+ def _global_avg_pool(self, x):
+ assert x.get_shape().ndims == 4
+ return tf.reduce_mean(x, [1, 2])
+
+
+
diff --git a/case_studies/thermometer/original/discretization_attacks.py b/case_studies/thermometer/original/discretization_attacks.py
new file mode 100644
index 0000000..063a80a
--- /dev/null
+++ b/case_studies/thermometer/original/discretization_attacks.py
@@ -0,0 +1,266 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains all the attacks on discretized inputs.
+
+The attacks implemented are Discrete Gradient Ascent (DGA) and
+Logit Space-Projected Gradient Ascent (LS-PGA).
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+import discretization_utils
+
+
+def discretize_range(discretize_fn, levels, low, high, thermometer=False):
+  """Get the range of discretized values in the interval (low, high).
+
+  For example, assume discretize_fn uniformly discretizes the values
+  between 0 and 1 into 10 bins, each represented by either a one-hot encoding
+  or a thermometer encoding. Then discretize_range(discretize_fn, 10, .3, .7)
+  would return [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.]. Note that its output
+  is independent of the encoding used.
+
+ Args:
+ discretize_fn: Discretization function used to discretize input.
+ levels: Number of levels to discretize the input into.
+ low: Minimum value in the interval.
+ high: Maximum value in the interval.
+ thermometer: If True, then the discretize_fn returns thermometer codes,
+ else it returns one hot codes. (Default: False).
+
+ Returns:
+ Mask of 1's over the interval.
+ """
+ low = tf.clip_by_value(low, 0., 1.)
+ high = tf.clip_by_value(high, 0., 1.)
+ out = 0.
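+  # Walk along the segment between high and low; every point on it is
+  # discretized, so each bucket reachable within the interval contributes at
+  # least one one-hot vector to the running sum.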
+ for alpha in np.linspace(0., 1., levels):
+ q = discretize_fn(alpha * low + (1. - alpha) * high, levels, thermometer)
+
+ # Convert into one hot encoding if q is in thermometer encoding
+ if thermometer:
+ q = discretization_utils.thermometer_to_one_hot(q, levels, flattened=True)
+ out += q
+ return tf.to_float(tf.greater(out, 0.))
+
+
+def adv_dga(x, model, discretize_fn, projection_fn, levels, phase,
+ steps, eps, thermometer=False, noisy_grads=True, y=None):
+ """Compute adversarial examples for discretized input using DGA.
+
+ Args:
+ x: Input image of shape [-1, height, width, channels] to attack.
+ model: Model function which given input returns logits.
+ discretize_fn: Function used to discretize the input into one-hot or thermometer
+ encoding.
+ projection_fn: Function used to project the input before feeding to the
+ model (can be identity).
+ levels: Number of levels the input has been discretized into.
+ phase: Learning phase of the model, corresponding to train and test time.
+ steps: Number of steps to iterate when creating adversarial examples.
+ eps: Eps ball within which the perturbed image must stay.
+ thermometer: Whether the discretized input is in thermometer encoding or one
+ hot encoding. (Default: False).
+ noisy_grads: If True then compute attack over noisy input.
+ y: Optional argument to provide the true labels as opposed to model
+ predictions to compute the loss. (Default: None).
+
+ Returns:
+ Adversarial image for discretized inputs. The output
+ is in the same form of discretization as the input.
+ """
+ # Add noise
+ noise = 0
+
+ if noisy_grads:
+ noise = tf.random_uniform(
+ shape=tf.shape(x), minval=-eps, maxval=eps, dtype=tf.float32)
+ x_noisy = x + noise
+
+ # Clip so that x_noisy is in [0, 1]
+ x_noisy = tf.clip_by_value(x_noisy, 0., 1.)
+
+ # Compute the mask over the bits that we are allowed to attack
+ mask = discretize_range(
+ discretize_fn, levels, x - eps, x + eps, thermometer=thermometer)
+ cur_x_discretized = discretize_fn(x_noisy)
+
+ for i in range(steps):
+ # Compute one hot representation if input is in thermometer encoding.
+ cur_x_one_hot = cur_x_discretized
+ if thermometer:
+ cur_x_one_hot = discretization_utils.thermometer_to_one_hot(
+ cur_x_discretized, levels, flattened=True)
+
+ logits_discretized = model(projection_fn(cur_x_discretized),
+ is_training=phase)
+
+ if i == 0 and y is None:
+ # Get one hot version from predictions
+ y = tf.one_hot(
+ tf.argmax(logits_discretized, 1),
+ tf.shape(logits_discretized)[1])
+
+ loss = tf.nn.softmax_cross_entropy_with_logits(
+ labels=y, logits=logits_discretized)
+
+ # compute the gradients wrt to current input
+ grad, = tf.gradients(loss, cur_x_discretized)
+
+ # The harm done by choosing a particular bit to be active
+ harm = grad * (1. + cur_x_one_hot - 2 * cur_x_discretized)
+
+ # If we are using thermometer harm is the cumsum
+ if thermometer:
+ harm_r = discretization_utils.unflatten_last(harm, levels)
+ harm_r = tf.cumsum(harm_r, axis=-1, reverse=True)
+ harm = discretization_utils.flatten_last(harm_r)
+
+ # Make sure values outside the global mask lose the max
+ harm = harm * mask - (1. - mask) * 1000.0
+
+ harm_r = discretization_utils.unflatten_last(harm, levels)
+
+ bit_to_activate = tf.argmax(harm_r, axis=-1)
+
+ one_hot = tf.one_hot(
+ bit_to_activate,
+ depth=levels,
+ on_value=1.,
+ off_value=0.,
+ dtype=tf.float32,
+ axis=-1)
+
+ # Convert into thermometer if we are doing thermometer encodings
+ inp = one_hot
+ if thermometer:
+ inp = discretization_utils.one_hot_to_thermometer(
+ one_hot, levels, flattened=False)
+
+ flattened_inp = discretization_utils.flatten_last(inp)
+ flattened_inp.mask = mask
+ flattened_inp = tf.stop_gradient(flattened_inp)
+
+ cur_x_discretized = flattened_inp
+ return flattened_inp
+
+
+#@profile
+def adv_lspga(x, model, discretize_fn, projection_fn, levels, phase,
+ steps, eps, attack_step=1., thermometer=False,
+ noisy_grads=True, y=None, inv_temp=1., anneal_rate=1.2):
+ """Compute adversarial examples for discretized input by LS-PGA.
+
+ Args:
+ x: Input image of shape [-1, height, width, channels] to attack.
+ model: Model function which given input returns logits.
+ discretize_fn: Function used to discretize the input into one-hot or thermometer
+ encoding.
+ projection_fn: Function used to project the input before feeding to the
+ model (can be identity).
+ levels: Number of levels the input has been discretized into.
+ phase: Learning phase of the model, corresponding to train and test time.
+ steps: Number of steps to iterate when creating adversarial examples.
+ eps: Eps ball within which the perturbed image must stay.
+ attack_step: Attack step for one iteration of the iterative attack.
+ thermometer: Whether the discretized input is in thermometer encoding or one
+ hot encoding. (Default: False).
+ noisy_grads: If True then compute attack over noisy input.
+ y: True labels corresponding to x. If it is None, then use model predictions
+ to compute loss, else use true labels. (Default: None).
+ inv_temp: Inverse of the temperature parameter for softmax.
+ anneal_rate: Rate for annealing the temperature after every iteration of
+ attack.
+
+ Returns:
+ Adversarial image for discretized inputs. The output
+ is in the same form of discretization as the input.
+ """
+ # Compute the mask over the bits that we are allowed to attack
+ flat_mask = discretize_range(
+ discretize_fn, levels, x - eps, x + eps, thermometer=thermometer)
+
+ mask = discretization_utils.unflatten_last(flat_mask, levels)
+
+ if noisy_grads:
+ activation_logits = tf.random_normal(tf.shape(mask))
+ else:
+ activation_logits = tf.zeros_like(mask)
+
+ for i in range(steps):
+ print("Preparing step", i)
+    # Relax the discrete choice over bins into a softmax over the allowed
+    # bins; bins outside the mask receive a large negative logit so their
+    # probability is effectively zero.
+ activation_probs = tf.nn.softmax(
+ inv_temp * (activation_logits * mask - 999999. * (1. - mask)))
+
+ if thermometer:
+ activation_probs = tf.cumsum(activation_probs, axis=-1, reverse=True)
+
+ logits_discretized = model(
+ projection_fn(discretization_utils.flatten_last(activation_probs)),
+ is_training=phase)
+
+ if i == 0 and y is None:
+ # Get one hot version from model predictions
+ y = tf.one_hot(
+ tf.argmax(logits_discretized, 1),
+ tf.shape(logits_discretized)[1])
+
+ loss = tf.nn.softmax_cross_entropy_with_logits(
+ labels=y, logits=logits_discretized)
+
+ # compute the gradients wrt to current logits
+ grad, = tf.gradients(loss, activation_logits)
+
+ # Get the sign of the gradient
+ signed_grad = tf.sign(grad)
+    signed_grad = tf.stop_gradient(signed_grad)
+
+ # Modify activation logits
+ activation_logits += attack_step * signed_grad
+
+ # Anneal temperature
+ inv_temp *= anneal_rate
+
+ # Convert from logits to actual one-hot image
+ final_al = activation_logits * mask - 999999. * (1. - mask)
+ bit_to_activate = tf.argmax(final_al, axis=-1)
+
+ one_hot = tf.one_hot(
+ bit_to_activate,
+ depth=levels,
+ on_value=1.,
+ off_value=0.,
+ dtype=tf.float32,
+ axis=-1)
+
+ # Convert into thermometer if we are doing thermometer encodings
+ inp = one_hot
+ if thermometer:
+ inp = discretization_utils.one_hot_to_thermometer(
+ one_hot, levels, flattened=False)
+
+ flattened_inp = discretization_utils.flatten_last(inp)
+
+ flattened_inp.mask = mask
+ flattened_inp = tf.stop_gradient(flattened_inp)
+
+ print("Attack set up.")
+
+ return flattened_inp
diff --git a/case_studies/thermometer/original/discretization_utils.py b/case_studies/thermometer/original/discretization_utils.py
new file mode 100644
index 0000000..d771777
--- /dev/null
+++ b/case_studies/thermometer/original/discretization_utils.py
@@ -0,0 +1,309 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilty functions for discretizing image tensors in various ways.
+
+For the discretization, we may either use uniform buckets or supply our own
+custom buckets. One way to compute custom buckets is to use percentile
+information from the data distribution. The final discretized representation
+can either be a one-hot or a thermometer encoding. A thermometer encoding
+is of the form (1, 1, 1,..,1, 0, .., 0) with the transition from 1 to 0
+signifying which bucket it belongs to. To reduce the dimension, one may
+project back by convolving with a fixed random or trainable matrix.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+
+def flatten_last(x):
+ """Flatten the last two dimensions of a tensor into one.
+
+ Args:
+ x: Discretized input tensor of shape [-1, height, width, channels, levels]
+ to flatten.
+
+ Returns:
+ Flattened version of x, of shape [-1, height, width, channels * levels].
+ """
+ shape = x.get_shape().as_list()
+ new_shape = shape[:-1]
+ new_shape[-1] *= shape[-1]
+ new_shape[0] = tf.shape(x)[0]
+ flattened_x = tf.reshape(x, new_shape)
+ return flattened_x
+
+
+def unflatten_last(x, levels):
+ """Unflatten input tensor by separating the last two dimensions.
+
+ Args:
+ x: Discretized input tensor of shape [-1, height, width, channels * levels]
+ to unflatten.
+ levels: Number of levels the tensor has been discretized into.
+
+ Returns:
+ Unflattened version of x, of shape [-1, height, width, channels, levels].
+ """
+ shape = x.get_shape().as_list()
+ shape[-1] /= levels
+ shape[-1] = int(shape[-1])
+ shape.append(levels)
+ shape[0] = tf.shape(x)[0]
+ unflattened_x = tf.reshape(x, shape)
+ return unflattened_x
+
+
+def discretize_uniform(x, levels, thermometer=False):
+ """Discretize input into levels using uniformly distributed buckets.
+
+ Args:
+ x: Input tensor to discretize, assumed to be between (0, 1).
+ levels: Number of levels to discretize into.
+ thermometer: Whether to encode the discretized tensor in thermometer encoding
+ (Default: False).
+
+
+ Returns:
+ Discretized version of x of shape [-1, height, width, channels * levels].
+ """
+ clipped_x = tf.clip_by_value(x, 0., 1.)
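+  # The .99999 factor keeps an input of exactly 1.0 inside the top bucket
+  # (index levels - 1) instead of overflowing to index levels.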
+ int_x = tf.to_int32((.99999 * clipped_x) * levels)
+ one_hot = tf.one_hot(
+ int_x, depth=levels, on_value=1., off_value=0., dtype=tf.float32, axis=-1)
+
+ # Check to see if we are encoding in thermometer
+ discretized_x = one_hot
+ if thermometer:
+ discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)
+
+ # Reshape x to [-1, height, width, channels * levels]
+ discretized_x = flatten_last(discretized_x)
+ return discretized_x
+
+
+def get_centroids_by_percentile(x, levels):
+ """Get the custom centroids by percentiles of the per-pixel distribution of x.
+
+ Args:
+ x: Input data set of shape [-1, height, width, channels]
+ whose centroids we wish to compute.
+ levels: Number of centroids to compute.
+
+ Returns:
+ Custom centroids as a tensor.
+ """
+
+ def quantile(q):
+ return tf.contrib.distributions.percentile(x, q=q, axis=None)
+
+ start = 0.
+ end = 100.
+ quantile_range = tf.lin_space(start, end, levels)
+ centroids = tf.map_fn(quantile, quantile_range)
+ return centroids
+
+
+def discretize_centroids(x, levels, centroids, thermometer=False):
+ """Discretize input into levels using custom centroids.
+
+ Args:
+ x: Input tensor to discretize, assumed to be between (0, 1).
+ levels: Number of levels to discretize into.
+ centroids: Custom centroids into which the input is to be discretized.
+ thermometer: Whether to encode the discretized tensor in thermometer encoding
+ (Default: False).
+
+ Returns:
+ Discretized version of x of shape [-1, height, width, channels * levels]
+ using supplied centroids.
+ """
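+  # Assign every pixel to its nearest centroid (in squared distance) and
+  # encode the winning centroid index as a one-hot vector.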
+ x_stacked = tf.stack(levels * [x], axis=-1)
+ dist = tf.to_float(tf.squared_difference(x_stacked, centroids))
+ idx = tf.argmin(dist, axis=-1)
+ one_hot = tf.one_hot(idx, depth=levels, on_value=1., off_value=0.)
+
+ # Check to see if we are encoding in thermometer
+ discretized_x = one_hot
+ if thermometer:
+ discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)
+
+ # Reshape x to [-1, height, width, channels * levels]
+ discretized_x = flatten_last(discretized_x)
+ return discretized_x
+
+
+def undiscretize_uniform(x, levels, flattened=False, thermometer=False):
+ """Undiscretize a discretized tensor.
+
+ Args:
+ x: Input tensor in discretized form.
+ levels: Number of levels the input has been discretized into.
+ flattened: True if x is of the form [-1, height, width, channels * levels]
+ else it is of shape [-1, height, width, channels, levels].
+ (Default: False).
+ thermometer: Determines if we are using one-hot or thermometer encoding
+ (Default: False).
+
+ Returns:
+ Undiscretized version of x.
+ """
+ # Unflatten if flattened, so that x has shape
+ # [-1, height, width, channels, levels]
+ if flattened:
+ x = unflatten_last(x, levels)
+ if thermometer:
+ int_x = tf.reduce_sum(x, -1) - 1
+ else:
+ int_x = tf.argmax(x, -1)
+ out = tf.to_float(int_x) / (levels - 1)
+ return out
+
+
+def undiscretize_centroids(x,
+ levels,
+ centroids,
+ flattened=False,
+ thermometer=False):
+ """Undiscretize a tensor that has been discretized using custom centroids.
+
+ Args:
+ x: Input tensor in discretized form.
+ levels: Number of levels the input has been discretized into.
+ centroids: The custom centroids used to discretize.
+ flattened: True if x is of the form [-1, height, width, channels * levels]
+ else it is of shape [-1, height, width, channels, levels].
+ (Default: False).
+ thermometer: Determines if we are using one-hot or thermometer encoding
+ (Default: False).
+
+ Returns:
+ Undiscretized version of x.
+ """
+ # Unflatten if flattened, so that x has shape
+ # [-1, height, width, channels, levels]
+ if flattened:
+ x = unflatten_last(x, levels)
+ if thermometer:
+ x = thermometer_to_one_hot(x, levels, flattened=False)
+ out = tf.reduce_sum(tf.multiply(x, centroids), axis=-1)
+ return out
+
+
+def one_hot_to_thermometer(x, levels, flattened=False):
+ """Convert one hot to thermometer code.
+
+ Args:
+ x: Input tensor in one hot encoding to convert to thermometer.
+ levels: Number of levels the input has been discretized into.
+ flattened: True if x is of the form [-1, height, width, channels * levels]
+ else it is of shape [-1, height, width, channels, levels].
+ (Default: False).
+
+ Returns:
+ Thermometer encoding of x.
+ """
+ # Unflatten if flattened, so that x has shape
+ # [-1, height, width, channels, levels]
+ if flattened:
+ x = unflatten_last(x, levels)
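+  # A reversed cumulative sum turns a one-hot code into a thermometer code,
+  # e.g. [0, 0, 1, 0] -> [1, 1, 1, 0].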
+ thermometer = tf.cumsum(x, axis=-1, reverse=True)
+ # Flatten back if original input was flattened
+ if flattened:
+ thermometer = flatten_last(thermometer)
+ return thermometer
+
+
+def thermometer_to_one_hot(x, levels, flattened=False):
+ """Convert thermometer to one hot code.
+
+ Args:
+ x: Input tensor in thermometer encoding to convert to one-hot. Input is
+ assumed to be
+ of shape [-1, height, width, channels, levels].
+ levels: Number of levels the input has been discretized into.
+ flattened: True if x is of the form [-1, height, width, channels * levels]
+ else it is of shape [-1, height, width, channels, levels].
+ (Default: False).
+
+ Returns:
+ One hot encoding of x.
+ """
+ # Unflatten if flattened, so that x has shape
+ # [-1, height, width, channels, levels]
+ if flattened:
+ x = unflatten_last(x, levels)
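+  # Summing a thermometer code counts its leading ones; subtracting one
+  # recovers the bucket index, which is then re-encoded as a one-hot vector.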
+ int_x = tf.to_int32(tf.reduce_sum(x, axis=-1)) - 1
+ one_hot = tf.one_hot(
+ int_x, depth=levels, on_value=1., off_value=0., dtype=tf.float32, axis=-1)
+ # Flatten back if input was flattened
+ if flattened:
+ one_hot = flatten_last(one_hot)
+ return one_hot
+
+
+def random_convolution(x,
+ projection_dim,
+ levels,
+ flattened=True,
+ trainable=False):
+ """Reduce dimension by random convolutions using a standard Gaussian.
+
+ Args:
+ x: Discretized input tensor in one hot or thermometer encoding to project.
+ projection_dim: Dimension to project the output tensor to.
+ levels: Number of levels the input has been discretized into.
+    flattened: True if x is of the form [-1, height, width, channels * levels]
+      else it is of shape [-1, height, width, channels, levels].
+      (Default: True).
+ trainable: If True then the weights for projection are learned (Default:
+ False).
+
+ Returns:
+ Projection of x using a fixed random convolution.
+
+ Raises:
+ ValueError: If projection dimension is higher than the number of levels.
+ """
+ if projection_dim > levels:
+ raise ValueError('Projection dimension higher than the number of levels')
+
+ # Unflatten first to get number of channels
+ if flattened:
+ x = unflatten_last(x, levels)
+
+ channels = x.get_shape().as_list()[3]
+
+ # Flatten so that x has shape [-1, height, width, channels * levels]
+ x = flatten_last(x)
+
+ scope = 'projection'
+ if trainable:
+ scope = 'trainable_projection'
+
+ with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
+ kernel = tf.get_variable(
+ 'conv_projection', [1, 1, channels * levels, channels * projection_dim],
+ trainable=trainable)
+
+ x_proj = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME')
+
+ # Unflatten back if input was not flattened
+ if not flattened:
+ x_proj = unflatten_last(x_proj, levels)
+ return x_proj
+
diff --git a/case_studies/thermometer/original/robustml_attack.py b/case_studies/thermometer/original/robustml_attack.py
new file mode 100644
index 0000000..59181ce
--- /dev/null
+++ b/case_studies/thermometer/original/robustml_attack.py
@@ -0,0 +1,148 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+from robustml_model import LEVELS
+from discretization_utils import discretize_uniform
+
+import numpy as np
+from discretization_attacks import adv_lspga
+
+
+class Attack:
+ def __init__(self, sess, model, epsilon, num_steps=30, step_size=1, batch_size=1, n_classes=10):
+ self._sess = sess
+ self.model = model
+ self.num_steps = num_steps
+ self.step_size = step_size
+
+ self.xs = tf.Variable(np.zeros((batch_size, 32, 32, 3), dtype=np.float32),
+ name='modifier')
+ self.orig_xs = tf.placeholder(tf.float32, [None, 32, 32, 3])
+
+ self.ys = tf.placeholder(tf.int32, [None])
+
+ self.epsilon = epsilon * 255
+
+ delta = tf.clip_by_value(self.xs, 0, 255) - self.orig_xs
+ delta = tf.clip_by_value(delta, -self.epsilon, self.epsilon)
+
+ self.do_clip_xs = tf.assign(self.xs, self.orig_xs+delta)
+
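+        # The block below builds a piecewise-linear relaxation of the
+        # thermometer encoding: each level's bit rises linearly from 0 to 1 as
+        # the pixel crosses the corresponding bucket boundary, so self.therm
+        # is differentiable with respect to self.xs.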
+ compare = tf.constant((256.0/LEVELS)*np.arange(-1,LEVELS-1).reshape((1,1,1,1,LEVELS)),
+ dtype=tf.float32)
+ inner = tf.reshape(self.xs,(-1, 32, 32, 3, 1)) - compare
+ inner = tf.maximum(tf.minimum(inner/(256.0/LEVELS), 1.0), 0.0)
+
+ self.therm = tf.reshape(inner, (-1, 32, 32, LEVELS*3))
+
+ self.logits = logits = model(self.therm)
+
+ self.uniform = discretize_uniform(self.xs/255.0, levels=LEVELS, thermometer=True)
+ self.real_logits = model(self.uniform)
+
+ label_mask = tf.one_hot(self.ys, n_classes)
+ correct_logit = tf.reduce_sum(label_mask * logits, axis=1)
+ wrong_logit = tf.reduce_max((1-label_mask) * logits - 1e4*label_mask, axis=1)
+
+ self.loss = (correct_logit - wrong_logit)
+
+ start_vars = set(x.name for x in tf.global_variables())
+ optimizer = tf.train.AdamOptimizer(step_size*1)
+ self.grad = tf.sign(tf.gradients(self.loss, self.xs)[0])
+
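+        # Adam is applied to the sign of the gradient of the margin loss
+        # (correct logit minus best wrong logit), i.e. each step moves the
+        # pixels in the signed-gradient direction that shrinks the margin.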
+ grad,var = optimizer.compute_gradients(self.loss, [self.xs])[0]
+ self.train = optimizer.apply_gradients([(tf.sign(grad),var)])
+
+ end_vars = tf.global_variables()
+ self.new_vars = [x for x in end_vars if x.name not in start_vars]
+
+ #@profile
+ def perturb(self, x, y, sess, feed_dict={}):
+ sess.run(tf.variables_initializer(self.new_vars))
+ sess.run(self.xs.initializer)
+ sess.run(self.do_clip_xs,
+ {self.orig_xs: x})
+
+ for i in range(self.num_steps):
+
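+            # Forward pass uses the exact (hard) thermometer encoding of the
+            # current iterate, fed in through self.therm, while gradients
+            # reach self.xs through the differentiable relaxation built in
+            # __init__ (in the spirit of BPDA).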
+ t = sess.run(self.uniform)
+ sess.run(self.train, feed_dict={self.ys: y,
+ self.therm: t,
+ **feed_dict})
+ sess.run(self.do_clip_xs,
+ {self.orig_xs: x})
+
+ x_batch_adv = sess.run(self.xs)
+
+ return x_batch_adv
+
+ def run(self, x, y, target, feed_dict={}):
+ if len(x.shape) == 3:
+ x = np.array([x])
+ y = np.array([y])
+ if target is not None:
+ raise NotImplementedError
+ return self.perturb(x * 255.0, y, self._sess, feed_dict) / 255.0
+
+
+class LSPGDAttack:
+ def __init__(self, sess, model, epsilon, num_steps=7, step_size=0.1,
+ use_labels=True, n_classes=10):
+ # ATTENTION: use_labels is a modification from AUTHOR
+ self._sess = sess
+ self.model = model
+
+ self.xin = tf.placeholder(tf.float32, (None, 32, 32, 3))
+ if use_labels:
+ self.yin = tf.placeholder(tf.int64, shape=None)
+ self.y_filled = tf.one_hot(
+ self.yin,
+ n_classes)
+ else:
+ self.yin = None
+
+ steps = num_steps
+ eps = epsilon
+ attack_step = step_size
+
+ projection_fn = tf.identity
+
+ self.attack = adv_lspga(self.xin, model, discretize_uniform,
+ projection_fn, 16, tf.constant(False), steps, eps,
+ attack_step, thermometer=True, noisy_grads=False,
+ y=self.y_filled)
+
+
+
+ def perturb(self, x, y, sess, feed_dict={}):
+ if self.yin is None:
+ x_batch_adv = sess.run(self.attack,
+ {self.xin: x/255.0, **feed_dict})
+ else:
+ x_batch_adv = sess.run(self.attack,
+ {self.xin: x/255.0, **feed_dict,
+ self.yin: y})
+
+ return x_batch_adv
+
+ def run(self, x, y, target, feed_dict={}):
+ if len(x.shape) == 3:
+ x = np.array([x])
+ y = np.array([y])
+ if target is not None:
+ raise NotImplementedError
+ return self.perturb(x * 255.0, y, self._sess, feed_dict) / 255.0
diff --git a/case_studies/thermometer/original/robustml_model.py b/case_studies/thermometer/original/robustml_model.py
new file mode 100644
index 0000000..cdc4d80
--- /dev/null
+++ b/case_studies/thermometer/original/robustml_model.py
@@ -0,0 +1,84 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import robustml
+import tensorflow as tf
+from discretization_utils import discretize_uniform
+import numpy as np
+from cifar_model import Model
+
+LEVELS = 16
+
+class Thermometer(robustml.model.Model):
+ def __init__(self, sess, epsilon):
+ self._sess = sess
+
+ self._x = tf.placeholder(tf.float32, (None, 32, 32, 3))
+ self._encode = discretize_uniform(self._x/255.0, levels=LEVELS, thermometer=True)
+
+ self._model = Model(
+ 'checkpoints/original_thermometer_wrn/thermometer_advtrain/',
+ sess,
+ tiny=False,
+ mode='eval',
+ thermometer=True,
+ levels=LEVELS
+ )
+
+ self._dataset = robustml.dataset.CIFAR10()
+ self._threat_model = robustml.threat_model.Linf(epsilon=epsilon/255.0)
+
+ @property
+ def dataset(self):
+ return self._dataset
+
+ @property
+ def threat_model(self):
+ return self._threat_model
+
+ def classify(self, x, skip_encoding=False):
+ x = x * 255.0
+ if not skip_encoding:
+ # first encode the input, then classify it
+ x = self.encode(x)
+ return self._sess.run(self._model.predictions, {self._model.x_input: x})
+
+ def get_features_and_gradients(self, x):
+ x = x * 255.0
+ x = self.encode(x)
+ grad = tf.gradients(self._model.features, self._model.x_input)[0]
+ return self._sess.run((self._model.features, grad),
+ {self._model.x_input: x})
+
+ def get_features(self, x):
+ x = x * 255.0
+ x = self.encode(x)
+ return self._sess.run(self._model.features,
+ {self._model.x_input: x})
+
+ def get_features_and_logits(self, x):
+ x = x * 255.0
+ x = self.encode(x)
+ return self._sess.run((self._model.features, self._model.pre_softmax),
+ {self._model.x_input: x})
+
+ # expose internals for white box attacks
+
+ @property
+ def model(self):
+ return self._model
+
+ # x should be in [0, 255]
+ def encode(self, x):
+ return self._sess.run(self._encode, {self._x: x})
diff --git a/case_studies/thermometer/original/robustness_evaluation.py b/case_studies/thermometer/original/robustness_evaluation.py
new file mode 100644
index 0000000..e3ef0f2
--- /dev/null
+++ b/case_studies/thermometer/original/robustness_evaluation.py
@@ -0,0 +1,116 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+tf.logging.set_verbosity(tf.logging.ERROR)
+
+import robustml
+from robustml_model import Thermometer
+import sys
+import argparse
+
+import numpy as np
+from robustml_attack import LSPGDAttack, Attack
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cifar-path', type=str, required=True,
+ help='path to the test_batch file from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
+ parser.add_argument('--start', type=int, default=0)
+ parser.add_argument('--end', type=int, default=100)
+ parser.add_argument('--debug', action='store_true')
+ parser.add_argument("--attack", default="adaptive", choices=("original", "adaptive", "modified", "modified2"))
+ parser.add_argument("--batch-size", default=256, type=int)
+ parser.add_argument("--epsilon", type=int, default=8)
+ args = parser.parse_args()
+
+ # set up TensorFlow session
+ sess = tf.Session()
+
+ # initialize a model
+ model = Thermometer(sess, args.epsilon)
+
+ batch_size = args.batch_size
+
+ # initialize an attack (it's a white box attack, and it's allowed to look
+ # at the internals of the model in any way it wants)
+ # attack = BPDA(sess, model, epsilon=model.threat_model.epsilon, debug=args.debug)
+
+ # ATTENTION: Original attack did _not_ use the labels
+ use_labels=True
+ if args.attack == "adaptive":
+ attack = Attack(sess, model.model, epsilon=model.threat_model.epsilon, batch_size=batch_size, n_classes=10)
+ elif args.attack == "original":
+ attack = LSPGDAttack(sess, model.model, epsilon=model.threat_model.epsilon, use_labels=use_labels)
+ elif args.attack == "modified":
+ attack = LSPGDAttack(sess, model.model, epsilon=model.threat_model.epsilon, num_steps=50, step_size=0.25, use_labels=use_labels)
+ elif args.attack == "modified2":
+ attack = LSPGDAttack(sess, model.model, epsilon=model.threat_model.epsilon, num_steps=100, step_size=0.1, use_labels=use_labels)
+ else:
+ raise ValueError("invalid attack mode")
+
+ # initialize a data provider for CIFAR-10 images
+ provider = robustml.provider.CIFAR10(args.cifar_path)
+
+ success = 0
+ total = 0
+ random_indices = list(range(len(provider)))
+ if args.end == -1:
+ args.end = int(len(random_indices) / batch_size)
+ assert args.end <= len(random_indices) / batch_size
+ assert args.start <= len(random_indices) / batch_size
+
+ """
+ print("using robustml...")
+ success_rate = robustml.evaluate.evaluate(
+ model,
+ attack,
+ provider,
+ start=args.start,
+ end=args.end,
+ deterministic=True,
+ debug=args.debug,
+ )
+ print('attack success rate: %.2f%% (over %d data points)' % (success_rate*100, args.end-args.start))
+ print("now using own eval...")
+ """
+
+ np.random.shuffle(random_indices)
+ for i in range(args.start, args.end):
+ print('evaluating batch %d of [%d, %d)' % (i, args.start, args.end), file=sys.stderr)
+
+ x_batch = []
+ y_batch = []
+ for j in range(batch_size):
+ x_, y_ = provider[random_indices[i*batch_size + j]]
+ x_batch.append(x_)
+ y_batch.append(y_)
+ x_batch = np.array(x_batch)
+ y_batch = np.array(y_batch)
+ total += len(x_batch)
+ assert len(x_batch) == batch_size
+
+ x_batch_adv = attack.run(x_batch, y_batch, None)
+ y_batch_adv = model.classify(x_batch_adv, skip_encoding=args.attack in ("original", "modified", "modified2"))
+ # adv_acc = (y_batch_adv == y_batch).mean()
+ success += (y_batch_adv != y_batch).sum()
+
+ success_rate = success / total
+
+
+  print('attack success rate: %.2f%%, robust accuracy: %.2f%% (over %d data points)' % (success_rate*100, 100-success_rate*100, total))
+
+if __name__ == '__main__':
+ main()
diff --git a/case_studies/thermometer/original/setup.sh b/case_studies/thermometer/original/setup.sh
new file mode 100644
index 0000000..05b595e
--- /dev/null
+++ b/case_studies/thermometer/original/setup.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+cd "$(dirname "$0")" # cd to directory of this script
+
+# $1 is filename
+# $2 is expected sha
+check_sha1() {
+ computed=$(sha1sum "$1" 2>/dev/null | awk '{print $1}') || return 1
+ if [ "$computed" == "$2" ]; then
+ return 0;
+ else
+ return 1;
+ fi
+}
+
+# $1 is URL
+# $2 is extracted file name
+# $3 is the checksum
+fetch() {
+ f=${1##*/}
+ if check_sha1 $f $3; then
+ echo "$2 already downloaded"
+ return
+ fi
+ echo "downloading $1"
+ wget -q $1 -O $f
+ if check_sha1 $f $3; then
+ echo "downloaded $2"
+ else
+ echo "HASH MISMATCH, SHA1($f) != $3"
+ return
+ fi
+
+ tar xzf $f
+}
+
+cd ..
+fetch https://github.com/anishathalye/obfuscated-gradients/releases/download/v0/cifar10_data.tgz cifar10_data 6d011cbb029aec2c18dc10bce32adea9e27c2068
+mkdir -p models
+cd models
+fetch https://github.com/anishathalye/obfuscated-gradients/releases/download/v0/model_thermometer_advtrain.tgz models/thermometer_advtrain 595261189ee9a78911f312cd2443ee088ef59bee
diff --git a/case_studies/thermometer/original/thermometer.ipynb b/case_studies/thermometer/original/thermometer.ipynb
new file mode 100644
index 0000000..48c17da
--- /dev/null
+++ b/case_studies/thermometer/original/thermometer.ipynb
@@ -0,0 +1,234 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
+ " from ._conv import register_converters as _register_converters\n"
+ ]
+ }
+ ],
+ "source": [
+ "import tensorflow as tf\n",
+ "import numpy as np\n",
+ "import math\n",
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline\n",
+ "\n",
+ "import cifar10_input\n",
+ "\n",
+ "from discretization_utils import one_hot_to_thermometer\n",
+ "from discretization_utils import discretize_uniform\n",
+ "from discretization_attacks import adv_lspga\n",
+ "\n",
+ "from cifar_model import Model\n",
+ "import cifar10_input\n",
+ "levels = 16"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class Attack:\n",
+ " def __init__(self, model, num_steps, step_size, epsilon):\n",
+ " self.model = model\n",
+ " self.num_steps = num_steps\n",
+ " self.step_size = step_size\n",
+ "\n",
+ " self.xs = tf.Variable(np.zeros((1, 32, 32, 3), dtype=np.float32),\n",
+ " name='modifier')\n",
+ " self.orig_xs = tf.placeholder(tf.float32, [None, 32, 32, 3])\n",
+ "\n",
+ " self.ys = tf.placeholder(tf.int32, [None])\n",
+ "\n",
+ " self.epsilon = epsilon\n",
+ "\n",
+ " delta = tf.clip_by_value(self.xs, 0, 255) - self.orig_xs\n",
+ " delta = tf.clip_by_value(delta, -self.epsilon, self.epsilon)\n",
+ "\n",
+ " self.do_clip_xs = tf.assign(self.xs, self.orig_xs+delta)\n",
+ "\n",
+ " compare = tf.constant((256.0/levels)*np.arange(-1,levels-1).reshape((1,1,1,1,levels)),\n",
+ " dtype=tf.float32)\n",
+ " inner = tf.reshape(self.xs,(-1, 32, 32, 3, 1)) - compare\n",
+ " inner = tf.maximum(tf.minimum(inner/(256.0/levels), 1.0), 0.0)\n",
+ "\n",
+ " self.therm = tf.reshape(inner, (-1, 32, 32, levels*3))\n",
+ "\n",
+ " self.logits = logits = model(self.therm)\n",
+ "\n",
+ " self.uniform = discretize_uniform(self.xs/255.0, levels=levels, thermometer=True)\n",
+ " self.real_logits = model(self.uniform)\n",
+ "\n",
+ " label_mask = tf.one_hot(self.ys, 10)\n",
+ " correct_logit = tf.reduce_sum(label_mask * logits, axis=1)\n",
+ " wrong_logit = tf.reduce_max((1-label_mask) * logits - 1e4*label_mask, axis=1)\n",
+ "\n",
+ " self.loss = (correct_logit - wrong_logit)\n",
+ "\n",
+ " start_vars = set(x.name for x in tf.global_variables())\n",
+ " optimizer = tf.train.AdamOptimizer(step_size*1)\n",
+ " self.grad = tf.sign(tf.gradients(self.loss, self.xs)[0])\n",
+ "\n",
+ " grad,var = optimizer.compute_gradients(self.loss, [self.xs])[0]\n",
+ " self.train = optimizer.apply_gradients([(tf.sign(grad),var)])\n",
+ "\n",
+ " end_vars = tf.global_variables()\n",
+ " self.new_vars = [x for x in end_vars if x.name not in start_vars]\n",
+ "\n",
+ " def perturb(self, x, y, sess):\n",
+ " sess.run(tf.variables_initializer(self.new_vars))\n",
+ " sess.run(self.xs.initializer)\n",
+ " sess.run(self.do_clip_xs,\n",
+ " {self.orig_xs: x})\n",
+ "\n",
+ " for i in range(self.num_steps):\n",
+ "\n",
+ " t = sess.run(self.uniform)\n",
+ " sess.run(self.train, feed_dict={self.ys: y,\n",
+ " self.therm: t})\n",
+ " sess.run(self.do_clip_xs,\n",
+ " {self.orig_xs: x})\n",
+ "\n",
+ " return sess.run(self.xs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Called\n",
+ "WARNING:tensorflow:From /home/npc/breaking-iclr-2018-defenses/release/thermometer/cifar_model.py:214: UniformUnitScaling.__init__ (from tensorflow.python.ops.init_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.initializers.variance_scaling instead with distribution=uniform to get equivalent behavior.\n",
+ "INFO:tensorflow:Restoring parameters from ../models/thermometer_advtrain/checkpoint-68000\n",
+ "restored\n"
+ ]
+ }
+ ],
+ "source": [
+ "sess = tf.Session()\n",
+ "cifar = cifar10_input.CIFAR10Data()\n",
+ "model = Model('../models/thermometer_advtrain/',\n",
+ " sess, tiny=False, mode='eval',\n",
+ " thermometer=True, levels=levels)\n",
+ "attack = Attack(model,\n",
+ " 30,\n",
+ " 1,\n",
+ " 8)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+       "image/png": "<base64-encoded PNG output omitted: matplotlib rendering of the CIFAR-10 input image>",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Image Label [3]\n",
+ "Clean Model Prediction [3]\n",
+ "Logits [[-1.3182435 2.5834875 -5.5870852 10.171819 -5.0424666 6.116695\n",
+ " -4.9661837 -3.076646 -1.206701 2.325575 ]]\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJztnWmMXNeV3/+n9q6l9ybZXMRFoiRLsrWYEGzJNjwaeKJxJpENJI6NwNAHYzgIxkAMTD4IDhA7QAJ4gtiGPwwc0LYwmsRj2TOSIU3g8ViWZ8J4HNOiFFEbtVAUKbLZ7Ca72Ut1de0nH6qEUK37f91ik9WU3/8HEKy+p+579933Tr2q+3/nHHN3CCHiR2KjByCE2Bjk/ELEFDm/EDFFzi9ETJHzCxFT5PxCxBQ5vxAxRc4vREyR8wsRU1Lr6Wxm9wL4FoAkgO+6+9ei3l/qS/nYQDZoc2vRfm7JYHsK/OnEphsfiLepKWkR20R4HMmIcbTAj8ta4e0BQMQmgVSE0cLHHTVXkTtr8Xn0iHlEMnxsUaNoRR0W+DjakVsN94u4OoB21DXAjznR4vdSj7gO2FO2UUfVbIf3tVhpYLkecdIu4pKd38ySAP4MwCcAnAbwlJk97u4vsT5jA1n85399Y9DWTC/RfTXSA8H2oUSN9pmthz9kAMBaFWorZRrUNtMuBtuL7TrtU06UqS27OEhtzQa/yGyQ7y+ZyQTbR8DnyhNNbptNU1ujyc9Zu38o3B5xWc41+OWeBR9HOeoD1sNOkiYfkgCACp+PWePnM38hfH0AQN0uUFuDfNhUm3w+5mq5YPsPD56ifVaynq/9dwI45u7H3b0O4GEA961je0KIHrIe598G4OKPmdPdNiHEe4ArvuBnZvvN7LCZHV6M+DolhOgt63H+CQA7Lvp7e7ftbbj7AXff5+77Svl1rS8KIS4j63H+pwDsNbPdZpYB8FkAj1+eYQkhrjSXfCt296aZfRHA36Ej9T3o7i9G9Wm6Ydr7graSL9J+Vg+vwNerfAW4HaEetDP8sJeMr/bnbTnYXk3neZ8qH8dCxEdvcYCv9ObrJWrz9EKwvWJV2ifdDK8cA0Cif4ba2k1+3PVmWK1oVbkKU22Erw0AmEvy1flCY5baWtXwCryX+Pbyaa6MFKv8pFWTfEW/3V+gtsRyeK6aC/O0T5JIqVEixkrW9T3c3X8C4Cfr2YYQYmPQE35CxBQ5vxAxRc4vREyR8wsRU+T8QsSU3j51Y20kiBS1EFE/IN8Oy2XLfeEgFgBIzvLtpTNc9moPcymqnSQ2ImsBQDvLtZdkhu+rdY7LTbUSf1KycCEctJQgAS4A0BrgMmtqcYSPo8CPu9wKB/a8NsuDkpbqXCpbWoyIpsvzwJ5SK3yuMwtc3uwv8gCd4QK/rgoRUY4Ly2GZGABa2bBkncjxc2YsgvBdaH268wsRU+T8QsQUOb8QMUXOL0RMkfMLEVN6u9rfbsHK4ZXloSIP0sFCeAU7k+TBErWIVFeocpVgMMkDexoeXp1fSvAV1r7EMN9XRH6D/A3XU9tchasV55fCK+Z9Sb5qn47IFldrh1OoAUA1IjXYb16ZCrZ7Zpz28SRfZa9t40FE5UmuEkwQ9aOY5em4Wmd5mrdrtvK8i9v6+fgHk3x/lWb4uiqAX8NT/eFxJCLSQr7jvWt/qxDitwk5vxAxRc4vREyR8wsRU+T8QsQUOb8QMaWnUp85kKmFZbF6lmsUicyOYHvSeO62RCuiekqRS2zliClpz4RlSm9HBOEMhQNcAKA0xm0f/cQnqO3wPz5DbZXFuWB7lauDKEeorK+cPkttR9/g+f2yg3uC7dv33ET7+CZ+DeTr/Lz0t3iuu8UtYRlzZu5Vvq8Kl0VPz/P5sIgqS6mBiEnOh497mavOyBB1M6Lq3TvQnV+ImCLnFyKmyPmFiClyfiFiipxfiJgi5xcipqxL6jOzEwAWAbQANN19X9T725bEYjpcamret9J+LVK2aKiwifYZcC5DpZYjcviN8Ag3kCBCr3LpMGkR0Vzn+b7+8bHHqO3UHO/3KtnmyYl31FD9/7YLXKJK5iNy/yW3U1txMDxZmTyfq1KbR1uWk/3Ulhjg+fjqS28E23cVr6F95iNKg73xBpf6Zmvh/JQAAONRidcSqa+eGqN9Msnz4d0YP5cruRw6/++4e3gkQoirFn3tFyKmrNf5HcDPzOxpM9t/OQYkhOgN6/3a/xF3nzCzTQCeMLOX3f3gxW/ofijsB1bJ1iOE6CnruvO7+0T3/2kAPwZwZ+A9B9x9n7vvK/T1NmuYEIJzyc5vZgUzK731GsDvAXjhcg1MCHFlWc+teDOAH1unPFAKwF+6+0+jOjQ9gXONcLTd7AX+OXTwV0eD7e/by4f/OzdvprZrkrzfwjxPmjhkYUmmioifM3O8TFN5kEtbJ0+GJSoAmG3xMl+eCUekFUe20T6JOpe28v382GpZLjm202HJqTTKxzEyxMPYatPhhKAAMD87TW3ZdFhyHCxw6fBc+Ri1pUs8Aem5s/ycvd7gJdH2jPUF25MWLlMHALVsOLGqJ/g8reSSnd/djwO49VL7CyE2Fkl9QsQUOb8QMUXOL0RMkfMLEVPk/ELElN4+dWM5pHJ7g6blJS43tfrDEVizFS6fVOo8IgqZN6mp2eKfhxcS4Yi0VJJHbCVbYRkHACamuFTWXIyIztqym5tK4aSg7UQ4sScA7KjxY27meHLSaoGfs3Y9HOuVnedzPzx6G7XNN3mU5lRE7cVCX/gamTrPJdj+Pi45njwdEQGZ4dfBdMT5nKyFs25uSfPz0lgi57O99gyeuvMLEVPk/ELEFDm/EDFFzi9ETJHzCxFTerranysWcMNHPhy0LTxxmPbbtDm8inrnrXfTPsNJHuxRXzpFbUjxMl+l0fAq8IW5Qdqnb1O41BgATDx3nNoGd/JSXjeP8W3OpUipqQUesFR3Ph/LS/zYMskIJWB5NNj+6yOv0T7ZEg8iGi2QBIoAprM8i9yp4+Fz5m2uHvQXeeDUllI4ByUATKdJDS0AF948Q21vvBK+vvMf5Tkq03Nh9cBs7fdz3fmFiClyfiFiipxfiJgi5xcipsj5hYgpcn4hYkpPpb5UMoWhgeGgbffeO2i/Wj4crHDN7uton83ZcL49AKi8eYLauMgDLLbCwUJ33fsvaZ/+sXAgEwDc/FEebPPMrw5SW6LIjztXOxFst0ZYegOAjHGJqomIPHKLPABm0cMBPMNkDgFgOeJW1OIxRLh2nOdrrDXCku8554E9tSq/CgpFfswjST6OOk8ZiGcWw2NMP8/Lyu3dHi5R5oiYqBXozi9ETJHzCxFT5PxCxBQ5vxAxRc4vREyR8wsRU1aV+szsQQB/AGDa3W/ptg0D+CGAXQBOAPiMu3O9qIu7oVkLyygvzU/Qfnd88B31PwEAYwmeH6/Pz1Lb4hyXUFIRpbyOLy4E2z+Q30P7XLN1O7V5RO68QjqirNUgzwd35iTJI9c8Tfts3sVlwDNHeRReYoDP/9SxsDy
7aRePzrtpd/g8A0B9hkdpzhYiroPJcPkqW4zI+1fi87HQOkdt/XkuH84t8GjRRCUsHx47xaMV+wbD5brqrYjcjyv3u4b3/DmAe1e0PQDgSXffC+DJ7t9CiPcQqzq/ux8EMLui+T4AD3VfPwTgU5d5XEKIK8yl/ubf7O6T3ddn0anYK4R4D7HuBT93dwA0Ab2Z7Tezw2Z2eGEh/JtZCNF7LtX5p8xsHAC6/9PVGHc/4O773H1ff3/EA85CiJ5yqc7/OID7u6/vB/DY5RmOEKJXrEXq+wGAjwMYNbPTAL4C4GsAfmRmXwBwEsBn1rKzRDKBYn9YYmlU+E+CmamwPLhpJy+PlKtzuaYwFo6IAoDsYrgkFwAU6yvXPTs8+t3v0j733Lef2gaab1Bbynkpr2SNH/f267cE2ycXeZLOs7P8mPs2caky4eH5AIDlZli+2nzr+2ifW2/fSm3/8L/Dkh0AoM6TcZ4jUYnNSkQprBSPZBwskQSpAGYWuDQ3UuKRglOkDFxumbvn6dfDCUHrNX4uV7Kq87v754jpd9e8FyHEVYee8BMipsj5hYgpcn4hYoqcX4iYIucXIqb0NIGnmcHSYZmtPV2h/arD4TpzjTSXT85HyFeW5PXW0kkuG43nw9P12uQR2ufk+aN8X7PhJJcAcPL0CWq7Y0u43iEAXL/3pmD7KxM8yrHW5JF7bQ9HjwFAoR5OxgoAJ+1EsH3r5rAUCQBT58rUVmtxefPUdHhfAFBcCkvLSzkupTbAx9Gs8QSeSV4OEYUiH//OdrjmYa3IIyDrDXI+k0rgKYRYBTm/EDFFzi9ETJHzCxFT5PxCxBQ5vxAxpadSn7cTaFTCkkdqICJSbWe47l4+x/MD/OLFF6ltd5UnfNy7O01tuUo4CWNmKRxhBQDn3uQJH9s1Hqm26/27qS2f41GJrfRQsH18F99ea45H51Xn+P2hXeTybGYsnNwpm+Q1FC8sclu+xvPD2hy/jMvZsKSXKfPIvfOL1IRNOa7n9Y3upLZUP+9Xnggn/hxzPr/lhbDMmrKIwa9Ad34hYoqcX4iYIucXIqbI+YWIKXJ+IWJKT1f7Ewmg0Bcu4zTS5EEMfRgPtluCr9guNPj2nrrAc62NbudTkusLrxzPGC+R1FriQT9bE4PUtm3rXdRWbvHAkxdefDrY/uopvqI/lh/j48jw0maHjvF5ZOv26Tqfq1qdr/afTvFSWJnNPMBry/nwqvjxiEXxkRK/dopJHthjA/xemm/zAKlWJjyYCsk/CADXXxdWrA4eWfv9XHd+IWKKnF+ImCLnFyKmyPmFiClyfiFiipxfiJiylnJdDwL4AwDT7n5Lt+2rAP4QwFtRK19295+sti13R8PDUt+WnTxYpT8RLuXVXuISz/btPJDl0Lk5ajtT5sFCg8nw2Ae2cemt33mgUGZkG7W9/7Zd1NZK834PfvfPgu2ViGCm15e57pVOTVDbiPE8idWhcO68s/OTwXYAuHEzl9EGwOW3M5M8eOrNSjjX3cISP2cJrjiiVQwHTgFAf5V3tD4+xmTrRLB9b4HnGSzmwuczmbi8Ofz+HMC9gfZvuvtt3X+rOr4Q4upiVed394MA+BMiQoj3JOv5zf9FM3vOzB40M/5dSAhxVXKpzv9tANcCuA3AJICvszea2X4zO2xmh+fm+W9tIURvuSTnd/cpd2+5exvAdwDcGfHeA+6+z933DQ7wZ9mFEL3lkpzfzC6OtPk0gBcuz3CEEL1iLVLfDwB8HMComZ0G8BUAHzez2wA4gBMA/mgtOzMz5JJhea5duJ72a5Nh9iejcvHdQG2vPH2Y2s4n91FbK3862H7DteFySwDwq18/RW1bNt9IbX/7s4N8HBUuzTXqYdv0hWdpH7S4HFmucemz1Obj2DIczrm3LcOj8+ZP8bJhzQYv89W3iV/Go+fC3zZbxsfRrPHceV7mkvRsm5d6KzkvzXZ9IjyWodQI7ZOw8Bp8AmE5OsSqzu/unws0f2/NexBCXJXoCT8hYoqcX4iYIucXIqbI+YWIKXJ+IWJKTxN4JlOGwkhYVhob4rLdXCJcJqs0zKOe+ownTLRxbpucfpnabv7Q+4PtjRkur+RLvEzT5ASPLDt28nVqa1Z59FsiF460WzrP91Ua5lGC82UeuZcY5VGVd4yE5+qpI8/TPsfO8rm/87bfp7Z2Ziu1nXh5Oth+IRFRRq3K74nVKp/H68ZL1DZc4Nd3vhSWMWvtiLlPheVINyXwFEKsgpxfiJgi5xcipsj5hYgpcn4hYoqcX4iY0lOpD95GshGWKLbt5gkaJ86G5bKZGZ4fYJhElQHATaO3UNtzL/EovPOz4QSNqdwHaJ8d11ITTr7Kpa2JMzyy7MN376W25aVwhFhpnMt5IzfwRExnnj5Oben5iBqF23cF2/vHuTz7wR2j1HbuDJdMz8z/LbUtkqSrc1NcshsrXUNthSSP3NvBSx5ijAdOwvvC12q6xo95AWG5143P70p05xcipsj5hYgpcn4hYoqcX4iYIucXIqb0dLW/2XCcnwyvRjedD6U/E16ZbabO850t87JF+WGeG61hfBwTS+G8aQPzvM9wISL33C07qe34ye9TW6PB1Yq5+fBq9HU38ByJ123hasWJAZ7r7tUzPG/r1saJYHsxx+83FdxEbVPLh6jt2OmIcmONfLA9Ca5wjG/nq/07UzyYaXOJr7Tnyny5vzYfvr6dXPcAUGqEg4iSbQX2CCFWQc4vREyR8wsRU+T8QsQUOb8QMUXOL0RMWUu5rh0A/gLAZnTKcx1w92+Z2TCAHwLYhU7Jrs+4O4+mAVCr1XD8jXCgyAduHw+2A8C5pXA5rMpShvZJb+ayy9gWHjBRKvEAo8FcWLa7/dZwvjoA+Pk/PExt0xNL1Jbfvonajp0+RW07doaDfq67hY8x289zxe3ZzuXIuQvh/HgAcPKpcA7CVpPfb16eO0ptCyRABwCqlXCORwCo1cOVoQd2c7l0mits2LSHS3YzCS4DIsfP9dxMuARYIs+vgQw5ZfV3cT9fyzubAP7E3W8C8CEAf2xmNwF4AMCT7r4XwJPdv4UQ7xFWdX53n3T3Z7qvFwEcBbANwH0AHuq+7SEAn7pSgxRCXH7e1W9+M9sF4HYAhwBsdvfJruksOj8LhBDvEdbs/GZWBPAIgC+5+8LFNnd3dNYDQv32m9lhMztcLvPHMIUQvWVNzm9maXQc//vu/mi3ecrMxrv2cQDB1R93P+Du+9x9X7HIixoIIXrLqs5vZgbgewCOuvs3LjI9DuD+7uv7ATx2+YcnhLhSrCWq724AnwfwvJk92237MoCvAfiRmX0BwEkAn1ltQ8v1Bp49MRm07byZR50tklRxluH51OaqXLK7cJbLRnNzfJvbd4YjxO6653ba55abeb69R/+Kf142UndQW7qfS1vbxq4LtheHw3ISAJhxGWrrdn5/mPP3UVv1eDgf36Gnf0n7TC5xyXFgNDz3AFDYsofadg2ES7Mtp/gcVvI8kvFF40tbk8d5TsPsII8yXW6F91fhFcXQJPkkF+sv8k4rWNX53f2XANjIf3fNex
JCXFXoCT8hYoqcX4iYIucXIqbI+YWIKXJ+IWJKTxN4NtqO6XpYzmkU+ANArXw4Qm8YvKTVfLmf2moZHhW3dZxLc3fdeVewPWc80is7zsdx97/6p9T287/7G2pbmOCy0eT8m8H26iIPVSsYj45cqPDoyFNnz1IbFsPyoY/eQLsM7QzLcgBQQkTpqiqXASukTlazxaXgvjqXRWfnuERYzoblNwAYyHB5+ewMuQdnwtGsHVt4e82IBLQr0Z1fiJgi5xcipsj5hYgpcn4hYoqcX4iYIucXIqb0VOqrtoBX58KfN4/98hjtt2soLK/UU7zm3tCWiKSUGS437SpG1GnbvSPY3m7wz9DluQlqe/B/HKS2Z47yOniNSpXa6qnwKe2rccmuWW5QWzXLa9o109yWJgFufVkuoy23+6gtb1xia6S4nFpthSXO5CKX+pIFLtmlItQ3NPl5qfBcp8i1wrJdY5nPVSIZvr6Nq8Dv3Mba3yqE+G1Czi9ETJHzCxFT5PxCxBQ5vxAxpaer/W0zlBPhIJJHfnGE9ts5EM4H908+yHO3XTvGSycdP/IKtf3zj+2mNq+Hp6uR4avl3/mbp6jt2Rdeo7ZKZYzaCpintnY7fNx15/nl2kU+V4XCArUtNXm+w2o1vOycjAhwqZa5bSo5SG2e4VXiBhbCq/q1DFc/2gleJmvTFj73dZ76D/O18DUMAIlUePztFr+uBtNhpSuhwB4hxGrI+YWIKXJ+IWKKnF+ImCLnFyKmyPmFiCmr6gJmtgPAX6BTgtsBHHD3b5nZVwH8IYC3igp92d1/ErWtBNLIpcIySirLc8xVPBwcc+QIj2JoNXZFjITngyvs3EptlgwHbjz/0ku0z0//z2FqaxR5IAtSXDdqtXjAh5OYlPoC35c7l8oSxvv1VXnV5SoZY5MEHgHAWJLnQpzL8oia1MA4tTWr4f2VRrnUl6xwyXExy6+5vgoPFmpneLBQvRUuAbZnD9/XYP+2YPvRX0RcUytYiyjYBPAn7v6MmZUAPG1mT3Rt33T3/7rmvQkhrhrWUqtvEsBk9/WimR0FEP7YEUK8Z3hXv/nNbBeA2wEc6jZ90cyeM7MHzYwHdwshrjrW7PxmVgTwCIAvufsCgG8DuBbAbeh8M/g66bffzA6b2eFmxG9EIURvWZPzm1kaHcf/vrs/CgDuPuXuLXdvA/gOgDtDfd39gLvvc/d9qRwvzCGE6C2rOr+ZGYDvATjq7t+4qP3iJdZPA+B5p4QQVx1rWe2/G8DnATxvZs92274M4HNmdhs68t8JAH+06pYMQDr8eZPt51LITD0cCVibmaJ9Zl+ZpLaPXXc9tSXBy3VZMjyO/3XoVdqnVuWSUjXB8wwWwEtoNXM8Qs9nwhJhts0jxOp5ngPPl3heOqR4ea3hVPjYytWwrAUArRQfYzo7TG3FUoR0S4ZYrfA5rJa5zLo0zyXHWpP327qby5gDA2F5eXggIjfkhXPBdo+IBFzJWlb7f4mO264kUtMXQlzd6Ak/IWKKnF+ImCLnFyKmyPmFiClyfiFiSk8TeMIBa5DSRM4litzilmB7FTyB5PSZiASe2TPUdq7GJcfkYniME2f4k4upES7ZZcp8+tsLPLKsZPxhqfPJsOzVdC5R5Sv8HlBJcIkKxm2ZUngeE2UuHbaLXM7LZWapbfY0f7K8OhiWWlNVXv5roXmW2paXuay4ZYgn6exP30htmWT4+nn612/SPkMIR2LWq1H1xN6O7vxCxBQ5vxAxRc4vREyR8wsRU+T8QsQUOb8QMaWnUp8DaLbDElAuIspqKRmOEMsXuYzmpA8AHJnjct4jD/8ltd3zz+4Otr8yGU4wCgCVOpeG2jUulbVzeWqrFrgMWEhuD7bX8zxBamqWS6atRT7+OoncA4DCclhqTUbU3GtneSJRgEu3g+BS68KF8Dw2c6dpn6RzKXXHOJcIR3dwOW/yFJcqp068HGyfe43Xctx3Lakp6TyKdCW68wsRU+T8QsQUOb8QMUXOL0RMkfMLEVPk/ELElJ5KfalkEsMD4WSRZ5a5zGP1pWB7osFro9UrPJniYIbbfvoUr7v38kRYLluenad9ZspcelnK8GPelOWSUmM5IgV6ISy/9VW5rFixcNQkAGzfymW0couP0RNhW66PH3NzlteZqzuXdZcaXHJspMIybNZ4vcNdIxGy4iiX8zzHr6vhPi61lpfD9SsXUid4n0pYgm23JfUJIVZBzi9ETJHzCxFT5PxCxBQ5vxAxZdXVfjPLATiITmRFCsBfu/tXzGw3gIcBjAB4GsDn3Z1HgQBoJ9qo5cIr94UE/xzK1MN5yXIRJa0uGA/eqTS5rVkcobY3l14PtjfafHV4ybkSkC3z1fKZZZ7TsFXmOeb6Se7C/Cif32aDKwH1Cl8VJ9XLOraB8PivyfE8fdM5PsbZCj9mW+J560ba4W0OpnjZsMHhcHAUAOy5kc/V3BI/Z4sJHrg21zoRbN+8eYz2eX0qXKqu1uTKx0rWcuevAbjH3W9Fpxz3vWb2IQB/CuCb7n4dgAsAvrDmvQohNpxVnd87vCVSprv/HMA9AP662/4QgE9dkREKIa4Ia/rNb2bJboXeaQBPAHgdwJy7v/Ud4zSAbVdmiEKIK8GanN/dW+5+G4DtAO4EwB9zWoGZ7Tezw2Z2uLnEnxYTQvSWd7Xa7+5zAP4ewIcBDJrZWwuG2wEEn6N09wPuvs/d96UKEY+lCiF6yqrOb2ZjZjbYfd0H4BMAjqLzIfAvum+7H8BjV2qQQojLz1oCe8YBPGRmSXQ+LH7k7v/TzF4C8LCZ/ScA/xfA91bbUKLRRnYyXK6pkeSSUm0k/BnlizyQYigTIa3kuNQ3uLRAbdVqWKZarvOgjaLz41rIh2VPAMgkeQ6/vgW+zXIuLPXU3+QBH+VGWDYCgMYAl5uGtnBlt395Z3h7xvXBbH2O2pILXGKzNp+rRr0SbPdtfOyZIR58tLzEv702zxi1lafPU1v/ufD465v49ZEncnX1XYTqrfpWd38OwO2B9uPo/P4XQrwH0RN+QsQUOb8QMUXOL0RMkfMLEVPk/ELEFPN3Ud5n3TszOwfgZPfPUQBc/+gdGsfb0TjeznttHDvdneuzF9FT53/bjs0Ou/u+Ddm5xqFxaBz62i9EXJHzCxFTNtL5D2zgvi9G43g7Gsfb+a0dx4b95hdCbCz62i9ETNkQ5zeze83sFTM7ZmYPbMQYuuM4YWbPm9mzZna4h/t90MymzeyFi9qGzewJM3ut+//QBo3jq2Y20Z2TZ83skz0Yxw4z+3sze8nMXjSzf9tt7+mcRIyjp3NiZjkz+42ZHemO4z9223eb2aGu3/zQLCJEci24e0//AUiikwZsD4AMgCMAbur1OLpjOQFgdAP2+zEAdwB44aK2/wLgge7rBwD86QaN46sA/l2P52McwB3d1yUArwK4qddzEjGOns4JAANQ7L5OAzgE4EMAfgTgs932/wbg36xnPxtx578TwDF3P+6dVN8PA7hvA8axYbj7QQCzK5rvQycRKtCjhKhkHD3H3Sfd/Znu60V0ksVsQ4/nJGIcP
cU7XPGkuRvh/NsAnLro741M/ukAfmZmT5vZ/g0aw1tsdvfJ7uuzAHhi+SvPF83sue7Pgiv+8+NizGwXOvkjDmED52TFOIAez0kvkubGfcHvI+5+B4DfB/DHZvaxjR4Q0PnkR+eDaSP4NoBr0anRMAng673asZkVATwC4Evu/raUSr2ck8A4ej4nvo6kuWtlI5x/AsCOi/6myT+vNO4+0f1/GsCPsbGZiabMbBwAuv9Pb8Qg3H2qe+G1AXwHPZoTM0uj43Dfd/dHu809n5PQODZqTrr7ftdJc9fKRjj/UwD2dlcuMwA+C+DxXg/CzApmVnrrNYDfA/BCdK8ryuPoJEIFNjAh6lvO1uXT6MGcmJmhkwPyqLt/4yJTT+eEjaPXc9KzpLm9WsFcsZr5SXRWUl8H8O83aAx70FEajgB4sZfjAPADdL4+NtD57fYFdGoePgngNQA/BzC8QeP47wCeB/A+vRlAAAAAaElEQVQcOs433oNxfASdr/TPAXi2+++TvZ6TiHH0dE4AfACdpLjPofNB8x8uumZ/A+AYgL8CkF3PfvSEnxAxJe4LfkLEFjm/EDFFzi9ETJHzCxFT5PxCxBQ5vxAxRc4vREyR8wsRU/4f0SqOSmXkELcAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Max distortion 0.031372577\n",
+ "Adversarial Model Prediction [5]\n",
+ "Logits [[-0.8225111 0.85914516 -4.7383585 1.4677212 -2.8243408 11.350895\n",
+ " -2.3103023 -3.077036 -1.6778117 1.7728287 ]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "xs = tf.placeholder(tf.float32, (1, 32, 32, 3))\n",
+ "encode = discretize_uniform(xs/255.0, levels=levels, thermometer=True)\n",
+ "\n",
+ "image = np.array(cifar.eval_data.xs[:1],dtype=np.float32)\n",
+ "label = cifar.eval_data.ys[:1]\n",
+ "\n",
+ "plt.imshow(image[0]/255.0)\n",
+ "plt.show()\n",
+ "print(\"Image Label\", label)\n",
+ "\n",
+ "thermometer_encoded = sess.run(encode, {xs: image})\n",
+ "print('Clean Model Prediction',\n",
+ " sess.run(model.predictions, {model.x_input: thermometer_encoded}))\n",
+ "print('Logits',\n",
+ " sess.run(model.pre_softmax, {model.x_input: thermometer_encoded}))\n",
+ "\n",
+ "adversarial = attack.perturb(image, label, sess)\n",
+ "\n",
+ "plt.imshow(adversarial[0]/255.0)\n",
+ "plt.show()\n",
+ "\n",
+ "print(\"Max distortion\", np.max(np.abs(adversarial/255.0-image/255.0)))\n",
+ "\n",
+ "thermometer_encoded = sess.run(encode, {xs: adversarial})\n",
+ "print('Adversarial Model Prediction',\n",
+ " sess.run(model.predictions, {model.x_input: thermometer_encoded}))\n",
+ "print('Logits',\n",
+ " sess.run(model.pre_softmax, {model.x_input: thermometer_encoded}))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/case_studies/thermometer/original/train.py b/case_studies/thermometer/original/train.py
new file mode 100644
index 0000000..31950a2
--- /dev/null
+++ b/case_studies/thermometer/original/train.py
@@ -0,0 +1,173 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import scipy.misc
+from datetime import datetime
+import json
+import os
+import shutil
+import sys
+from timeit import default_timer as timer
+from discretization_utils import discretize_uniform
+from discretization_attacks import adv_lspga
+
+import tensorflow as tf
+import numpy as np
+
+from cifar_model import Model
+import cifar10_input
+
+with open('config.json') as config_file:
+ config = json.load(config_file)
+
+# seeding randomness
+tf.set_random_seed(config['tf_random_seed'])
+np.random.seed(config['np_random_seed'])
+
+# Setting up training parameters
+max_num_training_steps = config['max_num_training_steps']
+num_output_steps = config['num_output_steps']
+num_summary_steps = config['num_summary_steps']
+num_checkpoint_steps = config['num_checkpoint_steps']
+step_size_schedule = config['step_size_schedule']
+weight_decay = config['weight_decay']
+data_path = config['data_path']
+momentum = config['momentum']
+batch_size = config['training_batch_size']
+
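+# number of discretization levels used by the thermometer encoding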
+levels = 16
+
+# Setting up the data and the model
+raw_cifar = cifar10_input.CIFAR10Data(data_path)
+global_step = tf.contrib.framework.get_or_create_global_step()
+model = Model(mode='train', tiny=False,
+ thermometer=True, levels=levels)
+
+# Setting up the optimizer
+boundaries = [int(sss[0]) for sss in step_size_schedule]
+boundaries = boundaries[1:]
+values = [sss[1] for sss in step_size_schedule]
+learning_rate = tf.train.piecewise_constant(
+ tf.cast(global_step, tf.int32),
+ boundaries,
+ values)
+total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
+train_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(
+ total_loss,
+ global_step=global_step)
+
+
+xin = tf.placeholder(tf.float32, (None, 32, 32, 3))
+
+steps = 7
+eps = 0.031
+attack_step = 0.01
+
+projection_fn = tf.identity
+
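+# build the LS-PGA attack (a PGD variant for discretized/thermometer-encoded inputs)
+# used during adversarial training: 7 steps of size 0.01 within an L-inf ball of radius 0.031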
+attack = adv_lspga(xin, model, discretize_uniform,
+ projection_fn, levels, tf.constant(True), steps, eps,
+ attack_step, thermometer=True, noisy_grads=False)
+
+thermometerize = discretize_uniform(xin, levels=levels, thermometer=True)
+
+
+# Setting up the Tensorboard and checkpoint outputs
+model_dir = config['model_dir']
+if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+
+# We add accuracy and xent twice so we can easily make three types of
+# comparisons in Tensorboard:
+# - train vs eval (for a single run)
+# - train of different runs
+# - eval of different runs
+
+saver = tf.train.Saver(max_to_keep=3)
+tf.summary.scalar('accuracy adv train', model.accuracy)
+tf.summary.scalar('accuracy adv', model.accuracy)
+tf.summary.scalar('xent adv train', model.xent / batch_size)
+tf.summary.scalar('xent adv', model.xent / batch_size)
+#tf.summary.image('images adv train', model.x_input)
+merged_summaries = tf.summary.merge_all()
+
+# keep the configuration file with the model for reproducibility
+shutil.copy('config.json', model_dir)
+
+with tf.Session() as sess:
+
+ # initialize data augmentation
+ cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess, model)
+
+ # Initialize the summary writer, global variables, and our time counter.
+ summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
+ sess.run(tf.global_variables_initializer())
+ training_time = 0.0
+ attack_time = 0.0
+
+ if sys.argv[-1] == 'restore':
+ saver.restore(sess,
+ os.path.join("models/adv_train_fixed/", 'checkpoint-105000'))
+
+ # Main training loop
+ for ii in range(max_num_training_steps):
+ x_batch, y_batch = cifar.train_data.get_next_batch(batch_size,
+ multiple_passes=True)
+
+ # Compute Adversarial Perturbations
+ start = timer()
+ x_batch, x_batch_adv = sess.run((thermometerize, attack),
+ {xin: x_batch/255.0})
+ end = timer()
+ attack_time += end - start
+
+
+ nat_dict = {model.x_input: x_batch,
+ model.y_input: y_batch}
+
+ adv_dict = {model.x_input: x_batch_adv,
+ model.y_input: y_batch}
+
+ # Output to stdout
+ if ii % (num_output_steps) == 0:
+ nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
+ adv_acc = sess.run(model.accuracy, feed_dict=adv_dict)
+ print('Step {}: ({})'.format(ii, datetime.now()))
+ print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
+ print(' training adv accuracy {:.4}%'.format(adv_acc * 100))
+ if ii != 0:
+ print(' {} examples per second (train)'.format(
+ num_output_steps * batch_size / (training_time)))
+ print(' {} examples per second (attack)'.format(
+ num_output_steps * batch_size / (attack_time)))
+ attack_time = training_time = 0.0
+ # Tensorboard summaries
+ if ii % num_summary_steps == 0:
+ summary = sess.run(merged_summaries, feed_dict=adv_dict)
+ summary_writer.add_summary(summary, global_step.eval(sess))
+
+ # Write a checkpoint
+ if ii % num_checkpoint_steps == 0:
+ saver.save(sess,
+ os.path.join(model_dir, 'checkpoint'),
+ global_step=global_step)
+
+ # Actual training step
+ start = timer()
+ sess.run(train_step, feed_dict=adv_dict)
+ end = timer()
+ training_time += end - start
diff --git a/case_studies/thermometer/start_docker.sh b/case_studies/thermometer/start_docker.sh
new file mode 100644
index 0000000..7add255
--- /dev/null
+++ b/case_studies/thermometer/start_docker.sh
@@ -0,0 +1,5 @@
+docker run -it --rm --gpus all --ipc=host --ulimit memlock=-1 \
+ --ulimit stack=67108864 \
+ --shm-size=32g \
+ -v ~/projects:/projects \
+ nvcr.io/nvidia/tensorflow:21.10-tf1-py3 /bin/bash
\ No newline at end of file
diff --git a/case_studies/thermometer/train.sh b/case_studies/thermometer/train.sh
new file mode 100644
index 0000000..0446397
--- /dev/null
+++ b/case_studies/thermometer/train.sh
@@ -0,0 +1,4 @@
+PYTHONPATH=$PYTHONPATH:$(pwd) python train_classifier.py \
+ -bs=256 -lr=0.1 -op=sgd -sm=0.9 -wd=0 -ne=200 -lrs --device="cuda" \
+ --classifier="networks.differentiable_16_thermometer_encoding_cifar_resnet18" \
+ --output=checkpoints/thermometer_16_mrn18_200_epochs.pth
diff --git a/case_studies/thermometer/train_at.sh b/case_studies/thermometer/train_at.sh
new file mode 100644
index 0000000..0e03db3
--- /dev/null
+++ b/case_studies/thermometer/train_at.sh
@@ -0,0 +1,19 @@
+attack=${1:-pgd}
+echo "Attack: $attack"
+
+if [[ "$attack" == "thermometer-lspgd" ]]; then
+ nsteps=7
+ stepsize=0.01
+ attackname="tlspgd"
+else
+ nsteps=10
+ stepsize=0.005
+ attackname="pgd"
+fi
+
+PYTHONPATH=$PYTHONPATH:$(pwd) python train_classifier.py \
+ -bs=256 -lr=0.1 -op=sgd -sm=0.9 -wd=5e-4 -ne=200 --device="cuda" \
+ --learning-rate-scheduler="multistep-0.1-60,120,160" \
+ --classifier="networks.non_differentiable_16_thermometer_encoding_cifar_wideresnet344" \
+ --output=checkpoints/thermometer_16_wrn304_linf_at_${attackname}_200_epochs.pth \
+ -at="norm=linf epsilon=0.031372549 step_size=$stepsize n_steps=$nsteps attack=$attack"
diff --git a/case_studies/trapdoor/adversarial_evaluation.py b/case_studies/trapdoor/adversarial_evaluation.py
new file mode 100644
index 0000000..0763b91
--- /dev/null
+++ b/case_studies/trapdoor/adversarial_evaluation.py
@@ -0,0 +1,173 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import torch
+from defense import CIFAR10
+from defense import load_model, load_model_3
+import numpy as np
+from attacks import pgd
+import orthogonal_pgd
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--batch-size", type=int, default=512)
+parser.add_argument("--n-samples", type=int, default=2048)
+parser.add_argument("--pgd-steps", type=int, default=50)
+parser.add_argument("--pgd-step-size", type=float, default=8 / 255 / 50 * 2.5)
+parser.add_argument("--epsilon", type=float, default=8 / 255)
+parser.add_argument("--thresholds", type=float, nargs=10, default=None)
+parser.add_argument("--fpr-threshold", type=float, default=0.05)
+parser.add_argument("--attack", type=str,
+ choices=("naive", "orthogonal", "selective"),
+ default="naive")
+
+args = parser.parse_args()
+
+dataset = CIFAR10()
+
+if args.n_samples == -1:
+ args.n_samples = len(dataset.test_data)
+model, run_detector = load_model(device)
+
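+# Orthogonal PGD attack: optimizes the classifier loss while projecting updates so that
+# they interfere as little as possible with the trapdoor detector's score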
+orthogonal_pgd_attack = orthogonal_pgd.PGD(
+ model,
+ lambda x: run_detector(x),
+ classifier_loss=torch.nn.CrossEntropyLoss(),
+ detector_loss=lambda x, _: torch.mean(x),
+ eps=args.epsilon,
+ steps=args.pgd_steps,
+ alpha=args.pgd_step_size, k=None,
+ # project_detector=True,
+ projection_norm='l2',
+ project_classifier=True,
+ use_projection=True,
+ verbose=True)
+
+selective_pgd_attack = orthogonal_pgd.PGD(
+ model, run_detector,
+ classifier_loss=torch.nn.CrossEntropyLoss(),
+ eps=args.epsilon,
+ steps=args.pgd_steps,
+ alpha=args.pgd_step_size, k=None,
+ project_detector=False,
+ project_classifier=False,
+ projection_norm='l2',
+ use_projection=True)
+
+if args.attack == "naive":
+ run_attack = lambda x_batch, y_batch: pgd.pgd(
+ model, x_batch, y_batch,
+ args.pgd_steps, args.pgd_step_size,
+ args.epsilon, norm="linf", targeted=False)[0]
+elif args.attack == "orthogonal":
+ run_attack = lambda x_batch, y_batch: orthogonal_pgd_attack.attack(
+ x_batch.cpu(), y_batch.cpu(), device=device).to(
+ device)
+elif args.attack == "selective":
+ run_attack = lambda x_batch, y_batch: selective_pgd_attack.attack(
+ x_batch.cpu(), y_batch.cpu(), device=device).to(
+ device)
+else:
+ raise ValueError()
+
+is_adv = []
+adv_detector_scores = []
+detector_scores = []
+y_pred = []
+y_adv_pred = []
+
+for batch_idx in range(int(np.ceil(args.n_samples / args.batch_size))):
+ x_batch = dataset.test_data[
+ batch_idx * args.batch_size:(batch_idx + 1) * args.batch_size]
+ y_batch = dataset.test_labels[
+ batch_idx * args.batch_size:(batch_idx + 1) * args.batch_size]
+
+ x_batch = torch.tensor(x_batch, device=device, dtype=torch.float32)
+ y_batch = torch.tensor(y_batch, device=device, dtype=torch.long)
+
+ x_adv_batch = run_attack(x_batch.clone(), y_batch)
+
+ with torch.no_grad():
+ y_adv_pred_batch = model(x_adv_batch).argmax(-1).detach()
+ y_pred_batch = model(x_batch).argmax(-1).detach()
+ y_pred.append(y_pred_batch.cpu().numpy())
+ y_adv_pred.append(y_adv_pred_batch.cpu().numpy())
+ is_adv_batch = y_adv_pred_batch != y_batch
+ is_adv_batch = is_adv_batch.cpu().numpy()
+
+  # since the detector uses np.random, set the seed here so that different attacks
+  # are comparable
+ np.random.seed(batch_idx)
+
+ with torch.no_grad():
+ detector_scores_batch = run_detector(x_batch).detach().cpu().numpy()
+ adv_detector_scores_batch = run_detector(x_adv_batch).detach().cpu().numpy()
+ is_adv.append(is_adv_batch)
+ detector_scores.append(detector_scores_batch)
+ adv_detector_scores.append(adv_detector_scores_batch)
+
+y_pred = np.concatenate(y_pred, 0)
+y_pred = y_pred[:args.n_samples]
+y_adv_pred = np.concatenate(y_adv_pred, 0)
+y_adv_pred = y_adv_pred[:args.n_samples]
+is_adv = np.concatenate(is_adv, 0)
+is_adv = is_adv[:args.n_samples]
+
+detector_scores = np.concatenate(detector_scores, 0)
+detector_scores = detector_scores[:args.n_samples]
+
+adv_detector_scores = np.concatenate(adv_detector_scores, 0)
+adv_detector_scores = adv_detector_scores[:args.n_samples]
+
+if args.thresholds is None:
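+  # calibrate one detection threshold per class so that a fraction fpr_threshold of
+  # clean samples predicted as that class scores above it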
+ detector_thresholds = []
+ for label in range(10):
+ scores = detector_scores[y_pred == label]
+ detector_threshold = np.sort(scores)[-int(len(scores) * args.fpr_threshold)]
+ detector_thresholds.append(detector_threshold)
+ print("Thresholds for FPR", args.fpr_threshold, "=", detector_thresholds)
+else:
+ detector_thresholds = args.thresholds
+detector_thresholds = np.array(detector_thresholds)
+
+adv_is_detected = adv_detector_scores > detector_thresholds[y_adv_pred]
+is_detected = detector_scores > detector_thresholds[y_pred]
+
+# true positive: detected + adversarial example
+# true negative: not detected + normal example
+# false positive: detected + normal example
+# false negative: not detected + adversarial example
+tnr = np.mean(~is_detected)
+tpr = np.mean(adv_is_detected)
+fnr = np.mean(~adv_is_detected)
+fpr = np.mean(is_detected)
+
+tp = np.sum(adv_is_detected)
+fn = np.sum(~adv_is_detected)
+fp = np.sum(is_detected)
+
+f1 = tp / (tp + 0.5 * (fp + fn))
+
+print("TPR", tpr)
+print("FPR", fpr)
+print("TNR", tnr)
+print("FNR", fnr)
+print("F1 ", f1)
+
+is_adv_and_not_detected = np.logical_and(is_adv, ~adv_is_detected)
+
+print("Attack Success Rate (w/o detector):", np.mean(is_adv))
+print("Attack Success Rate (w/ detector):", np.mean(is_adv_and_not_detected))
diff --git a/case_studies/trapdoor/adversarial_evaluation.sh b/case_studies/trapdoor/adversarial_evaluation.sh
new file mode 100644
index 0000000..c5f26ad
--- /dev/null
+++ b/case_studies/trapdoor/adversarial_evaluation.sh
@@ -0,0 +1,69 @@
+n_samples=${1:-512}
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal/Naive PGD (FPR = 5%)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/adversarial_evaluation.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=100 \
+ --pgd-step-size=0.0007843137254901962 \
+ --fpr-threshold=0.05 \
+ --attack=naive
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Selective PGD (FPR = 5%)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/adversarial_evaluation.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=500 \
+ --pgd-step-size=0.025 \
+ --fpr-threshold=0.05 \
+ --attack=selective
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Orthogonal PGD (FPR = 5%)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/adversarial_evaluation.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=1000 \
+ --pgd-step-size=0.025 \
+ --fpr-threshold=0.05 \
+ --attack=orthogonal
+
+echo ""
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Normal/Naive PGD (FPR = 50%)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/adversarial_evaluation.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=100 \
+ --pgd-step-size=0.0007843137254901962 \
+ --fpr-threshold=0.50 \
+ --attack=naive
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Selective PGD (FPR = 50%)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/adversarial_evaluation.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=500 \
+ --pgd-step-size=0.025 \
+ --fpr-threshold=0.50 \
+ --attack=selective
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "Orthogonal PGD (FPR = 50%)"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/adversarial_evaluation.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=500 \
+ --pgd-step-size=0.025 \
+ --fpr-threshold=0.50 \
+ --attack=orthogonal
\ No newline at end of file
diff --git a/case_studies/trapdoor/binarization_test.py b/case_studies/trapdoor/binarization_test.py
new file mode 100644
index 0000000..81110a4
--- /dev/null
+++ b/case_studies/trapdoor/binarization_test.py
@@ -0,0 +1,265 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import math
+import warnings
+from functools import partial
+
+import torch
+
+from active_tests.decision_boundary_binarization import format_result, \
+ _train_logistic_regression_classifier
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack
+from defense import CIFAR10
+from defense import load_model
+import numpy as np
+from attacks import pgd
+import orthogonal_pgd
+import utils as ut
+
+
+def main():
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--batch-size", type=int, default=2048)
+ parser.add_argument("--n-samples", type=int, default=2048)
+ parser.add_argument("--n-boundary-points", type=int, default=1)
+ parser.add_argument("--n-inner-points", type=int, default=19999)
+ parser.add_argument("--pgd-steps", type=int, default=50)
+ parser.add_argument("--pgd-step-size", type=float, default=8 / 255 / 50 * 2.5)
+ parser.add_argument("--epsilon", type=float, default=8 / 255)
+ parser.add_argument("--thresholds", type=float, nargs="+", required=True)
+ parser.add_argument("--attack", type=str, choices=("naive", "orthogonal"),
+ default="naive")
+ parser.add_argument("--inverted-test", action="store_true")
+ parser.add_argument("--sample-from-corners", action="store_true")
+ args = parser.parse_args()
+
+ args.thresholds = np.array(args.thresholds)
+
+ if args.inverted_test:
+ print("Running inverted test")
+ else:
+ print("Running normal/non-inverted test")
+
+ dataset = CIFAR10()
+ model, run_detector = load_model(device)
+
+ from torch.nn import functional as F
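+  # difference between the largest non-target logit and the target logit, averaged over the batch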
+ def logit_diff_loss(logits, targets):
+ l_t = logits[range(len(targets)), targets]
+ l_o = (logits - 1e9 * F.one_hot(targets, 2)).max(-1)[0]
+ diff = l_o - l_t
+ loss = diff.mean()
+ return loss
+
+ thresholds_torch = torch.tensor(args.thresholds).to(device)
+
+ orthogonal_pgd_attack = orthogonal_pgd.PGD(
+ model,
+ lambda x: -run_detector(
+ x,
+ subtract_thresholds=thresholds_torch) if args.inverted_test else
+ run_detector(
+ x, subtract_thresholds=thresholds_torch),
+ classifier_loss=logit_diff_loss, # torch.nn.CrossEntropyLoss(),
+ detector_loss=lambda x, _: torch.mean(x),
+ eps=args.epsilon,
+ steps=args.pgd_steps,
+ alpha=args.pgd_step_size, k=None,
+ #project_detector=True,
+ project_classifier=True,
+ use_projection=True,
+ projection_norm='l2',
+ verbose=False,
+ )
+
+ def run_naive_attack(model, x_batch, y_batch, epsilon=None, targeted=False):
+ if epsilon is None:
+ epsilon = args.epsilon
+
+ return pgd.pgd(
+ model, x_batch.to(device), y_batch.to(device),
+ args.pgd_steps, args.pgd_step_size,
+ epsilon, norm="linf", targeted=targeted)[0]
+
+ def run_orthogonal_attack(model, x_batch, y_batch):
+ orthogonal_pgd_attack.classifier = model
+ return orthogonal_pgd_attack.attack(
+ x_batch.cpu(), y_batch.cpu(), device=device).to(
+ device)
+
+ if args.attack == "naive":
+ run_attack = run_naive_attack
+ else:
+ run_attack = run_orthogonal_attack
+
+ def verify_valid_input_data(x_set: torch.Tensor) -> np.ndarray:
+ """Returns True if something is not detected as an adversarial example."""
+ n_batches = math.ceil(x_set.shape[0] / args.batch_size)
+ values = []
+ with torch.no_grad():
+ for b in range(n_batches):
+ s = run_detector(
+ x_set[b * args.batch_size:(b + 1) * args.batch_size],
+ subtract_thresholds=thresholds_torch)
+ values.append(s.cpu().numpy() < 0)
+ return np.concatenate(values)
+
+ def get_boundary_adversarials(x, y, n_samples, epsilon):
+ """Generate adversarial examples for the base classifier."""
+ assert len(x.shape) == 3
+ x = x.unsqueeze(0)
+ x = torch.repeat_interleave(x, n_samples, dim=0)
+
+ y = y.unsqueeze(0)
+ y = torch.repeat_interleave(y, n_samples, dim=0)
+
+ if n_samples == 1:
+      # generate a bunch of samples at the same time and check whether any of them
+      # gets detected
+ x = torch.repeat_interleave(x, 5, dim=0)
+ y = torch.repeat_interleave(y, 5, dim=0)
+
+ for _ in range(4):
+ x_adv = run_naive_attack(model, x, y, epsilon)
+
+ # project adversarials to the max norm boundary
+ x_adv = ut.clipping_aware_rescaling(x, x_adv - x, epsilon,
+ norm="linf")
+ is_valid = verify_valid_input_data(x_adv)
+ is_invalid = ~is_valid
+
+ if n_samples != 1:
+ if np.all(is_invalid):
+          # keep generating until we finally find an adversarial example that gets
+          # detected
+ break
+ else:
+ if np.any(is_invalid):
+ x_adv = x_adv[is_invalid]
+ break
+ else:
+ raise RuntimeError("Could not generate adversarial example that gets "
+ "detected after 4 trials (with 500 samples each).")
+
+ if n_samples == 1:
+ x_adv = x_adv[[0]]
+
+ return x_adv
+
+ def attack_model(m, l, attack_kwargs):
+ del attack_kwargs
+
+ for x, y in l:
+ x_adv = run_attack(m, x, y)
+
+ logits = m(x_adv).cpu()
+ is_adv = logits.argmax(-1) != y
+ with torch.no_grad():
+ s = run_detector(x_adv, return_pred=False,
+ subtract_thresholds=thresholds_torch)
+
+ #for _ in range(5):
+ # print(run_detector(x_adv, return_pred=False,
+ # subtract_thresholds=thresholds_torch).cpu())
+
+ is_detected = s.cpu() > 0 # torch.tensor(args.thresholds[p.cpu().numpy()])
+ is_not_detected = ~is_detected
+ is_adv_and_not_detected = torch.logical_and(is_adv,
+ is_not_detected).numpy()
+ is_adv_and_detected = torch.logical_and(is_adv, is_detected).numpy()
+
+ # print(is_adv, logits, is_detected, s.cpu())
+
+ if args.inverted_test:
+ return is_adv_and_detected, (x_adv, logits)
+ else:
+ return is_adv_and_not_detected, (x_adv, logits)
+
+ x_data = dataset.validation_data[:args.n_samples].astype(np.float32)
+ y_data = dataset.validation_labels[:args.n_samples].astype(np.int64)
+
+  # exclude samples with label 3 since the detector was trained to detect targeted
+ # attacks against class 3
+ # x_data = x_data[y_data != 3]
+ # y_data = y_data[y_data != 3]
+
+ from utils import build_dataloader_from_arrays
+
+ test_loader = build_dataloader_from_arrays(x_data, y_data,
+ batch_size=args.batch_size)
+
+ from argparse_utils import DecisionBoundaryBinarizationSettings
+
+ if args.inverted_test:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=1,
+ )
+ else:
+ additional_settings = dict(
+ n_boundary_points=args.n_boundary_points,
+ n_boundary_adversarial_points=args.n_boundary_points - 1,
+ n_far_off_boundary_points=1,
+ n_far_off_adversarial_points=0,
+ )
+
+ far_off_distance = 1.75
+
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ model,
+ test_loader,
+ attack_fn=lambda m, l, attack_kwargs: attack_model(m, l, attack_kwargs),
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=args.epsilon,
+ norm="linf",
+ lr=20000,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ n_inner_points=args.n_inner_points,
+ **additional_settings
+ ),
+ n_samples=args.n_samples,
+ device=device,
+ batch_size=args.batch_size,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+
+ get_boundary_adversarials_fn=get_boundary_adversarials,
+ verify_valid_boundary_training_data_fn=verify_valid_input_data,
+ verify_valid_inner_training_data_fn=None,
+ verify_valid_boundary_validation_data_fn=(
+ lambda x: ~verify_valid_input_data(x)) \
+ if args.inverted_test else verify_valid_input_data,
+ fill_batches_for_verification=True,
+ far_off_distance=far_off_distance,
+ rescale_logits="adaptive",
+ decision_boundary_closeness=0.999999,
+ fail_on_exception=False,
+ sample_training_data_from_corners=args.sample_from_corners
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ args.n_samples))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/trapdoor/binarization_test.sh b/case_studies/trapdoor/binarization_test.sh
new file mode 100644
index 0000000..e43de31
--- /dev/null
+++ b/case_studies/trapdoor/binarization_test.sh
@@ -0,0 +1,111 @@
+n_samples=${1:-512}
+epsilon=${2:-0.031}
+
+# for eps=0.01
+#thresholds="0.27039704 0.3084671 0.24682844 0.22648834 0.25642416 0.2449155 0.25744236 0.2869493 0.2991438 0.2467549"
+#thresholds="0.32685438 0.3048646 0.21139874 -0.011124074 0.26301256 0.25698307 0.25040865 0.18050945 0.3116589 0.16479838"
+#thresholds="0.2704116 0.30847666 0.2505051 0.18937282 0.25757647 0.24697195 0.25848407 0.28757182 0.2991565 0.24589166"
+
+thresholds="0.3268544 0.30486462 0.21139881 0.23486444 0.26301256 0.25698304 0.24667358 0.17656253 0.31165892 0.16479836"
+
+kwargs=""
+#kwargs="--sample-from-corners"
+
+if [[ "$epsilon" == "0.01" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.01, Normal test (1 boundary, 999 inner points), Normal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=100 \
+ --thresholds $thresholds \
+ --pgd-step-size=0.00078431372 \
+ $kwargs
+
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.01, Inverted test (1 boundary, 999 inner points), Normal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --pgd-steps=100 \
+ --thresholds $thresholds \
+ --pgd-step-size=0.00078431372 \
+ --inverted-test \
+ $kwargs
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.01, Normal test (1 boundary, 999 inner points), Orthogonal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --attack=orthogonal \
+ --thresholds $thresholds \
+ --pgd-steps=500 \
+ --pgd-step-size=0.01 \
+ $kwargs
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.01, Inverted test (1 boundary, 999 inner points), Orthogonal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.01 \
+ --attack=orthogonal \
+ --thresholds $thresholds \
+ --pgd-steps=1000 \
+ --pgd-step-size=0.05 \
+ --inverted-test \
+ $kwargs
+elif [[ "$epsilon" == "0.031" ]]; then
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.031, Normal test (1 boundary, 999 inner points), Normal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.031 \
+ --pgd-steps=100 \
+ --thresholds $thresholds \
+    --pgd-step-size=0.00078431372 \
+ $kwargs
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.031, Inverted test (1 boundary, 999 inner points), Normal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.031 \
+ --pgd-steps=100 \
+ --thresholds $thresholds \
+ --pgd-step-size=0.00078431372 \
+ --inverted-test \
+ $kwargs
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.031, Normal test (1 boundary, 999 inner points), Orthogonal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.031 \
+ --attack=orthogonal \
+ --thresholds $thresholds \
+ --pgd-steps=750 \
+ --pgd-step-size=0.015 \
+ $kwargs
+
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "eps=0.031, Inverted test (1 boundary, 999 inner points), Orthogonal PGD"
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/trapdoor/binarization_test.py \
+ --n-samples=$n_samples \
+ --epsilon=0.031 \
+ --attack=orthogonal \
+ --thresholds $thresholds \
+ --pgd-steps=1000 \
+ --pgd-step-size=0.05 \
+ --inverted-test \
+ $kwargs
+fi
\ No newline at end of file
diff --git a/case_studies/trapdoor/defense.py b/case_studies/trapdoor/defense.py
new file mode 100644
index 0000000..1aa60c0
--- /dev/null
+++ b/case_studies/trapdoor/defense.py
@@ -0,0 +1,214 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import numpy as np
+
+
+class CIFAR10:
+ def __init__(self, seed=43):
+ import tensorflow as tf
+ (train_data, train_labels),(self.test_data, self.test_labels) = tf.keras.datasets.cifar10.load_data()
+ train_data = train_data/255.
+ self.test_data = self.test_data/255.
+
+ VALIDATION_SIZE = 5000
+
+ np.random.seed(seed)
+ shuffled_indices = np.arange(len(train_data))
+ np.random.shuffle(shuffled_indices)
+ train_data = train_data[shuffled_indices]
+ train_labels = train_labels[shuffled_indices]
+
+ shuffled_indices = np.arange(len(self.test_data))
+ np.random.shuffle(shuffled_indices)
+ self.test_data = self.test_data[shuffled_indices].transpose((0,3,1,2))
+ self.test_labels = self.test_labels[shuffled_indices].flatten()
+
+ self.validation_data = train_data[:VALIDATION_SIZE, :, :, :].transpose((0,3,1,2))
+ self.validation_labels = train_labels[:VALIDATION_SIZE].flatten()
+ self.train_data = train_data[VALIDATION_SIZE:, :, :, :].transpose((0,3,1,2))
+ self.train_labels = train_labels[VALIDATION_SIZE:].flatten()
+
+
+class TorchModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ class Transpose(torch.nn.Module):
+ def forward(self, x):
+ return x.permute((0, 2, 3, 1))
+
+ self.layers = torch.nn.ModuleList([
+ torch.nn.Conv2d(3, 32, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm2d(32, eps=.000),
+ torch.nn.Conv2d(32, 32, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm2d(32, eps=.000),
+ torch.nn.MaxPool2d(2, 2),
+
+ torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm2d(64, eps=.000),
+ torch.nn.Conv2d(64, 64, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm2d(64, eps=.000),
+ torch.nn.MaxPool2d(2, 2),
+
+ torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm2d(128, eps=.000),
+ torch.nn.Conv2d(128, 128, kernel_size=3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm2d(128, eps=.000),
+ torch.nn.MaxPool2d(2, 2),
+
+ Transpose(),
+ torch.nn.Flatten(),
+ torch.nn.Linear(2048, 1024),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm1d(1024, eps=.000),
+ torch.nn.Linear(1024, 512),
+ torch.nn.ReLU(),
+ torch.nn.BatchNorm1d(512, eps=.000),
+ torch.nn.Linear(512, 10),
+ ])
+
+ def __call__(self, x, training=False, upto=None, features_only=False,
+ features_and_logits=False, detector_features_and_logits=False):
+ if features_only or features_and_logits or detector_features_and_logits:
+ assert upto is None
+ upto = len(self.layers)
+ if not isinstance(x, torch.Tensor):
+ x = torch.tensor(x, dtype=torch.float32)
+ outputs = []
+ for i,layer in enumerate(self.layers[:upto] if upto is not None else self.layers):
+ x = layer(x)
+ outputs.append(x)
+
+ if features_only:
+ return outputs[-2]
+ if detector_features_and_logits:
+ return outputs[-4], outputs[-1]
+ if features_and_logits:
+ return outputs[-2], outputs[-1]
+
+ return x
+
+
+def run_multi_detect(model, x, adv_sig, random=None, return_pred=False,
+ subtract_thresholds=None):
+ X_neuron_adv, logits = model(x, detector_features_and_logits=True)
+
+ y_pred = logits.argmax(-1)
+
+ if random is None: random="correct"
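+  # randomly keep only a fraction (filter_ratio) of the detector neurons before
+  # computing the similarity with the trapdoor signatures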
+ filter_ratio = .1
+ if random == "fast":
+ n_mask = torch.rand(X_neuron_adv.shape[1]) < filter_ratio
+ X_neuron_adv = X_neuron_adv * n_mask.to(x.device)
+ elif random == "correct":
+ number_neuron = X_neuron_adv.shape[1]
+ number_keep = int(number_neuron * filter_ratio)
+ n_mask = np.array([1] * number_keep + [0] * (number_neuron - number_keep))
+ n_mask = np.array(n_mask)
+ np.random.shuffle(n_mask)
+ X_neuron_adv = X_neuron_adv * torch.tensor(n_mask).to(x.device)
+ else:
+    raise ValueError("unknown neuron sampling mode: {}".format(random))
+
+ adv_scores = torch_multi_sim(X_neuron_adv, adv_sig)
+
+ # return scores based on the detectors corresponding to the predicted classes
+ adv_scores = adv_scores[range(len(adv_scores)), y_pred]
+
+ if subtract_thresholds is not None:
+ relevant_thresholds = subtract_thresholds[y_pred]
+ adv_scores = adv_scores - relevant_thresholds
+
+ if return_pred:
+ return adv_scores, y_pred
+ else:
+ return adv_scores
+
+
+def run_detect(model, x, adv_sig, random=None):
+ X_neuron_adv = model(x, upto=-3)
+
+ if random is None: random="correct"
+ filter_ratio = .1
+ if random == "fast":
+ n_mask = torch.rand(X_neuron_adv.shape[1]) < filter_ratio
+ X_neuron_adv = X_neuron_adv * n_mask.to(x.device)
+ elif random == "correct":
+ number_neuron = X_neuron_adv.shape[1]
+ number_keep = int(number_neuron * filter_ratio)
+ n_mask = np.array([1] * number_keep + [0] * (number_neuron - number_keep))
+ n_mask = np.array(n_mask)
+ np.random.shuffle(n_mask)
+ X_neuron_adv = X_neuron_adv * torch.tensor(n_mask).to(x.device)
+ else:
+    raise ValueError("unknown neuron sampling mode: {}".format(random))
+
+ adv_scores = torch_sim(X_neuron_adv, adv_sig)
+ return adv_scores
+
+
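+# cosine similarity between detector activations and a single trapdoor signature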
+def torch_sim(X_neuron, adv_sig):
+ if len(adv_sig.shape) == 1:
+ adv_sig = adv_sig.view((512, 1))
+ dotted = torch.matmul(X_neuron, adv_sig.reshape((512, 1))).flatten()
+ dotted /= (X_neuron**2).sum(axis=1)**.5
+ dotted /= (adv_sig**2).sum()**.5
+
+ return dotted
+
+
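+# cosine similarity between each sample's detector activations and every per-class trapdoor signature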
+def torch_multi_sim(X_neuron, adv_sig):
+ assert len(adv_sig.shape) == 2
+ dotted = torch.matmul(X_neuron, adv_sig)
+ dotted /= (X_neuron**2).sum(axis=1, keepdim=True)**.5
+ dotted /= (adv_sig**2).sum(axis=0, keepdim=True)**.5
+
+ return dotted
+
+def load_model_3(device=None):
+ # loads model & detector for class 3
+
+ model = TorchModel()
+ model.load_state_dict(torch.load('checkpoints/trapdoor/torch_cifar_model.h5'))
+ model = model.eval().to(device)
+
+ signature = np.load("checkpoints/trapdoor/signature.npy")
+ signature = torch.tensor(signature).to(device)
+
+ def detector(x, how=None):
+ return run_detect(model, x, signature, how)
+
+ return model, detector
+
+
+def load_model(device=None):
+ model = TorchModel()
+ model.load_state_dict(torch.load('checkpoints/trapdoor/torch_cifar_model.h5'))
+ model = model.eval().to(device)
+
+ signatures = np.load("checkpoints/trapdoor/signatures_all_nicholas.npy").transpose((1, 0))
+ signatures = torch.tensor(signatures).to(device)
+
+ def detectors(x, how=None, return_pred=False, subtract_thresholds=None):
+ return run_multi_detect(model, x, signatures, how, return_pred=return_pred,
+ subtract_thresholds=subtract_thresholds)
+
+ return model, detectors
\ No newline at end of file
diff --git a/case_studies/trapdoor/generate_signatures.py b/case_studies/trapdoor/generate_signatures.py
new file mode 100644
index 0000000..31fc732
--- /dev/null
+++ b/case_studies/trapdoor/generate_signatures.py
@@ -0,0 +1,85 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pickle
+import torch
+import tqdm
+import numpy as np
+import random
+
+import defense
+
+
+def injection_func(mask, pattern, adv_img):
+ if len(adv_img.shape) == 4:
+ return mask.transpose((0,3,1,2)) * pattern.transpose((0,3,1,2)) + (1 - mask.transpose((0,3,1,2))) * adv_img
+ else:
+ return mask.transpose((2,0,1)) * pattern.transpose((2,0,1)) + (1 - mask.transpose((2,0,1))) * adv_img
+
+def mask_pattern_func(y_target, pattern_dict):
+ mask, pattern = random.choice(pattern_dict[y_target])
+ mask = np.copy(mask)
+ return mask, pattern
+
+def infect_X(img, tgt, num_classes, pattern_dict):
+ mask, pattern = mask_pattern_func(tgt, pattern_dict)
+ raw_img = np.copy(img)
+ adv_img = np.copy(raw_img)
+
+ adv_img = injection_func(mask, pattern, adv_img)
+ return adv_img, None
+
+
+def build_neuron_signature(model, X, Y, y_target, pattern_dict):
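+  # a class' trapdoor signature is the mean detector-layer activation over training
+  # images with that class' trapdoor pattern injected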
+ num_classes = 10
+ X_adv = np.array(
+ [infect_X(img, y_target, pattern_dict=pattern_dict, num_classes=num_classes)[0] for img in np.copy(X)])
+ BS = 512
+ X_neuron_adv = np.concatenate([model(X_adv[i:i+BS], upto=-3) for i in range(0,len(X_adv),BS)])
+ X_neuron_adv = np.mean(X_neuron_adv, axis=0)
+ sig = X_neuron_adv
+ return sig
+
+
+def main():
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, _ = defense.load_model_3(device)
+
+ data = defense.CIFAR10()
+
+ pattern_dict = pickle.load(
+ open("checkpoints/trapdoor/torch_cifar_res.p", "rb"))['pattern_dict']
+
+ signatures = {}
+ for label in tqdm.tqdm(range(10)):
+ signature = build_neuron_signature(
+ lambda x, upto=None: model(
+ torch.tensor(x, dtype=torch.float32).to(device),
+ upto=upto).cpu().detach().numpy(),
+ data.train_data, data.train_labels, label, pattern_dict)
+ signatures[label] = signature
+
+ signatures_np = np.array([signatures[k] for k in range(10)])
+
+ signature_nicholas = np.load("checkpoints/trapdoor/signature.npy").reshape(1, -1)
+
+ diff = signature_nicholas - signatures_np
+ # should be ~0 for label 3
+ print(np.abs(diff).max(-1))
+
+ np.save("checkpoints/trapdoor/signatures_all_torch.npy", signatures_np)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/trapdoor/generate_signatures_tf.py b/case_studies/trapdoor/generate_signatures_tf.py
new file mode 100644
index 0000000..33d9496
--- /dev/null
+++ b/case_studies/trapdoor/generate_signatures_tf.py
@@ -0,0 +1,110 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pickle
+import torch
+import tqdm
+import numpy as np
+import random
+
+import defense
+
+import keras
+import keras.backend as K
+import numpy as np
+from sklearn.utils import shuffle
+from tensorflow import set_random_seed
+from original.trap_utils import test_neuron_cosine_sim, init_gpu, preprocess, CoreModel, build_bottleneck_model, load_dataset, \
+ get_other_label_data, cal_roc, injection_func, generate_attack
+
+
+K.set_learning_phase(0)
+
+random.seed(1234)
+np.random.seed(1234)
+set_random_seed(1234)
+
+
+def mask_pattern_func(y_target, pattern_dict):
+ mask, pattern = random.choice(pattern_dict[y_target])
+ mask = np.copy(mask)
+ return mask, pattern
+
+
+def infect_X(img, tgt, num_classes, pattern_dict):
+ mask, pattern = mask_pattern_func(tgt, pattern_dict)
+ raw_img = np.copy(img)
+ adv_img = np.copy(raw_img)
+
+ adv_img = injection_func(mask, pattern, adv_img)
+ return adv_img, keras.utils.to_categorical(tgt, num_classes=num_classes)
+
+
+def eval_trapdoor(model, test_X, test_Y, y_target, pattern_dict, num_classes):
+ cur_test_X = np.array([infect_X(img, y_target, num_classes, pattern_dict)[0] for img in np.copy(test_X)])
+ trapdoor_succ = np.mean(np.argmax(model.predict(cur_test_X), axis=1) == y_target)
+ return trapdoor_succ
+
+
+
+def build_neuron_signature(bottleneck_model, X, Y, y_target, pattern_dict, num_classes=10):
+ X_adv = np.array(
+ [infect_X(img, y_target, pattern_dict=pattern_dict, num_classes=num_classes)[0] for img in np.copy(X)])
+ X_neuron_adv = bottleneck_model.predict(X_adv)
+ X_neuron_adv = np.mean(X_neuron_adv, axis=0)
+ sig = X_neuron_adv
+ return sig
+
+
+def main():
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ pattern_dict = pickle.load(
+ open("cifar_res.p", "rb"))['pattern_dict']
+
+
+ sess = init_gpu("0")
+ model = CoreModel("cifar", load_clean=True, load_model=False)
+
+ new_model = keras.models.load_model("cifar_model.h5", compile=False)
+
+ train_X, train_Y, test_X, test_Y = load_dataset(dataset='cifar')
+
+ bottleneck_model = build_bottleneck_model(new_model, model.target_layer)
+
+ train_X, train_Y = shuffle(train_X, train_Y)
+
+  # import pdb; pdb.set_trace()  # debugging breakpoint disabled so the script runs unattended
+ signatures = {}
+ for label in tqdm.tqdm(range(10)):
+ signature = build_neuron_signature(
+ bottleneck_model,
+ train_X, train_Y, label, pattern_dict)
+ eval_acc = eval_trapdoor(new_model, test_X, test_Y, label, pattern_dict, 10)
+ print(eval_acc)
+ signatures[label] = signature
+
+ signatures_np = np.array([signatures[k] for k in range(10)])
+
+ signature_nicholas = np.load("checkpoints/trapdoor/signature.npy").reshape(1, -1)
+  # import pdb; pdb.set_trace()  # debugging breakpoint disabled so the script runs unattended
+ diff = signature_nicholas - signatures_np
+ # should be ~0 for label 3
+ print(np.abs(diff).max(-1))
+
+ np.save("checkpoints/trapdoor/signatures_all.npy", signatures_np)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/case_studies/trapdoor/original/__init__.py b/case_studies/trapdoor/original/__init__.py
new file mode 100644
index 0000000..6cf2daf
--- /dev/null
+++ b/case_studies/trapdoor/original/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/case_studies/trapdoor/original/eval_detection.py b/case_studies/trapdoor/original/eval_detection.py
new file mode 100644
index 0000000..cf3733d
--- /dev/null
+++ b/case_studies/trapdoor/original/eval_detection.py
@@ -0,0 +1,208 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import pickle
+import random
+import sys
+
+import keras
+import keras.backend as K
+import numpy as np
+from sklearn.utils import shuffle
+from tensorflow import set_random_seed
+from trap_utils import test_neuron_cosine_sim, init_gpu, preprocess, CoreModel, build_bottleneck_model, load_dataset, \
+ get_other_label_data, cal_roc, injection_func, generate_attack
+
+K.set_learning_phase(0)
+
+random.seed(1234)
+np.random.seed(1234)
+set_random_seed(1234)
+
+
+def neuron_extractor(all_model_layers, x_input):
+ vector = []
+ for layer in all_model_layers:
+ cur_neuron = layer.predict(x_input)
+ cur_neuron = cur_neuron.reshape(x_input.shape[0], -1)
+ vector.append(cur_neuron)
+ vector = np.concatenate(vector, axis=1)
+ return vector
+
+
+def eval_filter_pattern(bottleneck_model, X_train, Y_train, X_test, X_adv_raw, y_target, pattern_dict, num_classes,
+ filter_ratio=1.0):
+ def build_neuron_signature(bottleneck_model, X, Y, y_target):
+ X_adv = np.array(
+ [infect_X(img, y_target, pattern_dict=pattern_dict, num_classes=num_classes)[0] for img in np.copy(X)])
+ X_neuron_adv = bottleneck_model.predict(X_adv)
+ X_neuron_adv = np.mean(X_neuron_adv, axis=0)
+ sig = X_neuron_adv
+ return sig
+
+ adv_sig = build_neuron_signature(bottleneck_model, X_train, Y_train, y_target)
+ X = np.array(X_test)
+ X_adv = preprocess(X_adv_raw, method="raw")
+ X_neuron = bottleneck_model.predict(X)
+ X_neuron_adv = bottleneck_model.predict(X_adv)
+
+ scores = []
+ sybils = set()
+ idx = 0
+
+ number_neuron = X_neuron_adv.shape[1]
+ number_keep = int(number_neuron * filter_ratio)
+ n_mask = np.array([1] * number_keep + [0] * (number_neuron - number_keep))
+ n_mask = np.array(shuffle(n_mask))
+ X_neuron = X_neuron * n_mask
+ X_neuron_adv = X_neuron_adv * n_mask
+
+ normal_scores = test_neuron_cosine_sim(X_neuron, adv_sig)
+
+ for score in normal_scores:
+ scores.append((idx, -score))
+ idx += 1
+
+ adv_scores = test_neuron_cosine_sim(X_neuron_adv, adv_sig)
+ for score in adv_scores:
+ scores.append((idx, -score))
+ sybils.add(idx)
+ idx += 1
+
+ roc_data, auc = cal_roc(scores, sybils)
+
+ fpr_list = [0.05]
+ fnr_mapping = {}
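+  # scan the ROC curve and record, for each FPR cutoff, the FNR (1 - TPR) of the
+  # operating points whose FPR stays below that cutoff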
+ for fpr, tpr in roc_data:
+ for fpr_cutoff in fpr_list:
+ if fpr < fpr_cutoff:
+ fnr_mapping[fpr_cutoff] = 1 - tpr
+
+ detection_succ = fnr_mapping[0.05]
+ print("Detection Success Rate at 0.05 FPR: {}".format(1 - detection_succ))
+ print('Detection AUC score %f' % auc)
+
+ return detection_succ, roc_data, normal_scores, adv_scores
+
+
+def mask_pattern_func(y_target, pattern_dict):
+ mask, pattern = random.choice(pattern_dict[y_target])
+ mask = np.copy(mask)
+ return mask, pattern
+
+
+def infect_X(img, tgt, num_classes, pattern_dict):
+ mask, pattern = mask_pattern_func(tgt, pattern_dict)
+ raw_img = np.copy(img)
+ adv_img = np.copy(raw_img)
+
+ adv_img = injection_func(mask, pattern, adv_img)
+ return adv_img, keras.utils.to_categorical(tgt, num_classes=num_classes)
+
+
+def eval_trapdoor(model, test_X, test_Y, y_target, pattern_dict, num_classes):
+ cur_test_X = np.array([infect_X(img, y_target, num_classes, pattern_dict)[0] for img in np.copy(test_X)])
+ trapdoor_succ = np.mean(np.argmax(model.predict(cur_test_X), axis=1) == y_target)
+ return trapdoor_succ
+
+
+def eval_defense():
+ MODEL_PATH = "models/{}_model.h5".format(args.dataset)
+ RES_PATH = "results/{}_res.p".format(args.dataset)
+
+ sess = init_gpu(args.gpu)
+ if args.attack == 'all':
+ ATTACK = ["cw", "en", 'pgd']
+ else:
+ ATTACK = [args.attack]
+
+ model = CoreModel(args.dataset, load_clean=True, load_model=False)
+
+ RES = pickle.load(open(RES_PATH, "rb"))
+ target_ls = RES['target_ls']
+
+ pattern_dict = RES['pattern_dict']
+
+ new_model = keras.models.load_model(MODEL_PATH, compile=False)
+
+ train_X, train_Y, test_X, test_Y = load_dataset(dataset=args.dataset)
+
+ bottleneck_model = build_bottleneck_model(new_model, model.target_layer)
+
+ train_X, train_Y = shuffle(train_X, train_Y)
+ selected_X = train_X
+ selected_Y = train_Y
+
+ test_X, test_Y = shuffle(test_X, test_Y)
+ test_X = test_X[:1000]
+ test_Y = test_Y[:1000]
+ print("Randomly Select 3 Target Label for Evaluations: ")
+ for y_target in random.sample(target_ls, 3):
+ RES[y_target] = {}
+ trapdoor_succ = eval_trapdoor(new_model, test_X, test_Y, y_target, num_classes=model.num_classes,
+ pattern_dict=pattern_dict)
+
+ print("Target: {} - Trapdoor Succ: {}".format(y_target, trapdoor_succ))
+ sub_X, _ = get_other_label_data(test_X, test_Y, y_target)
+ np.random.shuffle(sub_X)
+ sub_X = sub_X[:64]
+
+ for attack in ATTACK:
+ clip_max = 1 if args.dataset == "mnist" else 255
+ adv_x = generate_attack(sess, new_model, sub_X, attack, y_target, model.num_classes,
+ clip_max=clip_max, clip_min=0,
+ mnist=args.dataset == "mnist")
+
+ succ_idx = np.argmax(new_model.predict(adv_x), axis=1) == y_target
+ attack_succ = np.mean(succ_idx)
+ print("ATTACK: {}, Attack Success: {:.4f}".format(attack, attack_succ))
+ if attack_succ < 0.05:
+ print("{} attack has low success rate".format(attack))
+ continue
+
+ adv_x = adv_x[succ_idx]
+ succ_sub_X = sub_X[succ_idx]
+
+ fnr_ls, roc_data, normal_scores, adv_scores = eval_filter_pattern(bottleneck_model, selected_X, selected_Y,
+ succ_sub_X, adv_x,
+ y_target, pattern_dict=pattern_dict,
+ num_classes=model.num_classes,
+ filter_ratio=args.filter_ratio)
+
+ RES[y_target][attack] = {}
+ RES[y_target][attack]['attack_succ'] = attack_succ
+ RES[y_target][attack]['adv_x'] = adv_x
+ RES[y_target][attack]["roc_data"] = roc_data
+ RES[y_target][attack]["normal_scores"] = normal_scores
+ RES[y_target][attack]["adv_scores"] = adv_scores
+ RES[y_target][attack]["fnr_ls"] = fnr_ls
+
+
+def parse_arguments(argv):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--gpu', type=str,
+ help='GPU id', default='0')
+ parser.add_argument('--dataset', type=str,
+ help='name of dataset', default='mnist')
+ parser.add_argument('--attack', type=str,
+ help='attack type', default='pgd')
+ parser.add_argument('--filter-ratio', type=float,
+ help='ratio of neuron kept for matching', default=1.0)
+ return parser.parse_args(argv)
+
+
+if __name__ == '__main__':
+ args = parse_arguments(sys.argv[1:])
+ eval_defense()
\ No newline at end of file
diff --git a/case_studies/trapdoor/original/inject_trapdoors.py b/case_studies/trapdoor/original/inject_trapdoors.py
new file mode 100644
index 0000000..2e1f804
--- /dev/null
+++ b/case_studies/trapdoor/original/inject_trapdoors.py
@@ -0,0 +1,185 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import pickle
+import random
+import sys
+
+import keras
+import numpy as np
+from keras.callbacks import LearningRateScheduler
+from keras.callbacks import ReduceLROnPlateau
+from keras.preprocessing.image import ImageDataGenerator
+from tensorflow import set_random_seed
+from trap_utils import injection_func, init_gpu, CoreModel, craft_trapdoors, CallbackGenerator, load_dataset
+
+MODEL_PREFIX = "models/"
+DIRECTORY = 'results/'
+
+
+class DataGenerator(object):
+ def __init__(self, target_ls, pattern_dict, num_classes):
+ self.target_ls = target_ls
+ self.pattern_dict = pattern_dict
+ self.num_classes = num_classes
+
+ def mask_pattern_func(self, y_target):
+ mask, pattern = random.choice(self.pattern_dict[y_target])
+ mask = np.copy(mask)
+
+ return mask, pattern
+
+ def infect_X(self, img, tgt):
+ mask, pattern = self.mask_pattern_func(tgt)
+ raw_img = np.copy(img)
+ adv_img = np.copy(raw_img)
+
+ adv_img = injection_func(mask, pattern, adv_img)
+ return adv_img, keras.utils.to_categorical(tgt, num_classes=self.num_classes)
+
+ def generate_data(self, gen, inject_ratio):
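+ # Yield batches from `gen` in which each sample is independently replaced by a
+ # trapdoored version (toward a randomly chosen trapdoor target) with probability
+ # `inject_ratio`.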
+ while 1:
+ batch_X, batch_Y = [], []
+
+ clean_X_batch, clean_Y_batch = next(gen)
+ for cur_x, cur_y in zip(clean_X_batch, clean_Y_batch):
+ inject_ptr = random.uniform(0, 1)
+ if inject_ptr < inject_ratio:
+ tgt = random.choice(self.target_ls)
+ cur_x, cur_y = self.infect_X(cur_x, tgt)
+
+ batch_X.append(cur_x)
+ batch_Y.append(cur_y)
+
+ yield np.array(batch_X), np.array(batch_Y)
+
+
+def lr_schedule(epoch):
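+ # Step schedule: 1e-3 for the first 10 epochs, 1e-4 up to epoch 50 (the >40, >15
+ # and >10 branches apply the same 10x reduction), and 5e-5 afterwards.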
+ lr = 1e-3
+ if epoch > 50:
+ lr *= 0.5e-1
+ elif epoch > 40:
+ lr *= 1e-1
+ elif epoch > 15:
+ lr *= 1e-1
+ elif epoch > 10:
+ lr *= 1e-1
+ print('Learning rate: ', lr)
+ return lr
+
+
+def main():
+ random.seed(args.seed)
+ np.random.seed(args.seed)
+ set_random_seed(args.seed)
+
+ sess = init_gpu(args.gpu)
+ model = CoreModel(args.dataset, load_clean=False)
+ new_model = model.model
+
+ target_ls = range(model.num_classes)
+ INJECT_RATIO = args.inject_ratio
+ print("Injection Ratio: ", INJECT_RATIO)
+ f_name = "{}".format(args.dataset)
+
+ os.makedirs(DIRECTORY, exist_ok=True)
+ file_prefix = os.path.join(DIRECTORY, f_name)
+
+ pattern_dict = craft_trapdoors(target_ls, model.img_shape, args.num_cluster,
+ pattern_size=args.pattern_size, mask_ratio=args.mask_ratio,
+ mnist=1 if args.dataset == 'mnist' or args.dataset == 'cifar' else 0)
+
+ RES = {}
+ RES['target_ls'] = target_ls
+ RES['pattern_dict'] = pattern_dict
+
+ data_gen = ImageDataGenerator()
+
+ X_train, Y_train, X_test, Y_test = load_dataset(args.dataset)
+ train_generator = data_gen.flow(X_train, Y_train, batch_size=32)
+ number_images = len(X_train)
+ test_generator = data_gen.flow(X_test, Y_test, batch_size=32)
+
+ new_model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=lr_schedule(0)),
+ metrics=['accuracy'])
+ lr_scheduler = LearningRateScheduler(lr_schedule)
+
+ lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
+ cooldown=0,
+ patience=5,
+ min_lr=0.5e-6)
+
+ base_gen = DataGenerator(target_ls, pattern_dict, model.num_classes)
+ test_adv_gen = base_gen.generate_data(test_generator, 1)
+ test_nor_gen = base_gen.generate_data(test_generator, 0)
+ clean_train_gen = base_gen.generate_data(train_generator, 0)
+ trap_train_gen = base_gen.generate_data(train_generator, INJECT_RATIO)
+
+ os.makedirs(MODEL_PREFIX, exist_ok=True)
+ os.makedirs(DIRECTORY, exist_ok=True)
+
+ model_file = MODEL_PREFIX + f_name + "_model.h5"
+ RES["model_file"] = model_file
+
+ if os.path.exists(model_file):
+ os.remove(model_file)
+
+ cb = CallbackGenerator(test_nor_gen, test_adv_gen, model_file=model_file, expected_acc=model.expect_acc)
+ callbacks = [lr_reducer, lr_scheduler, cb]
+
+ print("First Step: Training Normal Model...")
+ new_model.fit_generator(clean_train_gen, validation_data=test_nor_gen, steps_per_epoch=number_images // 32,
+ epochs=model.epochs, verbose=2, callbacks=callbacks, validation_steps=100,
+ use_multiprocessing=True,
+ workers=1)
+
+ print("Second Step: Injecting Trapdoor...")
+ new_model.fit_generator(trap_train_gen, validation_data=test_nor_gen, steps_per_epoch=number_images // 32,
+ epochs=model.epochs, verbose=2, callbacks=callbacks, validation_steps=100,
+ use_multiprocessing=True,
+ workers=1)
+
+ if not os.path.exists(model_file):
+ raise Exception("NO GOOD MODEL!!!")
+
+ new_model = keras.models.load_model(model_file)
+ loss, acc = new_model.evaluate_generator(test_nor_gen, verbose=0, steps=100)
+
+ RES["normal_acc"] = acc
+ loss, backdoor_acc = new_model.evaluate_generator(test_adv_gen, steps=200, verbose=0)
+ RES["trapdoor_acc"] = backdoor_acc
+
+ file_save_path = file_prefix + "_res.p"
+ pickle.dump(RES, open(file_save_path, 'wb'))
+ print("File saved to {}, use this path as protected-path for the eval script. ".format(file_save_path))
+
+
+def parse_arguments(argv):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--gpu', type=str, help='GPU id', default='0')
+ parser.add_argument('--dataset', type=str, help='name of dataset. mnist or cifar', default='mnist')
+ parser.add_argument('--inject-ratio', type=float, help='injection ratio', default=0.5)
+ parser.add_argument('--seed', type=int, help='', default=0)
+ parser.add_argument('--num_cluster', type=int, help='', default=7)
+ parser.add_argument('--pattern_size', type=int, help='', default=3)
+ parser.add_argument('--mask_ratio', type=float, help='', default=0.1)
+
+ return parser.parse_args(argv)
+
+
+if __name__ == '__main__':
+ args = parse_arguments(sys.argv[1:])
+ main()
\ No newline at end of file
diff --git a/case_studies/trapdoor/original/trap_utils.py b/case_studies/trapdoor/original/trap_utils.py
new file mode 100644
index 0000000..0c70575
--- /dev/null
+++ b/case_studies/trapdoor/original/trap_utils.py
@@ -0,0 +1,564 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import random
+
+import keras
+import keras.backend as K
+import numpy as np
+import tensorflow as tf
+from cleverhans import attacks
+from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Activation, Dropout, BatchNormalization
+from keras.models import Model
+from keras.models import Sequential
+from keras.regularizers import l2
+from sklearn.metrics.pairwise import paired_cosine_distances
+
+
+def injection_func(mask, pattern, adv_img):
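+ # Pixel-wise blend: where mask > 0 the trapdoor pattern is mixed into the image,
+ # with the mask value (mask_ratio for crafted trapdoors) acting as the blend weight.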
+ return mask * pattern + (1 - mask) * adv_img
+
+
+def fix_gpu_memory(mem_fraction=1):
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+ tf_config = None
+ if tf.test.is_gpu_available():
+ gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
+ tf_config = tf.ConfigProto(gpu_options=gpu_options)
+ tf_config.gpu_options.allow_growth = True
+ tf_config.log_device_placement = False
+ init_op = tf.global_variables_initializer()
+ sess = tf.Session(config=tf_config)
+ sess.run(init_op)
+ K.set_session(sess)
+ return sess
+
+
+def init_gpu(gpu_index, force=False):
+ if isinstance(gpu_index, list):
+ gpu_num = ','.join([str(i) for i in gpu_index])
+ else:
+ gpu_num = str(gpu_index)
+ if "CUDA_VISIBLE_DEVICES" in os.environ and os.environ["CUDA_VISIBLE_DEVICES"] and not force:
+ print('GPU already initiated')
+ return
+ os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
+ sess = fix_gpu_memory()
+ return sess
+
+
+class CoreModel(object):
+ def __init__(self, dataset, load_clean=False, load_model=True):
+ self.dataset = dataset
+ if load_model:
+ self.model = get_model(dataset, load_clean=load_clean)
+ else:
+ self.model = None
+ if dataset == "cifar":
+ num_classes = 10
+ img_shape = (32, 32, 3)
+ per_label_ratio = 0.1
+ expect_acc = 0.75
+ target_layer = 'dense'
+ mask_ratio = 0.03
+ pattern_size = 3
+ epochs = 20
+
+ elif dataset == "mnist":
+ num_classes = 10
+ img_shape = (28, 28, 1)
+ per_label_ratio = 0.1
+ expect_acc = 0.98
+ target_layer = 'dense'
+ mask_ratio = 0.1
+ pattern_size = 3
+ epochs = 10
+
+ else:
+ raise Exception("Not implement")
+
+ self.num_classes = num_classes
+ self.img_shape = img_shape
+ self.per_label_ratio = per_label_ratio
+ self.expect_acc = expect_acc
+ self.target_layer = target_layer
+ self.mask_ratio = mask_ratio
+ self.epochs = epochs
+ self.pattern_size = pattern_size
+
+
+def get_cifar_model(softmax=True):
+ layers = [
+ Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3)), # 0
+ Activation('relu'), # 1
+ BatchNormalization(), # 2
+ Conv2D(32, (3, 3), padding='same'), # 3
+ Activation('relu'), # 4
+ BatchNormalization(), # 5
+ MaxPooling2D(pool_size=(2, 2)), # 6
+
+ Conv2D(64, (3, 3), padding='same'), # 7
+ Activation('relu'), # 8
+ BatchNormalization(), # 9
+ Conv2D(64, (3, 3), padding='same'), # 10
+ Activation('relu'), # 11
+ BatchNormalization(), # 12
+ MaxPooling2D(pool_size=(2, 2)), # 13
+
+ Conv2D(128, (3, 3), padding='same'), # 14
+ Activation('relu'), # 15
+ BatchNormalization(), # 16
+ Conv2D(128, (3, 3), padding='same'), # 17
+ Activation('relu'), # 18
+ BatchNormalization(), # 19
+ MaxPooling2D(pool_size=(2, 2)), # 20
+
+ Flatten(), # 21
+ Dropout(0.5), # 22
+
+ Dense(1024, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)), # 23
+ Activation('relu'), # 24
+ BatchNormalization(), # 25
+ Dropout(0.5), # 26
+ Dense(512, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), name='dense'), # 27
+ Activation('relu'), # 28
+ BatchNormalization(), # 29
+ Dropout(0.5), # 30
+ Dense(10), # 31
+ ]
+ model = Sequential()
+ for layer in layers:
+ model.add(layer)
+ if softmax:
+ model.add(Activation('softmax'))
+ return model
+
+
+def get_mnist_model(input_shape=(28, 28, 1),
+ num_classes=10):
+ model = Sequential()
+ model.add(Conv2D(16, kernel_size=(5, 5),
+ activation='relu',
+ input_shape=input_shape))
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+ model.add(Conv2D(32, (5, 5), activation='relu'))
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+ model.add(Flatten())
+ model.add(Dense(512, activation='relu', name='dense'))
+ model.add(Dense(num_classes, activation='softmax'))
+
+ model.compile(loss='categorical_crossentropy',
+ optimizer='Adam',
+ metrics=['accuracy'])
+
+ return model
+
+
+def get_model(dataset, load_clean=False):
+ if load_clean:
+ model = keras.models.load_model("/home/shansixioing/trap/models/{}_clean.h5".format(dataset))
+ else:
+ if dataset == "cifar":
+ model = get_cifar_model()
+ elif dataset == 'mnist':
+ model = get_mnist_model()
+ else:
+ raise Exception("Model not implemented")
+
+ return model
+
+
+def load_dataset(dataset):
+ if dataset == "cifar":
+ from keras.datasets import cifar10
+ (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
+ X_train = X_train / 255.0
+ X_test = X_test / 255.0
+
+ Y_train = keras.utils.to_categorical(Y_train, 10)
+ Y_test = keras.utils.to_categorical(Y_test, 10)
+ elif dataset == 'mnist':
+ from keras.datasets import mnist
+ (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
+ X_train = X_train.reshape(-1, 28, 28, 1)
+ X_test = X_test.reshape(-1, 28, 28, 1)
+ Y_train = keras.utils.to_categorical(Y_train, 10)
+ Y_test = keras.utils.to_categorical(Y_test, 10)
+ X_train = X_train / 255.0
+ X_test = X_test / 255.0
+ else:
+ raise Exception("Dataset not implemented")
+
+ return X_train, Y_train, X_test, Y_test
+
+
+class CallbackGenerator(keras.callbacks.Callback):
+ def __init__(self, test_nor_gen, adv_gen, model_file, expected_acc=0.9):
+ self.test_nor_gen = test_nor_gen
+ self.adv_gen = adv_gen
+ self.best_attack = 0
+ self.expected_acc = expected_acc
+ self.model_file = model_file
+
+ def on_epoch_end(self, epoch, logs=None):
+ _, clean_acc = self.model.evaluate_generator(self.test_nor_gen, verbose=0, steps=100)
+ _, attack_acc = self.model.evaluate_generator(self.adv_gen, steps=100, verbose=0)
+
+ print("Epoch: {} - Clean Acc {:.4f} - Trapdoor Acc {:.4f}".format(epoch, clean_acc, attack_acc))
+ if clean_acc > self.expected_acc and attack_acc > self.best_attack and attack_acc > 0.9:
+ if self.model_file:
+ self.model.save(self.model_file)
+ self.best_attack = attack_acc
+
+ # if clean_acc > self.expected_acc and attack_acc > 0.995:
+ # self.model.stop_training = True
+
+
+def generate_attack(sess, model, test_X, method, target, num_classes, clip_max=255.0,
+ clip_min=0.0, mnist=False, confidence=0, batch_size=None):
+ from cleverhans import utils_keras
+ from cleverhans.utils import set_log_level
+ set_log_level(0)
+
+ wrap = utils_keras.KerasModelWrapper(model)
+ y_tgt = keras.utils.to_categorical([target] * test_X.shape[0], num_classes=num_classes)
+
+ batch_size = len(test_X) if batch_size is None else batch_size
+
+ if method == "cw":
+ cwl2 = attacks.CarliniWagnerL2(wrap, sess=sess)
+ adv_x = cwl2.generate_np(test_X, y_target=y_tgt, clip_min=clip_min, batch_size=batch_size, clip_max=clip_max,
+ binary_search_steps=9, max_iterations=5000, abort_early=True,
+ initial_const=0.001, confidence=confidence, learning_rate=0.01)
+
+ elif method == "pgd":
+ eps = 8 if not mnist else 8 / 255
+ eps_iter = 0.1 if not mnist else 0.1 / 255
+ pgd = attacks.ProjectedGradientDescent(wrap, sess=sess)
+ adv_x = pgd.generate_np(test_X, y_target=y_tgt, clip_max=clip_max, nb_iter=100, eps=eps,
+ eps_iter=eps_iter, clip_min=clip_min)
+
+ elif method == "en":
+ enet = attacks.ElasticNetMethod(wrap, sess=sess)
+ adv_x = enet.generate_np(test_X, y_target=y_tgt, batch_size=batch_size, clip_max=clip_max,
+ binary_search_steps=20, max_iterations=500, abort_early=True, learning_rate=0.5)
+
+ else:
+ raise Exception("No such attack")
+
+ return adv_x
+
+
+def construct_mask_random_location(image_row=32, image_col=32, channel_num=3, pattern_size=4,
+ color=[255.0, 255.0, 255.0]):
+ c_col = random.choice(range(0, image_col - pattern_size + 1))
+ c_row = random.choice(range(0, image_row - pattern_size + 1))
+
+ mask = np.zeros((image_row, image_col, channel_num))
+ pattern = np.zeros((image_row, image_col, channel_num))
+
+ mask[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = 1
+ if channel_num == 1:
+ pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = [1]
+ else:
+ pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = color
+
+ return mask, pattern
+
+
+def construct_mask_random_location_mnist(image_row=28, image_col=28, channel_num=1, pattern_size=4,
+ color=[1.]):
+ c_col = random.choice(range(0, image_col - pattern_size + 1))
+ c_row = random.choice(range(0, image_row - pattern_size + 1))
+
+ mask = np.zeros((image_row, image_col, channel_num))
+ pattern = np.zeros((image_row, image_col, channel_num))
+
+ mask[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = 1
+ if channel_num == 1:
+ pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = [1]
+ else:
+ pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = color
+
+ return mask, pattern
+
+
+def iter_pattern_base_per_mnist(target_ls, image_shape, num_clusters, pattern_per_label=1, pattern_size=3,
+ mask_ratio=0.1):
+ total_ls = {}
+
+ for y_target in target_ls:
+
+ cur_pattern_ls = []
+
+ for _ in range(pattern_per_label):
+ tot_mask = np.zeros(image_shape)
+ tot_pattern = np.zeros(image_shape)
+ for p in range(num_clusters):
+ mask, _ = construct_mask_random_location_mnist(image_row=image_shape[0],
+ image_col=image_shape[1],
+ channel_num=image_shape[2],
+ pattern_size=pattern_size)
+ tot_mask += mask
+
+ m1 = random.uniform(0, 1)
+
+ s1 = random.uniform(0, 1)
+
+ r = np.random.normal(m1, s1, image_shape[:-1])
+ cur_pattern = np.stack([r], axis=2)
+ cur_pattern = cur_pattern * (mask != 0)
+ cur_pattern = np.clip(cur_pattern, 0, 1.0)
+ tot_pattern += cur_pattern
+
+ tot_mask = (tot_mask > 0) * mask_ratio
+ tot_pattern = np.clip(tot_pattern, 0, 1.0)
+ cur_pattern_ls.append([tot_mask, tot_pattern])
+
+ total_ls[y_target] = cur_pattern_ls
+ return total_ls
+
+
+def craft_trapdoors(target_ls, image_shape, num_clusters, pattern_per_label=1, pattern_size=3, mask_ratio=0.1,
+ mnist=False):
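+ # Returns {target_label: [(mask, pattern), ...]}. Each trapdoor is the union of
+ # `num_clusters` randomly placed squares of size `pattern_size`, filled with
+ # random noise and blended into images with weight `mask_ratio`.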
+ if mnist:
+ return iter_pattern_base_per_mnist(target_ls, image_shape, num_clusters, pattern_per_label=pattern_per_label,
+ pattern_size=pattern_size,
+ mask_ratio=mask_ratio)
+ total_ls = {}
+
+ for y_target in target_ls:
+ cur_pattern_ls = []
+
+ for _ in range(pattern_per_label):
+ tot_mask = np.zeros(image_shape)
+ tot_pattern = np.zeros(image_shape)
+
+ for p in range(num_clusters):
+ mask, _ = construct_mask_random_location(image_row=image_shape[0],
+ image_col=image_shape[1],
+ channel_num=image_shape[2],
+ pattern_size=pattern_size)
+ tot_mask += mask
+
+ m1 = random.uniform(0, 255)
+ m2 = random.uniform(0, 255)
+ m3 = random.uniform(0, 255)
+
+ s1 = random.uniform(0, 255)
+ s2 = random.uniform(0, 255)
+ s3 = random.uniform(0, 255)
+
+ r = np.random.normal(m1, s1, image_shape[:-1])
+ g = np.random.normal(m2, s2, image_shape[:-1])
+ b = np.random.normal(m3, s3, image_shape[:-1])
+ cur_pattern = np.stack([r, g, b], axis=2)
+ cur_pattern = cur_pattern * (mask != 0)
+ cur_pattern = np.clip(cur_pattern, 0, 255.0)
+ tot_pattern += cur_pattern
+
+ tot_mask = (tot_mask > 0) * mask_ratio
+ tot_pattern = np.clip(tot_pattern, 0, 255.0)
+ cur_pattern_ls.append([tot_mask, tot_pattern])
+
+ total_ls[y_target] = cur_pattern_ls
+
+ return total_ls
+
+
+def get_other_label_data(X, Y, target):
+ X_filter = np.array(X)
+ Y_filter = np.array(Y)
+ remain_idx = np.argmax(Y, axis=1) != target
+ X_filter = X_filter[remain_idx]
+ Y_filter = Y_filter[remain_idx]
+ return X_filter, Y_filter
+
+
+def build_bottleneck_model(model, cut_off):
+ bottleneck_model = Model(model.input, model.get_layer(cut_off).output)
+ bottleneck_model.compile(loss='categorical_crossentropy',
+ optimizer='adam',
+ metrics=['accuracy'])
+
+ return bottleneck_model
+
+
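+# Cosine similarity between each sample's bottleneck activations (X_neuron) and
+# the trapdoor signature (adv_sig); used as the detection score, where higher
+# values mean the activations are closer to the trapdoor signature.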
+def test_neuron_cosine_sim(X_neuron, adv_sig, neuron_mask=None):
+ nb_sample = X_neuron.shape[0]
+
+ # neuron_mask_expand = np.expand_dims(neuron_mask, axis=0)
+ # neuron_mask_repeat = np.repeat(neuron_mask_expand, nb_sample, axis=0)
+
+ adv_sig_repeat = np.expand_dims(adv_sig, axis=0)
+ adv_sig_repeat = np.repeat(adv_sig_repeat, nb_sample, axis=0)
+ adv_sig_flatten = np.reshape(adv_sig_repeat, (nb_sample, -1))
+
+ X_neuron_mask = X_neuron
+ X_flatten = np.reshape(X_neuron_mask, (nb_sample, -1))
+
+ cosine_sim = 1 - paired_cosine_distances(X_flatten, adv_sig_flatten)
+
+ # print(list(np.percentile(cosine_sim, [0, 5, 25, 50, 75, 95, 100])))
+
+ return cosine_sim
+
+
+def preprocess(X, method):
+ assert method in {'raw', 'imagenet', 'inception', 'mnist'}
+
+ if method == 'raw':
+ pass
+ elif method == 'imagenet':
+ X = imagenet_preprocessing(X)
+ else:
+ raise Exception('unknown method %s' % method)
+
+ return X
+
+
+def reverse_preprocess(X, method):
+ assert method in {'raw', 'imagenet', 'inception', 'mnist'}
+
+ if method == 'raw':
+ pass
+ elif method == 'imagenet':
+ X = imagenet_reverse_preprocessing(X)
+ else:
+ raise Exception('unknown method %s' % method)
+
+ return X
+
+
+def imagenet_preprocessing(x, data_format=None):
+ if data_format is None:
+ data_format = K.image_data_format()
+ assert data_format in ('channels_last', 'channels_first')
+
+ x = np.array(x)
+ if data_format == 'channels_first':
+ # 'RGB'->'BGR'
+ if x.ndim == 3:
+ x = x[::-1, ...]
+ else:
+ x = x[:, ::-1, ...]
+ else:
+ # 'RGB'->'BGR'
+ x = x[..., ::-1]
+
+ mean = [103.939, 116.779, 123.68]
+ std = None
+
+ # Zero-center by mean pixel
+ if data_format == 'channels_first':
+ if x.ndim == 3:
+ x[0, :, :] -= mean[0]
+ x[1, :, :] -= mean[1]
+ x[2, :, :] -= mean[2]
+ if std is not None:
+ x[0, :, :] /= std[0]
+ x[1, :, :] /= std[1]
+ x[2, :, :] /= std[2]
+ else:
+ x[:, 0, :, :] -= mean[0]
+ x[:, 1, :, :] -= mean[1]
+ x[:, 2, :, :] -= mean[2]
+ if std is not None:
+ x[:, 0, :, :] /= std[0]
+ x[:, 1, :, :] /= std[1]
+ x[:, 2, :, :] /= std[2]
+ else:
+ x[..., 0] -= mean[0]
+ x[..., 1] -= mean[1]
+ x[..., 2] -= mean[2]
+ if std is not None:
+ x[..., 0] /= std[0]
+ x[..., 1] /= std[1]
+ x[..., 2] /= std[2]
+
+ return x
+
+
+def imagenet_reverse_preprocessing(x, data_format=None):
+ import keras.backend as K
+ x = np.array(x)
+ if data_format is None:
+ data_format = K.image_data_format()
+ assert data_format in ('channels_last', 'channels_first')
+
+ if data_format == 'channels_first':
+ if x.ndim == 3:
+ # Zero-center by mean pixel
+ x[0, :, :] += 103.939
+ x[1, :, :] += 116.779
+ x[2, :, :] += 123.68
+ # 'BGR'->'RGB'
+ x = x[::-1, :, :]
+ else:
+ x[:, 0, :, :] += 103.939
+ x[:, 1, :, :] += 116.779
+ x[:, 2, :, :] += 123.68
+ x = x[:, ::-1, :, :]
+ else:
+ # Zero-center by mean pixel
+ x[..., 0] += 103.939
+ x[..., 1] += 116.779
+ x[..., 2] += 123.68
+ # 'BGR'->'RGB'
+ x = x[..., ::-1]
+ return x
+
+
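+# Build the ROC curve by sweeping a detection threshold over the ranked scores
+# (ties broken randomly) and approximate the AUC with the trapezoid rule.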
+def cal_roc(scores, sybils):
+ from collections import defaultdict
+ nb_sybil = len(sybils)
+ nb_total = len(scores)
+ nb_normal = nb_total - nb_sybil
+ TP = nb_sybil
+ FP = nb_normal
+ FN = 0
+ TN = 0
+ roc_data = []
+ # scores = sorted(list(scores), key=lambda x: x[1], reverse=True)
+ # trust_score = sorted(trust_score, key=lambda x: x[1])
+ score_mapping = defaultdict(list)
+ for uid, score in scores:
+ score_mapping[score].append(uid)
+ ranked_scores = []
+ for score in sorted(score_mapping.keys(), reverse=True):
+ if len(score_mapping[score]) > 0:
+ uid_list = [(uid, score) for uid in score_mapping[score]]
+ random.shuffle(uid_list)
+ ranked_scores.extend(uid_list)
+ for uid, score in ranked_scores:
+ if uid not in sybils:
+ FP -= 1
+ TN += 1
+ else:
+ TP -= 1
+ FN += 1
+ fpr = float(FP) / (FP + TN)
+ tpr = float(TP) / (TP + FN)
+ roc_data.append((fpr, tpr))
+ roc_data = sorted(roc_data)
+ if roc_data[-1][0] < 1:
+ roc_data.append((1.0, roc_data[-2][1]))
+ auc = 0
+ for i in range(1, len(roc_data)):
+ auc += ((roc_data[i][0] - roc_data[i - 1][0]) *
+ (roc_data[i][1] + roc_data[i - 1][1]) /
+ 2)
+
+ return roc_data, auc
\ No newline at end of file
diff --git a/case_studies/trapdoor/orthogonal_pgd.py b/case_studies/trapdoor/orthogonal_pgd.py
new file mode 100644
index 0000000..6cacf44
--- /dev/null
+++ b/case_studies/trapdoor/orthogonal_pgd.py
@@ -0,0 +1,241 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Taken from
+https://github.com/v-wangg/OrthogonalPGD/blob/c92f11ee69723822f2179be1d6f50cd86d94bbff/attack.py#L15
+"""
+import torch
+import numpy as np
+import tqdm
+
+
+class PGD:
+ def __init__(self, classifier, detector, classifier_loss=None,
+ detector_loss=None, steps=100, alpha=1 / 255, eps=8 / 255,
+ use_projection=True, projection_norm='linf', target=None, lmbd=0, k=None,
+ project_detector=False, project_classifier=False, img_min=0, img_max=1,
+ verbose=True):
+ '''
+ :param classifier: model used for classification
+ :param detector: model used for detection
+ :param classifier_loss: loss used for classification model
+ :param detector_loss: loss used for detection model. Need to have __call__
+ method which outputs adversarial scores ranging from 0 to 1
+ (0 if not adversarial and 1 if adversarial)
+ :param steps: number of steps for which to perform gradient descent/ascent
+ :param alpha: step size
+ :param eps: constraint on noise that can be applied to images
+ :param use_projection: True if gradients should be projected onto each other
+ :param projection_norm: 'linf' or 'l2' for regularization of gradients
+ :param target: target label to attack. if None, an untargeted attack is run
+ :param lmbd: hyperparameter for 'f + lmbd * g' when 'use_projection' is False
+ :param k: if not None, use the detector's (projected) gradient every k-th step
+ and the classifier's gradient on all other steps
+ :param project_detector: if True, project the detector gradient onto the
+ subspace orthogonal to the classifier gradient
+ :param project_classifier: if True, project the classifier gradient onto the
+ subspace orthogonal to the detector gradient
+ '''
+ self.classifier = classifier
+ self.detector = detector
+ self.steps = steps
+ self.alpha = alpha
+ self.eps = eps
+ self.classifier_loss = classifier_loss
+ self.detector_loss = detector_loss
+ self.use_projection = use_projection
+ self.projection_norm = projection_norm
+ self.project_classifier = project_classifier
+ self.project_detector = project_detector
+ self.target = target
+ self.lmbd = lmbd
+ self.k = k
+ self.img_min = img_min
+ self.img_max = img_max
+
+ self.verbose = verbose
+
+ # metrics to keep track of
+ self.all_classifier_losses = []
+ self.all_detector_losses = []
+
+ def attack_batch(self, inputs, targets, device):
+ adv_images = inputs.clone().detach()
+ original_inputs_numpy = inputs.clone().detach().numpy()
+
+ # alarm_targets = torch.tensor(np.zeros(len(inputs)).reshape(-1, 1))
+
+ # ideally no adversarial images should be detected
+ alarm_targets = torch.tensor(np.zeros(len(inputs)))
+
+ batch_size = inputs.shape[0]
+
+ # targeted attack
+ if self.target:
+ targeted_targets = torch.tensor(self.target * np.ones(len(inputs)),
+ dtype=torch.int64).to(device)
+
+ advx_final = inputs.detach().numpy()
+ loss_final = np.zeros(inputs.shape[0]) + np.inf
+
+ if self.verbose:
+ progress = tqdm.tqdm(range(self.steps))
+ else:
+ progress = range(self.steps)
+ for i in progress:
+ adv_images.requires_grad = True
+
+ # calculating gradient of classifier w.r.t. images
+ outputs = self.classifier(adv_images.to(device))
+
+ if self.target is not None:
+ loss_classifier = 1 * self.classifier_loss(outputs, targeted_targets)
+ else:
+ loss_classifier = self.classifier_loss(outputs, targets.to(device))
+
+ loss_classifier.backward(retain_graph=True)
+ grad_classifier = adv_images.grad.cpu().detach()
+
+ # calculating gradient of detector w.r.t. images
+ adv_images.grad = None
+ adv_scores = self.detector(adv_images.to(device))
+
+ if self.detector_loss:
+ loss_detector = -self.detector_loss(adv_scores, alarm_targets.to(device))
+ else:
+ loss_detector = torch.mean(adv_scores)
+
+ loss_detector.backward()
+ grad_detector = adv_images.grad.cpu().detach()
+
+ self.all_classifier_losses.append(loss_classifier.detach().data.item())
+ self.all_detector_losses.append(loss_detector.detach().data.item())
+
+ if self.target:
+ has_attack_succeeded = (outputs.cpu().detach().numpy().argmax(
+ 1) == targeted_targets.cpu().numpy())
+ else:
+ has_attack_succeeded = (
+ outputs.cpu().detach().numpy().argmax(1) != targets.numpy())
+
+ adv_images_np = adv_images.cpu().detach().numpy()
+ # print(torch.max(torch.abs(adv_images-inputs)))
+ # print('b',torch.max(torch.abs(torch.tensor(advx_final)-inputs)))
+ # use a separate index so the outer step counter i is not overwritten
+ # (it is needed below for the i % self.k schedule)
+ for j in range(len(advx_final)):
+ if has_attack_succeeded[j] and loss_final[j] > adv_scores[j]:
+ # print("assign", j, np.max(advx_final[j]-original_inputs_numpy[j]))
+ advx_final[j] = adv_images_np[j]
+ loss_final[j] = adv_scores[j]
+ # print("Update", j, adv_scores[j])
+
+ # using hyperparameter to combine gradient of classifier and gradient of detector
+ if not self.use_projection:
+ grad = grad_classifier + self.lmbd * grad_detector
+ else:
+ if self.project_detector:
+ # using Orthogonal Projected Gradient Descent
+ # projection of gradient of detector on gradient of classifier
+ # then grad_d' = grad_d - (project grad_d onto grad_c)
+ grad_detector_proj = grad_detector - torch.bmm((torch.bmm(
+ grad_detector.view(batch_size, 1, -1),
+ grad_classifier.view(batch_size, -1, 1))) / (1e-20 + torch.bmm(
+ grad_classifier.view(batch_size, 1, -1),
+ grad_classifier.view(batch_size, -1, 1))).view(-1, 1, 1),
+ grad_classifier.view(
+ batch_size, 1,
+ -1)).view(
+ grad_detector.shape)
+ else:
+ grad_detector_proj = grad_detector
+
+ if self.project_classifier:
+ # using Orthogonal Projected Gradient Descent
+ # projection of gradient of detector on gradient of classifier
+ # then grad_c' = grad_c - (project grad_c onto grad_d)
+ grad_classifier_proj = grad_classifier - torch.bmm((torch.bmm(
+ grad_classifier.view(batch_size, 1, -1),
+ grad_detector.view(batch_size, -1, 1))) / (1e-20 + torch.bmm(
+ grad_detector.view(batch_size, 1, -1),
+ grad_detector.view(batch_size, -1, 1))).view(-1, 1, 1),
+ grad_detector.view(
+ batch_size, 1,
+ -1)).view(
+ grad_classifier.shape)
+ else:
+ grad_classifier_proj = grad_classifier
+
+ # making sure adversarial images have crossed decision boundary
+ outputs_perturbed = outputs.cpu().detach().numpy()
+ if self.target:
+ outputs_perturbed[
+ np.arange(targeted_targets.shape[0]), targets] += .05
+ has_attack_succeeded = np.array(
+ (outputs_perturbed.argmax(1) == targeted_targets.cpu().numpy())[:,
+ None, None, None], dtype=np.float32)
+ else:
+ outputs_perturbed[np.arange(targets.shape[0]), targets] += .05
+ has_attack_succeeded = np.array(
+ (outputs_perturbed.argmax(1) != targets.numpy())[:, None, None,
+ None], dtype=np.float32)
+
+ if self.verbose:
+ progress.set_description(
+ "Losses (%.3f/%.3f/%.3f/%.3f)" % (np.mean(self.all_classifier_losses[-10:]),
+ np.mean(self.all_detector_losses[-10:]),
+ np.mean(loss_final),
+ has_attack_succeeded.mean()))
+
+ # print('correct frac', has_attack_succeeded.mean())
+ # print('really adv target reached', (outputs.argmax(1).cpu().detach().numpy() == self.target).mean())
+
+ if self.k:
+ # take gradients of g onto f every kth step
+ if i % self.k == 0:
+ grad = grad_detector_proj
+ else:
+ grad = grad_classifier_proj
+ else:
+ # print(outputs_perturbed, has_attack_succeeded, adv_scores)
+ grad = grad_classifier_proj * (
+ 1 - has_attack_succeeded) + grad_detector_proj * has_attack_succeeded
+
+ if np.any(np.isnan(grad.numpy())):
+ print(np.mean(np.isnan(grad.numpy())))
+ print("ABORT")
+ break
+
+ if self.target:
+ grad = -grad
+
+ # l2 regularization
+ if self.projection_norm == 'l2':
+ grad_norms = torch.norm(grad.view(batch_size, -1), p=2, dim=1) + 1e-20
+ grad = grad / grad_norms.view(batch_size, 1, 1, 1)
+ # linf regularization
+ elif self.projection_norm == 'linf':
+ grad = torch.sign(grad)
+ else:
+ raise Exception('Incorrect Projection Norm')
+
+ adv_images = adv_images.detach() + self.alpha * grad
+ delta = torch.clamp(adv_images - torch.tensor(original_inputs_numpy),
+ min=-self.eps, max=self.eps)
+ adv_images = torch.clamp(torch.tensor(original_inputs_numpy) + delta,
+ min=self.img_min, max=self.img_max).detach()
+
+ return torch.tensor(advx_final)
+
+ def attack(self, inputs, targets, device):
+ adv_images = []
+ batch_adv_images = self.attack_batch(inputs, targets, device)
+ adv_images.append(batch_adv_images)
+ return torch.cat(adv_images)
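+
+# Minimal usage sketch (classifier/detector stand for torch modules returning
+# logits and per-sample detection scores; images in [img_min, img_max], labels
+# as int64; these names are placeholders, not part of the original repository):
+#
+#   attack = PGD(classifier, detector,
+#                classifier_loss=torch.nn.CrossEntropyLoss(),
+#                steps=200, alpha=1 / 255, eps=8 / 255,
+#                project_detector=True)
+#   x_adv = attack.attack(images, labels, device="cpu")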
\ No newline at end of file
diff --git a/case_studies/vanilla_classifier/all.sh b/case_studies/vanilla_classifier/all.sh
new file mode 100644
index 0000000..1a463cb
--- /dev/null
+++ b/case_studies/vanilla_classifier/all.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+./case_studies/vanilla_classifier/baseline.sh $1 $2
+./case_studies/vanilla_classifier/gradient_masking.sh $1 $2
+./case_studies/vanilla_classifier/inefficient_pgd.sh $1 $2
+./case_studies/vanilla_classifier/noisy_pgd.sh $1 $2
+./case_studies/vanilla_classifier/non_differentiable_input.sh $1 $2
diff --git a/case_studies/vanilla_classifier/at_inefficient_pgd.sh b/case_studies/vanilla_classifier/at_inefficient_pgd.sh
new file mode 100644
index 0000000..4ccce56
--- /dev/null
+++ b/case_studies/vanilla_classifier/at_inefficient_pgd.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+n_samples=${1:-2048}
+echo "Using ${n_samples} samples"
+
+checkpoint="checkpoints/rn50_madry_robustness_linf_at_8.pth"
+basecommand="
+--n-samples=${n_samples}
+--batch-size=512
+--classifier=networks.resnet50
+--classifier-input-normalization
+--input=${checkpoint}
+--no-logit-diff-loss
+"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+
+function eval {
+ local bs="norm=linf epsilon=0.031372549 \
+ n_inner_points=999 \
+ n_boundary_points=1 \
+ optimizer=sklearn \
+ adversarial_attack_settings=\"ATTACK_SETTINGS\""
+
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ $basecommand \
+ --no-clean-evaluation \
+ --decision-boundary-binarization="${bs/ATTACK_SETTINGS/$1}"
+}
+
+function evaladv {
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ $basecommand \
+ --no-clean-evaluation \
+ --adversarial-attack="$1"
+}
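+
+# eval runs the decision-boundary binarization test with the given PGD settings
+# substituted for ATTACK_SETTINGS; evaladv runs the corresponding standard
+# adversarial evaluation. Example:
+#   eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200"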
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "+Normal adversarial evaluation+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 200 steps, lr = 0.0011372549"
+evaladv "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "+Changing number of steps while keeping step size fixed+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 200 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 100 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=100"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 50 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=50"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 25 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=25"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 12 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=12"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 6 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=6"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 6 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=3"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 6 steps, lr = 0.0011372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "+Changing number of steps and step size while maintaining their ratio+"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 100 steps, lr = 0.00078431372"
+eval "norm=linf epsilon=0.031372549 step_size=0.00078431372 n_steps=100"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 50 steps, lr = 0.00156862745"
+eval "norm=linf epsilon=0.031372549 step_size=0.00156862745 n_steps=50"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 25 steps, lr = 0.0031372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.0031372549 n_steps=25"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 12 steps, lr = 0.0065359477"
+eval "norm=linf epsilon=0.031372549 step_size=0.0065359477 n_steps=12"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 6 steps, lr = 0.01307189541"
+eval "norm=linf epsilon=0.031372549 step_size=0.01307189541 n_steps=6"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 3 steps, lr = 0.02614379083"
+eval "norm=linf epsilon=0.031372549 step_size=0.02614379083 n_steps=3"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] PGD, 1 steps, lr = 0.031372549"
+eval "norm=linf epsilon=0.031372549 step_size=0.031372549 n_steps=1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
\ No newline at end of file
diff --git a/case_studies/vanilla_classifier/baseline.sh b/case_studies/vanilla_classifier/baseline.sh
new file mode 100644
index 0000000..058f913
--- /dev/null
+++ b/case_studies/vanilla_classifier/baseline.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+checkpoint="$1"
+
+basecommand='
+--n-samples=2048
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Clean evaluation"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --batch-size=2048 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=1 n_boundary_points=999 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=1\""
+ #--adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200"
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+exit
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+#--adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+# --logit-matching="n_steps=2000 step_size=0.0011372549" \
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 6/255] Clean evaluation"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --decision-boundary-binarization="norm=linf epsilon=0.02352941176 n_inner_points=49 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200\""
+ #--adversarial-attack="norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200"
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+#--adversarial-attack="norm=linf epsilon=0.02352941176 step_size=0.0011372549 n_steps=200" \
+# --logit-matching="n_steps=2000 step_size=0.0011372549" \
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 4/255] Clean evaluation"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --decision-boundary-binarization="norm=linf epsilon=0.0156862745 n_inner_points=49 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200\""
+ #--adversarial-attack="norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+#--adversarial-attack="norm=linf epsilon=0.0156862745 step_size=0.0011372549 n_steps=200" \
+# --logit-matching="n_steps=2000 step_size=0.0011372549" \
+
diff --git a/case_studies/vanilla_classifier/baseline_binarization_inner_boundary.sh b/case_studies/vanilla_classifier/baseline_binarization_inner_boundary.sh
new file mode 100644
index 0000000..78b2ac1
--- /dev/null
+++ b/case_studies/vanilla_classifier/baseline_binarization_inner_boundary.sh
@@ -0,0 +1,24 @@
+function run_experiment {
+ nboundary="$1"
+ ninner="$2"
+ epsilon="$3"
+ stepsize="$4"
+
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --n-samples=2048 \
+ --batch-size=512 \
+ --input=checkpoints/mrn18_200_epochs.pth \
+ --decision-boundary-binarization="norm=linf epsilon=$epsilon n_inner_points=$ninner \
+ n_boundary_points=$nboundary optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=$epsilon \
+ step_size=$stepsize n_steps=200\"" \
+ --no-ce-loss
+}
+
+#run_experiment 1 49 0.031372549 0.0011372549
+#run_experiment 1 149 0.031372549 0.0011372549
+#run_experiment 1 249 0.031372549 0.0011372549
+#run_experiment 1 499 0.031372549 0.0011372549
+#run_experiment 1 1499 0.031372549 0.0011372549
+#run_experiment 1 1999 0.031372549 0.0011372549
+#run_experiment 1 2499 0.031372549 0.0011372549
+#run_experiment 1 2999 0.031372549 0.0011372549
\ No newline at end of file
diff --git a/case_studies/vanilla_classifier/gradient_masking.sh b/case_studies/vanilla_classifier/gradient_masking.sh
new file mode 100644
index 0000000..a90ff37
--- /dev/null
+++ b/case_studies/vanilla_classifier/gradient_masking.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+checkpoint="$1"
+
+basecommand='
+--n-samples=512
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Gradient masking through softmax saturation through logit scaling (s=10)"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ --logit-matching="n_steps=2000 step_size=0.0011372549" \
+ --classifier-logit-scale=10 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Gradient masking through softmax saturation through logit scaling (s=100)"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ --logit-matching="n_steps=2000 step_size=0.0011372549" \
+ --classifier-logit-scale=100 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+#--model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Gradient masking through double softmax operation"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ --logit-matching="n_steps=2000 step_size=0.0011372549" \
+ --n-final-softmax=2
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Gradient masking through triple softmax operation"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ --n-final-softmax=3 \
+ --logit-matching="n_steps=200 step_size=0.0011372549"
+#--decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+# optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+#--model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=3 stddev=1.0" \
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
diff --git a/case_studies/vanilla_classifier/inefficient_pgd.sh b/case_studies/vanilla_classifier/inefficient_pgd.sh
new file mode 100644
index 0000000..dd90ab9
--- /dev/null
+++ b/case_studies/vanilla_classifier/inefficient_pgd.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+checkpoint="$1"
+
+basecommand='
+--n-samples=512
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Too few steps (n = 5)"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=5" \
+ --logit-matching="n_steps=2000 step_size=0.0011372549" \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=5\""
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=5\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Too few steps/too small step size (only reaches boundary)"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.00015686274 n_steps=200" \
+ --logit-matching="n_steps=2000 step_size=0.00015686274" \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.00015686274 n_steps=200\""
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.00015686274 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Too large step size (epsilon = step size)"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.031372549 n_steps=200" \
+ --logit-matching="n_steps=2000 step_size=0.031372549" \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.031372549 n_steps=200\""
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.031372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
diff --git a/case_studies/vanilla_classifier/noisy_pgd.sh b/case_studies/vanilla_classifier/noisy_pgd.sh
new file mode 100644
index 0000000..16e543a
--- /dev/null
+++ b/case_studies/vanilla_classifier/noisy_pgd.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+checkpoint="$1"
+
+basecommand='
+--n-samples=512
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+
+#echo
+#printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+#echo "[Linf, 8/255] Adding additive Gaussian noise as a pre-processing step"
+#PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+# --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+# --classifier-input-noise=0.1 \
+# --no-logit-diff \
+# --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+# optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+## --logit-matching="n_steps=2000 step_size=0.0011372549" \
+## --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+#printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Adding additive Gaussian noise as a pre-processing step"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=10" \
+ --classifier-input-noise=0.1 \
+ --no-logit-diff \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=10\""
+# --logit-matching="n_steps=2000 step_size=0.0011372549" \
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' '
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Adding additive Gaussian noise as a pre-processing step"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=100" \
+ --classifier-input-noise=0.1 \
+ --no-logit-diff \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=100\""
+# --logit-matching="n_steps=2000 step_size=0.0011372549" \
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Adding additive Gaussian noise to gradients"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ --classifier-gradient-noise=0.1 \
+ --no-logit-diff \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+#--logit-matching="n_steps=2000 step_size=0.0011372549" \
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Adding additive Gaussian noise to gradients"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=10" \
+ --classifier-gradient-noise=0.1 \
+ --no-logit-diff \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=10\""
+#--logit-matching="n_steps=2000 step_size=0.0011372549" \
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Adding additive Gaussian noise to gradients"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=100" \
+ --classifier-gradient-noise=0.1 \
+ --no-logit-diff \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200 n_averages=100\""
+#--logit-matching="n_steps=2000 step_size=0.0011372549" \
+# --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
diff --git a/case_studies/vanilla_classifier/non_differentiable_input.sh b/case_studies/vanilla_classifier/non_differentiable_input.sh
new file mode 100644
index 0000000..dce41c1
--- /dev/null
+++ b/case_studies/vanilla_classifier/non_differentiable_input.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+checkpoint="$1"
+
+basecommand='
+--n-samples=512
+--batch-size=512
+'
+basecommand="${basecommand} --input=${checkpoint}"
+
+if [ -z ${2+x} ]; then echo "Using default device"; else basecommand="$basecommand --device=$2"; fi
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Gaussian blur"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --classifier-input-gaussian-blur-stddev=0.5 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+ #--adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ #--logit-matching="n_steps=2000 step_size=0.0011372549" \
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+
+echo
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] JPEG compression (quality = 80)"
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py $basecommand \
+ --adversarial-attack="norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200" \
+ --logit-matching="n_steps=2000 step_size=0.0011372549" \
+ --classifier-input-jpeg-quality=80 \
+ --decision-boundary-binarization="norm=linf epsilon=0.031372549 n_inner_points=49 n_boundary_points=10 \
+ optimizer=sklearn adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\""
+ # --model-destruction="adversarial_attack_settings=\"norm=linf epsilon=0.031372549 step_size=0.0011372549 n_steps=200\" n_probes=10 stddev=1.0" \
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
\ No newline at end of file
diff --git a/case_studies/vanilla_classifier/pgd_num_steps_dependency.sh b/case_studies/vanilla_classifier/pgd_num_steps_dependency.sh
new file mode 100644
index 0000000..dd586d5
--- /dev/null
+++ b/case_studies/vanilla_classifier/pgd_num_steps_dependency.sh
@@ -0,0 +1,26 @@
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Clean evaluation"
+
+epsilon="0.031372549"
+nsteps="200"
+
+nstepsvalues=( 1 5 10 15 20)
+
+for nsteps in "${nstepsvalues[@]}"; do
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "$nsteps-step PGD"
+ advattack="norm=linf epsilon=$epsilon step_size=0.0011372549 n_steps=$nsteps"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --decision-boundary-binarization="norm=linf epsilon=$epsilon n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --adversarial-attack="$advattack" \
+ --no-clean-evaluation \
+ --no-logit-diff-loss \
+ --n-samples=2048 \
+ --batch-size=512 \
+ --input=$1 \
+ --classifier=${2:-networks.cifar_resnet18} \
+ $3
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+done
\ No newline at end of file
diff --git a/case_studies/vanilla_classifier/pgd_step_size_dependency.sh b/case_studies/vanilla_classifier/pgd_step_size_dependency.sh
new file mode 100644
index 0000000..19cd479
--- /dev/null
+++ b/case_studies/vanilla_classifier/pgd_step_size_dependency.sh
@@ -0,0 +1,37 @@
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "[Linf, 8/255] Clean evaluation"
+
+epsilon="0.031372549"
+nsteps="200"
+
+# step-size heuristic: step_size = 2.5 * epsilon / n_steps; with n_steps=200 the factor 2.5 / n_steps = 0.0125
+stepsizevalues=(
+ $(echo "print($epsilon)" | python3)
+ $(echo "print($epsilon * 0.5)" | python3)
+ $(echo "print($epsilon * 0.25)" | python3)
+ $(echo "print($epsilon * 0.125)" | python3)
+ $(echo "print($epsilon * 0.0625)" | python3)
+ $(echo "print($epsilon * 0.0125)" | python3) # good value, equals eps/nsteps*2.5
+ $(echo "print($epsilon * 0.003125)" | python3)
+ $(echo "print($epsilon * 0.0015625)" | python3)
+ $(echo "print($epsilon * 0.00078125)" | python3)
+)
+
+for stepsize in "${stepsizevalues[@]}"; do
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+ echo "$nsteps-step PGD (step size $stepsize)"
+ advattack="norm=linf epsilon=$epsilon step_size=$stepsize n_steps=$nsteps"
+ PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/evaluate_classifier.py \
+ --decision-boundary-binarization="norm=linf epsilon=$epsilon n_inner_points=999 n_boundary_points=1 \
+ optimizer=sklearn adversarial_attack_settings=\"$advattack\"" \
+ --adversarial-attack="$advattack" \
+ --no-clean-evaluation \
+ --no-logit-diff-loss \
+ --n-samples=2048 \
+ --batch-size=512 \
+ --input=$1 \
+ --classifier=${2:-networks.cifar_resnet18} \
+ $3
+ printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+done
\ No newline at end of file
diff --git a/case_studies/vonenet/LICENSE b/case_studies/vonenet/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/case_studies/vonenet/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/case_studies/vonenet/README.md b/case_studies/vonenet/README.md
new file mode 100644
index 0000000..756f30b
--- /dev/null
+++ b/case_studies/vonenet/README.md
@@ -0,0 +1,105 @@
+
+# VOneNet: CNNs with a Primary Visual Cortex Front-End
+
+A family of biologically-inspired Convolutional Neural Networks (CNNs). VOneNets have the following features:
+- Fixed-weight neural network model of the primate primary visual cortex (V1) as the front-end.
+- Robust to image perturbations
+- Brain-mapped
+- Flexible: can be adapted to different back-end architectures
+
+[read more...](#longer-motivation)
+
+## Available Models
+*(Click on the model names to download the weights of the ImageNet-trained models. Alternatively, you can use the function `get_model` in the `vonenet` package to download the weights.)*
+
+| Name | Description |
+| -------- | ------------------------------------------------------------------------ |
+| [VOneResNet50](https://vonenet-models.s3.us-east-2.amazonaws.com/voneresnet50_e70.pth.tar) | Our best performing VOneNet with a ResNet50 back-end |
+| [VOneCORnet-S](https://vonenet-models.s3.us-east-2.amazonaws.com/vonecornets_e70.pth.tar) | VOneNet with a recurrent neural network back-end based on the CORnet-S |
+| [VOneAlexNet](https://vonenet-models.s3.us-east-2.amazonaws.com/vonealexnet_e70.pth.tar) | VOneNet with a back-end based on AlexNet |
+
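+For example, the adversarial evaluation code in this repository loads a model as follows
+(`model_arch` is one of `alexnet`, `resnet50`, `resnet50_at`, `cornets`):
+
+```python
+from vonenet import get_model
+
+# Downloads the ImageNet-trained weights on first use.
+model = get_model(model_arch="resnet50", pretrained=True)
+```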
+
+## Quick Start
+
+VOneNets were trained on images normalized with mean=[0.5, 0.5, 0.5] and std=[0.5, 0.5, 0.5].
+
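+For reference, this matches the preprocessing used by the evaluation code in this
+repository (see case_studies/vonenet/adversarial_evaluation.py):
+
+```python
+import torchvision
+
+preprocess = torchvision.transforms.Compose([
+    torchvision.transforms.Resize(256),
+    torchvision.transforms.CenterCrop(224),
+    torchvision.transforms.ToTensor(),
+    torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
+                                     std=[0.5, 0.5, 0.5]),
+])
+```
+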
+More information coming soon...
+
+
+## Longer Motivation
+
+Current state-of-the-art object recognition models are largely based on convolutional neural network (CNN) architectures, which are loosely inspired by the primate visual system. However, these CNNs can be fooled by imperceptibly small, explicitly crafted perturbations, and struggle to recognize objects in corrupted images that are easily recognized by humans. Recently, we observed that CNN models with a neural hidden layer that better matches primate primary visual cortex (V1) are also more robust to adversarial attacks. Inspired by this observation, we developed VOneNets, a new class of hybrid CNN vision models. Each VOneNet contains a fixed weight neural network front-end that simulates primate V1, called the VOneBlock, followed by a neural network back-end adapted from current CNN vision models. The VOneBlock is based on a classical neuroscientific model of V1: the linear-nonlinear-Poisson model, consisting of a biologically-constrained Gabor filter bank, simple and complex cell nonlinearities, and a V1 neuronal stochasticity generator. After training, VOneNets retain high ImageNet performance, but each is substantially more robust, outperforming the base CNNs and state-of-the-art methods by 18% and 3%, respectively, on a conglomerate benchmark of perturbations comprised of white box adversarial attacks and common image corruptions. Additionally, all components of the VOneBlock work in synergy to improve robustness.
+Read more: [Dapello\*, Marques\*, et al. (biorxiv, 2020)](https://doi.org/10.1101/2020.06.16.154542)
+
+
+
+## Requirements
+
+- Python 3.6+
+- PyTorch 0.4.1+
+- numpy
+- pandas
+- tqdm
+- scipy
+
+
+## Citation
+
+Dapello, J., Marques, T., Schrimpf, M., Geiger, F., Cox, D.D., DiCarlo, J.J. (2020) Simulating a Primary Visual Cortex at the Front of CNNs Improves Robustness to Image Perturbations. *biorxiv.* doi.org/10.1101/2020.06.16.154542
+
+
+## License
+
+GNU GPL 3+
+
+
+## FAQ
+
+Soon...
+
+## Setup and Run
+
+1. Clone the repository:
+ $ git clone https://github.com/dicarlolab/vonenet.git
+
+2. To set up the code you also need a 'val' directory with the ImageNet validation data.
+ The dataset can be obtained from the download link below; the preparation steps are
+ adapted (and translated) from https://seongkyun.github.io/others/2019/03/06/imagenet_dn/
+
+ **Download link**
+ https://academictorrents.com/collection/imagenet-2012
+
+ Once you have downloaded the large tar archives, unpack them as follows:
+
+ # Unzip training dataset
+ $ mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
+ $ tar -xvf ILSVRC2012_img_train.tar
+ $ rm -f ILSVRC2012_img_train.tar  # optional: remove the archive after extraction
+ $ find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
+ $ cd ..
+
+ # Unzip validation dataset
+ $ mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
+ $ wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash
+
+ When this is finished you will have a train directory and a val directory;
+ the val directory is the one required for the setup below.
+
+ ## Caution
+ After the steps above, delete every file or directory whose name is not a synset
+ directory (names of the form n########), e.g. 'ILSVRC2012_img_train' in the train
+ directory or 'ILSVRC2012_img_val.tar' in the val directory; otherwise training will fail.
+
+3. Once the data is prepared, you can install and run the code.
+ Go to the cloned repository and open a terminal (check that your Python, PyTorch and
+ CUDA toolkit versions satisfy the requirements above), then run
+ $ python3 setup.py install
+ $ python3 run.py --in_path {directory containing the dataset above; the 'val' directory must be inside it}
+
+ # If you run into GPU-related problems, especially 'GPU is not available':
+ $ python3 run.py --in_path {directory containing the dataset above; the 'val' directory must be inside it} --ngpus 0
+
+ --ngpus defaults to 1; set it to 0 if you are fine with running on the CPU.
+
+
+
+
diff --git a/case_studies/vonenet/adversarial_evaluation.py b/case_studies/vonenet/adversarial_evaluation.py
new file mode 100644
index 0000000..f41bd79
--- /dev/null
+++ b/case_studies/vonenet/adversarial_evaluation.py
@@ -0,0 +1,163 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, argparse, time
+import tqdm
+import numpy as np
+import attacks.pgd as pgd
+import attacks.autopgd as autopgd
+
+
+parser = argparse.ArgumentParser(description='ImageNet Adversarial Validation')
+
+parser.add_argument('--in-path', required=True,
+ help='path to ImageNet folder that contains val folder')
+parser.add_argument('--batch-size', default=128, type=int,
+ help='size of batch for validation')
+parser.add_argument('--workers', default=20, type=int,
+ help='number of data loading workers')
+parser.add_argument('--model-arch',
+ choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'],
+ default='resnet50',
+ help='back-end model architecture to load')
+
+parser.add_argument("--n-samples", type=int, default=50000)
+parser.add_argument("--epsilon", default=1, help="in X/255", type=int)
+parser.add_argument("--attack", choices=("pgd", "apgd"), default="pgd")
+parser.add_argument("--n-steps", type=int, default=64)
+parser.add_argument("--step-size", default=0.1, help="in X/255", type=float)
+parser.add_argument("--ensemble-size", type=int, default=1)
+parser.add_argument("--deterministic-replacement", action="store_true")
+parser.add_argument("--differentiable-replacement", action="store_true")
+parser.add_argument("--stable-gradients", action="store_true")
+
+FLAGS = parser.parse_args()
+
+import torch
+import torch.nn as nn
+import torchvision
+from vonenet import get_model
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+
+def val():
+ model = get_model(model_arch=FLAGS.model_arch, pretrained=True)
+ model = model.to(device)
+
+ if FLAGS.attack == "pgd":
+ attack_fn = lambda m, x, y: \
+ pgd.pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
+ FLAGS.epsilon / 255.0, "linf",
+ n_averaging_steps=FLAGS.ensemble_size)[0]
+ else:
+ attack_fn = lambda m, x, y: \
+ autopgd.auto_pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
+ FLAGS.epsilon / 255.0, "linf",
+ n_averaging_steps=FLAGS.ensemble_size)[0]
+
+ validator = ImageNetAdversarialVal(model, attack_fn=attack_fn,
+ n_samples=FLAGS.n_samples)
+ record = validator()
+
+ print("Top 1:", record['top1'])
+ print("Top 5:", record['top5'])
+ return
+
+
+class ImageNetAdversarialVal(object):
+ def __init__(self, model, attack_fn, n_samples=50000):
+ self.name = 'val'
+ self.model = model
+ self.data_loader = self.data()
+ self.loss = nn.CrossEntropyLoss(size_average=False)
+ self.loss = self.loss.to(device)
+ self.attack_fn = attack_fn
+ self.n_samples = n_samples
+
+ def data(self):
+ dataset = torchvision.datasets.ImageFolder(
+ os.path.join(FLAGS.in_path, 'val'),
+ torchvision.transforms.Compose([
+ torchvision.transforms.Resize(256),
+ torchvision.transforms.CenterCrop(224),
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
+ std=[0.5, 0.5, 0.5]),
+ ]))
+ data_loader = torch.utils.data.DataLoader(dataset,
+ batch_size=FLAGS.batch_size,
+ shuffle=True,
+ num_workers=FLAGS.workers,
+ pin_memory=True)
+
+ return data_loader
+
+ def __call__(self):
+ self.model.eval()
+ start = time.time()
+ record = {'loss': 0, 'top1': 0, 'top5': 0}
+ n_samples = 0
+ n_batches = 0
+ with tqdm.tqdm(
+ total=int(np.ceil(self.n_samples / self.data_loader.batch_size)),
+ desc=self.name) as pbar:
+ for (inp, target) in self.data_loader:
+ target = target.to(device)
+ with torch.autograd.set_detect_anomaly(True):
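+ # Optionally make the stochastic V1 front-end deterministic and replace its
+ # non-differentiable parts with differentiable surrogates so that the
+ # gradient-based attack receives usable gradients; the original stochastic
+ # behavior is restored directly after the attack.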
+ if FLAGS.stable_gradients:
+ self.model.module.vone_block.stable_gabor_f = True
+ self.model.module.vone_block.deterministic = FLAGS.deterministic_replacement
+ if FLAGS.differentiable_replacement:
+ self.model.module.vone_block.simple = nn.ReLU(inplace=False)
+
+ inp_adv = self.attack_fn(self.model, inp, target)
+
+ # make model stochastic again etc.
+ self.model.module.vone_block.deterministic = False
+ self.model.module.vone_block.stable_gabor_f = False
+ self.model.module.vone_block.simple = nn.ReLU(inplace=True)
+
+ with torch.no_grad():
+ output = self.model(inp_adv)
+ record['loss'] += self.loss(output, target).item()
+
+ p1, p5 = accuracy(output, target, topk=(1, 5))
+ record['top1'] += p1
+ record['top5'] += p5
+ n_samples += len(inp)
+ n_batches += 1
+ pbar.update(1)
+
+ if n_samples >= self.n_samples:
+ break
+
+ for key in record:
+ record[key] /= n_samples
+ record['dur'] = (time.time() - start) / n_batches
+
+ return record
+
+
+def accuracy(output, target, topk=(1,)):
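+ # Returns, for each k in `topk`, the number of samples in the batch whose true
+ # label appears among the top-k predictions (normalization happens in the caller).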
+ with torch.no_grad():
+ _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+ res = [correct[:k].sum().item() for k in topk]
+ return res
+
+
+if __name__ == '__main__':
+ val()
diff --git a/case_studies/vonenet/adversarial_evaluation.sh b/case_studies/vonenet/adversarial_evaluation.sh
new file mode 100644
index 0000000..f5e1621
--- /dev/null
+++ b/case_studies/vonenet/adversarial_evaluation.sh
@@ -0,0 +1,121 @@
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "50 step PGD, step size = 0.1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "50 step PGD, differentiable, step size = 0.1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ --deterministic-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "50 step stable PGD, step size = 0.1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --stable-gradients \
+ --differentiable-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "50 step stable PGD, deterministic, step size = 0.1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --stable-gradients \
+ --differentiable-replacement \
+ --deterministic-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "500 step stable PGD, step size = 0.01"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.01 \
+ --n-steps=500 \
+ --ensemble-size=1 \
+ --stable-gradients \
+ --differentiable-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "500 step stable PGD, deterministic, step size = 0.01"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.01 \
+ --n-steps=500 \
+ --ensemble-size=1 \
+ --stable-gradients \
+ --differentiable-replacement \
+ --deterministic-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1000 step stable PGD, step size = 0.005"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.005 \
+ --n-steps=1000 \
+ --ensemble-size=1 \
+ --stable-gradients \
+ --differentiable-replacement
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1000 step stable PGD, deterministic, step size = 0.005"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/adversarial_evaluation.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=512 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --step-size=0.005 \
+ --n-steps=1000 \
+ --ensemble-size=1 \
+ --stable-gradients \
+ --differentiable-replacement \
+ --deterministic-replacement
\ No newline at end of file
diff --git a/case_studies/vonenet/binarization_test.py b/case_studies/vonenet/binarization_test.py
new file mode 100644
index 0000000..e9ec181
--- /dev/null
+++ b/case_studies/vonenet/binarization_test.py
@@ -0,0 +1,176 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, argparse
+from typing import Tuple
+
+import numpy as np
+import attacks.pgd as pgd
+import attacks.autopgd as autopgd
+from active_tests.decision_boundary_binarization import format_result
+from active_tests.decision_boundary_binarization import \
+ interior_boundary_discrimination_attack
+from argparse_utils import DecisionBoundaryBinarizationSettings
+
+parser = argparse.ArgumentParser(description='ImageNet Binarization Test')
+
+parser.add_argument('--in-path', required=True,
+ help='path to ImageNet folder that contains val folder')
+parser.add_argument('--batch-size', default=128, type=int,
+ help='size of batch for validation')
+parser.add_argument('--workers', default=20, type=int,
+ help='number of data loading workers')
+parser.add_argument('--model-arch',
+ choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'],
+ default='resnet50',
+ help='back-end model architecture to load')
+
+parser.add_argument("--n-boundary-points", type=int, default=1)
+parser.add_argument("--n-inner-points", type=int, default=999)
+parser.add_argument("--n-samples", type=int, default=50000)
+parser.add_argument("--epsilon", default=1, help="in X/255", type=float)
+parser.add_argument("--attack", choices=("pgd", "apgd"), default="pgd")
+parser.add_argument("--n-steps", type=int, default=64)
+parser.add_argument("--step-size", default=0.1, help="in X/255", type=float)
+parser.add_argument("--ensemble-size", type=int, default=1)
+parser.add_argument("--deterministic-replacement", action="store_true")
+parser.add_argument("--differentiable-replacement", action="store_true")
+parser.add_argument("--stable-gradients", action="store_true")
+parser.add_argument("--anomaly-detection", action="store_true")
+
+FLAGS = parser.parse_args()
+
+import torch
+import torch.nn as nn
+import torchvision
+from vonenet import get_model
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+
+def val():
+ model = get_model(model_arch=FLAGS.model_arch, pretrained=True)
+ model = model.to(device)
+
+ if FLAGS.attack == "pgd":
+ attack_fn = lambda m, x, y: \
+ pgd.pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
+ FLAGS.epsilon / 255.0, "linf",
+ n_averaging_steps=FLAGS.ensemble_size)[0]
+ else:
+ attack_fn = lambda m, x, y: \
+ autopgd.auto_pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
+ FLAGS.epsilon / 255.0, "linf",
+ n_averaging_steps=FLAGS.ensemble_size)[0]
+
+ validator = ImageNetAdversarialVal(model, attack_fn=attack_fn,
+ n_samples=FLAGS.n_samples)
+ validator()
+
+
+class ImageNetAdversarialVal(object):
+ def __init__(self, model, attack_fn, n_samples=50000):
+ self.name = 'val'
+ self.model = model
+ self.data_loader = self.data()
+    self.loss = nn.CrossEntropyLoss(reduction='sum')
+ self.loss = self.loss.to(device)
+ self.attack_fn = attack_fn
+ self.n_samples = n_samples
+
+ def data(self):
+ dataset = torchvision.datasets.ImageFolder(
+ os.path.join(FLAGS.in_path, 'val'),
+ torchvision.transforms.Compose([
+ torchvision.transforms.Resize(256),
+ torchvision.transforms.CenterCrop(224),
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
+ std=[0.5, 0.5, 0.5]),
+ ]))
+ data_loader = torch.utils.data.DataLoader(dataset,
+ batch_size=FLAGS.batch_size,
+ shuffle=True,
+ num_workers=FLAGS.workers,
+ pin_memory=True)
+
+ return data_loader
+
+ def __call__(self):
+ self.model.eval()
+
+ def attack_model(m, l, attack_kwargs) -> Tuple[
+ np.ndarray, Tuple[torch.Tensor, torch.Tensor]]:
+ for inp, target in l:
+ target = target.to(device)
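+        # The VOneBlock is stochastic and uses an in-place ReLU; for the attack it is
+        # optionally made deterministic, numerically stable, and differentiable so
+        # that PGD gets usable gradients. The original behaviour is restored below.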
+ with torch.autograd.set_detect_anomaly(FLAGS.anomaly_detection):
+ if FLAGS.stable_gradients:
+ self.model.module.vone_block.stable_gabor_f = True
+ self.model.module.vone_block.deterministic = FLAGS.deterministic_replacement
+ if FLAGS.differentiable_replacement:
+ self.model.module.vone_block.simple = nn.ReLU(inplace=False)
+
+ inp_adv = self.attack_fn(m, inp, target)
+
+ # make model stochastic again etc.
+ self.model.module.vone_block.deterministic = False
+ self.model.module.vone_block.stable_gabor_f = False
+ self.model.module.vone_block.simple = nn.ReLU(inplace=True)
+
+ with torch.no_grad():
+ output = m(inp_adv)
+
+        # compare predicted labels (argmax over the logits) with the binary targets
+        is_adv = (output.argmax(-1) != target).cpu().numpy()
+
+ return is_adv, (inp_adv, output.cpu())
+
+ additional_settings = dict(
+ n_boundary_points=FLAGS.n_boundary_points,
+ n_far_off_boundary_points=0,
+ n_far_off_adversarial_points=0,
+ )
+
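+    # Decision-boundary binarization test: for every test sample the network's
+    # readout is replaced by a binary classifier (fitted via sklearn) trained on
+    # n_inner_points plus n_boundary_points sampled around the sample, such that an
+    # adversarial example within the epsilon-ball exists by construction; the attack
+    # defined above is then asked to find it.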
+ scores_logit_differences_and_validation_accuracies = \
+ interior_boundary_discrimination_attack(
+ self.model,
+ self.data_loader,
+ attack_fn=attack_model,
+ linearization_settings=DecisionBoundaryBinarizationSettings(
+ epsilon=FLAGS.epsilon / 255.0,
+ norm="linf",
+ lr=10000,
+ adversarial_attack_settings=None,
+ optimizer="sklearn",
+ n_inner_points=FLAGS.n_inner_points,
+ **additional_settings
+ ),
+ n_samples=FLAGS.n_samples,
+ device=device,
+ batch_size=FLAGS.batch_size,
+ n_samples_evaluation=200,
+ n_samples_asr_evaluation=200,
+
+ verify_valid_boundary_training_data_fn=None,
+ get_boundary_adversarials_fn=None,
+ verify_valid_inner_training_data_fn=None,
+ verify_valid_input_validation_data_fn=None,
+ fill_batches_for_verification=True
+ )
+
+ print(format_result(scores_logit_differences_and_validation_accuracies,
+ FLAGS.n_samples))
+
+
+if __name__ == '__main__':
+ val()
diff --git a/case_studies/vonenet/binarization_test.sh b/case_studies/vonenet/binarization_test.sh
new file mode 100644
index 0000000..b0ca8dd
--- /dev/null
+++ b/case_studies/vonenet/binarization_test.sh
@@ -0,0 +1,131 @@
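+# Binarization test for VOneNet: sweeps PGD over epsilon in {0.5, 1, 2, 4}/255,
+# each setting run once with the default backward pass and once with --stable-gradients.
+# --in-path is hard-coded below; point it at your local ImageNet directory.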
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD, epsilon = 0.5"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=0.5 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ #--stable-gradients \
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD (stable), epsilon = 0.5"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=0.5 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ --stable-gradients
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD, epsilon = 1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ #--stable-gradients \
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD (stable), epsilon = 1"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=1 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ --stable-gradients
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD, epsilon = 2"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=2 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ #--stable-gradients \
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD (stable), epsilon = 2"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=2 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ --stable-gradients
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD, epsilon = 4"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=4 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ #--stable-gradients \
+
+
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+echo "1 boundary, 1999 inner points, 50 step PGD (stable), epsilon = 4"
+printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+PYTHONPATH=$PYTHONPATH:$(pwd) python case_studies/vonenet/binarization_test.py \
+ --in-path=/home/rolandz/imagenet_dataset/ \
+ --n-samples=2048 \
+ --batch-size=64 \
+ --attack=pgd \
+ --epsilon=4 \
+ --n-inner-points=1999 \
+ --step-size=0.1 \
+ --n-steps=50 \
+ --ensemble-size=1 \
+ --differentiable-replacement \
+ --stable-gradients
\ No newline at end of file
diff --git a/case_studies/vonenet/run.py b/case_studies/vonenet/run.py
new file mode 100644
index 0000000..e2075d5
--- /dev/null
+++ b/case_studies/vonenet/run.py
@@ -0,0 +1,150 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, argparse, time, subprocess, io, shlex
+import pandas as pd
+import tqdm
+
+parser = argparse.ArgumentParser(description='ImageNet Validation')
+
+parser.add_argument('--in_path', required=True,
+ help='path to ImageNet folder that contains val folder')
+parser.add_argument('--batch_size', default=128, type=int,
+ help='size of batch for validation')
+parser.add_argument('--workers', default=20, type=int,
+ help='number of data loading workers')
+parser.add_argument('--ngpus', default=1, type=int,
+ help='number of GPUs to use; 0 if you want to run on CPU')
+parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
+ help='back-end model architecture to load')
+
+FLAGS, FIRE_FLAGS = parser.parse_known_args()
+
+
+def set_gpus(n=2):
+ """
+ Finds all GPUs on the system and restricts to n of them that have the most
+ free memory.
+ """
+ if n > 0:
+ gpus = subprocess.run(shlex.split(
+ 'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True,
+ stdout=subprocess.PIPE).stdout
+ gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
+ gpus = gpus[gpus['memory.total [MiB]'] > 10000] # only above 10 GB
+ if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
+ visible = [int(i)
+ for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
+ gpus = gpus[gpus['index'].isin(visible)]
+ gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
+ os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # making sure GPUs are numbered the same way as in nvidia_smi
+ os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
+ [str(i) for i in gpus['index'].iloc[:n]])
+ else:
+ os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+
+set_gpus(FLAGS.ngpus)
+
+import torch
+import torch.nn as nn
+import torchvision
+from vonenet import get_model
+
+device = torch.device("cuda" if FLAGS.ngpus > 0 else "cpu")
+
+
+def val():
+ model = get_model(model_arch=FLAGS.model_arch, pretrained=True)
+
+ if FLAGS.ngpus == 0:
+ print('Running on CPU')
+ if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1:
+ print('Running on multiple GPUs')
+ model = model.to(device)
+  elif FLAGS.ngpus > 0 and torch.cuda.device_count() == 1:
+ print('Running on single GPU')
+ model = model.to(device)
+ else:
+ print('No GPU detected!')
+ model = model.module
+
+ validator = ImageNetVal(model)
+ record = validator()
+
+ print(record['top1'])
+ print(record['top5'])
+ return
+
+
+class ImageNetVal(object):
+
+ def __init__(self, model):
+ self.name = 'val'
+ self.model = model
+ self.data_loader = self.data()
+    self.loss = nn.CrossEntropyLoss(reduction='sum')
+ self.loss = self.loss.to(device)
+
+ def data(self):
+ dataset = torchvision.datasets.ImageFolder(
+ os.path.join(FLAGS.in_path, 'val'),
+ torchvision.transforms.Compose([
+ torchvision.transforms.Resize(256),
+ torchvision.transforms.CenterCrop(224),
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
+ std=[0.5, 0.5, 0.5]),
+ ]))
+ data_loader = torch.utils.data.DataLoader(dataset,
+ batch_size=FLAGS.batch_size,
+ shuffle=False,
+ num_workers=FLAGS.workers,
+ pin_memory=True)
+
+ return data_loader
+
+ def __call__(self):
+ self.model.eval()
+ start = time.time()
+ record = {'loss': 0, 'top1': 0, 'top5': 0}
+ with torch.no_grad():
+ for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
+ target = target.to(device)
+ output = self.model(inp)
+
+ record['loss'] += self.loss(output, target).item()
+ p1, p5 = accuracy(output, target, topk=(1, 5))
+ record['top1'] += p1
+ record['top5'] += p5
+
+ for key in record:
+ record[key] /= len(self.data_loader.dataset.samples)
+ record['dur'] = (time.time() - start) / len(self.data_loader)
+
+ return record
+
+
+def accuracy(output, target, topk=(1,)):
+ with torch.no_grad():
+ _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+ res = [correct[:k].sum().item() for k in topk]
+ return res
+
+
+if __name__ == '__main__':
+ val()
diff --git a/case_studies/vonenet/setup.py b/case_studies/vonenet/setup.py
new file mode 100644
index 0000000..9cf0ed9
--- /dev/null
+++ b/case_studies/vonenet/setup.py
@@ -0,0 +1,55 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup
+
+with open('README.md') as readme_file:
+ readme = readme_file.read()
+
+requirements = [
+ "torch>=0.4.0+",
+ "torchvision",
+ "numpy",
+ "pandas",
+ "scipy",
+ "tqdm",
+ "fire",
+ "requests",
+]
+
+setup(
+ name='vonenet',
+ version='0.1.0',
+ description="CNNs with a Primary Visual Cortex Front-End ",
+ long_description=readme,
+ author="Tiago Marques, Joel Dapello",
+ author_email='tmarques@mit.edu, dapello@mit.edu',
+ url='https://github.com/dicarlolab/vonenet',
+ packages=['vonenet'],
+ include_package_data=True,
+ install_requires=requirements,
+ license="GNU GPL v3",
+ zip_safe=False,
+ keywords='VOneNet, Robustness, Primary Visual Cortex',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: GNU GPL v3',
+ 'Natural Language :: English',
+ 'Programming Language :: Python :: 3.6'
+ ],
+)
diff --git a/case_studies/vonenet/train.py b/case_studies/vonenet/train.py
new file mode 100644
index 0000000..9c8eb73
--- /dev/null
+++ b/case_studies/vonenet/train.py
@@ -0,0 +1,397 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, argparse, time, subprocess, io, shlex, pickle, pprint
+import pandas as pd
+import numpy as np
+import tqdm
+import fire
+
+parser = argparse.ArgumentParser(description='ImageNet Training')
+## General parameters
+parser.add_argument('--in_path', required=True,
+ help='path to ImageNet folder that contains train and val folders')
+parser.add_argument('-o', '--output_path', default=None,
+                    help='path for storing checkpoints and results')
+parser.add_argument('-restore_epoch', '--restore_epoch', default=0, type=int,
+ help='epoch number for restoring model training ')
+parser.add_argument('-restore_path', '--restore_path', default=None, type=str,
+ help='path of folder containing specific epoch file for restoring model training')
+
+## Training parameters
+parser.add_argument('--ngpus', default=0, type=int,
+ help='number of GPUs to use; 0 if you want to run on CPU')
+parser.add_argument('-j', '--workers', default=20, type=int,
+ help='number of data loading workers')
+parser.add_argument('--epochs', default=70, type=int,
+ help='number of total epochs to run')
+parser.add_argument('--batch_size', default=256, type=int,
+ help='mini-batch size')
+parser.add_argument('--optimizer', choices=['stepLR', 'plateauLR'], default='stepLR',
+ help='Optimizer')
+parser.add_argument('--lr', '--learning_rate', default=.1, type=float,
+ help='initial learning rate')
+parser.add_argument('--step_size', default=20, type=int,
+ help='after how many epochs learning rate should be decreased by step_factor')
+parser.add_argument('--step_factor', default=0.1, type=float,
+ help='factor by which to decrease the learning rate')
+parser.add_argument('--momentum', default=.9, type=float, help='momentum')
+parser.add_argument('--weight_decay', default=1e-4, type=float,
+ help='weight decay ')
+
+## Model parameters
+parser.add_argument('--torch_seed', default=0, type=int,
+ help='seed for weights initializations and torch RNG')
+parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
+ help='back-end model architecture to load')
+parser.add_argument('--normalization', choices=['vonenet', 'imagenet'], default='vonenet',
+ help='image normalization to apply to models')
+parser.add_argument('--visual_degrees', default=8, type=float,
+ help='Field-of-View of the model in visual degrees')
+
+## VOneBlock parameters
+# Gabor filter bank
+parser.add_argument('--stride', default=4, type=int,
+ help='stride for the first convolution (Gabor Filter Bank)')
+parser.add_argument('--ksize', default=25, type=int,
+ help='kernel size for the first convolution (Gabor Filter Bank)')
+parser.add_argument('--simple_channels', default=256, type=int,
+ help='number of simple channels in V1 block')
+parser.add_argument('--complex_channels', default=256, type=int,
+ help='number of complex channels in V1 block')
+parser.add_argument('--gabor_seed', default=0, type=int,
+ help='seed for gabor initialization')
+parser.add_argument('--sf_corr', default=0.75, type=float,
+ help='')
+parser.add_argument('--sf_max', default=6, type=float,
+ help='')
+parser.add_argument('--sf_min', default=0, type=float,
+ help='')
+parser.add_argument('--rand_param', choices=[True, False], default=False, type=bool,
+ help='random gabor params')
+parser.add_argument('--k_exc', default=25, type=float,
+ help='')
+
+# Noise layer
+parser.add_argument('--noise_mode', choices=['gaussian', 'neuronal', None],
+ default=None,
+ help='noise distribution')
+parser.add_argument('--noise_scale', default=1, type=float,
+ help='noise scale factor')
+parser.add_argument('--noise_level', default=1, type=float,
+ help='noise level')
+
+
+FLAGS, FIRE_FLAGS = parser.parse_known_args()
+
+
+def set_gpus(n=2):
+ """
+ Finds all GPUs on the system and restricts to n of them that have the most
+ free memory.
+ """
+ if n > 0:
+ gpus = subprocess.run(shlex.split(
+ 'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True,
+ stdout=subprocess.PIPE).stdout
+ gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
+ gpus = gpus[gpus['memory.total [MiB]'] > 10000] # only above 10 GB
+ if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
+ visible = [int(i)
+ for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
+ gpus = gpus[gpus['index'].isin(visible)]
+ gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
+ os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # making sure GPUs are numbered the same way as in nvidia_smi
+ os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
+ [str(i) for i in gpus['index'].iloc[:n]])
+ else:
+ os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+
+if FLAGS.ngpus > 0:
+ set_gpus(FLAGS.ngpus)
+
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo
+import torchvision
+from vonenet import get_model
+
+torch.manual_seed(FLAGS.torch_seed)
+
+torch.backends.cudnn.benchmark = True
+
+if FLAGS.ngpus > 0:
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+else:
+ device = 'cpu'
+
+if FLAGS.normalization == 'vonenet':
+ print('VOneNet normalization')
+ norm_mean = [0.5, 0.5, 0.5]
+ norm_std = [0.5, 0.5, 0.5]
+elif FLAGS.normalization == 'imagenet':
+ print('Imagenet standard normalization')
+ norm_mean = [0.485, 0.456, 0.406]
+ norm_std = [0.229, 0.224, 0.225]
+
+
+def load_model():
+ map_location = None if FLAGS.ngpus > 0 else 'cpu'
+ print('Getting VOneNet')
+ model = get_model(map_location=map_location, model_arch=FLAGS.model_arch, pretrained=False,
+ visual_degrees=FLAGS.visual_degrees, stride=FLAGS.stride, ksize=FLAGS.ksize,
+ sf_corr=FLAGS.sf_corr, sf_max=FLAGS.sf_max, sf_min=FLAGS.sf_min, rand_param=FLAGS.rand_param,
+ gabor_seed=FLAGS.gabor_seed, simple_channels=FLAGS.simple_channels,
+                    complex_channels=FLAGS.complex_channels, noise_mode=FLAGS.noise_mode,
+ noise_scale=FLAGS.noise_scale, noise_level=FLAGS.noise_level, k_exc=FLAGS.k_exc)
+
+ if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1:
+ print('We have multiple GPUs detected')
+ model = model.to(device)
+  elif FLAGS.ngpus > 0 and torch.cuda.device_count() == 1:
+ print('We run on GPU')
+ model = model.to(device)
+ else:
+ print('No GPU detected!')
+ model = model.module
+
+ return model
+
+
+def train(save_train_epochs=.2, # how often save output during training
+ save_val_epochs=.5, # how often save output during validation
+ save_model_epochs=1, # how often save model weights
+ save_model_secs=720 * 10 # how often save model (in sec)
+ ):
+
+ model = load_model()
+
+ trainer = ImageNetTrain(model)
+ validator = ImageNetVal(model)
+
+ start_epoch = 0
+ records = []
+
+ if FLAGS.restore_epoch > 0:
+ print('Restoring from previous...')
+ ckpt_data = torch.load(os.path.join(FLAGS.restore_path, f'epoch_{FLAGS.restore_epoch:02d}.pth.tar'))
+ start_epoch = ckpt_data['epoch']
+ print('Loaded epoch: '+str(start_epoch))
+ model.load_state_dict(ckpt_data['state_dict'])
+ trainer.optimizer.load_state_dict(ckpt_data['optimizer'])
+ results_old = pickle.load(open(os.path.join(FLAGS.restore_path, 'results.pkl'), 'rb'))
+ for result in results_old:
+ records.append(result)
+
+ results = {'meta': {'step_in_epoch': 0,
+ 'epoch': start_epoch,
+ 'wall_time': time.time()}
+ }
+
+ # records = []
+ recent_time = time.time()
+
+ nsteps = len(trainer.data_loader)
+
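+  # Convert the save frequencies (given in fractions of an epoch) into absolute
+  # global-step indices so they can be compared against global_step below.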
+ if save_train_epochs is not None:
+ save_train_steps = (np.arange(0, FLAGS.epochs + 1,
+ save_train_epochs) * nsteps).astype(int)
+ if save_val_epochs is not None:
+ save_val_steps = (np.arange(0, FLAGS.epochs + 1,
+ save_val_epochs) * nsteps).astype(int)
+ if save_model_epochs is not None:
+ save_model_steps = (np.arange(0, FLAGS.epochs + 1,
+ save_model_epochs) * nsteps).astype(int)
+
+ for epoch in tqdm.trange(start_epoch, FLAGS.epochs + 1, initial=0, desc='epoch'):
+ print(epoch)
+ data_load_start = np.nan
+
+ data_loader_iter = trainer.data_loader
+
+ for step, data in enumerate(tqdm.tqdm(data_loader_iter, desc=trainer.name)):
+ data_load_time = time.time() - data_load_start
+ global_step = epoch * nsteps + step
+
+ if save_val_steps is not None:
+ if global_step in save_val_steps:
+ results[validator.name] = validator()
+ if FLAGS.optimizer == 'plateauLR' and step == 0:
+ trainer.lr.step(results[validator.name]['loss'])
+ trainer.model.train()
+ print('LR: ', trainer.optimizer.param_groups[0]["lr"])
+
+ if FLAGS.output_path is not None:
+ if not (os.path.isdir(FLAGS.output_path)):
+ os.mkdir(FLAGS.output_path)
+
+ records.append(results)
+ if len(results) > 1:
+ pickle.dump(records, open(os.path.join(FLAGS.output_path, 'results.pkl'), 'wb'))
+
+ ckpt_data = {}
+ ckpt_data['flags'] = FLAGS.__dict__.copy()
+ ckpt_data['epoch'] = epoch
+ ckpt_data['state_dict'] = model.state_dict()
+ ckpt_data['optimizer'] = trainer.optimizer.state_dict()
+
+ if save_model_secs is not None:
+ if time.time() - recent_time > save_model_secs:
+ torch.save(ckpt_data, os.path.join(FLAGS.output_path,
+ 'latest_checkpoint.pth.tar'))
+ recent_time = time.time()
+
+ if save_model_steps is not None:
+ if global_step in save_model_steps:
+ torch.save(ckpt_data, os.path.join(FLAGS.output_path,
+ f'epoch_{epoch:02d}.pth.tar'))
+
+ else:
+ if len(results) > 1:
+ pprint.pprint(results)
+
+ if epoch < FLAGS.epochs:
+ frac_epoch = (global_step + 1) / nsteps
+ record = trainer(frac_epoch, *data)
+ record['data_load_dur'] = data_load_time
+ results = {'meta': {'step_in_epoch': step + 1,
+ 'epoch': frac_epoch,
+ 'wall_time': time.time()}
+ }
+ if save_train_steps is not None:
+ if step in save_train_steps:
+ results[trainer.name] = record
+
+ data_load_start = time.time()
+
+
+class ImageNetTrain(object):
+
+ def __init__(self, model):
+ self.name = 'train'
+ self.model = model
+ self.data_loader = self.data()
+ self.optimizer = torch.optim.SGD(self.model.parameters(), FLAGS.lr, momentum=FLAGS.momentum,
+ weight_decay=FLAGS.weight_decay)
+ if FLAGS.optimizer == 'stepLR':
+ self.lr = torch.optim.lr_scheduler.StepLR(self.optimizer, gamma=FLAGS.step_factor,
+ step_size=FLAGS.step_size)
+ elif FLAGS.optimizer == 'plateauLR':
+ self.lr = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=FLAGS.step_factor,
+ patience=FLAGS.step_size-1, threshold=0.01)
+ self.loss = nn.CrossEntropyLoss()
+ if FLAGS.ngpus > 0:
+ self.loss = self.loss.cuda()
+
+ def data(self):
+ dataset = torchvision.datasets.ImageFolder(
+ os.path.join(FLAGS.in_path, 'train'),
+ torchvision.transforms.Compose([
+ torchvision.transforms.RandomResizedCrop(224),
+ torchvision.transforms.RandomHorizontalFlip(),
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize(mean=norm_mean, std=norm_std)
+ ]))
+ data_loader = torch.utils.data.DataLoader(dataset,
+ batch_size=FLAGS.batch_size,
+ shuffle=True,
+ num_workers=FLAGS.workers,
+ pin_memory=True)
+
+ return data_loader
+
+ def __call__(self, frac_epoch, inp, target):
+ start = time.time()
+ if FLAGS.optimizer == 'stepLR':
+ self.lr.step(epoch=frac_epoch)
+ target = target.to(device)
+
+ output = self.model(inp)
+
+ record = {}
+ loss = self.loss(output, target)
+ record['loss'] = loss.item()
+ record['top1'], record['top5'] = accuracy(output, target, topk=(1, 5))
+ record['top1'] /= len(output)
+ record['top5'] /= len(output)
+ # record['learning_rate'] = self.lr.get_lr()[0]
+ record['learning_rate'] = self.optimizer.param_groups[0]["lr"]
+ self.optimizer.zero_grad()
+ loss.backward()
+ self.optimizer.step()
+
+ record['dur'] = time.time() - start
+ return record
+
+
+class ImageNetVal(object):
+
+ def __init__(self, model):
+ self.name = 'val'
+ self.model = model
+ self.data_loader = self.data()
+    self.loss = nn.CrossEntropyLoss(reduction='sum')
+ self.loss = self.loss.to(device)
+
+ def data(self):
+ dataset = torchvision.datasets.ImageFolder(
+ os.path.join(FLAGS.in_path, 'val'),
+ torchvision.transforms.Compose([
+ torchvision.transforms.Resize(256),
+ torchvision.transforms.CenterCrop(224),
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize(mean=norm_mean, std=norm_std),
+ ]))
+ data_loader = torch.utils.data.DataLoader(dataset,
+ batch_size=FLAGS.batch_size,
+ shuffle=False,
+ num_workers=FLAGS.workers,
+ pin_memory=True)
+
+ return data_loader
+
+ def __call__(self):
+ self.model.eval()
+ start = time.time()
+ record = {'loss': 0, 'top1': 0, 'top5': 0}
+ with torch.no_grad():
+ for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
+ target = target.to(device)
+ output = self.model(inp)
+
+ record['loss'] += self.loss(output, target).item()
+ p1, p5 = accuracy(output, target, topk=(1, 5))
+ record['top1'] += p1
+ record['top5'] += p5
+
+ for key in record:
+ record[key] /= len(self.data_loader.dataset.samples)
+ record['dur'] = (time.time() - start) / len(self.data_loader)
+
+ return record
+
+
+def accuracy(output, target, topk=(1,)):
+ with torch.no_grad():
+ _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
+ pred = pred.t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+ res = [correct[:k].sum().item() for k in topk]
+ return res
+
+
+if __name__ == '__main__':
+ fire.Fire(command=FIRE_FLAGS)
diff --git a/case_studies/vonenet/vonenet/__init__.py b/case_studies/vonenet/vonenet/__init__.py
new file mode 100644
index 0000000..93c31c2
--- /dev/null
+++ b/case_studies/vonenet/vonenet/__init__.py
@@ -0,0 +1,85 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import os
+import requests
+
+from .vonenet import VOneNet
+from torch.nn import Module
+
+FILE_WEIGHTS = {'alexnet': 'vonealexnet_e70.pth.tar', 'resnet50': 'voneresnet50_e70.pth.tar',
+ 'resnet50_at': 'voneresnet50_at_e96.pth.tar', 'cornets': 'vonecornets_e70.pth.tar',
+ 'resnet50_ns': 'voneresnet50_ns_e70.pth.tar'}
+
+
+class Wrapper(Module):
+ def __init__(self, model):
+ super(Wrapper, self).__init__()
+ self.module = model
+
+
+def get_model(model_arch='resnet50', pretrained=True, map_location='cpu', **kwargs):
+ """
+ Returns a VOneNet model.
+    Set pretrained=True to download and return one of the pretrained models listed in FILE_WEIGHTS.
+    model_arch: identifier of the back-end architecture (resnet50, resnet50_at, cornets, alexnet); None returns the bare VOneBlock.
+ """
+ if pretrained and model_arch:
+ url = f'https://vonenet-models.s3.us-east-2.amazonaws.com/{FILE_WEIGHTS[model_arch.lower()]}'
+ home_dir = os.environ['HOME']
+ vonenet_dir = os.path.join(home_dir, '.vonenet')
+ weightsdir_path = os.path.join(vonenet_dir, FILE_WEIGHTS[model_arch.lower()])
+ if not os.path.exists(vonenet_dir):
+ os.makedirs(vonenet_dir)
+ if not os.path.exists(weightsdir_path):
+ print('Downloading model weights to ', weightsdir_path)
+ r = requests.get(url, allow_redirects=True)
+ open(weightsdir_path, 'wb').write(r.content)
+
+ ckpt_data = torch.load(weightsdir_path, map_location=map_location)
+
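+    # Rebuild the network with the architectural hyperparameters stored in the
+    # checkpoint's training flags, then load the matching weights. The resnet50_at
+    # checkpoint stores two extra buffers that the current VOneBlock does not
+    # define, so they are popped before load_state_dict.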
+ stride = ckpt_data['flags']['stride']
+ simple_channels = ckpt_data['flags']['simple_channels']
+ complex_channels = ckpt_data['flags']['complex_channels']
+ k_exc = ckpt_data['flags']['k_exc']
+
+ noise_mode = ckpt_data['flags']['noise_mode']
+ noise_scale = ckpt_data['flags']['noise_scale']
+ noise_level = ckpt_data['flags']['noise_level']
+
+ model_id = ckpt_data['flags']['arch'].replace('_','').lower()
+
+ model = globals()[f'VOneNet'](model_arch=model_id, stride=stride, k_exc=k_exc,
+ simple_channels=simple_channels, complex_channels=complex_channels,
+ noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level)
+
+ if model_arch.lower() == 'resnet50_at':
+ ckpt_data['state_dict'].pop('vone_block.div_u.weight')
+ ckpt_data['state_dict'].pop('vone_block.div_t.weight')
+ model.load_state_dict(ckpt_data['state_dict'])
+ else:
+ model = Wrapper(model)
+ model.load_state_dict(ckpt_data['state_dict'])
+ model = model.module
+
+ model = nn.DataParallel(model)
+ else:
+ model = globals()[f'VOneNet'](model_arch=model_arch, **kwargs)
+ model = nn.DataParallel(model)
+
+ model.to(map_location)
+ return model
+
diff --git a/case_studies/vonenet/vonenet/back_ends.py b/case_studies/vonenet/vonenet/back_ends.py
new file mode 100644
index 0000000..9dd4d3b
--- /dev/null
+++ b/case_studies/vonenet/vonenet/back_ends.py
@@ -0,0 +1,352 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import numpy as np
+import torch
+from torch import nn
+from collections import OrderedDict
+
+
+# AlexNet Back-End architecture
+# Based on Torchvision implementation in
+# https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
+class AlexNetBackEnd(nn.Module):
+ def __init__(self, num_classes=1000):
+ super().__init__()
+ self.features = nn.Sequential(
+ nn.Conv2d(64, 192, kernel_size=5, stride=2, padding=2),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
+ nn.Conv2d(192, 384, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(384, 256, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(256, 256, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
+ )
+ self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
+ self.classifier = nn.Sequential(
+ nn.Dropout(),
+ nn.Linear(256 * 7 * 7, 4096),
+ nn.ReLU(inplace=True),
+ nn.Dropout(),
+ nn.Linear(4096, 4096),
+ nn.ReLU(inplace=True),
+ nn.Linear(4096, num_classes),
+ )
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.avgpool(x)
+ x = torch.flatten(x, 1)
+ x = self.classifier(x)
+ return x
+
+
+# ResNet Back-End architecture
+# Based on Torchvision implementation in
+# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
+def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=dilation, groups=groups, bias=False, dilation=dilation)
+
+
+def conv1x1(in_planes, out_planes, stride=1):
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+ __constants__ = ['downsample']
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
+ base_width=64, dilation=1, norm_layer=None):
+ super(BasicBlock, self).__init__()
+ if norm_layer is None:
+ norm_layer = nn.BatchNorm2d
+ if groups != 1 or base_width != 64:
+ raise ValueError('BasicBlock only supports groups=1 and base_width=64')
+ if dilation > 1:
+ raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
+ # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = norm_layer(planes)
+ self.relu = nn.ReLU(inplace=True) #
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = norm_layer(planes)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+ __constants__ = ['downsample']
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
+ base_width=64, dilation=1, norm_layer=None):
+ super(Bottleneck, self).__init__()
+ if norm_layer is None:
+ norm_layer = nn.BatchNorm2d
+ width = int(planes * (base_width / 64.)) * groups
+ # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+ self.conv1 = conv1x1(inplanes, width)
+ self.bn1 = norm_layer(width)
+ self.conv2 = conv3x3(width, width, stride, groups, dilation)
+ self.bn2 = norm_layer(width)
+ self.conv3 = conv1x1(width, planes * self.expansion)
+ self.bn3 = norm_layer(planes * self.expansion)
+ self.relu = nn.ReLU(inplace=True) # inplace=True
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+ out = self.relu(out)
+
+ return out
+
+
+class ResNetBackEnd(nn.Module):
+ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
+ groups=1, width_per_group=64, replace_stride_with_dilation=None,
+ norm_layer=None):
+ super(ResNetBackEnd, self).__init__()
+ if norm_layer is None:
+ norm_layer = nn.BatchNorm2d
+ self._norm_layer = norm_layer
+
+ self.inplanes = 64
+ self.dilation = 1
+ if replace_stride_with_dilation is None:
+ # each element in the tuple indicates if we should replace
+ # the 2x2 stride with a dilated convolution instead
+ replace_stride_with_dilation = [False, False, False]
+ if len(replace_stride_with_dilation) != 3:
+ raise ValueError("replace_stride_with_dilation should be None "
+ "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+ self.groups = groups
+ self.base_width = width_per_group
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
+ dilate=replace_stride_with_dilation[0])
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
+ dilate=replace_stride_with_dilation[1])
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
+ dilate=replace_stride_with_dilation[2])
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+ self.fc = nn.Linear(512 * block.expansion, num_classes)
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+ nn.init.constant_(m.weight, 1)
+ nn.init.constant_(m.bias, 0)
+
+ # Zero-initialize the last BN in each residual branch,
+ # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+ if zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottleneck):
+ nn.init.constant_(m.bn3.weight, 0)
+ elif isinstance(m, BasicBlock):
+ nn.init.constant_(m.bn2.weight, 0)
+
+ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+ norm_layer = self._norm_layer
+ downsample = None
+ previous_dilation = self.dilation
+ if dilate:
+ self.dilation *= stride
+ stride = 1
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ conv1x1(self.inplanes, planes * block.expansion, stride),
+ norm_layer(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+ self.base_width, previous_dilation, norm_layer))
+ self.inplanes = planes * block.expansion
+ for _ in range(1, blocks):
+ layers.append(block(self.inplanes, planes, groups=self.groups,
+ base_width=self.base_width, dilation=self.dilation,
+ norm_layer=norm_layer))
+
+ return nn.Sequential(*layers)
+
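+  # Note: there is no conv1/bn1/maxpool stem here. In VOneNet the stem is replaced
+  # by the VOneBlock plus a 1x1 bottleneck convolution (see vonenet.py), whose
+  # 64-channel output feeds directly into layer1.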
+ def _forward_impl(self, x, features_only=False):
+ # See note [TorchScript super()]
+
+ x = self.layer1(x)
+ x = self.layer2(x)
+ x = self.layer3(x)
+ x = self.layer4(x)
+
+ x = self.avgpool(x)
+ x = torch.flatten(x, 1)
+ if not features_only:
+ x = self.fc(x)
+
+ return x
+
+ def forward(self, x, **kwargs):
+ return self._forward_impl(x, **kwargs)
+
+
+# CORnet-S Back-End architecture
+# Based on CORnet code in
+# https://github.com/dicarlolab/CORnet
+class Flatten(nn.Module):
+ def forward(self, x):
+ return x.view(x.size(0), -1)
+
+
+class Identity(nn.Module):
+ def forward(self, x):
+ return x
+
+
+class CORblock_S(nn.Module):
+
+ scale = 4 # scale of the bottleneck convolution channels
+
+ def __init__(self, in_channels, out_channels, times=1):
+ super().__init__()
+
+ self.times = times
+
+ self.conv_input = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
+ self.skip = nn.Conv2d(out_channels, out_channels,
+ kernel_size=1, stride=2, bias=False)
+ self.norm_skip = nn.BatchNorm2d(out_channels)
+
+ self.conv1 = nn.Conv2d(out_channels, out_channels * self.scale,
+ kernel_size=1, bias=False)
+ self.nonlin1 = nn.ReLU(inplace=True) #
+
+ self.conv2 = nn.Conv2d(out_channels * self.scale, out_channels * self.scale,
+ kernel_size=3, stride=2, padding=1, bias=False)
+ self.nonlin2 = nn.ReLU(inplace=True) #
+
+ self.conv3 = nn.Conv2d(out_channels * self.scale, out_channels,
+ kernel_size=1, bias=False)
+ self.nonlin3 = nn.ReLU(inplace=True) #
+
+ self.output = Identity() # for an easy access to this block's output
+
+ # need BatchNorm for each time step for training to work well
+ for t in range(self.times):
+ setattr(self, f'norm1_{t}', nn.BatchNorm2d(out_channels * self.scale))
+ setattr(self, f'norm2_{t}', nn.BatchNorm2d(out_channels * self.scale))
+ setattr(self, f'norm3_{t}', nn.BatchNorm2d(out_channels))
+
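+  # Recurrent block: the same convolutions are applied for `times` steps, but each
+  # step has its own BatchNorm layers. Spatial downsampling (stride 2) and the
+  # learned skip connection are only used on the first step.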
+ def forward(self, inp):
+ x = self.conv_input(inp)
+
+ for t in range(self.times):
+ if t == 0:
+ skip = self.norm_skip(self.skip(x))
+ self.conv2.stride = (2, 2)
+ else:
+ skip = x
+ self.conv2.stride = (1, 1)
+
+ x = self.conv1(x)
+ x = getattr(self, f'norm1_{t}')(x)
+ x = self.nonlin1(x)
+
+ x = self.conv2(x)
+ x = getattr(self, f'norm2_{t}')(x)
+ x = self.nonlin2(x)
+
+ x = self.conv3(x)
+ x = getattr(self, f'norm3_{t}')(x)
+
+ x += skip
+ x = self.nonlin3(x)
+ output = self.output(x)
+
+ return output
+
+
+class CORnetSBackEnd(nn.Module):
+ def __init__(self, num_classes=1000):
+ super(CORnetSBackEnd, self).__init__()
+
+ self.V2 = CORblock_S(64, 128, times=2)
+ self.V4 = CORblock_S(128, 256, times=4)
+ self.IT = CORblock_S(256, 512, times=2)
+ self.decoder = nn.Sequential(OrderedDict([
+ ('avgpool', nn.AdaptiveAvgPool2d(1)),
+ ('flatten', Flatten()),
+ ('linear', nn.Linear(512, num_classes)),
+ ('output', Identity())
+ ]))
+
+ # weight initialization
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, np.sqrt(2. / n))
+ # nn.Linear is missing here because I originally forgot
+ # to add it during the training of this network
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+
+ def forward(self, x):
+ x = self.V2(x)
+ x = self.V4(x)
+ x = self.IT(x)
+ x = self.decoder(x)
+ return x
diff --git a/case_studies/vonenet/vonenet/modules.py b/case_studies/vonenet/vonenet/modules.py
new file mode 100644
index 0000000..e8fe59b
--- /dev/null
+++ b/case_studies/vonenet/vonenet/modules.py
@@ -0,0 +1,149 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+from .utils import gabor_kernel
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+
+class Identity(nn.Module):
+ def forward(self, x):
+ return x
+
+
+class GFB(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.kernel_size = (kernel_size, kernel_size)
+ self.stride = (stride, stride)
+ self.padding = (kernel_size // 2, kernel_size // 2)
+
+ # Param instatiations
+ self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
+
+ def forward(self, x):
+ return F.conv2d(x, self.weight, None, self.stride, self.padding)
+
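+  # Builds a fixed (non-trainable) Gabor filter bank: each output channel is a
+  # Gabor kernel with its own sampled parameters, applied to a single randomly
+  # chosen input (RGB) channel; the other input channels stay zero for that filter.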
+ def initialize(self, sf, theta, sigx, sigy, phase):
+ random_channel = torch.randint(0, self.in_channels, (self.out_channels,))
+ for i in range(self.out_channels):
+ self.weight[i, random_channel[i]] = gabor_kernel(frequency=sf[i], sigma_x=sigx[i], sigma_y=sigy[i],
+ theta=theta[i], offset=phase[i], ks=self.kernel_size[0])
+ self.weight = nn.Parameter(self.weight, requires_grad=False)
+
+
+class VOneBlock(nn.Module):
+ def __init__(self, sf, theta, sigx, sigy, phase,
+ k_exc=25, noise_mode=None, noise_scale=1, noise_level=1,
+ simple_channels=128, complex_channels=128, ksize=25, stride=4, input_size=224,
+ stable_gabor_f=False, deterministic=False):
+ super().__init__()
+
+ self.in_channels = 3
+
+ self.simple_channels = simple_channels
+ self.complex_channels = complex_channels
+ self.out_channels = simple_channels + complex_channels
+ self.stride = stride
+ self.input_size = input_size
+
+ self.sf = sf
+ self.theta = theta
+ self.sigx = sigx
+ self.sigy = sigy
+ self.phase = phase
+ self.k_exc = k_exc
+
+ self.set_noise_mode(noise_mode, noise_scale, noise_level)
+ self.fixed_noise = None
+
+ self.simple_conv_q0 = GFB(self.in_channels, self.out_channels, ksize, stride)
+ self.simple_conv_q1 = GFB(self.in_channels, self.out_channels, ksize, stride)
+ self.simple_conv_q0.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
+ phase=self.phase)
+ self.simple_conv_q1.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
+ phase=self.phase + np.pi / 2)
+
+ self.simple = nn.ReLU(inplace=True)
+ self.complex = Identity()
+ self.gabors = Identity()
+ self.noise = nn.ReLU(inplace=True)
+ self.output = Identity()
+
+ self.stable_gabor_f = stable_gabor_f
+ self.deterministic = deterministic
+
+ def forward(self, x):
+ # Gabor activations [Batch, out_channels, H/stride, W/stride]
+ x = self.gabors_f(x)
+ # Noise [Batch, out_channels, H/stride, W/stride]
+ if not self.deterministic:
+ x = self.noise_f(x)
+ # V1 Block output: (Batch, out_channels, H/stride, W/stride)
+ x = self.output(x)
+ return x
+
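+  # Quadrature pair of Gabor responses (q1 is phase-shifted by 90 degrees).
+  # Simple cells: rectified q0 responses of the first `simple_channels` filters.
+  # Complex cells: energy (root of the summed squares over both phases) of the
+  # remaining filters. `stable_gabor_f` adds a small constant inside the sqrt so
+  # that gradients stay finite when the response is exactly zero.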
+ def gabors_f(self, x):
+ s_q0 = self.simple_conv_q0(x)
+ s_q1 = self.simple_conv_q1(x)
+ if self.stable_gabor_f:
+ c = self.complex(torch.sqrt(1e-12 + s_q0[:, self.simple_channels:, :, :] ** 2 +
+ s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2))
+ else:
+ c = self.complex(torch.sqrt(s_q0[:, self.simple_channels:, :, :] ** 2 +
+ s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2))
+ s = self.simple(s_q0[:, 0:self.simple_channels, :, :])
+ return self.gabors(self.k_exc * torch.cat((s, c), 1))
+
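+  # 'neuronal' mode adds Gaussian noise whose variance grows with the (scaled and
+  # shifted) activation, mimicking Poisson-like neural variability; 'gaussian' mode
+  # adds zero-mean noise with a fixed scale. fix_noise() freezes one noise sample
+  # so that repeated forward passes become deterministic.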
+ def noise_f(self, x):
+ if self.noise_mode == 'neuronal':
+ eps = 10e-5
+ x *= self.noise_scale
+ x += self.noise_level
+ if self.fixed_noise is not None:
+ x += self.fixed_noise * torch.sqrt(F.relu(x.clone()) + eps)
+ else:
+ x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * \
+ torch.sqrt(F.relu(x.clone()) + eps)
+ x -= self.noise_level
+ x /= self.noise_scale
+ if self.noise_mode == 'gaussian':
+ if self.fixed_noise is not None:
+ x += self.fixed_noise * self.noise_scale
+ else:
+ x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * self.noise_scale
+ return self.noise(x)
+
+ def set_noise_mode(self, noise_mode=None, noise_scale=1, noise_level=1):
+ self.noise_mode = noise_mode
+ self.noise_scale = noise_scale
+ self.noise_level = noise_level
+
+ def fix_noise(self, batch_size=256, seed=None):
+ noise_mean = torch.zeros(batch_size, self.out_channels, int(self.input_size/self.stride),
+ int(self.input_size/self.stride))
+ if seed:
+ torch.manual_seed(seed)
+ if self.noise_mode:
+ self.fixed_noise = torch.distributions.normal.Normal(noise_mean, scale=1).rsample().to(device)
+
+ def unfix_noise(self):
+ self.fixed_noise = None
diff --git a/case_studies/vonenet/vonenet/params.py b/case_studies/vonenet/vonenet/params.py
new file mode 100644
index 0000000..cdf4507
--- /dev/null
+++ b/case_studies/vonenet/vonenet/params.py
@@ -0,0 +1,114 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import numpy as np
+from .utils import sample_dist
+import scipy.stats as stats
+
+
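+# Samples per-filter Gabor parameters: spatial frequency sf (cycles/deg),
+# orientation ori and phase (deg), and envelope shape factors nx, ny.
+# With rand_flag=True they come from (log-)uniform ranges; otherwise from the
+# empirical primate V1 distributions cited below, with sf_corr controlling the
+# correlation between spatial frequency and envelope size.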
+def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0):
+ # Generates random sample
+ np.random.seed(seed)
+
+ phase_bins = np.array([0, 360])
+ phase_dist = np.array([1])
+
+ if rand_flag:
+ print('Uniform gabor parameters')
+ ori_bins = np.array([0, 180])
+ ori_dist = np.array([1])
+
+ nx_bins = np.array([0.1, 10**0.2])
+ nx_dist = np.array([1])
+
+ ny_bins = np.array([0.1, 10**0.2])
+ ny_dist = np.array([1])
+
+ # sf_bins = np.array([0.5, 8])
+ # sf_dist = np.array([1])
+
+ sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
+ sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1])
+
+ sfmax_ind = np.where(sf_bins < sf_max)[0][-1]
+ sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
+
+ sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
+ sf_dist = sf_dist[sfmin_ind:sfmax_ind]
+
+ sf_dist = sf_dist / sf_dist.sum()
+ else:
+ print('Neuronal distributions gabor parameters')
+ # DeValois 1982a
+ ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
+ ori_dist = np.array([66, 49, 77, 54])
+ ori_dist = ori_dist / ori_dist.sum()
+
+ # Schiller 1976
+ cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])
+
+ # Ringach 2002b
+ nx_bins = np.logspace(-1, 0.2, 6, base=10)
+ ny_bins = np.logspace(-1, 0.2, 6, base=10)
+ n_joint_dist = np.array([[2., 0., 1., 0., 0.],
+ [8., 9., 4., 1., 0.],
+ [1., 2., 19., 17., 3.],
+ [0., 0., 1., 7., 4.],
+ [0., 0., 0., 0., 0.]])
+ n_joint_dist = n_joint_dist / n_joint_dist.sum()
+ nx_dist = n_joint_dist.sum(axis=1)
+ nx_dist = nx_dist / nx_dist.sum()
+ ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)
+
+ # DeValois 1982b
+ sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
+ sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12])
+
+ sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
+ sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
+
+ sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
+ sf_dist = sf_dist[sfmin_ind:sfmax_ind]
+
+ sf_dist = sf_dist / sf_dist.sum()
+
+ phase = sample_dist(phase_dist, phase_bins, features)
+ ori = sample_dist(ori_dist, ori_bins, features)
+ ori[ori < 0] = ori[ori < 0] + 180
+
+ if rand_flag:
+ sf = sample_dist(sf_dist, sf_bins, features, scale='log2')
+ nx = sample_dist(nx_dist, nx_bins, features, scale='log10')
+ ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
+ else:
+
+ samps = np.random.multivariate_normal([0, 0], cov_mat, features)
+ samps_cdf = stats.norm.cdf(samps)
+
+ nx = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
+ nx = 10**nx
+
+ ny_samp = np.random.rand(features)
+ ny = np.zeros(features)
+ for samp_ind, nx_samp in enumerate(nx):
+ bin_id = np.argwhere(nx_bins < nx_samp)[-1]
+ ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
+ np.log10(ny_bins))
+ ny = 10**ny
+
+ sf = np.interp(samps_cdf[:,1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins))
+ sf = 2**sf
+
+ return sf, ori, phase, nx, ny
diff --git a/case_studies/vonenet/vonenet/utils.py b/case_studies/vonenet/vonenet/utils.py
new file mode 100644
index 0000000..dd4ae4c
--- /dev/null
+++ b/case_studies/vonenet/vonenet/utils.py
@@ -0,0 +1,46 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import numpy as np
+import torch
+
+
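+# Real-valued Gabor kernel of size ks x ks: a 2D Gaussian envelope (sigma_x,
+# sigma_y) rotated by theta, multiplied by a cosine carrier with the given
+# spatial frequency (cycles/pixel) and phase offset.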
+def gabor_kernel(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61):
+
+ w = ks // 2
+ grid_val = torch.arange(-w, w+1, dtype=torch.float)
+ x, y = torch.meshgrid(grid_val, grid_val)
+ rotx = x * np.cos(theta) + y * np.sin(theta)
+ roty = -x * np.sin(theta) + y * np.cos(theta)
+ g = torch.zeros(y.shape)
+ g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
+ g /= 2 * np.pi * sigma_x * sigma_y
+ g *= torch.cos(2 * np.pi * frequency * rotx + offset)
+
+ return g
+
+
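+# Inverse-CDF sampling from a histogram: draw ns uniform samples and map them
+# through the cumulative distribution onto the bin edges, interpolating either
+# linearly or in log2/log10 space.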
+def sample_dist(hist, bins, ns, scale='linear'):
+ rand_sample = np.random.rand(ns)
+ if scale == 'linear':
+ rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins)
+ elif scale == 'log2':
+ rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins))
+ rand_sample = 2**rand_sample
+ elif scale == 'log10':
+ rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins))
+ rand_sample = 10**rand_sample
+ return rand_sample
+
diff --git a/case_studies/vonenet/vonenet/vonenet.py b/case_studies/vonenet/vonenet/vonenet.py
new file mode 100644
index 0000000..3375ec8
--- /dev/null
+++ b/case_studies/vonenet/vonenet/vonenet.py
@@ -0,0 +1,91 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from collections import OrderedDict
+from torch import nn
+from .modules import VOneBlock
+from .back_ends import ResNetBackEnd, Bottleneck, AlexNetBackEnd, CORnetSBackEnd
+from .params import generate_gabor_param
+import numpy as np
+
+
+class LastLayerKwargsSequential(nn.Sequential):
+ def forward(self, x, **last_layer_kwargs):
+ for i, module in enumerate(self):
+ if i == (len(self) - 1):
+ x = module(x, **last_layer_kwargs)
+ else:
+ x = module(x)
+ return x
+
+def VOneNet(sf_corr=0.75, sf_max=9, sf_min=0, rand_param=False, gabor_seed=0,
+ simple_channels=256, complex_channels=256,
+ noise_mode='neuronal', noise_scale=0.35, noise_level=0.07, k_exc=25,
+ model_arch='resnet50', image_size=224, visual_degrees=8, ksize=25, stride=4):
+
+
+ out_channels = simple_channels + complex_channels
+
+ sf, theta, phase, nx, ny = generate_gabor_param(out_channels, gabor_seed, rand_param, sf_corr, sf_max, sf_min)
+
+ gabor_params = {'simple_channels': simple_channels, 'complex_channels': complex_channels, 'rand_param': rand_param,
+ 'gabor_seed': gabor_seed, 'sf_max': sf_max, 'sf_corr': sf_corr, 'sf': sf.copy(),
+ 'theta': theta.copy(), 'phase': phase.copy(), 'nx': nx.copy(), 'ny': ny.copy()}
+ arch_params = {'k_exc': k_exc, 'arch': model_arch, 'ksize': ksize, 'stride': stride}
+
+
+ # Conversions
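+  # ppd = pixels per degree of visual angle; dividing sf (cycles/deg) by ppd gives
+  # cycles/pixel, and the envelope sizes sigx/sigy are expressed in pixels.
+  # Orientation and phase are converted from degrees to radians.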
+ ppd = image_size / visual_degrees
+
+ sf = sf / ppd
+ sigx = nx / sf
+ sigy = ny / sf
+ theta = theta/180 * np.pi
+ phase = phase / 180 * np.pi
+
+ vone_block = VOneBlock(sf=sf, theta=theta, sigx=sigx, sigy=sigy, phase=phase,
+ k_exc=k_exc, noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level,
+ simple_channels=simple_channels, complex_channels=complex_channels,
+ ksize=ksize, stride=stride, input_size=image_size)
+
+ if model_arch:
+ bottleneck = nn.Conv2d(out_channels, 64, kernel_size=1, stride=1, bias=False)
+ nn.init.kaiming_normal_(bottleneck.weight, mode='fan_out', nonlinearity='relu')
+
+ if model_arch.lower() == 'resnet50':
+ print('Model: ', 'VOneResnet50')
+ model_back_end = ResNetBackEnd(block=Bottleneck, layers=[3, 4, 6, 3])
+ elif model_arch.lower() == 'alexnet':
+ print('Model: ', 'VOneAlexNet')
+ model_back_end = AlexNetBackEnd()
+ elif model_arch.lower() == 'cornets':
+ print('Model: ', 'VOneCORnet-S')
+ model_back_end = CORnetSBackEnd()
+
+ model = LastLayerKwargsSequential(OrderedDict([
+ ('vone_block', vone_block),
+ ('bottleneck', bottleneck),
+ ('model', model_back_end),
+ ]))
+ else:
+ print('Model: ', 'VOneNet')
+ model = vone_block
+
+ model.image_size = image_size
+ model.visual_degrees = visual_degrees
+ model.gabor_params = gabor_params
+ model.arch_params = arch_params
+
+ return model
diff --git a/case_studies/vonenet/vonenet_tutorial-activations.ipynb b/case_studies/vonenet/vonenet_tutorial-activations.ipynb
new file mode 100644
index 0000000..cac6d51
--- /dev/null
+++ b/case_studies/vonenet/vonenet_tutorial-activations.ipynb
@@ -0,0 +1,352 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 83,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import vonenet\n",
+ "import torchvision\n",
+ "import torch\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 84,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Neuronal distributions gabor parameters\n",
+ "Model: VOneNet\n",
+ "VOneBlock(\n",
+ " (simple_conv_q0): GFB()\n",
+ " (simple_conv_q1): GFB()\n",
+ " (simple): ReLU(inplace=True)\n",
+ " (complex): Identity()\n",
+ " (gabors): Identity()\n",
+ " (noise): ReLU(inplace=True)\n",
+ " (output): Identity()\n",
+ ")\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Load V1 model\n",
+ "v1_model = vonenet.get_model(model_arch=None, pretrained=False, noise_mode=None).module\n",
+ "\n",
+ "# v1_model = vonenet.get_model(model_arch=None, pretrained=False, noise_mode=None, image_size=32, visual_degrees=3, sf_max=5, stride=1, ksize=15).module\n",
+ "# v1_model = vonenet.get_model(model_arch='resnet50_ns', pretrained=True).module\n",
+ "\n",
+ "print(v1_model)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 99,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_path = '/braintree/data2/active/common/imagenet_raw/val'\n",
+ "\n",
+ "bsize = 16\n",
+ "crop = 256 # 48 256\n",
+ "px = 224 # 32 224\n",
+ "\n",
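+ "# ImageNet-style preprocessing: resize, center-crop to px pixels, normalize to [-1, 1]\n",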
+ "normalize = torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],\n",
+ " std=[0.5, 0.5, 0.5])\n",
+ "dataset = torchvision.datasets.ImageFolder(data_path,\n",
+ " torchvision.transforms.Compose([\n",
+ " torchvision.transforms.Resize(crop), \n",
+ " torchvision.transforms.CenterCrop(px), \n",
+ " torchvision.transforms.ToTensor(),\n",
+ " normalize,\n",
+ " ]))\n",
+ "\n",
+ "data_loader = torch.utils.data.DataLoader(dataset, batch_size=bsize, shuffle=True, num_workers=20, pin_memory=True)\n",
+ "\n",
+ "dataloader_iterator = iter(data_loader)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 100,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "torch.Size([16, 3, 224, 224])\n"
+ ]
+ }
+ ],
+ "source": [
+ "X, _ = next(dataloader_iterator)\n",
+ "print(X.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 101,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "torch.Size([16, 512, 56, 56])\n"
+ ]
+ }
+ ],
+ "source": [
+ "activations = v1_model(X)\n",
+ "print(activations.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 102,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAGKCAYAAAASfgYQAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOy96a8lSXYf9ovIu7yl9q33vXump2c4O4dDcRMl2jQh25QhyYZhAwYMGP7i/8iAAX8QDMiWbNmiFkKUSIriMjOkhrP0dM/0Vt1dXdXVVdW1vOW+ezMj/CHOOXEiMm7e+6peLT2KA1TlfZmREZGRkRHnnN9ZjPcelSpVqlSp0jKyD7sDlSpVqlTp0aa6UVSqVKlSpUGqG0WlSpUqVRqkulFUqlSpUqVBqhtFpUqVKlUapNHQxed/63EyiTIwNvy0De0thq2l+lZT1g7vP967pA4DA7G+cobKeFXe0DUP1/m0WarKOQB0zXVcR7zXt3TNmVAWgOe6Wm7HY8gKbOiaMTJUMMbwSemqbRoAgB2FI6wJ/wA0fI3GzVoLo34DgBlbKW/42AAmu9fI0UjfrPz2sZyJ12UQpRSNFTwcDZbxPH4uDCwA1zkZF0+/XddxJeHg4pj6FvGa43dKp1onbXlH9bfUdhfr6OidhTb5fZjYR3lFJnlOYwCeRjDxGp/jcYQxanz7YyVzm865pWNKdRXO+exUqIi/Bcjz0Q8Zl67jieqkvEcYb9d1cTx4HDsndcT6qBvey5yVcVfluN+lOd97JiBZBUrXh+qI76VwjfuDUpv6mXgO9O89LOmW4rdMJ6w6b7My0OPW9esdGJfiOK/R11XPaFycE7FsXB/1EQBuXLxTbLZKFJUqVapUaZAGJYq4pXkwz+BIGtDMmVEcmj4urzdy2vy3YeaHubnYJBeHNwaWTzJHyuVbD0e94t3PuyhROGbkVTdEUrGRe9Lc1X+sxM/uvOtJFN574fjjDUjZEihuj69n5XMO13svQmo+9B76fcR+xIrX4b2GKXnf0lTaEWOM4nBj0zkX/qhR2i/Noa+e46Vn0udc6ZGze4rfkpYe8jpK41iQMmIl6reP3/Jhv+Ao2Kzm/PVciF3sn3v4JCIk0m9HT2+DVd/Q8EaxTgcKDfSnZbkferPxvMnwwyBOIKPH3pp4E6Ko6UZhswg3R1ULqzZEFeNVfaROk93D9T+MR+/FP1xKVYJq8/DpJEwW29657Lf8zCcyfZRL3gGfflDrs/c+qhYK5x4dyhi2ZPzUt3HYWo/gOddmJg/Rj1XqscN+w2XVWsYIed9b0x7JuSB7aWFVLjAQy6iqnipVqlSp0iANSxRaHDDZqXgFCscNR71TpRIhnUp3XQ0MpbhqCiSZEmil2rZNWoWBCZKJIud9VE3xNRuByZzTfTTFyQdDRoGx+t3m41ES8qNxQrxmtGDh03NOAdEmK6/rS49H916OQvX0qKqglqme1pnWq1RPUQOwXvliG/l3vmYdyXgPPMu676O0fh3GuCWZH4/IFOCxLfbnEJ2sEkWlSpUqVRqkFWA2X/bRZNKkHJXxRrh7S9esAgKYu/fGi1RhM3s446OMkZoOphyadx4+YUsBY6JpIpvwQjg8EzkdOmetElq4nNUcEv+Oppqae5S2s90456x0cSRceB/biRyMYrON+k1HY7JbTbxHTI55CJxZaaa8lES3HRvT2EOU5ui9eyOGAbmk5wyA1GIW3scmHI8zvDLXo7kjGIWBcfSeCVy33qCHficMXgbYGVMYeTWm6qpwYZn0EGRRGlM1tnG4oiwbzluZz9rM04oJpeouf1fySNGowtBvkYS1NCXTQ4HCVp4uHJ2S+dRcY6OEIQ5+pTTNJqJ59SsonWL87Sh8UgoWzsm1wqmkv+W+r5IwnIlqDZ/9SjQj2QcZ5vVQ3el3nuAcpt9Xp+dkDi35Qn8Kj1uG9HkhVuVWvOcqUVSqVKlSpUEalCjYX8eY6DCmZAR1zLkbJzuZUTtm3LNCXSUzNM1d5xymtuISKyaFaWhIBQBM42MbXM4q7l/ORc6xoUo64XiUJQOb3pX5m+Vklu3sdLnHLpTII+dIUDBx1FeGLEz8AOdV7GsuEEEzdJFb7yu+jeIUl1fnl5wDWGqNv2PbhC35BNgqdrz8xuKYmkQayIE1/dDMPTq54r3Vl9Q78OpZlHSipnPsSj5uUVLMBcn+aKZ1LZdd+2VyeuD4ih4QebJS7/rnkr4WbolGXyukorylAsaazrjexQKtO46FD0v6EcuUpOHD1J9CcOuNr6bBjaIRXZGNXrJ0B2t5YI2IaiJm2ygqeSWzHFZ84YnAtvxA9GKOBtxG6s7VY3CpTwXAk4afi8V7Ku49TJOpBbwpLBjDk6DvV6JUEFImevkeJQAWncLLlZU/mvIkCeqgbBMpgM7Q5bLNPWhJ8mv9cnB9P4pk8cvNb++B9HjnY++hnk8WGn0te5akT+kN3ve3p5J9v/YhyftYIqM35gFKQOKCAcID3hKWUpyqJjloGo6KYGCihcrPDfH7cz56VdvihjVcC5DO9WKpFYtPVT1VqlSpUqVBGpQoRgwqdh6NqHpSds/DwNN2zkCzN5FL4J1Ic0zr7IapqZlJzuuKBfC0sWbBtA2iKCEchxFwMOdCjFccplXcoZYuAAFn8/4OPZzNdmyDQhygI6DINPiUq6dzd91mgRvp19/nWozTKqRCddqBL7soHuAFzvywlEoKEajtt65UZazmUqKOzA/RVJlYr1JvxkfJxmOJuXUatyi9P5o4Ur/7Gscy8ZTUU/jnwNR7SKVajBN1yDn/2R+hMt2LM3GVKCpVqlSp0iANShSb4zEAYDabYUy/JTgoc/QmwgVe7fRGzGhJt+YVR8nmeyV7LqKU444gcm4GqkFttliMIDjQcRynxkm5HDAQM0itO+dgoh4xwqmOGZF1XUsUPY7HGInqKrdpHM67/skjoBLnVeauypxGEk20cD1ibYWou1pvX4RF1Jgjdbhbhw4NUOrwGwVMKHZXYVjajBEZRtG7Uc1/Fe0Ydk0uLouftUqffAiBokiFCBQPkaIeXf15yCpY0xC/7RhuZf2xDVWt7sCDkMyiQVBB+5DHW7sLOoykNbhRfPvr3wQA/PBHP8Cd23dC5Y1aAADYJk5aCSCn21eevQ3peqLdVEkEj796D6KL589oo5YpOd2oAtwmq54yYNJ4r+rw8bZeN/ViQmf0xBl6AcVLy8trlcQ639EqMPtuqai6WGOuljYb/Z7S+nz/HHA0H8VRLYuaWcCSd1FiKA7Z/CqLnkNW1j91j1XeD4obcqT1vappPVIGIr24Yw+Q1lF7pRv+w1V4VT+KSpUqVap0TzQoUXzvL78HAGgMsDGdAAD8OLDos/kBAKBrvaB44xF7z+rdKVxrxiPhDJ2yQQd41013/2X72zLQ2DsIiq05VpuJ/p330j+jAG5uNYLZdD+M+JOI96y1co9TgKt0vG/rKNe1tzRLYDFxkVbdMce6HLDz6HMuunwONq9izkrAa1TXaHEte4+JNEU
lWEWpgGht9uoy6cE4L2PJIHaMHqvViXfHeRXDWZdmmY9SnDYCkI4j/Zm8q4IpcZQC1Xs5JDvvXSYqJ0JYn3NdFU1VneQbBu8d7lw6ufT960aKTVR7KH/7q2NORTVvPJe1o+8rSC+5scPdUD8Cg6ZUxZase4XvdoiGPOeTdzDoq7Z+e1WiqFSpUqVKgzQoUSzIDnR+sBA1sUUAtV964SUAwM6d27h+4zoAwJNTnoNHMw1cckfs06JtVS6JfAfTeITmkJbvdD5jy4zCKBKz1yyejW0MBK8QyYdj6XgBsZmcMdHcVmxyrdxrhWuKe27OU2Q9lWOf4ypIFCvMbu83lRzdhriYqBem8fFJoXBwUepMY0ghucepgSw5/h2G83PeKY4x1qF8Srkp8GD3JCxVIuHaeuPBEgbu+r0l49l7B0ejz5aurfTcf/TJyZtT0knEtftUlIDuZw9L5PGwsYl1aXCj2DmYAQhqp67j/MVh83j7nXcBAOdPn8bJ6RYAYLa3F260BvNZKNdMwxfYwUgGOpMtpcbYQ4t7YtsuHzXUBqTqylRPxqucyNwLnlBWLWxeLdSycHFbRgVe401GWVgUwcN84S9ZrvQXB33pQc3jPIFQcq20gAwsKiXv5GWt9hZE33+fpQ3rKID7xKKt1Dfpo9oE0q71yKjLj+RyUFA9xUvLVRufNVoZolxOLi+/7P577Bke8RkiVFVPlSpVqlRpkIbDjG+EyzPXcRw/NOQ0MSau/OrHV7FBXNaUQNnJ5hiWyu0tFqGhzRHmhHAyiJsDzYDiZDRw6Pu/ow+G3okVV09/m1zfYPu7oyOTXwujYgeyesrIswsYDyPOI1y/kYCBWlJRaqMilmiyYzz/KKuexOBAAYJDsZjkTSk1U1+V1C+XhNQr1H8YScIa2xtDo0Lf+xIwXxz0tB/GxbmYJ3lSxXVggPV8IPSz5XPB+xWBENejnyfVE9M6QH7JpyaUu799KxJPQFOadw/mHazzHVWJolKlSpUqDdKgRBH1tj7GKqLoqgddCwAYTxoBlg8WQWLo5h7HTx4HAExI6X9z7w4a9tIehWadC+WtNcJ9S7IOvckphy0x9/LJIblBR82UCIxKouilYtVJaKTN+ENCW7O3tuJwY2IV1d28sjU5ldSDmvuWnUjv6AHhR0IKI8hr9fJfduTfLn0/xeq1m5GA2gr8Z27vCBztdKtDp5M5kYQyh9Llx5NrQBRDra68mJi7DuEFCmZYVt0yyDQ309XlevDQPbyKu5UCD9nKXZYtaSTWoMJjFKzEy633FgggCbMtc5Ktc9Z8tgF4bWUNK15LlSgqVapUqdIgDUoUbAFkVNrJ1pInFUkWHTw62g1HhDksABzcCSE/jm8Hi6jHT57D/v4+AGD3YCfUQfGjFosOZhLYddb3LlwnJosc7XVkrEg24uimuD3T4wiMmK1ymtSwm3M4D8JKOMdFEx9aSzaSNEcSHGlO26f9Qcw9kTD7YiXFdZiCnpT/dso5jUMSQKXQNKoOSgIl12K/1uHeQjRTJP2IWILrYzxOh0agNjsnDnYq8Je0weWdVzlRo9BC13wiyaT9ODxHmpdz0Oax+VgpfEH1uz+foMKEUR+181bmxGWV+a2WVKXeNRzdvPcyFzVmo8QdOhOTheWmnx5evY4oLeUoRyJ5FMoPMKxR6l/h+Fe636zBMZekkjIesbIqJCtEAVAaSjLWr0GfU/X6OMf68003mmoEjFF9kvJuSYurv4f15azheoZVT4VzMrnUB9Bl6iLrnMR1urW7CwDY29/HeByaO7F1LJw7CBtH5wDT0kQj724LI6A3t9kZEz16uS0Z57jgSd+8l80gGYYcfORkRS7mUDbKn8LydV7t2/hJ5coZ773SF3HXCi/BDFxf9vLvg9i+6mMuAcuHrb8Ucyevpa8A+OxRT1vjPfLhfeAZ5MDMAM95OnmUWj1Fh/VvqZSTAb8co9a2ZfSgjA6q6qlSpUqVKg3SsESRsuHh/4IXKp/pTJQKdL7t8MPBEQDe3glxorY2NwEAk2aMgzaY0Tq+cdygpZs7Pnqg9QyAJ91KgFetGelxUNo0jkVeVpsYL2KwUaoiI89C1xoTzR0zRNA7o8aNcylrkFqrjcqcgtHllIPe/aJc5TRsEquv9+vIBQ8tUQx6GycqrfTaZ40/TVVK4XikXLbJwPfQQKFcVNFE01q+1Jd2Kj0qZLLjw39RVaKoVKlSpUqDNOxwR/uIswr2kmQSEStg/JL3v4V3wsE0DYXw8B6WOPIxAQB3ZkGyGFuLzelG0nLnOixIeuia0M3OAguJsMrZhGLfREes9f/ZbqxjSEUogZ4l2TajxtkHzF3tqn24yyspxYvzndI1mlS6SP2pzNJrR01r6TQVRz+EUQw62SlJISLWLp4T8DtKDTHj6GdTkoj4b/hlrYW3mWR41G0WJM5SFOBIPLbmaBjVDMAf7OMjQquj2T6gjqygdTAK4MHgFCs2ikAGBh11piSC5GB+0zRiA99KfmojHw1nyZtMqHnn4WYUV2oUwplvTiaY0r0z1yZ1AYBjKyZlhRLjKRWeQaQ4IwC3kMR+0rYg6sPjb9HFK078J1KRPvyZfzwmNXvh1tb5gNQm8iDUUEtpSJWkfhcx78K13mLmBwDuISeBR4nWjCl03z/sfIF5VFa+SmtS/hEVMkg+YKqqp0qVKlWqNEiDEkXk0G2wQwdUNCRFmV19pxKtCLBrvJjRmjGHIA/lJsZgg9RRrG5ysxanT5wAAExd6Obt/V00xCUdsEk+m65aK92w4gCizXnZnjZqQLwkMFKgH/soiM8EoppkxNU6YdokBDnXgZiUJz66F71KAtgWuE3+W3KBSxnblyiGJKdlQHkh7s0QmJ2fc85LIh15lM4NSBSmyNG6PB0tTF/K4KqUh/aq1JLLzoWoX/fGlQUQXkufaRulUOhe33uXNJgSeKDaEhTq1fvp5xBPdLb9uoqe4SZevw90mCQ+655fZsJbinKwrP2ShFhMpDRAy5+N16+4Vtzt/EkUHWu336cqUVSqVKlSpUEaNo9lztt4ybcQYy0pULOwXeUerJqDEemEyrQWWDBnwlFZPXBr9zYAiBSxMRqhpfqsC3sc4xbtooMdkXe34CNdVNM2qj/cVrZNGngBpROJIjeBtVoyYOBSHjMmUJJ6zcPBFQ5JuWdxem45YD3Il2h8QUkn/eC/vscBP3KadYNh1jnawj6I3lBbR1DFUCypzzAdZb6S/9hpWPXERioqZEAp3Y6Qtq/PZB5joiTVEJotfgnGis8EL+gOFgekhhrTtYXrRO3RUMFtCgNiRhvYX8wBAC3piiajsazxvME4eDiTblSi/jBenk82DBefma27Gh+BZQa1jd432MKJx8XZGHJcD8hDoP5iUBBrS6onCd7nigvKMtWTcX2VFpT6SjYHb3ptWVZ3PSL2T8mGL/ySDhB5f9UwqiNr+lGU742H1aqnz9rm8aj09976ka4pj8L0r6qnSpUqVao0SIMSxYg9opUpp/MZy7iMBE+m3dHFuExjAXuJg2w8PCUPit7YMSggSwWdB2xHqh4CmD
sKbT4Zj3CSYkgddEGy2F0ciG9EDOxnMLIj6iJD3S24tz0LVAMJgGjF38IItxvzgEdWLdZh5ZLzusJHR61S8q4ugtn87F5dd+l95fr7EksxGJ6q1/jYVqU+rWIwYzDD9Qbw50n1NGSO/NlTQYn+Hg97xagSRaVKlSpVGqRhh7sFhfce2chF8iZXci7S+nqvfiOAvcpqNT06gLBpOXbGC4jtfOD4GxhMKOlRR9LACGwue4DFQSi3uRVCm29sbWKPvL9nhF90nQdHamoNOfIpD22xomVzV2MkhlUCXLtUynAKzI6BohI4O16nZ/e9gcCh6W55JJEQENNqxnNUJhE34rHI24glX6ZfVWbA2sxVwUJ3/QRCIi4O2Av7iD/JJSx5liWDakzZxPahcKyZM13JoERjJr3osQqZLydEKhitLCnyIOiwTZVMYO//e+rP71W0rI+6jlBGosut0b4pnCs1vl4fgSpRVKpUqVKlFTQsUczIIW5jgpa4+m5Mewtx3M56kSQkoY6Jpo7ikNbFqKqLDAgwxopemi2tRsbIb+bujQU6G6LMzkah/JywDQsLS4DE/n6QHjZGGzi5GZz2jtltAMDezh7cgnCQSbCY2mF8pJuLFMO0gIvbKT279YCEv6KHWoyIK3eQ+EUMgTivslaISr6JWAbjHA2PKXqSDRATF2nHO5OJI9ySN5DB9wpH8Sw90NF5JxKFi6BDKO6NepbYDo8zJyIyLpo1O5XaVPrFsa+Uox4nXPKOPSe9ShrEz8l/xvEzfbvaxGQ7N3nmuixPJn3FmPhY8WSqGkbK9cXmGX+yEYdDyrUbOBkroxNt2bzR5VSKPBykQJUECiThm2ycRdJSAiG/Ym3tpsRd/doASF+X59ZYLpX0cc0+ExuknVTeNgVpIPk7b0o7Nso7MIMSxHBoFdXftV6VwvTYYk8ntOnVH8ja9fj0cFvEO9M2dX95HupGlmPK6zgFMg1uFAsyY13s72NEPgqycPHReVEDcaY4E58geQSfLWqlTsuGkRTrl+dAhYm6hBujlzVrZxgfhEc8uX2S+r+N/d1ZUte0CwEJN8xUQqF7qt9ahwNZXKkfIy8LLX95DFx7ExdhNLyxWHTSN34infKIn4XNavuLw2HF5XsRruOwK2WNUh95cW2nYj4uBrmZbAJ+S11LgNZMfZVeWOOJinqsowMBU/PR+F4eRDDHQ1E2Z0rZ4eiv9HpJF6dVdz9HoDfT8rG5S8p1zPdAqepp2bkH8y6q6qlSpUqVKg3ScM7saYjkulgsMJ9TdFcEdc3YhOOosViIY1TMViQqEHFWizxkVBVQO05xrkno70yN4SJol3NxxmQFAdhmhJ15SMW6T/3fmmxgcjxIEMcnAfTe3wlSxPxgBjaVXbigvlqMOrQmqLtarcrJHK+MoPBd5OhUXCDS3EUjAA9IECnLqhzuvpX0rJqKMWVyNZ6N/fKarZdjes54H6W4nFN0XjrFUoTrnFJHeTn0TFtZ06Md7pxSz2USpC94cMcHB4YFCr9uwXuiFNxPZeXw8xGVLNRvDWqXONZ8+PScy+efPne/aVU7PZXZIetapaoq1VGS9kUzMtCRofD8eZ/ycg9LqqsSRaVKlSpVGqRhMHvMuIRHR6anjhzc2IFsNG4wpnItAZPeeAEJtdlelCjunYzo85kLsLFeZu59BzMOZzkm1I7r0CyCyewxKn7m2GOh/GSB/f0QX6prgsS07w9ieBFH6VrhYSyBsQzqS+uNJGjSTKegMx1fUwgjmxUzN7IE7Mw5kVWmpQrL1JUk51yJM3Gqney6Ue0nJtAsVfac6+I1jWn06lXnDs8sPUwW/tHV15e02JEjPTx3+llwWCuhoHmvw/Nm0l8YkHtrO8H0hsutQ+sA7g/qnQxuFHNKGGQtsEG+Cc0sLJbdPKhm0DWYmCkASPjw1nfR+kBN19xOX4Np8fnTgeDryC+LWB3tiyXUtyxITiwLPKl5Ogd4UiHdPrgZzu2FTeSx0+dx9uwTAICZC5tJszXFxSuXAAB3Znvh+bpO2uhIVXVg2bvbx6RGyvrJNqnwpiery8Kd98sFgNz2TXqEUvv4MAhF2/o1SKuKSjGc+J1GKykNVKeLvfdQcaJiXb1sdn5dP+KhjpvVu+c9UJI8ytr+uUd4IV1vU4iRCXpqppX33l9aezNLolih9ztUJv9lKiLRxR2qb6UMguvQatUTl+ufe9BUVU+VKlWqVGmQhsOMMxCNCFRPKVprQztx13WYz4OU0ZhQ3Wg8itmB6D4HRG6TOOeyGTFzPq7vI+A9ok0+1U4cZIIvqyRCHThSLVVlgRahv5Z8H/YRJIXLty9hjuB38dSTTwMAJhubokZzNFwfvH8J+ztkYmvYtDXUMUcrz8wcQ9M0sd80pl3nRdphQcGVbJ21/ohVVE08x5JSlKwUsJZx7UG8jmBmGA4j2HRkxuO4s7STJuzJI78mrYZzkh87clwi+ykzTFvg6DKNHGDLJqg9jswMJHhZAlb2PGPV79zOfXk6U643qkHze6IPRLxX199TK67JOg7FNEpGtldvlOK1tDEEtK42t02pl4hKSV+6nXVMwA+rYtH1lvoxVG9JVVfqT2ms5N0OSP+r+h1/c1sYPPcgqEoUlSpVqlRpkFbko4jcp3hVcz4IirlkbIOOsIx2FnCLsR+jGXOmINrVOwc7YjNQ5qpDEWsVt6LajMwPiwrDD+MLBTJIIznXkUSxsKHfxne4eudjAMDexTsAgHPnnsDOXrj+1FMvAAD+7u/8Lt59610AwOuv/zg8+yJIKWM7wrwlc1rxnjXo6LnG4wk9s0PLwHYPmOpzpF4/Q8YJpg/MdapIuLGyiBMVuCCNhwCEUbBJa8diVfQ+1easpciz3Ho/30XsZ1oH3fHwVOGrqcjKrc/eBWfm5XjBgwOM+9x9qf0HgUuUHSzvjmyB/e6nki2nQn2Up93DpmGrJ9FJQL4FDr/B6qOA9JNHNhVfzA4w6khFNQ5N2GaEtiVAmWKEGwaavemD2TYFcbgjcf3JQe/lImJWhRBvLAsKDoiuRdOFhfHmnQBmL7oWu5Sg+8q1qwCAt376Jr7+C98EAPz27/w2AODt998GAPzsrZ/h+s1PAQAb0zAu87aVfrO3u3OAtWxVZuWcHgL9fGXficIDyyZS2jb1+rx8w0jcEnqRL7wCrNVG59KXlbzPfPPwqo0c6FYPpjfJEvVUFgNr7FEvAqX8ymveKb9KC/TPo/fzg6RhSyH0riXnxXJvuNw6tG6Y88/Ke66qp0qVKlWqNEjDqqcupgwVfwFWH5kIaDZk+jliCaE1aA/IjHYRuPXRdILRmH0PQv0d7+Auspg2Wrsigtj9vokxJZuiphGS4lF2c/SuOkKpx01QB02bBm0bQOmOfCb25/twI3p2kjxutjfwr//8XwIAtqfBG+PLr30DAPCf/K3fwocfXgYAXL56BQBw6ePL2NkN9XKY9NEYWCxY98Ydo2dxvicneXgFbEdJT560p68xKKWtFe9rdeR4eRK4TiQFfV96WxiQ2F/xo8jFIqVmEu7JxfqkPKCCAsrj9frxsCmXc
oKK9DDShZdyD5vDHJK2mR5kf0pB+Y6Siqo23f7S/iyv627bXaeeR0naqBJFpUqVKlUapGHzWOYwLURX3WXRUjV/ZTmxT2MxItCWo7F2iwUaS3GiJMpsuK/1XXTKEtM+U5Yklmyyyno0dbZJ+omEbWjAElC8NKK+LQigH00aiY4L8vI+WMzQTCmkuQ+g97//sz8Mddoxnn76WQDAs88+DwD4wmuv4uIHHwEA3n4vgOA7e3swJKm0hFtw9NiymSz/hyThkZQ0/Eym95wSigg+Rq8VCSHiCyxZuI4BbCccP1/T0WPTNKZZW05fY/GhUJ5v817ulXDqvVEo01oYxRFwZ2WzyoKoOkDhMVl6vnfz2KMkHf/ps053l7hoGO9cjwpzUTCPvI/30MwDpipRVKpUqVKlQVrhcMc/VIIcUUFLYgWRAlrmag1AQWbhCdMw3qHtgt5/Og/SxgZx752x5KgWjQslUZIAACAASURBVGw6E0NhcFypxiOGjzCc64F7acXRjpMP2cikxqPV+n/mnIUNxoRjOHmySHI2OlJ1Gkch01oOc3K8of57vHP1ZwCAdy6/BQA4tnUCT5x/CgDwlc9/AQCwvbGNt94KllLXbwQrqRt7FCIE0SKsZadH78D7eufiOTZD5mizkjOjU9JDEtaAZUD+SyeNYmkkogMxmG90oJOX5NWcYEvgjt43/R0kEpPUrx27pCWvUrIyVsPSoDE9bgwm4jKSQAlls8d4TR4mtp2xdZxTJTwyS89DHGYfGYsYhI3zTmx/4/UEn1nD6Uy3yYmfYmn1YgrOdZKd18UvwhUs1XLJRqQe75N31etRod8l3bx+ZibLWopDYhOrxqp3VWkrIsQZ2X1ZW3TK3jXgpwRaE4He9c71OxhNzSPmqurNnHTLVfj4XImTH42pOMoCQ3bnq8ZyeKPw/QHrTxJlmy9dVGARP4SN9q6OTUTJXBajBqNJqvJxrYse1uSJ7BA9HqUtqt/pqE+CmaYQd+EJk2PwMKYNQqmsLKuo9DqaLWquIfWRd7C8eJNqabbYwZtvvw4AeO/iewCAF595Cc888yQA4Gtf/xoA4Mr1EHvq0uXL+ORG+H17N6i2nI9Tb8E5xEfj+O5b8oAnwN34uLjKAuxijCqvNiBPCy1730sMJ2VkoN9xNK21ci0mLkpHtv9HSkUTUf57+W15LWuXXF1V/PByMoWWwmZ1+B5Xunc6tJqupOtZEVApX1OSIKdH8brv0WJDz777SVX1VKlSpUqVBmltiSLmJMpEpMByh/Jc1Ecx3yoxXMQySS1KZVwHSyqL6Ygj0Jrogcb4kNo6OySXCMxO91ZjoomoVSVjaHIG3+loYt7tMXueGyPgasxBbEUtVvKmFr0L/2ktNo4FdVtHcbHevfQW3nrvpwCA7c3jAIALF4J66rVXv4Bjx0Pq1ps3Q9jzH73+E1x8/30AwAb1zS+cqJwsnVvQyDiruHyJEWVifmxORGQ7Ud1ESUKlmc1xaO+jekvORTGZpRJR73koR6ao1hAwXY/TodQv94eGnOCAMgPYz+P92ZAs7tZs82HQYb3Hj7r/snoUJIsjqV+9i8OYWz8oE9oqUVSqVKlSpUEalCgcgbfGKlWeSbl8GCO6amjmWqK7OnWS62CgTPFn5Hw2pgi0Y2vhGBsgdrazPiYIUhwuAHQJWBmBbivF6Vk8AAk/kj2vUYl3GBPuHJrpKHkWA49GHpk4aTYNNkZAJQGjnIfzQZIwFNLEuVb6tNcGqeG9DwKY/dY7b2J7O0SxffXzXwIA/Nbf/A3cuRPSuv7oRyG+1JWPP8JsHkKNHMwofwZhPXZk0VEiqbZtaQycwlkidy/5MwqYjehmHT9nfB88fAE/SSUVFkFsIQ4UlEOhBoztQ5QoIp6jpGExXzXxwD+z9x/+6JlO4FGWLooOdw8j1lPvR++PYlibSH1jh7vqt7zm1e9M48K+NAXWeO3rhvl4FGg41pPypFUrbq8MJwJK7stUC85FiNtw7mn+21iMeFElj27bNBiN2fInts17UsydDTnBdhku+VBZ9WSlPE8EWa4UYpWDvZ33GBU+Hr5lzNZJbNEDCNAtL72xvcnnHTCiWFAd+S3YNrQ5nYzRIgQi/N73vxOO/+GvcOH84wCAL7z6GgDgG1//Gm5cvw4A+NEbASy/eDkkWUJnMCJ1lKiPXCsLulhCeQNLAf84SKPkI/ImD+GUfLGJJ61Okq0b1bZAek48cCPyOBciFcxVgg88nUrCBACwxfhT/XNqXAoe84/y5vFwqDQe+Tk976hEstDyuQczr5LeHXGTdx9H7P5RVT1VqlSpUqVBMkMi2oUvnRYdg3BNKmosH02myrHWKtEc8iPaJLONb6h+5A2mdEPDkoXvwPoduxGcMrrGYGEZhKV+E9PcNEBDPhANmdNa40SdwebxjVXxpKQ/4cTEW0xbiltFe+jubIbpdkgDe0DqI2+Dqal+ZudY3aQGkJ/FGEl+JLbOChQWB/iOQ7OPJA4VOpYKGsH2W1LTbW1s4qUXXwEAvPTSy6GYC3VcunQZF9+/CAC49um1cF+7ALM/DGB7H0OJzxdBimnJm975aGvv2fy26+DFc5uuOafSvqaqJ9NG6cxr8SRTRzUwSxMFedu3sA+e6rm4PsTa9e33jTF9UNIYGJ53Kt2pdDtOcrpmexxglJSN+IJEPwob860fEqCVa1DSsPKLiCpDlqK5rq4XpbcYZr7gdZ8kvSrcu46qpNSm9qdgXyhTuOewpCWKoaRDQ2WSeWjlw80rkZ9xXTucBLDqGX0BMV9nXMIz8RpL54rSbZ+uvXGr+BBVoqhUqVKlSoM0HD225HIo2LTaneScwhL4nEJ3luE8Dl48kCEM20gkFbGStQZj8pju2LyzjdKJoUREbO5qYASctj7l9sJv/ou4a2Pl3pGlKK+LJpqNsjmtjR3tx7rXTxjFqqbAkQjSIbGKIghu6DkdR+R1RowLxqNgausbi79+/QcAgL/8wfcBAI+fCU58v/ytX8Evfi3kzLj6Scij8frrP8Zbbwdv8L3dXRq3iJ+M2lHSn861MDzOyrw4ZrmNnKjJuMZSrKfcGzu0L9U+OCoEBRMu0sZ54URqZIlZWXUMectqUP6RzsK0PoVpfb+A7gL+uU7V92lsNbAcHzGVXtMItPFcxE4fHXzhKGiFH0U4Gv0HbwYM3hY2DIfoVS11oZ/DVm8dHX+cdKYxvre4ms5LGIhRk3poe7homcV9CjE/pAT/X/Ky5D46KReflzPVsUqiMV5USPkmkmSTVno3Cbng4zlWY3Bwwo7r8FY6pxdSVq3xtVk7w2QrqOUsWTZdvxk2hX/++/8vRk249twzzwEAXnrhRbz4/PMAgFvkn3Hx3Xdx6dKHoX0yJIjbnJENX8KXuPhhcOQT46JlkxVXDN4cICRLbPKRKbqfm4YpgdmqaTZA8Lan0hr+5j0id6NOFcvh/jzbg6BCxMAHsRgObkaJRYFPTuqc1iW13roqM5mz+VqE/mtW2tAj8bNYxyqq+lFUqlSpUqVHggbB7POvBjA7iTXFXHsCZottWjjYCHDrc3xv
AxbpmWu3vR3Y+KgOGJMaqIEV4KuhOEqWjr7x8BzPjVRQGEXP5VETQW3BqjiWlGVQPYLZGxQSfb5o4VmVNeEGup63cSdxj2wcpDhYIlFYeVCrolMFagXcNuK3EH0xjPiVdKL7McIJMzjI97WLTuJVLSiX+eJggRPkn/H800HKeOWFl3D8eDj3wfsfAAC+/4O/BgBcvX4dbTunZyAJoXPyu6WkVK7r4nVWVXHgQhdHIwkIx+9RBf5rTDoezD255gjAbNP1roc0voWKObgkh8M3cS6LulAZdYiBgpzjOGFRaoxpYy1gloPZJeC3RDKN7jOYLf0rlV+Tm10FZnPCrMQ34bCcssyFewezE0MdeffUr1KdpXPNah589TP268jvWe6VXsHsSpUqVar0AGnY4U556ArHw7u/4AGRi9WhjIWbYWbLG9nto74ttiLWoIjckJW0mhQRFSOMuJFWxSMC0DQj8L6n4xLlvoDlx2TMJHIQlri+8UhhFAzydjEtqWzSzE0qr0CJc6VsgyO32fSc8CJXGTldJ1iQBSiyLTsgeji0HYPNoY8cfRdjgwWnst2gOFqTMWaL4P3945/9CADwwx//AMePhVhTr30+OPL9xt/8DQDAzVu38BNy5PvgYogzNZ+3kUvxcQw0eK3HVIWLiqRiPQmco1T9D4OEc7RmOWirldCpMSedyfiuEvOJwnh8FmgJRvHIpOuU6bYao1i7So1lZBDTUcd6etSpShSVKlWqVGmQVsR6ilYfwjdkoTw8fNxulBlrHg8IiOExWNcu3LUHrJjQcJtOGmFueW4cHHP6YjEUHsE6I12TkBtto5ig0LnOA36UWrPECLdKh6qSwHSk45+YCfihWLDhOEnWKP27qDWjlVKfAfGKcyWpSCKwRknMqP4vSIri52y7VvAV5u4bjiXVecEoOqV3lphMlqUNgxt7IXHSv/2zPwQAkdoeP38Bzz/zPADg+aefBgBc+vADvPv2ewCAfcIvvHMxvpW0paSjDL/wHmhsmvY1dX5LLVgSyyl2dINX46y52rJ1UagjfwtRgpQ3pFlFgYLYAq2gz1bX86BF3iFG7k1ChHCslPya0ikPcKzeAzl3TyeL5Y2xEqcsFfhyCaHfsHa4K2ENJbV3nminFwIru28wttIaAkt6d4zx04Ou+DX5iJd6PfYi3dJzGtNLSxyr1O8slo8SMifT6kuorjBHiyhbIr2mGgyfF1HkS9+D+l7uhlbEeuIfqgUB5YicEqZL8aD4wZwyj2XViTI/jBBvbDPP+uTg0VIIb0tgsw7cJha8YloaFyn2nIZ1soiJ6kS1Gbsdr/FGIi0lICiXi+PSDzuN/kwwBvkHVaZYxlK2QAaKrbEwlPxJFlCx1lVqAV+YkfSu2jbGf2omnJ0u3Hf5ow9x8d33AACnT50BEExsv/mL3wAAvH8xgN/v/Ow9dOTNPRqH99IueKNVTbOJsIkh0LVFaT4apfExauKX533+TulsaaiNytyY9DPbPJKjSc4MdIF4gUJJ7e3MlWX9W5lwKwc1VQXl7aw3IgP1azXo8nLBfLT0fINVl1pbfmkN9VFyRYxKdIF0dQ3fKPrlRKUb55A8/tDrMHFORF4hvtv+JlAYoEH1mF47C3OnV7y/7tyrgrCqnipVqlSp0iCtALMDJaZ0iTkec9cph249oukpcwQ6VzXvjiwpGCVasahutZgc5XFufiHmdRRC2zewHJWWyjSjKH5aBtIdoqkg77ZsYmtUICg2Z22A8TiVXpx1icoEQGLaKSZ0gu5bND3uNKrehNgL2yupS8R9gxxBc95IqHZxGFMpXPsqnMiZlJx52BN5QWav1gCjSXj2G5/eCMfr1zEdBxXcM089CwD45je+gXfeeoeuh3KiidOsjJgwIs4PxHfhc+5HVIP3Tg/WOUxxmIcEVNcBh3XO8cPee9dklPJMP9N9bFJT0QyUr6Ul6aIpSJH6WiZ5arBeVFR9h7vkvgEv7aM0zBiUpgrf8f2gKlFUqlSpUqVBWj8V6pLdylulLxU9mhP2UHAIpxjsjKu1QAT9FBcpm6XTJ+kUO32xKtw6NJr7BoAOMGlQDRiYJC0qADB84QzQcb8FkLAw5HgVQSgbnhEKGyh5JQp3348wWnK40+ax3JRTHEorGCjr95UZcoajwETgWlK/+n6UV6Pa5WdoKI9Ft+gkSm5DKWq7eYu9vX0AwBs/eQMAcGzjGJ5/7nkAwKljIYXr+5S2dX6w6EmXzvskPSuTyD93zY1ppCOtpGTKedgoqIfrB30WOddp+n0r1rDK4W4FdgCkz9R7PmsBerdDLcncCDbyybkHaRpbCvuSCKvrVBJFoni3gBUq8ZlT70rWr6y8BjCKoTbW6dCK7nJTa1Y2iIfdI60ICqhUTnmDCRqUfYDOI1uLQ3hyiaed1qkXRq/UR1Es7PdJEgspcEd64frlk6jkHDOJ+8Mprm18lCScNAcn1J6lcpkXaP2S0ocPHp7ZNW96L1bqUGC5/igaUcuxv4iPKi/S9XC/nPfJBsHH6O/AocLjuSxlNjoX42exv4Z3XlZytlza39/H6z8MWfceO38BAPDFV18FALz//oe4/umn1Cey2hpZGcv7rxCKdL8XuKNQPR2unfXO3Uvbyb0FP4qjWoiOihi3dtA+EKkKVptxlgIA6tHqjaGsWeX5dLQMx4P8Ooapqp4qVapUqdIgrQ9my26bIo7eQOI6xR05RuCMmhMf4z/xrt9FE0rR3DB3C5UQSe3+Yp+fgaHaTI8FBO8dLF8nVrtrvfg8BG9uxTU4q/Vj0jf2jnZaDJfB4UPkkCXNqJYiChx0NJM26TUT63CxAVVfONco3wOWgCyir4XPpAxjlCpBDBBMBJ7zfNe2kXhO0SPfisEB5+JufJSnrly+AgC4+ektAMCpk6fw/LMB9L78cbi2O9sVVZZXUqZE4s0MBYrRPpUBRG/81L0yYonJoAYk+VxSVfo7mbZZPxLNa/9aTl5JvutKAcvj+axHJSOGw8ZpChYWd9ePo5bghuqL81oB0QXDa3mbPpbqAdEFrUbiqS2GOnHy5KORzrv0HeQ9yq/5mKtBvb/SsyyrTdO9zacqUVSqVKlSpUFa0+FO7coZ92Q8oke2gEFOpQccqJ9NVlVBHS8qd600UT3ec2zxnZHoscyFO+/ZN1q4Zudc3D05ZapKjdqLDmmMeDhrt1mjop5SY9kgqCr0/8L92sK5/J7IxHkf4yPFGEvx+fj9SCBfRM92o0eYK3SxDpHYVCIiLmrYgYm5G2WaGb2xdcfDub39AHjv7u7h5KlTAIBnn3kGAHD95g18cuMa1QHpDzsUWoq86TqWDYdpPd5a65H1mSFuN+O+V4CWvYirBS4u8rLL73uUqJRsqnR91bmjpBy/W1Yomuan9wWPu0KlAxVGn72jx5yW04DklMMvS8uV5uzhsboVG0UUO6U58UGIKh+fxR0wVt0rgQCjY0Quzjk4AXLFJ8MAxqQhK7zx6QaFuLA7qIgLsunEBEMcIcR6g5b/mJNFDwlW1vq40uow0jKeLl6LMZyzh1ILv3pJVjaWuCux1ZOU66L
bf0ykQ6U1OK1A6rgf8zUaM+/knJVN3kVA3ilwmjcNsYhSx65QXvwu4gaefoVxQ7eNxd5eCET43sWQIOn5F5+X93zt06vUtoPpWD1IQSDZIuoIPsrih114Z6t2nf7HBjUHCuquu+z7qgV46GO/HwD6Knro/cgW8tI1Yb6U1jIa2GjfitjvqO2N6x23kwcM1PrhFCTPNtulzGRKVqn0OSx7vraUbA2S6hPNV79v61JVPVWqVKlSpUFaC8wGCmK1VztrBnCHQFjJKai8RSrXilyNuWYVuCkCjcq/HXPSpmyC7kcngK4RP4QolRhhd1mzYRb0ozHhH+Ku3MAGaSh5PhVIUHwa4k5vhdNQUkl8BKGcidVxXDTIxmWUEIneT+lbLM9JnkpxgETqgiZ+TrKv77ySLqiI08Hher2JEgsdO++i1LcIqVbfePNNPPnk4wCAC+fOAwCuX78uYzOmOF4xkKNfV780QMNidmJ4IPo7l5QJweQGgMhSq0fAVa+j3hkqs+zaUfD5j1pu6ERFlKmu0/mavZeSSf+gBBdjtelvKo9Ppw0J4rK37pxItTLJ75JUUuq31jncg+qpShSVKlWqVGmQ1vLMLsV6KiUIERMyZ3q7s4OHBQOjKc5hofAFxo2NTRznuC7hwjkYLF1yCmSVCOHOo+VzhFiPrBUnPcMiBZVvRgZjMq71RpBxcWpjL3AbOgh1QN8wTl0zegx9co0GhB9PykhxxgYQsRKrHjMbZsV9lLkFr7EG+juGAadjx5hFLA9OkOQQ07Qizg/pLmMT4tEapIpQRcAeptMJPvwweG4/8fhjAIAL587hk08Y4M7kHNvn1otPZ/o/czxsaXn9sgYYvZ5kbSDvr9cnD3nRiTPeGkx48s0NmDM+bGzgYYDZQ1SKJiEh+5VKQ5QEeqx84Vxer0KRc3NaHU3XqTrWwShKFOuIeMW90BBGseqdVYmiUqVKlSoN0toYRR7xNV5Aj5Xy8Ooc6/JNxBpkh1dAhjDCIoIo9btC+oUTyLrh1AlmudvITbDJbAcvgIVNGQIcdB2sDVzvhJzxnDcY0TCZjp3EAEfluJe841pTzlWRxMwBYNBJaA2NUiC7Vx9Fq2xi/8UsNteXlhhXrwZambvmYFByX85oePVuldK3pzPno424STOhVK5dKyawVz8OVk9nz5zB0088BQC4cuVy6AfPExiYJs0z0MHJe+5Iimpg4oRmk22JBabDqKg31DvnZQT68blSpEiGI5tH90T5a18mgJTeS7ylX6bHMS7hIA/zEI+wWW9CPSxNWzL2ZM/kPnkNxcv0zRVAKnFBVQtmH8stNVt4j4pKK0W/UPkl5pqfw9BaG0WIVUQfT0EG6WW0Qmlw1EfW046oF8cfeLLcxgUymqexHkgNLKtmRD2m2m9IdeIdxCzVkmc2qZk67zBrA+A6omsbY4PJZAMAsCBz2q5bwHHyumwWGmvV5OB+ODSGNhv1oLmTZWLsKsPG9WsRljeKaJosYc+lBvUOtElspiYx8DEWlEz8qIqKsaHiMdOiFZ124wdm5F1JMMjkQw306Y2b2JxuAgCepA3jo4/DhuFGTlZ8CUOPqDJrdOwuz1kTKUmSuOkDJjqZELniqir3anWU3Nj/yoY+2nJ47EN8qZ7bHbqejn9PxZD0IzIvceEYWJySpD9pHR4xi6J0x/e96IumooldJ4rle+VQHs+kiqGhFe1RVDNFo45+f3x6E3cglst2CKMAY+v7S6uopUida63pzZ3+mfL8khVm1VRK5vfymbpKG1ZVT5UqVapUaZDWVj3dLZW4iRw0AhA9gOlv5+KGbRRH6rK9zYgIYiJHrBhAiVHURtVZZBRpZ2+iRzADrrN5yAd9YOc4PjmW9BEdepixmNA6q/btyHll/An6f8SOezU2JQZKGxk4lQ877Y8yY9VllEd2uNbPhczOfs759F7pY1o+eYSCGF4ivnVMIc0ba3HpoyBBvPbaFwAAx0+cAABc3r+KCdU3IQlhCgO74LZCH1s7QsfSIR1Z8rNo+y9N90ekKRvfg7BrJGFY2+Nw6QKVPzpVjG4nf8erQOp1TWdzKYMm3so+lQDgzxItk3qKBgKZU13JwS2RRtcBjI3+mxej+E0VY0E9QAOFElWJolKlSpUqDdJ9kShWhRoQZzI2s7S+xxFDp0LlzEI6NIhLWVdjvOLIo25WYjyJFOEl9ARf9GQSOzKNcLigcBJ7B3O0W5wxiHWRFg1xmR1z6Ak6kGExRS7Ax7hSuYRVEDsSzJmuOGXamhVPHeP0Ra7DRQ4mH9N4n6pbXkZJAjEaKip2Jyd+9AU54WE0RkOS3cWLFwEAL7z8EgBg57076OYzAMCEkys5jxGNc0vc2HQywpzwir2Oot5SO1P4aLzAY2CiOWOJkY6JpZQOesnzHIYGo5+uwTGWuN9V31xPeijXvLQ/JlTS7+NnUKIA+u8gTWxVgozzMfXqu9bjzPX12ylJG/0Yeg9feijRfdsohkQwJ+ApTUKn4kVxHdoXA7o8leM4KKJ5UnCUspuOaqZwnzMengyW+CU5RH8KSyD2mBacUTPBxnQLANDOw6LWtgeykVgjYQflWXKw0nqDHPAv05qqJ8eqISebHttc89hqNZMA2EU1k083De4AwoLq83o7L34WsnE6/YGsObmp3Hga8m+7tpNESLuUQe/TT0LCoxfPvIiLHwS/C086qIOmxcyG92bHpDocA7Chn2Oa2QvylXHK87YfqhlIjO19ukFoX4jSW1zv3a5HQz4TJXXUOnXci+opbhRRTfIw/SSOgg6jeloeDLDk6Xy0qifN6A2wFivVvEdBVfVUqVKlSpUG6a5jPd0LRUvL8MNaiLdv9HTWag+lohHVCl+indtGPw3hAlQeoiQUOksXxBnbJnLyLaksHPlRjEdjbGwGiWJGUVB39hYgwUO4U8u5s105SUyZ086lBq1m8oUyd8k6KDPJHsdY6E9MFCWnYopJ3ytOP5SKQl1a1mN+vgVFih3ZRhzx2bjg2vXrAIAXfuFlfHT1BgBgvwkSwszO4DksOUf6bVtM6D1MN4JJs+tm1BEHTzGsNFco6Vl7PhOF3nv0zBEHrTGVBHJYNcK63rND3+XhVU8DdRSkDe2J/PNAxfGlo8nKhPeyXHoo1flZVj1ViaJSpUqVKg3S+qlQhZbrKUscDO+KOmFQH4CLuRmE74lWivBKdy9pIASIjvpy9t4V7tZHFlDMZG3ENYRbb8OxQwvTEEhN1+Zth8kkOIJtbgUz2cs3LmMyCvWO6BgFFxfjsigm1fV0nLF9yevAz+tioncB3qHSwA6MvVP4Re7EGIBr6pKW6vhdSZsxHlRuHmtUeemGU1B+5oGuBaHkyekP9mB13sUBoMFcdMFE+ebeVZx7+iQA4CcX3wMATE9so6X4U8aFaLO2aWCPHQcAzKdBohifDHXOrr6PqQv1tT5gTdYYSYcrTofGROwlSazFkh79odL05nGAShCIpsOmMV31e+jcWm0WLvUwECVR6O/3biWK1dI2t1vGTIA+l7+8isONlXbMM5mMXOL215UkhsoXu7EGRhGqWl7fUW
mCHpgfRf4bUI/nkSQQ4XM+u8/osB6ZpsB7qBDlRgrlYUPg+17gvuVNJ4TsAICOPLkXXQc7DgvRxlZQQVnbYDY/AADw3nRsGjaYprG95E6w8RySfpfUS/xN5tdMH7h2Tqyu5MPWG0FeRwHMTmiNj+eoiTdQi6gntBIsMqiKrn/0Hl76/IsAgI8+CvcZ57E3C8+8dWw7nNw8jc/96q8AALrHg3f3T9/+CQCg3buFhrLqWQLQfddi0YY2xiOZIDDkzh01a7yBGTlp/fIFSyIJWL3MqAH8jIPBTJ9VP4rVpAHr1cpUrxiLQTVUyf/I9DeiR0nlxFRVT5UqVapUaZDui0SxShTrX+xz0Dq+VFTbqIQ+Fkn5UC695H0Es1OnXKpPuNnYb5YCOlJtzRcL3N7ZDQXI1NLbRjzE24OgzpiyqSYaiMUs1+sB8XYWi8sYpC4+O6t8Sqqn0ONVJEYA2vxRKuufM0q95DJpw3uvxi2ey6z87ompVDEgo+pGxiiM8c6NPUzbKQBgSmqmO3cOsL0V1FHHtk4DAD7aa/H97/w1AOBX/sevAQC++uITAIDvf/Qmulu3AABdG95ZYyzMmIN2qaBQQnyOI1FakVYlnLqWfJX/TrgrJuTy+j0+egzjUirlc0/oMyhRLFuLkvS2sXS4JupvVsWqxEU9T/54X1K/1BHv6/tz9PtXUj2lfe2Xj20eDVWJolKlSpUqDdIDwyiSXTKLy+y9ArqJibOKKzfs8Rq0zwAAIABJREFUwW0gILbJ6ihJFsEXJQUmDWICpehAzZhC7EBHXnmtb3Hz5k0AwJiA7tY5WOJEbRM43NlBwCycc5hOGFy1dM5HkYbb1GGvo5JbDrkJMeDhHBsGkNSjHe4UiA0EcFh+d8wFuSSlKZfvSR5a+upJGaqcMomVx+MqCqxMItSJGp/Mig0gEhUbNtggRbTNJnaakDJ19FS4dnxssU3jfO54iAk1++BDuDaYML/+e/83AOAL3wqSxeTOAQ7GmzQe3DkHULRgx/PDeFiJTZ7pj42NHWfuMLCAdC9HrqXaFSB8mCQxefmHQbmJ98+Tw90yyj2zi054iRYgN5Tpv+NSYjddpjySy3GOfv2Fiu8DVYmiUqVKlSoN0hFLFEpvluvekjAWJQyDjnLCxPqkjFH5MPLtGUqUYNMUoxj5aE0VFcfRGkgukSTD8YMWrsWt20G3ffr4Nj0LYt4fDiVCJ+btXCSJCXnlGXVdAyhxNBS3jshRZyMidcScQyr8hoThYOmhi3hFojotSXj54DMZZZq8ApHoSXb9tk3yI9V9W8TYV3yOI4W0ncFsO+AQX/3bvw0A+PjTa9h/520AwLs/DbjEK88+jhnFjnrtcy8DAP7kn/0RAODk6bO4MqUQIbt3Qpt39rHZkvWaCziUsQt0HPuLcwpISI+RWMXxizRGYWNKQgYAOCeOfA09VWfi3Ernd44FDOu7+9/QXXD5eRWm8If+VESYUrr2bFocmawxUNEQ/+wHr65opzTFl0gDaUFteplXpm9UaxD93X8FBiox9EDHVRdKOMfqO5XFVb+OnA69UQxW5+Nk972FIC46LkWWY3A8Vd77COimQchSdDqKxgbpJ4d0EMXG3fbePQPTnbHwpPOyHYHV8JjPQuyhdhqGa2JH2Jtzbm0KUkfmla3vsEsAd0t1bI2n4IVFFhHfSVyi6AvC46Js2MUU1ifjy8+Xr8/eUb982wPbwgTijYSr9yo+FJ+LC5iXRSEyAWqeS/3xjabvVsdVEjcJE9+VbGwwYhbL6j/21bYjYPtEUBtNnnkeAPDcF7+CP3vnUih+MqiecMLD3Aiqp3/3+/8KALCYB3+K63davPY//NcAgI0zZwEAV77zQ1z70z8LbcyvAAAO8Kl4fJsFxfty4b23FuhoN+gc9xFoyHrByljR/UlQfLVB0zg4sWawiPMjXaAtAGu6fh09ZcDyL7MU28p7vU4olYnck5Z3uo4mzoW4QajvLPPBKC13ojpRjRZN6YfMTQvqnaGNYnCt1+VKTJTUwe/HF+7VADe/TwU8a504QEnOshqMAb9bZ2VwyxErgGhvgexb4+4X9quYWbQw3kuoqp4qVapUqdIg3R8wOwFpNCfKu34mUahbS4lvJJ6T2rFFzm+83NeLuOo1hxR+WVUxRy/n7dohmo8yl+08MBcJIWzf0/EG7twJUoYZ8e7P4PYI3SLcezAPgHiDBmP2ANahbYXzCm12WMijR/NYKuGiZ3anHO867i/4GD2po8rO98+pa30Mu/9eRGo2fYHZa7Ple6JU+mPhcTaZ4/xLQQqY7wbDgo2nnsb53/xNAMDrfxTey/d/8mOcaHcAALu3Qy7ujgawm2/j7f/z3wAAXviVrwIAnnryAmanz4VGdkPbB+0YB/NQx4i4ws1jATT3ix2MeYJ2LFFYWA45TxKkJ9VjNx5hRvNI8nqbmKpX8/Em5+iOTIdz9FSMspoWSK8rDrYUGTVyy+raEeKzRzmUOvZVGWwelnzy8+uc4/aS+hMpUUuawyD43VKVKCpVqlSp0iCtJVGkpn3rVVyCcvJrsdJ+mZJpoYfv6dein5QRrlb6aCGJi8RnynsJv8AAMOsCOw+BBzrh8oEDctA6IMlia/MYxk3QfS8WQWpYUP2NMWgk+VGoY382gydTTr7WoBE8RDh4O6e/nXQ4gskWrZi08rETXbljbMLFtJwx5EdfYirpLqMk0QcrBedQHmaJ9JfhPutyhAl+Qn1jXT+bMZ/GBC+QGfJfvf6XAIA77QLPfPkXAQAHCGa0V95f4MbFv0r7Ng85LcbzfezuBvziuz8NuITfGGHjxCkAwEtf+AoA4PYlj6kJ9R07Edp8ajMc291NkTRv3gqSzcHBAo4kCB5TjkGlHTP5vsZCJqUke/JeYk35OOAyRvI+5DtA8Zs5LK1nfmmWljXG9PFZLWXkMY7Qz+EQ7u2jyNE5rXep3N+sH8WUI1J3/z59T9GUP3ZMDoPRZg8Zz0mq13jLkAOfjkdm+uPXb2dVP4av37fERb12lRpoSPV0iEb6f+YLmIPCvpUKjCeETK7om5FbFnUAZmQZs7MbQlYfP3YaZ08GlcXlTz8BACyaqAIyY7WoAoDrxBpn7EOcoQkmsDT8rFrT9tb8s5N+WzVq/BG5CE6b1PPbqVHVMWbib9o81Lnii8j8P5LQyLGB8pe5BpU/KM4/HjbSs8eewPEuLNZv/vEfAADai+/iaerHF74dVFCb1xb40W6wXmo/Chv4NgGvz7/8NJ57/jkAwB999zsAgOv7t3GwCBZtTzz1DADg1S99GRfffQMAcGIU6jh7ENSMn3QjzCiM+YmN4BW+u7+P/fk+9Zd8cCgbn/UeEx5vYnwaY8SaK/U9yeezBrU16I10st8nGlroEvVRr1iEWyWHvWI8EvVSv2Iqh54qToZjzcf2ZnlZjyUAe6YCyzoXbwYyY4BCXWty1CXVk1xTG4VfshmEebLcSkqXzqtIeO4V3a2qp0qVKlWqNEiHVj2trXuS8gPi7bpt8jl9by6Tql3R+2iKylyNY5M0H1Ux0SObuWxIBFqWBjw8Z
z3Frb0Ach7f28Pj558EEMKQA8BHswCetosOvgsSyIS42Y0xsCDPbUOJkYz1AoyyL0bLIb0NBMV2rEpyHt5nqiqYqI7IQpB7qNhN7NnutdliLOe1PJ0fc1nepBwikBoI5PUfijL1Ab+nM8+8gLc/DpFfd/eCD8SN17+LGxRK9syz4V2YLz6F4/a/AgAsLv5yKP8XQRX1a//z38d//svfAAAc+1//DwDAP/y9f4btpx4DAPz0Ung/Xz59Are7oHq6cf02AOA6qR5Pnn4MHYU+53y6060t2DZIFM6Fc/ZWuM92HdpZUHcxh9w5pc5RfFr0pWHVTG5Ym6pX+tyyfllHR1r1VFS1ZBKnFtmjUQk/U5wzMYWAUjVoeD+L+yV+QquWIsU19wK/Fv4szlzhuGP02L6JcV8u8UtlleVUUj0Vr0WdFh11WHybFMp7Jcf8WfSrq+axlSpVqlTpXsgM6dLOPHe8dzFPq6DNK3OP51BOlOaxDonps7ztVWBQzoRYq+6h7c8aE3EC8rhubIxK27EqXNKaGowYSCXMAdZgbAmIJp35qc2T+NIrrwEAnnsh6L3/8u0Asn5w+SLG5Eg3GXF3WnEma1namIwxHlFeBAJDWxMDXZH1JQyid3DXZVKDd1gQ9sHSApvmOufRUp4NYnThFg4k0Mi1rvXwVK+A39S277yKF6WUs5kxQPidvnsRShKHu8idSmRgAn6bxoojF3PaE8Jzfvc//ft440pwrru5QQ9wcgufXgvc/eNf+TYA4MazL+BSF7znx9vhvRzcDBLI5vxjfH6TcKWPg2R404yxeSK80/Gnof7XXjyLW/MgHV569x0AwNW33wv9ef8NTPYDOD45GRIknTlzFgcHARdp6JnOnzgDAFjMDnCTcmDskzf4HC08vRCe/w1iYDNP79NyAi0TpeES8Jp+Jamjm45KXJTQbV5Ozy3m/Psc7lCOmbAe+KSPiXOdW16H4DKJv2laV+LxmZh9Z+D3krb0+azjBVnAxPhNBWlbmHvNbovRR7+JdQDuZLyNljjzH1xeqVKSuFxx/dLXVtG1N24VC973oID3i+Rp4rcQzUgkvIa6riaXTAmZmFHMNSnCCMBjThnReBOZzQ9w604AQc+Rl+/nus8BAG5c+wQzUlExGLu1vYnNDUpsNCF1TePQduQ3MSa7+yaoPEzTYEYWVvv7YWNZtAtw/HIvC0vMM80qKvH/cE6FDedhKS3aSLK1yTmAPgbaOCXbm/IN1R71Nv0ovWIahrxqG1oQrTXoaDMajTkESih7eqvFE6eDZ/b02ZCQ6Nap0/jSNIz9q7/+qwCAf3fxA3xMm8COC+offyZsHN2tDfxgTBvhq88CANo5sPBBbbQ9CdkLj71wAaNjAaje+Du/BAB47A4lUPrLt3DnjbdCfVeCJ/flDy5hdpuebz9sBnf2Qp3z/WsYjcK98xPh3Vq/CbdDebw7zrQ3gqPfo4Y/Sc1sxbFfTnehdircsiwb23Kb/3SO6YVLtCW6IfGJgtxnslU1BOf0/XPcH5fWn6qSYn97Wm+lUtLPQCdjOckFoMLyIx8X9czlIitplR+FJGJLIldkm4JPbqZLWj93uD4to6p6qlSpUqVKgzQoUZTVUofcNks1rLPLrQTNWRxXiFUGeAWBgjnicM55RJ8KqYu4WnhYSkDEnFJrPDrevYmRn/sWn94KKogrxFk+Q+aVd176Mq5/EtQNkxHFCpoAY+JmzThwkydOTbFxjKQAMsPkOEJ20qCj/M53SHK5du0abt0KKo47twlQ7TYACsXN0Qw5Jo/zKt+1ss3PcC9Yq5LrsB9DwY5bDPCc8uTWoJiA6Zn60Q3PF1a/dK0XdYsj/djZC0FiOHlujMX1AAqffvmlcO2rX8Nbf/0mAOCp4+TvsNHgThckiZvTMC5708CpYzJCeyyosjAO57YWc5y7HvwhbvzwTwEAl26fw7UrQeW0/VxQX51/5vMAgDPHTsD9Ughbvr0VVE+LkcGNvfCO9glo37kR5sbsnYvYvR36016/AQAYze5gy4T5cbATyh20BxjRs0uMoLaNI9T7YNb7Bo0SDcupOV3v3DK/iXKSHROTihXY+zykkEmw3vitSi/ZCAVq7clzdwMqGZmSHrhWFQ+t50GeqayAmNJ2WY7reEv2Dryu36QX9I0raMiPQve3l1RJif8mG1O6kNSfGrL0aZVarEoUlSpVqlRpkB4qRnEvsonsnQUnNeYvnNPmdXS0RkwVoxdzv+IYTToCWi2hvAt0mJPJ5CdXPgYAnDoduN9Xnn0VX3yJE+QErvDd997C/sEtaivUNZ/N4S2ZWBKgOp0GjtdOHOwknDtxOkRGPXkW+PhKuPfqFeJgdw12SX/u0gCjAV9oUo7HGPRBucZAgpOayOXxWEWTOqrLKu5Rg45ZtfKrGJNGvRjhfBxG1ibnnn42YAkbx0/gOoHBb/+LEBX222cfw2vPBSnuxjvvAgAeOzjAz37yQwDAhddeBACcfCZIIDvjDdyaUJIikijG1y9h54+Dl/bOX/wFAOCj/c/hzPHgrX3j94KU8dG1/w8AMMccbpOkPopY6x4/j+3Hgont1hPBTNeS1/6zzz0n6VpPngjH/bfexFu//89DfZYc9SzgF2RiK0PAP1wcNzXK9y7XD9Nh4xKV+laMPaR9BumH+oRLHelf8+mPspd3bGSYW2bMRM3cBKTLG43zufd8HmperxYplo1p1rWyNKfsgDWa1esjhgZX31clikqVKlWqdA/0yFo9rYxNktmLLU2YzhICH7t4sxerRJJAELlvCXHhPFzLeRFIh+477FKoCE6TevuTcHzl5Vfx8kuvAoBYLp09eQG3d64DAK7eCPrvznyKZhosmlpK37mYkemstWhInb6xGV7R5tZxTKdkRbUZ9OO3bxpcej/ce5tiTnnLSXMiNyRcnymZ9JnIucTMO3FcxIGP6lBSRMoMLeGkjJFzMalSrMSSFGFtPL+5HSyVniCM4Jo3ONgIXPrJW+F57/zev8ZLf+NvAADmhqL77nyKx3zABL77f/1DAMDjTwaJ4thTX0G3EcZyl3CMvb/4HuxewIz+wf/yP4VrWxNsHA8WUG/QBDl4M/Tx1NTi8uX3AQDuk/A+R7d2cfMnFwEAH9OcsJyzwgITMoF+lhIpfe4rX8YBjf3odDCj3RxZ3P7oQwBAQwkGDDnxeaskZImADCw16VFkMqktPdf/xrTZcqmuonNYr/yQ3OOLbfZMW5Xe3eSYV4Jz8KV4Mko46Ek20a+0j0HoKvk78PqePkQhF7XhVM/JMGmr/46GHO5KZfqpWUvSZfzm8j6WyCSqhjKt2CiW35x2ns/1r8Waklexsv7SfE3qdS4pZ2DEw1nM7QzEvl+Ar2APmrbRUIweG8FeyX/sgDGh2KM2nJs2I2xskbkj+2eQx67b38PuzQBcjidBBfXUhSdx4UzI0Hb2ZDhn7B5MExapGcUbut2FRbC1M/jmgJ6TjvYA2xth0dkhtdT4zAkc3wyL6hu0WF2/FlRh3lqZmJKoDV30Sqexcp0TQFJAU9F6+GgFQP4OaNXHLmZ8kAQq
8XuWXUp9INSO9TDkWdr5MG5jCzQIoPTpE2EjPHUyLNhmbHBiK9x7gwwFps3jmH3yEwDAJ1eDh/btW7dx7aPgDzG+Hd4BaeYwv3oLl8mnwp8IY3buzGl887/4HQDAb/69/zL0Z9xgQf08fe7xUO/1cN++b7B1LWwQu1vhHcy9gd0Pqqz5h6Htxa1QfnHzGtqrob/vXaF4UTs/wPlvBA/xbZo8ixvXcI3B7r0wB9hnxxoniZQ6Q8C88WgW6cJINqXQFL+h0rcXNueUfK+O9FvmTV15jffA1f7378SoQqlDddDP3Fzd+sjM5cCuCrcvamK9ovOGAbWxuaRgsmCX1k9XAPnzpUqPgV7cB1e2NQBur/xEbCOL22Al/eVWLfyDmqe4hq5ys6iqp0qVKlWqNEj3RfVUcvhYN5ri4dvw8X8V54hKKWlHg0S5PkpxQxk30TQGk2ngdI9vBWngzMnTOLkZuN0nnjwPAJhuhIbGE2BCuZlPngoAprcWc4pAeuLUFgDAmgPMSZJgoPskgrnkwu5gjsCVzlw4HrROuIJzpyly6R2LZiP06ea5UO+dW+Tl3Rl09Cxy58jK8wn4bSCOipK+VobDiAMfS2SmMQJii1rARC6tZx0LFUI7JvsWSY+9mUPILBo3An5pGLHhrsHuBW7d7Aev6bd+fAsfvhOkuk+vBW58vrDYI6e9AwTp6/T5UNc/+O//W/yTf/EvAQBf+ca3AAC/+Ou/hqeeeprGMnDye+0+dkiN9/VvhzDmN+lZ3uuAK6RidBQheHNrEwsW72dBOpoeUFKj+QzdTlBHLej9nxkBN370IwDAmKSN443F578akil9+JOQ/3txJ0ib44VHQ972m+SM16GLZrTiZeyjdC16xdI3d7jvMFWF5PeuYEMLdZQ9vlPOuax90H+mqiqTlIuSiDFpvcOd7KuXDk/3MraqChEGMmnqLuhuw53nVCWKSpUqVao0SCskipKTxuq9ZWmkybXM1e6SvO9HZfdeRaE0SVlAccmyczt4E7hJzj20sTnBU08EfOHJC8EM8tjmcYn/tEm66t1ZkAau3riEpyjvAWMfzXiCsQ1g7MSE+yZjg7alsBSzwBk3xJHOugOYloDzdkTHKeYzSsXqYxygEbFBj50Kev0PKG7UrGsRQeTQHWsAz3rPhB/TppiI+mHE5EEQqUBpeL26TRg6boz1n23UEfML6hq5QUyUHbB9MnD/jQnP/J0/D3kjThxbwM/CzWcpjhKaMa6SJLG3R2FOnMUu6e6Pk8lqR/3+6Ts/gdsNIPYXP/cKAODaxUv46fcCB98RyPKlb30dB8T9d/vhuE/9v3pwgMk09O3CsYBzXPMtPiUpptsMUt2ExmBj3mCLJMgDijK8885FnKXQI9/+9SBF/Pk/+kf45c9/MQzRxXcBAB9dDKD5eGLkK/WjMY2jhclxNvgoLVJ/GVQvsqRmScrNNSh+234JDpKWK0Wg1ce8G96bqKfPhH7AJI52aX9iL5yGW2RuRmA3n66aTGnZ65eKvwqVrBvXadDh7ghouK7l7yCn+5a4qET3ZYMYat/0bbrpj/QoAe86jKZhlpw4Fhbx84+dxisvBnv9k9thMba+wXREizxvGMeCKso3wA3Kfnb89AUAQDOycKRqadsASC58h5Zs5xfkk9FQXbabwnSb1DdacF0MTugoxPXYODQ2vMKzpOYa0+Iw9048rC0v8iZadDCo3boIbEtWvYLVk2kK707tzKLiEz6Cv/ROPG7FiAA2tkVj39gG+/tBTbO/CMcJjfel/QnsNGwQZjtsuJubmzi7HcYZVy+Hw5UrMLRRniSLolNnQyDAS29dxP7VsLH8yR/8WwDAGxc/ws5+iA31hS+HII9vvP0zGOISnn0rGAhsPhniS22cPo3nCAgHtbMJwFL2wj1ySDFkgHDru3+FnTFZO3Co93c/wGgjlL99juYCLP7pP/mnAIAxAfMnyON+1BgsaOz3SV3YOcWuqZhdYoBh1fsLjavfmtb5HocXwaGFqJRbu1QmtwYK2qVch8lAN6Lqs7Q5lZ4z2rZwodhWaVNYY1jCM+U70eFpPd+UVf1Yv/7l9Qxfr6qnSpUqVao0SMMShS9cXkMuS0XBvurnqIHtofZLJNE4mTNhVcF4jDNnA2f+0stPAAAef+ocNohjNBQeentrCxujwPGNTQBNTx4L0sN0chwbW8Frd4PiAZ06d0FUMbNZ4IJv3rqK28RBz8mPwlC889ZtwxuOUR44aN/tgAOLbo0YOG8xpn7M55SSdR64Wes7jJnbJA4zWLEy6Exgsm0iAi2JlGgc4eHoD8tqDa99Xwugo0T2pDhWmIiaRKu0DLOFNC7TDYttin11LDhGY+NMeLbjj7+K8XZQ1xxQbPj9nR1sbgapYQtBvTPdtdj/lExgXZDwzp1+AQDw6ZUPcWwa3sfp00HK+KVnXsbLX/4CAODVL4Xov//4f/vf8aVvBJXQtY+DBGI+DmrFzbnH9/9V8Kp+4iuhzPaTz+CFx8K79yRV7lJ8+e3f/VVcJj+b/Q2KyTX/Em698QEA4I9fD+a9n777Ntw8SDbbxzbk+QBgc+GwPQrPN3Yx0m5DJrMHFJa8XSxg6T2zWWX0lVFseCEe0BBF9ZG6q6BqWW2nP9BGXl7Xl81heMXHK82AyFDFPNOpZFPyFNeP5Ndhn5XvRuyqHqQ1qlhxbh1pYNVat47qaR2qEkWlSpUqVRqkFdFjc8Asxi8qYMNiqmegAKzM8QPo75R3m4i8fz0t59VvlyTZCUdOTkQO1zh//hRefDmYS7708rNUx0K4X+5nt2jREU5w8mTgTrcpKc7x42dx+nRw1BpNCMicz3HiVOA25+yY1syxcZwcqfZD/fN5qHPuF/Ab4drWOHDBYz/GbBGkhYYTLnUe3ocKb1+8Qc8Z8I6RadBS9iNK74DOeAGKCfNFZ7w41fmWpYDoPZd4cCNE24wacK+OGc7hGI9oFGZE4L6NJrMnTgZp7cmnz+D02cBN22kwDT3YC5z3+3/8PmbzcO1L3wopTs+fPodTTwXsYPFS8Hp+/6XP4cPLwSFuh3zT3iWgu1l0mO211FYwNjj14ot48bXgRb91PHD8v/gbv4733nobAPCtr34TALBHDnW77QFefClIHtMzFA/qYB9jwj7M7dDW1iZJoN0BHqf5xx/a7nSE9rFgUn2M3t3zE4ef/T69+/d+Fvq7FeaLbTvconSqbRvGcbK9jdEkSLJTAtetbeBpTjqyfdaOjjZzRg0JhijigHgix/wLtu+NB8h3EM/0cYgSOK3L5/hJTKWr8Gr5w/jsGqJ0ISl+9TWtwcic9QbxSkW2kNjosFqQo8BjVzodH1E7pXpzWgFm04QrTAy9KIt5/L0bIh8teS/Je6wCbBtaaae0GJ8/Hz7KV15+Ec89FzaISTOmKjqMCdzcPklWLdbC0iQ9eSyoP06fDOqHU2cex5mzYaMwpCvy1uAqeUzf3gnA60F7C/sHQaVxZ5+yoM2CKso3Dp5DOdBHP5oCZhQWon3KqNa6DrdvBh+MuQn+BSdPkEpix2Gb/D8aAlRnrsWc/Rd
o0ZkvAHAGPLH+4g/QRptuSVwUw5KzB6t+7azgYGsp50ayIcsi0e3h2HYYm2eeIxB+OkNngrrlJG2qk71Q/uAYcOVK2ADe/lEI1PfKa1/Bj978DwCAKan4pidO48XHQn3PvfplAMD2yfB+3vzh9/EjWmCOnwsL9dtvvo3HyZKt2Q39+cZLr+Cp7dD+jN7H/u2gzhrbKb71FfKtOEOJjixA+whu3Qwb3K2dYKRw884B9ijM+K2/fiO0M93GfDNY0V14IvRt+/wFbJI1lzNhQxy35JFvgBF5qnPEmf3ZAToKFzIeh3e8vb0t49tREqTZQdhgvPdwnDKRqBk1wlRoNU/8rnOg1me/03M59qypHOqlRGyFoxdAZj7jZibMpyQqU2rt0mTM1iWjO1naFGzhIWTPSTefpU+yltVTYbzUObOqkQdIVfVUqVKlSpUGaVCisCOWKHzkMhP5kEXYFLC+32D1XZEKBTwiDvs0ca6vvBTUTY+fP4uxDSL9iADSyXSK82cDB3rmVFA3LPZ3sDgIHN9ZMoE9eSJwiadOnMKI6vCKKbqzQ/GCOPDfxgig/EOOWPp9ClZ3MN/F/iL8XvjAZTfTFiPyBua0mb4zaClU+alzYc9/7EIA0q8v7mCDANQxHRfw2JuHRvdIB2ZmMbJNKw4X2hghPoOc4OB0qbYpoZaFB+/E18Pa0Pb2lsHzz1Hsq3OBg3bGwTfhWZqGJBWOozUyGG+wFEjmv/s38OyF8P5ukLTmFjfxve8Gv4ifPhkkw2/+5t8GAOze3MXGVngvDT3fYu8OnqR4Ut/5938EAPjBX/wp9ikR0c1PyVOeuPZzpx7Hf/bf/HdhOCg97qkzp7A9Cs9w+kKYMzOaXzfhcI0kijGB8A0MPv4kqKou/uTHYTzu3MK5x0OI8qcp1PzP/uxPAAAHuzM0JA1sklnt5mQMQ6q9Ob3PML5hbKYcSYBSunrvxfT4gMq7DrANq63C/ToqgctiqXGJcHLA3BXpVFF30R89PVDybQIEwi90krj0AAAgAElEQVRh3A2iOa1IFqZsqMqSgckCKKZl1LmBZStvc6Xp6rpgdklNx+eGxLQHTFWiqFSpUqVKgzQoUUymDI55SXwPR05fXTQxlQitxSQjRb7igVDiCUq6yKYBThwPOt/nngtg6NNPB1PYM2fPYmMSON3TJwOH98rLr+L0yWCaeedm4DB379zAiLje4xQSe/sYxXCyLVpyGNuYhmut85hSJNnRmHXyC3Q+6LTnXThiFOrc2ffofKijI+OBrt3FjEwoRxLR1WI6YQewcO/pE6Gd/Rt72KCw2s0klN+aTjHtAsc6mZFi3c7FK7ljEJTGynn0IsrqdJZs9urh4zmxtCUponFg0amxQZq68OQxPPPccWpzh8bgACCzztE49HtMf4+9w+kNMm09Gd7P2HncuhawnRMnglTwya1P0ZEUPGuIcyYA/Tt/8m/gOsKM2r8X6jqzhZbewXf/9A8AAH/4//xjjKiOhp7BEXD8jp2gmwes5Nf+zt8FAJx58RU0ZwPOsaA+3j5w1G+LDXIaPP/1XwD+f/berNmy4zoT+zL3cKZ7b81VQGEsECAIgCQokpKoVrPbku2WpWiFPDx5jHA47Ff/DkfY4Se/2REeo61oyR12W5IlsSmSTYikSAgcQQIkCzUBNd2qO51pD5nphzVknqHOrUKBVHfEXg91bp2zzz57586de631fetbACahxnMMapS7FG1MrlzF9//iSwCAj3/qkwCAm9ffAwDsvXMAy3L1WYINmN6Az32bx91iMqGxnM8JIyn5uAeDIQYsTZ/nTKdta3i+Ls5J+9/0HhYiS4JHrPWSF8HpgLCy2VqIYjmyAJTP6xNA3CpXO4YYYSm7YZLfDInvq3TYpaUnzXjYNd66eQgw+5eRNVGMYgPe8cvK3nQRRWedddZZZxttY0Qx2uECsNajbZglw2QMx89wh4DVJipY/94v24wFlpqKF0WBU1xUd+YsMU163Ofh7NkzCC1FBue4F8HFiy/g9A5HFEOKKMaHWyjY6ywZN9g7orz2/bsfYGeHqLL5kKKSrOyj5CIsYykC8GGGPBMWlch/sGfXC7DsTdiKex24FobplJ5psq4xKsVhW8IhRgOWuOjXGLDCreeitrzfQ87eiS2YjtlO0LKECKuLJP3sPQlKIWU9AYYjSJ86k1K8KJGHFH8ZIOMPT56kY3vm2fM4eYrG4ZBVW6spsDUifEUovI7P05gGJ0+wR8zjffLcWRyyMuuVm7cAAFunzsJwxLZ/QNv94KfEBmunFTJH+7t3i5Roq16BW/v0+QG46PFUgWLCRYuCJ7Uc1dkx3vzTPwYAXPvxd2n7py7i3Keo+O7ix4lpde5ZotCeGF3Aafb4pyUNzE4Ablyl4715RPPpZFli0Kdr9dX/5y8AAPPrxI4b5j0E0IWZ8XgEC5Se5mzNN+RgMMRJlnGZck+LiufO0dFYo7ReSfNkOBwhsMJuVVFU1bpIgRM6efaIrmTK5HlkzFIjVfOARkFYpM7KZ9Yk762yo9atRCs4x5rjOHa7x7R/JbHcB9jGB8VJBkbr2qGacYOZCYf2U17AKo+WK0YNp6WMsViOmoz1GuKKpYJmj25LF26hYjxWAkuYmjPFtdfPceYM3ZQ5d4yznCLa3R9rh7EnQOH7nTvXYDklM2JJ73x7G3VL4PT+mCqiP9i/yT9pkfMNOHT0O8PMoDdg8cApjeO9e3cwnR8snIqtaZsyDNF6GvuRkbSNRc7AdssNbEKeoTS0ENVyDThVdWp7hIzb5DkRlcs8rKXv9jjlMhx41DWNXVMzvZJH0TUuPiBaGWesaO0YY6JsudBvGSg1bUCvRw+IM09wGuapJ5RqXHH3wP7wrNZ7yINlxik54wN2WPTQGbpWOUqcO8sPmymlcG7uXsO0puv26deo+929Xa7UbiaYejr3a3dIcO/sK5/GLa67+Ef/2X8KAPjRd76NYp/px04uDJ+vKXD6HKWZnrlEPbnPvvginnmF0kVjdqZuM0jtLkzQP0VEiDlrct2fNbhR03HcZdC7Gt/DwX2aP8UhVXJfukDOxr1rP9PuWL43hJhohuU8zm0zR9vGboEAMBrR9W/aFhU/UBru6DevMk2H9vv8gM4spjMar5rHUVZ971tkXHAktTswPgLhyUq+3MDsWHqskGH0e0BEs2WhzpY3T74fvxwX9vj3uu1l7vpksTZLr7rvNYdtERuDpcuX/n4C0C8/bNZqOSVjJtL+QVO7WKHxxsNa8whbqGdYPf74gEqbL21eg7vUU2edddZZZxttY0Rx8SJ5T7NZjfGEwtlZj15z1huajmvM+QnVcmVx8JnyzhJoCevIbo9v68By9iSC16e3eD5b2wOcPksh+pCLvmC4Erido1+QlzqriHJ5cAgELlw6xUVRZWYxnlLa4N4BeaSOaap52UPFnvCsYr2e7YGCg5MJeWz39/ZQ10f8+xQ1WPZ4vW9iBXegVIFv+xqR9Ti9AlugL+B4nzyuMcgjHfW8tmJtGeiuMIeTFqvsoA1djbqh36/5RyuVYTLqyLA6OtAmhX
k2Xr+ouyNfpvPNC2C0QxHF088QeeDs+Yvah1y0jXJrURhOrTjy6CcDGvfedg7T6/HuafudUxcwPEnX6v6MIrO92R5MoDG9+u63AQANNxEK1QS2pO/eukEKrR/73N/HzXevAAA++/v/AABw5sIzuH8tRocARXMAOWo7W6zjxQWZ8/19fOMv/xIAUA7os2cvUerpyedzTJjYMOXmQ6effRaOU2zbHNHu378PXGUF3DffpHNhz7/sGTQcNTQcsfRChqJkim8jcyag4JDMJ94pABhrtTBP6LSt82gnLKfOxJRer1TPXdKWDffu9t4lKUkyazIsSAirLUYSmzMsYWU78pEXve7o8aYrSpL7VIr+pt+Kh7q+gdIa8PhB52Dih/GzlOmx/Bki5K67Xz3Y4BE5uxKBhBDf8uknSEYq/Z30OB82W7N54LqIorPOOuuss422MaI4z5o046OJeisDVq3M+dUak2jMsHfTenhuriNP+nXtDD5SCzbBKaIXkueiuEnvDbdKMJ6Hlgvc6ob7R/QH2GL9pSyjiCH4HpqaFUunDOBnJaYMpIoHZjkH7X3ANje1EepiXVdw3F5T9HVGo75GMnXDrxpRRMqqUgZ9DnB0Uc2YNpqXmFUczU0ZsGYguBzkyNmDrrkZk4FHm4leBx13z+cYNPSdecVRIm9iHLToDdJgyIY4zNpx06zmQjnCKsqAJ5+meXThIhUnVg1wcJ881c99miQxqukM0yOK4uo50V5tSfhPfgLIWItpOmY5Epvj0vPUgOitdwkT2Nrewc42nfP+7cu0D55/fV8o5jC5Tdd7iBxv/4CkNT7/uxRRnH3iSdzN6Dcyjhoaxn1sCJge0Xcv/4R+8/TFJzFmzOH963T8b5uvAABe+XtfxEu//usAgM/+9r8FALhz4gR2OZo6wwDz/YMadosIE/ml5wEA996mpk399ghDlhQZMtW2ntWouKmSBnWNh2hpiafduqhRJpiRyKnYLFMZGtEEa8Zj8Kmj5OK+HuNyWWPROJb80MjCJHpLD/ZcTXJfRq99Nb8f0/pphPBgf3mx4/Kj4Zyb+kAYm76/dGzroo51QMaCBMkynvpgkN+kwPw6QtByl9t0E78aJcUdbwbPj9OM2vigOH2aOOtlmavEddWTm4hTOsYAoJSBiJHVcw9emxRM9jAfbeOipT67Sx/ybyeaSUzf2N4e6CImPavlIXjixBCxL82cj3sMU9CNWnFKZDw5Uq56b0QLmAC2WVlga1s46zRoB0dHmPON7Vhfp+xlaFlW3OYMOpZ0rNNqiporvzM+tlH/BOAoZZGz9Hg1bTHnB8WM9YWkEVBZ5HFxyCWFElDJYsLV3WVpMRjQdjPu1lfyg6N2QGBpdZeG+YLxWr2zY38Z3krmwminj+deoMr3nVNMjpi3uM39ot/6HkltFzZDzmMuDZ1a1j06cW6ExtM4zzgHNjMDDE4/R5+f/RgA6medZSyiaHkh5QMqUaKZ0/E2nGLzVY0bN4gB1XAa5qkXnsf3RSTPi2gevWYA7t5l0JlrFO7fvYmdHarYHzU8tjz53/nmV7F7n9JcW9t03Du/8jlsnyBG3XssY373gwOc3aJaHn+JBA5v3/gB/c7kEFNOb5qS6zOGWxhmNFFrZkLVTZ08rCXdyuCzSdQT2KFxzqtjleVxGZD05ng85fOkfYxGQ1h+cApLynmvKVUk9RYri7DSI+JvJhSnNQtiSB4km1LL8Z34m+sW6MUFkuqrFveVvrdplVq3hq1jd21a69LtN+0v3nJGU7sri30IcTsRfky2Sf9avS4LP/rA4wW61FNnnXXWWWfH2MaIYotTKMSJJ09gysCo4zRJCB6eQ9ymYRqfd6ob5F18Ui2LOf6is1HkvdCzsOTmQ6dObisQLzUII9bEGY22ERi4rFnLB+0MhSPvynFax+c58m2ucGb9oJI1eihyoTMVCqN3Tv9uOXzPcos+h/emEeCfvZseAJYeb9o4zhlHEv0BV+UOB/j5faJ67t0nD30YiEIZbIaslGOj38ltT8fccw/noqm0WjfnyKbkKKYsWriWq8bZ0wwWqgkkLEkYs9IC1fDUystt7OxQ6umJJwnMPjzYx2c/T7UHJ7fos7Zqcecmefcf3LrJ25GXf+36fYxO0W+++CrJjE9nwOUrVI/Q26LI92jSwgdOtzmaiwVf/9YUaNmbrjjaOZgeadvawz16fe7SJRQ8vs0ReevSAKp1baRVSjrKG4zv7fGYcgQi0aUF9q5TCuzNr1Dl9ctNjfASnftRTeP91Meegv/pe3Rse3TOw22KvsLkfbAjj5ZbrE4OG2R9ujcl6p/P55jNJZVJc6zHtTt5USrNOee8ynQ6hvOLLQOsNXqfSMag5XMaH021WVKPq8LbttV5HauIgWUwW/utp9DrUrOs1Bb6Rq8DY1eigdTTXpcSWrVVoD2C6maD/7wJBN8YHaw9BrP270hCSCKPh+jjHZJroPe5NhJLt3t06yKKzjrrrLPONtrGiGKHFVHzIk8wCc5ZC/AaPJpWqnyZyukDJuy1eS+U2Tw2f1+TH1y2D1O1uLwfg9iAJWeU7vTp0xiNOPIJ4iERkNj6IUoGv2v2eIoiR5PTsWQMgg8GA/SYrhn5akwtrZuYN2Zq5mxWa17XS6RiYstKNycPcFJTVFC191G1RK0VTKNpAM/Ne8BEgXNPnUfL203nBPyWGVWD22GBnHWgsoKO1RaNRlE1e3l57iAp6tFQKJSMp7RzOKZ11hwJeXj4pdyv90F7U6hfxy1i+/2zOH3qKR4HGpe8CLj4BBeiTWg8itLjM08TrfTjrxIt+/CAqK7vXz3CN96knP3u7tsAgPPPvYRrnMcfMmng87/+D3DnCgHK14srAICDO1R5vT+7i7qg69Fy0eEHR3e1d8M7PyRw+qVf+Th6p2jee442pMmSMbm2PnA8P3MY5Py39C1pmbgwPzpCNWWKNP+Ou/k+vvNnXwUAPPUf/Rf0vSefRvss4RbYIm99u6CIojJ95LnMHaZHtwaTGe23YiLEcDiKUW3SiAgA5nWlCO2Qo6XBaAuOsSApoGuD1wii5DkzYEyybRul4jZTUawNyPi+0ry78UrYEDMSift2hTYagkemFdTJvFI9pyVX2kSMTCizIaTf3Zz/T49Vfyv5XvrO8vur35X3Vn8jrPyxusu0VM4sfLyIeRiY1ZatC7Tb1X3oe6sVkY+MqQDHPSg4pM/zPHbB4geEhK3OBwUHq4rea+sAx7IQ+rpGaOsXbcZYndwVVz1XVY2XLnyM3+Mwnx8UuQFKFtATWY/haKgMEH0YYAznFm+yUEnYl6FlUHMm9KGQq4THgFkkxnrU3JxGbmhJ81TzA9QtAeciQe58Hz1pUjOgB8B0MobnBXzAQHSv4AV6e4j+iIUKORXishY1L2IQsNfWKnAo3x2wpHfTJrUxnF5s0eqNIVWtwYaExCIrKTeFOnURzz9PVcw33ifZi9HQwVo5d645mVfIc2HP0TkdjPnBWR/heRYRvHN0g8YPfVy9Rg+KX/uNL9LxjqeApzThF75IKao+p2iO6l1MuZp+zo2q3
r38Y2wxm+rt79O+vvj7/ya2nqAHVfXeFQBAwYus85TSA2LmxBEaKwNCr3KjO4+M013Xfk4pqDIvsPtdqpV46jO/CQDYeeFV3OZK9XO/+joA4Pp3vwUA6LmRPngsz01XeuStODuxSloujCzeQyZVtI3H/gERNw5Y9nw4GiijSWorRDIFAJpGhCJ5bvR6KJguOGNiRtu26giOeK4VRaGOkqSlZH5neakkB6mzsQYwImmekFDEhE0oFkJYs1oGrCZHjH6+6nM+KBGzni10vK17UhyfXz+W3JN0DdSMHb+zTH5Kfyok3zUxn5bsd/ULx7HGutRTZ5111llnG21jRDEYEMhrkqe60CWlsUkIQFsv9nz2LkQRQY422tYrCGpXUlAfwjY1T1F6YK5A+3xGx/bd776NT7xCEcXFC0Tb7DNIN9rJwGrMKIdMIww1mpZojA3XElR19KpFL2fImksm5JhNJ/ybdIxboxMoGCSUpjI+NJhpt0t6rwC3wUSJwBLQLXupNpQoDHltJ5hzf7g/UY9vyDLnwz5XOo8y5HwOYL6+h4ENFDVYzqNldoCCwf3A9FzOYKDMW1iu9TDSZSlx6CTFYbKgAKAwZoWPPxwGjIb05uufegUAcHD/JgYc5fSY5lnmE8xm5O0GQx7r6Sdom5NntuF++jM6lRP03mR6FZbB3Z//mKqwn3/6Ffzf3/syAOAru/+cxorn8Pbps3ji03Tdz16ka7U1zHH9GtdbyJz0Fueeex4AsP/Gd/hEuaFSMJppbDVwStr9aO9xGkdrLXJ22/bep5TY8GMv4wRHD7tX3gUAPD3ehz3JVPRXX6Xvfpr7dX9tjFM8P454HoZeQE9SX1zoYrNIP28ZaJ9yu9QsL5SYIvTXpm1Rs+hhjy94r4xaUsu6bE3TaKTS5znmvUfFqgUT/q1er4fBkMF8bYwkUXqB8dGY90fzyRiDoL14aSTT5INQfKOlx5XUTSk4vQpwLzvuC2D5ptzQY9hHUgqQZNNiGopMWcN+mUjA9NilCMTAJgQBeUlSUMesxV1E0VlnnXXW2UbbGFEUOctUewNXMv7QZwregNUo6xbzEXkHVS2V2QFNTU+rOdM8q3kVFUY/FEFr0ZabrxPIJe9JNXP8W566tz7YxTe/QZ7i3/sNkoW+eI48oFBbzI9Y92jOEVFo0Ah2xy0vTVYg53yt4AaWQerZdI6K8YqyR+O3vbOFEye4jSqDj/N5o1XdZcGU1pwiBWfmCBy9TCcEqDZtwPAkAZyFIc2p8cEUriFvcGtrkQqLvkNbcMjCEUMbFDrQHHGWl8i4WjfL6BysEU8wW6/wK2CYXFATohaPlQiHzu3sEzUuX6F8+ydepGKyc2cuYo9VXbdG5EmffvIJTGryurOSvNP7Y6LJvv3zt1GVlGM3rIhrmwDDzZfCPkUgr33hRXz6BQLE37j1TRq/Of3O0bVDXLnyDgDg5X9I1dLZmZMIrL01vsbU3MvXcOFpaqP6E218w8A/TIyKBZYA4IzQrekeEUKEQYBlL7k6oGjp6O4efud3fh8A8C+vE713dv8OwBLheyN67f3jPwAATO6Osf9TwnZyxttaf6S5/mAkmp8lleT8mcijzyaa6+9xNHDi5AnMuKju4EAKTwtsb9OclX0JBmEzq38L7pdlmTZHajjE8t5jzqGyeLADBtCBTIFtmS8m1T/lc6HxFtBrEXgNwSbZBLvwGe33IXikeJDHLySAx48GPmoJ8WXF2TgEBlF9V9ZCswiEy18PqAh/mOiniyg666yzzjrbaMdEFOQteN+iLcgrKArOLXM+s9+vMRyK+ihjFE1AzSyjySHnS7MGjYsZs1+s8RPW5JozFz2qgAw//AHJRpw/TedXNFzcVJfI+7ydtMPsZzAsJSHU4CwfCgEKzZx7F/CptS5DyQVJ26zNs7OzgyHnbXfvkWfZtFX6rKfjYAyil51AYOrslD9zPoNx9LlvaF9HBw22RtS34MxZem/OEUvW83DMLBJ9ex8MfGC9HiPSJlH/R5pOhCBFfm0ybrFdpnhcIotifCTMStFgr896RucNBlvMYjoiCi+GBZ688ByN0RZFR9PZHo6mJHdx/4C8+/fv/RwAMK5qOMZUAlOwx/tzDEEU22zO/UWqAfZvElOq4Lx3yzIt/bClnn9b0xgNnEfJdND5IW13++YtXLhAchotXwORoymCl+CMmuWA8+miE8LDmOVcSOm8NpsS5tTPfv4uPvfb/zYAoPoh6Uzdv38X0+efp11wbwg8eYmO+3d+D8OzNI8Ovv5/AQB2WgOTLdNMAzK+HnKMFdOubWYh1ZESiWRlroWbmgnwDodHHOlxZFEwfuFcpLYK5bJ1jUZYmWqqZSQPghh5zGua89k4U5q6SbTRpFGVZTfZmoCQ4g8LrwbLmm70+nCRxC/LHsZL/1AlABveDHJ9fEoNjq8PKgx8mOPY+KCQ1EiW5frQkB7NNaeghq7RDmmOczSubsHrFSqWgj46nOrE0YYiyWmLhHEMQ9c05EjDypUQ0ySpJwnBXNTryfQOh+NOPrMxvddMaJGdFhn8Pktbb9E+RicyOE7JOGkBFzJYTkNV0u2NAdgi28bpU8SJP3WSXtuqwRF3MzscU3plXh/oAyXrCWDNXfNCBpPRgrHDnczs2GE0pPRVweB0lgGntil1MxDwnQUDYQFv6FykSt4FwPHN6BgI9q5C8PSwqxoCS+cNaQtN2zFq7vwmMuo+swgi+OiTgFQWIF4A5Lgunn+W9LUAFJw2Oji8iwvneZHnNFN1cBfVnMZIFvIe12IMsgDwQnT7Nn32zMVXcGZED5u3v08PlP/5f/0neIdpqKKRlTOwDO81JbTNlNnq5j1dQCv+zXs3r+PS56gRkeOq+95EOgsmeko8/XITm2PJDeiY3GFJhpHOne+f/d1b+B//h/8eAHDUpzF6vpngLF+Plp9K97aZ2vzq6yj3aFz2vv3/AQDqdld7PReSS2wNHDe0spx+FA6KbRpYTh44Ps/9/T1YJ42O+DW38CwWOeYuefJZv9dDJlrzkmWEUfHKlsc5y2IVcZktSpsb45EVIu3PD7rcwMoypIJhrY7pMkgdgoeBOD5SuxPJFFCHJqZDZbt1xQ1RZiqsUko3WUgObp2lZJsHrsObF+iNlNUlsHrhexar55oC3MsPBnP8w6JLPXXWWWeddbbRNkYUlsPEwhbw7Nn2A3mHrmWZVTg4JxEFg22NQ8uS1Y7bM06OKtQVeUZSsaxAs7HJE9Loe2E57DTLf6d/rFGnNV57N0t84lqH2ZTeu/E+Hc/Hn30eALB7b6o01pILiIIrtKpam/dkBiWnF+R4xZvLbQvD2knzioDoo/0jGE71CLg6bw4x4c8te+vDgtvNDlrVZe+1UhmbSUdMBU1PnT4BaHqJQUc+Lm8CVNWX2562IdJtWxclzhspRJPKcJZdr9tKPcaoGm8SRFxAtCjjLpFFaGlqbfefRs4uphRfWuvw/gekUTUa0nzybY1zZynKMPfpuKWSe2fo0HIapeUiyXNnn0DPUHrkuUuUpvng+geqhCpRrlA6QzCanBBa6OHu
vcTbpc8Ob9/C1uBz9F0utAwM+gZrY9MmGQGPqDigDcPlwzgnhU6ewwCsPDw4TbLrN9/5MT77OhErwHperVTQNzV2d2meuhOUZqxmd9DjQkWRCrc10LDisLd8DQSIBsC1eqjZc2zqGgVHbCrFX+QqqZ5BJjvZbF6rZHtPNMSKDJXnVCdHi5mxaOaiAsxtdzmaKspCG2ZVHJ274NH4RTDWmEipVdqytjq1WpAXXCRTaIi3ICkr3vTS/5NivMU6NAGKjynC020fnEpft991e9lka8Hnh+hDngY76Skv02MX97vxULqIorPOOuuss822MaLIxTsNHs4JsE1gYr9P3hxMUCVNkXnw3sC34oKyl/NkpsV39+4SuCUtGEMImgtVitpCDvDDUc1SvRd5elpr1bv7+Xvk1V68SJ7sJ155FgW3qTR9iigmbYOGMQ3R5s8zIAj1tOD9i2yBm+PgkDyqA27bibavdOEZe32mtJjVBO42jmiS84JxjiLAgimFPYrc5ofAmZIKBA2HNjbro245l5wLjTHpQeAFmyDvrW5bVFwI2YqGlJui5j4bDefJpUFNWnSlmj6wRFFE0pjeIubAWdNo/4D2efdujTOnt/gz2n5n+zQyI2PJAH2oUbOnDUdYjOXWqNPxLqZjkXngvHfb4JlnubHWIX2v3yvhWiEcMHUy6d8p+ejDA/LQj/b2tM2pFA/evXkTpaXf2GJK8/yutKwNUV4iUSuNztiSB2tNMoXjXPeME/gxAeg3v/JXuDqi69x77Vfo2ByN2d5ug61PfgIAMARtf/int+Ga23xITG0G0DiRqxH6qpybjeMNmR9G5T9s4n5GyQf28vnwB8OBKg7XlZBXDPKeePyCm2UAF2yKuohEbT4AeY/vJY4ycmtJGwXQa+dcq3NFIglZHwwSFeoEB4g6cmkR2dL1SKn0K2D5Ygyw+vcmn3rd+vTREXYeVql2/WdxDJbXwkcpCtz4oNBewT7TC2F5kSqZBQXjdXEQmWXXAr7h0JUXocz04LzRzwHg6IgXqNrFMEh1Xh4/2FkXngUfiAUCYMYaOt/5AQnCZaMCF5+kWoY531CnTm9rGqpgJo/JW1QtS0tLz2LWxun3CtQs4mY8fw9n0VR0Y0hKpuh5FBnfeJ459nNi/dimRckVy4M+PRzOX3wa29yvOefPxtMWgdNWvVLa0skDOj64BWismxoN/y0pJa9yfog1Claqti0yThvJIhuMWQQAdZwFzKQPq5oW+Zu372I45KpxLnu3ZqREiSkTCibjGp4fVIfjis+PgeYGeHN00UgAACAASURBVP8Wifv1WSzv7u5NfO4zXwAAPPs8aTN99ctfRl4It15uCqmp8Vop/MIzBIL/+M4eGmED8bzev3eAjEH60QmaC2PwdfGtnvNCd7UH8NLTildhSbkAFcEL+zSHpvf28J1/+n8CAMpvUY3Phd/+DwAA51/9LMZcP5NNWZ/r9N8gfEDpwcD6YDbEhVOFOPncXcgRMlFFoN/OYOEgLQO4PmLuULLYpXS9k8egMUabNUntRDWvMZ2RozKASKtnsQ6HwWyrDqFBKw+AOHixK2MpTChHLCtAX0U3ytq0qprPJc90rRKlguC86rBJzjZqSLmVxkVyLLTXdQyq5feOc16XK8pX7WEX6nXifWu7460T+4tl2CsPiEdhXXWpp84666yzzjbaxogiyqEYIEhFJTfBYe59MAE9lm92TPNs+kDTZxB2xJ6Um+HkCXoqT86d5n3RZ0eHY9R1pLrRHx7rw7dlfvCDzZjV56C1RlNPtqBjvHOPUgtvfPO7+MKvU+hfjihi2gotehyBDEZCO/SYsHc3m9J3y4xrLXoWFiLhLZRVILcM6HFlbN3cVwnvLDBoytvP6zHmlbS4pO9dPP+iSlxz4AYfDMqe9LKm7SX15F1QvSgBup2vVRXUK/0wA+yAj5fGviyZElnPtJJcPExSmuQDsNIqFOQqI16VLdaeeuqZCyj6XL/AwOfe4Ry9nKXPgzS7atWzFdZtywSA8XyMwZY0X6LPbu9ewZvf/ToAYLtPnv/u7u1Iq+QUlaRLbGaUbPHD71Kl83T/QPWZJMLav7uLgj3xHW4edDsF/9b2el7ipSfy+XLN5D1vjFZrD1muu90u0R5wf/DL9Nuv/bt0Tb6/1WCXUzm9cxQJ9V78Vcx2iRIsVdiFb5VQIJGFE/lwF9NSUj/jCw/LkYGOewh6HxrVkJKUkkXLytESoRpr0Wcl45bTRofVOKoV8GzI+f8uBK2x0GZJxmqbAlFPsJnV39We8RIdhFhjoenkEI8356jYwel9LtGu1AKF4LQ+KKWxapqSxwMLgPUGW6DqL6fLHz8FtSn19ChRwabUU0eP7ayzzjrr7LFsY0QRn8hmtZZNwCvbQ5aTZ5SzlzgYJFpJFVe+Fg5FQR7JiL31GXvldd2g5VxrxB4NVp7Gj6jIuG5za60KUDoup7YMrO3tjfG1r78BAKgc6RJ9bvQyCs7bngiUY9/u9xEq1n3KxatmD8zlik2AX03oYTAQ8J9brR4C4EZEjquN25ypoq5WqKbyHJW0ORoenHlNUcxgWKKFgLdcDMi0yRA8Wu6Z4ZgKG7xXDEi0pAIyICyqffYYoG8Kg5y9VFGRdfCJrhO/GBMjFAbQ8wE3hRq02DnFvTgY1/KNRy79PUWNtQ2omHY7ZQLE+B7RdafVPZQD2n/B1M8DzPDX3/qXAIA7N0iR9P1btzHs0TUSCna/R9dg5/QOdg+pkPBwl/YbKocMEY8BgNn0EIaJCT3VbBIvOJ2Ta/CvZa/MJ0VO/JbLLByPFQfY2JsfoeACzvY26Vv9zR/+73Rc//V/joZ1xTxHo8WlT8D/iHCZOVO3+9kMngsncwl6kupc9Za1d4ZBkHauqjrrtHVmUUrDr0RPib8qGIVzDg2rMWxt0bh75zFj/SzxQqWh03DQVwzhiLfJQqzmhyoIRE9ei0sFe0PQcRbcwnkHI5GKRqhG57OxMWqlc7EIonYslHPvIZMxLGBvi9c0euMh2Z6HNuq4rsdAHsM+TORAx7S6jw+jbLs59aQATtpIZLGjlfFAkctCx+J6pUW/xwyMHlcbFw2yjIE3nkHC3y7LElMjTVM+moEFFllPYiRhIIAepycaEcgDmhndbOPD+3zcHpbrEaop95IOwHZBjJhQ0RDWjrZxdY55kEpT+p1+r1QZ8CnfIAYGvZwbyzhKxc0rlmout1ALMaAZ8rFuYe8+sV48EwRGWz3ssYihSJVIrUJog7JIhP0En0y4IBWsBSxz9zPpq8wAeWbnypyRxcRao5fIKcstmR+ZNFCi7x0c3USWEeDZcDOoUX8Lzz/7MgDgaEyL/N7+DNWctptWzAbzdL6jUY75jI53lzvW9codvHODpD5ucz1MVhSYModf5Mv7/IDe2z9QYFQeZkVRwPtFEPTkyRMYMyvq5pUrNEZSG4KHTCSki4SuIvGJIW3k54ZrPawHOK0jC+Pue9RIaftbb2Dnd6lD4H3usT049yTKp0kyvT6gh9+8rWMzG7+YBgwhSneLTEYIDl4cJb1mBZzMO2ZmSY/
tvCgx0/7ttK+yV2olvjCbrLEYDmns/RIQHULQdKy8Nm2rDyNhzsFARQzlusRFHsg5/1iyVL3NDDynPtsqSs3I4ItsiddOhTa2IjDiMLWaSpXjca2L4oua7tJG8ZFplfLeZOw/OtIT7+/RU00fpXWpp84666yzzjba5spsG5K/6dVrpSmSV/YSWLitNQE5Rxf9UgBShyybLexf9Xhym4SA/BoCVtSegtHQeVP4dGxopRLHwkFn+pxrENir+OA6eZXXrn6Ai0+Rx285DYTaYSDNfTLS6yk43nfeY15JOoi8rNLWOJqyjhK3kcxLC8vV7sGStHTD/aOnR1MIGb3MCVCdj3N8cO06AOCJp6gvtveNpg/qqVCPxfPIUHhuVgMGIU0Ny3m3RiIg59QrdAziqssbgnpQsVGVU10dEzdTE+9UfJB+v4eMx7niVKNpPN67/DMeDzqO/aN7MD2O3GqKLIRqm4UM9ZTO4dmnKCX4/tX7GHK00DZ0rfpFDxmTCgpOc824XmTqW20oVEgaowlKpz339EUe26fw5T/9EwDA3k2ixYq3D4NViDJsUOQxJunaKcBrrO2eMmU1DwaOvdOWPy05ujz88z/BhdepUtxvUVOj+Z5B72XSo6puXKXzrA6wxRGyYZqxeOjGGzhN68SGY95xupRPZntrG9MJXaOqksZZTMlVQjM0PdU2rXr+XHKFtm41guixysH2Dl0nay2mPAekLicrcuScIqo4Gmwah15PQHQBwmn/zrtYr8Vp19AEjZC1ottanZdKmZWB914p8j7EOhCZz5kGNk7PxftIzwUojbvc/CgkBJyH8fw/DD1203uPY8cdSxdRdNZZZ511ttE2g9lCIQMSz5KpsC62RBWgTABS54Ag+i1GcpKFegdSZRkrRyOQFAtDzWpEsSAxvGqPCtLIOZlM6KNOz+HOXfJ8vv3me/jNHtEvLftUwws7cNJSlM+vx/nSxo9RG4oekDEoW01Rzzifyt/b2e4jK7hYjqnG26DIojAnEAJhE0+cJ0rk3v4+bt5kfaRtBpvbTCmflim21m/pcWWFUJnlOh5pLnnmCBuo5kewLD3uWtq+bSf8/1qvszQp8t5hUTOHcr6CZUtF7XxO28/HBqOMZdeHop/lMd2n6zhj7KGurebHy4KwG4Y0cO/2Ic6fpGZCn3z1VwEA13/6F5gd8Tg30lCqjC4/57RP7FBU9fRzT2M0oO3O7XCUNp2h3KLxuvTqawCA3f19fOlLf0HjwJLbPWmogxRBO97HSgFgqRjOfFAsyEkuHDlqlrIXUa0+H3/7/nVUX/sancMfUETx3iBH+wRFQOZJKshs964hsBpCtvSbCDF37oUe6wMyK1gdE08mE2SCeTvZjnXL3AQ9vn6DgUja12hamuMSIcB4BYNr6fjFh3Hu/Dnkfco63L5NleXBexQFU8cLJhZksQBX6dxCkshy5IWA0xG/UBxO5PNNFnHU5cjXWB1nIbSQuCpnGrQ3QRaBdo1UeB9JtCGtZ7HQHOh42fMPExV0GEVnnXXWWWf/StrGiEKKrUJI9FpaKc6JjBcptmlYEsO7KNPRNCIj4ZXyaVIGDShPLrpCUtCXshYSzYgH2rpowpg1irKI+5VncxsoegjWIBjyfoV19M5P9pFnlAf+4t8nSuLpUwNtvCJa+lNu+Zr1GrQg+qV4Y3Xdg3Gk/JkbwjTG0zFstsvjwHTXHqmJbvfPYtCjXhYG5BF/6c//OWY1sXyKPh3vqdMnsDUi/CRnbajtAbdLzfsIHO3M2qgem2eUz7esduvdBE1DmEA9o7EXrSDnatUSErzKJgwaVUYNNBL8JgDg4ICO8YPrh9jm85pLNDXcRr/kCIspnWVvB4cVeZkTpnzOx7TPKz+9j+efJubPV/78+3QusxE++/pvAQBef5UOaHy0j5rP5eRZis5mTHWdzR3G9ykCuX6V5C+m4ykOpxRZfemrXwEAND72MMl5TgqT1zmntMqEJ74I0gDqQS/MPW2rCuQSkfHHVZahlValHJH1pb8Daoy//GUAwOnP/DaN38XXcHvKLVO59Wt95YdoWRKk4PvFBgX89KK1HB211mEomBRHi7P5WGm6BYOGLY9FU1ewEqHy9nmWE1sIcX3IrUWPQ8GG55HIfFy5chmGmY6i0ty0wPiIroFl7KgoskT7TcaSXq21ipEpbuqtNlOSRcY5pxkLMfHyi6LUfciaFbyPOJwUCtos6TUiBX0SOQGWNcFsQtP10oZ2gR67Prr4KDCKD0N1/TDHsvFBUTXMw2+DPgwkBSGD7l1AzROiUrGwVt9rOPxsqkrlyOWayj7ruk5E+1YPfJOuyeKb8hL1TTb2FtG/RAcnV3BLFj5XA1d+TpTMz/8KfePpp16GkR7Zc6JwnjzJYolbfUyuU4qo4urq+bTBgNuOCdg7GR/Bm0M+F3pQnOT+0U8/fxGupZTI17/2twCAWx/cQ2B68dZduvEQhtgeSJqG0mP9gjuTFT146R2uk7yXjJtMNKcLv6SNlO0arKbWCiUbtAomxhA9occmnHwAuHbjfVx6/gUaD14g5xnQ4zTDaIteD6a7OGIwf8KNgi7/jGoKrl65Chtouy/+5r8DALh+bRffeOMtAMCUx3k2OcScF36pjpeFoE1a2xRyvq2LuL007wlxjktqRm5+E+wCzZVeQ5ovxaKlE9Do15w26JGPHCxX0Uuleq3Iah/NAT3Y3Peo9/jZ88/iPotX5hcv0Wdnn0ezT7Tinpnw7nnxDgaBhS1rJgg0NmDOelzSdxs+LpJCeddrjKBpyPERzdu8LFFwjYcoDgTn4dpFYFm1oepK1w3Zl7UZ8p6kgeIiv3x/y74oBSW1S5LCLgDR9FIGtE8cTCatJGkjSTtLPzNbZDF13kY6rQhfWq0Ql05+ViX9c9ZGC00Lx+SJIM4nXKKXtQh+x4qNtEA8ptz9mgfMQ2Wejt3m0dNXXeqps84666yzjbYxopjNyHOoax8LdpaklL3zqObsXc/Es5thPOYWl0x5m80mWn09Yznpo0Paf1VVscHMUnqKflQAp9jeMCwBqul3UsLiRvBnScIYMDFcD/EZWk3pvC7/jOip5/7D55AVtN3du/TeyVPkXXxw+y4O74tiKdM3ZzkMK78GpsTOZwFFn1uhihYOS7k3TcCYezife4IihU9++lP4y3/xZwCAvKB0VFkA01N0DoMBA6Q9jlw8YjaIX4vcagVyVMo0UYKaX1spQvIR8BfdHusdjJM0VBL+isqseGrstr/z3o9w4QKlxz7xIslljyeHuHbjMgDg9c8QQDs7uodpzS1Ya06h8Dl98jNPYjajz+7sv0P7/dl7+OCDn/E4R8/Rmui5A4BJKKhGW2eyZYhNagQsDcl7Onfi/DBLadC0tG1ZW8yElJDBx2UAJ7RVocJ6D0DaBHMaiL/WhhyO00C3vvLnAIBXX3wVOx8n5dzD6hkaq5d/FdUtovM20z0+F6Zio4VhssNM0oVpPK3efa7Rheg6GQaarQdsLmAvfa+u5qql1WcyR5EXyER9gNNRojlljdWIJQq7hhgt8CEVRan3srRPjm
McCz3lFYiV9RkXj5rgNS3mmAouxYaubaPem1zbYFda2oYQFMSWsoCafzMzGTJOPUmPdFiPYKVpE73lfKUpaG0XLALPSeW3SajMcdotRyAJQUHGA9HWpqhW3kks1bk6JsroIorOOuuss8422saIYjolj79tvfaSwJKmj3cB8xlHFBN6mo4PJ5hwRDHniGI6mWA8pv3t3iUQ9/BwrPsQbyz2VF94VupfUb5m8cma2sOrIq7bx+JT3Nqo3/JdVh39p3/0R/j3/v0/AACMtk7yORGOcePqAao5Fx5yk6CyOIGmYrVULgQ7uXMRFfem2GFl1HMnCPQdH9W4cYOA18uXbwAA3vjrb+D2rfsL5zQcbuHMqScBAL0e4Ra9gnPFsFo8OGVtqMPxfTTNhMcoelRGQc1FSQIDA+9i3ph+e42cgIk9AqJECL3kGfDjdwmAPn2Sjq3MAg72OHp486f03jBoj48nnibc5eQ5OrerlwO+9xZFIN/4xt8AAK5f3oP03pAmRd61SrLQwDcBkc3KVPBxnom8A9LZ8Hh+1NpWk8HAS28XyX/DwHohWPDxcu6fjpuObXyPGlx98J1v4PRrRBOe7jC19OMvoblGEdvsR4SRDSQqyBwsu/A5X89RALKwiMEEOGRCGxUcgL83Go40q9A6ocO32t9EgK3+YIA8F+o441U+jkOM+KRwDWjZ0xbMEkgK8qRhVtJSdnn+ee9jcWEmJJOkaW0QsFlkQGKxnEhzeHil4MpalOc5MtGJ4rGS4r22qlFKI7MEf5Si3ILpznnI0XD73raNx8s/ru1lg87DeF4K/S2GDXiQrV/r1q+jK1sdg4lvfFAcHsnCZCRqgm9lckVwaT6nCzCfUag5mUy0cY3y9ucz7O1zD2lpWNSIvHDCeVaQNehJJutR8hBYSjE84Ew3ofkJlLT2U3mVRXLKjXT+6I/+GP0B3civvkqskwzSM/uE3lyOGTfXbuyhbWixfunFVwAAz529iKJPTKjBkOsHGBS7vzvHT96mm/2P/9k/A0Bjalkf5/49Gsfr1z/AqVP0cBFZZq2ULadaVVt52n7eHGr6L2pCOTD+hoY7EHoX+yULP12kpR+2SlSrYoNFn1kwL71MzKWf/+wtvPxJAu5PnqIbau/gNrY4lG88jdVkSvNkOp2jz2J/V66TsxGQqe5Y5NPHY5Fq80wrad0DbrI0ecQL88InD29RIp/3tc5hMUYXhRTAjH/x6iCy4NYq6N3nxj73vv8mLt0mmfHtM5R6Ojx3AuUr1Hd7fpkacU05ZVqYI5TyoKjZeQkGjdz90vQnBARO9cjD1/E9in5AySnSqiIHLzinneqk4r+ez9Cy5L30LxcGVTDJgqgPCq9S6XId14HZC0J3yppkzacyak6JfhXtW9JKIu6oO4g6aOIQJoKPIaTO0SLrSR9YbaspJS/absbG6m4W+Mwzq9LnLbcMaISFWEVwXzgDNlUUW86ArrWHux/XPjAWlr8u9dRZZ5111tlj2OaIgmWZA6ymUbi0As7FyKLlWomq4hTUdK4c6oqB6/F0gv0DAmirutX9AhTqKZCU0PH1yWhXPf6AJMxI/4+HTz2tADjBYDn1lLIf5el/cHCA/+V/+t8AAK+//il+Je2dF1+8hBn3Qv76G0Rt/c63f4jtEYHS/9V/+RIAYHvrHIKhsRkNyFO7zym5P/rDP8Of/dmX+ZAYgPMehRHuN43t3Tu7uHaVwHQJaw+OKAW1tTPEaIcvL/f3rpop5nPy0muO/up5g/mUrgcL52rP7GndauqwcTEtsBKlrWusIirDIUaLp89SVLC330OwB7wdHVvZr9HMIxmCdkbHPxk3MCwH7jSihbb6jBz7oIoA2go1UddeOxOWKJTk9cbvPJJpOmONLZQHy9y1ur1m7IRgwdFdr2eUV1HNuL6kuY7xt6h6/NQ//k8AAPs7O+h/giKK+l2KKKq3aT617RSG62aEGpw5i0aIIVn0YLXPumG9MtaPmk+n2gq1EOKJc0pDFu++bWtUrHVWcvqsLMm7NlmWpK1iekXqVgSQLvNcJcJVAUK+l9zPorTb75XqtUtWo6paTSXJPJE0knNt4q3zfk2S9tN0lEPCYab3EsA7SCqLI4s8L2B7rEjtY9ST8+/3Sqpsr2om+oQxWq5s96IpFQKMNKBaA1iv2ML6thQp4EEppeUZuk4FY9G6iKKzzjrrrLONtjGiuH+PIgoPoOXcpm8lfy0AotUmMdLEpGkcpqySKlWZ8+kM81qUGBexAZ+4VBGCSCKENX+JrYskFiKFDQ9KoZqFDZXf1sbqbvFuDAImXNj1xht/DQD4xhvfBACMtgYw7I0dccWpcwZ2m4Z6NCKvend3H1kuev2Up//ZOxQdfPWrb8AtgZuAVUwn4+rWumpx/770bCBQPe8TddbMZmgYsM5y8mDaMIXj6yi6QL7NFJsQXZ0xY07TqorV9pLLzWLV7HqN/MUctA/AhKOS23eoIOzw6EgrXMdjph02OeYTOt6auaFzgiowGpxDeY7GaP8efe/G1VusEYZYKr4wFxa99rXV/Q9R8f/LMZNEMYsUTe9a1V3qc87fhhp736b5duFzvwUAeOLCp3Bwmtv3vkZqs/cvvwkACJO72irXpfpHWlkvOmvJXBdgmQ+sV5ZouGJevP1BWaJe0gIzCEovrmV79uQL29PPtEeEg9Jp4zl79dLlPZkvPqmgliijqRqEfB2mIb1UpI0zfWat0V44JtGwkwhB57eNfrYU3mnxpc11PagqUbAI6A24WRn/pmucRsHlQBSheV9FC71PpMDX1Ql1V88Gy5mO9Dy1ZCAkW4fFNXbRzMr/josoNj4odnfv8cFYBYa8F6E57vPscw33ZKGpqxYHY7rLZ/zA8M6rVLCmJXwExMUiaSYyUhYBbAnHjhfdWvzu2g/pRR8YYe3iJxdOmVkhoPXy0JAjo22OjvZXQE0E4O5dkqf4b/+7/wYAsDXaxtYOTarXXiO2yvfepErjw8NDld9Y4PDLw1mE2uY17u1yzUHBXfU4PdD4IfpCFrD0wKIHk4CP0iymAEDXSPoqV0xAaNomXhvNnISVdN7iY2JxYXbB484ugen/7598CQBw6ZkdDLjxUzunebS3V6ENnKIw3OTpgI77yfNnMBxQLcaVn3OTIlsgOEkXSfoj6DMjhDhucpTLMyGY1RskFfJ7KEvTbvrWppsupp407QGLlZtXhAg9NBURnBAmgOrqFQDA5E2q1v7Y730S106y/MzHSYp9+gKlOevvXUUmUhycrqkyi2ypRoYYN4uLlDwwqtk0Ssh7+V5Av1yWffG6qLd8fST97Fyr6ahUckMW4fR+FwKJmLCfrLVa6S3Wti1qdlLlHs2yPC74IZUFonMUBpQQTqwN0WGUKR9Msh4tMaJsppesZmUASgnzm8x6MsiUYCLp4ch+yvj+AzKWPsiLHK3KJQlJwyVzUlK7cV5v6qa3zolb6fG+ppPfsnWpp84666yzzjba5tTTXUprZEWuAlgZUzhz0TJGq+B0zWD20dFUvVLRTXHeKygnILZKCpkkVZBWVa8QFYN6qrEfc
Orxrnmirj2zJc8hqfqMYGz8zbCUKlvwQtWDdWvei/+1nCbZ59aVBwd7qK/QGL37zk9oOxcrWJ2E8kmqQEJt1SDyHofsddc1pa2OONX35NNncOYseeZ5Th7PcFCgxzRTCbkNGpVtdgyuN9qsxScjz+DcAyKKmPZbBB2DsZpauHKF6gCqowm2e+cAAH0G+Lb75zCd0Xfev8mRbEaV7fd2j3Dmpefo3CVlYE0UpdKq8JRjj2NtnZ7fL8M0GtZK7oQxoe8I0B0iZVxeA5Cz57/7LUpBvfwbv4v5eZIcvz1gavCQrnVmSljxYnkfrTWwMmU3ic7xR3VdqYeeF7JsBPWEMxX45PMB0GM6r1ymej5Hy5HHgNulFlmGVtI6et/EObac5jSItRhiPoTY6mCBgWD4/GOUAVCjtIpbvXqmiRtjoQuULEw+1gfJZ9q4KBhlSsTDCZjx/ScJgX5voNRaqRORgnJjcj3eECTVlqHkWqiWK8vbptYISNcqvTFDlDRfw74waQJrWadsIUXfRRSdddZZZ509hm2MKO7eYZXIItfiGdE1KQuJLApUTJ0VrKJ1Timthgt3QhtiXlL1gSXvZ2C44QjSSGETIGPWfbS63cZ8sVaZRy9UANK0zaFZ0ZUyK05Y/L9ZBccDFprDA+Td5DyWIsUuOlPOO2Q8vlrFCR+9D/VIrHprMvYfvE+Kq0fjezh7njzyM6e2+fUUzA7vNwje0mhLyYavR+1EgjyoR6LRYAgJZiP52BjhRQ9QPDGnwB4CeUqnti+hmpD21QtPUwHi3bv3cOcmRUX79wnfunWPcJ0bH7yPb37zewCAGSvMWgOpo1qIQRVS0UpxGauIb6VA38aWuuvwi8c0k0YI6qxGsDIk0W38YxXAtBx9Hr1HeleHP/lbnDhJ0dndGeE41U0av6xq1QuXSDVf5yOmjZb0PcF/oGCvY/JDFoDpEUWhPVaPHfT6Wr2sBZE25uYbxgTANPte2UPOxWk+wQcFNJYLaJP5pUrWa6ihMbKP92jjFqNzovkywMyYSQgtKqGABwGWna5ZOa9/mkdxrR6HBIZ52YtRC0daNrN6LjVHL4IJ5XmJNoiMu6jOOr0yQkPOUKLmXxYAXXSuqAgZDzSfpDXMStO3uP4eZ11E0VlnnXXW2UbbGFGMD6QUv03alvLTkAtLer0+siXtepvnmm9HouwZvWOytfTK1Hk3YfE9JO99pJZgFA9gBgCLqdxlKZGoHGojjhLpE0oHFI/b+6C5y0xZYOxm5UYjEGFMLORhFetJ6Xv0Kvo948MJmoo884alHIwrAUeefNYj77P1XiVYBGuK9EOADy0ZgzXjb9Y0rdXL7iD+SNtInnmIV14mraJrV0mqpN/r4YknSOJjsEXH9rffJ3pnnuc4PDjk/Ulka9DW4lWF+GrimKefYe21C+vS8w80k2hafXgL0CS4HodHSNhty+Y1Py9MMoMAaXtK1/bKD97A658h9tzTQy7g3KbmRkeN0UnT4/u4aX1UQ1rCI9L39FhDfE9u4ybUMFysV4FxsFGGPnvp0o+llna91qici1By59UUhRcPbGcdpwAAIABJREFUm3tb5Bksq9bKPZQl9Fjx5JVGaq1iYz7BNuSelKhAmFST8VjbPOfSkMuahd8AgKIoFth+qVmbazMv2caaTNu5GilKDGnr4EXmVJZl8E7ub9qirVtYHtOcm7hlWY6eqNeyNJK0YV3fcjWJsVeh3wds/xj0WIni0kMpJGvE1K02d+C1LIJcPsDJkfm4oKaaTfyWHmhM1ywDRIklgHVsTrT5BB8mrAppSLayEMTvR3psun9+1XRaEgsqJx4qGSwhtPcRwJRwWXZrEfQGaTkNlGVWfzemV7AwhkAkD9gMCC0d03RCYe3uvSPVm+kPecKZCvcOqYJXOo21rQjkGXi3ONOssfE3pYo+JAMhx+FXx1TO/e2fvI3XX/sVAMDHXyEq58deeAF371Ha7N2fEbgvQ+qCiw8s5bMHfS9NLylXXi9BejOvOiYrHHRjVunNuljgIzGVitZrZ1ZLOtKHWQJsA4ALGcAaS6UjZ+DOu9/B5MY/AABsX6SxPfcyqQXMrvwUc252VUgK1PlUSV+PI4ojyjWjT3yIukeSUnI+ILCEtwxWU0W9I6lVECp5672Ot5AS4L2mo4TunGdD9GQtkXuE91k5p+OQZHt1PciSCnh9yChNVs4l6HvSTM17r78lwoLD0UhTPNrhU5w176lhUnIuedHTVJY0p5rP5/qdnMdtIM3gfYAThwbRZH5K868yK7QcQTqIBc+U3LaCSNPHldolO0wc2BVy0MNbl3rqrLPOOutso22OKFTmJERZcX4vK0QW3Cj4GUSXhR7x9FWtao6ddCx78A6xKlJSMgKKLjzzski9VJqcOoqPFlGk3qSCw3mMfoyCvInXuezumYQiqi/8hw1JZ9C4D/HM5LitMQihXdxOog1Ezzg6SAGGL5fx8XjMUiMiJRGEAl4iirF4HIeopbr7kKqkna8wr8jbFNBNlGiDjcWQEnQFG2L/SDk4H4FitxQ92BBg+DrLeM99hb9+668AAM+9/B8DAN760Vu49NwL9PlcaMAEkCKrdeBcKzTCCCKGRvoYZ2kowWMp3qddSX0G75PrvOoz+RXwbzNyuNQKe+HvuKtMU0lWtKpgkqmyqJ3kTby0Sis3RsfSMyrc7t3CPtOsLzz96wCA/ic/T59du4rQ54K4XQa4x1PAcKqTyQwm2Ni0SRspcQRiAgBJp8hZFbCsQuBbArXn00obBRXczKjHr2havb+lhappHRyvH45bL1cTD9+nz5UOLWSaLBIxZA3KrE1k8JNrthB6x7QypaX4PmHaf9s2KnfuOcKft5WSd3bOUIFoxXpk4/FYo43cSkFfTDt7VqvwtVePP+uLBLrcj04r0HUtMEHvOSlYDK1Bv8eZAKbOZqDXqZvoeHtOXRvrE1KODEYkRaR0Xv2oo8d21llnnXX2OLY5olh4yEiu0Kb/XaCySXtFH4x61YJlhJDS5ZJcGtbjCAsKsOveOx56WDzONR+tvGcRcUas/v46C5pjTBLZSznt4w51vWbSw5kWRin2EQFe8a5aAafHDrOKvDa9jNZFj0cICyb2D0h84uRV8vpp1CVe22L+2ISgXn3JXqEBcOXGFQDAP/nD/wMAtZt98WPU2+PHP/5x8vuAyQyMnNcCLiKfSw46XrXloSRIaPFKrMWvErzlo6DDPq4tnCe/l4cAMEDcCvU4K1EcEF6xdYewnukRS3689nGcef05AMCdr30VAFC/9RZKBsKVrkuiawCi971QFLgsMwKfANzijVsEw7pS7BmPGKQe9PtaXBeLOqGAuNx7rW/hK8YXHOtEBc7r2wy5zlOJYn1CFokEkUbwBF6fhDRi80yBa8HjPHHj9XMAqFsHy159weC64BJlr686aNJAC7AaZYjcic2yhT4bALU6BijqTjEPQHSuhLQiUigtmox+q8dKvKL75b3DbNqkwwfCmmQRiuq3UV6Hr3d6aY+Z6xsfFAJkepsuhMLAEPZO7IkbHxRWq5l9IwMRL6KEq5pu
OuYgTZKOipXKD/uk4H0sA2BYXcBDCArwaXpnzQKz9C3+N1k05Q+9EMudk9cfweM8KMLyxUdMB8m1cq1n6WREKWMTQWHN2kjIG4wyMBaAVyl5kSpsYyKDRlMiZHkeNXdkcbAmU+HEn/6c0iV5VuD9W9TNT5krrIntDGCki1hydsv3QrBBJ3xQLZwkFSEpH9k+0RQPid5YzAQuXZ+FFNEvx0LM4iYd8TwyXhinJVU4uzrDrR/8AABQcJe1C5+jFFTz+U+hyYhJVhxwWuXdy3DccbKUWokQYodAuZcTKXQxPQ4fGT1aA+FDrECW9SOwcOX2Dkqut9AFMkTg1eZS/WzVyZHFWHuxZwVy1nrKOUWaIemVnvSUzrLFVNnipeO5lfTfjnpOEbSXh90hd+yUBl5FUaIouWNjj9NkWY6mFnCfHZssdTrlN1mtIsSmaJLezmGoWRViCszCoWkoPTzlbGGfx7EoctS53F+SQk/ujZRQoyPA90g654+Z2F3qqbPOOuuss422MaKQUghrAcuNipTD72IKSnvaKIUR6nUuPKhW+LHH2BKw/ahRxOoBrPt/+nM2eujCkU472SzTJulb/C9/z4TVvJhZG28cG+6tHt/x5y8OoYOPgHKSf7PLTaBs9DqcUmH54mUGclEz1eeK6QntU2ygFONIW+Y9ORfpgVlMPYl2TVZIyqyJUuaqZcXphyxGRwvXJTkHeYmaVNJ8RlKDSPw6fi+NKGRfPlEYW7o8Rk72AfYLT1VpmtMn6SLyZtE67N64CgBwIO/zs79NEcX45A6uzVn36XlqtGWf/QTanxKwnYscuPfcijPxzE0kNigYqhGIV3A6CTjjPSQedEPU0sODPWwFqu3Y2qLjmdc15lxvIdFrlsUUjqZkNAKJTYck3ZnnhdYtNBrFBK2SlnOR1CpgYkOkBZUBSWXJGHiNhgXUtrmoZkOLjOS9LM81aq5ZS8oFD8u/K6q6Oc/v2XSKlhW3JYo3yDRSd0wk8AnAHSqh0tNnRV6o3pZthLbsNFqIuHVYWHoWPsPxgXIXUXTWWWeddbbRjgGzIzi9JNQZIwaYpVwXvRe9N8kdRu9At3rECCHtF/Go+4htDldB6lhg5bEGuVigqPKP6n7ie3GnIXWvaKMEZ4ljFY6hpB1n6zxYq3TZxLNTTi7gWsEy2FsvEkqfDIN4NwFaIKVU0WBif4T09/3ib2nO2CYYk4B6PnpqSmjIDDIbPTk6lwSbkqghi++JN6v9DGK3VsXLlJbaesgVj+zliGkoGJ8GkPHypf/9OzM5bgujQKdc2tICluVg794lrOf7X/sXAIDTO88hsOdqzz5J33v5U2hukH6Wq4ngkFmvqsJRXUAKvIzSnJVYAK8tP8PCfbg4cFIFHbzD5OhQ/wZII6pklVnBA3xwinvKHBAA24eAliMUIdG4vIUtmKIqWktZphirRBI9ptw2daP9K2SuZVmWHLbmMDSSEF22FPeQ6mipQCfO9uIQZNZocV+mmGyMYjKpVpZI2buETJGozqraM30mFdrGRGxH9PjaqollDHqf2Q0JjLCwLq2zLqLorLPOOutso22MKOQ5EpxRSQ55wreieAqrdMpY0BQ1dGIT9VjcpPSvBSkF9gClYMyYFc8u3U6L5bC6j9Sijv1SBJDuNpFUWNFGMQZeGAxOtkvyfWu2Xz0IuxJNwSTev+wiJd4ssZmW/36QpUVlikfItQixt4YSslxy7OLVSFBiIuahQ2RDlP9JNGyMRCGqoYH4f/EUpQGCRWRfyX5NgPayUFaGRCW5UigFbwlIdX34KPKgdMYIK/E2uYlyMsJ0SdRSkR7/khxJ1E5Zz3papiin76/rq7CWDr2mx8LCeSTn7uMpoM24YCtv0UqnPz6Xy3/9NwCAUy/9GnZOkZzH7d4Z+rnnX4E5fwkAULOXD1ToCX1apo7QXk0W54xcTwK9aPvkupilm0IL5KyFZy/86JC6Hjo3Qm9IKsd9Zg/VdY2W6b/SDVNCJwujbFS5p9q60u5/xZDoo0U2gE+wKwCwGu16zfHDRA02VUBOClqFeaTnwHsre0WaVqFzadrYKlVuJWtVBVai1iZpM1twJCSdBD2gUYbxSWGeTEH5Q6m2jTKgpFAWSOaYYhWrE3dBuuiYePmh6LFIJrzE8nKj22B0sihwaGN7wyAz2vuYnlhJVT2+LdZYJDfuhp9aWePT/y0/CIBE28gki4P8pnwtyT0lPxCF3eS91WAvXUA+bG2FScJmnVwyWUKIDwFZdHykxdqoNshfDDoH4uiaOA5yHS3WzLP4oDNLlbFp+i+VKpdFIc61ZABlTJM6CpsvXjlvkvSMtBkVhXMXb5aFdtry4OT0RIilAaskigTw1LNckw5N7cMRMDabCfFEm5I1f4oWJa8rI8fCe9xP/ebX/wrnf+8iAOD2mL44PH0e7SUCto/ef5/2O7mNUvj/0rM6eggLjcbojzXHlvwdH9axlmqZKDCfjDXlOdomgDu3RmW1JYdicnFGHVqXpItAdGtxZGtOo3nvtOZAaPltM9cfz/S+lTWp1vlZZLTw5nmua5rUaQhxJweJFwIk5AcATVPrWYtKRTDQim85F/VNvNcHi9C5i7ynDp7hXtutC/rAlhtA1leXOODaYtpmsX5tQSXiAWuJiSnmB1mXeuqss84662yjbY4ofPRql4FRAaish1bGRupskjqRp2jTxgpFfjquiyw2heOpLdMwiaK57P2GpIpsFZE0yWbyugpSByzLKSGERAF90UsNIflPYiv7oJKxxd8Xj9uYtemMR4ouQpIiUpA4nrOAvUhaV66GBdH319QjwkIKhLaKHtpy2iYEH4t+Evl1rarV400IEHLJ0vSVXo5kH+LnsLCm8SmWyMeYpqKWTi8Em2gmJUOgtPDlL8SIIr0+D0o9pfZRRhY2xMhRadkhg2HKelGTZ1lyWmX3ne/h7G9+AQDwxJnXAACzZoTq2VcBAEenqeixmR8AnoFtvr8l1dLCxsydpHSSVNxCfLw0TXMhIPh4gfLkhqgmVMzmOf1SliWWp1OU3c+0OjokN24mWmB83M28RWDQW7ZfRJq5gE8utonpcpOcu1IuNOUTIwW0MiC8j7aNFeLJ4iIy660qO4vMeK4Nn6J0usNgtMW/z/fN4SEm4zHvjUyZviGlkYusegYnrVVVaSJeFJMWoQI0yMfMzy6i6KyzzjrrbKMdox4rj1gTMQeNKOgjizYqrkqhVEI7FIwiuBhReOVhyu4/vLcVMYLkibmwv2XMYZ1XnkQna45phVprUu9+GTRKmtuY1INZxDSwkAtf+izgQ2MUa85KXfSFYkClswJhyV+IflryiY5zVC5V/f6QeNgq8sXb24AQEvcHPEbqmUfcIqzMg3gkRhsj8FvWRC9J5p8JiDGKRGdxCASk9MlciVFtElksJeHXRb7r8KQHbfNRWzABGbuKPceFYD5HIdRnxhfygqU0pnu486O3AABP/hsvAQCuuRnMifMAgOJZeq+5fxltQyBzjwc6k0gyZFHnLcGEtHdPeksvBvtxLfAq9BKlWKxV77tlmqnxXjGJjPtuSGb
CFLkCwBV76s41iTozb+cjaNyamo8nXtdM90/vZdZqGKBFc20TwXpG0OWaNnWrxXJR3iiC0kLvDcHH4kEsmrWxGK9gBdp5HRTPLKyo7wa0gldIK1kTFZnjMh3vY4vFObyY5NDB4k2OX2OOqcyWuMUoL15vTtEK8l51keRBEVLcRFgDSYWu3sTyvw9zQ5m40OlbG1hP61M563a8moYxS+cewipzZV1Gad0+kCw6yz+/DiN81AdGen6rWkWr6YHg4wIdU0SyjdeHuhCWgrULqTr63oPnWvBhbdxqdCFXdUJ9GMhCHtMraagsUvUhismppDjUaTGZ3Ei8EBjFEiMLJn1ucoqKTk8cHtl94mysS7U8in0Ez45gI4Gr5HPKg1ENK7mAjdY9ONz90dsAgGdeJ+D6/Jlt3DYEHg8ufQwA0F4+i/aIqrXD0mJiEyZjHIP1Hf+W39G6HGsSTSbeKqmp0S5vTQMIM6jPLKO8r3sXParY3KtV8DjjfRR5ibKQ+gzavuZF1oQAayRfScdjbaEpqoZTRFVdaXpTRAHlArZtq6kkmVF0v0mKSsDvNjpUDL5HrSePho+7ZCn2rMgxnbN0PO+rXwzQO0WfT6Yk/Dibj3n42sSdSx4KPLH1AYrU5M14Xx23vnSpp84666yzzjba5joK1dzx0NJY8Q4k1WJMrC9ItlfnQ91km3iPWHhNMMK4vQFW0jUG0d3dUKF9nGy5vievSViyrlp6ueFN6jNJlKHAU1qdmWytUVeSCrNLvyXVz4sVz4jf07GMkdtKsibhdq7tUS5fkP7mwer103GQ8D2LldAKirkkwkoiLW0Tu1RHYYvY5vNBGrqyDw1WJXJDnGP6HWvSb5CpGmwES6VCV4HGOoa5qhdlQ/QopYuvs5HGm0s0FWXXvVu8RjY5l0WiAhajxwWSxsrpr6ER8GtyjaNCgIHjimlppelthcxKuogjCXbKe+hjsku1Ere+/20AwCu/9zEcMpV0fJ7B06cuwty+AgBoHTez4jqN0rUk4wvA23h9QiKxzUeH1ZgifrRcV4IApX7q+SEqH7R8jIHB7LLfx4ArrK2VGoQaASVvz5GFyWKLUtGB4toJ1zqEIM2SePyCU0VeYaIWudUISFKasT6s1barRmvLog5Vzikt7+LS2WjTLZHFt1qjVfF5lmVPI+mWa1paY9EbsFT7Nl2rhq/7bDoGMo52hLsbrK5H0NqkNdGDLqXm2KxOF1F01llnnXW20Y6pzCbbRNEMISSeaMw/rgKTwPos/N+9qcf7gMNafdouoAgLL+n2cagiGGsWxijo38u/9zDvPY5pxXwSqSz3lDA2aM5f89IuRAxBPO+AOAxLlfDGp3MhRoiiF6V+aAgaYYWlIbULg5v8ocjog+enRqohwAjYa+J5am8N1e03+rk6aGn0oK1yRe8o8bWWSBrYcN88jlGUwfNIjtUGjQQdR4G5Z8DWG6AmQPfOj34IAPjUb9zEM9vPAwDGLXmp7vlXUF++SvvdpRy4ES/cGi3IFBygWWkV+3imw5bMJwGxG6a6Ou9Rcj+K/oh6cZQnT6ka7CE3b7KZRSOFeRz1bG1zO9OqwnxOlFyZOrPZDMaKii1FKkVR6N/K0fBS7BcVKWQOBO8VzBZVAWuBhqvGRc1WIwpjtJpaKLTTukFZsCIw72M6HaNhgkJ/SH1F+n0qJnSu1f1JtOtdUFxI+2JsCBgeZm4+8oNi484TltS/FrYs75GkPxa349dVnHstcL0KUkfA2KwF/zZcqI/42bruQS+mFdwatRolLejEN7HiO32uLNdK6IOlDbG3taaPQhwHuQMTLrdZGueHnU7r5qnWQkTNNyGMIL3c+mBDZFOFpKJYX5eZIsavAvkLVfiLT72Ax783DBK2kT5cvT7opc5BUi7GATkP8+w29Uq//eMf4dI/IhD75pQW3uaJFzB77mUAwPw+PTB6nhbexmaw3GWudJJCqVYYcx/G1k3tZeag/L9pauzvEzNrWNNxnDx9Cv0RLZwNPxCDsZjN6W9wI6f+aAcAkPcaXZjrahJ/RqTppQrbpZIDy6lum8xLdiQyA62cloprA5w4QdLqAqpPplyrYjJNwfWYyVXXDZzIl3hhkwY47iOvPcdZ7mQ0GsExW0xYYL51OocX1rbHsC711FlnnXXW2UbbGFGkoGVs8KGfrmy/zuH+18mMMbEaOHkArwQN6cNZvWX2KoJdSHcA5I2r56dywnF8N7WETWs4fhGNcRb3uRQ9+EgblajAG6uic/otk3i4WIwKQjAxrcPbkze2BJyb6L0t686srYuhDxbeoqDkAYB55nW/MTVoFqmvdNa6w0zrP9jTNLFCPc6FJCwRgoDWiKST6KOJJORVReeE8hvV4pWH7/R8gTxeQADA5b/9Dl781V8DADy3/QwA4OhUjuyZFwEA7WVqq2r2KQXlbKPnKYoN3hp8NNknieCSt5bnB7+dGaMCgOMjERZsMGAxwLxHnvloaxv5RCqiBVimCKvX62Frm6KNI8/V29ZFIUL+MWq6JWOYUmDpYM1S6JvnGQLXRbRtPJmTpymimE4okpgx/dUHr0B+KfTbAFQz+lxTfXkWNadE/DDn8837OJjTfkV63MMhXyKcHLd2dPTYzjrrrLPOHsuOwSiit7f89EztF94C8u/AwgOc2OPMG59EIwlKHh7sNUXn9Bc/jg9V3Jgca1jCHOA9ogozR1HGwJgEa0Ck1RJNb9G7IcB46biARbwivlC+dW2kINFO6u0tjqkcayqZbpCe32LUgHipkohCIoX4Xe+TcVGgcxG/MAvHGrf/KK6y0QiWox9PgDMQ89gtT8QsM7ANgaw5+4YHV67gvb8lGfLnfus5AMAHoz4m554CAMye+wQAYD67DwAo69vIWQdKrkVtChRyvR/jpMKapWW5QFYj9vQ9OY5qhoqxhuEWAdzUEIm89K1t8ugt00ins7G2/ZVmPyF4pdNCNe5SfGqJuh5iIyDJthRFoYq2qR0eEDVZCvSkEJDA8EWyiAlAIZ/rfPVxjjFo33D7WuMLxTS89qQOirM8KMB+VOsiis4666yzzjbaxogibayznEdfr/KaeAFm+bPElM0SP1t+8qVfW6fntLLLNVTE4wtJ1vzYWgrG6v/DkrZ82iRoWVHWGKjsQCz3hxbBPKo8x0Lqe/m7SQp9RWYkUQFe3mf6t77jQ2wzKv1jkPSvkO29iQ2OnESh/IXMqisv2MfCtbVx7kTWaoxUls9lLVwh+zdA9NBkH4INmdgeVbpPJscUEqVk8dJ1+2TexcZJooAco5YVgkyKz2hq28CvwSseRoE2ysXEiMxqBGW1EY16uklAK+Ng+PrkvsbV730fAPDiF34L+P/Ze7NgS47zPPDLrHOX3negsS8ECIAEQBIgtXC3JVKkJC7WOCZsyRPSvNgPEyHJb/M4enGEFZqZsGcc4dGELU94keygHbZMjUe2ZFGmRIrEQoAAARBbN4BGo4He0Pu995zKnIfMf8nl1Dm37+1GN5Xfw61zq7KyMrOyMv/9B3D7lj24uC+Yyr55292hrqOvAgCWjp/CUuQoVihRmV
nmuU5zuZYgR9qvx0HKpY6VUSeq0oXqfgZ/PuLYVB2x/IVzwUprdXWMLVsDJ7H/xhCqZCmmXL10secEShLPTvQQ2pIrTy5G4TeClCD+ZJ3GhK/z+7EWFy8G50Ub619QaVUpXtSEw4sAW6JFE5naSqgQ0X2srJBCqufI1BznSpmO83uZIUmYdX2GMlu/zPRHUnERBO96QSkOmlv8k22O4AVEGaJVFNe+8kyjNhlqw6ASKl/Q19PuAfj8JTtfxEc33vOiCo7tZYQ1pvnrpA5OUiZafokhFZ9orRAX5abglPkqQZdX57gv5aZa22x8tspLZmgZB6//41cVr2mj//S25KeW3Jq8/Drh4Tl7G8H4IH7Kn0/lOV4cr4sG77z6MgDg/KGwYdx+z0dxelsoeO6OO8O14w8AAOw7r2BEoo0tJBpZAkAB9zbPp8L7mhGxdEokn3JulBGm/doazo1Px3PRj2J7MI/t+x6LMc903C/gncRz8pYU0qU3s05Y1o149vI1MoudTKQcNZPjXNFmPRpxb4g4cr3DmpP4eEBMy2Bpcw7lx7H+5aUFFlURsdqPneTnrozf5aCJnhoaGhoaBjHMUTDFqHb4Aa4hp7GvdTCLXjNLrYjW9BiUVE0pOhCWWnEIvdxWkVToqtL69LmyWGK2t15M40YMTMKahzYKJc+3hbCtoRiLKGMZ5ziqsDxQt14o/3xmpV3KxBMJhanEV/E6E9c8qC6VBVItfCt52XppGz9CuEEuzqHbpRynAqZrTtrpUb7H9ULf6rJ3YD04ZleejCmEkk/nqbUGuBREIq899hcAgNvuvBs3bg+ip5PjIK7xu/YCAMZmEZ4CYlEdkwmsmcLGJBheGWriqGLe67S4mRMjAO4s3bZgJInRhTPByfDShcBhLC4uccIgSv3aAzCg0OPRG9uPmfP1uSjMqhcfYa3EPKPEblohTiNFXuTO9eytTROxn4zFuxsiUiWOrQfN71DpeLwGjrYVTXP7IJxM2us2MvHQOIqGhoaGhhmY2+GuTMZTQqiz64SnqMq2y35W+y7aqvCvz8pCUbpGEUZUhRVlGMc2Yu1j+eyUU5muoyhUJzOQKBWzZxljWEUhBKlwGaJnMPxg4picGh7D52L5kVFROUVvwEmPWL9guYSEJFBtzFgwbXZL0TOdpq4zHUUHkUeTjLhPnq8UlwjyYUmVC67XK0W/bj+gHdKIK5E61gsdUo1+swGvE4WvcBLUboijp5K1k97iyLMhV8WZj7+O3beFEB57TgdHu3fPBvPYS94Cdmt8VlC8LmJFxoNbmczKKcccIrmgcjLfaU6Iw6/WdVF5iudE4937CUcEpgnYT6Iyvr+I8VrQRywtUuykbRjFuE6rPaVdFbcA+g7YrLazklciljfGsHnsKJrdTiYTfkmcnjf2JcQOS79ba02aAI5HLeVsaGqurq1iQWLkhDqMVcY2qUTgcjFXrCdALVQVxSudk33iOtkoMvZsGoYshUT/K4uVz/wBgm0+v8VwyauXyVYc+ap8dURPQ5BvUYmIWOQkco8kHhKU+MWL5ZStWPuY6s6WV8Z/kk0sl9mFD7v0fQCA3hjeiMR6zoofReWjhLoXCJZDpOjukg+c5WzxoAiL7P2VAov1w1NF0uxMNEAXJfOfBHIUcSi5DVw8ETaDQ889i/vufT8A4IboKPxWXJH60QgXJ2G52DKJCux+zOHcvZkunDBqQafxkP2z3Fiq0QIqBBP7bhjZIIwaD8qzTdZgnCTRew4vvkopEnqL0WJUIquuiF9ESmTotmhrRNlIOm43R7Wg98Jh8cXfgb8feNiFLi3nHJCt9PRfZw1n5OK6sBkRxVKaxTOyAAAgAElEQVQ00VNDQ0NDwyCGRU+0mXqhANgfgG3NE7o2XEq2M/mHqQEV5jmUN6IM1WakOTGtRAt5jB6ng87wfdPzGYdikVLUiYsq9J6YFqaikdAFYk2FgjB5w7XHsLKrJ1O3xOAdQvVRfaF82UbN9FM5p8fWp+UBi2zYYtGckyGFtMS+QhJZ1ibNRu/Fnp+6RAl+rCgfTYxP7XppgNERWq2aD5BxN0rWwu4oRtm4J684ZeUV7yB28vQcr0SqinDkuFzMy8c+rSkhCr3H3qUyIQCOxg8GvbiDgztFfdGc0xAJmLXHwMD4UVKvsz4xwQXEa9t4oTHF095hFNvWxfhBh599Bvd89vMAgH17bwQA3HTnHQCAU88/CTcJyuA+mmYu2AUYL0mdgBhwmNtJsZMm3A6eOzESrTOdiHd4Tnj1TcpYApFzYLEf3WBUiHyJJuER051SJqI4KTo1hxHDd6+unMHaaqwvmpt2ow4LMeQ3KZ3tIqUzFW6RuCmdE1z8KSxH8fXF9+gxid/VZDLh8rSWmNESjwfP4+hTQV7ky6MFrEQ/DTcREZhPlP8k6SjXknnROIqGhoaGhkHM0FGIxNbL5h3OZRFB09t0pFPP565XlFL0kuuoeqxXa9mE9mhqmcXhaSu9EPKFAnag5rKckNByYB1FvGQ8MlG/xMvpvSjqSAluICl1iSuQoLubLF9V+h46kpzcSjuJe+kMROeRZ1AaeaHIWYws+Tm8s0lxZ4qhCtfzTwPzUXkyPqbCGZqpn5hRSlOGl8izpKx4+60jOPn6YQDAbQ8GTuKW28Lxldtux7ujGF/ofHS4O7+KUUwpmrQnS2wlEgFlXlwJz6z7lxtnmMpXmNyfSx9UjWT2Ks8p9SKA5BVxE1JSG46j1EVnvNEoHG03YoOMhZFeRsM5ikTrnCuabJSjKjmjSrTXjvs+WgwcxXg85hYvLsXcGkvROdA5jKNXtzbh1RGSNwNzKrPVx2ayj6faECUmSSbo9btZXC9ILK0qSnijV6c5IIp29b/Ez4sVe2XZlBIQIfEaiZxExCAbi96A8mfN10ZuhvpA+Bz0fE2v6XbwJunAoaK7fLFSIjBZGJUshBaaXs//dAVLowBIu6UPZTndm6xXlwHqTIdx7Hwfc4OP3RrGJ94CACxfOAkg+AYAwOiBh7Dr4eClffbZp0P5p7+PUUwU1Kk0BDGxHiv+LSvVTUHQGPQqiCYPCLd2I+KSEpoUKbdwvqqyP076VFHcx/5aO2JFfschORZYzEQLv1d9mPAGVHpmL8ZNYcuWLViJYchJLLW4uMCbEYkTaQM7f/Eixqtrs7u8QTTRU0NDQ0PDIGYEBQxHLcZgrtlpkiej4kzlmgGwifFgrhXUxFEpFRt+bIbJcJWqMvm/vriQ6nqJxY3/KvkVN1FTfXSSKCvI+zWsvBWjAZMlNYITBbthcZMKIshWEpYV5pxjW/taDFBGafjyekEDfS1Se7bCMcGAvGBNR/F1qO/CkRG1HMQIKYvFMXeMnOTw5B5sFq1bWjODRlFK+rJRG1tngEkMBT+xgSJ2/QRHng9xn3YtxUB6++8FANz5qU/j7eUg7lhZirmn3ziB/q3gb7HISvsezqTGDt6IpQDPHZAvgRdT6mpLN5OjmAGSKjL5LHS0mLFGD2nTw8c83uMoP7K2Q2fJjyKM6Whhgc910eyVwr8vLo7IshXb4thu37kDoxhD6lLkF
PpJzwmOLl2KyY+iAnvt0iUWlfEIeV+I7jaKxlE0NDQ0NAximKMghZ0X6pH2LaviQAm0bNEV5340ca0p6xVtXAi8DQoVU9UzWzkM+ozbcF7sGJXg3Rfl4qVe5MGil9BhyeM86iQkt+UjuTxvNieqqdRsPhtA6CedpjX8L86D8ZQFXOwYmYX6ieJY+DuRrrBZMVslK4NoZiwGdHv+8r8qCQvu0VkKlR5jG/UdXn3upfDEPpz72F8LDngHb74RR1cjH3VL4DK6Wx7A+MQboSsxMZJO7iTKbDKhtbCRF7MxFlIIf546tXlfchCbQyHPx4ZJYitRcFuIniVArvlI0Ttj+d2urQmXQfPHZnNhdWUF1OfVGHfrzJl3MY5jSVyr6x1cjGS7GhMWMRfhfTHezKYAqXRgA2gcRUNDQ0PDIObKRxHSzeey72Frpjz2f0iaU5QarONKQ9IabqQWkY9znQXBrWT4NTPa7BoqzjHazkWn3GRTT5dS3TUHm6o+Ah46mZJuNyBOXGwpqnJPsEOcM2zRxKalnecyHFGT/J4m4qQmdTgYRw5xNW5V+kXHWmKmPKwC31fUFM7mVnze+8J81SiZtVh9E2Vu0I1SR0zNdZBBqFNcTBefSdYviYVaxRKq1vpURzgbMhdIv9QDPsjAFydkUbMNFy+Ec6++FHJV3P3mK6HMbe/D0iRY5titIaLs6J6PYOXl74X+nQ4Jg7Z0I3ZiIyc1StDpYHg8JBFRz5ZhxHno8cg5CD2HCZozrCOd2LUQIfnPcJd8Qz5zhvXeS34JnkLicEf9nPS9erc5lyTfxoon6yrP71YiDoujrPei2wl917ZcaVtnoZbsbRoGN4qukw+RG08NZk23OldZGHVDrlA4ousAyhpgM2rLFLBXDFPek0xMtVD36UfjWLcpXtvkER08s0nBHRf2iVGLfCzHOm3HXtgcNycxKS0n/Fx+CcZXRYYSCC4VFaQmtuw2DmVjG1Gy/ooG4FDRiVkoJX6qbRRX4HsxALoYC2nEqZYXMYlr9fmoLH3qL/4rAOAjd74PO7v9AICLJnhtn993C3DgdgDA+Fwwq+3MKhbJU5mC5cVge9qrP/e4Xg/0WG4U1XmiHjDNxkB/0bINKXKaiWwhHHnTcXIHskgXIT98TsA6NesygjCZv9TusksbRRM9NTQ0NDQMYk5ltlE7GrHSWhQSfyhFZs49pFRf7Wm5aKvhWoKOI8QerDr+E6d9TeNGBZFUnCAU0tt4VgrS0SnqjVh6qspaXxWt1bnWKUKmynmrYoxJnCaj8iSnOag192xZ/yo5wRNzXiCeD7+pOJzqs6LTWLTBzdTfS0rWGsxHTdcSAXmleLcxXhSJLKyZ8HjQrUefDyKoe597Fgfv/TAA4KwLIqi1HctYjLm1Lx0LIqq1lXcwiulRR47iHVGso56dGSlvuIXdkPvgPH2usWS1a2VOeuV1X3kv/MyEqOePoyydcUJOzQ85J5IXEW/WuAxqTiW23BVA4ygaGhoaGgYxw+FOdqqO5dGREsgUd1wQuSIVU8/9pVNVXCfQcXucqVyrEYCsnMh0WUqaayLX0RvlFqiIdwoD0lufXLOXQc7kyjlvZtCtKoKpy3RATOA5MThgU14AnnR5/LB46Eo+2So9B0d0dXLOcTTdCofNDTHTKUzdpUHdzQg9YrrTUQwtYc7zMzrSu1wM7Xnl6e/hI/eFEB67TdBfmKUl9LuDvmJlZzj6yXmMV4IT3jI59DlKBGTQE5u4ybGIaigVtLXBqp2TNpbRpPW7TrkSp0p3FefinGOxOsIznau68FbaSeuqFxZfGxwV02ODwzy4UZCHoPPyYIpjw0YXTprD1gv56gJS7ijlZ4K2ZVyr4MRoWreqjBYAEgPFiyYV2zg1P8hB1/ayAbEFlwPH2DHkBU6iT+eq1kwkjrKcp9tXFsSATolP5SNTBVR55d6Q3mDlY+xUGdlQ4kbIi0THmx/W1FhB2hu7zuBvqFcfPUuy1JOYAKv0JYMWybFCFR28CSIkZ4LF0sSKXf8oWjiNoi/L0VdexU2vvQgAuPnBAwCApdUel3btAgCcu/Gm0PfzR2FWo6gpegJ05M3s1PJpRJm9GUpp9g+ZQ/Q0zRBCNgHKejh9owCcIqLK54gfWTkXjTKrE+V3PONq98mGwuOWUCVZ/dh8NNFTQ0NDQ8Mghs1jl2MUTVhEs3hMgok0eqb+IHGAKLS00eZ+cadXBr9JshrkHIawdqUC0kCYvlLBV8N8u2tJjqVuDrV25OV9+TvpX8ZjqnI1M2ufHXXBlKqm8S2fXfqJlKKItB2pts0Xf8O4V1SDTAk7FUU075QbUyMtUyiuE06BFLqc6zlyr70FhyPXTuMm7zP0OBO3EUVEpq7Q5h5oMQ0dM87XGKBLAqABtcRdKtOVUlLG9vdSjqKqGuvZ2cBn3AZg4FhcE8soj3lus/fIX4zQo9pwM84hN2EuZNLF79dYVW/0oI6Jp92FVRz+7lMAgL9y38Ohjm2LOLd/CwDgxN7AWfi3d8BdDHGL1uJiEbOqwnrHHtkSD6oTjix5P+lHMSUARHGC1gUDo37XFolStCefieP/iy+fvw0v3vlJrSXnWwox9ZWcG1DxpXiJk3XPZ9+jbrkK4l62y5fzmZ85ECON0DiKhoaGhoZBDHIUC0ty2YnYFYDIkfsxJBVgL0nEeefr9Q6Y71pKVleIAod3uDyRyUY4ilr609k1TN+9q8/IqdP0YjhUqNqkWPXc5ut3NAMy7BxZUkq5EtbAKC+ySJf0ooyNeW8iL0BRWolzIorbwllVH4KCO+dIg44iNVU1Op/vOqW3uVy/U1wJKbV7LWPndsT7rKJbdQrXzCDEezVEmWe794ZT1SYpcFOGOnj55t7a6lXkicSM8ehiilL2t8MIxElQvC9vw1U/Bk6+chgAMH47ONfdft8HcWE1rBHH9u4BAJzdtQfjk8thbKJ57LiPCY/gQVFjmXMyov+k8Uj1mShgil/ajHUTvoeB2GJE8dsp352sCvNxFOU1eXbyHdK5LIZezRYgfEvxPfOYVriZdSiHhjeKhRgszHv0tOCzEo2yeXlQ52xMmTUxACYkXoofvw+lQx300SM56sZfMzH2Gi4LycbIc1uJOPr0nFE+Dd5k4iurPjtaNDthx2Xxk/lDYlD+31gV9mAGETLNF8OKYID8iIwB+kLER8SRg4/9JCsi30MWft4oJMBcLiXR7iNe+6hUPcrjVZ99q74sFBYT2vT0JpzlT48d6GCwcuZdAMCrTz8DAPjMAw9h754gctp34AYAwIXde7G6vBMAsBqT69DL7uCVRZ2EZs/FL95LOPKNQESpfCL8uxna801CbglVD70jY1FkDQwFkZ8sQv9scDib6KmhoaGhYRAz/Cgkvg7tUKLLi1SL9ehIHGWUos/LTyCEyt3wttZw/cErNpgV717iOJEBhGPilYMCMjU98aKsV0H8xBI3nlPiKE6WFJ/tvIRjJqyXs/DKTNFoKi/+ZEV3olwUsRhdyRP1+F6MAeDT8uE5GWsw4zOqUqLZOWOshIuPJrBG949+xHcwMiNW3r76
/A8AAB88+ga2HfggAGD3nmAye3zPDVjdE2JCTVaCv8WY2Effw0ZBl0m4nvI9rDeek5hnl6KWa4iBSGCMyJTq74x/8TkJTqg6xWI3kdTU0iBXn09VzAjA1ziKhoaGhoZBDHIURNF5Z4XCyCgfYzx73LJSyhpOyi5Ho2ID5SZh6vdAfJaG6wjMUToxERVyXGTQklOUb5KwzTIXmEOwoqwWOXbkdnXaVdYhkPLUc/RawryyanHoU7638d6RN5hwlNls7nqlvI1cj/MIWnEEmT0Q4lx1cku4pjWZVE7FVMudWr33wiFkSu3gFJhS2t6DjVCofotO/SYFfjRa6ccYxbDhp4++CQB4/vHv4oHPhyRG23dtBwDsPXgzzr91BAAwOXs2HoOuwvgVLOQ6CtgqNZvrW2a9qzQSANVxjbISCnl63tRMu7IW1sJaNB1FQ0NDQ8N7jUGOQnZpSTiSW3YE5Du3Z0enJIHRtb/Bvze4DiifeZEbmlQdjzwkDD8Rwb04p5GoX7gIwBHXOhYLJ+IQRKfh+V4xdDHpiaSt66U+ldwbUi8Z4DLBT/0w0jYdN030FuCjidc7It34yzQ8WGxy7rTF13pareGVs6Ap/jIXw2ZKXriztaBnePXpp3HXj/8MAGDnjmAeu2f/fry9YzcAYBzPTc6fCcfxCsdAssIyTG3h+jB9PErT4Cv/vdV0DleivmAltSmPGMSw6Ik/BsPBykSDWDPd4uJK+R2nnzXskV3bc4a8n8UbsjZQQz2YT6mzGUie4yvnquUzdjP3kNa/K/tyLctZVWnFdZXlqkmmTFluCEP9TLw+ST/qvJhpqnPsdd2T1y5tFEY2CvYlMGqjCMceTtquCRQEJWCfJT3q1htt0KOIqeSNZsvTxdVYiZVFScAMUIyH94Bn2VOm6Lasa06MgTkKQlVsk4l2vQQb5JZ6tVGwrlx8GrhtlvwGDAf3owx9J4+8icM/CBnu7v7opwEAu7YvY9f+oMxeOfkOAKDfGry33dlzas7G9jvHXveZc1HR3tC3KfONl6pShJMjTaI2H7FQGgOsZx1JxUvV77vyLJGsliazqH2jWr9dWfc2svY10VNDQ0NDwyAGOQohXQ0rzzS7HI6ecyFLulSJ4qnZvsKETekFq6aIDdcvmFVQ3AO/fxXPhk4lHFP67q3xcGxiSWUAS7GjKBw5PCtyTQw9S9Oq9z1sfK5VnMSQiKCM+hkfrLpnIZFcKanXKLa/NypqK4fhlQYTF9El1H5GcXv9rRG3YVhxnXKyXt1Zp5aTeF4m/ZiNbgc5KlJMK+NZLtaRV/rqJbz85J8CAO77YAhBfnDPFpy+KTjfnT9+DACwFj21zcoCzGrajsRoWVHN0yj3WdzAkGL8esY0jqY6BtA8TFNmNzQ0NDRcBQzrKJgZ8BKBJOMUvMeU3Yp2PpEtZpZgud6s4UcUQj3a4pyK5CBy5j6jjK2lyB1sTus74WDdRCl7WSFOlHA074QpyCJf0VHMK8dNijET4JNnq1Oc3Ch9BisCkGalkEud15xEHEdjJDozWwtrRXt6zmCabJ1isyldhaJKAWCS1BGLTcKPxZHB2SMhBeqZN2Ouivs/ghv2BWX2u/v2heMr0+XqxnaK6pW1pcY1AJjKHWiHxvwZhYXF9YKB95eGPUm5XKPunXdtnTXvBzeKvpfJm+fFlgQrKPIZa9GTOqubVZzbbCuBhvcYapIzSFwSVqR4ksQekIUibgY0q5zx5eI6cey/QP4InRFFck9Z5+JtnbHIxaG9jjiglKDTFKIGplhsvC8VqBKYzklsHkUcGfaB0H4MmVc3tdUZjnzA12DVppfl9Ua5UcCXiYuCKDhuFOoc/+I+jeJ9E1agU2Bw0/ds0fTiU98FANxw2924Yf9eAMCFm4K39pGlEGh8xcg40HOsspzQi+GQ6In7mfQp3Uw1rlfRU83gpGqco5TeQPx3nUr35pnd0NDQ0LAhzPCjEPESp6zMEokkKVFqlAlVZjzz4V6zuqBdT0RUVDynDkxCiV5FDDFH660qUzjWrs289zoKrRtESqk4IxES6C7n3sakiJ4AJPzsKE2lgUQEoKRGE8V5sMd3PFotI6UWOClgajRTPotNcSU7nT7SiP8xmZQmpFkn3xKJ1jjcOImxOvFh4k/QeekzeaV7FUkhM4Ut2hvbWJ9F2bepnklc18jGVKeux0Is9+YrQQR17uRx7LsrpEUlzmLn7iCKWju5gD6yR5bEi+NexoHWhwoFnXswh2vIriGLhJuJnpTvi8yBchRM7mE/L+ryyBk3DSwuif9HhUOfWVedw6rf2TiKhoaGhoYNYC4dhdY5uEnKUTjn2JlHU8SO81DQ0Us5U+7mtb0wdzkMurZcDjzvzo2ijbre6TfIM5jOqDgZztq3hZMQLk2uTW+bJKsRue3Qw4ad9Wo9NZjd+vWDauw1laUppJwaNBUBdvy/Hxn4vpdyCFyE9tIGQk8oRpHEdYrzdtRLxQlxn7MDRnFA6buyKlWoV2Nqc27Eyw+bULixajaVjRR6J/VxmiWdf4ljTfXxmkfMUMqJwUK/o+I++85g1Fzg7poyiq2Reyz3U3lSR5amJyUSPHMGZ08cBwC8+tILuPGO+wAAN0bHu7vvCxFmz5x4GysxWZK5uAoA2Drx6Jn903lIrPqtiXXFPSRzK56jo1UcBdtgy4tnHWvy+uVboxtz50guqXVClTJ+QDeQcEtcLJ3L4fG8MKh70yRWSj2jGqDqZWMRzWWma7g6NRUzNgppKOdmZvZW2MRcSaiVUX38wBMP4AobOYRqONzrR/oCQFsrCJxLx+0vI2rKNvqIRezhJIQciWh6j54WXBWOnNeCCVn0xEsARnG2G8rFbVRojYiu4mMxpeVzlKndpbOOy/fCET7isRtF8c44+JEAgB2JH0M/pkVVrZAmXy2z1WKTYWBgJrFNsW2HX3gBj/zkXwUAbNkeNordt94Z/n/f/eguBbHU6muvAQAuvX0coyxrW0D6TdSU24Ne0moBFRHV+ojKawVpn8tzNcg6mW9+89eh0URPDQ0NDQ2DGOYoxlqMQcfZprB93zMnQaazaUyh2TtZLTZJzQv1eiEOxLpTC6uuTwrnSqDGVXE8I4jIjk22rVGUs9wjfjtputHOgM1MWWzaexYb0bneOeYqCi/YSgKv5OFzMIYGopAn8ZiDMoHNjEWsle+Lk4ZZw8pvEQGnicaSBpkyJHv5G1x2yhXRBfNk7rBggulrH+NznX/nHRw/fAgAcMPdW0O7d4fggHd/6qfwdoz/9O7CNgDApQvfw2glmNiiX431SpIz7Uge/p8xyL78bUxal3daqFHjaHOxdom5OZsNIHcDCefycam/Tz5PUovEFYHq1XNiuO2No2hoaGhoGMQgRzFek2ivsvvMx1E4V1IwLHumK3NuU1oRPKiXvYbBSZuUgjnX2fxl1lUAZf/5bTvhvlw6ZPE3zY8sD1KsBQBgDRxFcFVUmeWkW8QpGKmjZjBRoeRKk+eByWkMy4tH8TixotPj+E9ct5G+Rr0LOq90hfGa1ZRtvEZjZX0e1ilrkupLRsZWYwr
JF4xJ1FGQBAHnz+KZ7/w3AMA9sY3b73oYAHDDzttxaXswlb14LHAWl3a8gsmlkOCITYgTwQFRxLR2aMpY2lYd+8xbW8bKq7GimGCGSw5Fqp0nzSzfPAMz9QxawZ1xmumtWkqh/wezAsbrd59z3Vq6UUfjKBoaGhoaBjHIUazFBCVhZ40nTSb/RIUSrNhdaUsoMfmcFyKL5nuuU7/8mmpFR31s0BQ6mTAKOcQpRVWiI0fRYw3EcS21pgUmQD9KB98asOVU1ynZ7xTquxbCY16YlAVK6u8guV+MahsAjDrDJsbElTrjhIsiyy/rJHIu62kUhc5E5yboxeQj5HhZozh+/eoFHPrBkwCAhd0h1tMjDzwKAFhdXMTSUtBbXFwN6VG75WXpbLSy9MYrxznpwhCqvZrSVe/18jG95mquliukj5gFP6UzNa5nGkqdh/RlVh2DG8Vk0pcnjdhQDzeqHMicRRsa9GRzqtW7mS8qa0/4LaO6nkkxLSlJ3m4D7e0+nc3XdRXtqLHoU+7lG4qNyiOPM3Q1MTRBk8CTtGmQcs4Ak0kmFoAplLFqTcN4TIYVob8LC5br6zlmkmFlc58tuCKKUqJEHYfK5x9xRUkYnBXCdSUTsSLTisWofqfELbT7eVj26o5HpeDmjVO1VWfkA8I4zhNfTaIBoPzmjIGPbXJ2Es/1mKxeAAC89dIPAACrJ0KO7R1btmNp7RwAYOdiWHrOOQ8bTYER36dT7TSVDW0oORe3W5/MqDOjfWUq36vV5eZ4dg3Da5sSmQ0q0/U99IPWDFuUrfmayAfvleg/7/vsNa6JnhoaGhoaBjEjcVEFOgzmIDKm0VTOFUcNWygTg+KcqiMtTdvrftRhvLCXnGPbKYo5iix6CyYHmUAaK3FGxoxaa7WzLl8j81KTOeM5OFj2GJY5WaSiZEZRy3zkXI1i7JhrSbmTsZvws9hs1/ZCsbLozKjnBgq95zIWPvvUPFzBSWul8HzRnD1zFDaK9dYmHl0ct3ePBk7itacfAwA8sHc/DowC53F+V0hmtNJZFZaLOC0tG5pX+JQ3LZEvZXUYTBNLXUtIxWPZeNC/rhQbTRMlyZxdf6fbKtvQ0NDQMIhBjkLLZAnrt+BUCrWpHEVdR4FCltbwo4Z53q2OBEqxjRLKnIis3nCE0z6n5DuhvilGlDVKdp/Mycg1UIRTFlpbiDFHPFWj3hLVk88u6fLSRqLgKdcDOeN1iquh5EceFr2LhiY26lacYeaanQFj6lLjPMieVqee9W6j35XHKLZpjbmwBXRRfm7XAq/w6ne/DQC45/77cPPNIYTH5JaQLvXEgf04/fbrAIBFjizbQywO0jhX62EscnPh/HxenyRwmk1xJxFra3qGTdH3+cpvOpJExSDX43gvjqHGiP6R+52k5UWhW6th/aKn9wBVRct1avXUcHkovGXVIkcWTtZ49CaNGyRWTx4uLrpkRWR7V3roGvGtKBx0zbAoKded6nbozUHEOrJJTdswld5VHUtlNgxUIDra4KR8rpA3qsLLJsSMR0e2+NGPwthF9MGgCYtxw7jwdggY+PoPvo8P3hIsoXbGLHh777oD7x76YahjcjEcnRFRR2YBV4dnP4vMjTmeyl7kdbJ2zBNrTG8mrKyuKvtrdcm4zBJHNdFTQ0NDQ8MgZnAUsmtd7VSlNdMxLY7icrNrmuNMBVeV6HivRWvv3fPnnU8SMl2iprIQiBXcBpjYrFz815axxkIkahJPkJLacJ5tOmeNYtWZcBVqjJTfEm5ciYtyRSMMG2L0atwlVL+J/8fyVkQLxkmfRqMs2JN3/FNiPtFDjdKFUiGDzFYEcB5uwMy6hEffRxEYew5bdN0otjfWNVkDAPzgie/izo8FL+1t+24GANx2/704/vxTAIDz50+FaicS5tzGhvfKtJPDp9M71gmoxAaWW5mbjeq1RScBq+WjzpcBfhcVn5rk3AbWyeGlpyZdiQdSVjvdfi3mzLjsiIqEtEDjKBoaGhoaBjFDma0VIXRu+n43K2a8KB9zBy9dpygLJVKtL8+JAHaoC4Pt5FpzS7wERpNmqjn5FqwUZ5Vu5XJS7zWVHA7WGb4tEYsjUCsub4e6LgrUWmemk+GMLuYAACAASURBVAs+NCS2Pb33veZzBK76mk3+WnrLEWeFCVDBbpD3T0a657lm1TikD+jhxCkr6juCvjjlAtJ5QlQsXRHKzjk9r+mWVAHrreSqoCRIFkYSFkVfNec8k32sv6A6Oi85cCBHW0mMw18mNYCSl6VaX+6LOPfRWK3C+RAFdkzlopL6xPETOPLCYQDAB37q7lDmpv244+HAZTz3zjEAwOT0GkZRWW/ZxzDm57Cd5CHxQRmy4Mesn3WWYjdZftvWTqeHk6RT9B2q9ye6nayQ19S6XDPJ3EL9I2LHPlNc186Abo4vUPetUFaHGuNFvSZTG70UmcEyzNgoZJLXvf7SBsqgznYJv1wUntmXsVHkd1yLVlWlqmqzkCpqw0aRlRh419ciyHrHqYxkJN5xlPSn93BxUSVrJmc9L3QSQE/ukW2F6nQwXRq+vJ5RsPKhQj7OOj1iktKaiMjnpzUG3qYbkLUGtqN70oWgGxku5/h76dXOlh/Vb7Up5P3MhWpUjjOFUzviZtNZg+e//zQA4P6f+HEAwJ7tO3DP/fcDAE4eehUAcOSZM5isRrHSOGwYizHr1Ng5NfaqTdzONJe4bi+3dMaaUSd46d7BW6866v4SlU2hFiViHd93Ez01NDQ0NAxiBkdBR6OIpOk71GZS5mno23KHX1899XPXDh+RUvnJlSqVuglP1ErLomqTHK4tVHitiriNKWhSqPaeU6BKImERqJDtf+D8SVzFtYV/dR5mFS2Auf8K5S8KRBFFzKIC9f89PPePuSVrOVw9icCMleREuf+TMVYFPRTRrs24Eg8RdxiKJcQKUglVrlGc8obFwlmsP1jX48SbRwAAxw69DAC446GHsXtvMJm9+f2Bs3jr8Ctw42hjSzGkXBBndRjBxCDsLJ41Hf9XMwNdL0dRu08C6s23Fm1G0MB56qhzCroAXyzE3y0VakNDQ0PDpmEujkInT6kpta9E6N16VETdttk6imtR93CtQOuTTHbuMkLBXGHUFPO2PKeYI+GYSE4OOIo2q00ixzR3o+fyBOg7ZIgUsjWswGTdh62ENCdlNUQRvZkIaYLpt8SeEo4DfI7alStljQFHmxWnNqHI+TsnU0sruqA0+ihH3OKKhfMIP0bMWvRYuxgiy7701PcAALfffRe2bg2pUm+4804AwK6bb8E77wZT2eWlkGoVq5Gj8BN5B/zarRr8jWv3ricdxcw1N9G3pWVrTqDT0DiKhoaGhoZBzMVRYMDF+2pT7evhWgLlVepPKB7Q+pyLrhx8/kuL3zdZR2Ey0+QaRyEWOtcKTOW3A7WwZjacz1cDgFUUbCKjnPAm4WgVtY5FGqtw7DqTcGJAoGrLfBUytj4rL3Y560fiEMZcT7Rs6sCOVmR15NjxyoszFucxcBx1V9SPpdVTMieo4b0uR/3T1mC9vsQmtst2hMla4AwO/y
Dkqnjr4Ydw8MPRAurgQQDAXR98EKeOhPhPq2dPAgCWYlUj41Tujhgl14yURXrNguvykDr9rq++9eoXLhfT8lkUXJEBryHaIS/egVkKycGNQk8gmmgUZrnvXVF+3Yl31LX8qTqHbb3s+gY5MWPM2qvbnSbLiaWzIG56ca1n96uNw/QXweaXqshwAqJ5aq0jj0tU68uVMo+93A9j9n0iLtHfBSAhyIOSNfvojcqxbVSyIl4wo8gphtDuOxSbqrWaGOEWxzLSQt7L4EE+5RQ23PmBEHJmymKQBfTrrAVGodyEWh9lYqEsBe0TkRkphSnftbfqjceBYVNh2ZfhaZNyEO9rOioDYJv1ysBjIbbj0pnTAIBXnnsWBx/8EABgy5YQevzgLbdi7623AADefSWIqlxMoub7np9JAxzSPtHz82Ti4muQf+85eO5k5VNMX59q4vghbIaovqbMnt6/VDRZ97mqo4meGhoaGhoGMchR2Iomjtnbgd1QO9xd7RhR68F8Ia619yMpRtcbE6dhQ/AGpW2mUkgnU7HCESJQxBSpWkKQi/Nbb0ougG8grW8nybTYuMN5gBzdMvsKnWgrbV/kUJxLzialPCnNpeL1SiE55pPRYafVd5mFmzbecB+SFKgg5TdxD+C6hCb1fGST2nhtRKHCe8emxH4txH9689CrOP5mEDPtuj14a++98Ubcdd9DAIDnjgfOYxKNDrB2CcaN03EwNaPYEkNmydNEOMNSkIE17lrTelc869NLTZnd0NDQ0LABzIgeW+oO8k15etq9dJdNEn1Iocoza4rLqw9t6ijy4LI9OcXROIzNRyDeRAEdz/J1rXAslNg8D41KesQ1K7ltpOCVpSUnOorvv594ThREMv9AtZPcPz7L6PlSttdW0vcWHEVRQs8tAyiuaNpd3LdOcQYkEbCaM1DVSsircIqjtoJDn+g+0a0UYdeJlkB0gTHWk3cTdDEEykIsf/r423j9xecAAB86eCMAYMeO7bj9nnsAAEdfew0A8PbZc6GyvseIEkpRmBaUqDkHEoY4Bbpe1HetMQhzY0inq+fQBpTZ9YcMs2pFU6pOEHn91w6KOChe5VA28tHkgcacq03Xhk1HsWEA6SRPN4M0KGUsTbGheiVy0inYsykgNIMOZx1zRdcCsHFTfP37M+l8Cq3ONriBQHYeNfGTtshCbJso3El6xkcP0G6QeOlTkD8uSPU79tY2USTnId7gLIZJbPjC80nE5uDVphR+rF1aweGXwkbxvgc/CADYf/M92HcgbBq3vu/9AICTbwSPbu8cx47yFdGdmZNQS/pcXuVf84ie6rgedhZtFDC8fjXRU0NDQ0PDIIb9KJiqkd1GUgZP5yhMooCrcBn8Y7ro6WqKcAbjpRhTKLPhvQoRnSoEryXUhBPXI6pzwQPQitnwC/n7kLDdBrkXsfGeOZR+oigqlj1JkiQA8NZw/myn4kZxaHNDdqMiIjI2b7tHzRi2KMUmqyJm4rhVMMofohSL1uJGUbspwqzzSqykFLrsfZ0z1l7lC1cTS0JV1zTtJIqL/ylTYkpM1BmDtw+HqLFvvhxSot5w070YbQupUnfdHkRQu+4OHMXam6+hf+coAGB88TyPy7ycRNq5Omchnu1DovHr/asC9JyZtd42jqKhoaGhYRDDDnccZVXvtkTxa5kWyXk1ZR6PWV2znlUIiKffEI8xqbupU1Rl4nuRG3PMGiPRKKULyvCvIPests1LLhmIxzefcyrxk4oPL0SYcColhschH9dOj1+ljeyYpO7jBClFM4bf2ZWK7ZWfs9YIh6B0DuwN7PS8S2kfVrY6z3NFchd4+MhJmI5OGriYcYdk8o5yHFhwPgqKROucYac9z7L7gK6rUe0envQbFR0FHSXarFYO0yBYeJdyudrRk8bAqdSpBZcBqKi3sXzv1PiSIl9xBWQYYFiRAfa79TKHaA5a7lOsa2SZEyKubuQ9xpdWAADHXjkEADj34Bmc3xG8tM/tvgEAsPPRTwAAzixuh7kUXAr7i5Qg6ZJwf6S36CxsdCik9yfT3IA9CuXDlDXLSLCv0gFXo3Tk42Huy28jT4iVOu2JviAPgjEv71KX3sh6UH6v83NFMzYKfsY1glJpyf9dhiXDtde/zYOeguu+V2SDm9OYawKVEfEiamEP5N6qqOVp7mwYhzFZPZmYF9pa3mT4k1d+BJyVzspiQgu0Uz5JdK/jhVplLqNjkqWsnP9lmJ2hRU6BF8gyRElqxxI3biP9Zasy2qQGnufVssWh0+HRrYRF++jzLwEAXrz1Cez60I8BAHbs3R+KL4TxXjx/EW8fOhzuHR0HACy4NfTRC122T/2sSncrxgBcPDGAyIi+GcRR6Z1fq3f6/bXvdtq5q4kmempoaGhoGMQM89gIbzFomJzhSqZCnQfV9JTV65u/L+t4UXO70L4HuJY95q8MtDmjEv9lxhPOedHZUspUOmEdTAykZCNn0XnL4cvNKBPVeoicC5TLWcS2EoOIm6S4AjWHM3oumWPJOfkdjtNHA0aLJURsVFDMJLXpRTSTNFGZjNOzDdbxfXmLzi4AAC6cOgsAePmpJ3H/npDMaN/eEIK82xrCAi4fvAlvL+8AAPRRjNU5xyKnruKjUj7T8eDoHOUshap8G0PnuNqKVGPaOamjfI88pvQ+zXvP2zeOoqGhoaFhEMMchSYM1pnNZr36gvVCdngJm113ohkyZ0uVuBuBfnbBUfhNesgVwLAJ4PoU6dc2alxmGY7ZG8BwTKhMsdsJ99CT1/YIwES/Z4GDh40RXTkclPEqeicp6yGKYhBnoypim3QyiUXibEZ1eZ9SvYOxjTTdzx564Yr8BfrI/XhrRDFPMZ/gxfiDp7yVtqW6WyQOiOrTM32M0hvrP/vGYZx48SkAwMF77gQA7Nl5AACwsGsn9txxBwBg5USIEeVPnsaIKyZFtBXDkSzSrq/YeShCvnCwDddLnUOOaTqN8pxqS0WncSUSwW0UjaNoaGhoaBjEfDqKaxg6F0BNNVAk4/FemUdeOzt2w9VHQkWSubdTtFO8RrlXbK90dcRFwIEMoSjsBZuQA5LDh8NpgNkFyaFgRBdQdTglSyTd9tk6Cm31JDGt2MSpbhXHn0ssryLF+ozCNUaE52JCr2XsKcfug4dePEdtszBxzCm0yvj8u3jzhcBR3PSB+wAAN3wo6CrWtm3BwQ88AAA4c/wQAODsuSOwq9FMPibLMF5/3Tmrp/U/uo2pEkaPG5dP9Bj52qLKc/9m6Tmm6yiuJcwwjy0bbApv2LpSp6yrknVJXVsvm5V7RmvTvnkhXJ/n/4ekKXMnE8o2JwPlya0+5vVKbqZN2o2iHLdS9DQrjtdGxVCDbH5149+sZ8Y6KN8NknUklIvjMOkdCqNV79CNUi9pMn81nWGfAx83FtcZWDKnjd+Xc14S/xREjFELtBafpW0M4hKTXNPj6LJOWWvQ02anwo1zEi0qTn2BYSW8Fm2Rr4kEbxDxUkGI+bQPVAf1jxIi9RjjzPHgff3Sk38BANh3170AgOVdt2D3HTcBAG59OIQif+HYqxiff
AcAMLqkcndHk9kR+ZXQhm+VZxjLAacIp9cxx1KxUa1Ebf3g5nIdamGadlvyzCHoJXwj30sTPTU0NDQ0DGLdoqe5I8RuEPPvfvMpXtPKp1Qxb1v8vO279pS9872rlDLV911fCuxZ0OKDMnpmYW7qVEZmJaVwVEekiEmKFQLRuuQGW0nJGhzuvDqTiZnKlqE2aecx+2bOxStz14yq1Q8VJb8KyV4rmByntEMxcHzKeI4MwI6IHujXgh3ykRdfBgAcfSnEgbrlYzdiae8WAMCBBwKX8fbRh/DWE4+FeychHHkHw6lYiZMgH3f91uuY9xspOXwZ09p3UnKGeXmdmnieeNRacrVOe6N1oXEUDQ0NDQ2DmC+EBzauX9lsrqN0jkmeFs9N0X34lHqbV0ehKqhU+aNEaQfMG8LgR6PvSieQXWHOwsmbp+ix1qhYWSTfJ5n/xAjF16k5RicpblRiHpu3RxTc6bzLZf369/RrNaR6Hz5ZlPEV/SRT6ZoDYk+x/EmVhhjA+ejFSPocY9H5sDSNTwcnvNee/B4A4MAd92DL7bcBANyBvQCA2x/8KC6+E1Kmnj33g3CtX8NCrK83FFurfHwWT1gaNRM6BtwQpzULpc5G572ZB9xqUrddAc5irqCASdyZXNFSwex4KPMN4jzlagryIc9s72tBntfZDj/d8zxRUl+DfhTrET1Na/N64t9cb5uIVwpd8UuISudeEyjhR9+LJzLrf5XrsgTGo6OEGWcRlbW8kcinJtrkalhyXxNMlB7c+W99pPhTEnRQbWykmKeAgc6xxRT5J8ChSkGWIqoaxSnnyMudnumMxSj2ZbQWzh37QUhudOTep/D+20PAwPG27QCAvbe/H7feH+I+HT4WlNrnjx8Vi6K4IVuQMt4VgSYMLMtu5hHhTPe4rm3qxd1TzyUK8TnWjNqnZ/zmi6Ga6KmhoaGhYRDziZ4uw3z1SmMe78V5zW4lnHoTPeWYN+7NNQ9TVwDLdU0F52am6kjuFr3cplTc8aC5CHqmRBAo0ocaL+JPtu4Ue906dbgekeAUzhDpZeNVfNdYH0XVTapQivyNw8PG8OzE2ZhuBDcOv7dEC4HJuZCk6LVnnsbBRx4GACzeGnwslnbvxS33Bd+KC4dfBABcPH8SfjWEIcckviwSCfZOZdRl+98pIr6BllfXoHXLK4o6N7rWGigxFNW7wXfVOIqGhoaGhkHMFevJO+0cVjN5y87VEhglQkGSk2YPSs4pylWJY/NbfErEZYUqhnB+Ol2pd+JUmZhVYURqTOVtpTxTKF6SFLF8V7WD5d6U3hJeyg3AmOmUgpaF5+fTOuYjNTjKpi/l3Sb8k5WPxwEl67TnF7LfRMU7Q/9l6uW0V/G0qSztzWTE7EldJpUBHDvQUXnOKeEMaxbJmSzMSJKdyziScxhHlO1UWtVMoG68YQ9kFeqs6JCORMtmsVyJculTc7IwX1WDlesejAE8Ow8KCZuPPc99p8dPPmDS6XBrncTSmrg1AKKIPvHiSzj5/aCvuG1HSGq0fc8tWL3pZgDAngcfAQCceuco+mMxFlR0LJzQ3LAeC31QoI/iEtj7BXgsxpatqDbOoSfNPTQBNpWucRspR26ya+r7MrqOtJzwiuXaacz6uf5ZwcHnEj0ZTB+ucC27OoV9klPUCaVGG7Beqjaq6vVZu2+OBfeyJCjqrQDJwq7HTUpnHzvKCSQ8vS5Xv78oMAdmTZryHahJWLH9ZjjFtBfP2GSR5bzVDU2PQehNKT3l1WKciKN8es7o8WM3ChJ7GB5AHjUHXljYgZ9lUHqu0GIsFIKvzaM5LdOkf3oTnT5SecSBGtXlraov3+DUepcQhGQ1REcvBgJ9jIHioy/E5NI5vPH88wCAG+7/IABgtGs/thwIYcl3fyB4a+9/8xCOnj4RHrAWdtNJP47tWMVCVKD3k3BuYhbQR3Gf5cbNt1Ek/c/6NO1qea68Vie94n9qV5jrm7usNU7QRE8NDQ0NDYOYP8x4AaGiysBWQtSkIqvUpO9qKErnjc80X13l/aIIp4tl+U2mqa88NOeXi5RMpV/WMCk8PW7UlcNc4itf54tnhY1O6tDUHhH3Timnec6H/60y3CfltIFhMWVPJrFGiS6jmMuSl7cxbKLKdQHK67g27imVqunieb+HoVDl0ifhjooG6iOXl3NJe7OYXqayppBp6wger/8wcBQ3vvgCAOCOg7dief9uAIDbE3wrdj74IRw/9S4A4N1DxwAA3TiIm8z54xivnAr1jS4CANb8GruGK8n5VBHwekz8a4FJNwtDrgCbicZRNDQ0NDQMYi6HO61PK5RRAAqFDIyY+amtiJRrJqujFvl15ua4kd0z13PMaTsmyjZVvlDPqCT3FQ7kusOAl2hCwyZKVX2tnojlao9JeHSmZJ2DE9HnPFxh1OG94UQ3YmKLeM3xXLCRJgtRnehcLKcTBrETnHhtO6TjJ5GhpEupEUg8B3WtMk/VHVPOSy01IxNAPp2Eh5nCUcCo8mrJcNwHSczEVfANEghq9UI0lX0uKLVvefAjGO3cDwBY3hs4i/N33YNtq9Ez+7bg3e1PnAEAXHzlOVw6Gu7dQqqj8UV0XVCc+0knzZ+i65qlSk2EJzZdb0ySrrUmtpnNecz6fjY7+VHjKBoaGhoaBjHIUciurmTwgzuU7ISlBc08sRCvPDZCydbv9epvfoU4rOsfCd2jjHWAVKZbdNZfHV1UXu9m3Fevq9THMeNNJktWyOUiPalT3xVT4x5MbdJnRjqfTnI+cHljuKBzRKVqWXXNIkraG58AcEiLIR1FcSlFqYwsaeQkjlXlWXRkYynLJ0k/QzoK5z1GLugaTr12GABw/MUXcfPOYCq7dEOIA9XvvhG4f1eo75aoMzoROIstu/fjUiSRLx4NMaQ6M5b8s3YU21HpLrV1PfNWbLaTe2tmryntvk6Lq8r3tVkYVmaT7fXMtqcnk2ByelBYkUXHyBr2vRq8yPYZScSSD7A6lT06b1xFwef1IIr4YEYH4yUztQjHB4JXZr/0EevxiIfaojn9yZeFeSbLkBgBSP1DpGDaUi2Jy/1QpnkMrzchTO13DdNY7vk3grIuuaF+DycFitcsi107yX2tprIIjsgc04hXcv6NQIXLVo+2lLeaMqkl4ttySSbxlU6sU/PcnhbHa+oiRH22krxJqqXNUcVdz0xhtSDN6GGhzZHyhPMCOkEXPbkvnYrxnZ7+Hm68+U4AwPLWIILavuMAuh1LAIDVtfCs0c1hg7FbtmKhD2KmS2tBqW1OrWFhvJKOhjU86I5SFVJ7kjFAASEGps+xNOhmhXCqrXtFHK16vYTNIsSa6KmhoaGhYRAzzGMr7GdOZBkdbterc0MUXZ1qmV7vdCS1FuVLEZhm9mok/NAz63v49SNYuhx2NPd0nho1N3/WnG3ZTNHTMNdwFZTn4kob/vVq3BLJK1H8dDQsOuLPRsW/5ltrYb7pkdYWY8m0qqJcKTe4tWUsgQ3Zh9ROVhz0CpmA8ZJjXHFfJh8IVgAbmD5S9y7Ecjpx6BUce/ZZAMDBXcFDe2l5L+z2
kODI7QjHC9ujie34dthxUIgvnnsLANA/ewH9qTcBAJ0NnEfvetgo4bCcVSlyGDO+JVOjwTNOq3bR+8v7ToHNFzdpNI6ioaGhoWEQc6VCTeKbVxQzpWt/KgvlcvG6JHqPSiZFDdWc2mr1F7RKJWyIloXX5HYSq6jDPKhRXDq+iuD64TJq0ErIQtaPOvdQcAgcv6ee/OhydRRD54auX04d5Q2oy6PnMe2umFSL0lmU2Z6ixpKS2opCVzgED09xoqha59UcZDYjllfcSzL3RcGeIzcDnoYqv5YpuHl81Phx0FYHOOKZot7FeqOYEaJlyVx4xKF7FyNlvnriBA4//l0AwPYDtwMAtu2/HTu2B+e7LaPwfY9jXeMDB7BoQviPpYuBs5hcWsH4YuBQFleOx2f2ogNiY4TY1s4Mhv6RdaGm44sHpYZN1046VvSYU3RIVxrrzpktb5r+rVgxJN6CsZj+TqqLa/aUZGGX6cjKnIq4q/78oeu1BtSUr8P/63a85xjwe5h9bzjUrGX0a6/GsZlmt11hpTcSSnk9ntTpuc19P/Mla5KVURNMImaIhFMPXq2pHHtmu8RrIpwzovz2VtWfj7MRMZPEBuKrcymzZ86kbL755JKIl6jdInGS+cIWW3TOq6CY8TChmruOn8ItW1vDu2+/AQB453Dwjzj4gY9gZ/St2DlaCHXEAIYXty5jsu9AqO7294e63n4H7o2Qn3v8ZtgoFowVH4/Mos0AVferXKxd81eprYVqQa2ui5vtF7FeNNFTQ0NDQ8Mg5hM9QSj4vnDxNJkhPWCMVqwR9WT5d57uMcTLCTtlamaXU0GmOGcq1FCNe2Dv2aydodAs+9+U/UyuZCK2HwkQFWUMJLKnQPxDcnVoSX9uhuhpZnPXKVa6bDFURfQ0bIqouYfyHM3JOgUpntl9zUyXxFZsimvLPthK20hxrIN2efKnKNsxa1i0hzU1LuWeVCVG1ae+c6a0eTxEjOZ0e+NjPInUiNzvJ1i7GHwk3nrpGQDA4ovPYPuuwFEcXIp+EaNgLjtZXsAKtgIAVvfcCABYuPUe4KY7wzNPvBrqXTvHLgKW1grLnRsemAHU576MUeZZgFnc8NXgMhpH0dDQ0NAwiBkchaKGUpHhYHikNPG75iym7YzaFHbavfGYi8C9Ll3Stbzbptq27Pk9BpEzNjWQ8hHl7luT6VepakWdDeUFqN6ziSj5hPRabUhFNpvJ09MKi/IV/6KhBsmp9fZby4Vreq1aXzeFUiPugSr1MLbybl36LFY+O8DkthaK6kwGkt2607qcd3JKMqyqj3gefcvUEtP/z7kNA8nLoYvzKVHasxI5r8yJ/oKpfOPRRcr/7JGjAIATzzyBu28NXtr7ti8DAFZGQbl9GotYHYd7R4vRhHbrdvi9B8MzI5cxPrWKbi1El6VXNvELsV0dOkQHPvJwB+DYKGHK8MxAMI+lW2lN0e+nHFT+lbL9afENYr6ggOq55BGqG1DoL/XEZDj5GLMJmrLvTp2jZzmuQ0qSh6R6LpWXnC/SxqpdM22EOoVeZVGvCFQS7S6g7L6n5GCb58WxyKfiuXwVRFtFL5XiNbFcYej54ZNjDTqkCY+WWjimWZGkCtJKu+c454biMUzDHGOuFfPlxlLzQJdsc/q+or16zc/npEHx5RpAXA7oST0pkb0oYRPJCb3Tae3X4rFSjKUV84kWN58qZLEDLxsFlfFG8lZTvQZi/cWLpeX/6Ht1PI7gQJ3j8xcAAKdefgY3vhwsoHbeGEOPL4QNY+/idsCF+twoDOTKtmWYG24JdewJvhju0mks+ksAAEu+GxRNAh06TxtFzPntDXzc1Wm+pWt3JoLVv5PxpR/p+0nL5RtpOudrxNBG0ERPDQ0NDQ2DmCF6ymJHX2Xk/hPGWKWU9nwOSCk7iYl+lRq6SeDmb4AKqCmfL6eWqVfmbFsiMqiIo/xAXwu63FRaZGZlz5ZyU58zpS/rGf/1viuvuAxtfDHN76N2DQaFqEqPR570BwagWElGeXdTvKJaDwov74rJufelKLgGzcVU+2LStgFelN3MNahvOuduDTCOP/sYaOvEiTfx+g9CwL8H7rgHALBnIXAWq7v2wZmw9J3sIoewdy+6u94HALBHgpkszr6JfuU0AMBFjkI4LJuY+AZYcIwqxTldXqrlawuNo2hoaGhoGMSMWE9aRnZ1t8Vh2a9W6UxvV7gt1SVQ3Q3z4bId4+IxiMJLyrX6DrJXxZSYUa9v2Euz/hupmL8WrTdVbJfnqM3FuUq5QQQBfFFvrR2DIP0Dx0lyXC97PScvIT7Dll/OUNytzXT0mmUWzc6DrtJeVsKrL57Ka04l6jb82iWceeMwAODcocAh7N1/FwBg+5azOLsQQpAvLgeTmbuzYQAAHlVJREFU2ZXd+2AuBN3EtlsDZ3Hx7ZewdvoIAGA0CV7bojrueT6T2raHYdNdDyfls6GrjkBNz1Yr9x6hcRQNDQ0NDYOYoaMoqfHNNruaB5SkXjvmlWFAJEmMUUJRU5jZKcuqmjx/HmajMh4z5ZC16/k55qDmaMMUzCm5n7ey0rkuNeMIB5/QefFShTqlurQJYBy4KsXCMmi5upmhP2bluSj1BtPqXk87LPJkRon12BzzL9ENkEMpOtE/0HthSyfPzmx0ozdGsotWLJuqz6z8ngvJ91h5huWJFNvtxZmOnmllLWLNqTKgsvGbHjnK8eGx8u4JAMBbh58P5W4OVlB2604sbQ/1bel2AABWF5bRL4TfbnkPAIT/u2ApZe2lWL+01WY2yt5YuBg3zlasnpCUvr4kG+uI9ZSaZQ2WNLVJtz7x1TSPVxZHpRZ1Wv8lbGryOF0HkgK6pfJY/fy5m/3e40pt4IMSH1OWo+/JIX1JVCYjOHztEZVxr72K9XZ5VvKXzVRm166X4hwRyZSK7spGlAw3iZ7EBJbu5QXVGNlrU4qpOJWLoWZuHjWRSWEvP7WKOBcqG2bxHQrBUvPFGMVyC3H3W+snGJ8P3tqnjx4CAIwOhThQ23YdgPExQdooKKmNHwHj8HttHBaQSW9gYzkyBujjhtSFiV10b1MJtQpyb+2rFfupiZ4aGhoaGgYx7HAXHUm0iZ6EKwZfy6HLp9RmRvFYy2XqcW9S6krfwyy3E++6XKEFo8VR6b6f1K8coGohzXN4pV3NRWF5e4dQKEZZuiPtqMWoutKYl0oZLkedqfnLV0RaqBCeIrFQN9aU4LPba1R7Z3EUUu306xsZo5qSfL31Krlc+OshqYt53BTHnIu7DCS8eIVczMeoNqeNKSPbGmMKqrq2VnjlFFtTAOcOnEZ9s1lg2WCVGp+xGst3sLB9MJo9/0aILIvusVDGL2HrA2Ft27E/mM6OF7djbMK5bstiqHbbVvjl4IltJ13Sbg/D66M2tDBKRDYLl8MN1GLLVcVbm8xpNI6ioaGhoWEQM3QUOjfhbEp7M5FyEXNQdrZClRn+o6hTD5F3ZuUBCf9RUdpO/b9+Km3nXOXKUkMUXQ2aurlsrNNDyKNUZktdSpfJvpI+uR4rmd6cjXA4AzqQGo8ZK5r2ADU
2lT7kFZuKUt+byg011PiwCrWqrToysi/lctNqjUqaM8TRbEiJPQBxLkW9q6zkigc2sTasi/TKZHYS14g+cieLvsdS1FeMz5wDAFz84UuhjFvEwijEeNq2tB0A4HbfgBVEE1gbOJHRVouVhdAA0k14friB95Q0hCQvIrnQMeamzrtZ4znIjV5dxelcymzvvSh+iwhlm4U5F8JMyZZN6+QarF6H4i8Hie2kJp+uO/89z4e9XoGBtLZyrWJvXvXQraCyjF0+5uxULdQ7/28Me7CqnVk+ErVITI31NPN7MtXfWSOz1Wk+sVByzpRiwMHNaUOhqPkXUNtNS7M/Rdel89mYEIcoNE3EU56tjaSOeSyg1ivWqCr06Vu1KOaCV/dwVkyeJwaWxGhxwkycbMdaNE3f+YieNQ4B/taOHMJKTFy064abAIRseecvhthNbnwGANCvvQvbr8THxmd1QRTVO8DFDcLq90N+HPOInuywQMfk65m+lszhK79pNNFTQ0NDQ8MgBjmK0YJSIMVfbijpxiZC75RV89jikZqqrHADKgQz15F1JXiOYuozNxsFS1rlklT5q2QKN6+UcZqCE9DstfRL3VjnCKdx1TP7LeJFYVByjWdOhaWwFequjEs0zLnM4+FcF4KV5TTnPJzOdwAUZdUY2DgyzHRYK2amcxKk88y/wSC9+n7d/kyWpBk38p+wTo0f2fpSqm0ApicldnzHdoQ+Pm9igihpHMOC96vv4sRrLwAARluD6GnLDW9j4oK05PyRkLjo4rHX4C4GsRX6yG1EjsLbDp4TnlGaAvHbYnuaAYPZmcYUg/dcXdFT4ygaGhoaGgYxyFEsLBKFpGSEfUqZ0HUA4NSiFZm1VvTUJOmify4pqaQcH4lyVTQkVyKUUk4de6O0S6L5zeoaRm2nL5yAigJVjd2UMtK4tKn677TGzWQDhq/Pqn/Oe4WKrFSYvNuK1iYn5Geoi0q+V06a8mUnRSSNyBApHGuYpv7IuKj1GiAMPDK+r6w+g/JcrQ6lUOWhJV2Fh5iOs/5igPrlinS9whmSbsMqL3qlVmA4zRnEi6w/VImI8velTWLZjJYizPYeIxs9oqNHt+s9p1E1C9Hc1VPeiAlWTrwFADj29HcBAEvbXkFvQ9ynC+eCR7c9/w4WJhItFlDMj1drUGKvG9o96jou6HKO0E//mpPxr06f9LtJVomqsMfXfsYT83OogxvF0nJM7tF7zttrXWTf4k7RT0T7rwIdK4UxDZJ4MooRU2TZPGBzlspL0C3NbnFIDq8sspBaRpEts4feqOKzrWLv+cWpdhfyoHxUZDNTj1ft0mEK5GOWpE3ZjcmzZPutSdak3VLVtDAT0z74wcRCbHc/p+yppqR0bIoCAOiUrb2v1qcn67THzBIDsrywqNUk7yftnzGmeH9JrUWqQsM1z7MF1MRGYd2Pvx0pPiv9Uw9gvyPqppG2aVGVyV5+Ik6j8NdO1Z8bGVjAdKl4jo69m2C0EJaL8TisAQsLC2pvp8RCplCIT6LYZjQaoetoHOLa4hz/NjH0t7cOhsMrUMIg6kdYj0IdoV6zMILvYztpvK3lEBsuelp3sY2dteyFPTkRMuL5d49j4sK5UZQbGe94znLubhctokwPXuq72G7vEbsHF/tsjOUxpT70eu2id6WWB+5qmlEh3jIktqydlB82//7iNWsM+5JNQxM9NTQ0NDQMYpCjWN4aLvdjjz5SCRM6ToR1dJNMQOK9UPg19qYQC8wL2W/rlDSx0HTSq11WcRxTzCST1I7XNKjdlSvrVHhXqdkNKM1ryraa9/qVQM1MVk6V6Ubze+iGoXzw1wQ8lCw4osagDvhAGBjx5JagUEJZMrNNFHqH1bU1AEAXxSrBaj6aoMaUojt37cT9990PAFheXk6euba2hhdfehEAcOyttwFESUARJFG1M/tGJ2trsIYC741ie0ZwkRvYu28/AGD/gX248WDIfb20FERKLs7Js2fP4oknngAArI4Dh2DdBAu0fnE0hDKlc6dZG/5cRPxGXBxxX33vxAAojjOL56y4lDMHrCXjNW5xA5gWv2uez7JxFA0NDQ0NgxjmKLaEnbtfcJhE2WYXuQezJrLDSdwDJ2OtNxAFGUDyWhTnCEMeoTUMhYDWppG51WOol2LL8Emug0XsSrZXl9XPoczYZOQy6Muh/MtxK537LqcntXoBwHjh6upmy/qh0+fAkGlrTXlclhfqUF9j7kHptS6H4rqSKNUnZTrVwAmRHo7MR+VGZhRs2XcheB0oSiq9CkfRUhcsrA3LxcJCdDqbeNx6ewjd/dnPfAYAsHfvPhw+fBgAcPz4OwCAyWTC1774hZ8DABw6dAgA8MQTT+DsuYtpf02pJ6N+Li4sgpx+x3EN2rplG37lb/8KAOCRhz8Szm3dAhvTnK6srSbteOmVV/D8C8E8dnLuHNe/JSYxurhyIZwDVHKkdLA8SpPqpaUljNcCh7KwHMdq0eDSanDa4/SuZKrsnVjtUyXqO+B10k5P+DR7nazo3jLOou977N+3b7CexlE0NDQ0NAxikKNYXIo7Tmdgo6WBJQ0/cwWerZh8tFDoexWrnXdiAxUhfxOaXth6qWfGM0pvwY92iBZYXICvBfnsvG0rZfFXC4VTG4Yp7iFcjvxzSJasCsU2+oLzCZY/2fwwiYEGVUqXhlHReaXWTogGJikXE8IupWOZOLVN61v98ZsaC6lWv0bxLO/FXLNiwcJhVBTT71PmAd6VuR44hEbvision/n0p/Cxj30MAPDYY48DAP7j738dFy6cBwD0E4k+DQC267Bz104AwFe+/GUAwI7t2/Ef/+D/rfTUp0eeT4brpTHYtWcPPvnJTwIAnnrySQDAn3zjT3Dq9CkAwDvvHAcAtmpaW1vDyspq0s+HH34IDzzwAQDA7/7rf8WtEdPh2ELiCry2WgvXxm6Mbdu2hv59KfTviaeexCuHggMf6UNoznXWKqsxeQ5ZJ/mKJeV651jF4r7QGXZ2hNtvu2OwnsGNYtvWoIzq+wnWJpTUIy1jO8cfY8/WnQ7e5TIfGeSclw4+FuXnMJhfN1+gK6alHkb5dsjuUYpwqDMoTQavIQxtEIRZ06gc5nKDTS6tF/l+UfxN25gErptahyo0pJA2qShGH43aiHgcE/PY6SKtaxJeZjmyX8XGZUw1pLi4G8km4sjcvEvFWN57bFkKgfQ+9/nPAQBuu+02/N7v/R4A4Nhbx0IVzmFxMYimbrvzNgBh/QCAt44ew7snTwMA/uDrfwAA+IW/9gu4/fZQ7siRI/FZ4O+5ZgDTRZES+Sp87NGP4uSpsCn8i9/7lwCAN984wvfSBkF1LCwssEj6hgM3AAB++X/4ZXz729+O4xLHoGIAsy8qy733OH48bECLrLju8dFHPwoA+OjHwvEb3/wGT8VuJGa01BfLoq1oIuw9unjDnn17Q/snE9701g/ZYMTvI1zhNA8w+Mxn/spgLU301NDQ0NAwiGGOYlugICaTNYyiIsiukblc3CZXJkwxdCTC6QDHURSFXGfigCiZJCRvabpVE6eUUVX5ijonOy
XXyxEn60IrIHiNsucl1eqMsL3UbgekvulIWPbLFUEMUrBzErfstTovl2ZKmtTnHNcccD4dDzYQ6NW5pNr4PpQXmTZl1SgchTIkfZrCjQQpU9ovUwtfX7ehrVWtGzDYvrJ4Odd9NseS87niGnpMhLOQV0DlleMbk4RRlKScBw1TltIA1/NXASCYv378Jz8BADgQqfDf/d3fxbunz8RnhDq2bduGn//SlwAAt0dFt+vDJPjt3/5tXLgQFMVnToc0pUeOHMVdd9wFAHgjchSxUbFNqWc0AI6jdMvNNwMAvviFn8PX/u3XAABHjwXOBl0HkAOdjfGZaBydjPPnf+ZnAAArayv402/+KQBg+7Ztod6f/Vl876nvAQBOHj8JAPi1X/01AMBjjz+GP/iDwBWRg+H+vfvw81/8eQDAn/zpnwAAjr19jJk/MiGOSym6ruO1sB9Trm9g567dAIC/++t/FwDwjW98A//lj/9LKNen4rzhuGLD57nd+w7gx3/sx6r3EBpH0dDQ0NAwiEGOYuvWsLOurVmYcXC24fR/FJnSWfST6I6/HM71F8H2eJyp1HhlTpbubJejUJ0eRVZ2P22Sm4i5mbIkyo6Oqr5E7r2upl0T8FBibK0dMBklqp2yNsHRrojhZJWBgFZS5+/FeEyj2WfG0dLtyBSBwqSoGEQVDqGm/6HIpRWm9T2GcAOE+hxVsdeIo6YQHla/A/XOCpYmlL/1tlvxgQ8EZe/X/t2/BQCcOnWanceWFkM8pa98+cu4OVL63/9eoMY/8MEPxmcLKU+K4BPvvIMde3Zyr/hHZtBAmIwn2L4lRHz9ha/+d+HcZMIK6w8+9GC4zXu88srLAIBz0QSWHAX7SY9bbrsVAPCTP/ETAIB//i//BU6eCDGe/ubf+BsAgC984Yt44bnnAQCf+fSnAQD3vi+kTv3zP/tzbF0OEpdLl4L566OPfJTn/3ceCzGk7KhDD9LhRq4hxqVyTsJqLI5iylUHfOHzXwAAHDx4EABw4dIFdl48f/58Mh7W2sF0yfR+nBOTanGYDP9//OMfR+/6egURgxvF9u2BBVpd7WBXL8Unhw2DFlnngUlPm0bo9GRk2VPSs4VCyj4GaHFNdiWxiYf6nSrxfCLyyD92WXxSG/rUntmky0NSh4co+MQYXX+oaXndjuRMvkBvSFsufco3WJdY+ZS7ZCGqU2uDqfZlqBnq2bnSkcQrTp2sKKVlD5g+HushIqb5cwClx7UxlRGqeWbrOXeNbBbF3jnQrhAAkDSY8WSvb6DNVYUFpI02Wjl+6hOfxpNPhoX/6JE3wzVjYEehws9//vMAgI8+8ii+/a2gFP7pz/4UAODff/0/AABWVlYS5TgQfA/Wosc3x0sTbw61h4V2LSws4uGHPgwA+NmfDT4ZZ85cwK//2q+H+raFBffc+bP4jd/4XwAAl1bi2hW/363bt+ILPxPa+9obhwEATzz+GFtv/vCF4D1+7z3vx9/6pb8FAHj4oYcBAK+/8ToAYOeOHejign/zzSH50Re/+EV87d8FEdip0yfjmMqcIf8PrVTevm0HAODLPxfEdQ/d/yAe/UjwBXn+pbBJHThwAATyYaExq4XH16AgjH3f82Y+jtZXVO8nP/1p/Nk3/xwA8DOf/FK1niZ6amhoaGgYxAzz2LDbWasVnKmJVd8DkxiJkeybF5Y872TE0fQSbFEwN6FYI5cqMiU+QyIDTUlrJXk8WrmDmsMemEr0JLb4QnkVrPEmUJpzK7PneuTwVS0qKpXI83EW3HXvM0V1+ZzsLJc0Ktf5tCBLl2OmWsZwKq/5SnlfKXfNwftBDrwoDiXapfhOBhBxVJz/FpAsweHagf2B6lwcLeCZ7z8DQBSqXdfh4E1BPPKVL30FAPC1f/u14B8A4BtROfzY48HHQsdCWozU7cGbbsKzz38/PlQOOSdBEWh3bt/NnASZ5P7n//zHOPRa8FV441ig+MfjNbzzdvAM3717DwDggftDDKotW7bg0UcfBQD837/92+Ex3uN977sbAHA++oGcOHECX4qK+WeffRYA8I//r38MAHjr6FtYWQkip698JfR9dXWV+7e8FERFl9YusRKbPMlpfBa6jju6J7bx0UcexROPPxae9U/Cs958800WL1H8qve///0AgKeeegqnTk03nWVXMWvZM51w6623hLrufT9+53f+2dQ6gMZRNDQ0NDTMwAzz2KBk6kamCJrURxlnP3HMUbiY5GOy5tBH2WU/EcqVdC42UyJrcm8wYqIR+Z6YNpZ7nWWOQmnFEmV2dgMzER65f55Xprvz4b2mQjWnNV1XMp+Doy4zfQxq8aJMhTxM9AUe2Tn9jPWN4Uac5Qqdhp/K2Fx3SJzVcg4EKDta4UpIMX3s2DGcPxuUwlbZkH/up4Lz3UrUYT7++OM4fTo41W3bTsYwEnWWTOnvvPNeLvNqjPvEqkAIl0tLxCjK5h968CHcckughH/rf/0tAMALL7yInoxsDCVQAg5G6vsnfuwnkv598YtfxPeiB/dLL70EADhwwwH8UtRHfP3rwVP8m9/8b7g/ciG/8//8DgDg0KuHeAxui46C5BXunMPf/jt/BwDw4v8cYkmtnFjhvixEzqKP6+S472HjO/jzb30LAPC+O+/CP/ln/xQAcOT1YC5srMFHPhT0Fr/6a78KANi+I0h7fvM3/z6+9e1wLyH15OaT8s3F9/dXog7p8ccfxxuvv4EhDIfwWA7WBb4zGMew4i4mLXGjcPQLE7gFCh4YGrDWAS68V/Tj0OC+N0okFI9OFi0WepDztgFodkvUARFx5G68urxWcIuFk5wjb0gWj5EXqNXfU8xoZSTpEIcThimDB+ZWJQq6bUMiM72Dzeupnp/q+LYyX5lXfgNJCGpev+NYkpdoZQNPK5zeEElW5ArFtUkWgtT0IP1FYhKL9D1nqJzLPfcNbMWYQl1XFXdlg+L/tv7aKn4O8URl8fWZ8QTShG4cakPEoV795mrzNb5sVjJYVeFtDLnD79mJLwsdlxeDCOXC+YucsIgsabz3ePTDQYTz9BNPAwBOnzyNBz7wAABgz74gTvnmN78Z2+CxZWuwFHrkx4Pn8nM/fA4X1sImYzv5LlnpHb+zbUshNMZP/9TnOFwIBR+0SmTGfgZdh09Fvw8KANhPghL3/ffey34X5C391//6f49T774LAHjplbB5/I+//Cv44Ys/BADezLoRrQsj/OSP/WQ4RwmX/ITH4cLZ4C9inIGnYKk0/1m8Z1nB/IXP/3Ro6/PP4cibbyTjfPedd+PX/qfgv/Hqy0HE9nIMC3L8+Al0MVgjfWDjyQSWM+yRL8mIN6jd0UjpA/cFK7Y//P/+EPfeFTbuaWiip4aGhoaGQQznzF4IYXc9HBYXw27soj/FJJpYjcc9FkZRiR2Pi4sTNo8dj2KY4rGDJzfdioTB5z+MqdgAZjdtEKzo4fSMmgMRxTXbKXMSF8PiLc4BrJTgRdhuP4USvlZRcG2p8vtyq6syA1XRU3bnDE6r4TKhQmZPA1Ho27dtY86DlKLLy8ssZtq6NVD8H//Jj2PHriAW+
dZfBJEIfT+jhRE+/omPA5D3+MwzzzDH5DndqApvHk1K778/UL83HbwZ//p3/w0AYG01BvYzhlMpT2KfHn74YRaxUEjzr371qwCCp/O7pwP38MgjjwAA3nfP+/AP/89/CAC45bYgbnv00Ufxv/+DfwBAlO+f/Kt/FQDw3W9/Bx/6yIcAAP/hPwTz3y996Uv4zneCafANNwbv9d73LNaRdLHR2xxg0RYd/7c/+C1Wfn/1y78Q2vHhR3DmTPBk/4vvfCeMaRyf1ZUVPPRQ8FN58ntPAYjhzieUfjZ6pfeOuewPfTi0e0v0A/nhiz/Ez30xGAhMQ+MoGhoaGhoGMchRLEX5pIfH8lLUTcTwseOFCR9XRzFZRxe4jYWFBUzGYfcaRfPHcQeYPg3LS1S4sdM0iJlwe5MVxbli13vPznXsFAOj9PhK5ptxO/yf8oDcbBRRb6uYzyw2PTnlRGKnqC4VjxAhe8kTTDF5Lc74omw1TauuYz3cxY+KhnodmDkP80jJRnSEhHfeCSlLP/zhD2NnVKCePRuo25WVFfzTfxoUr5/4eOAU1tbW8Ed/9EcAgPMXg5nprl27AAAf/8THcd999wEA/vm/ClFeL1y4wNyAjwYyC3bEOoeFUaDkP/upzwIA3njjDbx++DUAwNJiTHHa90B0fiNl9t13382K6oceeggAWAn+9a9/nbnUz0VHwSef+h5ej5T/L//KLwMATp05jTePBoXypz4dFNak3/zEJz/BaV237wxK++WtSzgZHe1+7udCzKdv/Ol/5bGkYWYOq+uYwyIO540jR/DTnwv6it17gi7hoz/2MXz/+0H3Qe/0xZeC7uQrX/0KfvhiaMdyTLw0mUyqjqdbtoT1nExriRv80Ic+hBOnTmAIjaNoaGhoaBjEIEfRdWE3H416LMTde2FhOZ5bjcc1dPG37Va5vLFkIRGdTAwwzkxVmYJMQrp6ucay8pyz2FxUnfGYyvLKFDe2zYvpqVHRT7m8SjgiuH725CQyR34NpUVRfr16I9QQQRGzykorN2XO9UXJuYYNIw/dEk4mBxx+9TAA4MK582wG+od/+Ifxfs/moodeDVY4tuuwEPNR3HxLkPV/+lMhTtLu3bvwe/865K8grsQ5J3lqSFfhZW248UB0MLs3UMHf+rNv4atfCbqGR2Koi/3793MU4r/39/8eAGD79u0cm+r++4L8n+IZXVy5hFvvCJFt90ddwu997d9gEtmYnbuDW8DWbVvx6c+Etu+/ITge/qf/FExnv/LzX8Ef/9EfAwA+99PBRHi00OEzn/0sAODUu8EJ7pVXX+XR7MkCaUR2dR47dgTL0sXlsNZ+6ctfxvbt4dxj3wnWXb/4N/8mXn41xK1aWApL9i/90i8BAJ75wTP48z//MwASmsN7j0WOVBvNhp3DzsjZffgjIQTKpUvB2uyhhx/CP/o//hEA4Dd/A1UMbhQimrEYdeHl84YRxVKjtVWMInvYRcUJMFasvpj2WTJ3JVNSXnitijOkFoVc4uQNajlgLx+ZrbFqrzaJJXaTw0NbiPlilvQ2FcBs7sY2j+ipDAS4Oc+siYEGRRvJeMRTehG6zL2/FqK7oY65RU/0L8B2/YRx9IH4/d//fXz5KyFr2y/+4i8CAL7/9NMcGpy+kZtvuQkPfCAoV///9u4npWEoiOP4L0VacS2KFTeJSBHB6g2Kt7B3qmcI6AlaN/4DN+oN6r8odWHp0lZrceViJmlXbyeifD+bLF8I4U0yb968xWU75KfbtZ5F7U5bky/bzTzbervoaVfUwUdFkcjCvC2Sx96KfG1lVcM328/xmGWSpJOzU318evvykQWg7PlJzf2mj9uRJG162W68HmvJA1B+WNJLr1e8k7d3ltbZre9oo2apskNPleWl96P3oR588t4aWGprrlwuFpmPPaCMJ+PpOeUzpwtKlmq79/Lb+rYtMFerVaVpas/eJ/6Ly3M19hqSpNqr38+R3c/1zVXR5C+/StOCgzzNVYpKqlRsnk6SxMb3Z9w6aKk/6Cvk73zmAgB+RfRTC68AgP+BPwoAQBCBAgAQRKAAAAQRKAAAQQQKAEAQgQIAEPQNpH6gktjTKqAAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "im_ind=15\n",
+ "\n",
+ "input_im = np.moveaxis(X[im_ind].numpy(),0,-1)\n",
+ "input_im = input_im/2 +0.5\n",
+ " \n",
+ "fig, ax = plt.subplots(nrows=1, ncols=1)\n",
+ "fig.set_size_inches(7,7)\n",
+ "im_h=ax.imshow(input_im)\n",
+ "im_h.set_clim([0, 1])\n",
+ "ax.set_axis_off()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 103,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([480, 242, 116, 255, 393, 89, 154, 246, 286, 224, 254, 1, 500,\n",
+ " 103, 153, 55, 426, 32, 259, 245, 503, 216, 87, 348, 472, 108,\n",
+ " 168, 140, 206, 198, 157, 249, 196, 76, 57, 295, 336, 177, 112,\n",
+ " 120, 49, 274, 438, 199, 19, 17, 221, 63, 62, 335, 363, 443,\n",
+ " 31, 142, 247, 99, 235, 285, 146, 294, 386, 34, 373, 150, 433,\n",
+ " 159, 172, 464, 179, 269, 355, 371, 132, 209, 16, 314, 323, 223,\n",
+ " 227, 402, 11, 67, 405, 147, 205, 45, 43, 106, 207, 504, 330,\n",
+ " 97, 166, 144, 417, 276, 4, 74, 152, 316, 317, 82, 27, 10,\n",
+ " 290, 98, 488, 263, 52, 452, 378, 461, 441, 95, 479, 143, 457,\n",
+ " 75, 35, 467, 109, 100, 409, 485, 511, 501, 353, 502, 456, 498,\n",
+ " 92, 390, 36, 365, 398, 136, 219, 345, 305, 362, 413, 463, 38,\n",
+ " 312, 420, 325, 324, 251, 510, 300, 60, 240, 59, 396, 354, 389,\n",
+ " 465, 180, 8, 2, 64, 165, 442, 131, 70, 307, 232, 352, 41,\n",
+ " 191, 408, 430, 265, 18, 505, 289, 424, 5, 282, 79, 123, 236,\n",
+ " 288, 496, 111, 241, 277, 65, 77, 182, 73, 321, 148, 359, 96,\n",
+ " 379, 380, 366, 308, 256, 33, 268, 135, 372, 54, 375, 302, 266,\n",
+ " 237, 356, 322, 231, 279, 414, 174, 499, 66, 492, 406, 358, 3,\n",
+ " 459, 445, 69, 434, 425, 260, 407, 313, 126, 184, 226, 23, 12,\n",
+ " 347, 25, 21, 47, 273, 262, 257, 428, 476, 421, 309, 275, 187,\n",
+ " 399, 304, 419, 243, 344, 411, 360, 319, 509, 439, 491, 454, 118,\n",
+ " 258, 395, 494, 44, 214, 340, 466, 357, 20, 332, 418, 422, 392,\n",
+ " 487, 155, 163, 261, 13, 162, 477, 192, 105, 272, 248, 169, 183,\n",
+ " 217, 278, 193, 381, 506, 72, 296, 40, 388, 233, 15, 370, 228,\n",
+ " 24, 156, 203, 215, 7, 138, 469, 455, 204, 114, 234, 264, 280,\n",
+ " 151, 460, 253, 303, 470, 48, 185, 450, 68, 130, 122, 211, 160,\n",
+ " 451, 61, 382, 374, 478, 250, 197, 327, 471, 175, 493, 440, 385,\n",
+ " 225, 341, 137, 301, 244, 483, 6, 368, 200, 238, 400, 220, 149,\n",
+ " 267, 252, 141, 320, 202, 39, 115, 213, 51, 334, 171, 437, 46,\n",
+ " 447, 104, 361, 462, 133, 306, 333, 489, 173, 403, 431, 346, 139,\n",
+ " 102, 178, 427, 210, 475, 93, 338, 339, 291, 195, 391, 229, 188,\n",
+ " 497, 349, 482, 458, 113, 329, 145, 53, 26, 423, 384, 81, 14,\n",
+ " 415, 444, 435, 369, 343, 474, 383, 299, 287, 326, 194, 29, 342,\n",
+ " 85, 449, 377, 292, 212, 58, 110, 315, 364, 189, 80, 448, 230,\n",
+ " 507, 222, 495, 90, 293, 486, 101, 432, 281, 30, 397, 484, 78,\n",
+ " 318, 297, 158, 468, 201, 94, 42, 164, 129, 88, 401, 119, 376,\n",
+ " 404, 328, 416, 91, 84, 429, 387, 239, 0, 56, 167, 127, 490,\n",
+ " 22, 271, 83, 298, 367, 170, 410, 310, 436, 284, 283, 107, 128,\n",
+ " 50, 270, 351, 218, 9, 412, 481, 350, 337, 176, 125, 473, 71,\n",
+ " 121, 190, 181, 37, 331, 86, 508, 134, 124, 117, 394, 446, 311,\n",
+ " 453, 186, 161, 208, 28])"
+ ]
+ },
+ "execution_count": 103,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "np.argsort(np.std(activations[im_ind].numpy().reshape((512, -1)), axis=1))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 105,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1MAAAA4CAYAAAAYY6KYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAHDUlEQVR4nO3dQU8TXRQG4NMppUUBjdGNce3fNP5NNy40MZqYKCAUKO23aM5wGaalDNKWz+fZTJm2U1fe3Puee25vNpsFAAAA91Nt+h8AAADwFJlMAQAAdGAyBQAA0IHJFAAAQAcmUwAAAB3sLHvz48ePT6LV34cPH3qb/jcA8G8px8i2zrjlvXzd9rler1dfy9flddFzV2GMBHg8kikAAIAOliZTAEB30+k0Iq7TpPy7VFVVfW1LoiLaE6ryuQBsxtZPpnKgWDSQAMA2KUv68vXV1VVE3J5cRdycTO3szIflfr8fEXeX++V9kyqAzVDmBwAA0MGjJlOrrJStmjg1nyWpAmBb9Hq9W+PUbDarE6m8TiaTiLhZ7pfJ1GAwuJFS5XPbfqv8DQA2RzIFAADQwdbvmVqkrBUHgG1zdXVVJ1CXl5c3rpPJpE6Vcn9UplcR18lUys9ELG9mIakCWC/JFAAAQAePmkyt0mXorm59y55hBQ6AbVOOTZk2ZSI1Ho8jIuLi4qLeP5Ud/IbDYT3mZRI1GAwi4uaBvuW4WaZZi/4NADyeJ1Pmp/0rANvmrgYRzTK/nEydnp7W97Kkb29vr/5eTrB2d3cj4mZzivLZzZI/YyTAeinzAwAA6KBTMlWufK3SBCI/U66iNcv7lp38DgDbrm0MayZTp6enERFxfHxcp1Tp+fPn9ess78tkajgc3kqmIm43o9A2HWC9JFMAAAAd3CuZWtYEorkiV26WTdPpNM7PzyPiepUuN9kOh8MbG20X/Z69UwBsg3KMW/Q6x6psFHFxcREREScnJ3FycnLjM2VSNRwOI+I6rVrUNj2/m/fsnQJYr7/egKIs2xuNRhFxPWH68+dP/P79OyKivuZ7h4eHsb+/P/9H7TyZvhgA/MNyzCsnUOVkpyxzj7heSDw7O4vj4+Nb9/K7z549i4iIg4ODiJhPqtrK49vK+wBYH2V+AAAAHfy1CKi5YjYajeL169cREfHy5cuIiDg6OqrLGr5+/RoR16ULVVXVZQ25Mrdspa3X6yljAGDrlGdFNZOrTJLOz8/rZhR5HY/HdWVGJlIvXryIiHn1Ro6X+Zler7dwvJRUAayHZAoAAKCDBydTzbasaXd3N96+fRsREe/fv4+IeT14NqD49u1bRET8+vUrIuY143kafLaCvWtlzcobANui2QSi3+/X+4LzWo5bzYN8Ly8v6/Ev91OVqVXuo8pmTW17s/Je2bACgMcjmQIAAOjgXslUW1vyfJ0rbNn29fz8vN4D9e7du/mP7ezEz58/IyLiy5cvEXG9ijYajW4dSDibzaRPADwpOZZVVVXvb8o0Kf/e2dmpx7dMka6uruLs7CwirhOp/Hs8HtfVG/l54yPA5j24zC/LGfI/+RwAer1efP/+PSIifvz4ERHzRhTZLv3NmzcREXXZ33Q6rcsgAOCpaU5uyslUXnMMHI1G9YJjTrQuLy/rMbVcmMy/814+o6qqhY2YNGkCWA9lfgAAAB10SqbKcr9mu9e8Hh0dxefPnyPi+vDBV69e1WV+Kd+bTCatLV6bLdcBYBvleNVW5pdpUo55+/v79UH12YBiPB7fqtDIEvrJZFJXgOS1qqqFY6NUCmA9JFMAAAAdPGjPVHlgYLZzzRWz6XRatz3/9OlTRMxX4vL93FRbHm4IAE9Fmf4sqqhojpF7e3sRMR8P80DeTJ8Gg8GtRhVlq/NmMtXv92+1Y28eUwLA45JMAQAAdLA0mVqlNXm+n6tpWQ9eHhiYhw9mXXj5frmCt6zG294pALbVbDa7cVhvyuQok6ZMpg4ODuoKjRwPh8NhPcblWJrf6/V69fPLVurl7wOwfg9ujd4s02trTtE8i+ohvwMA26xt8S8nRdkOfX9/v251nkajUf3dbFiRny/PYczPKOkD2DxlfgAAAB3cmUzdt7yuXD1re9ay5zSTrPv8LgCsW1t5XSZG5XiYr7Mkfm9vLw4PD298b3d3t35esyxwMBi0HkWS95rj52w2U/oHsAaSKQAAgA5W3jPVJVXqShoFwFNX7mnKca3cO1U2kIiYJ1N5L/ch556pMplKZfokhQLYjKWTqbs67HWhKx8A/zdtY2XbOFd298smE2VX3JxMNSdfg8Ggft32u20TLQAenzI/AACADu4s87tv+d7fLPcDgKdilXGvmTiV9/r9/q1252WSla/bGj0ZcwE2QzIFAADQwcoNKO67x6ltv5V9UgD8362SElVVVTeZKO81k6kcN6uqqhOpciy1DxlgsyRTAAAAHaycTHVhpQyAf9VdR4q07X3KtKotccr3jK0A2+NRJ1MA8C9bVPJXTojK8r3muVHl5/J18wrA5ijzAwAA6KCnnSoAAMD9SaYAAAA6MJkCAADowGQKAACgA5MpAACADkymAAAAOjCZAgAA6OA/zw8J6O71ynIAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1MAAAGKCAYAAADpFhtSAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3dWYye51k38KckMx7bM/Z4iWM7dhNnc5M0CUkXWpJUVVskkIoorQoSBQkhOEDiEIlTOOCIA4RYxAEViyIhoCJI7QEFIcJSmgZCmmZrNjtpndix43gfz+KE7+gT+j7e62/m8juTsf37HT6X7ue5n/299Er/533/9V//NQAAALA8P/BeTwAAAOBypJkCAABo0EwBAAA0aKYAAAAaNFMAAAAN16biQw89VEb9nT17thy3uLg4cvm7775bjvmBH6j7ulS79tp6F6ampkYuX7duXTlm/fr1Za3rnXfeGbn8/Pnz5ZilpaWyNj8/X9YWFhbK2oULF5a1fBiG4ZprrilrExMTZS0d42pcGlOdy2EYhsnJybKWdI5HOi9pXErNrGrpuk/7nI5jul/Sua5U1/alSM+J973vfctaPgz5OHZV+52uga60b50xGzZsKGuPPPLI8jd2Ffv5n//58sZ+4IEHynHVczpdP91rPN3X1XMkPW/TOzK9E5Lqfkrvs7m5ubKWxnVq6fmdnsXd53R1HNPzu/vc77yb0jM6XcOpltZZzSNd253je7F1rsS75HLWuXbSb4bOu/9itUq6Bk6fPl3WfuZnfmbkxlwZAAAADZopAACABs0UAABAg2YKAACgQTMFAADQoJkCAABoiNHo9913X1n793//97JWRUumGMUUiZiiKlMkYhUHmuJKUyRsilJM+1ZFQab9SvHn3YjIKpY0zT1FqqbjkY5jFS+axqQ45zTHzvFI12IV+z8MOTa9Ewea9qsbx9uNW6+OYzpWqZaMO269e992Ylq78fhd1TzStrZt2zb2eVytduzYUdY+85nPLHt9Kba7+9xP93z1zJ2ZmSnHbNq0qTWPjnQdnzp1qqylT7ekT5KcO3du5PL0TEoR3Bs3bixr6Z1Wxc+vxCdCxi2ds+77s3oWp2s7HV8R54ybKwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0xGn1ubq6sTU9Pl7WTJ0+OXN6NPO6Oq+LFUyxmqnXjZ6v5d6Oju6rI0m7kbjfquTpW3bjSNI9OVG9aX4qzT+czXcOd/e5GeqdtdcZ1PglwsXl0rqtxx6mvhO51msZ17unVfu5crbZs2VLWqhjrFA/dfU6nWvXZgBT3vZrS3GdnZ1u1FD9fRaqnTx6kCO4UjX6lxnOnc5auq7VyzUHHlXk3AwAArDDNFAAAQINmCgAAoEEzBQAA0KCZAgAAaIhpfinB5sSJE2Vtampq9MaurTd3/vz5spbSrFIiTjX/KuXvYutL86/SmYahTrfppHStJSnZLqmOR1pfd1uda2cljn0nRa+TRHgx477mugmX3XV2kui6qYJpW9X1uNrJmGthW1ez9E7oJH+ma7V7r3Wu8XEnpK4l6V1dpRumY5hS6K7UxD7g/+VOBwAAaNBMAQAANGimAAAAGjRTAAAADZopAACABs0UAABAQ4xGn5mZKWtVhOgwDMPCwsLI5UtLS+WYKk59GHJc7OLiYlmrIlzTmBRl2o3u7kQRp31OVjOKtXusqnErEaWddCLJ0xy78+jsdyci/GI6scyrHQVerbMbcd6dR+faWc24cvHnqyPFbKf3Z3X9pHdT+nxIusbTu7qKdk+fRdm4cWNZuxyiwDux7913HXB1WPtPPgAAgDVIMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0xGj1JkbCd2OCzZ8+WtTQuxbRW8a5VdPswDMP8/HxZS/GoqZaOVUeKdu1EZq9EHHw6Z1VUb/f4rkRM+Lh1Y9M70vFYiWj3zvrGHY3eiTsehnysOsejO490XjqR9axt1fnevHlzOSZdq6dPny5rc3NzZa26ftI7K70/t23btuxtrbbOPdp9DwJXB/9MAQAANGimAAAAGjRTAAAADZopAACABs0UAABAg2YKAACgIUajdyN5q9jXFLeaooFTbPrS0lJZq2zZsqWsLS4ulrUUMXvttfWhrKJYu1GxKdo1HcfOmG78eSdKdiWiczux6en4dqOvxx0Tnsak+zadl07kfrp2uvHnHd3o/O657syje32PO6J9Nc/LlW7cxzLdn7Ozs2Ut3fMnTpwoa9U7rfqsyDDkd9358+fL2q5du1rrHLd0rCqrOT/g8uOfKQAAgAbNFAAAQINmCgAAoEEzBQAA0KCZAgAAaNBMAQAANMS8zxTTumHDhrJWxcWmuNUUX7x+/fqylqJYq3UePny4HJOibm+55ZayllRzTFHxSTpWqVZFLKc451RLcbGpVl1X3djucetEhF9MirfurLMbtZ621YkXX4k4+45uRHu65lKEcrXOdHzTtpJxnxfR6OPTPacd6Zm6adOmspY+91HV0mdA3nzzzbL29ttvl7X0iZM9e/aMXD4zM1OOSdJ9mH6HVESjA4l/pgAAABo0UwAAAA2aKQAAgAbNFAAAQINmCgAAoCFG1KQEm3PnzpW1TlpbN30nJV11ksampqbK2jPPPFPWJiYmytr+/fuXPaabuJWOY3U+u4l96fh2EgI7qWUXq3V00/DSuLWSoNadY3Wu05huwl4a19FNtht3AmM3STHNY62kKV6t0jN8NaXE25S8Oz8/P3L50tJSOebEiRNlLaX5pVTBKulv27Zt5ZjNmzeXtfTeSimd1W8U9xmQ+GcKAACgQTMFAADQoJkCAABo0EwBAAA0aKYAAAAaNFMAAAANMRo9RZmnCPEq5jTF/6Zo16SKdh2GOgI1RRSfP3++rO3atauspdjXgwcPjlz+1ltvlWNuuummsrZ3796ylmJr0xwr3dj0FBnciZ/txp93oq9XIga3E2/djTHvRruPW9rWuOO+u1HraY7p+VfNMcUup1rSOVareZ6vZukZuFasW7eurFXP6bRfaX3dT1hUkepnzpwpx6TfDNPT02Utzb+KkU9j0nMCuDr4ZwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0x1zXFo6Zo9KWlpZHLUzRwNWYYcizppk2bytrJkyeXva0LFy6UtXQ8UjRzNf89e/aUY5Jnn322rKVo9HvvvXfk8tnZ2XJMJ1p8GHJcbCdKNo3pRk5XutHi446j7ka0p/knaf4pXryzvnQvjTtGvrtfnfPZXV/3nK1EjD//e5dDLHaaY3Ufpuuq+1mG9LmManvp0ycLCwtlLY1LkerVuPS82rlzZ1kDrg7+mQIAAGjQTAEAADRopgAAABo0UwAAAA2aKQAAgAbNFAAAQEOMRk8xv5OTk2Wtik1fXFwsx6RI8m5c+fT09LLXl+JWUwT3uCOz0/HdsmVLWdu1a1dZO3v27Mjlr776ajlm27ZtZW3//v1lLc2/Oo7
pvHStZnT0asZbd6OLk07k8bjj4Ich71snoj3pxqZ3jnH3nKU5Xg7R3FeycV+PKyFFklef7Uhj0vWYIsmT6jimz5ikY5/m3zke6f5Mc0zbAq4ca/9NAAAAsAZppgAAABo0UwAAAA2aKQAAgAbNFAAAQINmCgAAoCFGo6fY3SpCdBjqiOsUZZoiycc9bv369eWYc+fOlbVufHulG4ecIpbTOasi6z/wgQ+UY9J5PnbsWFmrYumHYRh27NgxcvmZM2fKMel4rGYkeXd93Tl2tpVq3ZjtalwasxKx0dU6VyI+vHt/jlv6JEPlcojsvhKs5qcXutLnQ2ZmZkYuT+/BdD+lmPDOOzK937vPwPS5j61bt45cvn379ta2gKuDNy4AAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADTENL8kJfpMTk6OXN5NvZmfny9rKW2uStzauHFja1tp/il1qNJNqEupSONO/kq1KpVvGOrkwGEYhiNHjoxcPjs7W45JxyMlSCXVvqVtpePRTWAct5VIFVzN+XfmmM5Ld+7pPquef+n+66Yspn2rtpeO4biTJa9ml3uSW/WcTul1hw4dKmvdd9ri4uLI5efPny/HJOn3SUoqrM7n5X6egZXlnykAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADS0o9E7Ub4TExNj31aKL16/fv3I5XNzc+WYmZmZsnb27NmylqJTqzl241ZT/HInGj3No4q5H4Yc2ZxicKvY3aNHj5ZjUmx6uj46xyNdU0n3eFS1dF66104al45V2rdKNyq+Gz9fufba+jGXIpTTOetcV91tdXSfmTAMwzA9PV3WUmz6wYMHy1rnkx6d++xi20qfMXFvAB3+mQIAAGjQTAEAADRopgAAABo0UwAAAA2aKQAAgAbNFAAAQEM7Gr0T5Zsij1N88bp161rrrOKXU+xrik1NtRQhXh2rdAzTfnViqoehjmbunpdU60TFX3fddeWYI0eOlLWNGzeWtVOnTpW1HTt2jFzeifAdhvHHW3d17omLWSv7VuneE934+e5xXK31wUq54YYbytrzzz9f1s6dO1fWqmduesekez59hqX7rACoeKoAAAA0aKYAAAAaNFMAAAANmikAAIAGzRQAAECDZgoAAKAhRqOPOxo4jenGbFdx30mKRq3isodhGI4dO1bWtmzZUtZOnjw5cnmK2U77leafxnWi0VciZruzvpdffrmsffSjHy1rR48eLWtVfG6KWk9zXFpaao2rpDjydJ7TtsYdcZ7WdznEsHfvpWqOnedRWl/Xu+++O9b1wf81MzNT1rZv317WOtdkeqam+PP0DF+/fn1rnWvdm2++WdbS52VmZ2dXYjpwVfHPFAAAQINmCgAAoEEzBQAA0KCZAgAAaNBMAQAANGimAAAAGmI0ejeut4pETjHEKTa1GxNerfPChQvlmBTFunPnzrKW4lareczNzZVjkm6MfDWPdHzTeUm1znmZnJwsx+zdu7espX3+xCc+UdYOHjw4cvlLL71UjtmzZ09ZS/GzyeLi4sjlnXM5DP147s46u/NIz5Zubdzr63wWYCWecZ37rBv5DpcifSLk+eefL2vz8/Mjl587d64ck9651113XVmbnp5urXOtO3z4cFlLvxk2b95c1qrnXHW+hmEYpqamyhpcqfwzBQAA0KCZAgAAaNBMAQAANGimAAAAGjRTAAAADTHNL+kkZKV0sm7yV0qt6qYRVlKS21NPPVXW9u3bN3J5lSY3DMOwffv2sjYzM1PWUhphZ0xK2EvnM6UHVVLK4uuvv17WNmzYUNbSMT579uyy5/Hiiy+WtXQcU7pUdX1UKX8X29Y777xT1rqqdXaT8lYzUS5dp0m6hjvphmkeqZaecelarUjzY6WkRNNvfOMbZa16rqb1peTA9Hy89dZby9rlnER35MiRspaeZTfeeGNZO378+Mjlhw4dKsfcfPPNZW337t1lDS5n/pkCAABo0EwBAAA0aKYAAAAaNFMAAAANmikAAIAGzRQAAEBDzK/uRgpXulHl3Ujhccdzz8/Pl7X77ruvrFXRxidOnCjHpIjlFJu+fv36svbqq6+OXJ4iuFOM8sTERFlLqvOSYmmnp6fL2vPPP1/WUuzrs88+O3L5L/7iL5Zj0vH49re/vextDUN9ru+9995yTIrBTfdEN8q8MyZdw0mKdu/c06t5PFKMeVeaR3U80rHvHENGG/fnNy4HaZ+/+93vlrW///u/L2uHDx++pDn9/773ve+VtZtuuqmsVbHe6fMbqyn9Pjl16lRZ27hxY1mr4s+HYRi+853vjFz+5JNPlmPSufzUpz5V1rZt21bWYK3zzxQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABra0ejjjk1PuvHFVUxxii9OkaovvvhiWduxY0dZ++QnPzlyeYoCTXGlb7/9dlnbs2dPWativdPxWFhYKGtJWmcV25yOfToed955Z1k7d+5cWati5P/4j/+4HJMi8FPs6913313WnnjiiZHL0zHsRnCn+7YTZd65/4YhR/wm3bj1Sppjimivxo07av1iqvOZzvNKxLdfrc6cOfNeT2HV/fmf/3lZ+5M/+ZOyNu748+Q///M/y9ojjzxS1nbt2jVy+cc//vFyTPpcxrg999xzZe306dNlbWZmpqwtLS2VteqdfOzYsXLMU089VdbS74n0KZDqHb+axx4Sb1UAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADTEaPQUmVnFSg/DMJw/f37k8k7U8MWkuOEqNvPs2bPlmDTHKuJ8GIbh4MGDZa2a4+TkZDlmcXGxrJ08ebKspXP21ltvjVx+6623lmPm5ubK2vz8fFm79tr60nrhhRdGLk+R7/v37y9rKZ74uuuuK2v333//yOU33XRTOSadsxTHW21rGIbhjjvuGLk8Rd124+zH/bmDdL8k3U8rVPdSuu6780ixu+P+NET3+VeNWyuftbjSPfvss+/1FFbEG2+8Udb+6I/+qKz94z/+40pMZ6y+/vWvl7Xt27ePXJ7eZx/72McueU7/v7/7u78bubz6jMYw5Pfx7OxsWdu4cWNZq953KcZ8y5YtZe3AgQNlLb3/qznefPPN5RhYTf6ZAgAAaNBMAQAANGimAAAAGjRTAAAADZopAACABs0UAABAQ4xG//a3v13WqgjRYahjOFOkd4o476ri21PkdIpDnpqaKmspOrWKbU6R3u+++25ZS9HXr7zySlnbunXryOUpVnpiYqKspdj0FJl9/PjxkcvXrVtXjjl8+HBZu+uuu8ragw8+WNa+9rWvjVw+PT1djtmwYUNZS9G0KY73137t10Yu/6u/+qtyTJKir1O0e7oOqlq6X7qfQljNiPYkPZPGHS+etpWOVXX803lZiWft1aqKsB6GYfiLv/iLsvbTP/3TKzGdsUkR1o8++ujqTWQFpAjur3zlKyOXV+/OYcjvn5mZmbJ26NChsvblL3955PJnnnmmHHP33XeXtfSJkPS+u/HGG0cuT3Hq6Z379NNPl7V0Xb
+ ...remainder of base64-encoded PNG output omitted...",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "\n",
+ "v1_ind=np.array([427, 208])\n",
+ "\n",
+ "fig, ax = plt.subplots(nrows=1, ncols=len(v1_ind))\n",
+ "fig.set_size_inches(15,5)\n",
+ "for v1_i, v1_ind_ in enumerate(v1_ind):\n",
+ " v1_k = v1_model.simple_conv_q0.weight[v1_ind_,:,:,:].numpy().mean(axis=0)\n",
+ " v1_k = v1_k / np.amax(np.abs(v1_k))/2+0.5\n",
+ " im_h=ax[v1_i].imshow(v1_k, cmap='gray')\n",
+ " ax[v1_i].set_xlim([0, px])\n",
+ " im_h.set_clim([0, 1])\n",
+ " ax[v1_i].set_axis_off()\n",
+ "plt.show()\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(nrows=1, ncols=len(v1_ind))\n",
+ "fig.set_size_inches(15,15)\n",
+ "max_activations = np.amax(activations[im_ind].numpy())/np.sqrt(2)\n",
+ "for v1_i, v1_ind_ in enumerate(v1_ind):\n",
+ " v1_im = activations[im_ind,v1_ind_].numpy()\n",
+ " v1_im = v1_im / max_activations\n",
+ " im_h=ax[v1_i].imshow(v1_im, cmap='gray')\n",
+ " im_h.set_clim([0, 1])\n",
+ " ax[v1_i].set_axis_off()\n",
+ "plt.show()\n",
+ "\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1IAAAM9CAYAAACWhEKIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOx9WW8kWXbex9y5FItVrOrqfbp7unt6ZiSNLGuxDVg2YNnP/h2CnvRseHoGhgH/Df8DPcqAXwwIgkeQBpIsaTTjmV6ruxaSxeKaCzPph8R388sTJ4IRmZEk2zrfS5CZkRF3Pffe851l7fLyEoFAIBAIBAKBQCAQKI/GTRcgEAgEAoFAIBAIBL5piINUIBAIBAKBQCAQCFREHKQCgUAgEAgEAoFAoCLiIBUIBAKBQCAQCAQCFREHqUAgEAgEAoFAIBCoiFbRlz/60Y8uAWBtbS19xr8nkwkAYDweYzgcAkC6jsfjdF+n0wEArK+vAwC63S5arfnXjsfjdOVzGU2wbFTBjz/+eO3qu4Af//jH6YF8L8vNMm9vb+P+/fsAgMFgAAD4x3/8R3zxxRcAgEePHgEAfvd3fzf9/9lnn6X7AKDf7wMAdnd3sbOzM1eG4XCY3k3w3drWP/zhD0vVif2k8NpNn13m/qLfVb2fWKZOZWHLVrb8ReUuQtk6ffzxx5mXeu+0n62treWWrdFopDnjwZtHeX2nn5edTz/84Q8v+duiOdtoTHU2zWYzXbvdLoCZjNAr7yM4XwaDAc7PzwEAJycn6Xp6egoA6buLi4tUFr77v//3/35lnbw+Iq4aH0X1t/O7aEzWLfOWmUsWq4ryyvaoOpe8tUnB8nI8XFxcZGQv16Nut5vGZLvdnrtH1zk+S5/j9e+idaobi8q1IpSt0x//8R9fWadGo5HKyLmq7VfmOyKvrmXaoM71dpk1Z9H7vHvqHHt1jKNl5EeV+VRG5umaVeX9dd9/HXui68Y/tToFIxUIBAKBQCAQCAQCFVHISCmKNKo81VuGZ+5FovVTzZLiKq32qkHN/mQySWWkhrLVaqV6URNOzWSv18Pm5iaAmdZsNBqle1gXatlXoSEMTHGb86Ityj6pxtYDx5zH5tr5tCiTmAfvXV75OP897b9lppSN4hzjfDo7O8PLly8BAEdHRwCA4+NjnJ2dAcjKnlarhV6vV7leV8GTXfZvXotYk7w2v63jmOWtu3xVn+e1qdeWajnBK8cUwfHWaDQSE8Wx6JXLPpO/1etNy/ibfj/BuTcej1N7WTnlMXtAfpvmMVj29/pZ3vhaVTtdXl66c32V89qTN6t4vmIZlqZqW9TVdiqfrTWHvqNoTBXhtsy9fyqoe0+zCCofpAgr5IB58xs9lAAzUwk9pNiDhQ7sVS3YClsn7yClCys3c8fHxwBmZn/dbhdbW1sAZnVSc0fW4bYssoQn2LX98zYot3WTB/h1qmOBqXORumoT6I0Tvt8K/slkkplr9mpRtPmoCu+AZjc5rVYrzSMemnq9Xtpk8Ts1+eX8oYksD00vXrzAixcvACAdqE5PT9PcJPjMdrudMSVeFnmmlrYtdIOom3W9eigyw7zNsuO2QNcOO29Go1E6SNk+aLVamcO9HsAInV/W1L2oX1eJogN6FZPuuvuUJvL9fj/NUVUy8urJN7sXKDpIeTLTU1zaZ93UGLbv1XFTpIi5bfDkfllUdRlYFnaMeWunlss7rBft4W5zP/3/jKK5fF3rZ5j2BQKBQCAQCAQCgUBFVFbVWo2baod4qh8OhxnHXGqINzY20m+tllYZqVVpJpQdymOkVPuoGnR+Tu04zYna7Ta2t7fn7vfYOWXi2C5W63Yd8JgITzNDTb69fzKZ3IjWrM4AEZ4Wo0xdlqlvWS0xx45q0KxmXK/2fi2j1dg2m82MqWmZMuXB00jyuRw/nU4nzQsGnen1esl8ivdx7A0GgxQ84vDwEADw/PlzAMDBwUFipMgMKxtFlkuvZIvrgjJSalZp5Yc1IQOyMk/7g/evra1l5KCO11XPtTJMZZEpo/esVaGIOQey5uaDwSB9xjWJv+t2u2l94tjk2FLLAp17/G3drGcZePLEW5/zoIxamWA0i+DNN98EMA0Cw0AwZJlp0TEYDDL7BWWpPCYhr8+bzWbG1FnbwGPjq47RqmuAx5DaZ2g5bH0vLy9d6506UQc7VLfp3zJl8X7L9tRgMZ65qV3HVGbnMb+3kY26DSZvdaHMnCszN1a1fgYjFQgEAoFAIBAIBAIVUVmN5tmPKqMETLV4ZGuodaLGbmtrK91nGY/Ly8uMxv06bPEto6baMGrSlUlTJ3dgqrG4c+cOgJnGXRkprQvgawmvk9nx3q8+BCwH+8ezNfeYw2+almMZLKLZKNL6qrbVMhqj0cj1K+D9duxoYAdq3b3QwXVoZwmVAxpQApiyQspE8TuOK2WyganvE1mnZ8+ezV339/fTvOP9jUYjPZ/1ZfCXu3fvZtIPlMFVIcxZR9ZZgxjo3OezLPtm20qfrwEzrAZ7ER+csrLFk092jKh21/MF03vynl838uSqhjrn/On3+xmmgn2wvr6e2EuOI7Iop6enGRl5cXGR8ffVMq2ivnkslC2H54tXxkfkqrW26lr8zjvvAJi2O9uSewNePbbK86ni1fN9I9S/mSjLUi2DIm24yg3Liih7z3Go+yhbJ52P1hKh7vFWxjfaCzKk5bH7xWUtSKpCrZzs2qnrqhfwRP1seb2t/u6KuoNyKG6ivnn7pryy6OdFflN11iUYqUAgEAgEAoFAIBCoiIUZKdXmWcZCGSmrAbpz5w7u3buXeQbgM1Kq+awbeRqXi4uLTLTBjY2NVE+GP6f/xuXlZfKRokaTv1fNmhd1yKJuG86roqlZ23RqBBuNRiY5JX83GAwKo7UFfHhhV+14Vy0Z+yZPcwbMhxa3kfG63W5iQjQyXpUoXldBNeHe+4Hp3CFjxPI0Go1UF2qjOZ+ePXuGJ0+eAACePn0KYOYjdXR0lNqF9d7c3EzPJ/u0u7ubrpybVVDUDlpXyoTxeJzRnGtyYDJxWn9g1kbAvPzMK8eyvgJFWjzPn84yHMRkMpnz6QLmkyB70ZJWoQn0GBqdZywT2cF+v5++t1EjNzc3k2UBx5bONxuKfzweZ1iEuuH5ZNh+UjbDXrXdPc289dX1sEzd3njjDQDTNrOROJWhok8kr2dnZ3OMFTDvU2XlobIz1gIkj6UCrk5oXhU69ixrob7RtvyaDoJQ+WH9TvW51+WvfJX/UhG7af2OdA+0ivIWrbVs+8FgkMaiF2HVsk+6dnos+KojYN4GXIePblks6zdV59516YOU5t7QAWipemJ7ezvzmQoHK2g8h+26UHSQ4nvVJJGbILvxG41G6QDFhZjtowcp4qq8QHXCGywaBMCaEdBsqt1up7pww6GbRiuA1LzktsDbzFkz1OsUdrro55kbDIdD13zPmstxLGoYcZqf6qa9yIzTmqJ5OXaugh7i1JQPmJm5rq+vZw4Qo9EobZpoxsdD01dffYW
vvvpq7jM156O84Xvu3LmDBw8eAAAePXoEAHjllVcATA9WapZ7FXRc2LGhmwDWR4MSUC6wPalw0T5lWfQgZc3C9ODsbfLs/VVQZvHh8zudTiavkm5ONYADMJP1alK3KkWLbmjyzA/1cKsmY/Z+jtPt7e10EOczVCFo66sbdE/W1GnGpOuuVZxoMBcvlYANuKHrnJWD3rhfBiwXTW2BrBn5YDDImP2dnp6m+WMPWScnJ3P9qXXTueaZMBI6D1axbnlBmzqdzpycB2Z9s7W1lWQD76cLgY5ZjtVGo5ExrdXxv4oAGp6pnsIeVEajUUbxpwEcrJKmTnNYPdx5ykpg2q4qswibz1APVJ6igvBk3nUfpq7jfdfphmLfU8ZUzwvcUvR77xlVcbt2voFAIBAIBAKBQCDwDcDCMVtVi2Cd8tbW1tKpn1olami2t7eTZsmyPo1GI/2Onw2Hw5WH/LQaXmWk+N3m5mbSgFMjub+/D2DKUFGbRI0my392djbndM5nWi1Y3ad8T9PIZ1Pjsr6+nnGMJ8vW7XaTVp/aRPavhgLWPqyzDt6zvHHgaSMIq5lst9up7oRqBj0zmqJ3V0WR1pfjQbXLHHvtdjtpItkXZEA3NzczIZtVk6ZaX2BeC8frMqYtfGe3280wUcqM2UAKx8fHaf58/fXXAIAvv/wyXWnKx6S77MtOp5OYUs611157LZkQvfrqqwBmiUA3NzcXCk2tJiH6GTBv9sq6XlxcZBgpyrnj4+PU/jYUe6/XS/OK0GATXt/UoUH35pcNWKDhwHnVxOqsE2U8f392dpYx91ml43Kelvzi4iKVkX1zfn6emV+cUzs7O7h79y6Amdb64OAg8yxegWzY82XMla5iooBp+1tGVllpy/yOx+NUXsuCXFxcZCw/PDZjmTqx/ba2ttJc8QI4eWyBBvoAZuPs5ORk7m+95/z8PLFaaiJt2XeVxZY1qYK8NlGrA2U+7XpL1ml7ezuNQ1739vZSnQjKj263m7EeqGP9vYpBtoEYOp1Ohp1hubQvWF/PZcOGGfewTLAJy9ypxYcd/14gId0vcQzbee8FG1Im9LaY+XnBP27COmdR6D7M2+MC03FkmUOvTzwsOoeCkQoEAoFAIBAIBAKBiliYkVI2x9rRa5hIq5G4c+dO0sKo5pW/4zM0bPqqT/X29DkejzNlU0dkah6obTs+Pk5aNmrJ+f9wOJyzqQem9bwufyJP26Z266wntV5kAjY2NtJvWW/+7uTkJKPdUWayTNjUuuE5f9owu1tbW6l/1N8LmGov8xjKPFStlz5f/VD0nZ1OJ2mX1e+OmnIGTlD/NdU+AzOm6eTkJPkWcQyORqOMZl21clXrVBTinOPh8vIyo4F9/vx58oMiE/XFF18AmAabIBPFOmmbkCn91re+BQB4++238frrrwNA8pWiVrfValXSOHu29YQGzmG9NbgMteK8j22/v7+f5hcD7fCebreb2o04PT3N+N6odrvO+eQ9S9kPZWuAGQPXbDbTOGNfqWaabaEhq+uEx6Sp/xYwz8ZowAK7XnEu3bt3L80vMh3EcDjMBD3w5ERdDvReYCBe1T8SmMpqy7JpSgGVB8A8e2NZDfV19UKoV63X3//93wOYti3HkA3MtLGxMaf15z3Wt4v1ODs7S3NLWV/W0dbz7OwskwRY2blFGSkvoIT6n/K5Wl/rd8O0Dnfu3EltwHYiU68pVBhER8eBDVxRl69RkX8J/261Wmn+8KrlYr/YJOrn5+eubMsLK15Vfnhlzbsqms1mKj9lH/tP/W3tmqv7PA3gdZMBKLx6qr8eMK2vZYOvmg+r8o3y/EyLymD9+z3mlmOS806tzYrGQVVmqvAgVdTpKuitqUG3251zKgdmQm1/fz9NKgo8bn7UuVkDWKzaqc0uWsPhME0GTvKNjY00oXg/63F4eJg6ikKQE84LzJAXBKBOFJn26UFKFxtgdpBSEyTWm4vc3t5eZhCr8+t1wr6z1+ulevKgywhwOzs7eOutt9LfwOwgyHYAVpcjQk0GrEBj225tbaX25uHp7t27mTxlehC0JjAclwcHB8lUkwvY2dlZxsRnGZMQNS/yzIqA6UaAm22Or8ePH6eD0+effw5gtqk4Pj5Ov7Ums2+++SbeffddAEjXN998M8kQjlsVnDbYSxmoyQ9lgEYnZV11nvCQyPqzX/b399P8YjAMjrt2u53qyH7wAlDopqMO81L7/9raWqa+Wk+OP5pM9nq9NI644dAyW/OJ0WhU+O5FUZSTbzQaZTbQqjCxY2tnZyfVk/drNFMbQKnX6xWaFi8K3Qh7EfpsJE6Vvfbg+PLlyyQDOAfV9M0LQGRzni1Tt5/85CcApuOGY4eHAf6vJpUq52z0T841VXTaIBWqPNIr9xpWudbv9+dMNReFPcDquq/zxColKQ97vV6af5QRlGmTyWQu8ATgm6B5wQ6Wgd1DqFmU1o3fcz5x/dLDsEZkBaZ7CNbJbmzrRl6kPTUBY//1er00Fjk+2R/37t3LBC7i2MlTft2kaZ+ORa0fMFPodbvdtG/Q4GmAryi4Tnhmpt65g/XkHH/+/HmqJ03+Hz58CGDazzaKY5FysmzgljDtCwQCgUAgEAgEAoGKKG3al0eLqimAOptT42JN/F68eJGYAg0PDEw1GkWMVN0O/545GKE5fICpNomaCpaNGr6Dg4P0XN6jJj82xKZHYavmos7Tv9bXOk9ubm6murCMdHBVDTK1aNQ8dzqdTAhbdZi/LtM+L2iDmpiRiSLjsb+/jx/84AcApgEKgJlGaW9vLxPopG7WUDW9NlgBr3fu3Eljh/MJyOYuUy0S5xODN6jJKeedal/sONBy2cAHV0HDLltnYzX9ouZVzfj4N0Ocs46Xl5dprFkzvvfffx/vvfcegKlJHzDVNvF+y4RrcI0q0GATXn4UthP7rd1up37ToCzAlKmyZnAcd2tra6kNlbG2jJSaFdaR98aa1XjO3yoDWCdrxgPM58Li7yzr6Wlnl5FzRaFu1exYw2KzHJo3CphnfnXO6e/Oz88zcnwymeQyR8siT4uu2lMthw1wQlZ6b28vyXTOL/6u0Whkgtjou5YJs0/89Kc/BTAdL2QqLDO1u7ub2Bd+du/ePZelAvwgKLxXTTA1SAXrbtmqk5OTJCPLosgcVoMQ2LDt7XY7acbZBmRlPvnkkyQbNIgOMJUHlO181uXlZe465a2LV8HbW3mmsmpeCUzlGN/POlEuv/vuu4nN4XdqVst1R83P62LVPPlgc3GpZZAGnuEYZPvzuru7m8agTXGhLihqKl+UTmRVJtqeHGT9yER98MEHAKbzh+swx6KuTTa10U3BrhmtVivJEwaYYlm/+uor/OpXvwIwk4MfffQRgOlewTtPLHu2CEYqEAgEAoFAIBAIBCqicrAJz0FLw+UC8yGZefrXhK88JVqb7e3t7YzWQB3zV2VvajUW6hxPjWS73U5aL2otqWV//vx5uo/aM56WG41Gxqmfn+u764Zn30xoyGrWWbXnwHw4cy8EstV6LOu7UVSPvHC8HmvQbreTdpN9QG3e559/nv
7md9Q+ffrppxlftkXCZhdBw7xae2pqro6PjzNBC/r9fuoX1TQD07GnDBQwH8SFfc2+Uzt9QjWbVTXqqqmyTuGc38+ePUuBJegXpSHO1T8NmM4hagHpB0UN2vvvvz/HRLFOHBN23p6enqZ2qQprS+0xEeoj5jGJwDyboc7WfJaGuQem/aH+WIq6/R+0jtZHqt1uJ42rDe3e7XYz7KXWkX2qvkaL+KpdBW0fK5M0sbFaA2giZ2AmCzY2NtL41xDOwHwaC9UyWyZKGaQ6mEOvbpZlGw6Hc4FNgBnL++zZsyQzeD/HlgbfsQm/6wKtAVqtVmp3z4mfjAVl94MHD1yWivdroApg3tfU9q+X8FfDpy8qH5T58QJ0cLxoCgfWhcFx2BdffvklHj9+PPcZ2ZydnZ3EGrDco9Eo1TNPViwC7xlewlyOpWfPniW/J7Yx++add95JTADlOPv88vJyLiAXML9HsQEPFpV7HiPlBcXi+Nne3k5WEGx/+tns7u6mNtd9IcurKRaAebYuL5XGKuBZG7B+ZG++973vAZjOH5aXgWE4HzQo2nX4SpVhhXQtZl+wLr/zO78DYNpvf/InfwJgtkfn/qPT6aTxWRTev2r/BCMVCAQCgUAgEAgEAhWxtI+U2pzzurGxkdEY8UTb7/eThob+HhpFyIaDVu39qk7Dnq+UTWTaaDQyNtvUEj1//jxpYG3EtVarlbQXGh2oKBLSqmxnvag1/MyGwFStq2UJvSRuNxGVBpgPN8tycOxRo8Rx+eTJk9Rn6pcHTFkEy4xcxYZVhdp/s7zWh2k4HKZycJ7s7+8nDTOZKGWhWBf2K+247969mzS97MONjY1MUl/186lqC63tT80W5zXL+tVXXyUmiiF9NZId38k58/rrryc/qA8//BDAlIkCpr5SZKI0gpLVBpLBe/nyZfq7ClTbzL7Rq/Un6nQ6GX83Tf5sQ7TyOhwO03PVz4LPrdtPL4+p1oSSqiG20QnZR8pssozq92cTxA4GgwzDV7fMsM9XRorf9Xq9NG5Yftat0+lk+kn9XGxKDC9UOFHXWuWNQRuJ8OjoKM01zi9qYPf29tI8YxlZb/Xr0LXBrrfLRO1TX0VqullWDePOPuD6ee/ePZel4pV/k6XStdnOv263m2Hm+Z5+v5+R+2Xh+SKp/w37jHL8+Pg4le3NN9+cK/8vf/nLxN6xndhPDx48SM9V1p/zzfZXXfPKRnT0fGgHg0Fai8hMkdn4q7/6q1TP7373uwCA73znOwCma7P1o/fevag/pf7ermlaLxvNzmNH1ZeP40jlNzDtMxvlEshaUV3nPkmZONaP84R1U+bcJrne3NzMpOa4DnhWGDZKdL/fT3sisp3/4l/8CwDA97///bRH+NM//VMA89Y6XOe8/sorw1UodZDSzvccrwk9SNl8N5w05+fnqaNUwADTxrGVW9VBquiAsba2lnGYv7y8THXhYGQbHBwcpM0aByjv0QzkuqjX7ZycB0+46obDCio1L7KHLDVtss+/ifCYCpZNN2w0W2HI808//TRRvQxEwXGpGdpXBd2gcizwM252jo6O0sLE697eXiYPB3+/traWcbjmGHz48GFmw6EbDQ2fvCg03LLmiAJmbfz48eP0Nw9Zw+Fw7sAHzEwovv3tb2cOUDTne/DgQfqdBrKxwTjYdi9fvszkBCqCN549512bxV7DEdsDVa/XS/2r45TtZjfo+n7rDF1XfiKLyWSSyqbySnNKAbP5sra2NrcYA/Pj25ZbNzN1myt6h0Je7YG32+1mFAlqzmZzLvHa7/czJr9qdl63PLfrq9aJY0fN+Ti/eIDiJuPo6Chjoq19qilLgHk56I25quOPckfNWym7NAAE5yvfvbGxkQlPz2fdv3/fPVwB082uPVxtbW1lZJ6uc9YUtwpsP+k4Y100DQLnCDd6XJt++tOfpr7jYZgBdu7du5f2HpofTM1VAcyZkdW5YdeDh+b5AqZ9YQNo/PznPwcwHYusk7p2ANPNL03oNOdf3dDxaueqymx1X7DyWwMqec8A5t0MdK7m7ZevY7+kezoNIATMlBnD4TAdgK1LgVfOm9jnee88Pz9P+wzOL/bT7/3e7+Ff/st/CWC2r/rZz34GAK55eR0B3sK0LxAIBAKBQCAQCAQqYuFgE3rS1tC8wPRkSA0KtUnU/g2Hw4x2Qh2wrXbIc4Cvm8ImVNtgGanxeJzR/LNcL168SIwBy0aNTa/XS8ybamzzzCbqDn+u2iq2l5oasp48zWsWdv6WJ34Nb20dOBuNxrVrKzSUN8fg+fl5YiX4Hanfr776KrUBHXupRQOQMVuoG+qsbhNnqnbWshfdbjdpXKnFU6reCyfM7zRkPTBP5VtWZTQaXZnV3IJz9+joKDm6Uzuu5oisH9+9tbWVNMdkomjO98EHHyQmiqYhrL8mrlQTSL6bV01EvEj4cy9FgQYGYf/x2RqogH2jZr5Wa6wmZNbEcZVhtbV+FnZcDIdD1/QLmI4Ztj/rq2YXnvlenuN1UULEMshjpLQdPbNzapyplb64uEh14rjm/2rqrKZONgR1nSk6tE7KRHC8sIxqvmpNhdvtdqo712LKhwcPHmTSdfR6vUzI42XkOoMqnJ2dZZLhaoJjjh29h21KjTnH2dbWViq3Vyf9G5jKeN7HOUm5qPUtiyK2muh2u2l8cQzt7+8n5o11Iev08OHD1J80g6Zc39zcTOVXVs9q15fpJ48p8cz3NUw4MF2P3nnnHQCzENNcW3/1q18lRop9zT49OTnJ7PV6vV6hudUiKGJT19bW3OBmhCaUZx34vQ1tf3Z2NhdkAphnDa3MW+V+yZpkrq2tpfdzLv3d3/1dKisZUEItKa7L4uiqfa+V451OJ+1j/+Zv/gbAzMLtiy++SHKHe3bKhKOjIzcI0LJMYTBSgUAgEAgEAoFAIFARSzNSageqiT6p4aImhRqkfr+fnmHt0c/OzjKJNVUzbDWaddnWe9pfy0gNh8OkQeHpliyOauOpxdAktpaRGo/HK/fH8RgvmzBOHaiphaAW786dO3PaI/2dhvX0/GzqZgqLtAV2bAwGg9QX7C/acH/44YdpfJGxIDRp3aocQr3kp9ZRv9FopDIW2aZzXu3s7KS/rT9is9mcC2IBTMezDXig7EHVkM1kYl+8eDGXDgCYtXG/309tq/KAbJMNcf7uu+8mlopaWdZJWQNqd58/f576nOVR35aipIgWOl8830lg3k9FWRmbnFod5/kMasjY/8pI2VQR+m6dX3WwU57Ms8/VkL5sV5XBLLcXUIi/Y108n1Tvs6pQR2TPH8E6k6vvDccU7z87O0uy2jI8Kh80oECdoae1ToRlCTWMvOdza/tibW0tyQ7LXN+/fz+ty5raos7Q+5zH5+fnuSHIdQ7wOhwO55Ih63ea4Jt9qMFQyHTzuru7m2Hr1Y+KcpOhoctCmVSbOqDVaqUyKTtOtp7v4vXtt99O7U2GgFr1RqORZAmhPo3eWr/MfLLQMUi5p75gHEPW7+ujjz5KdSETQtbt8vJyLhgP32P3kopF65T3u0ajkQlEoekFOD5ZT
l1HOBbVAoJ143fe2lO3HC8K1KFX1onzRvd0LLf6gvH/Ve9TFbYuXr8pI8rxxjHDsfZnf/ZnaQxS7msS8rz36WdVx1owUoFAIBAIBAKBQCBQEZXDn9tIbprIUSM12Yg71ASdnZ1losDxBH96eprRqus7V22nqX4/lpHq9/sZW3PW8ezsLGkmqLHRBKjWL+Li4iI3MlLdoUu9NlMmkJoKy95oklNqaTWUsWWkNJT6dUE1b+pfQm0y+4J1IwMCZCNHqd+XIk9DsYhfh7aZ1ZRrPWyo3q2trQwTxXmysbGR0eyprwrrp9pd1Zjpd6PRqDIjpREG+bd9p8dQv/HGG8m2nr5RjMz3+uuvp/tYN9ZJoxpSu7a/v+8yUUDWd+EqKFNjI86p7LNservdnoukCMy05A8fPkzz3SYDHAwGidXiXPJ8kzz/qUVgf6syW5NL8l2aFBqYyYJGo5HqyzqxjsrYqcOgq+QAACAASURBVB/eKhKqe0m5le21Udo0pK+NNnh0dJTYAytD9Bmcs+12O9enY5HIaXp/UboR6wOxsbExF95d0W63E8Nto85ubW3N1cWrhy1XVXDdH41GqS2tf6jKpKtYKvssOw8PDg6SZprj8s6dO5nIfxoBkEwdwydfBV1bLRuqLJHKb5affqM2afBbb7015/MGzOTbaDRKz9IInxox1CtfXVDfULt3076w/mc7OztpH8f215Q3no9XUdlXsTeyjKKmH1F5BmAujQbHJOuzv7+f+k3XP44LL63IqiM2K1gHtf4CpuOW39l9RKvVutYyWhSxQ+12O40pyi71w+Z+nPOmyAqiDmZqYdM+QilmHSzsFAoKCi51pid0k0fB6Dkme5TlopNLTULyQg1r2fr9fqqT3cwOh8PUcayb1p/PU+GjYYRXCTVXY301NxaFGQccD1LqaEgBT8GiBynPSfMmciXohGf97CZ6a2srUzbP9GhV0M2KbT/dpOkhnOVW52i9H0DmYKSOsFZw9vv9TPvoHKvaDmoKxY0Rn8Gy9nq9NP/VlIWO1qThmfdre3s7jSc+U0M9M3Qrr4eHh6nOXt9XzY0FzDsie7+35h/NZjOzkWef3b9/P3NwZv+pqSJlhneQ0gNVnTLDO0jpeLCmKay35t7hZzq+7Yar6oG2LPTAYsduo9GYC7ICzCse1LySZeTGSBUswHwaDjWBsRuNVckR7X8NnAFM68ZxZde0Xq83Z9YIzOeNyzObr6sufKcGa7L5yobD4ZzCEvCDU3DMqeko57uaO6rpOjA9XPEAo4crYHqo5MHyD//wDyvXj+1szeZbrVbG7BCYmcjyM7b7K6+8kvqVdaLCqNlspnWCfany5iY25EV5DzVYgZWdetC3snrVa7BnqmoD1Gh9rAuK5l3jZ3r41TQ+fGae4kqffx3IU2J56YuIokAddaNoL6+BKLzAJzaNha41Vtng1cN7d5j2BQKBQCAQCAQCgcCKsbAnmZ7krfZxbW2W6ExpXmCq6VNnXT4DmGooqKXQ0/F1ndyV3dDgBcBUa0ktljVTOjw8zCRM1fqznl4iXPvuVZn26albKXX2mWXbOp1O+i01NGqi6DFS123ap9B3WzML1rvdbmc0tlVN2fLeWQZqBqQJIYFZ+3sOnhqmnlo/1aBZJkq1u9T6lkmmvEhIVjW74ZiwCQ3v3r2b2CY6n7/11lvJmZoBTigrLi8vU11oTqHJicn+si00aIrVQGnS1CpQGcC+0rlpwzWrptGmFNjZ2cmwW54ZnCdTtTwsQ9U+KhPcIc/0xDJKGipczd4AP/CMlyZjVcFo7HOVFVDG1yanJPs0GAzSmCLjocED2Gacqx4jVTejYxl3lQ3q8G8ZTGW6bRt45ohlzAoXgU1gCszaj+zEeDzOBNAYDAaZZOXKVtkAFBpK3ZqVamAUPoNyZX9/PzFlVeG1n8of1l2ZOJaD+wWW8e7du5kARGrya8M/6zvtdRFLnbyxYO+xskSTk3vmznlJ31W+5aVFKCpLFZQxF9T1VOsGzKd/YRk5xpQttQmnLy8vV5a+gqi6byy67yb3b/r+MqZ2nom77kXzTPPy1s9l6x6MVCAQCAQCgUAgEAhUxMI+UnoytKEF1UnNJqck46H3E+q4qLbty9ovXlUX+1y1h9dEr6ynZdmOj4+TJtPaCW9ubiaNoGo68nwgvHItA++5qjGx2nOybsowUdPC/72EvHVrM+pg6LzEn7eBNfPspJU9oyaW7CYwnzQXmE+qrAkA9V4v8bMyNF7fVW0fZSqoEeJYou/BgwcP8NprrwFAuj569Cj5TdnwsoPBIGmMqbklC3VwcJC+o8Z2NBpl5JIyP4uGcLUsjWqMWW+dGzo/+G5gPnCL9bO5vLzM+Okpe0PUMW6LbMHzwg1rkAuFtqnHnnk27XXCY1AIy8oAszGmTLudZ2dnZ+lvy7ZpqHNNL5DXL8vWt8i3wqtfnp9vq9XKsEIee699WGdf6TrnMeAsq/XP29jYSPOIeweVeZx3lplS/yll5jXQk9b37OxsLoBDXfXVNVL3Mnw/WRuVB9xXsL9YLu0PL0VH3XPL0+ITyozzXhvshW2tocTtWlOWNVt2X1Q2eIWOf2vRorLAphPRtdeT2ZaRWpXfURmrg28KyjBTZX6fh0VDnBdhYdM+XSCtMNYCWhM/pbB5PyeZTjx12swzv1rlgYoDn2U8Pz9PZbORaJ48eTKX4wKYN9XiYdLLvWXrsqoDiYLvVhrbOsPrwm2j66gTZdly1yHsvQngTbo6I+3VCW+jzLbVg7sXCcoeoDSvTNFYssFAdGNlF7dFTPs8EyIexhkN65VXXsErr7wCYD5Pmd0wcFE6Pj6ey08FYC6aGjdKuinKM51b9CClGz+rVNHNg27CeL+N3qeBWwg10bEmqPour1zLIG+RUpmn9bYbN29u2fFa5n3LomgjqWNAcxsC8+a93GhrHik9nAPzuRF1TPE9ZTdqi8IzwbSHXm+s6tU+Q00sbZ/VbZqoSta8NcMrf6vVck3i+UwrBzWKnw1coUGsbOTA0WhUKc+cRd6hQ9vOkz8awACYD4xC6H5qFZu/sih6p9d33rixiuOrnlu1HIvea8uqezNLEOjabA/kajJWlAO1SHFVB6q26U3uh67CVWtH0ZwoUgaU+X1VhGlfIBAIBAKBQCAQCFTE0mmLLy8vM3mk9IRnmY6tra254BLAfPx6a8LEd+h11VCtH9+pzq/UJtPkYGNjI5NnhWZNGn5Ww+wuE+RgWah2yDq4evlErOmORx+vSlO2ahapLMtVB4qcaT2nfJ0L9jNP42nNejRsrn7nzVPv/zKgI3W3280EYSEjtbu7mz7T8Lc2PwzNXV6+fJkYKJubw8sWr/X0WINFgk0AWe2hZ5aickrNnfX/ZrM5lxePnxFF42JVY7+MKYjHZpSZ+1642uuEF5hBzausWabHZlimHpjJxjwHev3dKs2tvHmb1xeTySRjXVFU3lUxh94Y13lVxFJZtlnZbxuOW8Ofe6aA9jocDjNmnGVRtDZ55nAasMSyHqyX/a3ey3feBpRZL68aS973N1W/IoZb29+aMRJq
Bq3rhl2Tl7H+uAnchjJetQcswyytej0KRioQCAQCgUAgEAgEKqIWRsoL/Wk1TJpEkFoYq9lT3wNrn8p3rQL2ROv5BmiYad5HXxCtEx3hqeVqtVpJC6/BAGwyves8+XvaF8/unrDal+t2aLxJ7Xad9fPa3WOkPJ8P6/uj88vzi+DVY1PKlK8syLyur6+nv3klC7W1tZVxJj87O5sLZwzM5s7R0dFcAADAT7Srml7PF4z3LNqHeb9TDZkyU3lhfD0fCe2XIq39qlHETFVlgz2/qVXLB6/cHiOl88YyUoSmC+BzdVzZMeatTddhPVGmTfMc6fPuWVV5i3xjFmGpeLUsFcvf6XQyicbVx9Qm8FVLkzpQxLIU+RUqi1vmmd73y8y1Otb0OuZ6XuCY68JVzEfe957vIpBdkz3G9SbhyfjbUC6LunyeVlW3YKQCgUAgEAgEAoFAoCKWjtrnaVTH43HmdE7NUafTSX4V1ndBE2mpNslqelfNTKjGi0yThmZneViPzc3NVDb6ctAfSv0jqNGcTCZuhEP+f13Mi/eeIrv/m8Z1a0pW9b4iraXOAc+3x9POVmGdripHVZB92tzcTAwtr0wB0Gw25+YRMNUEk23inOH19PQ0zR8b0h3I+oS12+2MhrrORIhlWTwrn1RG5kUO9LTURfet0mdqFT4Lq2KmyrSVRtXTuWQjvakfrOffxv/tGCtbnjpQ55xetKzL1NFjvzyWylsXy2jztb8sS9Vut5OPHGWSRj21UWlXhapzva53VUVe2ao+07u/6hi6buuXq8pAWPngRcC8KlLlbcFtKktZXOWfeN11Wtq0D8huHDxTAjXx48HCUvy62SgK0VoX8ihNb3M6Ho/TwsvyUyhvbGxkspFzIwjMHJ3z6mfxTRzYN42yG82bNBO0Zcj7LG+ToH8X3a8ocgCuow0YYGJrayspFmwYX1VC8GB0fn4+lxWenwHzASWs6ama3eqByh6uyrZPHsqaPJRpz2/KfF7l3LiOxc2a9k0mk4yTtwZH8swz7UFKZXZRThiirrar2lar6rM6n1FknmMPVzpeyh6u8t6jJs56uOKzbzLwk+I2mLCXcerX+xd9z1Xvuur914EyezNttyKl3W08QH3TUcbc77oQpn2BQCAQCAQCgUAgUBFrt8VsKxAIBAKBQCAQCAS+KQhGKhAIBAKBQCAQCAQqIg5SgUAgEAgEAoFAIFARcZAKBAKBQCAQCAQCgYqIg1QgEAgEAoFAIBAIVEQcpAKBQCAQCAQCgUCgIuIgFQgEAoFAIBAIBAIVEQepQCAQCAQCgUAgEKiIOEgFAoFAIBAIBAKBQEW0ir78+OOPV5Ktd21trfZn/vCHPyz10Kp1YlkbjQaazSYApCu/m0wm6Pf7AICjoyMAwPHxMQDg9PQUk8kEANDr9QAA9+/fx4MHDwAA9+7dm/vu8vIyPesP//APK9Wp7nZdRbLmjz/+eOF+Klu/KuW+6pllnlW2Tj/60Y8qNWjRuxuNqQ5kbW0t3WevHsq2Ydn5VFQnLQ//5lyYTCbp7/F4PHfV7+zVQ6PRSO3hXTlf/9t/+29X1qlsH11XIvOi/qqjj5Z5fxlUbaeqcnyZ+VumbleVv8wz6libvPcsWreqv/PuX5XMu0lUnU9eu+hnnjzm32xnvXqf6fWq93i4DhlRBraMy8iVMnXy5pK3Nug6CkzXoNFoBABpHzYYDNJ3vK/Vmm6dO50OAKDb7aZ1hs/UNYt/e2NimT0RsUx7LruWee9eZtx5bcS25T653W7j/PwcAPDkyRMAwN7eXrrnvffeAwC8/fbbc2V89uwZnj9/DgAYDocApn3X7XYBzPpOy8JyFPVTMFKBQCAQCAQCgUAgUBGFjNSqUPUEvAoGq+w7eUJtNpsZjQPrMRwOcXp6CmDGSPE6HA7TaXd9fR0AcPfu3cRE3blzZ+6Zg8EgaearQtu1bJvZ+/R/209lGI9VoEr/16FBLvusm4SnyVx03NSBIk3sZDKZY5uAaVkvLi7S3wDm/uff3pizmlqdm9QU8n8gq2Vapm7XDavBXuYZdf/2JuRyFdzm+VsH2P5l+2jR9ijznn+K8NpWrVT4mcrlvDZcW1vL7C9UbnnrtH3nbemfMuVYZK9SN9RawWtDrlVci0ajUeY+rjeXl5eZfltbW0t9b3+n/ReYIm+/qXsctQpj/5AxJIPY6XQSU8g9t7KDdi/ijcWqfVN4kKpjgtZhZnUTG3c9QPGqmzMAiQI+OzvD4eEhAODg4AAAEu3YbDaxvb0NANjZ2QEA7O7u4v79+wBm1DBpxouLizQwloHXZkUCy242G41Gpv914FUxJ1sGdZrz3cSz6kKeSUir1cqYyN1EufLM91guzhWWcTQazS1QwGzBuri4yDXp07nJRazVauW2j52zVet106hjnHnjwqvfVeaTeShSxlT9LLAY8sar129FyonAYig66BDj8ThXVquZmVUGNZvNjAma/n1bD1Rlcd3jUPd2bGt9L/di7Cv+PxgMMm3M/Zs+QxXtdk28iT6qQ5lynfAOVHY9bzQa6T4eoLjn3tzcTP2yubkJYHbYury8zChwgeXHXZj2BQKBQCAQCAQCgUBFlDLtW+S0lmeSUnQi1pPnTcAzl1KTIX6mTBQAHB4eYn9/P/2t2NnZSWZ8r776KgDg0aNHyaSPp2OepgeDQTph142itqfGpMgRU7UrnulVHjW7CBZ1BC8yAdN+LXIEXrZcq4CdTxyX7XY7YxpntV+rhLKUNmiEsk+cM7wOh8Ok6bPfjUajXOdk1SIqg2LNbT3NbRncpMZuVf3Ffihikr35W5ahquI47z1D7y+LOtuqaiCHslg1S699Z81UVNNund+LTFq+KajD0mVRlJEtHgswHo8zsppXABnzZF7b7XaGpfLm320xG1vVfKqCq/YBwLQNPUbKmspzneIeTZ9Ptw3PtE/vK5KldaNI5pbZ/xTJheuUEzqevbnB/mG/nJycAJgGcGNQCjJSKvu4z6jTtC8YqUAgEAgEAoFAIBCoiFqDTXgaSQ9F/jW3QZOimgrVAFn2iOzT3t5e8o2iLebW1haAKSP1+uuvAwDeeOMNAFMfKb6LQSn4zNPT05UzUnoSt5oS1nc8Hqe/aW/qaZC9kNV1OMiXqUfeZ9ZWWa95YUiv0s7epMbWYxI4PrvdbhqXagdMrLrcykxaLauyTyybXvm3ZaQ8nx7VzhJeQAnr29hutxf2k6oTNzl+dI7m+bF5rIbKhjI+lyofLJvthajX71bdPkUaas+PSFk8i1WVtWzwCFt+z0KA33U6nTRnvDQDFrdhrtSNVVm6aF94Pkx2vHtBd4qYKfZbt9tNazA/a7VamXfq+LltDGNZy6RV7x2AWX+0Wq3Unh7Dxz7ifowBxRQazMDzy/X2XHqtGypLvTFpyzMej3PLaJ9LXPfY8vyiG41GmktkopQx3NjYADDbh7MPlZGy71gGwUgFAoFAIBAIBAKBQEXUwkh5kbJs8lovgaiXkNNqAq86uddxsvfKbxmpy8vLpEF/+fIlAKTEXk+fPk2nYtpmMkLfW2+9hW9961s
AZj5SvV4vc4rWBL5Vo/aVtUm2ber5rrD9G41G0tawTrx6EQy9ctStdSnjg6UhTVl+ZdmsJtDzsyEW8d1YBdT/iGWkhvLOnTvJV49j6joZKW1PZaCA+bCk1Ajp1fpIaZ9Y+UGor6L2s2pv9ar+BatGkV9Qnm36dTDwWp4iTbDHVlXRpHoMk+dzWZQYs2547W3fz7GztraWy9ro74p8EFYFll/lm8ouywITm5ubSW5b/5zBYJCeoZpeLyllnXXwcBvkbFWopjxvvwPM+kmTiNp1SCObEuyT4XCYWYM7nU5mj/JNasPriiTnsXO6fijDR/BvzhdlpCxjRsZjPB5nWBNl+evomyJ2XK9F/nTeuMvzsdbf6jj3mK464a01tm2B2T6D+x/OrUajkfqFPlK0HtN9iu4t8sZI2bFZuMMo63zvOSnbzY4XytOjty3VreYoee9fFHaxV4c2u4EbDofJDE8PUMB8gAmGOn/rrbcAAN/+9rfTQeru3bsAphOTnc9DGQ9S5+fnczT/snVTeA751mGP9PVgMHA37cB0cJI61U3IqlBmHGpZdRMNzMZSv9+fq59Ccw+U3dRdlxmqZ9rHBZXjDQBevHgBYLYAXIeZjhe23+Z1OD8/T+OL136/nzlAeWYdVn602+1Ud47B9fX19LduNIB5E5hlUTTGdYNrryojrXzzzGvrBtuE7wOyZhzeoUlNkbzfVTlceeZ+1wHvcGgPUDTPaTQaacxyDGs/Wcf066iHnROe4/V4PE7zkPKN9d3Z2Un1I3jvyclJ+ptoNpsLO1wXlb/sfd+kwwCQ3UNwTKkCR83gOb64Rumh2AvIw/9tyObJZDIX6ECfdVvasKyCdxXl9dxFvP2erhO8VxWvwKwfjo+PM8/ink4PbNqnHpFQR50Ib/x5ZqAExxbH4XA4zLgGFJmZ6juvUw7aclxcXMzJMWBWp2azmTlIsX2UPCDqUJiHaV8gEAgEAoFAIBAIVMTSNi+qyfQCD3gnZntS1tOgl4CTqJMB8Jx2VZPMz3h6PTk5wd7eHgDgyZMnAJD+HwwGePDgAYBZQIkPP/wQAPDee+/h0aNHc3U7ODhILBZZBDJSw+Gw1hO+FwCEGotut5txtqQ24uzsLJWJ4Ml/e3s7MSE88Xe7XZdKXhRFfa3f8Z3Uzq2vr6f6sS7U0j5//jyximwLlr/X682Zt/A9eVr3VZsj2P85t1hGapnv3buXm+zvOhgz1Z5a8z2l3JWJ4v15TFSr1crUkxqmra2txIzys/X19XQfx4H2ZZX5VPZeK9darVZ6NxkgZWutJlADbdTJAHigmbG2RVGgBUJlsQ1OkSf3+X+ZwDarHp/6Th1rlA/sJ2qVlTmknGB/eUEEVl12vSpseOzhcJjGFy0c+P8rr7ySZBzHI+t0cHCQ0c56CUa9ct02XKc5s/ZNHtvR6XQy5ngasIprkq6f7E879gaDQaGpPuUOsSpT2bJtrOyLukYAxYFOrgu6znBO6Li3lhyU1WdnZ6n/eA+/G4/H7j7SppVZBjrubPvruFNLDX7G33JM8f+zs7NUB+7vlKW27h36rlVZGNi1Qy3ElC1kublP5f+dTiftEdgWup+nzFOWjWCdvIBXRQhGKhAIBAKBQCAQCAQqYmFGSk+N1sby7OwsaVx41VMuT//Uxqg9o7VZXWXYRevA5oVWpFZ9f38/MVH0jWLdtra2EhP13e9+FwDw0UcfAQDefvvtVCcyWIeHh3N/A/OhG72T8qLwHCupXVAtGOtLbdje3l6qJz/jSX4wGKR2Ubts1erou+3fddaJ71RGguV89uwZAODzzz8HMO03jlsyiLu7uwCm/l82TLCXGPYmNLI6x2x979+/n+aW1aaohn1VWlodE9Y3Sq/8Tn2qCBvavNfrJZlAzRIZ0O3t7Tl2Cpi2hcdEEavQflrto2oCKdfYV5PJJM1v1tv6pljUOc7u378PYN4awF6VBSxilj3/KRvoQO3Xtb+tT5xeq2oAy0CZXG1v9g/H1sOHD1OdOGZZVsp41fKzX6/DYd5jpqxvl/rQ0MKB5X7jjTfSfWTeKLsbjUaGkbq4uMj4vd6EzPMsRgjtV3sF/GAmdcILE22ZlrW1tSSTKKc2NjbSHGO7c23d2dlJ45I+2OzL8/Pz9FyV9Xm+KtcRXKdM4AP1E7NWExcXF9fCShWF8tY9Eee0+kISGtSA65hld3U9K0oDsQy8MW77vNfrZdbMXq+X2oGsE8ff5eVlkgccb/wfmDE6fKamBroO/3g+3/qFDgaDjF8/+6Db7abycg7q/PEsYZYNsFM444oWCs9kTDuXhWXHsZP6/X66j5107949ANNNrTXb6XQ6tTrNa1nzIu3oxoflfvr0aTpYkErkQHr11VeTKd/3v/99AFOTPmC6ePF+fRYPUmwfttcieW+KhJpufKxp371799KBgoss3318fJzq+8UXX6Rys8w2ipya0XjR7+pcjPUAbANEnJ+f4+uvvwYA/PznPwcAfPrppwCmbcwgIIygyEAgGxsbGefF0Wh0oyYIuomyG009SLG8NojLdWyAdMOcF4VPy66bdZunjHW6c+dOGo/2ur29nTmw6AHBjvvxeFxL8BaFt8nTjQ3bX4Nv5CmWrsMkSc2H2Ma8qvlRUcRDu3B7ZnM6Fig/2Qbn5+dzf+t3egivE5eXl5lDnkYj5QHztddeS/dQLrOM3Oi22+2Mc/8qzc7tAUrntDV7VbMimvax3u+88066n0ojjkHNQcfnc27Z8tRdR4WaKunG0DrN87vhcJjqYJU24/F4znSdV7vGL2P+7Jm82jHR6XQyQX82NzfT/oZ14px5+PBh2gfxHq63+/v7aXOr77OHSPblqkytrvrezrXxeDwXyGUVZbuqTEXQQ4EepKwim310fn6eCRaipn36XF7tuNM1uurYK2Puu76+ng5Q3Nttbm6m8rFu3JOOx+P0N2Uf/9fod0Sn08kEr9G6LSMj8n6r/aSBPCijOTd4z8bGRppD7FdVqNm9VNGYLFufMO0LBAKBQCAQCAQCgYoozQHnxVlX8yqeXnd2dlKABZ4Wqdk7OjrK5FLwwk574WrzylIFXjheL9Q5T+U0D3v69Gkyw7PmYR988AF+8IMfAAC+853vAJiZiwyHQxwcHAAAvvrqKwDA119/nTSH1HYoO1YHNW+ZFDVbYb3X19dTXeiQTu3s+vp6YjpY7l/96lcApgyV1dwq41U38hzkm81mKj81lE+fPsXPfvYzAMAvfvELALM2fu+99/Cbv/mbAIDf+q3fAjBjQ09OTpLppoah9cIPXzdU+8+rmutYh8rrZKSKcg6pdtnO4VarlQljTi3S3bt303jklYzU1tZWup9yQ+cvy6BBMOpmO9TJXNkna85G2Xd8fDxnuqtl7/V6qR08s8Q6QE1xq9XKaLG9wC0sR7fbzaQS8EIs2zYfjUYZpkADjti8H6rprQMek8uyaVAQMjSUeScnJ6ntKfsop9fX19P4JDzT2brnnBfkw1p0ADMNOcvNtWpvby+1ATXVrPfGxkYmYIiy8KsKp+2xbdbMfnNzM8lmDQYCTPuEVh
I2JclkMkntQsZR19Q6AmioRtuyMfod209TVXAeUa7x/wcPHqTyKvsOTGXe/v4+gFn/at4iG8hBU8ysGpeXl3MBuYBZn0wmk1QX1leZn+sqn52bXioB9sNkMnGDTgHzTDvHGGW9Btjx8uPVsX/Q9TXPnHN9fT21OcfTnTt3MqanytRwr8s5RNnRbDZT/XSPr64DvI//ryJlgvaTBkWzAaw0wAStMPg7dUGw5VdrkkXLH4xUIBAIBAKBQCAQCFREZeqjKOwvGSnVKPPkzhNzv99PJ14yNfz/7OxszgnOvrMOzZiW2zJRGpqUGiAyUhoqlpqyb3/72wCAH/zgB/je974HAHj99dfn6vvs2TM8fvwYwIzZOTg4SFpZe6pfxEfKg9WYa7JjbX/2AbVILNc777yT+pN+RaqpJiPCdjo9PXUzYy+LooAVk8kklZf1+OSTT5K2kmDgj3/37/4d/s2/+TcAZlpo/m5/fz+THFmT83l9cp323taulxqijY2NNP9uwjlc3+VlHwemZbXfra+vJ60RNa+UGffu3cswUdQ2aXh7tZe2fjoa5rVuHym+E5j3D7IsNn31Dg4O5hKkArPx9/DhQzdpY51ji5pGTTpt/aGUGVOfNX7vJXm0a4GyJtZfQn3orJxYlR+iFxBDEzqzL6i51fupXWef5smhvDm3Kmbq4uIiPVsDM3EuEeqPyzpw7im7q+syMNVQW0aKWETjrO3jrUl8p2VvdnZ2kp8xKmMEUQAAIABJREFUAznxns8//zzJbbVE4DPZn8p45/l4L9NP4/E4o+nXkOXWD1L3SjbQ1tbWVvrMhq/W9A7eemvnoZdqYFW4vLxMdabcoyXIy5cvky8yfchpqdTtdm/cX8qzStJ9oWX1xuOxmyyZ33ljrE5GqsgXXn291NeYV45TygJlajiH7L5c/Zi5Rg+Hw9zURnX7whPqa6bBSshIsS/YBmq1Qmi6EY85DEYqEAgEAoFAIBAIBK4ZlRkpa9/sJaXrdrtJK0RtEu2yO51OOklSg0EG4euvv87YAvf7/Vp9PrxTKJ9PduPw8DBpcVmefr+fNEaM9PZrv/ZrAIBf//VfxzvvvAMAGc3R48ePU/3og3N8fJw0BNYHwWPKloFq/6wN8+npaSoHtRdkn95//328++67AGYaW+Ly8jKTfNQLZ6r9tagmM+95wFTLwDrpuKHGlf3zb//tvwUA/MEf/EHqJ97PPvnkk0/S32pH7GllbwOUycwL3XkdzJTHQllGygtTv7W1lbRcZHh53dnZSWyBMlHAtE/svFWfHNaZmsK6NYCEDUd8dHQ0N5b0+vLlyzRP3nzzTQAzVmAVYb8tvvzyy/R3kb+h9ZHShJU2ibpG9POin3rtbhko9ddaVfhz24fNZjPJLutrp36kXAuoyfSY10ajcW2af20zGzG11+vNhTrWcu/v7ydNs11zNjc3098eI+Whjvlk/dZGo1EaO5zvm5ubib3gesv6DgaDtBZ7bKg3b1fFGFo5oAntbWTBwWCQCb1M3L9/P/Ud90q6r7K+PC9evEhj1D7zuvyj+E6bOoUWOE+fPk3jkP7k6hvmlfM61tciZseL5uxF57RMt8dIec+vC3mh773y63psmZeLi4vctCWtVit9541b792LysOiMPVeePLRaJTOEewLTatko/UpI+U9n1i0/IUHqaKBoYugOnIB080sTQu4uXjllVcATDcQ7GC7+dGFWxdzawqyjBD0QjWyHKQ99/f300abwqrT6SRzHJrx/cZv/AaAqYkfFwB2Ls34Pv300yRYuKCNRqNM/gI106gqCL2Jbilr3XCzPQ8ODpL5EYUg2/3Ro0d4//33U/2AWchwNZVgn6vDsLeRr0OQWNOQ4XCYGUO7u7sp0Mdv//ZvAwB+//d/HwDw7rvvJvO9v/7rvwYA/MVf/AWA6aaXz6W5hW4ur3NxqgLd+NxEYAwNTWyDxrDtut1u2rSq+a8enIDZgX17ezttDHmA0vd4uYFsXibdeFYx7SsrWzwzJRv2nfXf2NjIOABr/fJCjCuW6UvOcc035F3tQcdzalZ5kpeTqt1uZ0wB9eDl5auqc355ckc3ony/lWEnJyeZnCSqKCgKWFAnikw7x+NxZtOu4X45ztjup6enKTAQZbyGvufmnfUGsvK7rvrmhXTXQEU6t7n2UmbzHi0r5xEPXePxeM7pHPCD0dgyVYE9vOnfGnCGn3FfcXR0lBR/vGq+L8pDa+Z49+5dN18R77MbyroVaFelwWE5KNt5EByPx2k88sB+Feoec96zdfx5e0rP3I9XWy7vWXmmsYpF6lcmTLeX17Xf72f26BynOoa9eltZ7R1q6oZNo6Prgwb+sDnwOF/UZJnzTPM42jWhjvrczt1hIBAIBAKBQCAQCNxiLG3ap0EMNMGi1RBrQArN9A1gLgGapymt01RJT7ZKwwPzznY2yenu7m4KbU6TMTpQ7u7uptMxtb806/n888+TmSDbR5NCKhPF7+oK767lV3aFUIc9aivJID558iQxaUxoS1O/1157LWmaWH5g9SZlniaTdSKrsbW1lZg0JkfmOPvkk08SE/Xnf/7nAGYh3S8uLjIhWj3TvusM5EB44T853s7OzjKs3HWaH3pJL22SQ09jfu/evQwjpU7wHhMFTLWcVqvmOQHzOhgMMpqrOsH+6Ha7qR40jyX7tLa2lkmSqMF4Vs10qJmahiMHZmy8holXc448awAdk7a/NaADr+vr65nPdOyoHKkLqj21rBkwYwMon/f29lJbaYoIXq+LkVJYlnIymWQCqmxubiZTNzu+gPlQ6MCMMWg2m5kAI2traytLn5DHbgKzepJVOTw8nDONVWiCWsoVBnm6vLycSy0AzJs21e0ekBeM6OLiImPp8vLly7TOsk7smxcvXiSLD8pBlnsymczJUmDa99YcVpnKVa1Tduw3m820n6MbB8t9dHSU5g/HnA2MdBMoYpGUhbfmlBqsx8oC71mLJN0tgjInXiAMYD75uybn5hrI8acmyzZYHMuvQWzYj7rOW3im1FWQFxDGS4ukjBS/t+cKwE/YXWS6mVeWqxCMVCAQCAQCgUAgEAhUxMLhz71AApqYjZoWnoo1GSxPjBruGJhql6wmWk+/dWoxxuNxKhNP6Qz7eHx8nOrEMr733nuJ4WA4bdplAzOtJtkbXp8+fZq0v3ymamytT9iy9ppFPg2aMBmY+q1Rk0cneLJQBwcHScvBdqJGs9VqJc0ANRYa4nrVCRzVD4xjSZNsUvvFcv/DP/wDAOCXv/xlYqTYP9S4ewEO2u12Ict2XeyUF0aVdXv58mXSQFn/uOvQ+nG+qs+MOn0C84EllJHi38pEAVNNsjIHwLxPptXIa7JXe+33+7Un5AWyQTY2NzdTmTn+2P69Xm9Oo6e/n0wmmZQPdQcwUP+EvLmkDD1lhvpNaZhf3u+x3sB8QCENSqO+OcC8r05ZH4qqsJpj9blVn1VgKt84tlhG1k37V7XQq2Lh8ywLdLyQlVFWk2OPa9NkMskwb+yn8XicCYOv60/dDLfHRAHzvid85/HxcQrSxKBUxGAwm
PNdBmZrmo7LVTGIXoAdG8hjNBplmPPT09PUFzbw0+HhYVpf6ePLdajZbGZYdfUn12ApxHUFQVHfWI49lrvf78/teVhu/m7V5QL8fWpRmga93/qqeSkidCzbUPh1y4eiVDCsx/n5uetTyPpR5lE+NxqNtE6z/5TJtelHut1uZo+q4dDrGHdFIeM13YgNMsH9Q7fbTd9RTqjVThlGqnKZl35CIBAIBAKBQCAQCPwTQyEjVRStRW3P1WYYmJ5kbbhjamKGw2Em6o36M3inxDqjhOg7WSYNBw5MNQs83ZKx+fDDD5OPlE26++LFC3z22WcAZj43jNr38uXLzMm51+tlolrVHRnJs2W2PgqvvvpqCgdOlo1asWfPnqXIi9Sssaz8PeBHXvKwaP28CFyejboymWqTzroAUxaK/UJQC/Pw4cNMxCvVrqwqfG4ZqD8K20ATEVO7aaPdXCcj1W63kzaWn2nCXctI6Wc2DHWz2cyEyOYYPD8/T/OU/Xx8fJz+ttGw+v1+0kbVCav1brVaGZ9PZWP4mUYdYvmUbVsFqK0fjUaFvmT6N7+z92sYXFteHaM2Qp/6QXnJfeuU8cp8sL1VC6mhwYGZ/N/f3091sv4Cm5ubrm/AKuRAkcwDZn3Asb6+vp6xoGCE2bOzs1Qn1pfjUtdbfuZp8OvSqnvyO6++OvZsu2uoad5vGWz9rm548lXXRMBPBq0hm20UtfPz8yTDaBlDH0sde9aHXMuhTONN+CBZ1qnT6VSOJLiqMOEK9e/XEPyE+ggB8+kCLGPNOmsUaPXdsfVfZg55PoVaF2A6jijP+O52u51JyKv++mSb+AxlQrmGKyNl52PRPqkOeIyUJtbVaH0so1rHAZjbA3iRCJctd2nTvrwB3mg0UsG0QnR6VSqOV7sAeyFmtZJ1Ti4dcDYUKRtf82Ax9PcHH3yQnClZTw7KL7/8MjnGMmeL5u6w7dPtducmIOtJVO1UbyGyFKvm6tCNDDd/7C8eEo+OjtJByppnjsfjjDnPVX20TJ0slPK1AuX8/DwdMmyOqePj48ykU3M+u9n1ynHdByhgfo4RZQ5SywYuKQPN78S2taHOt7a2SjmtqvmYDXHODcjZ2Vlm3mrYarYL768z2ERRW+qh3gY20A293Qh54evrBse4OkTbw5uauegG0N6nV1vuq3KMWJMKvdaZO0/L4OWB4yaC8o3zhfIcmHdYBqay24ZNvw4U5WrRsO0sE+cX040cHR2lNmD9WO/RaJRRCGgaC2KV4bS9/1kO729bnpsMWqDytUxOpLW1tUzuJw3lbpVGlG/b29upX3W988zReM91jVENMGCd/4H8/rmJoE1FKSs0OJpdR3XPwPutUkjN2vT59rNlcNWhEJg/MLBOqpjk96xHu91OhyQbyKrRaLg59+yea1UmpV5+Jx1rLAfLpgdA1pNziG2he6k6FWNh2hcIBAKBQCAQCAQCFVE52IQHSy23Wq3csKBKeVsHZtWYrgpKpVM7xHLwpLq9vZ1CGDPk9xtvvJE07Db4wqeffpocY8l+KHVqTeo0rPaqAjMQ2rZWq6waGWq82AZbW1sZZ0s1lyrKdL1qXOWEqKwjMG+eY8PI8tpqtVJdtM1WHdK9DLxgE6zbixcvbtS0jxorL+S1fmfHu4ZxtuVWh3obrlsdtjl/T09PM0Em1GSjSkLeZdoszxxRwxGrtpLXVTNSyq6USepIeJpGrYfnvM17qjC5dZs163M9poDjjvOG6Pf7qSyWkep0Om4yzlXLBc9si2A9zs7OMswSWUgNSsDxSGZKy1/ESN00blL2evDcDqzMbTabGfPWTqeTZKImCAWm/Uq5xr5W5tEGVVKW2wu7fZ3QYAP2/Xl9d13BJjxou1kTy7W1WcJ3y0htbGyk/rLm29of+vs80z4NirJInexvdU/tMYQ2aJAGFrKJq7U/bUCTdrudCeRRt2mf3d8p26frpk0ETauXRqORsUrStvAC2S1b7tslMQOBQCAQCAQCgUDgG4ClGanLy0tXA+KFqgbmtYQ2FObFxUXmtFu3NkoZKdVCALOT7SuvvIK3334bwCzJ3M7OTiZs7ueffw4A+OKLL1JoWWqa+Ez1HVFn+jytX131LQpvrJpMaicti5CnYQHmHXuLtNB1w9MIe9o41lPDTwN+iG7Vwty0Zi8PnmZdQ/dT+3IT4c9V2+r5N7JclqW5vLxM49DO+YuLi4xvlDJT1g+q3+9ngid4jM8qocFWrAOzagSJq0Kd1zmXtD/yQsuqv4fH+BaV0V49tkrnqpUr+lmdyPMpsD5zqsG1fqSEF6L7OuH5nahfhE3YTS2zBv6wTL06WZfxa1kGdTh03xYUhYXXfrBWORqQR304gXkm0KYdUZ8n9qEGaGE5VhWspmwY/G9K/6pFjvoRAdO2tvsHyoI7d+5kgtdo2gAr13TM17GnKPKRUhlm2ZuicjSbzbk9n37XaDQye/Vms5kpx6rWV89HSq1YuJeziXgvLy8zftQ6b2x8An3+omO48CBV9aFqOpbnhOkVXt+3alMqm9sBmG20GVHstddeS1nGNUM8gy48efIEwCywxNOnTxMtzzpphDubDds7RK3aoVc3szrpKKxZfpZRozFayj5v4qzCsXSRzZyN5OQ5vBPaJlUn03WaMtrDieYm0cMJcL0HKW/TbQX5cDjMbP7Oz89TnewCNBqNMrmiWEfNC6VzOS8fSN3Z5YvgLaT6f9FvVglreqN/qzmllUtXBfopmu9lDlLa/9exGGv5AGQOVHltkPccD9cZmEHNdWwOGz0I2gOg54RetZ7fNKyiXp55lncg1c2ojXyrMsweWHTd5VjVvrRmhbZsdcN75m0fL0UH+PF4nIkMqaZ99iC1sbGR6SPNuaTPte9etTJP61gUBdYjOIr2pXYv4eWZqxtFe1eu85PJJBPwg+4paiZLpSt/rya3RSbMVesWpn2BQCAQCAQCgUAgUBG1BJvwTm/W8drTXNyENsOjBqlVYEbxR48ezTnrAlOKkCZ9zEXE/EQnJydzMfv1mRrqfFUBJvR5nsM1MH/69lgYtouGA7XmIkXa3bLlK4tFNcBF5kueNlefU0ULcd1j175PzXqsZv0mtMteEBlPo6paVMtQqDkeNbVe3iNrOqfh0otCcl83botpqGomi+ZGmc+IskErtB/yTAGvu488ho7Is6S4LZp3rxxeKHoN/GR/WxSu+LbUsyqus9zKSliGQs2i7Pqpn9kcRcoq2meqOVWRWeyq2qDIpFU/u61jx2M4gHk3AA2PbYMTcf9GxkOhLgKWfVq16bJi0SAWau1SxNBcx57CjjP93wbJAGZzx86li4uLZNJn3R7a7bZr2rcsgpEKBAKBQCAQCAQCgYqohZEqgzrDIi4Dz8mOvlG7u7vpf35HH42zs7PEQDGwBDNIq5OyDYvphX5WXKXlqYqiNrKaB2URLNR53rPdvg3ap7x2ytOeFLFOV7X5TdbXe7dqZqw2+rqZKGDe3txqTTW4ipbNskjKaFmfJ2WrPD+oVYcQz8NNMF5V+9djIMoyTGWYmUXl2k0zdkV9d1ssKIrK4DF6
VgbYRN56T1n/wbrkym1ovzrhtXuRz6Hnj0KZ12q13DQCgG9lYd+vWCUzVTRe8qxhbgJlWDQvMNXa2louI0UrI4UN1AD4PlJ5YdBXgarPXsQPdpXw9m3W7xpAJt0K+0n9Dbl/5+81NdNVsrQKgpEKBAKBQCAQCAQCgYooZKQW9YnJ++2yqOMUT81Ot9tNNq9kpLa3t9N3NlTsixcvUgJeRu/T8M3WTlPDRV5XVJ0ieBG7VNNVVK6y42DV2otVjL3bzEQp8uy8NdnrTUAZTcssqRbVll9ZJMtMXVxcuMmj7e/KaOTrxE22M7FIvRZlvdfW1kqxRlXLdBva0cNtLddV8Jh29fXKmxPf1PreJuSxrFfNHc9XsUz6jTIRWVfZr0VMj1eOm14/r4ra5/WDtdLJS4kAwE3uStw0414VN9FXZceR3QcAMyaK+3j2gfpRq+8bMN2X27RLdcyXyqZ93/RFU8N78+DEKzvm8vIyhU/k9eDgIJny2XDT7XZ7LvOzXqseoq6zvbwN7jLvX5WpQZ1BIG5zQIkq8A4PNwE9BHGse+YS3oYjb+OgzvOeuUtVGv4292NZLFOHRcfHqkyMAz5WpQSIvqgXRQEWFjUX1YASlKNXybnbYDoH3C6TPg9FfeTlXLKKOi9/JqGmmnZjrgep2xD86JsGbU9rGqmuOdy3s5/6/X66zwa+UtM+xbL9EqZ9gUAgEAgEAoFAIFARa3FCDgQCgUAgEAgEAoFqCEYqEAgEAoFAIBAIBCoiDlKBQCAQCAQCgUAgUBFxkAoEAoFAIBAIBAKBioiDVCAQCAQCgUAgEAhURBykAoFAIBAIBAKBQKAi4iAVCAQCgUAgEAgEAhURB6lAIBAIBAKBQCAQqIhW0Zcff/zxlUmmrspibfNUXV5eZjK4MyPx2tqam4W6TFbojz/+uFQ67R/96EeZh9jnTyaTTCbl8Xicsl975WG5bZ2azWbKpMzPGo3GXJ31qvjP//k/l6oT+2ltbS29i1fW4+TkBEdHR3PlePToEd566y0ASFminzx5AgD45JNPcHx8DAC4c+cOAODBgwcAgI2NjZS1ezAYAJhlBr+qTj/84Q9L1enHP/7xJTDLHK7vOj8/BzDtp42NDQDAzs4OgGn28ZOTEwDAixcv5n63vr6Ora2tufoy8/VwOExtpX2YN76175cZe95zvUzx7E9mV2e5RqMRzs7OUh0UnU4H6+vrAGbtqFnC7RjXupatk46968Kiue/K1In18d7BOuqc1izpbE/OBY6t8XicaWP2R9EY87DIuNM6WdmlMk//5tWOkaK2p1wB4Mo8KxutDASAH/3oRysZd2XGzKqeVVbmFckHb60pU6ar7l103patk449jisrf8bjcfqb96ytraU5QplHmd3r9dDpdADMzyO+x87Di4uL9Def743nsvOJa5O2nW1HHe9F9ymK5qZ3r7e3sijbT//1v/7Xy7xyXzWHvX2cXvVvLaPurfi/yh79TvvwP/2n/3RlnTiXPJmn79B9GjAda1wzdbyxflxjuZc6ODgAAOzv76fPKPc7nU7aO927dw8AsL29DQDY2tpKY/eP/uiPKs2lIiwynxddT4veVXbc/Zf/8l/SuCPseFf54M1ptjf/H4/HmXGkY9OuTe12O33GPlG5wvt+/OMf59YpGKlAIBAIBAKBQCAQqIhCRkpPnHmn1jztltVKqFaAf/MEqdoNe1qson0rAz35euwXMK+p4ElYv7faLWVj7Hs87XWr1cpoprW9qmoVrtKeszwsr7IwvI9aGDI8jUYjMTnUCLIt8rRtVbSiV0E1Cnw/r9QK9fv9dB/LvbW1lerc7/cBIDE2p6enGe0mNZuqfVet6HUxLZ6mjtB5oQwdMG0L9if7i/d2Op2MhkW1O3WgavuU0chW/W5VsJpM1Xp780vZKWBeTljNGL9rt9uV5tKyyJN53mcqq62Gz/udJ7P1WWwfq3Gsysp59chDne14k2PwOp/lMeKLQtd/O58oh0ajUZJhytyqHAZm8h9AhqHn/8pI5Y1DW8dl2thjonj1mJqifU1eWfPYYjuPyrDGefA08LasVzFSRJl2V9ZA2YY8Vn80GlVatzwZ5jF8dh/W7XbT3oB7Cn53cXGR1lha6ygjxf0Gx2Kv10ts1ubmJoCZdQ8//6cO3c/YNdXrJ68P7Wc6juwcUUZKrXRUtijsmp5bj1J3wadmLbxDlZ2UKgDswUTvUwG56OarqB7eYcATALoZ0kkNzJtS2cOVCgTWSTez2omspy3XMvBMDPkuCoN+v58+40GKE77T6aQ6eaZj9nDINqkLemhjHVhGluvk5CSZ8VH43b17N9HnLBMp95cvX6by2oNjt9tNz2U99SClm2h+t4qNVd4G1R4mdaGhqSP7lUK63W5nNhq6KBeZqNaJq0xbrNlbXZucZZG38dM62A2dfq/jySpbeP9VJkDeprZOUwzdLLF+rJOOf1t3HUce7OKm5S5zmFsVisyx61DaXaeJK1GkSLuNsGukHqR0s6xzRP8HsvNON2R2rnlmhWX2M3koau8iJZy3npQZe1cdpLy6Va2XtrV3gOK1qLyE7u/shrbINEvHgXf1FNZlYOU30Wg00rihQnV9fT3tgezmejgcpr3E3t4eAOD58+cApnsLvofuA71eLx2c7t69O/dds9lcuD7/P6HoQM72bLVa7pzWPaJC9+p2juhBSpWZRUqIMrI0TPsCgUAgEAgEAoFAoCJKM1JEkTZRT3U8AVrmYn19fc5RHpgFD+j3+5mTZKvVyjiUqjZpUc2bal5sGfU71QRZDQoZgMFgkGhd1bjwd1Ybo5+xbtpeZelEC6WxPUaKYLlPT0/TqZ4sBjUnvV4vUxf+Ts10VHvt0aiLQrVr/JusE7U6p6enGQ3R3bt3sbu7CwB45ZVXAMwYqcPDQxweHs7VVx1L1VmR1zyt9TJjrwheHzabzaQxs4xUv99PrBw/U0aKbUYMh0OXLVgFqpqr3hYmypbBMug6Jjm/ut1uZk5oH1nNGKHmPlazVhe855Y1LcrTeut3RczmMibLyyJvbcozxyoyrVRW4CbZnlW/e5XzriiQANcWtWywpv+UZbr+UC7yqr9TlsiyrV4Ah6r18KAMrh1XzWYzM9d1/cwz+/OY6iJZoetWWaiLgZbJKw9QTkZ4rBOtPUajUabPh8Nh+t4GsdJnlYHK7jyTQF1X1UJFg0sAs/3p8fFx2mc8ffoUwGzf0e/3k3WL7qXu37+f/tbvxuNx7VY8FlXX3KLPqjyzCjzTeO+M4bn8eMy2/gZAZk+nz1cLsWCkAoFAIBAIBAKBQOCaUchIlfGd8LR4yt5Yv5ytra1kN0pbVF5PT0+Tdp2aiYuLi3TytDbTy8I6t6m9rLW9Vrtjnm416AG1Fsqusd62LYCs9qYu2BO1F+CC5T47O0v9Qs2MteUFZnWir9RkMnFZvLrrAky1DCyvBpQApm28v78PAOm6vr6O1157DQDw6quvAgDefPNNAFN75q+//hrATJPEZ+3s7CStlNrrW+2mak3q0N56/krWb06ZJX5GZ1cNa2/7ZGNjI9V
J/dvyAgbUxRoU+Z4UhTnVcuWxm1f5W60CHhtjfRxbrVZqa84lftfv9zN+hp5fqMceE8v4sWnb2Xd5KRm8dBSew7b1f1Am12Or6gxiUARPzmo5rM8qr+pHmmdjr1jlWKyLGSqSU9cxf4rYDPUh5HrJ+TGZTNI8sj6y2odcp6npX1tbyzAXXvjkOqwlPH9Tzwnek3+eNUwRW+VZRNj3LDNmvHcRnlzWOe/5OgHTvY9nvcOr/UzXevssL7hGEYraRNdVjhvuLTY2NjJBrbgn3dvbS/sHXrkOt1qttEYzFcvDhw9TyhjueYnz8/PKPlJlmaKqvp95lhdXPb/onrLQ/amXMoPlsYHSdI208mQwGGR8Fr3gE3kBKbQuZesUjFQgEAgEAoFAIBAIVERpH6k8+3DVYHjshGVoDg8P04mdPixkQTY2NpJm4OXLl+l3eezNIhp09Veyp1ai2WwmrYT60NhQ5cpMsZ6np6dz17Ozs/Sdx7JZeJrbsvB+q5o7G3r55OQkacr5Hfvi7t27qT81fDh/b7W6yjRYbdAimgo+z9NWaoJghh9lIuHPPvssjas33ngDAPCtb30LwFSj9OzZMwAzBovPunPnTtJOeZGCvMRudULZTquR7PV6mYiFHFMvXrxIfl/sO/UlY520HlYTk2ebvCz0eZZl0bFqNZqeP4rHnHjvqavMnsZOtdq8T9MF6N/ArM7n5+dJc2nZbL3PS9JbR4Q7lc95iQc1wqMmO/XmN8uRF6pYQ/J74Yu9JL+r8M3xIpsByNST86XRaGTKbesI1B9hdVl4LLaGA/ciV+n1OqDvsuu5ynjKtfF4nGGkOL90/llGqtFoZCxAxuNxmm+e/0/Vsad+F3ZM67ix33lrvsoKOyd1bipryt+VifxXFl7ZPKbZY5/IKKmFDjDvO+5drd+URjctYoLLwGMIPRaTY4pWUd1uN/2G+x5arzx+/BhffPHF3Ge89/79+2lf++jRo3SljxTfqb70bLeqdSqyTlCrA09O2Xa5uLiuF4/jAAAgAElEQVTIfKby2Y4tZU7LMF9XQdcjz0KAz7V7BU2iS3jhzz15XoWBK3vGqBxswnupHaDr6+tpgNoB9OzZs0SLcvPLje6jR4/SgGYjnZyczAnXvDKUhVL73sBhfTzTHetMqIsVhQHLSjr4+Pg4/c2DyPn5ecbEpw563nuOHni4MLG+p6enaYPH+xk6/N69e2mDwQnPegwGA3ey2jIsA50kHDtsPwqse/fupcPSl19+CQD4/PPP8bOf/QwA8O1vfxsA8L3vfQ8A8NFHH6X7/vEf/xHAjKLf2dlJpoA8tAwGg8xiqPWuYyNS1O8q8NkXNrP63t5e6kO2Cw+H29vbc/MImBcydSNPoHpj4+LiIuNYrGYddnypqW1e8Jm662IXHxXOarbH7zi/aC6q84f9xas6xnoLSJ118vL02cOEHtZ51XFnN7XAbCGy/Xh+fp5RoPX7/YxzuZeJfhl4ZlZWvqqZLNcayvXJZJLmCftaNz42h9tNHaSsAlAVDzZNwng8Tu1uTeSLgmvkoapsLzIr0vlkc/4Nh8PMmGN/6eZP8/8A07pZs0wvmIWXJ7IstPw2ZYYeJqxZmyrmvFDceXNTcxvpfLR97R2yysI7ANp56gWD6Pf7mUOSzn37ncp6PsszlSR0A2/nXxG8frVjZWNjI40pPYhzn0FlK/cMn332Wdqzso5ca+/du4fXX38dwMyVQPezti3VHaQsVJljFY2q5Lch3dWtwwY+u7y8zA05P5lMMiSJKtw8JWdV6EHKBo6xByot/3A4zLj6eMFlitYaLwAFn6UmhGWCv4VpXyAQCAQCgUAgEAhUROERv0hjpVo/q01cX19PzAYd/x8+fAgA+OUvf4n/+3//LwDgF7/4BQAkc6u3334bb731FgA/AzS1Gcto1KkF0frYMJfn5+cZDZY6uKoWF5hqONhW1vTq5OQkMQbURh8fHyfNGzU1enKuw7TPPkM1CcTZ2VkyobQBHXZ3d+c0gKwLy2w1oKr18AIoLBqOtdlspr5gWTm27t+/n8bL48ePAQCffPJJYqTeeecdAMAHH3wAAHj//ffx1VdfAUCi6Dn2dnZ2MoE2NjY2Ms6vGpZ9maAntn+UFbXhftfX19OYY1tQW/b8+fPUP9bZdXt7O8N8qonVqsJtW3gOnhqAgfND2R2WzYap7/V6ruN1XQ6wRU676vDK9mS/nJ2dpfsouziOLi4ukvklTVGV+bDmdmqGe5WjcJU66Zi1msxOp5NxvN7a2kp/s/1VC2gdgDUojWXhT09PU3/zPtX+1cmSqsbRjvVOp5P6hY7gnDenp6epn9SiAJi2F9tHtb91mF6Wgcf2amAMKzPYb7rWECqzrYbae+eqyq3jh3JN10XLPHNeDQYDNyAPMJUP/Fvbx2NtFq2jaumtybfCslW6r7DhvRWWOcljizkeedUUGVVTqHCca1JcW1ZlmHSvZINRKQtlmTpdR61plTJpKgv5nYa4rwIv6S4w70rCe/r9fpIB3FN89tlnAKbM1IsXLwDM+oZ7htdffx1vv/02gJlLAc36gHm2Hph3+SgLax0D+EGDWE/WTVNzWJcbZVVt36oLh+55rVltnvl3GajliWVi9WrTEV1cXMyx0HwGkWdWq+NOr9aEW1OYBCMVCAQCgUAgEAgEAitAaaNTzwkbmE+URu2j2i/S/4mswEcffYS/+7u/AwD85V/+JYAZO/Dzn/88aUZ4qtegB9ZG1rOpvQqa4M074QPzp3qyMtvb27h37x6AmaaB/29vb2d8wlQLYu2be71eqqf6TQHzDpeLwHOyZHn4ftW+kOWhNon37O7uJuaHz1QtM5/PdlK/jiItXVlowkWrKSEb8/Dhw8R4fve73wUwZTn/+q//GgDwv//3/wYA/Nqv/RoA4Pd///fxgx/8AAASK/qTn/wEwFTbRA21hkK1iZZVi1Gnj5SC/UPNGccgMNMcakJAamqpHWM9tra2kgZNGam8wBl1+Xt4dbJJnY+OjpLmj2Xk+JpMJmkcsm5k2dR/sUhTtGxdvFDhWhfra3F8fJzqRhmgIW9ZVzKg/P/8/DzVg3UejUaZoAHLQFkZqwX2NHWeM7NNgKqy2Grx8tgHqx3U75ZhciwDrnLdMhdbW1spGA3nCaEJvtlPlIvKdHh+knkWG8vUx4O2LcegJgPlmkTZsb+/nxzjeQ/Xr16vl+pX5MBeB7xn6tizjJTOC441rpWaUsRqxzVYgLLfdl1eJsAO58BkMskwJ6pVt5r1drud1jC+Xy1f8thBDezi+cV5zvPW+uQqkClXX0ab0sVjn87OzjL+T1qnvDQCKl+1T2zwEGXJOX6rwHumF+qc7Xt0dJSsVj799FMAU79rYLrWsvy7u7sAZvvU9957L1nAUL50u920prG9+P/JyUlqt7JQ6xLrI6hrFec+9wObm5upnuwPyrnBYJDqxL0Fr8PhcC6VCsHxbK0aqviwEeqbZNcYDfRm/bImk0km3UhReiQveIn6KXssFTBv1VCEyjUvorDZAU+ePEmDkRuG3/7t3wYwPUh99NFHAG
ZBAP78z/8cwDQAAIUlnfqGw2HakFjhsIgQVGFlHSc9up2dq+aKXKy4ED948GDuUAXMR4KyG2PtTOvk1mw25yJ6VUVeRJJms5lxHB8Oh6l/OLFYjvv376cFl59p9D4OQjV38SJ7LQpdINV8CpgJ/b29veTg+f777wMAfv3Xfz2Z9v2f//N/AAB/9md/BgD4/ve/nwJQ/PN//s8BTE0BgWnQCTqVsi/v3r2bBIgNiDCZTGoxjfMiHHqCxDrAMkjG0dFRGoc0n+X/vV4v4zSvC/aqDlCeM7wNWPLixYt0GHz+/DkAzAU+ofkVy69tUnajuQj09/agyf8nk8mcGSIwHZNUSvAz1qHdbqf5pQdgYDqX1HQG8Dfty0BNnPI2X2qircomKyN18dL2sL/zIvMRXrSnOsagtxjaiG+7u7spqhYP5+y3w8PDJAOo3OOzHj58mGvSAviBkFZh5re2tpaZ04PBIM15KpZY7r29vVQXbvS4RqmSSg/bqwh0oibLXhAXa06mprL2IHV+fp7ZoKuJn43wpYcaz7Svan05B4oOhxroRANQFEWxywtAZevHNskrd9WcS8Bsv6VBEKwp7unpqRs8wrpBeFEKrUJGFbtqwq6HHGA+IIzmtrwKekjT5wPzLhlWSfz06dN0gOKVa+35+XmSGdx3cD/x3nvvpSATnF+DwSC1nQ02dHZ2Vnmfp3sQq8hW8zarVNHAYew3ts/R0VHGdYNr0/n5eZp7rJM+35r9VT28K7yDtR6ArVk1MBtvuk9iGYmi9ccLOOO5p5Q5SIVpXyAQCAQCgUAgEAhURCEjVaRFVHMUngR5/+HhIX75y18CQDKz+tu//VsAwB/8wR/gX//rfw1gxgpQQ/jTn/40sQkaBIGnep7CF6EQCdVIqaYW8B0Blf7jCZmaEZ74Hz58mGED+N3W1lbGSXI8Hme0m2ouUGeIWXXktE6pwKydaV7F0/fdu3fnmA1gxhicnJwkbYqXX8A6Bi4SbIJot9tJ40CNJMvx+PHjRK3TdPS3fuu38Dd/8zcAgP/5P/8nAOB//a//BWA63v7jf/yPAIB/9s/+GQDgH/7hHwBMWRFqnkjNb25uZoIdqLZtmZDN1gxJqXqrQWs2m6nuZDRoejQajRKFz3lEJvHy8jKjQVQHUo8ZqEOLXuRMTq3X4eFhYqKoDWW/qhO3ZaYA38G2buh49RzVWR7Wa29vL9WHc4qBUDRMP8cYGfujoyOXGS/SoFeFMuyUZ1Yrd3x8nNEseg7tvCpTbOe7xzCoxt3TXC+TL8YLbsM6sozUqO7u7iazHK4j1BJ/8cUXKQAS+4nWEA8fPsy0QavVyg1nXTcb5eVhUjaJdaLDO7Xpv/jFL9LfH374IYCZfBsOh2nMahCdvDG2SJ2KAp2o9teGK9aAEhyPypDkBZ5Sc0s1t7PmR8vIDu1/y06qrLDmg15YZjWjKwoVnjfO+C69qplUWWhYbxs0gnshZZ+UPcvT2GvIchugy8vhtLW1Nfe3Xjc3NyuZ9imDZ9dwXrVuZGE+//zzFFyCwSa4LvV6vZQihRYw3/nOdwBMGSnOQY7bly9fzrHdwEzW9Pv9ygF27PjQd7FOms+Qe9BHjx6lNmDfcjw9ffo0lYP1ZFucnJxkUuZsbm6m39q0R9y3VIG3RywKhMR36F7F7mtVBhStlyy/l8+r6jobjFQgEAgEAoFAIBAIVERpasdqmXli6/V6yW6UrAxPwsDMmZ/+Kj/5yU/w7//9vwcA/If/8B8AzOxMv/vd76bTJR38Dg8PM86jqk2tenJUO05r28+T/uXl5ZxTPDA9yWuSLgBzTn1kb6jtU4aK2lBqW7QMnj3xohp2TTLsOdfZsJiafI5+R9TQPHz4MNWJWllqrTxHyVarVYtjPMF2aTabaUyw/ag5ef78edIeURP74Ycf4l/9q38FYMaCknX6H//jf+A3f/M3Acw0Sr/zO78DYBqWn5pb+kmonxjLoKxBHVp0Qpk9azs+mUySRot9QAax3W4nJop222ynw8PDjHZRQ4vXndDW1knnrecDYR1x+V2n03GdyIFpP1jmRLVTdSW25nOBbKAFnb9s14ODg9Q31OixXjs7O0kesI/ot7K3t5dhalQDXYevlKZw8MLB8uqNZ6vt9hKC2iA2XkLhonfXxe7atanVaqV5S9Z2Z2cnlZdyhH3xs5/9LAWhoRzUVAiUg5Sfnv183UEmPM2qTZzZ7XaTppxr71/8xV8AAP7+7/8+yXiuTZQXjx8/zvgFL2Pt4cFLJ2D/18+VsbGhmj1GyrKbQJZtUmakDh8pDU9urQc0XQD/VhbXtq8GAbJJib3UASorrU+VWjdUHYfqF2N9xlU+2TmsrK9l5brdboYFUoaJf2u6CP0emA8MoalwroL2kWUz2PfD4TCtoxrqnPKA33F8PHjwYG6vCsz2EY8ePUrP557x8PAw7au4fmvC6ar7JS+FCaEBpLxATRoKHZhZTbRarfQslk39uaxffb/fd+UssJiPlAbxsn79us+3AUPa7fbcGUTL6AUB8pgv9dezfolV90bBSAUCgcD/Y+/bfiS7rvJX3auru6e7p+fimbHjsT0JjpM4JCYJN3GJQII/gRfeEOIB3iOh2OYBXhD/BwjBQ14iRSAkoghEMAhydYIz8dhz7Zm+1726fg+lb9d31lnn1DlVp6rb/q3v5XRXnTpnX9fee12+5XA4HA6Hw5ETmdVP+rTIJzicBBEHcOfOnXBif+mll0RE5Jvf/KaITGKmoPGHleqrX/2qiEwY/XDyhL9puVyO+XVa9LpZYWmHLG0V6gttDMdN6Ziter0eTv+wYED7d+3atVjc1ObmZkyLa/lRLwKt9UWdRSRC1a4pxaGlvXnzZugDWByBdrsd+oQtR9onnLUAefuJNV86Ng3lf/r0abAigZHqC1/4gnzlK18REZF33nlHRET+4R/+QUQm7H2IlwLDzmc+8xkREXn99ddDvAC0SI8fPw6aMYxx9NsiiZNF4uPW0qIzRS40hoiNgrZwd3c3zDtYPdBenU4nWH1w/3g8Xoj6N0t9LC2QRa2tE0+iPOvr62EeYQxaqQZ4nCWlDMjbR6wN01Yg1rRq2tzj4+NgkYJ8e/nll0VkIgMwdmENgAXh/v37MXnCtME6DmkeyxRb3rMk27RiM7Smzorls5KFctJEHZ+yCGtkWr9yGTF/eR6jvaGF/vGPfywik/QbiEHU2vLd3d1g1dLWaS7PshLy8nu0Z8bm5maYL+hfWNbu3bsXrPVIRQIPibt374Y+Zy1zEqX7IuBkq1r+WJ4lSax3ItF4Im2ZOjs7i1mdrPjdRSxSlreHFTuOMYcxxHE+Ogby7OwsFqsNi9TR0VGMmvr4+Dh8ry3686RQYcpry8onEo0J4xgkHf/EViQd68TzKi0OSjPssQU8C3i91nstZpmGzIYH1L1798JeCP2LufXyyy8HpmnERiEOdmNjI/QbrE9PnjwJz2K6cSCvLOc9XVpcqGa/Y9ZKzCX2dNDx2pxEGeOarZ5aLizi2cJ1YtZXLg8/m71D9P7BqlOahxbATNlWKqQslsPUg
5RlltcHjJOTkzChmfYXLlS4fvnLXxYRkW984xvy7W9/W0QkBPZi4j548CAcvCCEOA8AsIgbCJuHdZ4entDatMwuRTrvDZvDNSHCwcFBJO+RyGRTqN1EWEAVmZ+IDySahp3zWeHwgLqhnCJTQcL5pzgzNsqtF7BFDoS8+dPlxobm8PAwHH7effddEZlsFnCo+q3f+i0RmRKe/PSnP5VvfetbIiIhn9Trr78uIpNx+t5774mIBKKUx48fRyhERaLU8fPS1FsbFLQZb9KBk5OTUE8Iadx/7dq1SM41kejmHmOUSQaKdO3Lstnid/JhHgd0Lai2trYCjTMOvDiAbG5uhvHAbkBJ5B1FQC9M7C6C+pyenoY5hA06rjdu3AjKFNSZXYCZxEUkuolEffDueepluVhi7GIen5ychHKwOzO7ovCz2IVEu+1sbm5G3HVEosQtOr8Op4hYBHo8cw4/VkqgfpjnOHQ8fPgwtC/mO1wx+TAMsJvLKg5QuOJvtB/nWgSJCeomMj3Qoy6cMwf9qg/s1rsXAR9YrPxBSa53InEX/H6/bx76RSayQNPy63Loa175h3exgkhfLRIUfq8+VNZqtRjxFLt4a8WElaeK3bitAPo0sKJE5wni/tLv5PxOWmnBhyVLHuA+VsToTXGWnIEWuA35sCoyPZDv7e0FGQ3l1+PHj0PboaxQRLz66qvBQIA9BuT52dlZkCtYq/f29iL5ArkMvF/KCh4zWpnCyg9rvqAdtVsw58PUFPs8xtLctheRgeyuiDLxZ7jqdZ3XDD1OWdmEKx/SrNRAmlzOyneWBnftczgcDofD4XA4HI6cyKQGLJVKMU0RTmydTidoYkEZ+/Dhw6DdRDD/7/3e74nI5FSPz77zne+E+/kqMqURZ8pLyxUk7ymY3fmSTpqXL18O1iN20YN7lXYBOzw8jBBViEw1ZQjqE4la83A/NDWc6DKvdjatXSwXOR0YKzLVQqOO/X4/aGTgXoX7B4NBTFO9trZWKHkBl18nZYVrytbWVnDFgTXpJz/5ifzyL/+yiEzp9UG3f//+/eDuBxc/WHNu374dzPawIJ6cnIQ+Rt05sDKvxS0tWSdrg7QLAls1dRD8888/H9zEoM1D3xweHoa/MfZqtdpSXPsspAWLbm1the+h/Ue9L1++HOYd6gbNX71eD3XBHGMLzqKw3BE14UK9Xo+5oLTb7RhFPdxFnn/++VgWdk7qDXnCVkOtUbNcr7IizYWOrVXs0iEysbJpQhBOB6EtxRiT7XY7yDcOiEcfMVkDt8m80BpJ1qDjb5R7f38/zGm4BaO/xuNxjEYcrjuXL18OY5jdQDhYmuu06NxKS7aqg6ybzWboJ225vnXrVnBDwv2o/9HRUUQrW0S50+phBXmLRN3DmNBFz2krnUISZTjfnyan56kv5gK7/WDuY4zs7+/HPF6YYEFbYyxPBGu/wK62SdateawdKA+7YOo+aTabsb0DW5a01WlzczNmpWK5qYkMmLBK782yJkYFrKTZGBtM3ASLFFzmO51OmCew4CJtwGuvvRasu9gfcmJbPAPy5OnTp2EfjH6zLJBZwR4RGBva4n52dhbqiXfv7+8nEpmwBRd9A8+WSqUS+gZ92mw2Y+kutAUpD5jAjT2euG5sYYXs5fmivV2azWbMLXhWKgHMJU3gwknd0+AWKYfD4XA4HA6Hw+HIicymD63t45genN7gn82JDRHI+2u/9msiMiGfALkEtM0gnXj48GHMj1VEYj67jHm1s+wLrP1AmWyCE4lCkwGNHlum8B1O+py0DG3GNOv6NM3BcfPQSOr6Aewvq60CGxsb4f3QaMLycXp6Gu5DjBS0EkdHR8EyghN8s9lM1SbNC47rQP+gXDs7O8Hihz754Q9/GLTJ0CjBIvWDH/wgUKIjTg9kE5///OdDUl9YEt57773wfGhpYA0riu5daxNZg4Y5sL+/H8rBVKwiE2sHJ+AVidKvapp6Dry2KKqLgBWHoIlOOHEgwONNE7RwEkUdYL4MWEmkOZCcA6pFJvMFbQ1ZAG3nvXv3YjGRqM/29naoo7biiMRjL+ZJ+cBjTJNkQOs3GAxCmVibZ1nE8HtNSsAxa9oXv9/vR4haRCQS61ZEUmWtoS+VSuFd0MSenJxENMZct52dnWCRguYZsXk8/thyt+oYKZF4LEOpVAqeBFiDsaZ96lOfClZ3tAFbtzV1cdEWqbT4ICsYnq1sHH/LYNIUnTS20+nE4rmZ2j+NYj4r2PvECvbH1bLs6NQWbJHScZdc77Q5pi3Jg8Egdxw5xrnVFyzr8DcTRWgrG1ufuH585fXTqqe2Jg4Gg1xeBzye8TtttX348GH4G99VKpUgj0F1Dk+VT37ykyF2V8elPXnyJHhUwTJ1eHgYW6N4/5x3/4D+4P2pRVqCMnG8O+7TVjmOn8IahbV3fX09fId9T6vVCp+xhXhe4Lds3dWxgjxv2GLNceUi6R4wbEHNkjqB4yCzyHa3SDkcDofD4XA4HA5HTqRapCzWGa1lYYpLaB/ee++9kCgVWlmwqr3xxhvhpI8TMCwBGxsb4aTMp1zWxoospjVjLZfWIvEpVmvoRqNR0ERBe8EWKp14DZapXq8X06zV6/VYrBmzisxbPytGyqLMRN8xkxZO6dBWHh4ehlM84lOglTg9PY3RJ7PvvtVPeTW2rFXUPrgcZwMNMmv/kYAX5QbTzq/+6q8GbTRYrf7t3/5NRCYxcHgWGHnYogMrj0XnnBeWtYOtoRgvHGeHcmCOwZr73HPPhX5iC5bIZJzqscdxI6uKkWLNG881jtsSifpowwKoY9OYdn7Z1oAkbTMniYYM63Q6oa2ZFUpkwgil2fqAtbW18Axm79PywWIDywqOf0mKq2BGLtbicVJQkagPuY6vYWuCTgDJGtgi6M8ZSYleUU4RiaQBYG2sSDStAuYVrDj4jpO0s+Ww6NioLNCa/MFgEOY8xhCPN1gIsEaxJU5rtEXS51MR9dMWqVqtFhk7IpO+0G3K8pa9O0QklpqEYbF+LQKOsdBrE6/j+p3j8dhk6xOxLUGskU9jNdRyap4E16DGt+KgrHgo/iyNslxbPK21h/cobIESiVrb8rDkMnMwfse05LhiXQd2dnZCWyC2EPvTW7duxdZazKUHDx4EixTmWbfbjcV3L7J/4Hgo9LVO4TMajWIWqX6/H9of7cp07Pgtyzr8Du3I1sYki9Q8bNqW55b2zrFo9y1WTy6XTtfB8wzjE+OVLbj6d1nT3GTqSSsrPVCr1YJrEQdQg1IWblL4//DwMByyEMjLExCbCh1cqcsD5BXs6PRZLgdaqFWr1bA44RnYFLHbH64YqMfHx6Eu/G59eNNUjvNCu9vwQU0POBaCaGdMvv39/VB3HKBQ/729vZhrBbv6aGE5z+LFmywtXHlSYMzxJhRupTDDw9Xvi1/8YhiHcPGDW+mdO3eCCR8ugY8ePQoHNL1Q88RfBBYVOeqJzd/JyUnoOxwwULfd3d1IwKvItA/b7XZM4LJr37Jh5YlhwaaFHMsBTVnLC6sOCLVcdxbdMPHvNekEuwVj/mxubsaC
WDEmHz58GGSk3kRWq9XYhoU3YZab7LwEO5xDTOfgYNmLed7r9WJB/LiyPAGs3DL8fO2ewe1ZxAbdcmvWbk/dbjdG+Q0FSqPRCDId8wztb7UFL9jLPkBZeWLwTnbzYjdFkeiBBGsTkyWkKVWKOHRY5dYbpnq9HiMsYZdkrQBk+me9DrXb7dgB3QowL6puWtHBckqv+9aBjpUz7OooMt17cC4knkN6E71IrjmknVlbW4vJI77qwxLPa4uyPCnUwFKQDgYDkxwA/+c5SPFhQKd44dxOnLJHZLKnu3PnjohMD1BIwXHp0qVwPxNWiExcavFcznuq+5TbaF5K92q1Gtvw83qJPQravtvthu9Rfg7NwDPQBvpekWh/63CUpLxjWaDnMdeJr5byTc85DgHSJEAAH8os10Erj1SWerlrn8PhcDgcDofD4XDkRGb6cx38Z2U6hhbv9u3bQcuH0/y9e/dEZKKlhZYfWgdoazlZLmtq0rTMeTVLTF1rJevS3+HaarXCCVYHULZarZg2lzOQw7LALiFJgZOLWgysBG26Tkx3CZOtTsb57Nmz0C86EW6z2YwlZub6JJVh3vpomk0muIAWBWXt9Xoh2BPWJ1jUbt26JV/4whdC/USm2qkf/vCH4RnQwF2/fj20i6bZrdfrC5FNJFE2j0ajWHLnXq8XtCgoIzTnGxsbQSMDTRj6kLVN7A60LJKJJPD72M1MW2fYnS3JXYCpj3kup7n55amfZWGwXPxQD9ako8w6ud/BwUHQXEJmYN4Nh8OYXOn1ehEq9EVhuVZasNyBtWzkK1Mx85VdCHncWYlYcX8Rc0nXg9cm1l7ifk1B3Ww2g4zTCSyZVjjNlWUZlrVZ37P7iXbFaTQaMTcppmIuwnsgCywPELZg6qBwJkJCO7N3CJBmkWLqe601X6Sead4jaSkurH0FjyGU11prdELeRqMRs3Kw+1Pe+QSvjbW1tZjrk2UZsywret/CLoZ6r8XWJ8vqpGV8Xtc+fo72FsKa3u/3Yy7aL774Ygg9gccUrLsi07UVIQJI+bO3txejOueksVkS286CRc4B8FxhogTUUxOwsBVQk3ixdUu71bJLot6XzbNvteaj3j9aHgvj8Ti2H7RcH3mPgN+lJcPWZExcjjS4RcrhcDgcDofD4XA4ciLVIsUnsaQkXCcnJ0EDCw3Kzs5O0CzB9xaWmidPngQLDZ6F/5miO43qfBFtEludkuhDu91uLCam0+nENDSsmYLGAZoN1kRw0k48Xwenal/rosDaWa05aDabQXOpE7Y9e/YslhXS1vsAACAASURBVGQQlp0kKmqtqVskmNxK5Ki1KdVqNbwT46vdbgetEeLzmCoc2iZYSBFP9cEHH4QEnRizm5ubwQIEyxWPmXkTiVp1A1jzjbE3Go1iyR1Rrmq1GknAKzK1ZHEyOW31ScKyNNLWWNB1hxaIg/i1VprnLVtHkixSi9RH/9aiIGeijCQrUrfbDdpQ9J9FGcu+20nlXiTekH3NrXgVK15Pv9cKDNfxJ1lj1tLGQhHgGCYuD+aCRb+POY3fYS5y0HqaFpWxCuIJEduqgXqUy+WY5ZDvXVVsF0NbjNjiwvNDx2pwTAOTQIlIhAjEsvJri9QiRDW8ziVRNg8Gg1An3l/otABWLIe22FWrVTP2WXuYLNKX8HBgq1NaXA+vIyy3uS7WHoutCGlWJ6ZyxzUP/TmnrWHvIP6uWq0GjxaQzNy+fTt4UcESxUmXmVxCZEp1fnR0FLOY1Wq1mAWPSTfy9pe2BInY3j96vjOBmabR53h97SlgkcyxJU3L9nlipPKmJeB3Wh5KXA9+Po8/vce2vGNyJ0vOemPSZGUTLQfbMe88X69fvx7bvLNwyyIcFmGDszpbT/ZKpRLLAM1sNhxALRINnNfl4ckElMvlmKDgNpj3MMVucNYGXbdtrVYLfYHDLDPc4DMdjLixsRHbPA2Hw8LZuPCMJCHQ6/Vih9vt7e1QBwhOHJparVZw3UFAKerY6XSCyykOw8yohkMNHxyLcEeyDhN6PJZKpdD2ODCi30SmY1QvFLxp5M3Fst14LKSNBT0nWUhafa/dRXjDzM9YBBazot6Ui0QD5vXGnDdJ6CMcqIBarWbOyyRyH57j8yCNtU8zI6UxPLKc0m5/vOnhw5XFYsbXRerDsNyrWC7qDYOVw01v2pIO66s6LDHS6mwpSpI2K3nLPk9dWfmgc5GlMUZubGyE73XesUajkUhG0+12Y89nV2F9cFnkIDUr/5Hl1matm/hdWj/pscpuYxajX96+wvpiyQFWZOoy8r5CEwek5bpi92W+J82VOM++COv1yclJxEVeZDpHNjc3w3qKw9PNmzcj5DP8u+Pj4+DSB+Y/yPNOpxMjdmImxjTWwqxI21fxZ1mYRFku6oNU2qGG92P6ffPUid+V5SDG4y7tIKUPg3yv5ZptuRPimmWf5659DofD4XA4HA6Hw5ETuRPhaMriSqUSs6602+3wmbYYVCqVmEtUkvZVZLnuE/rkzlpIaCFgmbKykjPFr6WlwrO1JpZPuVprwDTi8yDJ2sAneNYEQouO/kFZj46OYjkW2L0MWh426+u8CEVZptIINNjND2WDNVRnMt/a2gouDLhCE/Xhhx8G1ziY669duxbLccRjZh5TdlLdeAxqF6JarRY0hrCQYQwy+QLTrorYuRkWtWjMg6T+t4KxcU1yQWTrk2XRWEbdZmnoRKIB8zrQtVwuh3GKeYP+aDQapssV5wVZRl2ytB27PGhNnUg8v5+lxWN3R+2iu0hurKyw6pQkp7hu2iJlWT3T5Nqi7sxAUTTdq36nhrW2W+lGIGfH43HMIoX72SIF4J5erxeTeePxuFDXPh4nWchbAMvV1LJgpc1RK9zCGr95A//5udpSxNDWJ8vVmi1M+lmWtSqN5IvbKU9fYS3sdDoRKn2RaK4y7APg2nflypWwz0G5ILOfPn0a9gacYgTlY+8EXNPyH+VFVpmSxzvDct/LKy+yvGcWsq4BliVUE5RwzkK95vD+yiLMsyxTWSyhbpFyOBwOh8PhcDgcjpxItUhlOZmyHz1TxuIUpxPrcjwRYGXpTivPIhYODmDNEzQ3GAxCXXRAGmseLGpfIKt2pajYIpFo3bS2p1KpxKhOgdPT02CR0rFha2trkaSUeKYVz1EEkixS7PuO7+r1erDeaI3FkydPYj7L0Eix5gqWqbW1tZgVkjVqi8SyJfkZs+UQsOiB8XsO0OXYKJGoZSCLJqxoTfSs/k/SnJVKpURLjKW5WrY1I0tMWblcDmNKx6vw79BX8OFnQhD2Nde/LYI0Y9Z4tTThevxYstrSpFvWmyLjieZ9VrlcTtQ+iiS30TKtZqvCeZQ/bc1mra+miWbqZR2DwcloAY7PscaqFVeJ8s3bLmnWoaxEKtb6rMvMZcwqD/LOD02Zze+30iFwe1uJdfV3aTFPVqzrIhZDrk+v14vFLmE/s729HdZ/Tn2C+2GJwj5ob28vxEQxQZqIPYaZKnxVlt8inr/KuE9rbU3bR/L412MKfZ7W7jyG2Uq96Hhzi5TD4XA4HA6Hw+F
w5ETmGCmL/UgkGoeBk3ilUkn0RZ7F7pbElJX0Wd7Ts6VRtZ6htUPD4TBm4cCpt9frxRh02EpnWQOK0rxkxXg8NinWNQMh6tjtdmOsfdBOt1qtmN+xxcoFFBWXY40NzRTE1hudIPX09DT4NoO9D+x3Ozs7QduE/j05OYn5n8PqUBRNvWUF1RpYjsGz6JmtpMj4fRHUuFkx7zvmtTCdp4XAsmpY7G8iUe261p6J2HSzWRma8pQVZeEryyHNwseJPS2ZrdcEfrYV61B0aodZSBofWWP25sF5sPhZuAjWP0Za21YqlZg1VyRKX87gtBfaasUxOxxHlWSRmmdMWvNUxwvyd/x/0rwej8cx1jTLIpVmSVtk/ILxlddULQ+sBN2DwSAxDQLvmaz2z5IiYV5wnDHGAdZ6eHXs7u4GinN8Vy6Xw94G+wbsGfb392OWKIDZDtk7KWn9/ahbt4tCmrdHmoeYtZ/lPWDSumV5ZvEYnnc/nptsgiuCF1sBWlrApAU1ZzH/W79L+iwNXMak9/LhUAsABvP1a2HCgbJ5aMGLmmDWxLXqgkmPwwe7YsK0jcMWu1ZwnhKRqGAscuGd5SKhFxgmKkCdeEGAINQH383Nzdgmlw/IrCTA74omAkBZtWsYZ97W45Jd+/SmYJ7s6R8FrHoRslz8rAVAuzgD1maNhbieN4soitLKz+XW37E7pSXH02SY5XZkbfwsmnu+rhp52va8y5i2Rl7kTZlV7ixKiPF4nHiQYsIQ3R5Wzhz+2yK2ytt+1nP1fkfEnjPW4Srp+dZ3LP+zbEKzAu5rSbTt+F9vQmcdltLmfFoZ0w6hWcAud0z+IDLNh3np0qVwqML4Y4IM7BWgYD09PY25QPI+As9PIh7j3+m//3+FtR+3rml7LT3uODWNDrGx1iYe1/Me7t21z+FwOBwOh8PhcDhyouSnYofD4XA4HA6Hw+HIB7dIORwOh8PhcDgcDkdO+EHK4XA4HA6Hw+FwOHLCD1IOh8PhcDgcDofDkRN+kHI4HA6Hw+FwOByOnPCDlMPhcDgcDofD4XDkhB+kHA6Hw+FwOBwOhyMn/CDlcDgcDofD4XA4HDnhBymHw+FwOBwOh8PhyIlq2pdvvfVWyNZ7dnYmIiJI4ItruVyWer0uIiJra2siItJoNKRWq4mIyGAwEBGR4+NjERHZ39+Xo6MjERHp9/siIuH3rVZLNjY2Ip9Vq9XwrtFoFCsLvnvrrbdKWSrMdbroyFqnt99+eyV1KpUmxeEkznkTOmet09e+9rWxyGT8YAyh/wEeezyGMA55DAF4xnA4FDwfV3yGe3h8oe5AqVQKnxXRT1Y76nfOuj/td3mf/+abby517M2bCDytzLOQpU5Z6sNln7ets/aHvo/vydpHf/InfzIWEWm320Hm4jnNZlNERLa3t2VnZ0dEJFw3Nzel0WhEytHr9URE5OTkJMj009NTERHpdrsiMplbuL9SqYiISK1WC2sC5mO5XI5cRUT+/M//PFOdPv3pT4/xLvwez8W8X19fl/X1dRERuXTpUqgT/sZa02q1QltomcHtjXWH1yH8rdem0WgUPstap7xjz0Ke+cHyLe0Z1jOzjr0333wzvADPsa74m8cCYO099D5kEeDdy5Z5s5BXpmfBInVK6i+R6bwul8upfYc6oQ+5L9P6ddE6LWNdWmTtSUPWPvqLv/iLWOH0PGC5wzKJ9+siUbkMmQf5jO9KpVKs/0ajUdgn6T5lvP3225nq9Md//MdjlAvv1dd6vR7b5zUajbB2YY3i//GZ/l2tVgvPZRmv+5brhvr+xm/8RmKd3CLlcDgcDofD4XA4HDmRapECLAsETmyshcBJr9FoRKwAItPT/HA4DFpRXPHMRqMR02SWy2XzxDsv+FlJ2ocitFxpSNNsLEvrMQ+yaJryapPyvpu1ItpiJCKmtUprHFgLA60LnoXvuN21BZQ/s6xyi2AZbXaRsWh9Z1mDlolVWtGKHBewwJydnQWZ2+l0Ylf8jTk1Ho9le3tbRCRm5WUtIeYULFRs+cI8Yw00rvidSP42sjSG2irEz2UtJ+SCdeV1h39XKpUyy0H+/aphWdCTLAtWn1jPWGTOpXkv6DbmzyzrB2vF9T7EslBlLeuy1/usSLNaJ3lGrKIcSZYoXC2LlYhtOeR+S/rOwrL6KM9zz3PtYVgWFGt8pFmR2IpvWXREotYbq7/xTPzPfZoVbAVjK5lIdK1BmbisKK++skVKP8uS8QztdTAcDjOdPzIdpETiByj+3HKt4EWSCz0ajYILCK5oACzgIvYmHn8vYtZH56c9xzoULDKRLUGTJKwuymaYJ4w+kIhMx4GeoIxF2gzvHI1GkYkqMj2A83dwORqNRqb5Gld9yOKNkgV28+MrH/BWiYuy6K8a5+FysSosu/xXr14VkclYRztC1p6cnIjIxD0PLnrtdltEJnMK8/vKlSsiMnGNw1W7T/CiiOfyXE3aOPG8zAvLvU7PWZHopj3LQUovtrPcz6y6LaNfLYWPBes763BouSsWuaHl9tBrBK/n6ANWpOpDVZr7kqX0WuXhYxm4KOW21jx9gOW/+R59X1al+Crq/lFdS1mpY80RvacZj8dmKAN+D7mNfTiUZq1WK8h43i9pOciKrLxGDz7g6HWED0+6bM1mM/I3XxuNRsylzyo/t4+WI7znzKIUc9c+h8PhcDgcDofD4ciJ3BYp639tkWLTGk6oOOENBoNgiYIrCcBayzSzvyYdyIO0gNU0F8a0ZzHSNCl8ss0SeHsesDS3ONWjL1jDwf2qtU9ZtacWoJU4OzsL2hNtmep2u2EsoGxcDm1R48BKPb7YAgewdtZyIVlWPxX53IuidVtEky1iu2uxZuwiaG+zWM3Ooz9gkWo2m2FeafeMg4ODYJFiK5Xl7icyIacAkYMV0Is5B3c/nqt4Fs8p7cEwC5Y2UVtXuK3TXPuYBENbqi2XFqBcLsdc0RfxlsiLNPnKLnv4m4O3cUXdWbucFkSeF5Z1wrJM6c/YIqXDBNgKyV4s+j2LuPstC0nj4rzLxUjb67Grrki6RdiybrLMttxok6y+F6l9zgvW3kPPEW439FW3243Je3jwdLvdiGeAyJR8p9vtBg8EWH9qtVqihaZUKuXem8OKxK7i2o1vbW0tvJ+v2iLFv2MvJBFJ9XhgLyb+DMgiB90i5XA4HA6Hw+FwOBw5kZlswqKvxBWnXfa1xOlQB/WPRqPggw8NKMDWBIsaV/svzgOU0dIMWZ8VSXQBWCf689a4aC1/tVqN+Z6i7TjOjf3W07SxeesH7QSeLSIx6ubRaBQ05rin3W7HNN6stdF+uJbvLIMpnfmZVhxdHhSpsf4oWLDyWGRYq2zFvWhrIgeqrmoepb1HW2St2MK88RuL1Gtra0tEJnTg0DZCPnMg/5MnT0RE5NmzZyIysSbBOoUrxyLu7u6KSFRbKRK1dGB+HR0dBbmv+7Tf7+cedxYZjU5twDKJ79dxkqy91BpM1ppbYwx9q60+i8oHIOs4SYvx5D7jMq6vr5sxEPrdSf9ngWU5tGQp3s8WD7St1i6z5luXm9NYZC
HSWCXyxp6tsoxp+yGLtABrbK/XC39jfWbrtSZ7YRmhLdnc58uMGS8i3v08wPNFry24sjWGvYUg99HW+/v7IhL1RIB8xrXVaoX9DwiLWGZYe/S84PWILVAi0Zgt3McWKSs2SsSW47PIxJJi+XjMp8EtUg6Hw+FwOBwOh8ORE5ljpADL6qA1R0xNqONbBoNBsCLg5AutEvs7WzFSRWgD4PPJsUj6dG/FKeWNXUpjs7His4qydFjvB9LKb2mOcPpHLARrDTX7V9GaGh3LhPeKRJOC4jO2bkKLAg0ZwAw1mhkmySKl29GiYF8WLCpTbSG1aJlnjZ/zpm4Vma0BZY2nSLQvdexevV4vzLd+EY211nSxxQP9pq2lHAuyrPgatkAgCS3mNDR9HKN0//59EZlYkWCJYmpzkUl8K/rkueeeizxza2srNs+YmQ/PSEq0nQVpTKJs8bKsg5YlCletyeSkjZa23Fqn8J4iLdaz4nx02diShjmk2/309DSsh1rG87MWmROWZUF7GHBcFuZ0r9eLeaBwv+m+w3e9Xi9mGeE6nCejX16m1ywxl0WD9x+ambff74e2xVjqdrthP6e9N0SiKXFEphaFJM8CLofIxfK2OG+vIWba0xYUtCV7A6Ctq9Vq6BOks8A60Gg0gifC4eGhiET7Vu+l+v1++K327pknvp/LYcVBiUzWKJwn2DKlLVEWxbkV926lydDWcrayQn6mYe6DFG/srIMUGgGFwD2DwSDm2qdz+/D9TNmbZaM7CxhI7A5k5R2ygo2tHBdJsIJsuQOTAjSZ2nUe5HEfsOrBi5YO/uNFNk9bzANeILX7DG8EsNHD2Ol0OuEzCHieMLqveVOOegLs0pp2GC4S1gGfy6EPrvV6PWZiT9tIXhRYmwpeuCHAWagDup94Luv6FkEKkpVEQh+SePOgD4R4ZrVaXToRBeZDo9EICxeu7LKBhQlteffu3eDmB1l9dHQUnsk06SIit27dCs/E83m+Ya4dHBxEntnv93O7ULOs1odnnrOW4iGJbII36Pq7SqUSc9+zXPyYfGJZ/WnJcZRFb1ir1Wpw7cSGCdd2ux1T0KyvrxfqJsubY00CZLljA0dHR4lEJ5cuXYq5k7KLn16b+v3+ShRfWZDUptZ6Mst9eBmHDJ4zem9i5QDtdrth/lsu9UmpSHjDb+USKnJ/kWUeWqEqSdesz+T7iqwHKxV1vj6R6VzAoWNjYyPIdvQNCIguX74clClQoD19+lREJvNS768Gg0HoZ8h4Dv2Y9yDVbDZj7nv4v9VqxQ5XTImeRnGeZqhgwhqtuGVCPE2KZ8Fd+xwOh8PhcDgcDocjJzKTTViWKEBbpNhMB60lu/Zpi5QmpuBnViqVcHIsIls8W1fyJGa0NF1WwLPlsmdZnVBX65oluK0IsFZF9ysnV7P63iIfKVIDyxpnTdHLiXmhLYDrkUWvz8HHmi6UtRhM3486JbUBB0/nxax20ham4XAYNCW4svkemhumhef/GVlIElaFLAHykBHoS55r0EQVnUQ0D1AHdkfEuEAS2+3tbXn06JGITOuDscYWgKyufXnrg3eXSqXQZrBSwCK1ubkZ/ubA5HfffVdERB4/fhx5FmsrUSeMuxdeeEEuX74sIlMNIrvUaavw8fFxzA13FtitE7+1KGz1HGWyCYvMQLursLZcy3+mRNeupVkTOSYhi5XS8nrQbq/Xrl0LbnuQke+//76IiHz44Yeh79jibbm6zwu2bqG8kMc87rW7v0jU+ikSlWsoE2uo8UxrnYY8KZLavQjweqQ15RfFigbwHMb4KpVKwTKgrS9MIGARAmj33DSLz7wyPKsstcaD5ZWU5qGS9u4iLFM8f/QYYZdr7YrWaDSCbIcF6Nq1ayIicv369fA3ZDbkw6NHj8IcxNp2fHycOE45vUZWoDyW1Yndz3VCXovinMeRRRqB/zU5Sq/Xi8kfPqOwTEqCW6QcDofD4XA4HA6HIydyJ+RN03JDw8BkE5qKsd/vxywGOEFbFimLdnaRxLw4YSclhMP/FrVjku+4pUmxfI5nxU3hOq9FKq/WxvKH1lS5XDZooQaDQazPitbwoU3Z75YDv1FGjCVoLY+OjoKmQWvKh8NhLBibteQ6rqNer5tJBkWWZ+VgrR/K2u12gzYZc4aDRnd2dkQkqpUSiWqXreD884RFkoGycj/pIFAreWeRsQJZn6UtEWdnZ7GYT9CD7+7uyr1790Rk6n+O/uMEh9a8L6JuH3zwgYhEg+8xxuErf/PmTdNKBQ3g97///ciznj59GsYZ6s00urdv3xaRqVWOtaJWkDnmb1ZYcU06PYIle633s3UrySLFHgwWKZG+FhUjpfvf0nKzPMb6BpnXaDSCxvnGjRsiMu3znZ0d+fDDD0VEUtt/FtFFGtI8OjC3u91ujF7/9PQ0zBVcObkz+hp1Y9KUtDJqbf0yLVNphE+8volM5DisNdpjR8ePJT17EVjxflZZIQ841lwTN7H1CfNJk7GMRqNQL7YQaA8d3hcVRdTB91gWJcxzbSHhcum4MN4vZbEizyPXWW5xjJpING4KMah6TolM+why+fLly+FvXLFu3b17N8RNIVZ2OByGOkP+c3xo3vnE5EdJMVLMucCy2krZoMuhE3f3er1Y/DVbndBmeS1SuQ9S1v/aXcpykQB484urtXmfl+RhFt577z0RiU5M3ehcP4tcIEvwOV8t9w99KOQ6Lnuzy6ZZHVzHbjIwrUJoYmCXy+WYwCvatY8FThLpRalUipBMiEwmgnY50i5+qIOIvdCjnpVKJbw7LUdNEeAxwrl9RCaTGpsJ9A9vgHGQwqaImZV0IOY8AbNFgse9ziOC9m82m6FsGJ88HrSSxnKxWnYd9Pzt9/th3GFBeuGFF0RkskhgIXr48KGITF3rms1mJEcWrkXWA+54HBiOcYz33Lx5M2xKceDZ2tqKufuh7d97772waf/Zz34mIhJxj8Di8/LLL4vIZMOrF0jAIoyYBV5fLMIB1M1y5dLKlLTcNvwefZBKc+0rug912fnv8Xgc2hsumJB97777bhhzr7/+uohIkBfXr1+Pudl1u93UzVBemcFtrfuJlRAYlzgs9Xq9MJ8g+7BBtIgorl+/LiKTw79mirPWbt54rlIO8uFXJKr0Qr/osAJmyQWWJed4HmoCk1qtFv7GHN7c3AxkBZBpTCDAJGMiEnHFh/xAn5+enobv9d5sWfXV7VqtVkN9sMZi/1MqlUKZIc8xRuc56OWtE5Ms6QMdKyCwnkAW7O3thbmjCbhu3rwZ1ivUGzJ/Z2cn1B0KtP39/Qjxk0iUTTBvnfBOy7WP80lpecwkZADva9PII3TerJOTk/CZ3ju2220nm3A4HA6Hw+FwOByOZSAz2YT+2zL3s4YvyezGVIOautHSGs5zyk3DT3/60/BuTeXJ2lptdmaCiDT3Lm1hYhID1sixloevbA7PirwaNSZrwMkbGhbkEhgOh8EiBc0MTL6tVmvpuW8sgghLK6E1jN1uN+YagTrt7++H+y3SCT1Wm81mzAWQaUTzmrGzWDJZ08LuOqAsxvuhRbpz5
04YSzDDs2kfWkK2JmYJlC0aWmPfbDbD+GKtlEg0EzvmBbTSo9HItGSt2mWRtekiE00Xxh00ei+99JKITPoDrn3ox8997nMiMtG64XfL6gd2C9WU5Wyl/cQnPiEiU4vFxsZGxDolMq3b2tqa/OQnPxGRad9Aa8nBu3zV+aY4SH1e7SzLcU0gw2uHdvEQibv3snXLskjpHFOWZwHAruNZkeYKpp+NMuGKfoT1EZ4X//7v/y7vvPOOiIh8+ctfFhGRL3zhCyIykecYv3iWlXupCHB7WHIc4NQWGLeQ35Brx8fHsbQIGAPXr18PY5THAxPTcN0Gg8FKiSfwLtQNZC737t0L8+Mzn/mMiEzn4aqs7LjqtRFt12q1grUJxAS7u7sRNzGRqTyvVqth3mEt3t/fF5HJeEBfow+L3utlgd67tlqtUB/Ib6R1ODs7C14fKDvL7mWvo0yoogmKMP6Pj4/DPEG7fvjhh2FvAPkAmf3aa68FN2xYn27evCkiUZc69Pv9+/fDXhF15zmVNyyF3fm01wPTm2vZm0Yowbmf9DrUbrcjFlBctSWKLVLaAmfBLVIOh8PhcDgcDofDkROpFinLEmVBn+rZCmP5MaYFFOpnWjFGiwA+rhx0pq+WtWowGETiY3R5AR17U61WTS0n/tan72azGUs4WhQ0bWWv1wvaiwcPHojINIaj1+sFLTQH+gMo47IsAdD4cNZ7yzJlJevVVJbo19PT06AR0/ERVgzE1tZWLFiWtddF0tNaVPoYe4eHh6Eu0BpBi/TKK68EjS00LfCNHgwGQTvIWvTzoNXVwdWtVitYOKGJRd1Go1H4W8daQlMkEu2TZWgyk2Qea22ZfhZ/I14DmswPPvggWAgwNtEvzWYzRkGNd+QpUxpQVg6c1UQR3W43zPNXXnlFRCZWT/yt/ec56e4Pf/hDEZlqO/f29sKz2FKMd4L0AM+s1WrBSpUVabIUsqNarSauNSJTq5xluYB84/dkSf5qxTAtAis4XceubGxshDbFnMI93/nOd+THP/6xiEzlA2T+q6++alo9ihx7bPW20qSIRNuW5bmOq+E4D01UxVfIE7ZMaSvLIlr0rMhiTUS5f/zjH8vPf/5zEZmO409/+tMiMpGVaQRXRZaVrfvak6bRaESSpIpESQL0noBj3yBnMAaPj49jgf3dbjdG3mDFqBdZZ03GtLa2FixriHF98cUXRSTqwaMJgopOG2CB+0Eno+U2x34He90nT56E/R08I2BZe/DgQfCOuHPnjohMvV2azWaYS7z3gpUKeyn033A4zL0fnBUHJRJNsGul4OG9oohNZ84xUBh3fNUxUjwm3SLlcDgcDofD4XA4HEtAZtY+ICtjXRJtK58k02I0luUrC2YqK1aLT7b6lMsxUtqyw7BipLQ1g61U1jVvUrOs0OXudrtBywdtMvsAow2g2YOmZn19PUZrr/9eFBz3oLXErB2xKNF1zBvH4uEzaFP42ZbPvmXRwT1837yw5gn3D+qB9oaGCFqy7e3t4P+MK+JwONkrW9uKjAnIQo3M8gD1WF9fD7F30PxBmz4cDkN5ntXeOQAAIABJREFUtVWxVCrF6IDPk9KdtWAYP7AOQGt479690DeoIyxuHEdQRNJGC5i/o9EoaN50gmBLy/8Lv/ALoS7MQCgSTeCLKyjSP/zww6B5vnv3rohE5x76ki1TeWUeW6EsGmVcmVEK5WBfepGoZRPtoWWOlQCSLVIW+2qR/cjzTK+frVYryAPEQf3SL/2SiIh89rOflW9/+9siMrVUQ/ZhDOAZKHcS5qkP2pot4bodORZDM7NyPVHvg4ODEKehvzs7O4v1LzP5WWy5q5IfzDiKeMQvfelLIiLyO7/zO8FyyLEwSVgWK6R+B1+Hw2GYu5jfIlOLIcYXMBgMYpYoZurTsp29T1bFpKjHg+VNxYnuUX7MIXiDDAaDmPdE3hjJWWBrpk5ay8lrUX60YbfbDXsCxEahr+7fvx/2fLBMffKTnxSRiXzWLI1XrlwJz0X90CadTie3dTeNFRWwGJI5TZCmgudYYG1pmmWR0ilz2Eqahtw7QaZc1UgjpbA2XEkU4EkoYnJ99rOfXfgZFrLmLVgG0vrEej8vNDoPkxU8mSXzuPXZIoKe6Su1ayS7hDBVOcrA5nbUU2RS7729PRGZboAhCB8/fhxbzFkQMlkD6la0oEwqf7lcDodYBILC7fLk5CSY62G+x4Z1e3s7RhXONK1Fj8e0g4B2pVxbW4vR5mJD3u12E/P51Gq12ObpPIE+Go/HMYUDNg/vvfdemGc4PKLufPDn+VbkJgmuGuw2inGPhZVpYflwhbLhIMV5obB46wPV9773vTAmIU8ePHgQc8XAOH3uuefCGMgKlglMPMHPbTQaMYUYK/I0hTAfpCz3M4uAwlKw4P+8fZgmX3nM641DqVQKB/RPfepTIjLdFH3mM58Jf3/3u98VkamipVarxdqCDzBFjEEOBNduluw2zbmHcOW/UV6RiWICCkCtGGA3X66bzhHIsmpZbsFpchAK3a9+9asiMllf/vVf/1VEpv2EDeqq6dn1mOCDFA5NOMiyYg73WxtanXeJXT0tkrK0VANFwXrecDgM7Q7lF+pwcHAQIQcRmR6oeN5kKfMiSglWtmNuYD3Z3t5OpaFHnXCQOj4+Dn0JuYDwjjt37gRlF6eswD4J8p7XLZ2nbRYs4h4rx6pe8weDQUzes7u6dYBCfS1iCesAJRI1qqTBXfscDofD4XA4HA6HIycyW6SynKDZ4qGJGVgDpN2N+FRq0WoXqZF59dVXw7ut7PX43yqjdg+wCAIsSkadzHU4HMasDvx/lhOwhbwWI/5fZ/OuVqtBswHNM7td5BkPumxZYNEUp5F2sOlZa8is7ONM8y4y0f7BWsVZtLUGk4lAluG6w3MH2NzcDO+F5hkak7t37wYiA2jScO/W1laoC1stl63htCyTaeNRU8menJwEaw7qqeUInmu9b1ng/tYEObVaLQTuYy4hePyDDz4IGkNo+DBuu93u0sk/MGY4aBeARvXg4CCS1BpX/I3vQHJy6dKlYP2ANYEtVN/73vdEZNoGrPnUlOT9fj8QdGQFxjhb9FAOdtW2XLR137FlCjJGk3Gw5cuySGnL1KKyQY9t/l97ETx48CBokSHX4OJ39epV+cVf/EURmY5LpAA5PDyMEfkwinA1ZWsGYBEyMXGGSJR0CVf079raWqDah5zAWDo8PIxY3ADUE/KQvQlWlex1PB6H8YTUAeiTGzduhP7UY2kVlnfLS0HLXCYAgRWg3W6HcusEu+12O5L4WEQirmLoa76ifwB23c/TT2lrg/Ucdt+DnNKuqPv7+yH5OMYY2iErWc4iY40t0GgXPW+q1WpYh+AZcfny5eBKDosa5PLh4WEkbYXItP+ePXsWKOAhnzc2NmL7X5bF82I8HkeslSJRi6jeQ7M3lZbVTGduXbX31enpaSyNAoeDZKmXW6QcDofD4XA4HA6HIyfmjpGywBo+yzddZKJl0ZYFDs7TmgTWoBcRG4GTu+Xzblk6LCtVWkyX1naORqNYW3AQ
tCZEYOr1RWCRGOhy1+v10B7QWnPfwI8b2gy2Vi07QBdagFKpFLQFaQk0oa3c2NiIaTQsixSeBS3GaDQKmgpopNbW1mJB8EzZvMg4TJpH7A+Mel66dClovFBuxAR0Op2gjcb96Mutra1Y4Ck/P0+5FoEVAN7pdEKcAzRasI6cnJzENOyYJ5Z2aJYldll1YjSbzTA2oCFD/NHx8XGwRIEsBPXodDrhWUWmeWBwgkxthcc83tvbC3MB/v/dbjcWNwVt5Z07d4LGE4HzTG2v6dJ//vOfJ8a1iOTXZrKcwtzHfGcadMsipWNFOVmv9rdnawjKzTE7STEdIsUEm+uxwH2IcfPw4cOgaf7Rj34kIlPij9dffz1oqAGOz0N902TCIp4FvHajD6wYaSvxeVJsLMdXIi4UsrtcLof1ArKj0WjErDsYI8uIc00CWxMx9tFvOzs7YS220sGsomxAmsUWWnym0sd6CfmtvQlEpn3IY4/jZXUZNIqQiVn2rv1+P0KewN8dHx+H73Qia2t/WDR4rur4dbQvzw3M+93d3RCbiz0d/r97927oP9QX8n80GoXPMJeuXr0a24swoUxeAi6W+6iLtkzxHtpKp8HJc0WSCSVwr76/2+3GCOdwnbVfCmXPU2mHw+FwOBwOh8PhcMywSKVRW7M2SVsA+v1+OF3ipIcTfKVSiSVQnEV5WiQlJk7fbM3Qieeq1apJG53kB2/FSHGb6PbhOBitMc3KEpIXHJvGfrVg4UKdcMqvVCrBEoUra/GWTb2qtWFcRotaHtd6vR40JqibldwX5UdcVL/fjyRXFZlo2zTrHd7dbDYLoT8HrLHNllpo7TCfYLE5Pj4OfcbWVpFoIkdLI78sWHNBWxwODg5iYw7/93q9WJJhZrbLorlcNL4jKwMmns8xHZAxGFulUikk52VmQpFJXbV2cZ7ypIHlmkXhjXfD759jbziZtci0P05OTgJNLrSbuJbL5Vjs4sbGhrz//vsiMtVco0+fPXuWu59YZqPdtUy1LFIWxTJrPjUTFGQB9y8nw9SeC4vQn6eNWZZ5Okl4v98P/YIYDlj/7t+/H+IcmAZfP5etH1mtU3nqxO+w1jddZ06xgVhdTqGAcYXvMHY5YTfex4yU2gLFSd2LhBUXyom80Ye81uB+fLcsC3UaWL7qNZj3JuxBY3kdiUz2GWhvrF/or62trWA5wXrNSVl1/Py87IpZf8N11l5C7E2B8qDMLP/1HqHo1DBWeh5mtsN70HbYD6yvr4c9HLxVIKtv3LgRUlRoRmP2DMHze71esHRpxmSR/PIBbcxzULP2MUMf1qN2ux2zSKUl2GUrlKbdT/JcyINMO0EeBFrosMBgEzB+o6l9q9VqWIjwGf63qEuZrKGIDSA483liWldLiCUdJtNgbfwsd8VlkWtYdWLyBgguCAYOaES/JB0m8pYhD7T7DZeNqYb1QQqfi0wFCYTGYDCITRDU//DwMEb3yq5NlisIfrssWJSqPPlFJpsEtC82Suyiquu7aipdkejmlRddFtgiUdO+3tDqxXoWlrn54DnKCynABA4ik7mFDQT6lOmAl71R4qBpzHOtPOI8anA7YrcdHm8ik4UJm3ccqBCQvL6+HkgpMF/Y9VEfqIbDYSQvTRag/DzGLeWUptzm4OG0oGbrQKUVgHyQ0kq5Reia01zpmCoc8317eztsbtC2mFP7+/uhjOg7/D6JBCZvoH5WWDJdv4fXQWzU0LaQ5yLRccXfHRwcRFzKUGadc4ZJDIpUiOn3amgqd6aV1jkRgUVcK4uAdYjncuMzfVDnMA4cpPC79fX18BnvL9JIW1ZRd2v/xQdEnSKF91SWe2+RZbZCZ6ycmfqQxZToaGvI4t3d3eDuB5c+yHyeR9aBH+/JS0LG4HdYByjUTbtct9vtmNs5H56sVB64WuE0ae60Wdx/3bXP4XA4HA6Hw+FwOHIiN/15GnUknyBxatWufdVqNZZFHaf8Wq1mWqQsak7r/yzgAGft4sH/J7l/ZH2vZQkCmOadXSTx/7JcDbTWlK1O6BNoCM7OzkxXOi7zMsFaUa3BhHaB24rbDOMJn0ELv7u7GzPXs6sFtLiMpODzSqWylGDg8TieyJE1x9oqNxgMYu4TrBlLsnwuE1rzzGQTHMTJ7gIidlLlIghmlgnWZnNQskiUGpdp80XibiMiy3PlsYK+9Vhh6zRfoZ1EneAG0uv1Yu5+r7zyiohM3EUgV0CuUSqVYu+Gd8D+/n5ugh2U++zsLDwXY4vJJyzXPm2R4qu2nLJLCbuViESTxloWqSLkuB7/g8Eg5kWwtbUVLFKwvsNNp9vthrLpOTjLRbvIAH9r/eR+sAgO0Ae8PxCZtDtcldjdT2TSJiAEYBdC7cKIZ6/CTd2C9qQo2hNlEWjPFYDJBDDHtra2YuszE7BYCdXxbL0HKmLfVSS010HaWLEs0EWPK5YnSRYpllN8hXxAvzG5GOQI7oE73/7+fsy6y66PGB/oM04XkxWQpeyhYtWNLVG4JlmkTk9Pw334HbvSM7U5Xxl5E0G7RcrhcDgcDofD4XA4ciJzjJS2nPB3WsvDSSY1vWqtVgunYg7cF7G1uxwjZWml85762X9Ua/v4PZa1Ko923LIwcRtqKw/fUyQlK2tTdGLbarUa6qzJNQaDQayeedu6KI2M1mBCo2C1Ld9nWaZA2cyWEZFJ/RHTgjHLliDcD81GrVabW+OctV0sWm+tuR2PxzEff53UWv+9aLnmgR5LnNwRsMb9RdDSZtHacxyG7iOLRp+1YMueVxizrPHlOB+RiR89xx2JRC0uoHLH3Hjw4EGoA7R+sEwdHR0FundYure3t+WFF14QkWk/49kPHz4M8VJZwfJK18myamSxSLFmXXtZWAkgmYDCSuWxDIsUJxTGd5ubm+H9iFOD7Ds5OQl9ZslKixBhGWBZqq1UvLayR4TuA943oC4YX/h9pVIJfYJ+4rmmZQzPiSIxK66p6PizopDkQQNwfJ1INDbIsj4lxX0xpTVbSrUMLTI+PiuS3pV1b7asftSpTESiXmC4ass504HDkot5U6/XY4RjkB2XLl0K1l08nz2VLEIwvc7NAsrFsprrIpJMdc4Jdfna6XRMq5x+D6/TSYnUs3oWFJJHSrvxdbvdWKOkBTynufZZh5oiGKw4D1BW1748759FXKHd5BZhe8qKNFKNJDc+xiqFmc5RwO/nA1XaeNS5HkSmAgRmbBYCGI88ufV4ZJefVeUgSdv4iORje1tlH/KBTi8CXGbdXxdhQ5EVPJf0AQrfcd61ImVZVvAiqDeNHCiMYHGLEQ8bKLiMdTqd4PbHOVhEJoscDlVw7VtfXw9zD8HNACvXsoLlg97I4VDB7KjMxqQVc7x+Ja0Jg8Egtih3Op3QPpaLX175kDbPWfZp15d2ux1bU5k5DX9nYaJaJrGBPkDx/xbxR5KbJbsQsYJWZOruyJ8xsxzAfbNst+GLemhipJWH9ypa4ZqWj5PJB3T/DgaDVLl/Xq7ci8jjZfcpH0p1u3JOPBwoWPmLz3AwAvlEq9WKHaCwN2JiG+yJmLDLOnzk7Te
sExbBFMtbfWjqdDox971ZhBIon5bxFqEJE5plORy6a5/D4XA4HA6Hw+Fw5ERmi5TlLiSS7NrHOS4YnANCU2PyyS/NlWERMG1n2ul5GZr8rFSey8qMrTVATEud5O7CmGUZse6bF6wZ0P3EWhg9vtiFRAcrs4ZM04CythbjkC2r2n2Otd1562RhGZT35wXL8mlRDCeRl1iEG6vCIv2Q5CbFAat5n1/EuOBM8Xp+s1sc+ogz12srD1um2HVEZEoeMRgMgnYQ3127di3MNbwHbibj8Ti39YY1hoCWZcPhMJSXP9OuHdzGmuSEZQ2vb7hqdz/LrWkeJBGPMPkOu0axtUbEzg+VRqu9SliWKcsrRMtxtmboVBxMUa2JXSqVSmQO6LKsKo8U46NkpbJCEtI8aCyXL+2mybl7rDyP50GStAhW0XdWTi195XZlbzDIBy2XNzc3zdQuIhNrNtYCzvGowyIWAeeJtFz6UGZtfWq32zHZy79PIpSwLO48rnUeM7a4psEtUg6Hw+FwOBwOh8ORE7nJJqyTt0V/jvt0Ql72h8epjyml9TP57yK0E2mny1VqhFatYRmNRpmSKWcN7ly2FcEiN7EsUyg37od2gu9HnRqNRqyMnNBRZ2avVquphAh5s18XiYtswbLi/ixNcJpMWVUQ/DzI2/bLmBfzYDgcJpKWnJ2dxYgTGo1G8JfXsrrZbIYYKWg3IeufPn1qxk0hqBmWKSYMwHvyolKphDrpWKlms2laqZLioESm7Yv72XqiE/+yRUq3Xb1ezx14nQaLvIi1rNo6nhQ8zcjqYbAscNtqKxW3t47BZuugtmrzs9giqGNdV1HfvFboiyrzeJ7o+YHPRab7v7Q9Isfy8N+4Z5X9Y2GWJfE8wQQfSTHtHLvNcUGQU5qwq9PpBC8tjpcVmfQBns+yzJI/uOYds5ZFSluamGyCP2NKc75aXgcAlw/1TCNMcYuUw+FwOBwOh8PhcCwJuZ24s1g1er1euE9bpKrVaoymNqtFqgifzDQtuIUi/afT/NUZRWpE2A/dsjZp1pd5rX7LsmqIxPvA0lpZ2jArJkxridk3FrFUbJnSrJNJZVoGimzTZWnZ0rSus2IC89TPYthZBizNdl7M29bLqhvHkSTNc/6OLbjQ1MGKBBldr9eD3AZ1ORiVhsNhYIcChsNh0CAiBcHGxkZ4ZhatH4NZ+7R2FnOcrWxWXKjWkuvyikT7Usd+cJ2wprGFKq9FKm2M87ql68v04fpZSc9Len7RyGKVmfVuy4Ko03akWb/5Pm2ZWgWyWNxn3X8esMqoZQTHf1peKlbsm/7uolmA8lgSz8OLib05NEtnuVw2U9nofuM0A9g7sSeCSHQ/zu/WsVRWXGNWwJuh1+uZbH24WtYn/K2TrlvykNtHy896vR5L/cHJv7OsTZkPUmkDxqIp1ea/8MJqNXQUOjWNLtOi7C0CWYM9z4Madd5npAnsWXmwzotudBayHKgYFgEEHxZ1MDa/hycPfqfHMb9zVfTnWXGeC3Cam9BF2RhcJKxSUWG5x2rXO5azfNXzBW4g5XI55u53eHgoIpPFURMzPHv2LOZWwjlN8h46LNdf3aZWndi9MS0/IbuRaVh5qrCooy16vV7uw6FVD4uYAZh1eLCep5+xKuQlKrLusdYy3S5JCi6LOGFWWZaJj5JMTDvwWOlJrPuzHhzzKrqXiYtQBkaacoQPBfogVa1WY6EbwHA4jKTH4Hs496g1L4voKyjfOE8frkyHrt33OKWBJaPZDVL/r4mKarWaSd0vkv0g5a59DofD4XA4HA6Hw5ETpYtmVnU4HA6Hw+FwOByOiw63SDkcDofD4XA4HA5HTvhByuFwOBwOh8PhcDhywg9SDofD4XA4HA6Hw5ETfpByOBwOh8PhcDgcjpzwg5TD4XA4HA6Hw+Fw5IQfpBwOh8PhcDgcDocjJ/wg5XA4HA6Hw+FwOBw5UU378u233/7IJJl68803M6VX/sM//MOxiMje3l7Iqoysx5ubmyIicvXqVbl+/bqIiFy5ckVERC5duiS1Wk1EphmgkWW50+lEMi6LTLMtj8djMwN0UjZo/jxrnd56663QT2nPtcqhs1kPBgMRmWS8xncAZ4TWGaA5ozx+xxnO8fc8dbLqkoSsGdSz/i5L1u6sdVrGfFpWHri33nrr3Oq0LGTpp7RxB2TN5J6lbxbJCr+scVfkmMpbv0XkwyJtOQ+ytlPWufT1r399LGLLasjXUqkUZG6lUglXLYexVlUqlch9fE8SsBZAjg+Hw3DF33/6p3+aqU5/9md/FlubrKtVRl1evi7a17w2AV//+tcvlMzjss1b36zz6c033wwvyyu3suxlikSWOqGP5pFl85Z7Xnl/nvuHZSFrnf76r/96LDLZQ2M/jiv20iJTudBsNkVEpNVqSaPRiHyHth2Px0F26b2o/jsL8Ny0OrlFyuFwOBwOh8PhcDhyItUitSpkPSEWoeGAhanf70u32xWR6QkYVqXBYBC0ckC5XJZLly6JyFTbx1pCXTa2TOn6lUql8Jn+HVuwigBr/aznohywRPEVbYB6wgrF2kLWhOo6sVVu3nKnaRJm1cn6PIv2bNY750WRmn7rWavWyC8TRbZ7USjSEpUXy7I8LvsdRWjV877rIoyVecAWGNRBW2qq1Wrss1qtFuSwZa3idUpDW53Ozs5i3gm49vv98HdWsGbY0g6jXJbVSa8x3D7aApe2FlvgdXEVc2senMc4ttY+/izJS0UkXl6rLyyvmGVinv6dV45Ye5ainv1xxfr6uohMZBjaRMukbrcb2o3lFO5bW1sTkehe1NpX43dZ+sn6bRpyH6TOU+gUMQhv3LghIpMGxWEHB6iTkxMRmXQcvmPBAaGxtbUlItOOW1tbSyxTv9/PdKBI2sjngWVu56t26RiNRmFgYoHE4bLf74dyoJ7szoe/cagUsc2oRdRJ/5b7RLt/8P3WQpBlg5420c5DELJrZJYxlMXt4rww78b647YAneeh7Lze83Hrw6JhuePpA1K1Wk39zJKHAG8mRKLyH+tdv9+PrYvsts7uNlkAV5zBYBDWGH1QY/kGlMvlUAfrMKnry/9bbZDmzujjcdqO1lqjxw3DUhQD1kHKOvAue93Ku3EGln2g8nEnwT2v0WhE5J9INIQG+1Len0J2oZ1brZaITOSoln+4t1wuh+fOOy4suGufw+FwOBwOh8PhcOREZovURTV/5wVIJETimjGcdk9OToLmTWvRROKWqXq9HjsBs7YhjYACKFpbq8uBUz6/azQahbJ1Oh0REWm32yIyqXeSa0Wj0QiaRn6u1VZ43yIWKa05gHZhNBoF7QJrZ7VGQwdP47n8zCTL1DxugkWBLXzaqmnVxXKP0fecFz7K8iPJPYUxy2p43u0/C6t0cboolqm87192+0CmJlmdRKJufJb7Hq48FiE7tPdBv98P1ibW+OJv/d08rn27u7uJ72IrmF47LMsXW6jYrZHbolarhc/Y1SfNFXCVyDOGVlk2eJgwLIuUtabq9TXNgpXmIVOpVBJd/4poi/OwTGVx83NMZB9c9DSJxN
nZWZBdx8fHIjKRIZAn7O4nMnEXxDOYII3v4b+LWPvcIuVwOBwOh8PhcDgcOZHJInWedLhFY3t7W0SiAWlaC9btdkO8FJ96tWYC1+3t7aDRSQumnEVAITKfpSMtNkpTQ+r6whKFkz4sUqPRKEI1KRKlnkSQIE78FkFHlrieJHBbJ/m3j0ajGO08azbQJ2x51JadWe2eJV6qaGjqTiuWzbKQWlpaaGKA855/H2XMijvR/WYRvVy02CjGPDFri2CVlqk0GZkFs+hzi2gPyFe2OmnrSlIMEHsZ8HUwGMRinmAR6nQ6Qf7zFX9r2WrJ+Fm4detW+K1+P9YatoLhM47HYvp1PAvQGme25nHb6c8sa95FQ9aY6iLA4yvJasdW9yxrE39npUTBfWlrMO9fivbWSZrHaXHUy4iXOg9cFK8JyATet2G/CasS72EgE46OjmKWcz3W+BnWPhiYJduz4GJKEIfD4XA4HA6Hw+G4wMhkkZrnpKrjNtKQxvZWNGCl2N3djWnr2VKjmfwslhCgXC4HS5fla6yRxuS3iL8ma220NonZSphWEpaoo6MjEZlqJsvlctCQ4rqxsSEik9gwfMYaJq19YQ39vHViukpLq4jyg8K+0+mE+iGGjWMPoMXQ2otZWqdVaZZYI8ZaZa19YcYrjl3jMnLs3nnFBHyUkTSXKpWKaQFAn2BMsbY8KYbS0oBeNO2lhYtsWWOksZcm9S+DZZkVa1nk2sU0vlYcFMqIcrJ81ex7bP3R8a+c+FJ/x7EHOrbXinmZBbDksgVCl7HT6Zhlw2dcNvxOzzUus2Wl0pY9XkOYeXbZSJo35z1PMH5KpVIsNprbzNLs67gpiy5fx5wPh8OY9VQkbqXmFCxFr11WHXiea1hJovO+67z7+bzfr4H9db1eD/IP+0x4PbVarTAGUf5+vy8HBwciEo+5ZMu53gNa/cbeJEDedlpKHike9DrIUyTOE593kC3iEoJ3tlqtkFOKCRNEoiQMENDHx8cx4W1ROeNAhU1tEiwCCv2seZBEPCAyrTsfEjEYcUUd19fXw6EQ+bMuX74sIiI7Ozth8eHFzSKDEJlvAeYDmu5vlGttbS2UA+V+9uxZOFyhjUEwsra2FsYjFmw+kGicx4GKNxy88cBmAhsftPt4PA5toBcAa9N4kWhXdb9yoPx5knwAehPOskz3EW9AMd7T8q5ZLoEXbZETsQ8iSUiTi+cFqw+TSHQsYhteq9Lcay035rx1t+jPrUMT/uZNKrvriUQPTdis6GuSG5/lIoNyaeXjLHAAua4T2ord/lCe09PTIMdx5fKz4ozLz5soXofQVtr9mQ+t5wmLDGoR1/i8gCKVD5aQX9jLWIQAjUbD3OOJRNvdOuDrA3u/348dypbZBjw/ee2xcqvhniQX0axz/aIcqCycBwkQ5na5XA5tijGGQ9DGxkb4jJUeGCN7e3siIvL06VMRsQ0V2Jc3m81EanSRuMzL2k/u2udwOBwOh8PhcDgcObEUVQy7cumAUcvCYGV0z3Iqnkc7Da1WrVYLQW3Xrl2LlIPLi/K///77QWtjaez0KTfNMmVp3LMk7U0Ca/STXIjOzs6CdgjavMPDQ9nf3w9/i0zbYHNzUzY3N0VEguXu6tWroW6W5khbovikv4gpXFu28KxWqxWsZejLs7Mz+fnPfy4iEuoG7exLL70U7odmiZMw6+dzOazyFalVsp7FGnCMQ+3mcnZ2FqP6hCZxEZfKVUJbCFj7j/LPck0sWpvG1gntXiUyHePQiB8dHYW+QVkx1jY3N2PZ19kNRlu6s5RtFWCLZpIVRyROBsBpCZaNtPHN6wlr2bXGXbt9icS9JtiZhicuAAAgAElEQVTdm99tuSctivF4HOa75earrTedTifIdMt6g79xD8sQa33WFmK0S7PZzO0Gh3eLTF11tFWjWq2axE9sneK6HR4ehrUYV3bxRv3YSqWtpWxV5HVq1UAbM8EIypaXan4RPHv2TETsPQ3KVa/XQx/yVbtiwY2q0WiEdVmPY4sOnxOvWq6AeWRK2npg7Ze4ztryzGstxj/qBRlSr9dzyeXzstQvahEr2iMEc3U8Hsf2L2jjy5cvhzGm211k2k9PnjwRkYllSq9JGDs7OztB/vD41jKAx1qWtnKLlMPhcDgcDofD4XDkRKpFKu+plbVmmlYbmqnhcBhO9dBgQHPLMS/LCmZGLFC1Wo2dfGGZKpfLMQ3KcDiUu3fvishUCwbLh+UXz5YpbZVKo89d1IpgxUaJRAkL0Cf7+/uxgD2c/Dc2NoIlCgHDHGsETQKeNRwOI9oj1EVk0hZ5NdQWSYbWnpbL5VDel156SUSmfrUiIv/zP/8TuZ6ensprr70mItNEkWwNxZhNSyQ467NlgP22+bNZ5Tnv+JQ0sGYLcx4W0F6vF6HhF5lqoHQ847KQRtxydnYWtKmw5D558iTMIcg1WKU3NzdDfCHKz/IR49pKslx0H+ahVOZ7tXaaLRNWPM8qYgmTns9tx9p0kYns0jS7TDuO3+rYjna7HdpAxygxrD7MCstKorXiTMzAViesSfiM111Ncc5yVGvueV1kywL+h7zNip/+9KciMpkLkM2YF2yxxZzB86vVamwNZop0tk7pK9Y0tsTpmFi2Aq+CQCgJ3Od6n7DKeBq2DiXRSltWg7W1tQgJlci0f7e2toJMtyited6JRGOwrPipeSx0sywoWj6USqVYzDHPJTwLbYQxrJ9xXsgbo2lZIFcJi3xE7wvW1tbCXpTHk25v1P3hw4chXsriNdjZ2QnPEInGMGtk3bcW4tqnFzUOMtQmM3apwwTkCaU7s+hByS5selOAct24cSOWIZ4XNxyo8Cz8b6FUKgUBw4IyaSPAAZdZwRs+3V686LNLn8jkIMWuFyJTt4vd3d2QA+TmzZsiMnXtE4kL2V6vF2OUY8adeTfAHOCpF1RcRaYT7LXXXgvlRL/+y7/8i4iIfPe73w1C8Ytf/KKIiDz33HMiMhH6aDtsVNL6YVkMQjweWMhjbAI8lnhRE4mav/GMZW3M5wW3LeY/Duynp6fy+PFjEZkuYliwln2QsgKQdd/0+/0wl+A+ur+/H+aCPkjdunUrLASYGwiQPTk5ibkhiEiMHaqI/pu1IdPfMxsc5hoH6zOZBu5fFbIQdDBpA8t4yGOMKfQXu+egnyALIDP5O97cWQQBeTcmOAxZ5BHs3oa/2a1Ns5fimuS+JxKVrUweoNmzcGV376z43ve+F56B9sZGBtfd3d2gaOBDFrN24f0oP9pdtw+7/eFAxZ9pN8dOpxNz2VwF0BdwqXv8+HGYT1iTUF9W6iwLLF8xPrQLKbuJsvIH49xi90W/MlEVvrP2f1p5xYr1eTf6SW5+PP4x5jHm+G/ImtPT0zCOIBcssiqUeZUHk1myXbcr+g9yQyTucpv0nCLd6PEMiyCCD1QYI1Dmv/TSSzECCsj68XgsDx48EJGpDLDIy4C1tbXEfYWTTTgcDofD4XA4HA7HklCIRUq7kzWbzXBy1FqojY2NcNLUWt3j4+Og/UjK2bQooFFJy5fQarWCFcYiPUDZ4Np3e
HgYs0pZwfFoi3q9nkhxylrgrGBttUUtKzLRTKLu7PoAzQS0L+inW7duyQsvvCAiU4sUyn96ehrKyG4j2hyv22Je6DxJ6IejoyP52c9+JiLTPtnc3JTPfe5zIhJ1RRQR+cY3viH/9V//JSJTDfuXvvSlUEetbe10OjHN97JpxNkdiQP9oZFB/7CGSRMAWGPvopFOcHA++ufOnTsiMpED3//+90VkOlaZijet3YvSlvHv9RjguQSNXqfTCeMTffTiiy+KiMgnP/nJYAWBBhr14qB6tAmTHpyHuwUHhGsrNstPaJRhRbAsO8vSpFvU4xa0tnx9fT30BbTkOteIyFQbDw20iMSsIN1uN9UdMu+cQ7B0u9023fdQHotQQpPQaCuUSFzLz2QAbH2CnGfXO1xZY58F7777rohEXbnwLt4bwNUa3gS7u7vhb7ZiiEz6EHMNn6GMu7u7sZxUR0dHYfxi3jG1Ou5bFiw5rKmXHz16FL7TLqe8X7CeWQSef/55EYkSPmB8sQcLXKZgUT84OIjIQJGpvKxUKkG2633g7u5u6HPMw0uXLoV+1G5/RXgiWBZ3lBUyd2trK+JJgM9EJm0OV1WWCyifltV5CSiWhVKpFAtjwZ773r17Yfzfvn1bRKLka8veN7C3EWQW5ijLeLQt5v2NGzdCefVYYc+s+/fvR54pEt9zX758OYxTK09YFiIht0g5HA6Hw+FwOBwOR06kWqSynqZ1AOKlS5fCaf7ll18Wkelpd2NjI2g4YE34v//7PxGJJmlbVqAl+0Mn0bxWq9Vwcoemhn1DOTmiyMQypeOlLCsRsLm5GUs+xqdkjv3JC11G9vWHFoX953ECh9YFcSovvvhisEhpYgameobWqt1uR2hCRaKn+kX8mzkQWmSqVRkOh0Ez9oMf/EBEJppYaC3eeOMNEZFIoOLf/d3fiYgEiwc0gm+88UYYs9wnuk66bPMijVbdGpeoE8rG2kpAx7NYVkKLuGKV4HGO8mIuwILT6/VCvXSMFAf3Lhu6nax4QGirarVa0Kx+4hOfEJGJJUpE5JVXXglzh+n2RSbaXsgO9DtncgdWodnUmrp+vx8saLC+v//++yIyme/QLkO2o967u7thfGbRJM9TNyuhsbaCWbKj0WjEguNRD7ZIae16p9OJ0aWnpZuYRzZAe2rJao7xgexlK5SVJBhl1OQRHHMECwDHjSXFkG1sbMRiNWcBmm+mr9br7fr6eigHWyxgkYKGHNcrV65ErBhcxnq9HpON6+vrwcqAtcyy8K0CqDvGIPZHb7zxhnzwwQciMl2zV5ko2EoBo1PYcIwQLFOPHj0KllTEteK7w8PDWAwb7mm1WqHP0Tc7OzsxKzFTq6fF7uQBexhoUhkuD8YK9gWXLl0KYwqWVp6LlhUtb8LeIsGxkJi32NNhH/HNb35TfvSjH0V+h/rjnmUCbXV2dhbxhBCZyg5ehzluHJ5SWH84jYVud8RMHR4exmQ17+0ty1SW/ZJbpBwOh8PhcDgcDocjJwpVeeDk1uv1gqUAp0tYnwaDQcxfGdqKVSTG4zJCu6JjvJjRT5/kReLJ5c7OzoKmFnWDBpd9Z3ECvnnzZtC0cPJUfua8QNlYey4yaWNoh/BZuVwOJ3DEE6Gen/jEJ8JnKCtrX3QixHa7Hes/rndejYwVa4BnoM22t7dDe3344YciIvLOO++EcgK//uu/LiIif/RHfxQ0mH/7t38rIlMNe7lcDs8Ca1K9Xo9Z9hax5mRpA2Y4hAbO0qiy1kjTtMKK8OzZs9BPHDO3SlpdwHqnTmaKuXbp0qXwt9ZSJZW56DpZ5dQJp0WmWv5msxms19CQYS7t7OzEYqqgmX306FGQfxZr0nnGu/E7UWeOa8WcgGYZ2mS2uM9KpCwyX7why0s9J5mGXX9WrVZjCXn5qrXwHH+o68IWKc1cOI/Me/jwoYhM2lizy1ksc2yF0vFPFt27tuoyJTk+Y8pqyH22gudNyIt5fHJyEmNbZcYzHQOxvr4eyqTjp65duxazUuGenZ2dGOU2s8HhM04ynJfSfRHoZN2/8iu/IiIir776qrzzzjsiIvKd73xHRKZyfJG4u6xA+zSbzdBGVuJky0oFywH2fND+P3z4MFir8B2z3uFZGNuj0SiS4FxEImMxb3xeEtiarVPCsBWNWZxFJnMDbYH5BQ+kbrcbS1lQrVaXzkKdFSgv6sLzBftw9BHkC1voGUXWgeWJ3gOjHM+ePTMZ/dC2WHfh0cKWQFyRAuf+/ftBtmJsWt5SPOazeFXkPkhZE5nzR4lMDkgwFz569EhEJJitDw4OQsHhcsX02kzIIFJ8sDUHeaIu2rWGF0gI71arFTZGFnU5/kY9IQjef/99kwQAm3UIB+3qNy80lTIvXtodqdlsxszXGIzPPfdczI0KB5T9/f0gPFFPFiSaLMGiZc+DpMNLo9EI/YP2v3v3rvznf/6niERdGEVEfvu3f1v+4A/+QESmQvIf//EfRWRyEIO5Hs+6cuVKjHo9LX/NImAzM8YLu6ZgkwpBiA1Eq9UK7Q6XCiAtU/x5wSIhwMKFjcPW1lbMhZGpTVcBa27zONTB7s1mM8gxTXLS6/XCwQkHd8iJx48fh+cz1W8ScYhF35oVWQ8zeHez2YykPMBnIpOxhrnBMhK/XzZJBrtXa/nAB3RWdolM5KLe8GBulEqliKJNJEreoPPjMZLy9+UB5m+73Y4QWnB5+NDEc4Ppy0WihA58SBKJ5vnRZA3r6+uxnD/sqpRXjmOTc3x8HNYKTVzCeZ6YrpjHoa4T9g7a/e/q1avhM3YT01Tb7Da3LBc6a55qWf3f//3fIjJpA2zsdD6dvLmB5gFkb6fTCWMOYwPjjd3xOMUGgPmEft3f3w+bc+wDcX3y5El4J5NVJCmSNYHVImBFC8qMMjx8+DCmeMAe7fr16/LZz35WROLul48ePYqtrSyrV9mXGpz3EPMMdfvc5z4XlNDYCwLLItRiWK6ker3tdrthrOiwGpFpO0LWPP/886ZxBFcc9HnfoV0BOR9ploOUu/Y5HA6Hw+FwOBwOR06kqmKyaj5Z2ycyOUFC+6Tpfnu9Xjjh4+TPJuQsriCLgDXb0H7gZAqw2ZAtUyg3gqr55Kw1DzjlHx4eyr1790Qk6iaC+9iyIDLR/s3rPsZuLlrbOhgMwnesaYRlDKd5WDx2dnZCW2mLwdOnT4NFSmeM5zYoyiIF6HY5OzuL0GHiXaAo/d///d9I+Y+OjuQ3f/M3RUTkd3/3d0Vk2iff+ta3gmsN+q5UKiUmgh2NRkt3s2B3JNQT44Rpi3XyUGgJ2bWA++Q8ySYApm1H/4B05vLly6Hc0J5Dk7xK14gk62O1Wo1ZzDY2NkJZMVaYphp1gysIxlq73Y64EYhM+k/T/i67z6xExLVaLWjyoeUHRX273Y4RA7GL3LLleBrYRUS7hvZ6vSCz0O5sEWRyCZFo0lvtksbac23Nm0c2MHkEJzPnsnLySP4MaxMsBmyFgnZVX5nOnNfgJMppXjOzAmQK7XY7zAdtmTo4OAifMckG+gDtgvWHqdQ1Wcbu7m6wVvFV
J/zF79bW1gojMcgCpjsXEfmP//gPEZnID3iFJLlULROwmNdqtUjyapRNZDKmdDLlnZ2dGDEE2v327dthrmiL49OnT4MFDta5Z8+ehTGB+yFj2IMoC7K4ejPJAOQDxhgDfXbjxo0wVtBXGK+lUimUnd179RpyHi7a4/E4zCGEnEA+XLlyJaSAYXfXVYHlJls3RaIhNJC9bJlKIn+7detW2Ne+/vrrsfegD7EGd7vdMC61N5VIeoJiwC1SDofD4XA4HA6Hw5EThSbkZTpTnGp1YttarRbRmPPv2d99Wad17TMpIqZlSlsFyuVy8CG1LFNJ5X306FHQWiAuolqtxp4PLc7a2tpCPts6Xs0KSEb5L126FCw5uEJzV6/XgzYI1idorfb29iKxUSKTvrP8UVHHvJrpNI2StrqhvKgHyvHee++JyNTS8fd///dBa/SVr3xFREQ+//nPi8ikfRBbBU0Zx/NBO7cKTbuu82g0ipGH4FqpVII2zUrGqefTRbBGiUQtUqgLCGmePn0a+hbaTtx7HhYpgOWctrq2Wq0w3qFNBZ11p9MJY5EprvF7jC3MSw7q1/VdlIwmC9gSCjkOiwXKxVpC1AXjjrWFyy6j/lskqk20SHeYcldkWv5qtRrxy8f9ItHErainZZFaJBExW5+gBbXo27UnB8dB6XgoJo/gOCiRyTjT1lBuO+1pMhgMchNCQXM/GAwiaRlEohTkmoDq4OAgljyXrRT4W1urHj58aNLbw4KC9Q3/b29vh3b5/d///Vx1mwfacok2Pjw8DH2nE6xbvy8aaEfLu4ZjdrWVant727RSiURJgzDn8N3GxkawcnPiX/Q5xgP3/TxpYdgqxJ/hqtuz3++HuC69L3z27FnoI6xPPNY0SQK/a9mYZYFDebG3QfsOh8NgvcFYzEsoUwSs2HDLIwRj4OnTpzGyKiaFYt4FEZFPf/rT4R4+i4hM2gRyDePNkodpcIuUw+FwOBwOh8PhcOREIQl5w8PIxx+aTGi/cG00GuGEqTVTnU4notUUKd4yZVl7tOat0+nEfGW5LbRl6oUXXoixQ/H74A+N0+79+/cjycMYnMwyK/jknnRKL5fLMcaiK1euxOhjoeUcDodBawFLFK7Pnj2LUdaXSqWYJk1r7eeBpVECmHUHqNfrwbqGNoZf8N7envzTP/2TiEx9m8HC8/zzz4dn/eQnPxGRiVYUGhAdqzCPlS0N3IcWhT3GDvzJcU+z2Qzl1lrdTqeTmKjzvMEWKcwZjK+jo6MgBzSL5aw2X6YGkDW0mlWPLYOw4HKMFGQAxhPHE+kYg0ajEYu1WbalnsHv1Ax3PLfxt7bwsCV0WeB2T7IccpmYrhn9iM+09Ye/g3a50+kErT0z6Gnt/SLjD5aRUqkU2hRlY8ugtjCxRUp/x0l0daJatqxyn6OvLSt4XqsA1khmRNReE91uN6wnbKVi6xRfj4+PI0xvIlErPGQ7vjs4OAiyRbcTU8AXDWss8NrL5RkMBrGUAasEewVp5l9O+8BJXkWiez2OpRKZWGm0hZStoXrvU6lUQr/gO8h/ZrLMiyRrDa9BwHg8jsVLcZwXLGroK/YM0TGXlnw4L6AcKC+nE0nzgFpl+bX1PQ0c14S4Y15/UE/sBREb+alPfSrUiS1g2FfpNB8Whb2FpaXORoEgeHlDrwOAWXCvyrWPO0tvXIfDYZi02BRZG2c+UIEaXZefNxwwGZ+cnAQKRiuwF5M1K3gR1IF3HDiuA4t3d3eDqwMWWzzr5OQkLGrYBHIuCDZfo576ILWIa59+NpeNoTdK4/E4bBRg1kUb379/P0w+0M5ic3T79u2wEIB4o1arhe91LqFlUubqzWun04kFsqJctVotNvl5A68p7xehzy4aul8x3nq9Xqi7djPgg/Uq3Sv5fSxY8fd4PA5tjTEGnJ6ehv7CnMCGoVarhc2GzqouYrtiLrv/rCBfTQvOG2OdX2sV44v7IMl1h4lVeC7ptYndOPRawC7Slrt00rvnAZRavDm1yGWswxJkl0UeAXmoFVxMTsSHJu0+zCk08h6k8PxqtRpLacLzH+9nN2XMGcgzzvUDeYhDE1Opo4xMV4/nYm6y66aer8uEJmZBP7H79rJSbKQB440PUmgzvmo38uPj45g8ZiUA6odxiXHKB3zcU6/XzTGKcmWhoU6Dnpucv4zXc52HDuvq2dlZ+FuTH1lkW+dxeEpTPIvYYysLMceyYK3l1oHKalOMQU6ZIBJVBuEeyNaNjY2Q4oeNAJiP2O/zGpFlr+eufQ6Hw+FwOBwOh8ORE4Wq1VmTqbV+luuSxiKkBHnvL5fLiRoOTtoIrZgF1OPy5ctB4wJrhqWdxcl2f38/aDZAwbhI0kPLIgWwS5pFGYty4z7WPuKkz+QLIhOtLlN8ovxWIt4ikaY5wWdM8w4tA9wKKpVK0IJBgwm3v36/H7QWqMf29naoA7Sz3NbL0jhxXfRnKAc0sZVKJWaV4ySeeMYqXcOyQrcf1+OiuERYsLRnrPXWbc7WW7YUiEzmjSZysNygitJSp8leDR7jmurceuYqNelZZAuXlb0gtJWKLSRJlLq8pi3LugtihrW1tZgmH3KLKcvZ+gTZrskjLOsmW9ct6xOvAfq7vIlR2XKO8a3d2pn6mD0jLCsVnqmt73yFbMe72+12zMWZ+3lel7FZSKO8Zm04Pi/SupkXGD/j8TiME3zGrpiWlUpbADn1CvoCbnKWtQrvaTabpvupSHEpVBg839kyhb+1O3y32w33o85p3gPnQXWehIu8nopEk//qsrILMkPvWTHWPvjgg9jaC1lw9erVMO5ASDEYDGJWMOyveF1Pg1ukHA6Hw+FwOBwOhyMnMluk8p6otXbS8vU/b1/SNJ9bHWjZbrdNH1tcEcwGjRq0i+xHj/fdvXs3Yt0RmVp9OJllVqT5vbJGB9og9lPG95r+sd/vBy2STr47HA5j/tC1Ws2MjeLrPHWyfjvLMqVj8FDHVqsV2haaL2gq9vf3wxiFxa5SqcSIP1ijuYxxy3Vi/3BNrWpp0S0a0POwFsyLi1DGrO+2gvT1b9kKiDmhfetZG8/WrSTa+mW1TdJzz1NLnoYsaQj4O4sII+0ZSZYp/XeRgIa01WrF4kc4Vkpbn+r1emJ6AKYs10naOR7KioPSCYj7/X6M3GcWOGgbZWRSKpGotYrvQf1wxbo1HA5jMRCcvFcTVzBRiLayDYfDpRPxWHML77xo80lEYiQyAMsly0plxVZpEi7+nfZa4tQ42nrJ+4sioeU4W76sfYa2aLL3Tdpau8o4pCLftcrxqfd8Fs8AEwNp+c0pLpBmRHsfdLvdGGHI1atXY1YnyKHT09NM8n5pZBN6MKZtiPM+syhkYZXjQOokNz82h+vA8Rs3bsSCSMvlcsgppRmGnj59mpvHP23icrZo7T5RrVbDb/B+XphwgMKCxIQOmrWsWq3GPiuiv9IOLEmmc+0+wQdZtC36CW3S7/dDG+D+ZrMZ2fDqci0Luj9LpVIs0N0SllkOIhf5IPVRhuWaw/2XxmjJhBUi0UPZReuvLOVZJcNT1ndZbZtlgbT
6YVl9AtfiRqMRZLQmJeBDEzM7andSVgQm5Znrdrsx175erxdzUWW3uLx5pJj1VAf28zXL4Qr15fv1IWswGJiHQ8h2vc51u93ch8MicVHmk7V3AKzcm/w7rWzgsacZGi3loFUOvXaPRqOlHKQAXk/1ms8ywFKg8f9Z35PnN/PivPfXWZDm/prWJ7yv1d8xuyjYOvlAhbxZHPIBQ4iWb0+fPs3k+uuufQ6Hw+FwOBwOh8ORE5ktUnnMhUVYn1aJLK50swgotHsV3MPW1tbCCZjNwjg9wwQJdwS2BOUFk3VAY8dWMA5wx2ecI0EkSq8NS5SmsGdTK1uhirREMbK4F6WNT253TYQBN5lKpRKjOO/1ejGXmWVrlNKINJI+S2ufizbvzpNqtSjMo+lLkjEWjXdeK2PeshT5PAt5CC3mxbzPYsthnmesYrxizWg0GjHXJrZaalc9/kx/xxamrNYn/aw099VZYHdjTRXNlg62NuGqLVL8f5IbObuAQbYzdb3VFjqVR1FICgW4iGDyKD0/2HKr13he9wFYC9jNXu+PmGadZVySvJuHiGweWPThWazf87j6X7S1+TxhtR+PQz0mLZd4HpMs/0SmZCfj8TRPGNLjbG5uhjEMyzZ7XyHVQhrcIuVwOBwOh8PhcDgcOZE7RurjfIqeZZnSmjr4TlpBzdC8rK+vB/92nIDZNxjaMyS97XQ6C2nIkmJ6KpVKjJ7comzmxJU62Jifpf2mmba9iDGSheo8yTKVpPljjaoua61Wi7QL3qODghex9iyikUyKv0kLaP04zFVrHJxXvfLG42T97Xloqj8OlsFFsYy6L9Ku2vokEieQEbFpzLXFhf+3KKtxj17TLKKTReZemuWcy6/jldlKZVmrNCEC36vX8XK5HEtKzLFnq4qRyisDziPQP+0zyzIgEt838VqpPWP4f4tQJ4mwYZVyat45/HFad88DSe1nxaaxV5S+v1KphD203tceHh7G4vC63W6IScWzsGff2NjINA7cIuVwOBwOh8PhcDgcObE01r5FkXYKXPaJf5ZlSlMqcsyUZnMZDocxKtsrV65ENIAiU03Z3t5esATlBcdiaIpfS1M3Go1iiWbZt1Rr6vj32iK1jIR5IukWpiS/5CzWLP2/RadpMbGdF5Lq9FGLRwTS+nXW7z4OWMS6taxyLPNd591vHwVtMceP6PnOMSaaCY2Z+bQGluODdPwUW58smneA5WLe9uPYLj2+0t41HA7NeBwR21rFVivNHmtRWvP78rLkLgMXZVzOYny1ysnxVYysdbK8aM7TIqXL5Zap1SKNyc8C5jmszBy3x/IEV8T+A8PhMLA463Q3zWYzU3qEQg5SqxrkqxyYaYcp3bCj0SiRIpGDKnGQajQasru7KyJxusVyuRxyTC1S7qRFCGXCu/WBjhdb3a9WrolFFtmsWMTNL+vvsrqvnLeQzPLej4oAz1POVdUpjYZ1Fi7KoXteLMP98KMyFi8CeA3RBxt28dMHI3bt0656TFme5dDEsNaQeQPqWXGCq6aX1mXS5eNNkV7X+GoRIVlpB3QZHbOR1QXQ+jvpnjRchL5Z5EB1Ecr/UUXa+mu1LadX0IoTKJpYecTylkNxRKL06llo9921z+FwOBwOh8PhcDhyovRR16I6HA6Hw+FwOBwOx6rhFimHw+FwOBwOh8PhyAk/SDkcDofD4XA4HA5HTvhByuFwOBwOh8PhcDhywg9SDofD4XA4HA6Hw5ETfpByOBwOh8PhcDgcjpzwg5TD4XA4HA6Hw+Fw5IQfpBwOh8PhcDgcDocjJ/wg5XA4HA6Hw+FwOBw5UU378u233/7IZOt98803S1nue+utt2J10kmJy+WylMuTM2a1Wg1X/F0qTV41HA5FRKTb7Uq73Q5/i4j0+/3wvEqlIiIijUZDRETq9brUarXwLn7meDwO5fn6178+d50APHcW8iRmzvpMC1n76S//8i/HIpP2wfvQVnzFd2llOjs7E5FJHfG3deX7cNXtYrXTW2+9tXA/zcIibT4PsvbTm2++ObNOpVIpVn7rM0Zau8+bRDxLP/3VX/3VGO/AeBiNRiIyne/D4VAGg4GISLj2+33p9XoiMpUB+K7X6wV5gBgKEJAAACAASURBVCu+Gw6H4blcr6QxX61Wg+z453/+50x9NK8cn7edVyEfstSp6GTz89brPOvE8wxjyEKaHLSe9fbbby917Fmw6l6kXFxkD3HRgHbJO/aytnHaOMx6f57n8r1Z6vRx3Lv+zd/8zVgkuhfFWsD/68+q1Wpsn8lAm+t1bjAYxNY5Xvt4PcTvITu+9rWvLSwfeG+ny409Nd9n7RG0LMP/+v4sSNs/uEXK4XA4HA6Hw+FwOHIi1SL1cUcWrQpOvrVaLZzwAT7tsnVKZHqCx2/5ys+3NPWLIM/vs57IV20N4XeWSqXQB7hCu1KpVEwrlQZrI6B10daG0WhkfsfWqfPCebR/XmTVOLJmLElDfnZ2FtMyrar9rbGSpqmDFarX68UsUrA+9Xq98Bk/A//j+QzLEoX/Wav2/9j7siY5rvPKU11r7w00FgIgCZDiIlGLJZmyZ+zRhMPhUMREzA/yi205/OLwiyP8E/wT/ObwMtLMeBuvkihaFEkQBEmA2BqN3rvWeag4t05++WV2ZlVWNSjd85LdVVmZd/3uvd/5ljJ18vrD0+bZ77K+P+t9We/8oiKvHef9zrPusWXS8ZOljVYNstUue886i0WeF85T9n5RcR5t5u1pYt9VB4914rXVaoWr/U4ZKbvmKgPNua/3WCZH154i1kBlYJ+nz+Xer8gYU7lWxMpInzXtfvyX9iB11iGKHaf0KAcmf8vN0GAwCJsmu4mq1+vhWexU3UR6A7ss5mG+V+a584Cao9i28g63etjKKnfe5lhNrLzDFa95E7JqfBE2oUUOOjretZ/yDlL2UMu2GA6Hpc1KykAP0d4YAcbzXg9QwHje24MUr55pn441u1jpGNYDFO8pO+60j/IUOHkmEvb+omX4RT1UzRt57ZtncufNM9uvOq7teFQlhjVlV1lcJbyDoCLvsF/kcHvW88uirKnbLzpsH+jVrptElAXloQcpPTgBE7eRZrOZ+Ju/swcRot/vp9ZYT5mYp6CuYk+kMswz47Of6Tro7ens/kHHqDdO1VTQK8NZiKZ9ERERERERERERERERJfFLy0gpeFpV5kNZD2B84qdGwDqHd7tdHB8fA0AIOkFNn1KthMeyzFujVbUZ3zxZAX2+vscLAEKNjGprsmhgj8ZWZ0oNHMB7LN2tmo559dkXSVvnUf7WlMibT56WTNkgfsa+0PHgsbZVmchqGewYUVM9j33i35QFeo9lpKxpqZZ5OByG8ZzlNDtNnc4KZpEXlCCPeZyGndIyPC94XspTlI3ymCLOL51b7H8dv8B4nNr5tbS0FGSqtcrIY5GnQRHms+p3zev5RZ571jx53ueHB4+F0LFn2QLrMhFRHGqyZ5kovXqMlF2T1fqB65tdkzzzOe1rz3yurEVVEfNAXSO1jDboRZ65vK5zun8Exm0wq3yIjFRERERERERERE
RERERJLIyRKqK1XLQGJqtMemrVk79lj1TTd3h4CAA4ODgAkGStlpeXU8+3gRP0VO85n1dVN/ud1VSonWzWMxZhD662uVnacLV3VadLG/5TtahZjFS3201pbPUz6yeTFSRgVng2vGVDfy/SKd5jO1TbBSS13Ko5s4wU27bb7aZYYi/EqiLL57BsG6idtRfiHEgGj1AWikyU5yPFMZXlc6dlV5twyzTkBenIAp/l+WMR6suZx0Do77K0j8+L/9RZz8xzXD4Pn5eyMlsZeqtl1TnFcci1aX9/H8B4PLMPOT9XVlbCHO10OonvgOl8ePPqsmicR8AQYpF+XIuCWmbYkNy1Wi3hR67w/O1+mf3MiiAv2MRZjJSFzmPbN3kBl9RSw/NNmlY+eHscLYdln3RNtSlFtD52H99qtdw9QtY+uOicjIxURERERERERERERERESfzS+UjlaRxVC2zDSrbb7fC9PR0fHR0FLd+zZ88Sz1RGRSMpWW18FZqysn5QWg7LtmnUsvMIk07NhoaRt1p6T9Pi9Z0yU17kKmCs1aDmlr8/OTkJ7UOth2oq5hEJymMrlaXM85Wxny1Cy+nNJ9tPrVYraLdXV1cBAMvLyyk/IPb10dFRIooYMOmvXq+XsuHWfiVU61SmDbzEg16EPusPdXh4mGKiNIKnlRleGFmVCVX6ICpLYe3h9X+P3c3TbmYlt1ZM6z81T9jIdjoObfuct5bci4hm/Q01eqn67QJj9ml3dxfAZG3imK3VasFagvNzY2MDa2trACbyk8+cReNcJaqQa4tgpvK023lMb5XR0OYFXUdZdsoKtcChzzhlpzLsedEYqy6nhy8aA6jyyu5xVBZYWa0WDl44cMsw5fmNq5WO5+87bWoO/duzGlK/TiC5BttyeH6eOu44TnV/mGXlUXRsTn2QOo+wtlU7Y3pBJoBkEAMuMO12OzUI2bkHBwdhkaL5BDtwZWUlvE8ngnW2nGWBmuYABSTN4Gyd8hz2vHavegxwAnsOq3mb0GazmQrn6WX41s0BMG4LzwkxazGsqr72efV6PSUcgbSjuI7FIiZB85qjWo6sMPXtdjtszjY2NgCMD1SsH/uVQnIwGIQFmM/XQ4zXv/ZQPW0/6WJi21wPSHqAsp+pSR+QPEh5G/QspQqQlk31et1VIORBTbPY5iwb33V4eJgaW81mM2yKKMd41fliF8CsBfV52Qyyna2j9mg0SvXdPMx3y8BTHlmznnq9Htqb84aHp0ePHuHp06cAJvOLz9ra2sKFCxcAAFeuXAmfcbzYudftdt08U/OCF/JYr0D+mnSeUHMhzwTTbvAA31QKGMsi73BlsYj5ZWWUHq5ZJ8r60WiEJ0+eAJiMPdZ/eXk5JaOrnGtVtMXzdgjz8md6uTW9snkKSSB5MLLr1/HxcZAnXioP6+4wayAke6DT9Zdl0zLye4LysNPpBIUtxyKvy8vLqUOWlt3uMYsGFYumfRERERERERERERERESVRmpF6XrSKwPwS7SkjpTSgdWbjaX1vby8wUjTxI5M1GAzcsN08DS8yLKsyUbyqRhqYmH+cnp6G8rIu6kQ6b40MtaDARNvCz1h+pZ5Vm+eZw/A5al6pz/Yc/M9yDp5lLmQ9W0132O7qmOtpgcqE3q2637w2UCYKGLMYZKKoAV9fXw/jiX3IOh4dHQWNkHWUPz4+TmlA1cHWhqkvG5hBzflskm2PkcoLNqEmEB6byjp4oduzzFg99u0sKBNORon9xrK2Wq3AXFAG7O7uhnJwLG5uboYrtX5se51TnmbP4jzWEmUKrCOymvadN7L6X013+F2v1wvz49GjRwCABw8eAAAeP34cxiXZxatXrwIAXnrpJbz00ksAgO3tbQDJdY7PVM2w1QLPCxqOn1BHc4J9p+zPeQYM0TXHBoppt9thHtlAHktLSwkzc8DXviuzvehw6fp8DbSlDDYwZjWBJDPNsURZsbW1FeTFebO+0+A8gpboviRrj6IhyNWcz5rLKcPEfSz3gPq/TeVxenqaGWxiGiizadkytQSxFh2NRiNlUaBWL1yn1tfXAUxkn66dynx5qW/0fWchMlIRERERERERERERERElUVi1WdYPwzspAz5jMIudc1mtU55zvGp9vYAF9qSsLA5t0nnSV/tLzznY+upUrdnwtNwE33l0dBTKff/+fQAT2/pmsxns5nmaL+oMb9/jlecsWH8NfYbnOEjNyerqam5YTMuu6dhj/1NT0Wq1MsNVq5anKIoyR3aM1mq1FEOjDuNeEs7zgNXAsq3X1taChujixYsAxlojtre1wwaS/ofAZFweHx+HdlFfDjuHdf6VaQ8dM3YcebbjarttmSt12rW+UTrubL8pc2r99jSwQFFQu1av11N9w/64fPlyaGOyGffu3QsMx87OTuK7zc3NwGLwGRqkwDK+/X7/3P07+B6boFbXJk/u5JVvXmHbPSZK/wcmY293dxeff/45gIkcf/z4MYDx2KNW9saNGwCAN998EwDw6quvBhnP5x4cHISxzbnHq/onzhsaCIPQABoc06wbWRBvnTsveOsVGWH1FeV3bFv2K61b9vf3w16Dz1QmaN7+QLY+QNJ/zsrjS5cuhe+4bpLlVv8p6ye+KGYqb58KpP1YF+ETXgRe+9urylllVWwqD65RR0dHYWxZRuro6CiMRV0Xs6wrgPKJlr1kvnaNVH9cjp92ux3mDq1cuA5duHAhyAWuc2y7fr8f6kS5dnh4mGLxvCAYeZhL1D7PIZodp9GtuDioiYg9YMxTGFpBpwceG9mjXq+HAcTyU9Dt7u6m8kexw3Xzohtetk+VzrtnTW49QLHcn3zyCYDxpkmf8corr4RFlgNVA1JwQpYdcEWhFGsWzar5vjhhVlZWQttzMnHRWl9fT0SNA5A6hGg9dENjTQIbjcZcnGM1YzfH2Wg0CmZX7Ke9vT0A43pzE8F6qwnmvKGmLHbTp5H62Acs68bGRiqSENu62+2GucUNPDeGFIJ8LmEDV3hlLAJVlnhBJnjN+8w7wOeZ4VindI08adtUs9oXBcul5bAL0+XLl/Hiiy8CGM99APjss89w+/ZtAMBHH30EAGHD/uzZs3DI4oGK8uLixYuJDSKQzB5vF/9FRpcEJv1ic/+dnJyE8lJ2WJPReUIDztiDtJqs6gEKGMsEygU69xOXLl0KB6evfe1rAICvfOUrAMZ9zndynh0cHIS5xs90/JQ9xBeFnR/NZjP0gR7ygHEdKR+uXbsGYCLj2+22G202a22aZuwVXee84BFWPmnwBd04AknTLGvSrc85jwiTNhAJMJH3ly9fBjDeH3Gucf3SdZZ1103sIuvgre+DwSClCGM/6Ia+CKoOyKYHGGuqp/fYw8BgMEgpA9V8zx6keD0+Pk4pE/XdHhGiAbLKwFMoqrKbc4Lr1dbWVjiwc7xxHdrY2Aj320BWu7u7qai/uufyousWUR5F076IiIiIiIiIiIiIiIiSqFS9xJNpp9MJ2iR+RpOQhw8fBu0ZT5cvv/wygPHJ0ppXFQ0/OAu8ENo2BxGQzHMDTFiB/f398JnVPHjsl
uZQIGbJGVHUZIynbJ7Onz17FspNjd5rr70GAPjOd74THJGpxaA2WrWe8+obLzyndQRUbSL7qd1uB7aJddKrslPAhJlqt9spjYgXxtlzJq4S6vCubcA589577wGY9MXa2lpgEMgosI46ducdzETN0Wy29eXl5VQY7eXl5dCPaq4CjDXP1IaT9Xj48CGAJGvAvmm1WqkgClq+MnVXDZxlnbwcFuqEa5koj0m1ZsyeGZ+mJdDAMPa7otDxZOcS5Wyj0QhBCK5fvw4AeP311/HVr34VAPD+++8DAH76058CAH7+85+Hvrl79y6AifnOpUuXgpaQDKRq3G1bVC3jzzJDZ99xTWLddnZ2AsPxpS99CcBE2+nJ7KqhjKSVRezD4+PjIH/VnI/sFMcQ+/Ab3/gGfvVXfxXAhImiJrfX6wVTQF7v3r0b+lXNh4GkSfosKGI+02g0Uky7stO0pOCzXnjhBQATuQ6kzbEXAS8thQaPsI793ENsbm4GRodQq48q3CBmhb5TrXOsVQjH12g0CvKR9WRfKgOxCLY3C1b29nq9sNZQrrF8L7/8cpAPNkXAWdYpVbhweCwUn6vsmQ3l3e12Qz/Y8XdwcJBp2qdBHrw9l2WOms1m6bnmuVZYZnZ5eTm4BpB1unz5cliv+Bnnfr1eD+XmmqTpiciOUp7s7+8HWWctrYoy1pGRioiIiIiIiIiIiIiIKImZGSnPDnRtbS04tlJLwVPuvXv38B//8R8AJs5h1CZvb29Xmqj2LNiTtWrUrbZEnc9pq62nXGo5WV/VLttQp+ovYFF1ffU91pZ6eXkZr776KoAJE/Xd734XAPDVr3411OnHP/4xgKQ/CDUURTRk02hhNBSmZQaUNbCaoHq9HpgQ+mlQU7G5uZkI36zfra6uht+pba7VOM2bKR2NRilN6snJSRhz1JZ98MEHAJLJVlkXzidN3DcvqI25ZaSURfEYXhtAQ/2i6KfB+vLa6/VSoUzX19dd/zlepwk2oT5S1k5c2Sd+p6H4PT8GLxQ/62DDcGt7WTZbvyuL4XCYSmyomkn2w61btwCMGU6y0mQzvvnNbwIYy4Qf/ehHAIAPP/wQwMQP4vPPP09pPi9evBjGpfU7WqR2XRlfsjj0/7p//35oA+sfmsUCVhksSMeGDU+vfq1kosgcHR4eBnl28+ZNAMC3v/1tAMDbb78d+o4MD2XJp59+ip///OcAJn344MGDVMAknW/z8JHyrDCWlpaCPCPbxHofHx/j008/BTBhOL785S+H351H0B3r/9fv990wzpaF595ArSWsf+FwOEyFhl60L5GF+n2pJRIwsYjY29tLBebSVBeLZqLy6sHvjo6OcOfOHQDAD3/4QwATef69730Pb731FoAJ60YZsre3V6hvZpEXykRpsmZ9nu4fdI3ymChe+bcGmeCzbbJdZRI5Psv6Eyl0rtp1UAOzkHViu1+6dCnIZt7Hdx8eHqaCI1F2PHjwIOEPyvax6VK8fU0eIiMVERERERERERERERFREpWol2y4wk6nE06O1E589tlnAMYaAGouCWpHvehWCs+2etqy6vNsRDY9hWooSS9sIstvk4RqFDmP6fDCaXtlnKV+FlZztLa2Fmx+aUf/rW99K9TjZz/7GQAEBpGa2+FwGLQAyrIRVWoCVRvnRZ6hxks1J7aeqtmg5lajxwFjhoraTY3oZ7URan88r3CtXvhz1sVGE1Ntk9VOeZEIq4ZqbmzocdU4erbcLDeZXfWBICPFKzV/Ok80SZ9lejx/kyLwWCfLhHoJMs8KC+tFBuXV+kG12+1MRkqTDxeFRgi1ob8p0/b29oLvDZmOJ0+ehIhvZKbIDrzxxhv4xje+AWAiH+g/9cknn4S+pazv9/spnxeOaS8p8Txhx4gmdrQR0xYJ9RmjPFMmChjPB/5NXLt2Lfh0kYmiPH/99deDPOP8Igv1k5/8JPxNxrfb7YY2YP9o+8wrah+hVhN8F2U0LViWl5dTLDb/V1lwHvBC6at84xxjuSkzOp1O0LB7Uchs0mvvnYuAfZe2t/qA815l7BWe72qV9SiyV/RSIQyHw4RvDjDZ5124cAFvv/02gAlrT5/ld999N8i6efuNDwaDFJunljPWWkJDnHPvyvGnPlI2obyuF7qnsD7QGmK8LMuoe24b7Zf7Nt1D8/mnp6dhLpF1Yt0eP34cGCjL3j979iwRhRRIrsF2vS3qY12pVFRam4W1G+0rV66EQcgFlVS2bprnZdLn5RDwQlzb0Mynp6epwagbWN7PjlCneisE1SFwkbkgbJ6f1dXVRBZyAHjnnXcAjAXED37wAwCThZcL27Vr14KQsSZLVUH7xj5bM8DbQAC9Xi+0A9tb+0QPTsDkQLW1tRU+47hcWVlJmaSpoJjHGPUO+s1mM5SNc4f3HRwcBDNaLrysU6PRmDpTd1HoptSaaqkSQtMfAOP+ojC3h6adnZ2wKHGuqeO7J9yz8i41Go2pTPt0bFkTHQ3Jn2fqqXPDO0AByc07F5BOpxPml35m7y8KlTtZfaTmo1ygPvvss+BwTfOw119/HcBYBnznO98BMAkW9MYbbwAYyxCGTdd8Rux7m+ek0+lMHTa3LDRELzesDKhx8+bNRF4tIB2K2nteVVBzKXtQoLN+t9sNawvn+Ze+9CX8yq/8CgCEwy1N/Or1eghUw4MuD77vv/9+6B++e2VlJTzfpoooO5emgbeH4PzTYAbW5NIzNTtvaJmA8X5BzV+BSb/WarUg4+lET+XF1atXU2ZMnpKPWMRBUpXnVnHGep+enqZSW+g6uogDYNGUMJoLi0oJjjeOv6997WtBLpAo4Aa91Wql3ADKmpWfBa5NCvvOrMASlO0cb3qgsubeeqj03F6sIk/3z9PKcW8cabAWlpt1Go1GqTQQVAY9ePAgHK443/i7wWAQysi5tL6+ngqcoeUqIk+eD4kTERERERERERERERHxBUIljJRlbw4ODoIzKE991EZev349mI9R20dNer1eTyUYm5fWQrWz9qqMkYad9RI4EtZBjto8ZTVUg+QlpeS1bJ3LZiXX/uKJXZM7AsC//du/BdM+tgsDU6ytraWCapxVjrJ1UkdGm5TUmpABSS2YdfrkfV6oTw35yb+VmbIO8vruKrVNee1Tr9fDeCL7xP97vV7QjrF/+H+/3w9mJcoGVDmndM5YlkOZHGrWdbyzbGpKBozpdxuOlGg0Gq4pnO0fb4wUgZrqWYdeTYppmXOPSdTQ/DYAh5pFcIwp+2TNqjRgzbTBJlS75rHwNvzt/v5+0PLRAZvs9JtvvhnC7pMZIbOzvb0dQvFrIl+OActIeZrWWZBn1lOr1UL7UfNPc0XVtrJf1Zxm3tD5wj7gPGBdNjY2AjtBdvqtt94KJpgMksHfffrpp/jJT34CACE4CNnC3d3d0FbK1PNvjst5m/Op7NCk3FY+cJxcuXIlyDqyZRzHvV4vlHeRJm+EavDtXF9ZWQnl5VWtWixrz/mytraWCrCjbIfVmFdVb299s8ESVB7r/o/l5/1WxqllwTwDipWFht3nGqvWRmR1uU/SwAXW6sNj
MmbZM5BpUpNEL7CEF0iIc0lN+ni1puu6B7dWTO12O5VyRoOiTNuXXpAMPktdZzTgG/cLykQBYysIZfCBiXxYXV115VuWaV8MNhERERERERERERERETEnFFY1ZWn59ITN746OjsIpkac5niTX19eDJs0m3lqU3SyRleROT8e8ajI9dWwFkiG3GQxAAxdYu0s9uWcxU9PUw9OKe9Cw0+psDkyS0D169CjUgdplhki/cuVK6FfV0lvM0pdq+04tlg0BqyFm1fnfYxB49T7j1QYQOD09DX1tNWqNRqM025GHs7TobA+bSHhpaSn4dVDDTo2aJtTTBKxZ/TLL2PMSAWoCa96nWi8bZILastPT0/As1lt9hjgudY55miSgvEzR9soKZ67zVzW0XrJdIKnxsj5Pyj5pHa2PlDr8z+JPZLWJnrMvy3pwcBBkHpklav1u374d5DiZKTI7nU4n+OiwHltbW2FNYL8rczwPxidLBlrfSVpEbG9vh/6k7xAZ+5OTk7mvTTr2OObYT5zTFy5cCPKYrMzNmzfDOGEbM2HtT3/6U7z77rsAJrKd2uhWqxXYd8qQzc3NFPtBzCvlA5BmcYfDYbCS4HikDNvY2Ah1t3sIDdRxnnuJRqMR+kTLwbHH9YTtfnJyEn5rfcdbrVaun0aVIfjznq/10HZn23P8kinY3d0N91kWQBmp82AO82CDjxEnJych7YgN/X18fOwGeaoSytRYNp/Xo6OjVFC0/f39RKoevR4dHYU9jmWCdN+h1j3z8Lf2AiGpVYDdp2qKFDLW6g/F9cTGLNBQ6irzdK4BycBwMSFvRERERERERERERETEHDAX4+d+vx80mQRPtI1GI2hciFkiv5X9jfcu+5kmbVStOk/FXlI9G56adWy3226oSutjUYV2pmhbqI+CZWF4cn/11VeDtvL69esAJskpl5aWUhFetNxVaJjy/EDy3qX29p7Nr7XDZT00ea0XttuyLbMyA2VhmQRlFPi3jRLF74H85KdVaQSt7wvbX8P9sxz9fj9ol3ilZmw0GoX+t0mGl5eXU6HrNZmyZVrKygdlobwkzFpPfX6e/5H63njsk2U7l5eX3Wh9fNYs485qrzVakbV573Q6mX6hd+7cCZrAjz/+GMDEf++FF14IsoNtocnWWXe1Y/cY7XnD9lOj0Uil8lgklJXj+KemlGP96tWroZ2pUe31eoFtIhPFsMwffvhhiBDH+cgxdfHixYRWFkiGOLcyT30zqoZldIbDYRh7ZNmIdrsd2HeWkXXKSiNSReoU+6w8eOtJu90O+wJqxblP0gh3lrVaXV0N9VNNeZnyVAUv1Q3HKseXhpq2jCrrBKST3D+vUJbUhggnhsNhSmZ4e8xZoFYl1tJEU8JY1ml/fz/1mfrD2si+hK5pHhtp6ztNkmVdU63lh9aN1ipkqXd3dwPLZhOIr6+vp/bjlHMXL14MY1GtezivLJtYVN5VepDSxs6bHLMuUlVlkc8L18zPNLw2BQUbl0JiZWUlCEguSJqdXBcHIJmDaJGOlnYRGY1Gqc2NmhBx8eaAY5scHx+n6Neq6Xlvs2j73Qt7raYG1olyMBikDiSE0uWaLyhvYz4PkwQ1kTvrPoLlpZDRXE1ad6D6A68+yzrAqnOszXHV6/VSoc01rwPnk5UVepDSuWYPUtP2U575nsJuwtUEwI5JDYZhTfZWV1fDZ17KBGuy2Gq1KjnAe+ZANq+SHgBZNg2CQBlAx2sejO7fv5/KI9jpdMK7WF9Cn7UI2AM/x6GagWq43EVB8/HopgCYKLEuXrwYxgI3F/fv38f9+/cBTIKCMNjTzs5OGMecL/osm5fOM9H1ch1WWd8sk3TKDG769B6W267F9nmLhqeo1c0Z5xH7Qk20bPuelaNvkQcowDdRVIUi11sepPb29sJ3lNk6989DeZKFsgftLBP8ecI7SLHNdZ9qU8Ko+R7XZD08eXsDILlHVnNHmwpGDyFlzRqVSLABfjSAhh0rrVYrpWTVHK7cP3D90cOT5gllPayMU7lfRO5F076IiIiIiIiIiIiIiIiSmG9cUwdFNe6LKAfhnUY1USWvNhSzasF5OrbBANRETZMN5pkLzQNZGhTrWKlOpPzMJkY8OTlJOfSepY0pq61RbYGnFeE9NsGpmkVZbY06NNoEgVpfQulme606/LnirMATiuFwGPqFmnU1qbNOqfN0GOeYtmVURspL0svfKTtqNZhqSmLNZzudjstEAeXr6zFShJof2fmif1sTuVarlTLV0/QInrmfhjvXZ04T5CQvCE3eGFtaWkqwU1p+DcRizYN3dnaCNlQd5/lbll/NvRe5Jtjw/GQ8lBWz5i6LKB/bRceLBlQBkikrWO7Hjx8HRorBQMiotVqtVAJfNbNipqltngAAIABJREFUv6r5kq3zeQUFsJpylYvWCX7R7EwReOyUstRAso2LmBBn/b9oeOwp5YGOTxuES+vtpX45b5x3u+ZBGRvreqIy2Aag0CBbZYIkNZvNVKoRtfix1hJ5SaKzoH2ftd5q4BYNY26ti3RttQFbdD31LNDYjrZ9iu4fIiMVERERERERERERERFREqUZqSqdNs8DeQ6A1kYTSNqUUmPEEzC1hLVaLZHEFZicgJeWllIBJdRHymrdqm7Xs56XpelS9sbajar9aJ4GZxbtjvokWd8T9UGx2pHj4+PQL1Zjrg691rHXS6Doaf+1n+Y9B4q2n9Vkerbnnk9eldo3bQv7/uFwmKsFIqhlXl1dTYXXV/8dath59cIDaz3L+HacxdAA43FhnXCVpbLhtdvtdso3SgNM2GAT7XY7FYZVmalZEqQWSV+h2kpbd2WoWF4vxD7HG1mTXq+XYreyWMRFwSZ57Ha7mXI4a65XWXbV8PJvTTTLspJtok/kzs5OcLy2CVBXV1fdlBz6bCAp2583psCbv7MEqPKeUzWK+tAoG2B9qfPWmvPoEy2Hssqcx5z/msaCc53jUZnE5zX8eRmU3V/NAl3D7V7Mm78Kb+8EJP3k7b0q49WP11pXeGxVWej4Z5uq75V9rq6DtoxqlaSplfhsa6Xj7cd1bBYZn1OvyPNwuj+vBdWaMOghgousbvqscGg0GsGpzTqyqSmjbiLtYD8PYeK1t+YQyHM2zot+WAW8BcYTBta0Tzem1rGy1+ulIsNo2W1EPHXstw6Wusl8XmCdxL3v5v1uhT0EAUnnUmuWo8JS84gBvnDXzaYVwmUFoQdr8qAbfy/6nY4bvXp5ofRA5eWMss/QsVllRLmzzP6yovwtLS2l5gbL6ilhhsNhYuHS300T7WlaeGMh78Bw1hyv0rTMixxlo3KdnJyEwyk3rCcnJ6ngFF4wE29T4UWPfV4OUPPEImS3135VBezIK/8i+k2VmxpASNHr9VLKElUC/SIcpM4jYuJZh1AvGqldTwk1q7YKa1Xo6CFF/waSB6oqDlJ5CgfdB1jFtyrqbHAwJUms6aPuC7NMIM9CNO2LiIiIiIiIiIiIiIgoiUqCTZQ9lZ+nJl81q1khD1VTRy2Lhs7WULHA+FRvndpU82KZHc98Yl5Qk6oy7V7
WnKLqPvX6ycvTYx37+/1+psmR53TpsWzKfFmtuxdk4HnGeYRpte9ULZin7VYtE5BkOwida5aFVPNZa0LoObHmQRkXGzzCBirRsutY9JxxrfmemiN4oc7zxt28xl5Zsz97n5bVy49n+2ER+YmeF/PbMs/S4Cx2HTo5OQnsFNus1Wq56St4taagasZy3gElFo3z2Hvo3Ml6f14QrrJ9sog6qpxUdwAguc+xZmPKhj5vFh3PO3SO2r2Y7l3sekpZAKQDIqncLZr/0AuOxPeUNTvPs2jyxpiu/XZtVIsva3mkclRzRfI6qznzF2M3GBERERERERERERER8Rxh4eHPn0dY3xINAataFrUdBZK+AVYjqNpFz19g0RrAotqfvPJM6w81Sx09XyQvDDX7ot/vJ3w19HqWU2He8z0/rbKhPheNRbNRRfrZ0/h4if2yQqFqu3s21bP6hynraf3i8px4lSnzbLctE5XnD+Vp22ZJxDmtP+tZ/lNZ71F7d9VAlw3kUAXKBEc6bw25xxTxMxuOHUgG7bBsqI6fLN/J58UfKo+N+UVDVazTecObK3acjUajlIyeh2/9Lxu8IEBeAC7P6sMGaPAYKbXy8dYyj/W2909TJ/u3Xq3Pk1qNWSsJ3XN7jJQ3TouWLQuRkYqIiIiIiIiIiIiIiCiJX2pGKiu8NwA3ups9zfMkqwkUPS22tUP3NIHnpZ0p896yGtt51cnTbqkGnO1s7YDVNy0vIqEiK1ndIiONlcWsDOCiymHb1OtP+78X3cdjTvSzaRgptfX2/IOsX5AyUjZRYafTSWnvKC/UDt1jGDz2bZY+KsIsecjznyryLC131ET7UFnk+XACySTJOgYtc0no+lVFWPN5sXZlx8Z5jqUvAru5KOT5UUY/qGqh7L7nz8TvCC/CKmWHZ4lj1z5lt9Rawq5Xs6TmKDo+PDY9ay3z5Ke3984rR9lx+0t7kNJJ7pk+eLRh1iBR53Av79C02ZLnjbKLVlXPK4OzHHQt8kJhNxoN1wle/9fPvHfoIe55WyRmbf9FhwTOel+eMM5q97zNYZl20XdnmfRpyF89/HhZ4AHfHEIXNi+wRJZJzLyCugDVmf2VeWfZ906LeWy8q+6LvMXeU+DoNassnknl8xi8oKp3PS8yuQpzoS8airoFlMEvYjtNC12PuM/0lCNWQdloNAoF2fLMBG0wr2azmVqv9Fqlu4M3nlgPNQf2rlkKcm/dKhLw4iw8nyr1iIiIiIiIiIiIiIiI5xi154EZiYiIiIiIiIiIiIiI+CIhMlIRERERERERERERERElEQ9SERERERERERERERERJREPUhERERERERERERERESURD1IRERERERERERERERElEQ9SERERERERERERERERJREPUhERERERERERERERESURD1IREREREREREREREREl0cj78g//8A9HQH529FqtFjIbdzodAMDy8nL4m5mF9/f3AQCff/457t27BwDY29sDMMmofPHiRVy/fh0AcPXqVQDA6upqeNfJyQkA4PDwEABwenoaMh3/0R/9UaEUxKzTWfAyGttsyfq/zaCsYLbnIpnoFb//+79faZ3KZh6392uf55U/7z3f//73C9Xp+9///pl1OqsNi+RIm+UZ/O0f/MEflKpT0X6YNqv7tLnh9H1l6/RFQJGxN0t9qhhvZTDLXCpajmnqpP/b9eIsFK2TyrysZ3tZ7BuNRliveCX6/T5OT08BIFwHg0H4nnKcv+P/Z2GWucRy88o1RNHr9UJ5uR7y/mazieXlZQBAq9VK/G4wGIT77Zqmz/D+L1onb20quw7N+ruimKVOFovMz5lX97J1Klpu7512rOq+kfOI420wGITvOKabzSba7Xb4G5jMsdFoFJ7xu7/7u2fWSevD39mrvmdlZQUAsL6+Hv5mPTi39vb2cHBwAGCyF2Ud6vV6qszeXPVQ5bjzUMVYLDu/qtw/6H4zb23Jk2He7/PgtVne2hQZqYiIiIiIiIiIiIiIiJLIZaS8U5n3GU/e1NS1Wq3wd6/XS1yPjo4Co3R0dARgwmQtLS0FjQQ/a7VaQYvhnUCr1PycpQ3jiZdX1XDYcnjsUxFGSrU4VSLrmUVO6JZt81iT50UDB1THDFRdpyKa+ixtShmNkPeeIhiNRpUyJl4ZtN5Wk5QHzp0s7dQ8yp2HvH4rM/4WXYcy7zirHnnaviwLhqyyVFl3750qg7k2UYNM6HrCNYfrVtYctJ8tQg5S462adc4hllfvZRn5Oy0zn6FtNq9xeF5MVNV1KlLWIhYeVeGsuTXtMzx4LGXWu4bDYWoe6fjkeOQ8VEsmzlGWq9frhWdNC7veDAaDUHZ+VqvVQnl45dpzenoa9qyWadP7eJ3nXPplhbdm2P2D7i08Jr9In5Td1+YepGzhvP+XlpZSE6LZbIbPSIuSCj04OAhmfsfHxwAQDk+tViuYIXBCNZvNlLDn/2eZ1BVFEcE4HA6DEMijitWkAki2j15tR+kzzgPehsAeHPUeS1sX3XRVUTYPVZpVzctES02Bsg7l2u56eNC/y7y/7EavysVeN2e2vsPhMDV/8jYEOnfsglVWSM6KLFNnT6Bb2PKpUmXag3NRTKOAsJ/pwpRluqJ9q+YWdlGroo55Zc46SNHEjeuOymCWt9vtJq6NRiNhZmSfO+8DlM4DeyDSTSbXW6Jer4eycXOq/WYPXkB2n8+CaQ5DVRyg7N9V1KWIAuAsxcq8x8s8nu/V26s/5363203NI5ar2WyGPd76+joAYG1tLeyb+AzuG/VZs5Rbnz0cDsMz9ZCmBzsAib0s9672WaPRKMyrsqZ9EUnkrZseGWH3D965QE0w7TOrQOzpiIiIiIiIiIiIiIiIkijESOVpdjxTCXVq5UmflOje3l441Vumo9PpBEc/agOWlpaCli1Liz8tspxps5wkLT2tjrqWleP/rVYrtAfbqVarpZ4/L3PFs7R6WZolra9qkYCkVlRP/vPWgnmY1owqzzRrmnIUgfY/YVmZXq/njm+rkVGt9CLM/srAM6Hg32rqYeeRZ7phAwM0m80wDlXDbrWA8zZRtJ/pvLVOr3nOrx5T42kyz4PR1c9s2ZQZVPMbXq05tq4T8+ojb07b9m40GoGJ4hrD/tLgRTboRLPZTJkb5c29WedWnim2ZaRqtVpKg886NRqNFCOlJo1WI6/yfJFMlH2Xtw5WaVEwCzzLjKJWLXkmr4s0jy+DPCZK9zJ27pycnKSYKM69tbU1XLx4EQCwubkZvuMzbFCHo6OjqRipPHPjfr8f+o/P1r2cDdLS7XZD+flcllcZepUTnrx/HjAvq5tZkDU3vH2qWgjYc4TuN6x1D5A2cdYxbJH3nSIyUhERERERERERERERESUxtY8UsbS0FDRcGrqS2kme9MlCPXv2LGgbeNLXEJRra2uJZ2kgB88naVp4WhY9CStDAIy1LNbeV0+7lpGi/e/KykpKiwEgpbFVZmqRWgur/dZ6UxvEuqkvG+uiWqJ52aFnIUubl+WLodoF71rEmXwWqJ0ux4l1mNRQxrwqS2Xr1mg0UmzBvP
ynzoJtU88WndeTk5MwvnhVrbj1NeR86nQ6iUA0evXqVLb/8hg771k6f2wfeePTmyN8hueLOG+t4FkaccuAsq2XlpZC/3Kc0udV/XTUSsGOeX1vFb6uhGoj7ZxotVrB6mF1dRUAElpw1okWFBybnU4nyL+8fqpCdp/lI+Cl0+A6xT7g/61WK8FOAUnfMNbPe3eV8tyDZymgfWffX8Q/NE+zXBU8TXme32aer6i37nts1XnAk6Uew8KxpuwRMJYDrAPn3IULFwCM09tsb28DmMj2brebsFwCgN3d3fDsMu3hzUf7mVpAqHUE+06DoPEe6zelbDzvY5/a9ApVYtr9URFLnLz1p2pZkLdWevt8fqb7AGWpeI+1drHP1t/puC7i1+whMlIRERERERERERERERElUdhHytr/q++Ctb1uNBpB80wNw7NnzwCMNQzUmlG7qdFbqCWk1uzk5CTFRKlWp0o7bj0JWy3LyclJgiEAkNBcqO0vMLH7XVlZcUN5Wg2sp4mrok4e8iJvaXRF9iGj6lAbs7y8HE76lp3Le19VyNOiqw+B9UnScKx2LNVq5SPjle0ntq1GtWSbqraebcp5cnBwEOaRtTnvdrupcK2z+E9NO/Y8GaFltPLg4OAgMNOqwQTGWiSrPadGc21tzR1rli2oIpJf1jjIalcrR3ROW020h0VGeSqimVTmlLKaY6vX66VkO/+v1Wqhv5SZtzbt6oNZpczT8WfZo1arFdYYyjWOu3q9HsapTdExGo1CnQiVGXlsZJWo1+spTffS0lJ4v51L7XY7yDrL7ur9lhnnc+eBPNbGhssGfLmWJWs8VL3+qF+MfQevXnRFLaf1GR0MBpn7nEVaqeRZZXjse6/XS6xTABIsJ/dDV65cAQC8/PLLAMaMFNc+/v7p06d4/PgxAODhw4cAJvMQmMj4svXJsrzQNtfogNafi/NlMBiE+lAeeoyU7oez9hTT9Geer5pXP/3fW5Psd/aZyn577E2V8PYe2k8sr5ZHLaSApFxTyxcgaQHmWSzN2j+F8kh5phc6aFghXpeWlkLBORFI0e7t7QUhz2fowsa/KTyPj49TZnZKh89iumPr6Zm1caE5Pj5OhYplxy0vL2NrawsAcOnSJQCTg1Sn0wnP16AZnvmT/W5e8IQ9Bx6F4e7ubugfUvCsU7PZTAUM0UPtPA9OWZ+rCRLHIevGsXhychKENttdTZbsBMvrh2n6iGOpXq+HscT2Y9uurq4G8weW8fDwMMwfXtlPvV4vNT80uEnZUKxV9J2XZ41jn22wv7+fkAn63WAwCOXm5nVjYwPAuN2tuVm9Xg/v8hb9aeAJV30vF0s9tHsbJyAZDtwG1uj3+wvbOHnP9Ewd1JTXHqAoF58+fYpHjx4BmJhts322trbCeKZcbDQaiXkIVBc0KMtsRR3H2V+dTiccoFhGziUNbMQxSXmxtLQU7ldkmUbPiiKm9GrSwrbkestru90OcsEqbwBkmltqGbwxMg3y1gd7gO33+2F9tcFB1CRx1jJOM8/UdJX9b5VZ7XY79Z2aYduNrQbf8YLw2PunLXsW8syWPdNL7S/OH84V3r+1tYWXXnoJAPClL30JAHDjxg0AY7nOOfb06VMAwN27d3Hv3j0AE5nCZy0vL09tKpc17rIU5mx3vo+Hp9FoFNYha/an6XEoM3UdqwJaj7zARN7e0iqQdf9hD1J8pprM61jwgmbxvbOMSXuI0blhn7u8vBzaln2iZxKruLUHKlunWfspmvZFRERERERERERERESUROkjvj0JN5vNRJAJIKkxotkHtQ6a0Iynep4oNzc3U+YTXiLcqkK0ekwUMD698iTrOU5aM75Lly7h+vXr4W9gYjZSq9VcZ3prwuCFn52lTh68QAUEtUmk1nd2dgIdzzrxenp6GvpV+8ZqjKo0u/SgmhnVBLIc1ByS+Xj69Gn4zI49peEJL1ztLGBbqbkC5wM/u3LlSmh3hoe9cuVKGEM7OzsAJqYPT548Cc+wY0nZYmVQ5uU87jECgK/5Ozg4SGkk2Qa9Xi/Vh4THgLfb7cwEfFVo0C2r0Wq1UlrylZWVlAZdHZItG8P5dnx8nDLbVO10FQ7oZzFRrKs1p1StOvvmwYMHAIB79+6FfqPm8tq1awCAl156CVevXgWQtCxgX2o9WZYqzMg88xWbDH15eTmx3uj9AFLO7uwnTQxP6Fyad1AQQvtJAxyxDiwvy9/pdEJ7836O09FoFPpOtcFsxypN+zyGl+j1eikmrV6vB+uUy5cvA5iMM8/UmWg0GjMFpSgCjmMNhGVZ8k6nE2QD66EywqYC0D2BDczT7XZTbNU8g1PkMTjWsmBvby+MOdadFhWvvvoqvvzlLwMAbt26BWBiYfD06VN8/vnnAID33nsPAPDRRx+FtZrtwrm6vLwc2rFsXfICJlgro+Pj49SYUvZJLUe0zt6eTseHV66y/cVyq2uA3Y+PRqOUnFKrFRsQSNkeNX9mnazrQaPRSJnaEmp5URae2aKa+9o5oaHyWUZ1QbEWcbzqfsJzcZnWxC8yUhERERERERERERERESVR2EeKsKEGG41Gyo7+9PQ0aCyUDQDGmgye5nmlHf3GxkbQhqrjedWsDetkHT41kZw9uavNNrUk1LrevHkzMFJkEXhqPzk5SZU/6/nA/Jz5+Gwg2XfUkD958gQAgo3ycDgMmqWbN28m6nb//v1QfmoIqnZMLqIJ8ELvNxqNMPaoPf/ss88AjMce+4514VV9CTSARpV26Cyrhnnl/CAT+Pjx4/AZtXgvvvhiGF/U+tPW/P79+6HP6LPCZ2tQE/WbWlRiVGUGrI+Qhnm3/ogabML6dWiyVy+Yw6z95WljLYOrIbSV3aC2kpoxDc9qE5NrOgiyu/zs6OgotInHxlcxJj3neMuoARNm49NPPwUA3LlzB8CYGWUbUD587WtfC/9zvLFuOzs7oX6sm767St88byyw75SR4lX9bWwADX63traW0raqJtMLiT4L8nykbHCnZrMZ3svyKiNl21v71zJS+m4buGWauuX5XvH5agHCz7a3t8O4osyjVvnJkyeBmaes5lhUPyT7Pvv3tOA40CA6Viapry73ORsbGykZoUlfbXADPvv09NRlc6tkrQndf9h+Oj09Df5QnMvHx8dhPJI5fOONNwAAX//61/Hqq68m6sk16sMPP8S//du/AQB+9rOfARjvEfksrsu0+llfX0/49pWtE5BOxAqkU+ocHh6m/Ki1X7gnYv9pKgH2DeeU+rZVYSGh813Xc62bloPQxMjsPw0MYgM5KMNjQ8F7FmiEWo8VhcdEWXkzGAyCDGOfLC0thXOD9TPn51pPykMNGqeMvrVcKItCBykvko86JFuH1V6vFyYaD1BcmLrdbhiMXMh0cOoBhPdnCatpNu9aJ0uXq+CyQqrT6YQO4qaWQuLWrVvhUMVBqLmA1JwJGAuhLNMEXSinrZsHNYPT57NM3IzTZOzGjRuhfrx6po/8rN1uL8zMRelsGxHx2bNn4eD00Ucfhc+AsXCm8+vrr78OYCIQNaCDdcwE8qMZFQUXe6X1O
TZ4kLp//z7u3r0LAOH65ptvBhMJbi7YJ9euXQuHKt7P+u/s7KQOvLoR9w5UVfSdF/DBC9hghZeOSz3sZ5XRO+wsAt5c6nQ6YcNEga4RnmxUNY5Jz6RUlTzzNt/x5DjH/bNnz/Dxxx8DAG7fvg1gonDZ2toKY/I3fuM3AEw2UI1GI5jtcFw/ePAg5YyuwYmqMNG2/+u6xX5aWVkJc579o/PeLryaR9BuOLTcVcq8vI2/OkZrREW+X3NiAeP1iO3OZ+nhSU12eM+8gx3ZqI26BnLuvPLKK3jzzTcT5eWYev/994OMYx9qbjm2j26Sq4zqp5stazKl8pbvZBuvrq6GPQQPChrIifLDRiFrNpvhGbqv0AiaWpZplC3eOLZ54g4PD1PKkFarFfY+X/nKVwAAv/IrvwJgvMayDtxf/OQnPwEA/N3f/V34mzKi3W6HZ1lFp0b9LAtbt7yD1NHRUZg7Vkm8srKSUPoDSbM/G0hIowJmBSIqA0+RZw9U6laj8s8G0aEiYn9/P9zHMUb5OBqNUpESm81m6t1Ev9+fqY9s26ic5RjXvuHcf/HFF8N9wNgFhe2hwdP4P9uHbaIyIy/3aB6iaV9ERERERERERERERERJTB1sQrVhlkE5OTlJBZngyXA0muTjoLaBp/yVlZVEFmkg6WhpTStUM1wUnrmR1YwpPckT+cbGRmCiXnvtNQCTkJ4vvvhiOMVbDdXBwUFoA2oBnj17lgrfWoXGIg+ek3K32w1UO013eEq/ceMGvvGNbwCYMHAffvghgHGf0sxCT+7zNkm0/9fr9aD9YXvfv38/lJNjkFqu3/iN38Cv/dqvAZhoAqlh393ddU2PskJ9TlNX1TB6miRgrHUlK0gzqvfeew8///nPAQDf+ta3AIzNJoDx2CO7RtMK1vfu3btBi6uMsGVblV2ZhdmxbaLPtVrW1dXVTKdeNe2zJnRq4qFtWJWZoqd9suYKKneUVbNloKzpdDrhO81nASTNw7xnTashy4JlorQNWU9q7z7++OPARPEzBpz59V//dfz2b/82AOCtt95KlPX27dvhd7weHByk+rTqACh5rJ3mUKLmn+NI+0bznAHpfD9APuM6L3ipHtTUh21p5eHBwUHKVEllGvtfx1nV7KetgzXBrNVqQbvMtebWrVthX0Cm/Z//+Z8BjNkM1oVrlO4lOI5tvauC5hCy4cCVeeYYUgd3jn0yb5TZGmSIFjqaCsauQyo37J5JmZCiUHM+NV0EJiaVBwcH4R2U1deuXQvz/5vf/CaAibVHu90OffdP//RPAID/9b/+FwDg3//93wMTxXl49epVvPDCC+FvYMI4Li0tzdyP1opA2RPWS/tN03GwPjZQjQad8HKDVWVyDkzmuwbesMEgNOgX+08DnpGR4r7v2bNnobwc1xyvzWYz1FOtWKwJuFqiVQEvbQDBOb2/vx8+55ihlc6NGzfCZ2oODIzXMWvhprk4pzVnjoxURERERERERERERERESRTykQLSvlGqDbM2yYeHh4GFoSaTJ+JGoxHYG8tIaRhjz1/JBpmYRguoIc+tA7yeRnnqpkbk6tWr4cRLJsoGYQAmDByvDx48CEEPqIE5PDxMaQdn0agXDXmuDonA+FSv/jTAREP2zW9+M9g8U1PBU/3jx4+D1oIakbO0ytNqZDxnRKLf74dysGwPHz4M76LPxn//7/8dAPDbv/3bQVPBepOx+fjjjwMjpY7pVfpAqG8ItTnUUPKqvmZkCe/fv4/3338fwMTG/Fd/9VcBAL/2a7+Gr371qwAmfUeN4NbWVtBu0kb9yZMnKcdu1aTNmvQQQGqceRnpG41GGDuc/8rS8nma9BoYawCtNk5tnL1M7GWg2ng7ZjVYjLW9fvToUehDygwNe2ztyTn/1d9QQ7R6SXpnhfrXWA336elpqAvH3ccffxzK9vLLLwMAfvM3fxMA8L3vfS8El2BZ33nnHQDAv/7rvwYHcg16wPZRZpZlmKV+WWH3R6NRqr469+xYPz09DX2giXj5rCxn6KLlKoOs32o5dC2264iyMqyLBjSyv8vzbaiKmcpiutrtdpgrZDzb7Tbu378PAPi///f/AgD+8i//EgDwwQcfBJnHtZiyHpj4+XKO9vt9l0UEZrMs8JhjQh3kua7s7++HOrG9uRfa3t4OQTV4pTy/ePFigp1iua01wyx10n0R917W72swGIQ5zDK+9dZbgYliX7Bcd+7cwf/+3/8bAPDXf/3XACas4pMnT0KdyMTdvHkz+PvaJKuauLws7Frg+elq4BPKLPYb9xhLS0spP1gNLKTBknjNk99l+0kT5aplBz/TeuhnXrAjTT3C/tZxzTryO7WcUms0rUetVivNSmkQjiy52ul0Eiwwy8+60gKJwbleeumlMKbIcHMP/ujRIzdJL8udl6A8D5GRioiIiIiIiIiIiIiIKInC6mfLSNmoWsCERdrf3w/aTZ7qqU1YW1sLWnIyOTzVa+hqjU5jGalZooxpklCr9dVQj9R2U0N28+bNcOJllBCWv9FohBM+T77U6t69ezewAfRT8cI7q2Zr2sgnedAIU2zPnZ2dYCvL76jZ+/a3vx1O9fTV+eSTTwCMtRhsM9WElk1iloe8sLmaqM2Ol5WVlcAU0h/qt37rtwCM+41RyP7f//t/ACa22xqc0omIAAAgAElEQVTOmeNzVp8hC9UI27DtfPf6+npgaDgvPvjggzCe/vEf/xEAgs/Uj3/8Y/z6r/86AODtt98GMLEV3tjYwCuvvAJgorlaW1sLfe7Z8M+SWsBq/DRxn9p3s2w2YhKvqnVX7Tmf5UX0y0qoVxY6xlRWAMnQ7dbOWkPd2qh0Kk80uhjrYMuqtvVZ1zLw/CpYVvb9/v5+YGep0QcQIqeRifqd3/kdAOMIXdToMYzxD37wAwBjZorykJrl1dXVVIQ1omrGg9D1wWNvCI3MZ/uV407lsudPNG+clYrEW4+B8ZpsQ2drlC67Dnlh0Kssvy03MG5Pzg9+9/DhwxB1lazGBx98AGAsFynbv/vd7wKYMCSffPJJKp3CaDRKhWquInqayjUvSbj1gx0Oh6EPNC0AMLaMoIy32vRr164FdopMyMrKSqqvZ9lDqL8428/6ca+uroZ2prXK17/+9bAvIphg9wc/+EFgEf/1X/8VwMRyZHNzM6zT3HO8/PLLKSZK/dbVT3Ea2PWp0Wik2klZafYR/x8Oh6F/uTYrI+WxktYndpYw6Gp1oRZhwGQ9GQwGqYS5KqdsXADdX1s2expGbZb9kt1TakRcGymw3+8HCy8yUozIfPPmzVR0RY7b+/fvh7OJMpx2rS8bs6D0QcrmsKjX66lQznt7e2EQUpixQOvr68HRX0Nb8h5LK6uZCzFLgANvYFizoHa7HTqCHXD9+vXgAMnJQ+zt7aVCbtPJ+pNPPgnCkguaLnz23brZmQVeyE/WmX2ys7MT+ox1U4dRCgEepDQghXVyzCtz1ZsNO+iBiTnVjRs3wuaPBwt+98477+Bv/uZvAAD/5//8HwATp8utra0wLpUuzws/XbZenmmC3XRfvnw5jD0NHsHF
iQcobnYfPXoUNhg0rfrOd74DYGx2QVNGtsG1a9dCn3FRUxOGaZ2UFd6hxgt17o193m8d0/Uw4x3Aqg4T7uWZ41w5PDwMiysPDHt7e4ncHFr2RqOROCgDkw3RxsZGKu9Uu93ONPmdZgHW39oDGeuheZ5YjldffTW1YaX5zrNnz4IygnPqxz/+MYCxfLHKsosXLybyagHJg/O8DiRWAahjy8sfaDdruqjbcaqosvy6rtnxrEGSCM1ZZHM6al4Z1pMbJT3EZ5mozQN2LOuBgPuGBw8e4D//8z8BTGQd16j/8l/+C/7n//yfACbBd7g5evLkSbif61yn05naZCev/KrU0WAmQDLMPuf65uZmULSyjCz38fFxyiRRcwyy7rxeuHAhbBLZ97MErPKCe/E56t5As3GmPrh27VoYV1yHfvjDHwIA/uqv/iooWejqQHnw+uuvhyAVPIhtbm4mTI1ZHpZv1mATdrx5ORVVOcuxSBl/enqa6F9g0jZqvs0y67pURRh0lQF5ira8lAkaQh1IHgCtDFH5oPWwYd55T7/fn/qwq8pTQvfjdi49efIk7F9oLsu90c2bNwMBwqsGd+E8s2HQAT8QTpG+iqZ9ERERERERERERERERJVGIkfJC9OpJXpOZAeOTPE/xNtvz1tZW6pSotCRPiZrYLouRsn+XhTUb4nVtbS2wE+rwSRaG5dGAEmSiGBSALM6jR49SmeVbrVaKklUtSZVaQW0faguoeT48PAwaFmqaqCVaXV0NLBvrRGatVquF36nT+ry0ylkJN4F0MIIXXnghaO2o2fv7v/97AGOzOJrGkYliP7/wwgvBpILPHI1GKWo+q0xl6qGUuWVIlcamtmt7ezs44ZK+JkP10UcfpUxHaYL5/vvvB6dsBgtYX18Pz7eh9w8ODhLambLwnHmBZDZ07S8vaAR/b4POUB5o5nmVEV5SSq3jLPBYAbYTx9jOzk4q+TjnPTDR9lHmUTN76dKlMAbJRK6traVMAL0ksNPUw0uyCYzbkFo/zoOvf/3r+Pa3vx3KCSRDUVPz/NOf/jRR38uXLwcmn89iHwN+H1UpO1R+WkZvNBql0l1ocnRrXqKmWtY8jM/L+78q5GmjNYw5x43KMBvOmletzyxWHkXK7UH7xJbt2bNnYX7bhK+/8zu/E9I/EFx/f/7znwcTVbaJJnO1pq2z9NdwOHSTIwPJMPsaXIv7Ca4/1I4rI2yTj2r7aFh76xpBud5sNkv3obIMbCPr3vDKK68Es3G+e29vL1jfMCjI3/7t3wIAfvSjH4V9IGUcmayvfe1rCSaK9bRmhWpqW6avPBM6z7TP7r/UtJFl12SuvJ9X9vHq6mrC7Iyoch1SCwm1btLvdG1SxsiWl2sN66x10vFkZZ6+m1CXEZvS5Cx4Fj6275rNZig3LR22trZC/3CecG167733gsyw80BdJzjf8srlMWUeIiMVERERERERERERERFREoUYKQ1NaH17VMPHk+He3l7C0RNAIgmv1aTwWb1eL2hcqBXo9XopB2vVeE+rPVtaWko55VGLt7m5mXL4bzab4STOMlID/emnnwatDJkoapoODw9DGVXzbu1RtSxVagQ9rZ86kfLkTodP+tTs7e0Fm1MyHPy9hhCed9jcPCdHdZbWxHh0QmSfkL159913U0l6qWG7fv16GKOqtZ9HckovkaPnA6TJaDkOqdGko+4HH3wQbNNpf0+GSu3u6dty69atoB20jtKDwWDqsaeabI+RsukENjc3g40/rxpK3ya2pvZJ0y14gSGq8GVjfQjLwmt9WOaVlZWU7PLC4No5eHJykrLVVvaD7WA16WWgbIb1l2G51tfXg1zmWHnttddCOSgLfvSjHwEYs7tkqllPzqmXX345l91V36hZ66Tw/EItu9/v91PhwMnK9Xq90Ncabp//W38KZdLmHXRC+9D6KGgwBdWQs07sY/UFA8Zrj7JaQJLxtLKgqHb2LHjPtXXq9/tB5lFmkFW/detWqAO10AxmcOfOnbAf0eAmNjDDLPJBg1NxLHt+X9ZvamNjI2jUOT9oafDkyZNg8UFZp76WbBfusVRGWoZ1eXk5lW6hTJ049ikPWEZdI1nWO3fuhIBNf/d3fwdgwlAfHx8HZppMlPpFsX818I36xgPpBOZl4Y1Zr490btv9LBmpvb29xD4DSMp/rqOez43nw1t2LmmACLsGaih8tdQgWDautWzXTqeTCqyjgTTsONI9uvWR0tD508Cz/ACSPqBkL7e3txPWIMBkP37nzp0w7uzv6vV6IvAW65Tn21Vk7EVGKiIiIiIiIiIiIiIioiQK+0h5kbWA8amRmhOeEA8ODhIJeIFk4jlqZXia16h/XrIsLYe+exbtmPp9qS01rzzJ8h0nJycpvwgyAHfv3g2sDW2fqeUcjUbhWaq9sEkpZ4nmoshqG9VGs01XVlaC1okh3fm7u3fvhrCSZHE0UW2VUZA8FLGt13FJ7cXe3l5gYXglQ9NoNEI4cIaWJQOnmhlN0DxLOPC88mdpXzQqDjVQKysrKS0h59PVq1eDrbmNrnhwcBDGKJ+1t7cX6k5mSqNmTpuQl7/XqzJTXrQg60PAcjQajdAGNtm3hnPmGDw9Pc2UDWU1mTqebVtoYmEb5azdbgcNF+Wb+t5YWcN719fXUz5iykpXMb804pJN9aDl4FzguOh2u4HN5ZUJoT/99NPQ/hxP1Fxfvnw5yDdlWi0jVTWyxh8waYNutxvkN+WhMlLqJ6tXT+Z5jFRVzFTW+FV2QtOCcJ4oiw2MWQH1/+BnwHi8WU2sx+JVhayx7DHKzWYzWIVwLHFeHR8fB4aUMo9X9fWjFloTq2eFoy4Dldn2uWqhYaOSesy8MlQaARSY+HAcHR2lIukpG8a9kvZb2XVLrXLYbmTN1IKB1h7c7/z7v/87/uVf/gXAJGof3/3SSy+FCLpkpLjPWF9fTyTBBcbzMI+JmsU3VH+vbWh9C09OTkK5KL/JSO3u7oZ5xXv4++Xl5fAMzi+NoFeFfLBRRoHJnNZxZ60OdA9qw8uvra2FcaThxvmdjbSqPmRWdngRRYtC28X6gC4tLaUsjy5duhT6x0bxffLkSUhzQ1nAtW04HKbqqeu6V64i4y531+RtWO3mYjAYJBwggfEAZCOwkBQY29vbYaJyYWKHHB8fp/ICacd4m6Wyk0sPLPybk8HLeK8mhvxbg0wA4/CLpBdtYIl2u504oAFJ6l2DNfBatRkZkBzkbMfNzc1gjsMBSqHx6NGj1CZcHZmtSd+splRloH1uhfFgMEgdxrloqVkpF2l1ELUhV8+qRxWBTryNkl1Mut1uqAvHEPvi0qVLoX5c+BiQ4vPPPw/9yTrt7OyENuO85XxcXV2d6SA1LeY9XqaByhar7FBTA/bH1tZWpqN0r9dzHWiBZJZ6Few2rcAsMk83qZorT69bW1vhQMf58/Dhw7BhojM/5ZwG5OEiRRnfbDZTZkeeiXZVsO2hh1z+rZtOblB1AweM20fNsID8vD16AFhU3VQhpmY0HJuU4yy/KjF0nQXG48xuWKsONlEWOj/susw+fPj
[base64-encoded binary data omitted: content not recoverable as text]
wPL+uR/MUplL5eMtCqIsin5+eK2tra4G5pOeHullzX8P5kHusg4ODwHLTDXVrayvV55ShXETIg7fPACZzM72I2L+03u188Ligz2znH507lN3g87P/qGCBdSNW18V5uO8VhV1rdYzaedxjinQOydtf2X1SngBE0eS1HjzXPs/FUNd+O+9zbK2vr4c25L5H98GeqEaea19kpCIiIiIiIiIiIiIiIhaAQoyUZ31X5sMmo1tdXQ0nQgo0kL158cUXg5WPCXzJfPzzP/9zSJb48ssvAxhbCRkrwnvRArq7uxviQqZBnp+0FSXodDqBkSJoea7X68EaQ59tWkKHw6FrHZpnwlC1RtiAQRVQYL1p4lDWLS1eZCzu3LkTrLhsO5ZDLewq1W1/exapU7VWZsV9qRgEUa1WU5YKFUhRP3IgKVtrf0dlMfmejf8pA9bV8fFxSoqYVsurV68GJopW1+effz4lbc52ffDgQUq6XgVEOD7Up5v1Y+Ne5iXeYgUNqtVqKk5BxTo8piJrXHgyqZ5c9LQo+n1r1To9PQ1jnjEdZKja7XawmDPWgeOn1Wq51vJ5WjPZZ7RsXhtZiVwViPDEI7zEuvy+J7Nr51svkXhR6PPbZLgq7ME5m9b+4XAY5jwm3WWM1KVLl8KczpQejO394IMPwn01xpSsA3+bDLaK5BSF1ofGRCnUZ19ZEDIzrG8+V6/XC/2LFlt6JLTb7dR491ixWRKLensIT46af7NM29vbYb2nuAfZm7fffjshTMPPA+M24dyoa8g80osQmgDainXo2kN2jXuhjY2NUPe8cq64e/du6Ht8fpZXX6MHyM7OTooNtRLvi4QXE2sTpFar1USMOYBUfN/jhPWUyBJOsPG8rPPBYBDKbUVmND2OsrweYzQrPJbNS1/gxUXZsTcajdx9Fctr5wC9lyezPi3zq+u6t9+ze0pdr2ys1Pr6eip2VcV3vITjWYJmMUYqIiIiIiIiIiIiIiJiQShkfvZO0Wrh4YlNZaPp18yTIU/3V65cCewNLe70fX777beDFea1114DMLayU0mJkqe03m9ubs7F59azVlkLwnA4DFYVxmrx/5WVlXBitgpSqhbi1eM8mCnvuzxRq+S2jSHY2toKTAfjbDSJIK1lVjGoVqslrGxA0jddLYHTlk0tJx5TxP+tTHClUklIoQMTi3y73U5JxrIuVK7YU3Oxv1PUUqEgYzsajYL1hHXKZMfPP/98iA8ku3nx4sVg5aLVkhb2W7duBSaKMVLaltbPW5kEGx+hil1Fkde26jvtSbLa+AJtC2s5y2sTL/ZnWovntJZrVT+yqmGj0ShYNW2KAGUfsmIQvOcrA85FyvRZ66zGC2g8lBcbxf8ts6nt6CnyWSZKfeHL9ru8BLKqusW4Jr62traWiG0FJvE1Z2dnYSyRiaIKa6fTCfXIdAPb29thTlUZfJZpWkY+jwFSZkfjNaylnOuu1q2NQ2q1WinPC7W623l8mjmvSDlViZB9aHV1NTBSNnbi/fffD+uOncvW19fDGpbHPBDTpBvxWBUv/kbXHZaJz62y2cB47uacTiZb2W56jGj8F/s012Jlpp6EKp6XIsGOSZuW5XHAzq9ePJEyiWwj3c8CfhJknQO9dXURsu/63Fa5UWO1vN+28UdnZ2eJ/ZRelSXS386KdZ0mRkrXhKwYKV3z1bOGv8U5Q2OlOCY4Z3PvpXXn1Uuet0seSvvxnHeo4kPYTqiHDivvTFr+xo0bwS2Jm/jDw0O8/vrrACabR27sL168GBb2eSDvQKUbbW6KWLajo6NUgCtRrVYLNcQsBypdWLM2oCcnJ6EzqYsfNxVsC7pRdLvdUN/coBODwSBFp9py6zPMMmmq20+eGIS6qHiTI5+Vi5q6ZwDJ9tXfs5OMPte0bi6rq6thAeUBivlQbt68GV6j6wYwcRWleIS68/E1foaL1tnZWUqsQQU3rAz1vAKyvYOB3cB7hyud4OymO29cLMKV5bxDjS1Pq9UKC7DN66WGB816z/ssetPDxSTrQMurJyyRJTbhCUqc14/sQklMIzahbiWsP+vOfHh4GOY8PuPOzk5KMptlunfvXshZROMe3XEbjUYYs5wzV1dXw3PY9W4wGMw07xVavGXOsy5mnJe1Xq3rpn5fN7j2IOXJIZdF3oFlOByGeVbljW2KDV1v6bLJccU+1Ww23QPsPDfuugn13FQJe7hdXV1NiO0Akzn+woULYR6nIYwHKhVt0L5t9yusw1artRCxifNgx5+mRMnKx7goFDHw6aZaja+eexev9vk96W+9ztOlTw8u3sGdVzVIEt7ehv/btC9eaIZd7+xv8r2yewgdN1mufb1eL/QjFZuwQmNqOLe5pdSITXipHLyDVJE5L7r2RURERERERERERERElEShhLxFTveKarWaOtnxRNjtdgM7ZanvS5cuBasu3S3u3bsXLO1kgmiNPzw8DFareUKtZ2pRsNYInpw7nY6b6EzvZ1/LwizS6J51m1aF09PThFQ5MLYk0CJGK7qKMNA9k+6Wak1XSzwwblfryucFT5eF5zKl0u58Ju+3LOWrrn3qcsm6IPT3PEqZv1fWOsu63tnZCawTreN0W71y5UrKLfTg4CC4k9oklffu3Uskd1Uoa6BuW1kJUadxc/HgWe3y3D2tm6U+h5UjVZcpL3HpIiyeeSy8yt+qWxIwma9Go1GCFQUmffNxuODwNz1BCQ2Sz2OkbD/S5Nae7G+WG5++VsSVMQteALVmqgeSUv9qoeTawnpRd2a68pEVIDY3N8McyTWqXq+nfpNjdhahnTx464MGtVs3R/28dQnUOvQSj3tsyzzGlyc6oomt+RmuLZw3lWmniI71elDXbs9FMm8sF0WRJKKenPPy8nIYM2TSOJ7a7XboV/TQoTvf4eFhar0CkqkXbDkep8y49X7heFpaWkq5xT0JkYksaP8vyhzZMTGLEEtZeK7yhDJTnvy6bSNln+zc5aWXsXOH/u0l/p0Gdo/A51DPIz6r7nXtfq/dbqdSD3CMKKtuPSn0GXRcFxlLkZGKiIiIiIiIiIiIiIgoiUKOtB674sGTZSSUoeLpkKdMWm7VAkqrzOrqavBJp4VGA7et5XBeyIuXsihqqZunBKt3X/WdtYxgv98PFj1lImgpZ32rdYIWAVr/WP+tVitYDVQq2bI2xCwsm1or1UIBJJNNKoNhLRSe76xNwOYFrWsQmPHTMAAAIABJREFUtPXhB8rH5ly7dg3AODaDyUDJTDH+otFoBIse6/ujjz4KTBSvZGmPjo5S9a7sU15iVGvRWZRFzWPvtF1Z757V1fNTz/Jhf5zwAv5pZWZbqu+5ldv3pGkXBRWZsUyUMk4e+2Rf8wQlvDJ4bWPjSNQCOa38Ob+v91NWyMZJbm1thTmA3+Ma8tFHH4W4UN5DRWHo/cD5ROcHGwtin3ER0PHrxTAU+V5ewk1iUWyv14aEWt1Z37puaeyvvZeVN14URqNRisnTPu3JSrMsVi59Y2Mj5UHB+eTg4CCsCeyXZ2dnKVbCem48LmR5D+h7Hycm6jwUSajryYh7wjDzRB
4jpeyKnZe1n9r5qt/vpxh8rx1tLJb+rTFSs4hN2DrVZ+YzspxaBzZWqtFohHmbzBTHz2AwCHWQF9tVlmksHZGYFWxnYV/P07JnA/Z6vTAx6sLNhY+LOj9zcnLibr7miaKbzDJiEfNyofLum3WA1cBPVbzS3FDARE2o0+mEz6kKFu9lXbRqtVruhnZaYQYtgz1IdbvdlDubbrStsk673Q4DzC5yjUYjNTlpRm2bR6pSqZTe/N24cQPA+CBFcQ9Sz8T+/n5KWOLOnTtBBISHK6Wqvfw/LFOeEMDjOEDx6rl7Et7CZZ8pb3F+Eou09ntgXOfcJPE19pnT09PQXnmbjSK/Nw1082Y3cHrNe80umurG4blqWTcNzQdnF3UdZ2WhB3L7m5oTRvO+8Lm5uNIw9+DBg9T6w+9tbW0Ft2AVt/BUGInH4bZpf2vajUzW/0XfK4Ii8423hmiAOT9j3Tl101hkAzxL23jlsO3v9fder5fKx8ayqUsiD1CaJ4d9lf2z3++H3/IO0U9iTvTceD/O8NYl4MkY5orAO0hZo7EauAh1M7X7ZTWwe6p9tm/pvG9du2dx7fPWDF0nPNc+jh27F200GilXejWgcwypQM+sglvRtS8iIiIiIiIiIiIiIqIkchmpIsHA07j92dfUSm1zxOgJ21L99Xp94YyUPmtRsQh+/rz76eft69PAC8BTS52lddWyZ/Nf6b144lc3Cvv8S0tLibwds5ZFYRkpPn+3200ElNvPW0ngVquVEptQ+XQVO2A5rLuEumSVtVzRne/ixYspGXayfbu7u4GJopvRgwcPgnuldSGq1+sptyu2pZfHYpYA/1lg2SOPcSz6bB9XNxE75jxJWk+QZdHMhfYPy0ipq18e6+S5PnjumkCSsVarvLV4amqBad2RPJcQQt1YVSjHjn2OPTJTwMSSqZZN24b63Pa3p3FzycPjcFN7kshjoD2RjDyGe9Hw2tVzqc9Lp8F5nP1Sc/hZF+2VlZVUSo9ut+uyoFnPNy/Mi5X8uMArT5n9y+MUm1BWiHMR217nbs8rydv7AUlGygrlaHn09+z65sm9l4U3j3vjRueALDa9Wq26qW94LztWm82mK5hUBpGRioiIiIiIiIiIiIiIKInCMVKegIDFtGyMJ7+sPtA2UFstvzx5Pg6UiSkpWxfzgrWKq7XKSmB63yPb5wloKCNkM2t7Qhfzkjf24i2AZKI27UM23kLjWMgEeQGHGshIWMt6nkXkPFDGt1arBXaPVnDGRd2/fz/IzTMeyhOUUFbWEwLgMz5pJioLefPHbwKs9UwtXTZmR+ewJwGPGbMsvwbc5sUn2nEPpGMj1Cdf/dxtbJReyzJSniVY2XFebTlHo1EqyTbH4tnZWULOHkjK1du4HBUR8dayj5v1nfi4PpfCmwvKzmWLFnfyoGugJ4rD/uIxUnZs6u9ZSXdd57ISXX8ckdeuHwfkMUxPeg1VZpZ9SuPd+X8RRsrbQ1n589FolEqroIyXjc2eVf48S4xIvRl0PrdzrnoiWaExZaTs55XhKxKv7SEyUhERERERERERERERESVRWrWPyDuxTRs3lRUvYC2eeqJ8EtaMsszUk7S4eO2k9ZgV19FsNlNMoFoIPN/0aZWj8p77PPU+q9q3tLSUsKgASVlMWihofVELimWkstTH+N60SoTHx8cpifOHDx+GKxPs0nd5OBymLE9e3MuTUOabBb9pz+SNJY1TsLF5hCp4Wbnpx4Gicr62r/N1vUdeEuQ8xTJV5rO++LOo9insXOapmOqz2XjcpaWlRHwhkFRVs2XS5I52vvo4M1JPGnlz0rzja7MwL/U+T7GSsLFLmuDUY63zElzbuUVlqL1kzIuaXz7O68mi8aTLrGyMFw/K92yf9tI0eHsomzwZSPctIK1Ya+OwyyBvHHqpBDyPILs/HY1G4XOczzUFDsE6qFQqKUaqLKY+SCmKTIz2s3n3ybqXfe/jckh50gNMUUT61dswETpRZ90rL9fIec9QFHkUqyeLqZsou0ipRCgHlCebaw8kuoBZ1ybrflEEDGo/OjrC3t4egMlBiv8fHR2FiU0Pgjb/j5cXyk4CRQVSFIv+/G8y8uY3ddvJc0F60vVlN3J54+zs7Cyx2OhVN2v2np7YRNbhyr43bXkUeYcZNQLZ+c9zW1GjnXU58ea8eHhaHD4OBzD7G1m/6wliAOm1U8VK9EAPJPugd6+sTd8iD1LzRBwr5aDrvB6k9T11r9N5zh6k+BnvIKXzm9cXFyF/rs/rGfyt8Uv7js0n5bkk6oHKumhrWTz32iL9NLr2RURERERERERERERElETlN8FyERERERERERERERER8XFCZKQiIiIiIiIiIiIiIiJKIh6kIiIiIiIiIiIiIiIiSiIepCIiIiIiIiIiIiIiIkoiHqQiIiIiIiIiIiIiIiJKIh6kIiIiIiIiIiIiIiIiSiIepCIiIiIiIiIiIiIiIkoiHqQiIiIiIiIiIiIiIiJKopb35quvvrqQJFNlc1cVySz8rW99q1Ca7FdeeSXzx4s+l32eRWXoLlqmv/3bvx3Z52B2al4Hg0HIDq1X+xo/z6zSQLpe9HeYQVpf49/2CgCvvPJKoTJp37MZr/UZ+Zx8rVKppLJZr66uAgDW1tawtrYGAGi324nn7/V62N/fBwDcv38fAHDv3j08ePAAAHB0dARgUk+1Wi3c9+/+7u9KlalsP9PM9oTWia0f+/3zXvNQtO8tao5YBIqU6U/+5E9GwDgD/MnJCYBJ2/N6eHiI4+NjAECn0wE/z77IPtVoNACMs6mvr68DQLiyH66uroZ+1Gq1wveYYd0bX8Q3vvGN/yfbCAD+9E//dASMxy3HPutseXkZALCyshLqm+O9Wq2GMcz21bbs9XoAkvMJv2fbZGlpKXNcTlOmb37zm2F+sHOd/l/ktaw5QctUqVRS/WtpaSlRvqzP/+Vf/uXM6y2hv8kxwzkcAPr9PgDg4OAAALC3t4fDw0MA43EHTNp+c3MTFy5cAABsbGwAmIwrYDJ/83u9Xi+M27Jr03nzuF3/tJz2efr9fign78vP1uv1UB/6/aw21v+LlqlIOyny1hFvL1Dke0B+Wfh3kfFUdq19kpil33lrvvY3vsa/7f6u3++HPuj1u3q9DmAyvvRzvAev+iyvvvpqqXmcz6Xg/bL2m3ae0jFi3/Pm7Ly+6NXxn/3Zn2V+ITJSERERERERERERERERJZHLSH1c4J1M53lf+3/WiT/LuqJWAO8zi7aK8CReq9VSFi9C2Rtavvr9frDA6mv289bioOVRS0LWb49Go6nbzrPOKgtlLRmVSiVYUWiFpsV/fX09/E0LC60xp6engZF6+PAhAODBgwd49OgRgLTls9lsBsarTFnOe82zEnt1p4yjVwf276L1/5tgwVskitaT7YvD4TD0JVvntVotNc6UAfYYhoh8cIyMRqNQp6xj1i0wYTg4VlutVsrKynYajUapOc+zttrv6d96r1nKZNc8/d9al/U5sth775lGo1F4397Te66s96eF1hnrlGtZtVpNeFMAk/Y9OTkJLCLfU
w8D2+bNZjPVnoTW9aKgrCbLZ+txOByG8hGsk1qt5s7jj2uuPo99KsI6eXuDvL7kMatlyjuPuvm4roXenlLryfYVZdP5mrJQyooqtH34Pc6d3m97e5Hz4K2D9prFqnvrLJAcZ941j6Uiys55Ux+k8g4Y52GWxWYRsIvO2dlZYdcIIN8dwjtEzLvcXBxqtVriUKXPo7+rA4eTN6/q8mA3fdrpzztU6W/PegC2VLJuYj06mq49eoACxu5UdPPgPeiadXBwENz47t27B2B8kOKCbQdrs9kMi/csZSK8vsS2rFQqqU0F62IwGKQ2XUpxl1mAPy7j8eMGb/yWOUjV6/XUGNLv2U1evV6PbXEOdNFkW3Du4nU4HIY24OZ6eXk5jFvOBepGZhdx73Cm0DGqmMaAps9h4Rn3dKORtfnwDuqEZwQ7zyg4T+jveBseO+dxrj48PAxutrwH5/x6vR7ala+pWxIx7Qb9PHhGX29Daw90uhZbNBqNlLvW2dlZah81j3KUddnLc/P37pW3b9B9xSKNS3n1VMTYWRRF63IesHskhe4peBDifLi0tBS+w3lTjerW0KJ7TFsG3YuUxdnZWWJPo2XxQjg8eIcllpfvqZusGio890CWsUhbRde+iIiIiIiIiIiIiIiIkijNSOWd2BfNTM3Txc8L0lfLiLUS57k8qKXJWtayrDjztILRUqf3zDt1azlt8C2vp6enqdfUOuu5vtj2KeL2l4U8i6r+pmWKWq1WCOS3Af5kqICJcIAKTHz00UcAgLt374b3WD8rKyvh/vyfr5WFx+Yqo0krirYX64DWIrbJYDBItXVecHNeUGdEEmXmGbWa2e9rQK/nQusJBWS1yeNwFf5NAMfhcDjE6ekpgLQLWLfbTQlRtFqtFCOl1lnCzm/ahoRnrdR7lF2n1IvACxjPu2feWua5RPNaRJxCMU9Lus7ddg7T5+Rcxzn74OAgiE2wDVlPrVYrMFH6nh2bOuYWMZ5Go1HCig+MreGWHeNv93q90I8Jry9pf/DYh2lRRCAii30q4oJuXWQ9Jt6bC7Oe8TwUZZbyxC1sGTx4+5o8b6RZxo/XRvbZPBc5YDLHcW+hzJSuU4AvxEIsLS2lxhz7tM4nReGNd95X5yvPoyOrr3gMtzcGtS4sc6VzcZ6nABEZqYiIiIiIiIiIiIiIiJIozEgVOWkuShRiEb+jll17HQ6HqQBmz09TT69ZJ361tulvzzNOTAURyJJ48Up8JgbhLi0tZQb0np6eBgsZZYL5f7fbTTAigG+RV5T1cVbLal7gN+uWVpLV1dUge7u5uQlgwkjVarVQFlo0GQ/1wQcf4MMPPwSAECvV7XbDfa2U+sbGRrhv2TIBPhMFjC1ELJP2R9Y9Y7ZY/1p22/c0MNSzwC4iNuC3BecxC3kstq2LWq2WijdUf3A7r3jxMIueU3/ToLGOKkIATBj6TqcTxgnrUWMoyUzxWq/XM2WCNQbO6+uWDZ6mvdQqmuXhoIHUXrC0x0zlxVd68QhZLMK82BsvxtiWV9dgznmUPz84OAjzIdddXldWVsIczXbWWLmsgPZZYetd13iNz2Ab2zo+PT1NxePq/G3rR+ebeewliggUVavVQvFPXnoS3UcBSYZXmV4732kfL8IMZD2T/dv7X59dkcc65dVbHsuX9VoReCygPrd6OwDjumcZLBtfr9fD3sYywCcnJ4EFVrZI2V8g2b/Lgs8zGAzCGLbMmMY8aXltzL83pu3+qlarhbGke3YbO6b7dy/G0iIyUhERERERERERERERESWxEPnzPN/QPPWwshYV73fOg+e/au+h1kdavnq9XkINSqGneuuf3Ww2Uz6ZnrV7FmsSWZXl5eXASDFOSK82Ga0+N6FqVWqZACaW3pOTk/CaxlF51s1p4fn422utVgvPz3Jvbm4GJorMFD/T6/VSTNT7778PALh9+3aIkaJlUJlG1t3W1haApJR6WahFySaiVKlbWvE6nQ729vYAIGUharVawSJkLewaG6CKUFmM1DQWsjJMddbfZfp+lpVv0Qpj593TU07j573E18pQ5cWulH2O/1fAMa2xIpyrOcbVys/6rlQq4bt2vKytraXkqZW94fyn8YpZ/c6T1D0PykhZpkXVQq18e1bCVj6rZaTU28J6XvR6vRR7oH13HgyOZWrU6quMFNcWtifnQMa16r1UqZXzMtu52+26DDIwHctW9PM2flf3ApahPj4+DuVim6sqrI3dGAwGuZL1ZVFWhc/zGPH6l42v1hhRL/7GMrA6DmaNkcpbI7Stspgl/byNgT8vVsx7vlnUALPYOR2j3KPp2mT3Mzpe8lLCkA0G0glwdd4qWybu0bx+5M1JnONPT09dpWkg2bfseNe9Pa+1Wi2Ugf3Ti5/Kw8LzSNkNo9LDVjxA5awX/Txe0LYOAE961bpV8bmr1Wo4OOmiDIw3+JpZHcjP9zQN7t+/D2A8SNTFDZi4tW1sbISBxc67trYWDiBcdDjQWq1WKkcHr5rHg/c8OTlx5Yd5LTvZ50lfcgA3m83w/OrOx7+5uPJ7R0dH4QB1+/ZtAMB7770HAPjwww/DQk202+1Qf9vb24nrxsZGuH9RaN+zk5C6BnEw89D08OFD7O7uApjULdt5Y2MjlPfixYsAJm2omz8vyHcWNyQPWYYRXTSKBLp7LpDeBjXvNfv9ovAyp+fdU5/Zbr6JwWCQ6drniQEsyv1yFpQJ/vbab97g+NExaF3BDg8Pwyac40DFWfhdziEXLlwIRhhvjbIbFHU/yxKHKAPP/YRXPmuz2QxjX8UyOOY9kSPrAqOuMyouxKsaD+01Swb+PHiGyzxJZZ27uJmjC/vR0VFKKERdrvk379nr9VxXTV5nWYuz5i5vjlejF/sQ6/bo6CgloMG+qG75WXmoZoV3gCJ0DHh9SfuOvVohGO0/Vqyg2Wym6kxzgbGti0APGlkHIi/ViHeY00NWVpqbrDUh6wA/zZ7Im1ftPAEgVedHR0cpAyz3ctvb28E4bNO5qACKCr3YOtODWNmxxL2LV3/qBmoNPnmhJyqUlrXu6u+cnZ2lcmh563oeomtfRERERERERERERERESRRmpIq4oHluO5aSVnrbJj3sdruZAdtFfrMI8qhrQk+onoVMXUf4DNYypuwPWQ0+a6vVSrkyzGK5VVcHK75AK8PGxkawPCi7YgUZaJ1VdktZKmBsHaWFlNbfTqcT6kdFKQBfRvM8eK59Nsna8vJyeG6WY3NzM5SBdUpryv3793Hr1i0AwDvvvANgwkzdv38/WC1UPp0sD6+sw9XV1dLBr2oJy3LFGQwG4XkpenH37t3Q51h2tuHly5dx/fr18LzAxOqyv7+fcu3r9XqpsVk2+ZyHPJc9z+3NY108YQU7RtWKOK1bRR7y7plXT571kVAWMC/ZYJ4VbNGiE7PKDc/yW9P8jq4r1kqvctlkmemacnJyEn7fulC12+3UHKnWURvUfHp6mrCaalny3G+yUMTdTOcODZq2HgUq3+tZeIGkxZnrVaPRCHMM39N5IitpbBY88QXr2qdl0j0B5zwyUWzLTqeTWt/U
nZuv6bjyksoT82Z3WEbrntZqtVJzF/vq/v5+KB/3EHz+LDGORY1Pjz0BkkmD2Tc6nU7CK0Wv6u7PfsDyN5vNBMsKJIVC2Ib8TKPRKBT0T2hKgywpbGV+1TPEeolo3WftGb017jyX7rLsrnUZ956xXq+H9lIPKu4p+BrLsb6+Hua8nZ2dRF0MBoPQzspi8x6WBV9eXi7NSHGvpe7J3rzpiaLZ0BNej4+PE/0TSIboWDEOb/zreafIGSMyUhERERERERERERERESUxlxgpT/aSr1mpweXl5VSQvjI9PFVO64t9Hry4Ci+hqWXLPB9UMkHdbjecxMmGqFXGY9msyMMsrIBaDdSKBUysCxrvQwvEhQsXAtNy4cKFxHtq2WPb8ZlVhEF9mPm3ilLw+cpaMtU66zFRwNiaoUwUkGSK+Bxkdm7fvh2YKMZGUWDi5OQklJf1tLOzE6w0rCdaUJrNZmlLplpkrWVLY/EYD8XEwPwfmLQTWagXX3wRV69eTdyL5e31esESxetwOHQT986KvH6r5SXUn9qTc856Jk98RP3N5xFMbsuTx3ZlBWArPMukXrMCY/We+v9vm9jENGVinTUajTAfWFbj5OQksBpkpB49ehQsmfxNZTe8uCn+nvW3V+l1yzhOIzbB+49GI5el5G96fd2yB554jSctbS3yXrL1WWJdFVnrrcrOs/5OTk7C+sr5j204HA5D+axHwvr6emhPzv/KJs5DCMmDF9OosbwAUrHSwIQh2NvbC+VlvfAZdb1lHXriQbNA5yCPuQSSIlN87uPj41DPNi4FSCZKBpKx24yP0ZhtT56b9ylTTt5HhVtsWhDds6iohWWkPO8Ry/LqHG/lu4E0m6RjvCh0TrDro7J6fG622+7uLu7cuQNgEk/PZ2s2m6k91OXLl8MzWyGHe/fuhdfYBzR+r6wEurK1XlsAfgohTdJrY/M1ht9elUEtylIVmcdLH6Q8Fz/bqTQQzDb41tZWmPxYcdzk6T3mnefBQql3616nNDJf6/f7oQE4obPcJycnoSE4qdgFVu/lbdLUFaMs2HnVVcN2DFW442Fgc3MzHBB4YNCDA91b2F7cZNTr9QR1zvvbAaBX1l1R5C1IGliskzCfTScQYKLM93//93949913ASDkjGJb1uv1cN9Lly4BAK5cuYIrV64AQCogczQapQIUz4NuYGzQrrp4PHz4MPFsw+EwtNMnPvEJAMAXvvAFAOODFDd9PECxD96/fz81cdbr9fAcmk9Mr2XgHX5swLC6ifK6srKSWjT5PCpIw/6rQfH2oH5ycpJyJ9X+X2bB8pScirgXAv6ml9cim9M8IY5ZMIv4g53v2V8rlUrKtUM3q/MWMrFg+3qbIZ2zdaMKjMcUxxfLpAYaznE0VLCPXrhwIaU8NhwOE2NUr9MYAtmv1R2JfVw3d3a8qACFrYt6vZ5aWzzFNXX9zdpUzNoX7UHKc+VRoR22Ga9sy1arlQiWByYH3rW1tVBXfF4tk5cLbJr8RESeAqp17Ws2m6l+onsKHvo5n6sB0ZbJO6jPMs41L6TnvserFZk6PT1N9XU1mtuDroYV2LW73W6HcvL5VSGzjDHWcwm0oQqqgOlt2j0FzCyFwm63m1KRUwOyvWoeraLwjADWpVjdI1kOFdmicVb3sLqfAoDnnnsOAPDUU0+l+kCv1wvftfn7dG9RFJ4IhG2vVquVOlirgci6/XW73VTeTRUg8g5ZVoWaZVPDUh6ia19ERERERERERERERERJTO3a5wVjaU4EBojyZMhT7PLycqAOaXnxGBU98S9CAlitfpbyXVpaCs+mLAxhWQSVW7TMi2apV6uHtRJ6bFVR0OoDJC04wORkfXx8HOqRFtl2ux1c22jRIyN1+fLlwMxYoYW1tbVgIdBgR9aVZSaKZof2UK1WU7mi1MpFa5ZKftOC+cEHHwAAfvWrXwEYC0xQXIL9k1hdXQ3lffrpp8OV9cLfVkndsowUoSyrjhlgzEjxb+LixYv41Kc+BQD40pe+BAD44he/CGDcTnQJYbv++te/DuVlXbB+NjY2Ui4LapEtO9byMoazP6hlUuX4NUUAkGTINKBf6+fg4CDVl9Ttahb5aUWe2ESW4AVhrdSe2ITn2ufJn9t7LtKtz/6mihjQa4AWzaWlJXzyk58EMJk7yH7ev38/5dJT1jXnPKj1V5kZYGJZBZCS7z08PAzzAuc+ZWhtkDvngtXV1VBOtS7bwG61jpZleHmPvHrSvqeucdayroyUXd/UAyNP5j2vrxaFt67ZeUf7hu4XOEdz78Axvr6+HhgOuza12203b5Zl2RY9njz583q9Hn7Xk+pXiX4gyUhZga5KJZ2+ZRaoRd4KkCj7ZFObNJvN1DzO8be9vZ1yjffSh6isud1bsX7KupV664DtF6PRKJGrkfDc9oBxm1nGQt3KPPYpS3Z/Gug+2/YR1uXm5mZCEIu/yX0A52/ug3T+tHlGL1++HDxyyJbq/tZ6P52enpZmd3VsWyEYHaN5cvjWe0y9hey+/Pj4OBXucHh4GP5W9pVlKsKERkYqIiIiIiIiIiIiIiKiJGYWm1BLkwb5Wp9MWiZHo1FCGABIClIoK8T7E/NmpryEcHwOvsZTurIwXvJD/m2tit1uNxWguby8nPL5zPLLLQJaedRyZaFSljx9azwOrclsr48++igwh2Rq+L8mrlQ2wTJRKqhR1vKn1jgVlwCSwanar4CxdYHxTxSWICP13nvvhTgiWix4r52dHTzzzDMAEMQbLl26FBgUm/la2dNpoNnDgWSCRpad7Xr9+vUQE/X5z38eAHDt2rXwPLSwv/HGGwCAn/3sZwDGjBzrndb0er0e2sX2vVmtZTY2iHVVrVZTFvtutxusZDZAVS1KnnU0L1jUxkCUFZvwLOheEkY7TrMk0Xm18SZqfbNWchUPmPecl2eJtwzrhQsXwph4++23AQA/+MEPwjN++ctfBgB85StfSbz3zjvvpOYAZRnnUSZaGmu1WiJwGpisNdvb24m4XSCZnJJzHcdPrVZzZZeBsUWd849aoy3ToXNC2fGkjISNwVDruFePto8qQ2WtucpaWXZaRWDyJP7LQtdIu05UKpMk5KyDg4ODVGwUsbKyEuYzMh2cxxuNRkr8wGunvFijojiPOfTkti1roRZwa/n2ZLpVkGSe0BhT6xVEeJLl7XY7FafG/cLOzk5oH7tfUOZC64LMh53/lUkoAtar17Y6z9pxpgmGbayYJrC27aheB5quxfZ1ZSnLeulo3LCNLeK9NGUL2aRmsxmem/sfTa3y05/+FEByr8tnZXuxTQ8PD0Ndce+obFtZLx3uOzXOk7/J69raWjgr6DNarzEVQ7NxVpzXV1dXw56O84quCVYmXsUp8hAZqYiIiIiIiIiIiIiIiJKYCyPlqX7xFEerH0/EZ2dnwXrE0yItGXqaVcvwImKkgLR1VpPZWsZoOByGk6z1JW232+HEzJM+y69WB89nm1ctiMeIAAAgAElEQVRlB8payOiHrPK9ViVNLY2aUNgmGaZf+oMHDwKryPurmh1fU2U/1odV3lGFr6L
QtqDlwVollpeXw2/QgnD37t0gbU4mikp9Kt2palwA8Oyzz+LZZ58N5QPGVjRaOSzLMo2ke54ljP9Xq9WU//+LL76Il156CcBYSQeYWIHeffdd/PCHPwQAfO973wMA/OIXvwAwtpbxHiyvWnKsXLSmLSgKtUaxPrwYIU2kx3Jb2VIvgbMdoxrz4flLe/EgZfy2iyTk1cSMamXPGreePLUXk6J1OU/VvjyoX7mNiVleXg4KTpzHOaaOj4/D87JPcl7Xey3q+TluTk5OUrF5ai3nnKWy2tYfnnPe7du3E3M6gIS0OsvHeWhzczNlvdcYmLJl13Gg8bd6VcZA1xAbP6LeFna8eInVVfXPMtbKjEybKkH3CZaZ0lgVjRW17ASfZ3NzM6w/NoZ1aWkpFR+hjNTjGk9AmoWoVCquCimQlDO3kvRZamjznCO0/3jy0/xfFX+B8Rpl9wfKQtkUKhr7asfh3t5eGIuM+2UfKMoMEGQ6BoNBKl5c696q0ql0th176vngxfHYdtOxpGOO//PvotB+ZD1a+DytVivESLEdrl27FvoP65z1fHR0FDx4yExxnms2m7h582aiTNvb2+EelkXW5ygK7jFVOt3Wme6vlZGyDJoyVMpOAcm5hq+x/r14Rh0DRfauM4tNeDKfOkHbnD4ffvhhWIzZ4Ozg1Wo1tRnzfnNeE4cNKtRBYbPeNxqNVMC8Ni5fY6dkR+31eqns03o49KSPyy5WGmjL57X06MrKSooC3d/fD4PCUtadTieRewWYULkPHjwIbn56oOLkyo6th7iyh0PdDPF+Voa9UqmEyZUT561bt1IHKAaVdzqdMFD0AMUrXfr08GGDg3Vy1TwRReDJDvM1jhOVL+WzXb9+PbQxf5+Hxe9973t47bXXAAD/8z//A2DSXltbW6FNNE8Y28XmStPnKQp1MbCBwmybo6OjsCCqpLFKUgNJ1yYr0a75R1gXmjuGfYP9n5glmNwaiPKk3vUglZciwh6atC94+aMW5eLnwdZVt9sNGxqWk/L7BwcHIb0AXUr52bW1tVAmdfueZxlUtpv9xlsE2W942Ov1eqFf2lQVx8fHQajFHqgajUaQRFdXY7sR1nxwZeWNtW/YTZ/KTnsS1Pb31XXNuq6reI+3TthDpH5vWnekLIMry8t61LxfduOsub24aeccoAYv6w6sByl1u7LPOE+osUXHgF1H2L66wctLB+G5/84Dunm140jnXutSubOzE4ypbAv2n1qtlhINUoMt5w/uKx49ehTWAu5LdHyVKS8NP95hidfj4+OUFLa6jttUGvr7XliIPSzpvscestTltij0YG37j+bGY3txnXzuuefw/PPPJz7Hufro6Ai//OUvAUz2rq+//jqA8YGK5eQ+r16vh7XWjs9pxLfYBzw3Za0z66Kn+yR7oFpdXQ1zhT3Ia846wkuRomeaIv0uuvZFRERERERERERERESUxMyufQBS1uP19fXAWNDqrEGwmjBUv9dqtVyJyqzfm8bC6UkS20zeGtCrLmY8AfOqVksr/amJz3h/DcC21qpZrLX8baWLrTucWu15cn/06FGKSmfb9Pv9YD2yVlGVpqU1aWdnJ1iraKFSF0gVnigCLQfLYOXVe71eeA4GjL/77rtBZILZvGndqlar4dnIPtF16dq1a8Hqom4ill0pK4up8FwxWBZ1S7LUfLvdTo0ZsgCvvfZaYKJIk6t8KVktWuTX19dTUsNeItiyZVJ3RSsb6wXiAmlREh0LmiQVSPZjjjFlhm1yXxWtmca1TxmmPEZKr9ZlSdmkPAY67zVC+848Leja3lZ85PDwMIhMcAxRWELdz+gSwj66vLyccsn0koXOMuepFDD7seeeoXL7APDMM8+kLNQsx61bt8KcR8ZX3UZoIeVY0mBsK0ShzEhRqCuYJwIBZMs5e4kkCStAwfGm1npPxpnrIcfUcDgs7aLtJbG20GT3GuztufQBYxcyzo2cF3hvL2F3r9fL7GuziE14yEuHoO7MluVQF0xrRVehFm8enQdYt41GI+EGDkzGztbWVop9Wl9fTzAwwKQPapJhrtPKQtm9x9HRUcptddokw2S2lGGy/ULdBT3my87nyu5aN2JNEq2MrhVEULaxLCOl7pFWrERDG3hfdUHmnMU0KmyXw8PDUF7uWXl9++23w/Oz37Xb7VAvLKc+S9k5nfWv8ud5SbOVAWSdqpAEr3l7ddt2OgdYL7Ver1fIsyAyUhERERERERERERERESWRy0jlSeXqe5ZdWV1dDQH7PNnRujEYDMLfPAnzVK3W1iKBlNMIUehzWyZKgxDtqVUD2nm6VQsirQBWwvHRo0ehfMpMeRLq00J9Z/m8lpnSOC5ak3Z3d4OFiFcNQqS1gM+t5bBMzeHhYbACkYVUIQpaBopCn99avGghODo6CvFPtCD/+te/DkwULV2EWmaUiQLG7A3rR6Vmrb+9Wq7KxkB4sBYuZT7Zrp1OJ1hqybyRBXjzzTdDm7GeyEK98MILoXxkC5eWlkJZrAVoGkZK/ZmtPzjLtLKyEvoc5wW1QFn/ZLX82Xupb7knKJGXKLcIiopNeEmNPZl0+wweM+UxUnnz3yLipTRYmvfv9XqBAeVrjE3RNmP/07icxwWNO7ECP/V6PdEHgbFVnWPCBpMPBoMwn3DuUGZKLZjAeH6z1lm1qJdlDZSZzep7GpPsldNa9LWdPPEDa4FVCWPLCA0Gg9IJN/PSCWiMprWsK5unsVHAuA+StbfxnicnJynmQdvCq895IG++UaEdG1PHz3veJGxT3asoI5U1R0xTJq7VGmetaUZ4tUJFygDa5OkqHsGrxsVyTfNYVC958zSeBbrXsvHIGkPpsSD2GVSy3MaxNRqN1Gte0lhds8rOk/rbNt2Cioa9//77iWfUNDFsUzJTe3t7YZzQy4X3evDgAW7duhXKB4y9ZOw8wvJqnHRZ6DzuseuejoH1QOD4USbQMlMaA8qrtpO3jyiy3kZGKiIiIiIiIiIiIiIioiQKxUjl+eWPRqOUT2m9Xg8nX4Inwl6vl5I3VKvYtBbkorAWYb0HT8AnJyepU7daD/jcqrRGi45VBVpeXg5MjUrAW+v9LMyU1r89pWtda5wJMLZO0LLH51dmis9NC5P6/1vFlsFg4CYZBMZMEH+zKDTmhXVkrTD3798P1hdaTj788MNg9WK90KJ25cqVwERRgYsM1ebmZrBKqLSoJmbT3+73+6UTbno+v7YfNJvNhLWP5aSFnEqEtJQfHR0F6wvjBqisdvPmzYTaDpCMgbCWwGn87j3/ceI8didLDtlLbkvofJPH6swqDeypjOnzWlbAY8XyrN55yXe9xKePQ7WP0OfNUjNS6/Cs8RrTzHnquWAlpZUltfPr8vJyGCc24abG0lgL+q1btxLJx/nbNqk8raLdbre0dVaZd1XA1bKtrq66SaptrJOObds+2netJ4WX+qOIxP958Ma0WqDtmjEYDFKxHpzLLl68GF7js6nyomW1NJ2AfYa8uWYa6Dxn4701Jo19Vstored8T/v4LN4DedBYEpuMmmXy1A+73W5K+Y5r1f
7+fkqFT+PWrLKn54mg+5kyjJSmSPHWC70qvHlcx4FNs6HP6zG+RbwTioLf1ThFT02Yc5YqkHJMU72P9fOJT3witBfr4/bt2+H+vBfjryuVSpgHCV0Xy5ZJFZgJy5JrgvIi85rnmaVqf1apVOuHV2USi/S7mcUmdNFnwZeWlkJH4wZaN6maEwaYVMC0gYXTPred6PibXr4CzY1i8yWdnZ2FAcX36MoETMrnBb96rgZly+5R4nYSUAlJDeDXQFJg4j7x6NGjMIg40Dgpnp6epjYJ3W43vG9zLnW73VI5IIDJZqVer4e64j24yblz504Y9Co7b9uJC/C1a9fCAerpp59OlLvZbCYWB2C8qHsHKCCd9b0ItF2zhAwqlUriwMrnoagHXa34PKurq0HY5ZlnngEwcVvc2dkJ9ajuT9oPgeRmeRGucCrE4LlqeHWQJc6gz1p00p5mw+G5IuXJn3sLqXfQtgc8T4giL4/UNGIT80wbMU/MMsd7BznWN/u3bmQ0nwjnRM4L6tLNe3ATwnvt7e2FOUYPT7yHNa41m83SYjQakO7lFgOSLrE6v1p3PN1wWLcldcG09aOHKyuDra5NReHND7Zsuj5onbE+aOTjAXhrayvM7VY45/j4OCVrrwbaLAn2WWH7su4v2L9UFITvqUCXzYdjDYi8h31tHtCcguxDKvxh39P65ufs5zudTupgr7CHJc9te9ocZlzXFXZ9UvcwL0egChbZ94ockLy5fRb5en5e93LsMzoXcN9Go/g777wTPs/npov92tpa2C+w3VjnvA8wadtWqxXKoPtf1k/ZNYbPr/Lx7FscG7pn0QOj3YupYI51M1V3S5veQd1ZPVGQInNedO2LiIiIiIiIiIiIiIgoicLmpSLCE4SetK3Lg+dSN+/AzzxoOay1QCU37XvqusbTsQYfZyVY1RMwUa1WU58npmGklMFQOVW9qiCGuv158tLAmFGz4hGabdwmrVN3RVoIlIUqa31R+pq/QasIaeb3338/CEuQsel2u6FdyAqSqbl27VpgojS41v4OLZoquOGxN9PCY28IFfLQoGm2hWXb1D2Tboq03DYajdCn1UqowfVZzzUN8uYBbzwVcX2zVy+Rdt7np0VRsQm9egIUfJY8V0VvLph3ebQci8bj+B3NVG/rSF2AOWcoI8U5jvMEWSW1cvJelALu9/thHuSc02q1wnNwrtG2LysVnic2QWQxmJa5UiEZy2J4oib6O3mJqMuKTRB5660mN+drtVotWKutt8Ta2lrKpc+bs/MEBKYVo7FlIrx6tGJWKn9O0PLtJRXnvYbDYcp7xnPtm6UsyhxxrVDZdr5nRaaOj49d0Ra9KnS+VAEhvma9lKZ1K+V+5jwXbc+9vAjrROjc47mXe+sXr2X3RCpWxnoie6ljip+jxPmDBw9CSIANX1hbWwt1xb0R27jZbCYSRgPjNrZMlM7FZcExriI3Nhm5usQqM2XdmZWhss+icuZ2LOm45P3V1a9I6p7ISEVERERERERERERERJRE6Rgpz/JiT93eyV1P+UUsKXn3mge807OeVD2rn5XZ5UlVfSitRRBIJ4pU9mYeifU0ANSzOvIZvSSP6p8MTE7iq6urwVpAa6smzrP+0F4gqlrkrSXuPLB+VIL8wYMHABAkiu/duxfipfiZer0erDRkaCh3fPXq1WB9tnLsaonj9eTkJOHfrmUDFiPz3O/3U9LFp6enoT9aidV2ux3ah1dNjKgB6bzasswSn0d4bIrXx+d5/0XASjPra57YRB5LpWMxi33T38mzZOYJUcwLRdrmcbFa50FZQtuPdR4nU6FtyO9af/idnZ2UVZ3tvLu7G9qT7PD9+/dT1krOPRp/URQ6P1tLuYrSeKxKluVb2SovYba3XmQlg54G3hxpmZperxf+JjRmQmOjgEm7AZN5X9cjO2cDvsiE9/q08JhDtYLzNZaT9aLsE9ckG2+nMZPeOjSPMrD+9Llt3J0KFSkLkCVW1Gg0XMEGvmdf0zioWb2UeO88RkrZJ+93vL1Z1tjIEjzKE0Equ/djPSszzDmD846OaaLf74d9EkW5WO5Lly6lvMY43kajUegX6jVh+/C0LDUw2Ydp/VlGUxkjFQjSmCi+xs/bPZTHSuft2zWmr4gX0tRiE2Vc/fRzZYOlpwmuLgvPJclbmGxQmwZEWnpTJ6MsdZG8ZygDXSA1aFTfq9fr4Zm8AE51feHnuUmwKiedTid16NAs4V4Ok2kFNPr9fjjA8SBFwYXd3d2wUVKxD04EV69eBTChrHd2dlJqT+qGaJWjVHnLG0yzKFhZeG5wuhGwE6aWl6+x7TTQ3LpdaH+c16KVhXm6pT1ueAuwupvYQ5OqStmD1HA4LFTH3mHpN7HuHgc8pS8rGqSLvh6o7FytCk80tFhX3mq1msoHeHh4GFyKbQ6uZrM5tSqXHsJsn/JETbRMFrpB8dz+7FytuWDy8ptNA3s/3ajbtWl5eTmV91DV7Oy8rMYva9AD8ue6RewvVJxKXe9VkAuYHJpqtVoqfxShht2skIBZoX3b9gl1mbJ9o1KpJA4tgC8oZOdLPQx4AiCztolnePMEH4r8XhGXaz0YeffP+n4Z6Pxm+7GqP1qX1sPDw/CadU8+OztLGZXV3ZT314OF51qvz1IGnvuz56rtHXSsK6Ae8q0RQPutbRc1IHtKpUUQXfsiIiIiIiIiIiIiIiJKYmb587JQN4SPG/QkbCWD1ZqkAbGAn6Xa+55njSjK6OVBrUPWWqDW8SIB8mp9t66LPK2rXCTd/5SRskGA6ppQFCowQVqa1l/+r2IWtOZtbGwEBkqZKGBsrbHWa/6OyuYqe5Mlc16tVqeWoVbkuZgSS0tLiUB0ICkYwjazFtB+v5/4m/fOC6YtW6aP61ieFloXtn68oHtlpe340r6TZxEvY938bavvecBawgm1PqoogZ2rVc6c8xqFDdTljPOOzmucM8iaq0dAWalwz03Iusfq+NXy5wXGWyZIWQLbV/v9fkq+X6/TuqJrn85yHQOSUutMzcGruvTZHGDqYu6J6BCLlj0nvP2CvsY6Zn8bDoehfJo/iu/lCRnMA6xHde2zzEalUnFzBdq+p3sK+5rOpWU9k8pA2ZKsNs8TAPKQ1/ez2mOebWS9jIB0ndfr9bAn03az+csoRFGr1UL/5B7Dk1lnHWoqgXl4TWh/KCM+5bFUOod5aSB4te7MnreRulQXcV2MjFRERERERERERERERERJ5JrM7MnTwzQxTEXu+6SRFzdlLXVqXSkiV3uer/m0rIC2hYo1AMmTOE/neZLNak2ipUAZKlrNVCbSBv1pQKoNJj4PDHI8ODgIlmD695KJGo1GiYzVwDhQksnmyESp3LH1z/divPKkzuchzJAHj61Ui4iNaatUKq7lGEj6tBParvOUAv5thMdEAUkG2pPztTEG2lfKxEoB8xGj+W3GeWyDteSrqIxdh5aXl1MiAEyToPGntOZq3KxlRoDJ3FgURWTwvZgnjXXNYuey7k9o/7Trle270yIrmHw0GrlrDOdtzu0q1mTrW5PwejEQHou3aORJl
rN92EfOzs5SIhPazrYfLxJeMH7WZ3Tv44k5ZK0tHiPvYdo9oifi4+3NimDen5sVymzaRMeacJzjZjQapZhnfr/T6aT2ijp32NhPL7bwPA+rPHhxwd58k+fJYceGxnl6V7sPzhIKKVOmyEhFRERERERERERERESURCEn7vPimvIU/GbFk7aSe9ZBexIvahVdFPJ8c9UiZBVM9OTuMVI2bkrvZZV61CJPq6KqqVjLyXkgI7W/vx/iD8gY0aKgPryUxr18+XKIb7CJN0ejUYo106S3lok6T/Vpnn0zzwKiv2XfU19hwvP99SxJkYlKo0g9aUJE9U23r3kW3SwpXqCYxPm8YyN+G2D7rzcHqzcBx7myR8B43HAO4z0Zt7K5uZmKMdK0BJaZmkYSWOWN7RhWNtRajj0FtLyx7bFbyg5lxfTO0vd0LrXMuTJSmt7ByoFrLKiNkdJYCC/GNIsJXtTc58U5aqywTTsCTBg323+VkVq0mmdeTIjHMHlrk1enRdigeZap7P1/E+ZU3Xeqt4PCU2JeXl4On7NpaIbDYeq1vHWrWq3OtR51zstLnOylavHWUD6DncN0zvE8eLLSBhWRPgdKiE0UdfPTz85yz4/b5q7IYSXrc3n3mscA1nq099Nn9Nz+yhyksjoukMxvYiVPG43G1K59h4eHqdwuvG+r1QqBlTw8XbhwIQQn2xwveqDTAxTfswNMy7qow0dRF0/7u3nuP+cdyrLeWyQ+buP5PKiLk5U/9/J4qJssN0TnBbHq1SJrHEdkI+9ApW2R5RajC7Dd1OpcQ2igNn9bBQbKHqa8/DO2LN4Gwgv4L+pC6rkOZrnMTHOQ0g1QlvtMpVJJiEwA481flvhCr9dLHVy9lBuPS+rcg9eGWneaY5HPZecGr00WNQ8UcePTv8vW6aIPTo/z3k8KXj3nhR8QKhxm5ykNDeAYytvHz3v86EEqy0VU04d4bqPefpSvcQx6svt5rrN6ECtChETXvoiIiIiIiIiIiIiIiJKo/Dae3CMiIiIiIiIiIiIiIhaJyEhFRERERERERERERESURDxIRURERERERERERERElEQ8SEVERERERERERERERJREPEhFRERERERERERERESURDxIRURERERERERERERElEQ8SEVERERERERERERERJREPEhFRERERERERERERESURDxIRURERERERERERERElEQt781XX311BADnJe2tVCqp/5eWkme04XAIAOh0Ojg8PAQAnJycAADq9ToA4OLFi7h8+TIAYHV1FQBwdnaGo6MjAMDx8TEAoNfrpX771VdfTT5EBr75zW+GwrBcvJ6dnYX/7Xvea/x8HpaWlsIz6pV/23rSz//FX/xFoTK98sorI72/B68N9fP8W8vG8tlnrVar4TV+5uzsLHzX6w/Et771rUJl+ta3vnVupmitR69uPXjtat8ri1deeaVQmf78z/88tFO1WgUA1Gq11JX1zOtgMMDp6SkAYG9vDwCwu7sLALh79y4++ugjAMCDBw8AIIyvs7MzLC8vAwB2dnYAANeuXcPNmzcBAM8991x4DQC2t7exsrICAPjiF79YqEycI86DV7dZ9Z3Vrlnf17GZhyLtxLE0DfL6XdZnZkmIXnQsFW0jouw8Ms1nsn5vnmXK619lv1P0+x6KlilvHvf6Oj9Xq9XC/MHXuEaenJyg3+8DQJhzOMaXl5fDHNPtdsPnB4NB4l5cn/kbAPCNb3yjUJn+5m/+ZgQA7XYb7XY78ftc41dWVsI81Ww2w7NybdFnA8bz2/7+PoDJfMj/j46O0Ol0ACDMmYPBINWuupaxXv76r/96ruOpTD8rOocVube+v6g54kmiSJnyxhL3ooPBILT9+vo6AGBjYyP0s1/96lcAgDt37oT3XnrpJQCT9fTg4AAAcO/evdR+ttFohH7mtbc8a6E2+qu/+qsRMO6zHIvePoKv8VqpVFJ7Vo7x09PTME64v+Z+++DgIOwlWM6jo6PwOR1fvDfr+8033yzV76Y5Y1jo/t3uzXVPdd78moW8doqMVEREREREREREREREREnkMlLTIs8KcHp6Gk60tJTRCtVut7G2tgYAaLVaAMYMFr/Lq54ay1oH1UJgWRh9z2OdpmUzirAmlpmaF8qe9LXcrG9r4VBrYd5JvmzbePCYQO831OLAv7NYPyDNJnrs4ixsgQdtf7WI6lWfX61HtHZx7NAS++jRo/A3LUm0Rtfr9WDp3dzcBDBmfS9dugQA4cr3VldXE1bnIshiIbPKnsfw6mdtG3r3t6yovX+R5yoDb7x7r3l9yfbBvLlgns9cBtP+7rzHyawoMhdPgyLfnWVtKsJgqNWXc0a9Xg/9i2Of1uKTk5PweTJCZIIajUb4nL0CkzVYrd9FvDDOK5/n4WCZ+Wq1mhpjtHzX63X383nIYgTKMkHnIW+eyps/zpsPbd3p/qXIM0SkMRqNwh5H2V32JfZ1rr2tVit8jnvWRqMR7sd76Ro6Tw8E3StkebToe3msDffe/X4/jHkyudxjHB4eBkZKvcJYH2SKOS71ORYFbz+gXlG88m9v3bUs4TzGf26pp3XR0Idl52Kla+Ow0jlhb2xshIMU73V8fBwayh6ozps8PdgNmvf8HvIOTXmT4CxuLov6jreJs5tADjRgMlmQsq5Wq6E9tfPmbXrLwlt87OFNF3VdnO3kQqhLYhblbu9LzGOw6QbIO5zyyt/SscPJjS4sjx49ClfS7pzgiEajEcbTxYsXAYwPTzxAXbhwAcDEraHZbIbfnCey+gPr2TOQ2IXCcyvN6xtE3oajKLx7epO35yLM/7MOhnpw1s8UcW2cJzw3XyB7sZnn5nMebcT7ZKHsIajs98u4rpaB7WfAZGxwXq7VagnXeWDi3jscDrGxsQEA2NraAjA5SHW7XffzNL7w/rzqBqUsPHd2vXobQ/tbaiCyByh7VXiuPjqHzHI4tPfL+p+w7cm9jXeg8/ZT0+5fPm4oa+BdxG/rvKNurNrfgcmY4rgA/INUmZCPaeC5o3pXe3jQAyP3dWpw8Q5QwPjw5B2k+F3dI+rzlUFRV2rPKJ7lrjgcDl1jDTCeV/ie7jtmNcBG176IiIiIiIiIiIiIiIiSmJmH8yyZemrkKVEtX7Sc01JGi/iFCxeCtYyfGQwG4R5KIU4LtXDnWcjKiBjkCVGoFU/fy7Jez8vSW+Q+2k58Dlober1eImgSmFhkhsNhuL9aJWzA8yxQy5tlSZSx4XsqjMHvWibNs2Rqm3uWpCzr0jTtlGdRymuLTqcTWCcyURSb2NvbC5Ykjg8yvKurqwkmCgCuXLkSXqNLH9u1UqkkhFzKwGMVPDZF3fEss8S+pAwO29Bj8/Sedm7whGOmhceAeXNdniUyK/jVgxcQuwhXxSx4TJ/H+GWxZY/bMj5tkL7nXlX22RflRpi1PlSrVZcp4vpKN1+662xsbITxzivvubu7GwRquN6qKIRa4IHx+JwHY+2xslmuSgqyAKenpykBDM+1Ow/zWm+L9H1vvbJ7Gk+syVrT9b0sBvm3BfOc77z5m//b36nX66k+zrGh
Hh+WkdJ9yqIYKc+1z9vHWJydnYV+xjVW3fmsyARZqMPDw5TQm4rXWPe5RfVDj5EFkFmm0WiU8J4Ckuy9nVOHw2FmmxUtU2SkIiIiIiIiIiIiIiIiSmIukWEee2Njo2hR39/fDydJWrxoKbtw4UJCZAIYW+VpJbfWgzyWKAt6UrWnen3P88E+79SvV43nstanwWCQisWlMZ0AACAASURBVPfS66IsGh4DlyUxe3Z2FmRqrQDI0dFRyk9WfU/nEWCpdWzbSS0i1udXmQnLajSbzVAGWpTUommDNM8TF5i2TN5v6W9aC8vh4WFK9pz/Hx4ehrZjWWhR3t7eDkzUU089BQC4fPlyGG8adA4kx9o0yLIiqkXJq1uWl23X6/VclgpIxsCpNY735efzYhrzUCT2wJt3zhM0yWKen2SsgMKrL7Wge8yv7cN5QdZPCnbOq1TSUsDaNt7ns7AokQ2N2+Fv8Hnq9XqYu/ja8fExHj58CGBiTeYccPnyZVy9ehXAZJx/+OGHAIAPPvggMFKcF9fW1oKnCNtXvUNsXMQ8oCySZaYURcQmvJhDDzqfTNuO57FCViRjMBgkPD6A9PymZeF+xBtrXhyl/d1FYFEM7JOAF2PdaDRSY0/ZG77GMchrpVJxY36z2LBpUCRGyiufjlsVoQHGc4eVOOdVGSndj1uBDr3OU2zCY6z5mo4lloVlq1aroV04r3E+VK8bXnXPPS3jGxmpiIiIiIiIiIiIiIiIkpj6+Oid2PTUyBMefSsZ23F4eBiYDqoIXblyJfxvT4unp6cJZRv9vWksIOpb7cXQAGMrg/VD9+Jr9PetNUL9N22Z1PKvMpT83iyxYEUsRmq9429Zee1WqxWYKLaT+tbzc3xNGbs8RqcovFg2Qi2TViq82+0Gy4S1MjQajcCC0kLBa7PZTPnbs1wA3NiAsmXK83EmhsNhYJhoDdrf30+o9PE1YNxuLCetMLQo7+zsBCaKY2xnZye8T2uNKjVa5b9p4DFT1tKsVlb+Ptut1+uFOmBbcHzo97w6tG0ybUyHZ/HV32M5dO7IitvQ57J+3f1+PzUHKBNUlsGaFsqCKDOYZTn3YnUIZacXjfPmO6+PeBZJYNxebENvLpuV2TwPypTZuYvPtby8HPoXrcQPHjwITDXLyfF+8+ZNbG9vA0BI3P3OO+8AAG7duhXKqfMD12eb2JZjclZ4rJ9tJ68P6ZjLUu3LSrhJ2LlpHjGUHrLiUqz1XBl0lskyjvq3V85FKUY+jns+TnjMnfX+qdfrqXKyzZrNZvi8erkA4/bw1Gftb89Sh57Euce6WtXffr+f8jjyJM7tVZPvqtS5xmtq2XRdnAVejCB/S9dRqzLI8ba6uhr2ddzrcN+n+3E7PrVM9lnOw1zEJryDDScKK9d8enoaskJz8uZmb21tLbga6CRuM63PknOJDd1oNFKDgRvLVquV+JufsQcvXQjsQYqN0+12QyfUXB02bwc/0+v1FpZTyk7Go9EoDCy2E59ne3s7tA9lsu/fvw9g3HHZibUubCecx8RbrVZTG1T2Az10cKDU6/VUXiV1kdMNCTBxb9PBp4GJdhOttHZZF0zPTVSpamDc/laS+NGjRymXPs3Fxv7LstB176mnnsLTTz8NYDLWtre3Qzmtq6TmeCuL8xZzG/SpCxbf4wSngjTWJUFFKnQs24lWr/NyldX5R38b8OcMHRv2IKVuCXaR08OVBqPzOsu4yvqubvy8QGQ+I79fr9fDhpvw8vDlLUTzEGPIu6/OHXy2brcb6pnjjPdqt9updvXcHBctMKEuQTZFSL1eD32H7nz3798Prz333HMAgBdffBEA8Oyzz4a196233gIA/OIXvwAw7m83b94MnwPGB6msvDLD4TB1aJ4FOp48t3ObokLXARtMrldvP5JliJhmLOW58ek+wI5r3ZiyjjnmqtVqat5QFz+791AX1UXLbv+2wqsvzz2ZbXV8fBy+Y0MENEyiyDwxDfL2DwpvD+odoHi1Ln1ezig1pNkDlOd2PAs8w6XNk9fpdMKelc+romg0HvGswe/v7e2lcu31er1ckiS69kVERERERERERERERCwAc2Gk7Cmu3++Hk6+1pFer1cBwXLt2DcA4IJbv6ekfGJ+medK3jMQ0YhN5SQBV7psWBzIXy8vLKclLpVezXHfU1WzaLOxFkfc9LS/r4PT0NJzq2T4apMz24ffeffddAGMLKNuHbn/KCmTJlZeBWuoIto+13AETS8XBwUHCUgtMLLf7+/spOU+V/qQMP5md5eXllEVbUbbveW4Z1op1cnISnpGW5IcPHyZcY4EJQ1CpVAIzwHFF9unpp58ObC9ZqvX19dB/LQNxfHw8NSOlZbGWUrWesj5XV1fD3+xzRKfTCfMGy6sMnLXWr6+vp8aW1muZdspzR1XXN2vV6nQ6qXQBakG3z6AuwNZtblbWKa9MWe95jNTR0VGYH2y/WFlZCXXNefE8i/jjcvfjWK3VaqFfaBoOzge0tqqruRVaUFbB9u+8gP9p4DG4nOvYp4bDYRgbdNU7Pj4O4/x3fud3AACf/OQnQ7nffPNNAMAPf/hDAGORCWDMXvFzZLJGo1GYN/k77A+NRmNq152ioirqNWHnXBWYsGu2JzZRpG3mNdY8t1jrWXBwcJDykiDa7XYqETLnxdXV1dAPlKWzYQRPmpEqIgZ2noBT1ucf19xRq9VSYiWcA6rVaqhzuxepVqu5qSLmgbyku4SKm+ncxX7Hq7JQWYxUp9Nx3Z/t8+hYnIWR8uYAXnV/BIz3q9wTqUsfMGahyLAzxQvHYK/XSyUZ1t/yBDSKIDJSERERERERERERERERJVHavOSd2OyprdfrBUsmrX+0zly6dAnPPPMMAOD69esAJqzGo0ePUqdFlWOeh/VPLdV5EuR5suQacG2fzVoz1HqgVnNaNGiN9ixq84QXw9TpdMKpnid9xtS88MILQTb3/fffBzCRzb13717K6t5sNsNzz8NH2JOxtvECy8vLwZrM5xgOh6EP3bt3L/Hcd+7cCa9ZRkoD62nJ0bgpK5c+TR/0EizaNAHHx8eBHWTb7O3thfHEccQ+uLKyEqzoVur86tWrge3lGGPdsczApO1V7rQsNC7Gs5RaS3ar1QrPzbrUhM9WkpX/9/v9lDxwrVYL7WSt0dVqtVQ/9GKrrPR3r9fLFY2wlmFPnEKFGqyUs5cQfFHwGCkvOJ5XPqOyzl4g/KLiPPPgSWizfBzn9+7dC3MAP8dx88wzzwTfepaX7Iy2uTeO5wGVOs8S8jg4OAhMFOeEjY2NwER97nOfAzBh2X7wgx/gO9/5DoBJbBTL+9nPfhaf+cxnEp9/7733UvfXPjtLjFQRwRRP8pjQdvXEKfi9vPgR+9vKlpcth/5t41KUBWA97u/vp+Zv9XKhRZ19kB4Gyt7z97y0MHa+ehzw2CQVSLGWfj6bymirsMo84uDLPLvtk17qG9ZzpVJJCF0Bk/GpXkl5yctngRe/QygjauPvNdbVrqcHBwcprw/20W63mxI+03FpWblWqzVXRkr7gHocAWO2nONKmSgAuHH
jRvCm4j25p1Imi3P88vKyG5dFFGF6Z1bt04mLA0Ndc/jQfJhLly6FAFdu1Fn5nU4nFJiNORwOXZc+vZaBTj6243v5HpQetW4W6vZlVVk8xRS9WuUQnVRmmdgtvAMdB9je3l5oJ5ZFg5Wp2seNB9We9vb2wmZdcxFlKY1NAxXfyFIO0w00n3V1dTWU88aNGwAmm6Hbt2/jvffeAzBxb2H5O51OapFTmpybfHUrLDvZa3+1C6+6gXAM0Aixt7cXFmU7ka+traUOUKS1r1y5EhZlbpRqtVoql4TS/tMepIBs5Uo1hrAeR6NReCa6VPKwp8/oTfKcU3Q8si3sRD6N+y/LYo0pOhfYhWl/fz/lGsHP60GS5WeZNzY2wt/swysrKykhnKKqZPOAJ6rB5+bzbG5uhvbigdgLvH4cgfB2PVD3J/Yjihh99NFHoe242FKY4bOf/Wz4Luc69r/9/f3wnrrzFFGIK1sOVZTla3zmu3fvhvmBz/GpT30KX/ziFwFMXHh//vOfAwD+5V/+Bd///vcBTOaML3zhCwCAr371q8Goefv2bQDjg9Tdu3cTz8V+2W63E+7URZC3FuSpm1UqldT8qoemrDxSXiD+osaL517nzRG8npycpOZvFQjifojGTM7drVYr1BXv3+l0MvP5PA7onklzcgFIrMnsO2wf1oUG/asia9Zebx7IO1irEcP2KTUIWsO3hnlYQ/u8kSei4imtck9xdHSUUuTLyxWlipJWEEpJAE+sTQ21ZctmDVQqxOWFCrGeOU6ef/55AGO3Zu6JuHflXurOnTthLeDvqBKqVdwt2pbRtS8iIiIiIiIiIiIiIqIkCjNS9jTvUbBKv/HkyJMkra7Xrl0LJ0daz2hl2dvbC9Y2nopVCjaP2iwKlTy0Lnd69QLHbb4ptVxY65laYi37oIyXl0dqntnjbd0BE8vDw4cPQ33QGkZXj+vXrwfWhsHKZHMqlUpKmKFWq6UYo1kCRdXSZ91zaEE5OTlJ0O7A2CrCfkXRBTI0zz33XGCpKJzBMn300UeBKmY58qToNe9HUahEN8ungf1AknrmWDg4OEiISwBICExYJooWzUuXLgW2QIPV7W8qk8K+URSelUxd4IBkf2d9bmxshM+xL6nLJi1sas0FxswC/+Zzt9vtlMuRdYUpWx4vB5RKY7OPaIZ7a8nk9/r9fioXHuGJ3uh84gXBLsrybK2sKysrqbmRbXXx4sWUu6hazZVR5nMvIg+NJ1Orz8E+wjHV6XRCADLd4L72ta8BGI8fzgu0ZHJ+OD09Dd8jO6eM1DxYeO1fNtCdVlSWA5gw7l/60pfC2L9z5w6AMRMFAP/2b/8WrNC/93u/BwD4+te/DgB46aWXwn1/+ctfAhjPixxfdC3T8Tmttd1zo8q7l+fmlZeHz3MrVWaqiOhBmbLw+e3arp4smnuHz2EZac7V169fD94gXLfI5lQqldSc561NWr+Lyvmk+RqB8Xji31Yk48qVKyF8g/Mx2c533nknrD+sO08Gf9Esm7dXsfs7zbOo7BmQFjvRz8+bmcpjolTczKYtUEaKexwVmLBrLPvw2dmZ6y6tKT8AJHJzWuGoMmWzba75JdU9luXk3obj5qWXXgr/a85TYDKPv//++6G8ujeyoRtlx09kpCIiIiIiIiIiIiIiIkpiarEJ/V9PjkCSWeJ7ZAmef/75YKXgSZb+iw8ePAgnZbVSzIOJIsrGgHgJAu3zaPBrUWtYlnVuXnKsHotnA/b29/fDSfwTn/gEgIl8bqvVCnECr7/+OoBJO127di34pbINz87OFiL7eXZ2lmLvVIyBz6Ty4LSQ0f+fz7q1tRXEF8jA8TO//vWvAwOn8sg2TmYebOFwOAxlsqzQ7u5uKAtfOzk5CXVKKxCt45cvXw5l4ZUWza2trdA+7A+9Xs9NysdnsbK858ELDreBxScnJynmtdFoBIs3GTX6Nbfb7fBdGwB7dnYW/tb4MlrCLEs4bRybpnXwZNq9+ErPKs3ntGytjk/LdCv7bX22522h9RhrotFohOewgiaXLl0KFnOWSWX7aTlUa+6irORZ8andbjchm89y0HJJJoosfKfTCXPef//3fwOYxEqtra2FMcc5s16vZwrrTFNWZSRpabYJ7QeDQRjnn//85wEAN2/eDGP43//93wEA//RP/wRgLLTz8ssvAwD+8A//EADwu7/7u+Fe9DbQOd4TOyA03rEIPJbEWuqzLPe2r3upS+y663mHzDOOTb/jCdLYK5AUJqAnAedosoo3b94MMXusd/YHnZM5xtSrwZPdnudY0/pjv1RBJ67Hlm3b3t7Gpz71qcR7vO7u7obYZeJJyLd7fdJ6CBD9fj/V/72UNoua5/T+XtJdIBmbx76iSXetmFun00l4aQFIxN5Zr61msxnmP9umy8vLpWOkPE8LO48fHx8n9kJ8Du7dOJ8zlcPGxkZgoDh/k3G/e/duKrar3W6n0gvovF6kPSMjFRERERERERERERERURJTM1J6AqeVQi2SPBXzpEcf7hdeeCHIFPLUp4wUrc15SQ9nscryRKuKbNbPdDAYuGp61mKSl9xX5Y695IHWkqH1Og+rs2XGRqNRKLvGAtnYKP6/u7uLn/zkJwAmp3lC4yNYpuPj48w4EC1fUWiMC9tFVSGBcT+jzzUlzj/44IPALL3wwgsAJmouTz31VLAIkhWlVX17ezuwppR7f/jwYUJ226KszKxakaxinrJs/Fut6V6MCjBuL1pm6G+vlmR+T5V8rHKPxpyVZaS0f1krMdu83++nlIFGo1GwdPN5lTnkfGFZs36/H9pcrcG0yKlynD5LUXgMm8auAGPLmybL5NVK5WtyYNa/VfI6ODhIqSV1u103SS/rbZ7Q8lq//3q9HlgYWtBpNb9y5UooJ8tC2ex+v5+ycg4Gg5TFcR7P7d1Pxxnf05hCqtaRhSdef/11/Ou//isABKU73uvChQuhLpShUWVG71nKQBOas09wLuDvbG9vBwssn384HOJ73/seAOAf//EfAQBvv/02gHF7/dEf/REA4Pd///cTz//d734XP/jBDwBM5rzV1dWwPts5XpnlovAYKe+aF2PmySJ7aUbse0U9WaaNkcp77mq1GvY+asHnvM31h2vTc889F5h5ziMaq0Jo37YMuGWoZoVlwbW87J93794Nay+fm+uQrluWYffi0DUe3iLvvbI4T73PsjDKUti0LJYRtfeaJ5Q9s23P/cTx8XGCiQJ8RVmNi+LcYtl19ZbQWGDOpVbJeBrVvrwYa43D43rCet7a2gr7O7KenLdOT0/xq1/9CgDwxhtvAJjESB0dHaViEFUl1yp4F2WkCh2k8g4zo9EoNCYbbm9vLzSKuvQB4wmDA47uCtwYPXr0KBVUr25z8xhIGgTNv63u/unpaWoj0Ov1Uht6hT1AacfTjsarvg8k6f9ps8cD2YdNlZDktd1uh3bhoszv/fznPw8HKbYTJ8irV6+GxVhdd+YhMkFofgYLL1cH3QQ+/PDDFK3Lsr3wwgsp9wn+zqVLl1JSz2tra+GQz77NfjHNYqUuR9alTwUm+F
vsn0tLS2HM8LChbcHDL92u1AWJz6m5orIymZ+enqbyRhSFGgA86W4rHtHtdlOHQ17r9XoYK5z0GFB6dHQUfofPrbmirGvNPMQZrPGoXq+n3FguXLgQ2ob1zzbzyq8S9zZVhHe4UsPOIlxg1JWR7bKyshLGCduBh/bLly+Hz6tbLZAUS3ic8PK46OEXGB8suGHlPPKzn/0MAPDtb38b//mf/wlgYphhqo6nn346uAWzX5+cnMw1sFxzBNn5hvPtjRs3wpzNsr3xxhv49re/DQD46U9/CmByCPqDP/iD4NLHNqQ0+n/913+FAxf7+JUrV8LnWGeatmOWg1SWJL4aKYscSLMEJex73rif52Fe29zOEa1WK5VnZ3NzM8zbnM/YllevXg1zCaEuwnbtVpEHz51wljkvK53A2dmZu6+waS7Yd+/cuRM2tOxLFENhOYCk0dTLIbQoeJt3LaM9/I1Go5TRnW1bq9UKPfMs/U83+XbvmneQUolzT1jCGsD1IGmFJdrtdkJcAkDiYDWPPHNWiOv09DSUnYefZ599Nowdzlf8zHvvvYf//d//BTARSuMa2263w9zIdXplZSVhxNJr0XaKrn0RERERERERERERERElMT318f/j7OwsJc15cnISTqa0lpOGu3z5cjg5kkUgI3V4eJhItgr4CfZmgVKi9jSvspH8m1elQK3bDTA5xXvZnu3Jvd1up+hRXgeDwUyn+iyZV5XFJHZ2dkK70JpOi9GPf/zjwOiwTLRG7+zshPbhPTUx3zxcMLX9PXlo3p+/Sav4w4cPg6QvXfx+/etfAxi7KLK86qIEjNtEpbmBcftaSWhNtlqWvfHc6zTjNjAeA+yPLFuz2Uy5WKlYBl+zAiAqMKKuZJ5L37RlIlQ2llDLFX+T99/d3Q39g+OD16WlpcCusc1ZtqeeeiqMQ743GAxSkuO0KKmFugjUWp7FcqnrmrpmUjKb7AHL02g0Em6OQNKCqCwdf2fRgdd51npl3qz7jbpyWku0XjWJJT+fZd2blwuM515l+9b6+np4TrIxtF5+97vfDWsRrZZ0G3n++edDH9R2yrJcTjP3ad9g/yL7xfFw/fr1sHaQef/Od76DH/3oRwAmczWlzv/4j/84MPJk2f7jP/4DAPCTn/wk/A5FD5599tnAjFi3oV6vV7pf5olN5DFSOv48b5girn2eK6Bdo2aBJp6260S9Xg+v0YruJd0l46tCNtatc3d3N1jU6ZZ/fHwc2s56yCxKMlyT1fJ5L126lNr7sA7u3r0bREzs+qleSzp3L1ruHPDrR+d4L90Fv2e9kpTFsX1s3q59eUl3PalzXeeta73uZe0403Lb/anuXdkH1MVvWtc+IC3opa6tvC/3Zs8++2zwLODz0rX8jTfeCP2O+1lie3s7uAByT6WiQbZdo9hERERERERERERERETEgpDLSHnJDq11t9/vp+QWh8NhOO3RF5gMwMrKSjgx8wTJOBQNdFf24XElZdPgdesLqwyWBlAT1q9WhSs8K57n283rtP7BeZYWtWLwdH/16tVgGbNW2tdffz20J2MDyEitrq4m4n1YzjLPdR60fmzAqsaX2RgzldBnv6IV7969eyGomv1RmSlaYjVGjXWlAeZ8vrJSwMpi8JksI9XpdEJdskxra2vBiuIl32UcoiZHBpJjUyXv+bdlQqZlo4CklZhtQvT7/ZTlbG9vL1hZyXzS0lWv10Ndsf7Vuku2wIqPAOn5qawUsMdIcfyqBLtnQbfPoHGTNobyvCBla2WeR7LNsuNQRTI4F5Dt1ZhItiM9DA4ODkL7KXu/KFngLDZI4xbYJ4fDYbBSchzQerm7uxvGGRkapoO4fPlyuJf67nuWafscRaExMewfHNMc9xsbG6G+f/zjH4crv/ulL30JwETq/HOf+1yYW1577TUACMIUjx49CvM/y0uGBJgwUSrQNO3apDFS3nrrSXnzb2VBCW/d5NVL6urFds8Ddqyr4A7nLq4rW1tbqfhJPs/p6WkqvQfZ0Tt37oRxx/dOTk5SsS2ecExR6D2yUrNo+dgvn3766VAm1gXLe3JyErxC+D3dV9l90eNgo7KgCW6tV5TG+up+CpisT7r+LWqe85Lu8sr5WRkplTpXrxO9l7KAnqiSFw/lMVHAuJ7KelNpXVmhB2V72ac0/QZ/l3upW7duARjP5/xb014A43ncY4EtGxZjpCIiIiIiIiIiIiIiIhaM0qp9hEoUWiWQRqMRTo5kpPj/2dlZsK6QkeKJUmMdPIvtPMBT93A4TDEcPIUOBoOUhUxP7ta6okovntynLct5CQJnsWh4qjpA0sqp8Ta0Kqh/KTCOL+J7ZD9ora1Wqyl2zlMwmgVqjfCSwgHJ+DNayNbW1kL/YtyU+mXTIsM+Ryv6U089FcpH60Wz2UxZMK3VpgxoPTo8PAyWRfV5B8btxPKqUp/HRAHjcaXPC0zq7vT0NCGBCoyt8KrSByTbcBYpYMLKxiqzoewX5wvWBa1Iy8vLod/SeqT3pyWKDBaQjhOYxXLOq7WQaQyQxk4CybhKj+GzipAau2PjJTURrjd3lB1fZT+vVlfLRLEO7t+/H6x3Vnlyf38/wWIAi4t/yJPLViVCfu74+DiMCZaJZVRVNcYVcdxVq9WUcprGDM0jjlcV8dgnyDbTItvpdPDuu+8CmKgNHhwchOf9+te/DmDCTA0GA/zwhz8EMGGkyMhtbW0lUkMA435qGWs+l1rdi0LjDD0miv/bGAUv3UhZaXSPoZkHI+Xd1zJjtVotFZ+3vLwc3rdj5/DwMMwf9KRgzOX9+/fDHKleP5YFnUdssiKvjtRTgDFghKrccR6w9a77nKJtMm+mR9uP9+73+6nYLZZVY2kss6NjQ/vtPBSMCc69p6enbmwUkFToU+XXvPh+G9/H8i4vL4d5SON+2Z+tGrWuW2XheXmwzlqtVlgb6Y3SbrfD/oVj46233gIwjolnvdgE2Op5xHJrKgEvvq1I200tNqEShTY4fnV1NbiB8coCHR8fh4mcE4W65thN2DzkixUaOEjYgNV6ve7KPtqgaktB6r28TNDaQdkxrGuaHryKwqsfu1jpbzBoemNjI3RGUvBcpPv9fjj8cnPBNuz3+ynpVe85Zmk3XYDtfVR8QrNTA+MNBw+KKkABjCcZDhhONuyLJycn4bDBwbq+vp5yLdPnK1s+lTy3mcbZDqPRKJRJ6WiVOweQEJhQcQm9l7oQ6kEqa8M/TZm0nWwdaWCypc4Hg0GoA/ZVPuOHH34Yvmtd/HSzqcHNWTnMZjl02E0Sn+ns7CzMD1oum5dOn0kNGnpPzVeV54r0ONxerCtPr9cLCxKfn22mQibWzaTb7aY2wfMWDSoCdVNSlw0bzMzx88wzz4SxT3dmts3R0VFCTAkY10lWmaYpI5+10Wgk8pPxuYHxPM28fnTxu3z5Mr7yla8AAF5++eVwD2BsGPvud78LYCK6w7nyxo0bwTDD8aUHTfZVQoV/ypZJ/7Zrk7r26dixaUA8o40n1Z0nTpH1vWngjVP+jmcMGY1Gqc0f1wSVvFeRCWA8L3Js6YYvq
y3mtWfKy4ek5cyac1UMah6Y17zh9RVN48I6Zv3qXs3mONI0G1ZCe95QcTQ14AHJ/FB2Pla3UXtI0VxRutcFkm58+pp16eNcM83e1TNcEmq05mGdz3F2dhbGBw1iNMTu7e2FMlmRrs3NTXft9g6YZRBd+yIiIiIiIiIiIiIiIkqiNCOlJ3cgKWLAE+TGxkawnFvXnIcPH4YgSlpeaIVSi9c8k/AqNHDQCkPwpN3r9cIJWClRT5YRyE/Qq7/Dk7BaqzxGah5Jzax0MzBhlFi2arUaTvW3b98GMGFxNAs7rbS0Qmmbq4Vjnm2l981iGzSIX60qLCetEWTgVGhBhU2AcTuzPyrbynuxnfh7au0uCpUl5d/WBapWqwUrNOv/qaeeCuOJLjgqMKHiEsDEErW/v59goviex0SxTGVdd7SdVHKcrwHjtlE2k99jm9m2ODo6ClYmPhu/v7S0lEoKqqzOvCygH+copgAAIABJREFUamFUphoY9wVaxjxGSlk3/YxCWVWb+FH7tee2syhGxzIGGnhthVWGw2HKXdOzlqubzyKe1cN5DL2uN8BkPlQmiO+pVVdTPRDzdKvibzabzYTbKjCZl2/fvh3GBj/zwgsv4HOf+xyAiQsgpdF/9KMf4f/+7/8ATNqCXiLPPfdcKC/bUNMvWHfmaYSfPIuzdeNTV3q9WvGbPFnpPGl0zwVvlnnCq4OsBMEsC5Cc56wXz+npaUoYSNNT2PHnybwrG/A4Wd8nJVleFh5zZgUidJ/H31TXTMtIabssKhyFYP/pdDouE8X/PYlzuwbp/KyeUkCSfeL8oO7nnksfkJT8nwVWTr9er4f9F9vk6OgozA8MS+EcCUz2fty7cg/YbrcTwiLAuA1nTWMRGamIiIiIiIiIiIiIiIiSKCw2QdjTnPrD80R78eLF4GPO0y0tMPfu3QsWNZ6ceU9lpBblT2+ZBSAZ4Mr/bUxHVkAsr3l+wtZi5DFvynRY3/Ay8CRLgWTcF+tA/bIpusDvbW1tBTaRn/dO8B7+P/a+rMexLKt6Oeyww5ERGTlnDT0VPTJDt4SE4IXpD/BDPl6QEEhNVQMSD/wN/gaDQAgxdwM9FD3QdFdVZ1ZWjjF6Cn8P1jped999j++1ryOzYa+XG2Ff33vGfc7Zaw9t22dblk3raMNKq52u9Z86OjoqaXA0qSHLzXqqBpGfsQ3V56MuNAS5OqwDRcdK66fx+uuvJx8pBsSg5rnf76c+4xzTABM57abXh+tq03R+WO28srLU/s/n8/QutoWOWbY962JD0tryt+VL5PlIeX4QVk55vhmE50jrOdV7/iQvAzq/bMCNHANnAy8oXlZoY9uO6geoGk9e2a+Wcdckw4QyNG3UTxlPtiHXSFoOPHnyJN1Hu//Pfe5zSePKtfVb3/oWAOA73/lOKjfZbAaAOjo6KgU9OD09LfXjJpp2byx5663nX1jlI5ULiuMxUvxcr/Y36yL3Wxsqez6fl/zOdB9lA9jYFAIKj5FqYyxeVVLcumhzL+H5MXrBJuyarElpbQoE9qeuZ95+uQ1wXJydnZUCSuQCS+g6afedg8Eg1c8GlvB8pJSRUt8oXjfxkbKsscppDQzBOqnlDbCcS/v7+6ls3MOyTmrhpLEONu2n2nmkCGvap5H2NNY7N4N8Bs2mNAINK8R7tCPqTqCmDWAXz6pnepv3qvwK3nde+eps0oDtUMPdbrdkMnh2dlYKdsDDx3A4LAw+YNnn6+SA2GSgWjPF3OKsY4hXCoHd3d0kNFg3jVzn5S+wigN1Tm0qNNjWal6nDubAYg7ZnFFvvvlmOlRxXmmACQ0uAaAQYEJN+oCF4NccElqndXKYab/ag5k33jUABcG5aBcwYDnmWMfd3d3S4ue9c5sHEW0nzxwvlyPKc6q3hxQvwpl9zlWgrszTAzNx1YElVr3Lk8N2rVHzVCtrdGxvKxAS54Gar2qkT4KbA+bAu337dlJGMHgOc7Odn5+n+2kezEMXUDYN0gAadjwDzcefZ9pn29Q7SE0mk+xBqsrMzwsA0XagkzrPuLy8LJlO60HcBkFRc3kvEq5nxleldL5KU7u6kfbqlOmqD3DeQUr7wR6k9vf302dcv3nV/cC23FL00KSHKqB4+PYUpXasUNZoZD7v6uWMUlM+oEgCtFFnT0HgBfmwJowsx8HBQcE8Ub9ThW8ux2HTeoRpXyAQCAQCgUAgEAg0RG0bsiqN6nw+TydUhmu+detWov/UiRVYmCZYkz51rN6Wox6RM8XJhR/OYZX5mWduV3X/prDPUU2EDc95dnZWCD0PFPPb2Pu9cO8e67At2Hb0GCkNw221MN1utxBsBFgyQepkak1P9Bm5cJ2roHS8MmjAUuNz+/btgkkfsDDxIxNlNSyaj8nLGWVNGNVMxI77TeafjjvLWs/nczc0umaFB5YarsvLy1I5PEYghzaYqarfahAGNX3zzP20LHq/N4arWKirQpO20mAcdm7oONJ7tiUfmj7XM/EhbNt7eQTryrymY8/Lu0b5zPF/cHCQGCU6VE+n05TigUGDNI8KGW4yU2pS6+WKsvJzE9QNNuEFarEmbboHqRN4Ireea0jyNselltGuV1ofazKrbWBZnF6v5wbQeBlM1P8GaHvZMT6dTgummECRkWKbW0ZKTfs8BreN9UhZqJwZqGXTVfbaIGfD4bBkvqdXL9S5ZaK03uuOQU+m6jpv92Rqgkmo2w7La4MdqVypmzOqTp2CkQoEAoFAIBAIBAKBhqjNSFU5HXc6nYIzP7BwKOdpklp4Osu+ePGiFF5SNWBNmaCmWJUB3X6n91Ql9fPYCc+eO6eFruNIuw48x1tP62cZGvWnskxUUy1eW/XxtDpe29ow3J5W3GpodnZ2kvZCNaZWq7iJrb0m8tTgEsBSu3z37t3ERNGZ/M6dO2lusV80bK5long9OTlJ71S7aasl83xc6kL7pCrQiTpzarvZMce29YJIeJrtOk69nU6zMPXevTmtt5Vl+ve6fp5XGXSi6fM9vxPb31646VVt0YYm00Oufva3Te6t8+6mUBlmE35STl27di35Iqv/MYNMUBZwTh0eHiY226YbODs7K6UeyPkp6xyvC2/eWmbKC1zi+UgROUbKY5+qkvRuA7m1Sb/PsdBV+wyg6H9TVZd16rbJ/NvUv+RlwWPJlelgvTRRraaAAYrBqnKWCBZN1yVgyTKfn5+n96qPN8uhTBSvmtxW63RwcFBK/k2rl/39/XSfpump8gXetN9zc9MG+lGrEBvvQINb2TXK8z/O7Wfr1ikYqUAgEAgEAoFAIBBoiFqMlKdtVv8mG9p4MBgkzRI14poItUq7v23/KIs6YTHrarU8H5qqZ3nQNlnXRyJn66mRTyy7CCxP9bmIY001ttuCx4IQyvZZhkNtor32sd9pX3ht0LTOak9NzZBNvnv//v3kI0Xfhhs3bpT8iaiBOj09LYRVB4rRAamx0r72GFjbBk3hadiqGCoLGyVsZ2enUsu5DkPT1tj0tM36XZP50dTfpm2sy0R5cy+nQd8Wk7OJxcKm1g7bspZQO37KLC+Vgw2N
fnJykrTVNlLp0dFRwQ8UWK7JGopbrUJsf27ix+tZWnhpRGwS68lkUimLtH3WTdK7iRz3UIfJ9uSH7hOq9j9Ve4g2mKg28OPCQHlrim1zz0dK/YlsegSN2pfbx7ZhWaAslA1xrlZD6lvH8lgmSqPa2Wh9/G5vby/d76WIuKrIrFXzxrazWiBVWUu0EaHPQ+2DlDfxgUXDatZjfsdOp9BWp9YqkyIVGNuCR6F7Gz0vGIQtmzdhvI7LCfttwzN/0qtdrPT/3CGiDjapb52Nc65uQLkv9DBZN2BB1QF5HWguJApnNekDFqkD7t27BwCFABMaXAJYzqvnz58XgksAxRxZucAZ3nWT+ZczlfLM8ap+7x2kvHdUKUK2BR2T3vjMjdmmgRxedTSV1a/yIbGt3276e2+ucgOj+eu4eeJm6vz8PP1GHeP5OxvswAsCs+0cODnTPi/YhHeQYtk0z6O3RnkKz9yhYxtjs6kMyM2nq1BM/F+FbbvZbFYKP885uL+/X1qH9V7PjaJN6EHKyxEFLOqj4chZfj0MAn6I8zqhzj33m7ZkRm7/4O2/qkwpcyRAXfPkpnMqTPsCgUAgEAgEAoFAoCE6L4MlCQQCgUAgEAgEAoEfZwQjFQgEAoFAIBAIBAINEQepQCAQCAQCgUAgEGiIOEgFAoFAIBAIBAKBQEPEQSoQCAQCgUAgEAgEGiIOUoFAIBAIBAKBQCDQEHGQCgQCgUAgEAgEAoGGiINUIBAIBAKBQCAQCDREL/flO++8k5JM2Uy/XiZ0ZoI+Pz/H8fExAODs7Kzwu8PDQ9y5cwcAcOPGDQDLrO2TyaSQvZmfMdeVl0lZylorFfFXvvKVH5vEWW+//XajOq3KCZbL1txGtmf7DO/+unX6gz/4gzmw6H9mD7fZvOfzeRqHHEP7+/s4ODgAAFy/fh3AYswBi4zdzNBNMEP5xcUFTk9PASzH7NnZWWEc6ruBRZZvAPizP/uzWnXifFrVjrnM4Tl4Wbxz960oa6M65VBV/rYyjNcd93XG3pe//OXKuUT51gY0+zrh1TX32Ve+8pXGfcTfbpo/UMuVG6deRvmcnGhj3Hll03ezH21/9no99Hq99Lf+bjabuTKA/Wiv+tumctzDtvM91pUxRN06/emf/mnlHoLQuunftn+q7vP+r/osh6ZjT5/v7Y/svNCxYcfgdDpNaxE/030Pf8v1q9vtumOOv+dvv/zlL/9Y74u8PqzTT69qfTxcpXyw47TT6dSaS3WepahbJ5XjfK8d/8By3A+HQwDAwcEB9vb2Cvdx//bkyRM8ffoUQPn8MRwOsb+/DwDp95T1+ixvrcrVKRipQCAQCAQCgUAgEGiILCPloYqZApYnydlsllgEavSJvb299AyeMgeDQXo2f/cqoonWbtsaxLZRR1NtmcFVaHq/99vZbJY0wNTYqebOauVUi8nPqHEYDAZpzPE7apkvLy/T2CPTpOX2NBSqmW4LnU7H1WRWaf1V0+8xJi9zHK7T7y+7DNqnbWq9bTnYVx57orDaZk+DWBdt9IeOwyqNu45D1bzXYazbKFvu+ZeXl0l+EMpC6VoELOXDdDpN8oH12NnZSbLCWmhsOu+uYt5exfzUdqnTP56ctRpqZRVzv/PQRrvmmDFvPusY4WfKRNlnWWYKQGnd2tnZKa19Oh+3sTa9DLwKa8j/RnhyvI5Fyzb7w77X288oM0srJIKyutPppL9Ho1Hhnl6vl7UsWVd+Nz5I2Rd6DauLjj1I6QRnQ3DxApZCh9CNYlUZtolVG51Vv8sJ3baxapO1ycGm7vv1Pfrepu/USWUPUhxbl5eXabzwqguTNYcYDAaJzrWLXNVByk5q76DWFLn2UMGmC6U93Glbe2Yi3jtfdax7UG8T2r+5hcW+u67Zn3cw8jaYdi5tMmc9hcC6z9CxmdvI2c2gHqTqmjA2LZv3vypkgMXcYJmsfFDTX5afC/L5+Xn6m3NRTQE9Bce6qPvbdU1jX8bmVM3TPNPlOocqPVBZmWf72ft92/DMVXV+828dL/Ygpb+z8ptXXYf4ezVHqpqHTevSBHHAeXXhjXs7NnJ7F117rvowBfimrQTn0u7ubjLRI7h/0/XHnj96vV4yD9S5x3dQfjQ9UIVpXyAQCAQCgUAgEAg0xNqMFOFpky4vLxOlRmcvNaXiqZKMlDqNWVMJfs53XRU8xi1n1khYzXQVS7QtLVnTk/Q6gQBWMUxtsHGqhbSMlDp9W0ZqNpuVtINq2sexZr+7vLxM2gt+5o1B1Za02Yfanp4m09ZTy2U1mdYsyd7/qiCnMc5pV7dZh1xwBI+t8v7Plc+TD3Uc1Vdp7+tgnXarMt9TNsaa9OmcVdNZ+yzVqreBHFurWn5+T0sIBqfZ399P5ea6dXJykq6sJzWh/X6/MniNZ362Ck2ZqNz9rwoTpQxR1VjS8Z7TlOcYKXUraMpSbbI2VfWx1km16GoBASzbQq0I+Df3UPP5PI0znTvcP9n1Qt/dBrz2qTO2NxmDVzVWt21ufFXwWKScaZwdM0AxiBev9hltjy1FlZWH7qE1uBiZJZaHMttjpPj7fr9fMnv1rICamsYGIxUIBAKBQCAQCAQCDbF2sImq0JvA4jTHkyBDElL7OJ1O02/IDvA6m81KWkpPc/SybLwJLxiBbZdVjrFtOSWvAz3dV2kAVbNncZXtP5/PS0wUbWE9RmoymVT6QAwGg6TF4P181nQ6TRo+j5FSjec2ob4EqslkHWzZcu2j3+k4exljz2Otq9q02+26zDTQPhO4qpyrUEfD7bW9907tlyr2e5saQQ9WrnEc9vv9kqzWNAXUpquzrw0tbt+xbvm89gMWY8zOCWAZQpdpEY6OjgAsxt3z588BAM+ePQMAfPTRRwAWY1TTKAALZkrljn33Nsapx5R648tjMF+G3y77v9vtpn5nm/Hdyrh7Pni23J7WnTJE1y1+1u12KxmsdervMVK2L3RMe8GOrIZ9MpmU2CnuoWazWYn53N3dLa1z1lphXVTJL2/c5Fh6RR3W3buvDTTp41XWNuu+46r80nW/6aWJUSsXYCHHrQWLjmlvv5PzSV0X3tjy2pTlHwwGyTKA71f/d/Vt1Wft7e2VmDdPRuq1zvgJRioQCAQCgUAgEAgEGqKVqH32VDydTpM2hTbm1PZPp9PCqRJYavj0Oz3l8oTchsZlEw2crWcdrZ++z9PebIsd8J7rMUzUTFiN12g0qkxcqSxBU1+puvC0fpZ5UXaT9VCNpLVRV7tay3hMJpM0Huv6SLUJz0dKtUbUtnAeqVbXhvrUZ3l2x9vWQnsaK4+ptf3paaNtiFONuraK6VkH6jNH8D3a5nY81PWfyqFOHTaZS02h/Wb9WQeDQfqO446sz9nZWZL71P71er1akaKalK0KrO94PC6w1yw3E3Xfvn0bwNLn6fj4ODFQ77//fvoMWPhRXbt2DcCSwRoOh2nscr3jmJ7NZlvxU/H8gwCUmHllfzw2mNdtyTOCFinKMts1x2Pcd3d3K61fPPmmWnhlp6o+89qgLvSdNjqlltHKDbWIYB00SiTZUD6LY2o8Hpf
tzQ+CP+Tvcq447HMvmnMp5rTL/fz8Wpsb6Hh4epL1hv7Qv7fPU2aMoypR4tVs7quCOOj49dkglgOA6tlVS5A6xFan5+vtAipV5P04Kux2xXJWxhGTWRuv7OmxcXGR+q+yBraT8+Pk5zgXODZwZgtJfg+OMeWdct3ZeXpf+pgtKDFHF6eppx0QBGg2tjYwMPHz4EMHJl6ff76VBEofDFL34RAPDGG2+kz2imZiPt7u7iwYMHAEaN8+jRo+Tew4bThaDMtaIqyjbJGrTIAceJwkV2dXU11Zdl1I73XN2m5Uph3T5042SDIRcWFtzM3nyOXZR1o0SBwsG7v7+f+RvIMvRYYVu1HgpvgnuMRUW/8zb7ZeQaHquhR0hy48aNWnXzoJsjm49C3eCUkAAYCgEN4AWyJAdeTqNpCEBvnOu7OSY4DpQFk3XjgWp5eTktvPoZMBy7PFRRLuzv72fyvTWFsnbyWO/sodUjouAcYt8eHBzkgtI9+TBJnylBBNucMkAXf7on86pEGLyf8sLLt8I+brVamY2W1gPwFVjTYhmzCqvFxcUkt7kJV5d0mw+QbaGHMtatLO9N0/BclZSdzrpy6yHXulLqeJt0A1EVZcQW7XY7d1AHRrKObc/5/vDhwyQD2He6vthD2dWrV1N/Wlf9cdZfVWrYNUkVFHwX5fH+/n4KXWA4A3H37t20j+L4pIK53W5nXJuB4cFE56LWram+LJM5qjC284jluHLlCl566SUASFfu+VQ5yHGs47EoR1bdsivRkWVznJuby7Xd0dFRzqWP1729vYxLvT5rcXExrWO6v7IHtWkpjwgvVEFdpymvuK4+88wz6TO7fzg6Oipt92kpkTxyMfah7jHYTzx3UHEJDOsFjNY31nFhYSFHbqV/jzt3wrUvEAgEAoFAIBAIBGqi1CKlhALqygeMtA9bW1vptEtT9Pr6Ou7cuQMA+PKXvwwAeP311wEAL7zwQjolUpP+8ccfAwA++OAD/OxnPwMwIqnY2dnJmVHVfa2uRsk7sRNqbbDanVarlbRlSl3Mq80Boc+0ZA1Na2DZJ3Nzc0kDTA0423h+ft4NHLfufurqZ4PIqU1Rs6rmyinSbo5Df87x5pEvlLlDnkceURR0qcQS3jiwbo76HV1Um4BqudknKysryULDKy0FMzMzqY+pmaFmc2NjI2mX1O2vyAWvKajbIa+UJSzjkydPkgaMY5Xt//zzzydLADVnSrZhNVatVitn8ZkGnTZR5BLFsUGtsVLl2jGrLh9Tz3HhWKV5pUzo9XpJLit9r5ULam2zlijKoZ2dnWQ94Pjb3d0tpLQfh2yiSvoA1fqynxYXF9Pc4WfUZN6/fx83b94EMGofjkOWGRj180W489lx0263U5+oVZHl5Nqk7jpsZ+uqtL29nQvUPo/Aqa7M8AgQrMVW11ZNe2IJP9juOr4o69iH3W43Zz2dmZnJEDKxnizXJF4h1otBxxnfxb3SxsZGmvcvv/xypr4//OEP8cEHH2TahxapK1eu4P79+wCyboLWLX8aVnktj67F6kZoZTvLOhgM8NOf/hTAyCOJa+X169czhGVF7xzX6uFZMzyLlLVOHBwcFFqk9vf3U1mta/TS0lLOta/b7ebGm+4jmnDtq9LXs7OzSc4///zzAICvfOUrAIZ79rt37wIA3nnnHQDlOVCnCdvH6trHNUz3oJQBmloJyHpePPvsswCy6xzfo/uOSfsiLFKBQCAQCAQCgUAgUBOlFin1rdZ4F2B0elxbW0taMGrzXn31VXzpS18CkNe8HB8f45NPPgEA/PjHPwYA/O///i8A4P33308xUnx3p9NJ2iRagLyknnVxHi0soRpknmqpcdD4AWo5qIVWn1u12uh3QJ4ufRzQR1nhxXDYQFvV1ni06ZZym/Cyc3t06RpbVddCQG2D1sWiyPrkxT/ZMnoWKRtPdF6M1LR9nFWrRi0r5xF9gLvdbpqTVhtd1G7TIsmwUMuU9fEfDAZJE0aLtFrSXnzxRQBIMQXE0tJSaoMy0plpWH+9OBXODU8jyf5YXV1N5DssM7XsKysrmUBYfU9TqBLrcHx8nMaRalEpzzRhpb3fI5bg32qF8mLB+MxpWEd1vrNN5+fnM6QmwEjj/PTp07TuWO+DTqeTs2Z7lP/TjjXSJMlqxaE8oMVa00dw7bbELZ1OJ1lL2IdHR0duXCVR1zJQJnuVwErJQFgPrvOUAZwfR0dHmQTdwGjsHRwcZCxAQDbuin+rVazu2qQyz655hJK3sP0/++yzNPYZI/XCCy8AGM6T//u//wMwWvsYe/vss8/i3XffzXx3eHiYsxYr/fM0YkXVW0I9WdgHGmsIDC1TNpaNc+3VV19NY1VT3TQFJajyLFH8zpJn7O/vuyQTwLDNeb+Nl1diHiXkYTmszLsIsgmdU9ybf/3rXwcwshB++umnyRLFfTjH9MrKSsaqa587jbIC+bhgJdhh+x8eHqa5wDHGs8PCwkKSh5puhXWycqhu/L6HsEgFAoFAIBAIBAKBQE2UmnM0tkc1r8AoduHKlSvptEst8gsvvJC0DTxJvv/++wCAd999F//1X/8FAPif//kfAMCHH34IYKgBoNaAz19ZWUkaKfqB88Q/rjWK8DSM+j8w0sR6FilqnFhmIE8Pr/Eh6sNchRGsKmiR0iRl1gqmcQiett5asDyqZPULttqdubm5DF03n8Fn1q0TrRMKz3Lo9V1REt2iOCh+V+Uz77uqKKMO1fJbxrODg4OkMbYpAJTC1ca9KCujxhCxj5tk7FLadmW65HesE7V8m5ubaaxSNtCf/oMPPkgaM7I9MV5C6YqV9YnjsCyR9iR106smKNe2ZhlYL/qjK8sY68/ydTqdXEyHR38+CTROiOOG0LFGTayXvFkt7LyfY8peDw8Pc/JH56oX49akFc4bz15yR95n4z0B5GSZRws9beuTB41TUap2jkN6SagVh2uRjaU5ODjIMfqpdagJK5sXr2bnqFrNOEbm5+dzcaGso97HMtIKorG6ajG1Sb8n2Tto+1iLBuvU7XaTBw1x//79FEdE+faFL3wh1Zf7IFrouZ+6efNmku1q4VV5oeUaB1VikTSWjTJrbm4uMfHRU4Df/ehHP0r15frFdX1paSm9i+3Ubrcbs+zq3LD7F/XY8MYKZTQtHhybOjeUURcYzrsiZl0gz2SraQnGQVn72L2cpsWhVfp73/teuv7gBz/I1OmNN94AMLJq6zMvA7p26P7WpuBRLzZrOdU9u03EXsYoC1Sre6k0UXcIGwDPSXPt2rU0kXj46ff7yW3vo48+AgC8/fbbAID/+q//SiZsTjIW9OrVq+lQxg3UrVu30rv0wAIMB6NSGE4Kb3AqwYVumoDshPQ2v/zfZv5WTELDSugGhn9bKnKlJ9cB6G14WC6bU0UD1TUgnVel/dT79eBVFQwgrEtZfp77nvcZ//d+V3QoayqHmf2/qIz2gE5hv7q6msaozX9xdHQ09ezpZfm72P/Hx8c5pcPJyUlyveF4pMvvxx9/nNz+uNHgpuLq1auZZwDD9rGbc5t3ook66rPVNQ8Y
yj4qelg+1uvJkydpo2cPVEdHR7lUBbqhaALqVmIJIvQ9drFXFyrv0OQRFQDZAH5VRNk8LhoEbnOZNAWPkt4qtnTDYeWUKlDsZnMcEp1JoQcSTUli1x3KYM0hY9cfJTqZVr45lWu23YnT09Nc3i6lZebhkPco0QnBjdLm5maO/vzk5CSTt03LNcm6qwpCm2txdnY2yQP2xdOnT3Hv3r3MM0i+cOvWrbQfomLpG9/4BoAhMQP3QEoipe6betU1sy68DaSuvzYvz7Vr11Lbv/LKKwBG4Rwvv/wy3nvvPQCjQ4nmO7OH+JmZmcbGoRe+YMk59N26dlqXPs6twWCQ5INVXPR6vZxCTJV+qtDm/5O49tUh4zg5OUnrD/fl//zP/wwA+Id/+IdUxl/7tV8DkCV8aXIdrYqyfZjmw9Kcc8BIBqyuriY5Yvcds7OzE1PrewjXvkAgEAgEAoFAIBCoiVKLlJ7irNuQal9ptqXZ8OHDh4nG/Ec/+hEAJM2EBlzS/YDWpy984QtJq0HaQs2EbbWpmih4HJTRdVu3tm63m0teqxovSzfOa7/fd4N3realLGj/PGhyVutWp246tt6qqVN6bCDbJjZxXpFFihoy6/qoNOtVQetEmaueaok9q5Nn2fEIKIq+81wHVZvRpJbGs6iplozum6SWVep9zkmOHfalWiFVKz0N7VIZTT2QTQ4IZK0d1PhRfuzv7+fSLFBbe/v27SQ3OKa8vtNyNZ2gUt1elQSEweHsG753c3Mzafn4HV1cdnZ2chbrpgN7NZG552LCe6w1e29vL0NpDmQt7dbSoRZs61KhZByeZaRJ1z61slhr9uHhYaof685yXb16NfUnv1M5bq0YF2GN8qzx7E8lAtG1CBj11/z8fGpv3q+JrG0Q/LSos/XZakkDhm3LMnKN6fV6aa5Y4o+bN28mS45N+Hz//v2M2zOQJRWwa4MtY9262XZXEgyWl/Lq6dOnyUOH8pweOK+99lqScaRB5/9LS0speJ6eGjr/rKunpriYBHbsaRoctSKxfiTOIJHGrVu30t6O9Vb3YWv9LbOk1e0jlUU2DEGtOWxDzo2dnZ1MAmWtK5Adn8DIIrW8vJxxd+R7rDurWlSa8BapQiSkxAwcP6zvq6++mrw93nzzTQCjpMlnZ2ellPrTSshLWMs7kE2Gzvam2796y9E7jrJDZY+m3eB7JnXXDotUIBAIBAKBQCAQCNREqUVKrTKWmpeaSaW6Jf3gRx99lAIn6ZvJ0/3a2lrSWDDgUgPLqd3ge/r9fi5xp8b6jHuqLzt5avCz+vV7liiWy2puNRmn9YVt0hcYGGlF5ufn0ymbGhPV+nnxWzbOQbXjVnOsMRyWUlStVDYGQoPnq6LMIuUFsFeJa1JrjP1On0V4GrKmNbb2eRpLoNZCmwRUk3JSo07LlLa11YhpG0wzcSPgE5eoddMSNjBm6vDwMM0natCURpZzS9MhWAKBaWnYCVvHdrud05xrndmndr4NBoNcUshJYhw8UAZobImSRgC+9WlrayuTUBfIkpZYS5T1VQdG8qEoxojXaSR+9IKI9/f3c2OE3hU3btxIspTtQ00u2wnIW/v1+ZOgLMDZk1MqD9kvtOCWESHpOmq17pPGflp4iatZbvUwYfsqvbS1WFDL3Gq1koX31q1bmTq12+00ZvkeTbxaluqkLrx1XGU2y0+N+ccff5z2SFzfOFdeffXVRHHOOErKvvn5+YxVCxjOP77LtnFTFgK7VmpcD7/TBNGaQB4A7ty5k6OfpiX+4OCgloW3bj+pZ45HfsX6qPcGMJR9NjZKSVE0sSuQjZGyJGhnZ2c5q4rGSNWl3a8Lb4yzb772ta8BGMoJelKwTrpWlVmi7P+TjruiWHjvs3a7neS2tfb1er0kH9jnSnFuPWHKLLhV6xQWqUAgEAgEAoFAIBCoiUqsfQByWnJed3Z2kiaZ2oZHjx6l73naJYXpyy+/nJi47ty5A2Dkk9npdHLxEk+ePEn+xMpGBmS1uU1ANeiaiBcYarUsvbP6Q1NzSeuZJjiskthwkhgpnswBP/aHV6sd0Rgz+11ZPBGQ1yKpn75Hg16XbpZjSbUFZSx8XoyRR29vtR5e31ShFp0mPMppZWZUdLvdXCJYas2UBvSiEobqu3Q+KXsby6gJa4FRTMD29nYah5ZSfGdnJ5fAcWlpqTCZ7TRiBU5PT3MsXQcHBzlmO8suqOVTq4YmwAXqj8nzoBYAG5+irJ42vrOMmQ/IMpoCyKSHsPGSvV7PZfPkcyZNZaFQmWTlgxcXRG3/7OxsKi/rbSmQeR8wXlqHqijqby/Ord/vJy061031BrBzwrMweOyEjbBZOVZpWx5lm1XLO+9j2Rg3PT8/nz7TpMR8Ft+jsS1F6QTGqaO2bmHQAAAgAElEQVTHbGvZ+w4ODpK1gmVbXl5O+wOykjIm/Pbt2yleimOPFqmbN2/m5pPG99jYn6asu3YdOj09ddmAKS9YN+71tM9pCWGZ2+12JnaTaGo+qeeDtUipTFDPKl41Aa+i0+lk6M71urS0lPNYUrZda5FSz5km4Y3nmZmZNEY4FjVlCsvtxctXkQWT9Jn3W/suz0o+NzeX2p514TjVvQXB8aqpOTRWalJZV7p6eZtwaxbb2dlJE4kD6MaNG8m1gMKPQYcvvvhi+oz3cHA9ffo0mb55ffr0aRrYNg9JUwuwPcTooUAXJMIGKAL5zN0a5G87SZ/fhDmek1s3ZXbDpu6KKmjtBts7kHj5P7z8St7BRa91wA3BOK56kx6WCK9PvDacBLb9W61WhrqVV9uvevC1AddabruITCtDudee6vplKWg1LxsXWbrAbG9vZwhltPxzc3NuTgnCI1cZtx62b3QeqPwDhocC1s0Gux8dHWUC/LXsVck/msgRY/8GsnLCpjkYDAZp/CidNstj85cp8YwlnFlcXEz3WbdgHfNNwsttprmT+BkP8t1uN5WR/WsJXFhevV4EtN+sXNbPCF3TimSAR8TStKJF+1WJm4DsXoJ/cz7dv38/13ecO5qfydKaLywsZALSgWw72XE2zrzSOVCUbqHf7+cIV65evZrKxpAHzcfJkAfeQ8XS4uJiajslbLH0/Vqnuv3oKdrswUzXfT2AULZRoU5ijKWlpUyIhkL3KkWkX5NA11CbZ0uJZ7iH04OUunwB2TRARQcpzSNIqKLappmZNI+UxXltZ/cG6iqvLuXAdPqjDuz+zRvP7XY79YVS6gNZt2A7XpUAxHOlH3edDde+QCAQCAQCgUAgEKiJUnOOBqhbVxb+r0FfNOkuLy8ndz1an6ilUJMbrU5047p//34KtKSW4+TkJEf8oJr6SSxSaoLV/4G8JarVauVc+qjFUNc+axb2tGHqEtKEVcBrD1t+L8O3Z8FSrY2niefVmqpVs2GplVWTVRXUFHnB1R6FrfeZRZHZ28Kz2Hn/T4uy2brBnZ2NEiGy79QFpkjj7FkiPDripmCf540lrYel1aeG6cqVKzlXRi2/0hrzOzuHlUp83DnmEYEAWTpWraMlKFCtGLXp1lVMg46nTYyhrjZ2nrRarVw
/eEkjVQNvE+wq8Yz9TC1enuW0Seuo5+KkWmg7P1SzqUmk9Z7j4+P0O6vhvgiUueaqx4iVs61WK2eh0XqUrUNN1m92djanOVa3J6sl3tvbS/sCS+505cqV9Cx1f2aZ7RhXGvymrfCWelzLQ9mlblX0WKHHBa1O7XY7hUEwlEHJX1h+jk8NGagSOlAV3u/UcmTHkJfGgmv38vJyLu1Jmftyk2uT7onsOq1U9V7aGk3Aq89aWFhIFihaRVV2WKukppex+6Wm05CUyQevTydxH79IuQf4VlLtV2sJVO8rj77d7mubcLEMi1QgEAgEAoFAIBAI1ETlGCme4njqVv9faoc0eJxWKhsH9dlnnyVtDK1PvG5ubibthpI9aIA6PwN8bcM4sFoltQqoTzVPtzZ+4/j42CWZALKEGKopLUrIOw6qWHuqaua1XEX363d6LQp2HccqoNqCspinOlodz5rkacW9NvBi2uqOvTKNmxfToO+08StKBFJG8tGk5bMqPF9rq91Tq6ySDgDZoF6r0VN4c7SsPOPCG3/WSnt0dJTmPuWUasqUGADIBrp6iaCbhDdmrdZ+aWnJtYzZGBAvBYK1gmsMqGeVLIvHbBLefPPGkaZrYD1tsmGVsZcxpxRlpAL8zkvubK0y52mvmySb8OSlZ1FRGnTOfa6tSiLBOWY1zx6xi469acFawk9PT3N7maWlpZwcoPWp1WolK4clANnb28tZQz1vDGKcGCkPdpyrPNO1yc4VysHBYJDawItTqkNgULc+Xmy4lW/9fj+TiBcYegrYVCOUc4uLi7nYKE0zwPt1X+iRTOi1aUwS8zet508DGv9att8s8/ZoUiZU8ovTzZ09dHQ6nbS50w0EJ5Blgtrc3EzCwxIz8Hn6rKWlpdwBSheScRdg7zCgHWLrq/l9aA5WBkObe+D/a+/LdiS5qq53TpVZVVlTl7u7enB7xhgbW0YIIcRrcM8rgMQd8mduEBKIN0DiAZB4AbiAG3wBsiVjjG262273UF3zlJVZldN3kf86uWLHjsiIHKqa79/rJnKMOOM+5+xhbXaLTDt0TGNgoq35AJjGDsYbbf37tEBqa2PMgsFigMI1r+DgBVhvvCyXnSxIc+NLGg9JQZo8kcdBWr9bfYFFLG3TrctvbWinjTTGJatvOIA3SZnALkeakcfadE/bVTYJ1vhnGaAZoNiVzWLAEsnOjDQJsrg/W2Q05XI55g7MLsP6wM9tb8kHzfynD6OzBMsQPUY47wtvvrlss2AWGwejlEG6/NZ/02TerGAdpLCuc5nYdVkfkrBPKBQKEfY3BpMTzcqdOY2Qgd3gUEbOs4Y6o4yQFexWis050Gq1Ym6xVg6zafehvl8SM69m3+PDknaRHXc9yvt77e4uEs9fxvk/ObemdoXFvrNer4fDLowGOneUSPQgpRWBLE8ui8wBz8+CZ+XglKR41q9H/S/ps0nhrn0Oh8PhcDgcDofDkROZ80hpzQtrJDjwUCTK1c4Zo0Wi9JI4ucO1guklobmpVquJ9JVpGYnzwCKdsFxTmGaZ/8duPezSp3/H79NO0Xm1AFbwu74HawT5N2kWl7TyWLS5SbTn/F1WcFb7STX2VptYVsIs7cPWnllpayxNoH6d1iZZtTXTRhayCf7MsixpWAQaWcbSrC1Teqwz1a3Whlpjhftv1ppJbkNrHKPM2kWPaWQtohrdX9wWOn8KW3ssd4uL0s5a7W2tc9o17jK1xxqWbM9TTsszYlawSF+0pp/LxO57OpyAyV707/XYYlgWpGn1Z5qmXHthMOEGYNHqJxE0MMb1ysgDLbO5D9O8JZLKqe89y7HHnjbauswWKexPYZHivRTqhb1ovV4PlihYDdGf7B3FlOfaEpWFFGsW+G9035smZu7aO9O7OxwOh8PhcDgcDsf/QaRapNL8sS3to5WwUgdX93q9oHHRJ/75+flI4C+ep+ML2M90kpO9PnmzlkV/xz7++pnn5+cxWni2bunYnmnHdLAmKM1SkaWtLO1smkUn6X3SZ1nB/vN5tQlJZUsbz5a23tLA8fuL0txY7X4Z2vy8SItRYI0krhjH1u+zxoFcFHQ/9Pv9WPwa3jMpzmVo+6yAf8uzQFuk5ubmzDQQuBdgyWcrPYKVFgHXWQVfZ0FeH/tnBePK3MuwUnP/WmMQewKOlbIsOrinjsFjy1ceq9y0+tdaYyzCDx3zqWnf+TdsuU2zfE0DaXFWWeNwLW+JPHNrmnWyLHZoY+xJT09PY/vTbrcbs5hyEl5YpLA/YW8pbYXnGCkt8y4aaeP9/6IF6qLhFimHw+FwOBwOh8PhyInMrH36JMvaR2hO+ESuaWaZ4hhaTnzG32ltMycD1tqnSeML0iwXluVNaxcArq/WOBQKcXa3WWkAuD209cmiH9av+X2attOy3ojYVh7936yAD/Ko/s1y37S4r3HrdJEWqVGMYc+6Rj2p/Ho85tUSW/eaJbjvLW2zVS6ULQ/V70UgrYxanosM5Voam6bF4MnXJDk+7eSU/z/iWdUqc78maeOZbRZWgGKxGLPwsgxOYnIdx3thmmPPkklWvLS1v9C/YeT1vBl3PORlYWXoPsjbrtNkxrRi1XCF9anZbJoeUxiL2KeyRYoT8IpEGT+tGCkt87Ja62aFZ1VOXCamMf8zHaSsjTcLCR1AVygUYlTl7EKS5C4iYlPjziJAjwVompsaCzq9meYNhM6lYN0ry+eTwDoAAEz7a03mtANUGvh/Fj05vss7WHXAbRLScqlY75NcKvkgZd077eB1kZjUzXGSe00DkyzUaZjmvUbdO2v+sKy/m7XbDiPtAGgdpLRrq3UvvRnkPFssI9PIaBzZ+v2/ra2s8loHKj3OLKWq9Xv9nIt2W8zbZ0m/T8o3mYRpHwAtWHIpSfmV9Puk77I+e9z+5H2NdrnjsBO8ZrmoFf1w57NS8TDJCcY1U54npYRxzA6X4j5/4U90OBwOh8PhcDgcjv9yFP7bNFwOh8PhcDgcDofDcdlwi5TD4XA4HA6Hw+Fw5IQfpBwOh8PhcDgcDocjJ/wg5XA4HA6Hw+FwOBw54Qcph8PhcDgcDofD4cgJP0g5HA6Hw+FwOBwOR074QcrhcDgcDofD4XA4csIPUg6Hw+FwOBwOh8ORE36QcjgcDofD4XA4HI6cKKd9+Ytf/GLsbL1JiX4LhULu/436j4jI//zP/4z+kUxWp4tG1jq9//77I+uUpQ0Z3A+6T/heee+btU6///3vw0N7vZ6IiHS7XRERabfbIiJyfn4uZ2dnIiLh2mq1pNVqhdf4Hf6He+CeqFuhUJBicaBXKJVK4VouD6aIdcXvfv3rX0+tnyyktfGsEmq///77mer0m9/8po9yoCydTidybbfbkT7Dd/heo1QqhbZFe1cqlfAen6G/uO800M8iIj//+c9H1imvfJi0/fPOH8Y0ZB6enzSnUT89B7n/9JXbBP1YqVRCH9ZqNRERmZubC9+h/37yk59cuBzP0ocX2U/WeOa5pWVet9sNbbmysiIiIs8991x4j3sdHR2JiMiTJ09EROTx48dyeHgoeCZ+f/XqVR
ERWV1dFRGRarUqIoMxgGf+7Gc/yyXzWD7girHEwBipVCpSr9dFRGLX+fn5MK5QHtRje3tbnj59KiIiOzs7od6QO7j/wsKCiIgsLS2F+/72t7+dydibxfjKKneyyvH33nuvLzKYk/Pz8yIybCO8r1arQfYCnU4nrLONRkNERJrNZniv12Bed631ll+LRGU82ijLfPrVr341ci71er2Y7Do/P499hnHKbY574srrEpedy5+E9957L1Mf/fnPf+6jXFhPeY3FlV/jmiSrO51ObE/EVz1neT1Nwy9/+cux90TjylqUjfd5aAPe50FWYlzX6/XwGvIBaLfbYQz/9Kc/TSyYW6QcDofD4XA4HA6HIydSLVKTAKdKrTnh99bJ0/ofnyYd6ZhUs5Wm6WLttaXJ1tqLSbT10BoUCoWYBpM1P1rj0+/3Y9pzfp+kfeF7WBqZy0CWvuT2uQxAg1MsFmNzlzVE0Eiy5VB/xtYOQFumyuVyxDqFMmgN5iwwSTtPKrum1ce6HNY8Bnq9XugTyxqM11pjWygUIpYFkYH2D1o/WKTwXalUutAxfJnzJQusvkCZeS6h/bm9FxcXRSRqvcHvoFmF9ebw8FBOT09FZGh9qFarMUsEW38sK1JWpK0L2jpRrVbDOEF5uIxafqNuJycncnBwICJDC1yj0QjPxP3RxixPpolRY2xWFqhx7w8Z3O/3I1ZkkaFFslgshnUZv+HvtVxmoJ8wdvv9fkxupFk78sp11J+tXNZeQa85pVIp0hYi0XUpyTunUqmEe7Bcs7wn+JoHy8vLoRyW14dIVC5bn2nZkeZZ0Ov1YvOM9xtZrVNZMQ1LlEjUYoe+w73Z4or2rNfrQdYAaKdutxtep2FmBykg6UDFnz2LB6o0dzbrN5d5yBtXKI9y39PmazynWCzGJpE16SYpIwQ2C1DtOpa0gdYTHVfLnYyFJe7B7gdZDpbTRt77ps2xWcMymWPxsBYsPkjBBQSbObw/OzsL/aTdmNiFgl3D9CLO42Lcfhq3PdOeN0qO4P0s+pIVIFZ59LzhBQmLCS/E+A5A29dqtbDp5Y09FjA9t/v9fuxes8K47drv92cu49M2WXyg1S5HvDlYWloSkeEmoVKphHmFg8Xe3p6IDA5SaA/8fmFhIfQZNhcsD5PccZPAa03aBkxv3nkMWQc7HJwgF05OTkRE5ODgIHaQOj8/j23uWQmD8TgNTPsAleWe03gG2pMPypif7AqFfkJflMvl8B/tslsqlWJrE67n5+exg9SoeuY5TPG6ZCnjcD+9p6hWq6EtrHWMN9iMbrcbnoPxXalUwv31gWqcdQnzkvdalmuiJbO1/OarPnhZbn981fvwaRysRrWF/p4V5mkHRwBjc3FxMbgs47qwsBD6RSt1z87OwnhIg7v2ORwOh8PhcDgcDkdOTGyRykoOMallin83a8vUKM1IHg3RZVutLKtTmmsFXy2NjEhUU6G1owwr6DIrWGut/8vP1hYIq7yWa5++8mv+/0VbeS5CYzlNQNtdrVZDP2mXECYTYBclHaQMrXKj0Qif6WBltipC68QWKX1lUpAsmIb2N0sf8lzUxCeWzLDcavOWMe2/7MbHGj5tEWSNLLtLiEjEKgLSA1g6FhcXw3jA/9jqOG03kVlg1utOknuliK1tRXnm5uaCKx+0rNBedzqdMJd2d3dFZEjCcHx8HH7HfYd7oV8xB9MIYpJgjXONYrEY5iiPJW3VhFaZrXKoG6xQu7u74fXx8XF4hiYxwHPm5uam4tp3mYQS496fgT7mQH0Aayy7faIda7VaeI3v2HU3ieih2+3GXPCnuY5Z7vm679m7AbDIM3iPgTbm9kIdrLABXTfci8k2sgLta1l32YpkWWi0/Ga5bnkb4H0ecgrLU2kUsoYvAFxvbXlDndgahTUHMu3KlSuBiAdrU7lcDv9Fn8NLptFohD1OGtwi5XA4HA6Hw+FwOBw5MZMYqTR/cut0ab3PYtW6SL/1vLgsK0FSm1pty5pA7T9cKBSCdsHSZmgf1FKpZFoD+Hl5YFkULMuYFRuD31vUnRbFp0jU9zfNcjeJZSANoyjlk8bTZQfO7+/vi8hA8wOtNsYGx8dAM8SBy+wvLzLUAh0fH4f4BmiVYa1qNpsxP3X2t8ezOX7KCn4eB6Ninyw/blyt8Yb32tLB81JfxxlzaePJiofiODY99/H7UqkUNM+s7cN1bW0t8h3HoeCe7NefRev3rGBWlikrBlVrnDl+kAkmYImCJRB9s7+/H+YoaMFhmWq32+F/iK1aXV0N81YH2zPRRV6kxTqXSqWYVXNxcTFCmCEylPGdTieMFxBnIO5rf38/fIZxxpYRtkSJDOTRJBapaVqi0vZD1r2mOf6YYCEpDqhcLscIQOr1eniNNsb7ubm5RBKgfr8ftP/W+jwpeJzi2drjhAlNeAxADmKMYRweHh6GsYV1CWsWr0HsYaDrw9axvOuS5X0D8LOtuCltteH32uuDrW3aWmWRU7B3z6z2RHq94rLp8vf7/bDPgNXp2rVrIiKysbER1ibIgLOzs9g+A/uPk5OTTDLPLVIOh8PhcDgcDofDkRMTq2qT6JeTWO+yatuzxk3lRZrVaxYsXReBNPY9gNlrWDunrUbQMjSbzXA6Z60LgBM/MyvhM20RYr/arGAWJ20tA9hPlv2ek5LgsYYojY1Gx6wwJqEvtaDvk8Tmk6VslwFou5kalxPdiQy05Fpjzixu+IypjKGthGaImbjwGbP8aQ0+W1DGoWzOyrw3ivUOVysmT2TQbtCGshXNohmepD78f76yX7xOZN1qtWJJdlGuer0e+hTaPvier6+vBwsH6tbtdoMcwZyFfDk8PIzEs8wSo9IFpFmlrbk/6zhd1irjPb6DNn15eTlYA6GBRf8eHx/L5uamiEi4Yi6x1Qf/X11dDffV/TUO/TnPCd3unMxUryeLi4sRunOuE2uQYV3b2toSkUH8F+QDYCWDxnUSi9Q0GV0tqzXfy4qVnCbYCqUZETEGC4VCrB3r9XqMyY9j2jRDZ1rMO56HcvDv81o7mH2NqbtFosyQ2FPwWMPvtKfE8vJyqBssoByPpy157XY70co4zpjjFAFJHgrWHqfT6SSy9rFFij0R8F5bsNi6lcbyNwmyMPS1Wq3EZM/VajXIwevXr4uIyO3bt0VksFZhnOJ/bGnU/ZqFsU9kSq59aQcS60ClzbxptNmzImu4yKD+LIfHWW7MRaKBlXwgwWDSG9ejo6MwiXAPLLpra2sx1x2mQdUbMg5azAp2OcRCqilERaIUwPhNmjuBlVsK761NlBbCLOAnyVmUREPNixXTsesA88smMQFwmGm32xG3L5HhBohdj3izvb6+LiLDgxRvovTvsXE6PDwMQo7dLPhQJRINYM4j3PMqbviAlJRtvtfrxYg4eNOhN4zs2qXd7cZxr+Ixo10gefHU85ZTAuhD73PPPSc3btwQkeFidfXq1VAnzFXc/+joKLb5xZXzGV0EkoiP2C3GWpO0G/Qk1PoWeL6z2ypf2Z0ZmzqWx+zSJyKyvb0tjx49EpGhax/698qVK+EAh
b7DBkREYvLccukehTRlFAf/60344uJizKUPa1Sj0Qgbnu3t7cj14OAgQoCD/+NemlKdlYmT1EnXLSt4nuu1iWWGlQspzQU9716F3ay1axtvVPVhZH5+PtJn+AxXnRKDlayWclBvinm9zdO2GLOFQiEyd7iurPyz8qihrGiP4+PjIP9QV/x2b28vpuDrdDoxOT4JUH7L9TvtoMpEGBY1uj5Icb/rNYHJKfQhaxwymjRYxBJcHrxGG6MvV1ZW5ObNmyIi8uKLL4qIhPdLS0uRNUlkICuhiMGaxG6nWQ697trncDgcDofD4XA4HDmRWRWT5jJmfaYDVfk0zWZ1vjebBvO6+80aXCdtpbCSwV4G2EqiNUcc3A/tL2eBh3Yfp/xKpRK0ldA841S/trYW7ovfHx4exggCOPgvDwU1l5+DMjWZBWvuOYhYa7+yEpzo/mUrlXbXKhaLY2tf0qweTPfOCf60RgawsqePeuY0xyjK2Gq1Yi6gmMulUilm0VhfXw+WjI2NDREZasXX1taCpRNXaABXVlbCuMSYPTg4iI29SayhgEVuI2IH9Lbb7dhz0C+Li4uh3poWnDOqMwUrNGKoj6XlzIpRhBK44jNOKIl2h/UQfXXr1q0gD2A15ISRGAu4bm9vy+PHj0Vk6IYFq0mj0ZiKS0heoH+5fSy6fZGo9poD1GflaqVd+ixKX7YO4jXKgbnx8OFDefDggYgM3Vb4/5iD6MOFhYXQZ2gL1lCPO/ZY5mjrrJXAmenydTLvg4ODMIbgrsiU7ngWZI6V3JeTx07iWQBkpXHWLps8D/GZdjdjinbtgi+SjaQia/l7vV7MpQ/jgV07OTxAW/vYtU97k6S59jE0kUFeixR7JuB/GMdMpqBpyefn52NrD75bXV0NnhKQ35zIGmMQe6rT09OYJwlb2vLKDJQ/zd24VCqZ1inL3Q9tgLkAWYN+tLwU+DMrNQa7Z+aFXm/TiCV4vkAuo29u374tL7/8soiI3LlzJ/Jdr9cLshEy5PHjx8Faj30E7yuzWKzdIuVwOBwOh8PhcDgcOTGVhLxZAuZZwwrtCrRgOjBZJD1J20VYpixrk9ZSsi8wygutgY4XmSU4ia7WRnC5OLhbZHD6hsYEmqMXXnhBRAa+pS+99JKIDIPJcTI/PDwM2mVoO3d2dmJxDrjn/Pz82AG9bGXTQfmsKeBYKW2hSdOC8VWPNSs5ITAOVWvaeLWCslkjiBgAaEw4bg3zh2Ns0p6ZFCMyDmAdYu289rFvNpsRzT7KjbgOaMXZ8onXsIRAO8jB4UxqwbF9IlF62qwBowwr+JutUDqWplQqhX6AlhJts76+HiwG6CO00dHRUdCMsZ82yq/brVarRaxYWcBWDSugGN+h/6CRXFlZCTL6+eefj1xv3rwZ+gblQVkPDg6Cdvbrr78WkYGcePLkiYgMLSOQF71eb2oU9aOQptnu9XqhXTCO0Hblcjlo2i15y/cfF6w11rEMbK3QBBFra2uhD1BuWGq++uqr8BrzEdrZ69evh3mGuVipVMKz0T8cBzkJrNgoETvOplarhbbF81G3nZ2doEHm2Cj8FnMM92dSDW2RKpfLY/fZOPFQmsAAdWq320GuYV6hn+r1eixVBFvFgUnkOXt56Ng4JqCyvA20pZavaO+0+GZr7UOf82/yrLdWAmsA44OTrfJvONmwyHBMLi0tBdkOWc8eE2wBFRmMSchxK8Y57/6B76UtS7wm6BjrNLr0brcbs0hhTajVaqkWKSt+itNc5IEV88dxXfpZnU4nYikUGRJKvPLKK/LKK6+IyPCMgXtubW2FdQh710ePHgXvCG3lYuKnNGRevdI2XxZxghV4KDLYhN+/f19EhoFd2Lyvra2Fzk9j++HnTuMwlRYQy7krMJAx0Nj1DXXBJgGYn5+P1Wna7n/cBjrQHYLi5OQkCG/8ZmlpKSykr776qohIGIAbGxvhvtjgffLJJyIi8umnn8pXX30lIsOFoFwuxzaQ7L6Ud6OU5hLCrn5aSPBGOy0Hj+WuZQk2fSDl/0+zH7k8evO0vb0t9+7dExEJgeNog9u3b4fNrd6gjJob0zhQwb2r0WiExQWLJzY5e3t7QVDhMP/06dOI65vIUOjduHEj1OnWrVvhM5HBJgOLFwfd4158eMfzoMTJAisrvQ4C5/ujrpwxXSuIisVikB3oP8yfL7/8Mmx0mblOu0Jyvp9xD1IcPKyDuiuVSqgLNtUbGxuhH+Aiwe69aHPIGPTxgwcPwniFrOfFCr/nfC7jLsBZ1wBWjGlGRD4oo68xZlBWVtCgb1gG6Lk0ybrELph6s1ytViMufSKDMYLncR+IDMYZ1ibIT/zv1q1bwVUTc5AZMzFX2RUqr0IszaWPmfr0fqFcLod+wbqFtXVzczPm0oey9nq92KFpaWkp9hmPgWkqxCww26BmIUXb1mq1MLegwMSBqt/vRw5cuOc09xN8+ET/aBdPVkrxuNQ5uiCfmGwC/couhGm5HAEmvhmnnsxCiLZD2+/v70dY90Sih0VN+nHlypUYMRAfGq3PktpynLqgrAsLC6EuaFdeq3S4g0iclIJJP/BfrahmpV3aQYpdJsd17WNZqt2a+ZmsUNIHqNdeey1cIddQX+xFHj16FNYmrMFbW1vhWehDllFZ1iZ37XM4HA6Hw+FwOBdqWrsAACAASURBVByOnMjtT2FRnVtB+jgpQ8vyne98R0QG2pY//elPIiLyn//8J9xDZHCahrYtzT0pjV49b/mToLVnp6enwSQIzSSoFd944w3561//KiKD4F6RoTWmXq9HrFpZypy3TqxRTcr2LDLUROK0/tJLL4WgPHyHe3399dfy4YcfiojI3//+dxER+fzzz0VkoBmBZgaWAtZuou6od7fbzU3ZzMGm2jrF1kKLKlyTTVhkIGnEEgxoWCyN8yyIRSz3hoODgzCuPvvss0h5Op1OsCBAQ2NRzM4KcMvjQH1oT+ES+vTp06BBxhza3d0Nv4PGHO6i9+/fD8QTkB+wTN2+fTuMM2hsFxcXY0Qk0HCXSqVcWnTL+qQ16IuLi6HN2Q1Hu+2hz+7fvx/67Ysvvoi0w+npadD6sUUO9eY6oix5+5QtUtpVkPNCwZKMZ9+5cydYpPTcLhQKof9gZbt7926oI17ju/39/TCXtCVybm4uyJOssNaANDdWtlhq6wR+U6vVYtY1K8A7a/nyWi6svC/oL8zpxcXFiEsfyg0rEs8hkUH7QxuOPkT/3r59O+KaKzLwGIF1R1ukuBx5USgUQh9oK8XCwoKZAoDln4hECCZ0Tiy27OC+sAizRYpd+lCuWclItkSJDOQixhX6BHPg9ddfl7fffltEhu6zKNfm5mawxjERxDRTqHBuHbQ72ohJqph4AuUAOPcjrriHJtdZXV2NUXKn5dvjXIFZgDKcnZ2F8msLSrfbjaVi2N/fD6EPevyfn5/HiHVQL6YkZ48ZlEN7ZIyTkw1joNVqRQghcD+RQT9qF0O2qlhEXPozq/z4f7VaDXNUX09PTyeySCWl5uC0CyjbyspKsETBmwoWqVu3boXywmINC/3nn38e
1iaswa1WK1ZPtGHWPHNukXI4HA6Hw+FwOByOnEi1SGVJ+smfs385Ts84zX3rW98SEZHvfe974QT5xz/+UUSGmsxGoxE75c46WN5CoVCIad5OTk5CgCt8mL/73e+KyECz94c//EFEBvFDIiJvvfWWiAxOthZlKT9rUrB2CM9Au0Njsr6+HjSRuK6urgbNJ6xO//jHP0RE5IMPPpB//etfIjK0MEAD+tprr4X6oS/X19dDm6E8rNHJq6mwsmVbcVNai8KfJSXm5XvwPS2fc53lnINfZ0F2YpWDyVu0X3mxWIwFn3MyVO3vPW1AM8ZkE5o2lxPewULx8OHD8BrfQRP49OnToCHEb+DXzLTbuF67di2MTbbciGRPqAewJZqD1UUkQiahrRlPnz4N1gBYnyAL7t69G+qIPkVfvfDCC0GTBrmysbERIzZAPzabzbHnUr/fj6QJEBkGS1+9ejW0JzTit2/fDpZBaOhwr729veBjjvrievfu3dBvkB2dTicSO8HPXl5ejiSCnSa0Z8HS0lKwlOKK73Z3d8M8gaaXiSUsa8Y0ZQBrZPVawckmdQyqyFBbzbF3qBPqgL5EvNutW7fCPTC+Tk5OYolFWdZMI42FjmNcWFgIbYv7c0wytMps1UZ9OXEm7o15Cgv9yspKLDaK45ZnHesKOcg0/xh7b775poiIfP/735dvfOMbIjJsb1gVDw4OIpTauD+PzUmB9rGopjnGVFOIW5Yi9HO5XI7FJ2POz8/PB+8jy2JuWWvykJ0wAQ7kJdoJzzg5OQnfYYzt7u7G4t05RQy8kOCJwfG6mEvWHkHHJjUajdxeOihPs9kM/YA5hPds3WViFZ1yCDLP6iMmBtH7K/YC0mvJ3NxcbmIn9kbTBDu8r0U50MY3b94Me0/MG1ioarVasABCHmIt/uyzz4KnCMZ1pVIJ7YM2Q7vOzc1lknlukXI4HA6Hw+FwOByOnMidkNfSwGmtX7/fDxoxWDVw4v/xj38s7777rogMT+5/+ctfRGSgcdKWAotKHbiIxLxMBYuTKTR6OBEfHx/LP//5TxEZxkWgjsvLyzE/+1lZM+bm5oKGBFo59qfHKRun9Y8++kg++ugjERlaoqBVbjabQWv2wx/+UEQG1kSRgbUNGhlOAMksbSJDi9Q4CVFZS4U+0ExWHE/EGp+khLxMOWoxslkWLw1+dl4rT15GL9RjaWkpWAk0k9Dbb78t77zzjogM44igydna2gpWHk6OPE1wAmftm416Pv/880G7jLHx5MmTQI0NrRHmzubmZoSiX2SoiT04OAiaaVg9NjY2wnhETBEnSszDGMmxGro+GE9HR0fB5xp1uHv3boghRD0w/mu1Wui/N954Q0SGmujXXnstzDM8r91uh/ZCO+B9o9HIPZ+Y7RIaSbQPrBQ3btwIGj205ZUrV4JMx/Nhlb937578+9//FpGhtg9Ww83NzVB3jAGmoGZaeJGoxWAS6PnFqRM4CS3HgIkM+3xrayvMHYw39Hm73Y55S5RKpUwpDbKCGVd1+gy03erqarC+cmyCjo3C+1arFdoZYxAsuVevXo2ktMBVJ7VmFrBxY6QsmmzWmGurcbPZDOsUxhxiGra3t0N5dfqOer0eLFFMH85sgCLRGNNpJIO2qJv1ulWpVEJfYP7/4Ac/EJGhXBAZxiIzuxgzeooM2jOtL/LuL9An3W43Rn/OaSw0Jb6VpJk9QnQKEsiW+fn50GfYq7DniuUBgmfmqQ+v+TqpfaPRCM/DeNrb24vFTbGFCtYpWKYQd7i0tBSTNUtLS+GZeowVi8VYuphR4LhNbZHCvSyL1MLCQow9mxNSs3UKZROJemaxTNUeP5a1Kit4P2ZZolAeyD/E5L/88sshvh/yHHU6Pj4O5w9mmhYZxEqhrzmNCtPYiwz7MGt6hNxkE9ZmEK9ZWGED9MEHH4iIyO9+9zsRGQj6H/3oRyIyNG9jAe92u+bgmlXeqLSFTruTFYvFMGmwYYVw+/DDD8OGCh2BDUq1Wg0mRN6oT7MuPDk0LTOe8+TJk7ABZdcjlBuDFovsu+++G4Q8gmAhBLvdbljcsAl+/PhxmOiYEHzAzrsA86TSJmQ+UFlEFNqlzzpIWdSraS6EgPXsvLD633In5IMUNn3YhGLiv/POO+GAizGHAy0TQFguE9M4VOFgxLkt0O5Mh465jvH16quvBuWKpgT/6quvIocqkWi+FSxsTG6BciAYmA9UKEcW8CaZg8RFhi4em5ubYd6gnPv7+6H+kBNIJfDWW2+FjROEPzbDhUIh5sK0vb0dc13ig/C4m6RarRajzuYcXvgMY6vdbodyYGMOgqBPP/00yBFs3lH+s7Mz07UY41PnyVlYWAjtPg1Y45rzcGE8oC0gM1utVigT5hm7Oek8SNNwqWJYc1RvNtfW1sIaA/mzv78fU0pgjpRKpdDHcB3Feluv18OzsLnY39+PKcB4w5Q3jQX/V7v08cFUu7AeHR3FXPqw5uzt7cUo9Dn3GeYW+rderydS3o+Tz4ehxxofpHBfJpOB8hXkW5ARnU4nzC1N7nR4eBjbpFuH+En2FDz/9MGDDyCa3OD09DSWU4/nibUGiwxkgHYzq9froc84v51Ifvpzzl+l24XJuZjaXWQgb7W8hwzc398PrzG/+ECFOcpzhAmKdH3yAuVqtVph/OvcXUkHKe2yxledd40PRrr/isViTAbwd3n3eaw80vIP95qfn4/tH15++eWwD0ddsI7eu3dPPv74YxGRcMXB6uDgIHZeYTIafKZJzEbBXfscDofD4XA4HA6HIydS1UtZk/Bqi9Ha2lpwN8LJ7m9/+5uIDCxUOHlCK4PTILtlWSf2adJ9joJ2AZufn48FG8Mt7sMPP4wlFYVW7Pz8POZWNU13EJEopSsncRUZmoPZlYqTBoMw49vf/nbk/Ztvvhk0x9Da4P9ffPFFOOGzFlq7brCmI6+mgqmStemZ6VWt5Ls6CBe/qdVqMZe+NI2IRYnOVKiTuIRk6WfWoqPumur81q1bMQ0/xluSW9s03ftAonBychI02bCiMEU4NI2wUKytrQWNEqw03/zmN0VkYKGC6xxnHxeJuvWgv5rNZhiHqDvKsLq6Gp6dBWwBQn0wh5giF+MAlovXX389aPxRD2ifb968GTRdGNeYn5ubm8FliV1iUTdtBeHg4KxAmy8vL4fywjqE6/LycsRNV2SgvWPXRRGJ0LizNU4kKivR99AkXr9+PZJAViRdazwJeHxry3O73Q7aXO3Cdnh4GF6jPJBpFinBtNcf1uxj7mPcoA+XlpZCmVCPJ0+eBHmMsYRxtry8HKzZ0J6jHyqVSug7DqjXSUNZjua1SDHluZWwVGQwtjF2mKAGFihcOfmutvZwMnhOXo3f6HJbngh5Ye2BeI3X5Co3btwIGnXseWBte/DgQXBDwlzD+GS3SO1+hWdNCqyRTAfPlhuR6F4G13a7HcYL5FfaWgy02+3QT7yGayIaTqabp5/Q9jxmtZWhUCiY8kHTnmOenZ6exlyuMX/u3LkTZB3kW7lcjnlp8B4p77jD2nR2dhYrG7v5atfZJCu
VSDJdOsqalkoGGNfdVyS6D9OpOVCe9fX1YEXH/NnY2Ii48okM9woff/xxCLWBXEQ/iQzHFu9JmDRL1zPTXi1jfR0Oh8PhcDgcDofD8f+QSb3EGg/rdIbTPLQUlUolaBsQvwGt+eeffx60ZfDnhoasXC6bVpukE+G0Y43Snlmr1SLUlSLDALadnZ2g9cMpF789OTmJ0XZOmzaXCTGgoWCtMr5Dn0BT/sorr4QgV2jTodlrNBrB4qaDX7e3t4P2AFqBer0eNIA6sHccYCyxlRLaAiY40BoT9h3XWhT202dLFK5MuS5iJwMGCoVCLjrWSVAsFmO0vWiLZrMZNLY6cLbZbIYysuZtmmMP4+v4+DjmU465sL+/H+Y4W2x1gCe+u337dogdgIYd1o9Hjx6ZFhxdT461zEPOwPMF98B4w7i+cuVKKCusanfu3ImkFRAZypPj4+OgZUasEfqMrVsYYxbFLGvK8sbmsBVTx4/g/mdnZ0GzB8vbw4cPI0l2RYYaPotQAv353HPPhaBgJgHBMzV5h8h4MQMa2irQ7/dD37PVCVYAHcvJhDm6Ty6C2IhjdTT1PpOn4HcYqw8fPgyWQ3yG8XL9+vVgicJYxb3a7XYkyF5kMFYxl7RVji0MWcFJWjVZBwem63jEra2t0E/oE9Tt7OwsyH2MOYzrtbW1CMkEys8J20WiZEPjkgZZQJuxtwTau16vh/GoPTq+/PLLYHWH3EEda7VaJDYKz5nmmNSkAiLxFCGdTidG+MQ09doyxXJMkxD0er1wL+wbOMZGW1VarVYuunDeM+gYa5atFtECxgrqhf54+vRpjLACMvDo6CjML7byJ1lCORFuVnBKBk5xwt+1Wq0wVrAXbDQasRipLEQUPO7S6NIZeeUDz0NttcM8ZiIkWP2q1Wpoe6ypILb75JNPQtwuxiLG8uLiYliH2DNC76+ArOkR3CLlcDgcDofD4XA4HDmR22RgxU3hNU7rx8fH4TNoG9g3mGN0RCSiabiMBLxpYA0TTquaArTf7wfNKyeCExloDHDqnpVWEyfz8/PzmCYVmroXX3wxxjp47dq1UF5oxmCFunfvXrAYwr8U2pSFhQUz3sHSOuGaV+vHSfQsqxOuVvJdPV7YJ1pbt5hJSvt/swbOYgecNmtXGvTYQRm3t7fD+GLtncigT2E9ZeuZZuKZBNxW0OBBcwzLxtbWVhiHGDdXr14Nmjt8x77L2koFaw9rqnHd2dkJz8Sc5BijPBYpZiqDRgxzBGN9fX09Nv45GSHiiKBJf/r0aWJKgFKpFKFhFRmMSR37N0lfoYxLS0vhGRjPKE+z2YxYokQGWnJo9vAZ6sGaT9wf/bmxsRE0h4jJ4tgePT+73e5YKRKSwBYp1BPj4uDgIHyPMQO0Wq1Ie6BsQN51KG+fsbxiOm+RaKJplA3xiQ8fPgxjDfMRVpA7d+5E6M5Fhtr34+PjWNLRVqsV2kxro3kNzAornlXHIXQ6nVAnlGdnZyeSJFVEIkx91pwUGYw31J1jh3ms6es0xh7TQ4tE45pQz2azGaxOkMuo497eXug7HUtWrVZnxhSpYbEBWkyEfMX32Bdh7d7f348xwnE90O4Yl5wCQVtDmZE4C7gf9LjDPavVaiyum2Ph0TdY1zgtBdYeTqKN+uM36+vrkYS9jHGYInmcakZNi+WY46dQNou1T1up+DsrbsqKSxeJxtdlBbNiQy7p9eT69ethfqNujUYjyD94TCHG8P79+0FmsCVKZGDl0hbrWq2WOK+yWqRSD1Jpbg28qOjvO51OMNGjg3nwYnHF5i6NuMJ65mWAn41BjLKWSqUwCNEhltvXrMrPOUc4wFZkuKCurq6GgYMyPnjwICzAMI9CQBwfH8dcdthFSAfnsRuNpkMdR2ig/djcr7ODsxnecknQ7hAMnQOhWq3GgoM5h9UkdZkUVt1YacHEHCLD9mdXTx6r03RXwjP7/X6EKlpEImQNGGdwy1tbW4sRHrDbnx5f+M3i4mKMyGBnZycITmwIOWA4j9sY5s/c3FwYBxDCnGMK7Y95w66N7O4oMtxYcH1YiFvuexrabS0P2JUO5ebcMCKDzSr6hqndsVjBBQzgBQl9g/64evVq+I4PAIDezLJb7TTBc5XnBJ6rc/Nw7h/OX6OR5UA1ydzinGxoP87zg00R+mtzczOst+zSJzJQYEIJoSmYDw4OwrzhzSJkox6X4wSVs8JKy2N2GUP5cZDa2tqKkEvgd7gn08GLDA/s1trEpAKaZGha8lwfpHhTybmRICPRh+yCzLl9uPyj8pVZyCsnrPQhOiUBbyotl3fOcyQyqC+njBCJUmxrMovV1dXwTE2lzq6hWcAkD/pQxgcqTXwyPz8f5glkNI9D7dqoD1aot0g0nYKWgzwms4J/r8czHxw5xAblYMIMri8fUNMOWfx77SrJiu5xD/qVSiW0EeY01pUrV66EZ6H8e3t7wT0W+Qy14l8keoDCFbID48kqc5acogx37XM4HA6Hw+FwOByOnMjk2mclC016LzI44eE/TF+pv8NJcBS9+qwtUdb9006iFvGA1kiNqxEaB5yFWZuqOZgcWj/W/rFLB5f/2rVrMcpqaPpKpZJJjaq1uJP0IQc8W0nhcLUsUtp6xJp+XTa2TLHVFFdoZDQ1et4EgeMgrc2YrlVTZeuEivpeaffN20/Q2p2dncXc0KCJbTabwZUFlo2dnZ2gUYe2iAkpWMMsEiUwgZaJE+thjGI8Y4yfnJwEbVye+rArkrZg7O7uxjTKp6enQROoKW/r9XrMTYpdXSxXmjQL1LjuE81mM9QB/QHt3dbWVrCuwQq1v78f+hDyhDV82qKIPlteXo4RSvR6vYgFiq+dTmcqZBMAa2d1O3Y6nfDaCl7Po4m0XFkmWas42Fq72eC7s7OzYEUCYcnu7m6QAZgHcN9+/vnnQ7+gLzAGWP6z5Vq7QlmU21nB99IWLQ7cRzmg/d/f3w/lZOIhtIl2Fcb7paWl2Njr9/sxSxS/n8QqldTf/X4/Ns6tz5iMw3KVSntGEsZZl9iyoT0/2BKUZpFi0haRqEcH5DE8E9hdETg/Pw/WAowb7ps8FlHLIqW9UCwiioWFhZhFikmTUA9N1d1ut8N3TCKhXW054Wve+cSEKUnrQ6/Xi3kGsSVUk1NUq9WwFlikE9ZnmjSGLX15rdacmoZTo4gM26xcLsdSVnz99deB/Ay05xyCAvnJHll4b5EdAePu6dwi5XA4HA6Hw+FwOBw5kTshb9YYJv3fNE3fKKrzWVHQTnq/cU+vo56bt1ysOdTWAGgbWq1WTIve7XbD6VwH4C0sLMR8SNGHZ2dnMV9h1qKPq0ljQINiWfvSyBI4wBxX9uXVVhvLN5z9+lFPJlUQmQ5d8zRgBUOmEWKkacHG6S9o7zjhJtqbkwZiPKI9z87OYglvoa1cWVkJ41EnG67X6zGNUqlUisX/oSwLCwuROISsaLfbocyaKp81fKwx1Qm7ORlpGmmERdutv5sEqD8HRENrDOvGzs5OiE+BzCgWi7Hkudwv6Bv8hseCnmdM3KKvVuLrcZA1rjav/3veZ4
4LtgDosYQyHx8fB6sN5kuj0QgyC9pcWKSuX78ei42C5nx3dzcWf8SpFnQMxDjguaDXEY7zhAxg8gsd34k5vbS0FKE7F7FJj9A3TCih4/OyBpNbGOXJouczj3NtHbE8L7Ji1J4qCzjmRq+zPC71fo6fxWlGRAbeANoSiPG2u7sbI6DgOFv2ftHPy4K0mEvLgyctcTRbpiA3UQ+dKkBkuPc6PT2NzR20R7VazT2v2IKStK/msWDJXk2bzuQUWK85jkrHkFmf8bqPe2UFtzGnCeCycowXrPAPHjwIxC3oEyZDg2WTk9HjeWmWqHHX4LEl5KgDVdL3o9y9LpOhLw3jLph56zHJwtxut2Mud7wJ5IEmEhUa2vWoUCiESa83wUlubUkHqHHqlOaelnY/Pkih/JzlXP/XIpHgBYEFLJdnUpeQLMirQLDcjCyhMY3+AdisrxcgjJv5+fkYuQEfxnW+JmYT48MVrprJjPPEoA0wjhcWFnK5UKDM7IqGKy9ampGLGSTZ9VRj1ocmC5xXDK4ROs8cb3rY/QSyAu3PQbtMmCES3XBr0oZ2ux37bBqbWQtZXVdnpaAbF+z2qTeZ2GAeHR2Fwy8ORL1eL/QFSCaQx2t1dTWMQxyQMbeOjo5Sc5hN4tIH8D00UQ7njsJBijeqevPH+WUsBQt+ow9snU5nqgepLOMl6b66LbO6XCc9Y9zyaVjslADLOD7MJj2X+0uzX+J6enoaZA8rbPW6z/Mgj/LSUvBaCkdNpMIbbc2muri4GDbkOscU71NY+aEJmNCW3W43F3kG35eJKizFsHXY1fsc3h9qcgp2ydeHJVbyWN/lPUhZDIFof5SD9wM4PD1+/DjIP9SXc2PpQ5m1RgGjxkgWuGufw+FwOBwOh8PhcOREJotUmutdkoUpzRKlP3vWqM7Hef6k5Z3k/6x9SaLFZE2L5W4BcMC81sRrLQi/Zhe8acDSPo1rkUL7sIuVtmBYFiamXtfuWpVKZSaUzRb6/XiKAf29hWm7kFpIy9EF7RRTykJDyeZ6bfFk4hK2UokMLCi4F1+1RZX7N089LU2mdnFhN5xRbqZ8TfosL/L2G7T8p6enwSVFu6YUi8WIhU8k6iKhr/Pz8zFtLs877frLhBJp7ihZMWsK8ssAzxvMJU3QcXBwEPoTc6NSqQSLISxSsNjUarWICx3uITIYAzpo3soTw8jbVzw/kty8Dg8PYy5T5+fnETIZkaHG+cqVK5HgcdQTZbZc6ixL1Dj1yYq87naX4c7HSLNIAbweQt5a5WCLDsYcxirQ6/XCZ+h7JmDQuREtb5I0sIxJkjcs49lyitfavdBKicHeFDrlDRMJaUvbOFZeyyJlXa36aisVXy3vJTxPpy+yvJiY4CyvlY1p51E/naLj6OgoeKbArbnRaIQ6aAtZvV6PpY1IskTp9hkXbpFyOBwOh8PhcDgcjpzIHSOVpgnMYmG6SDKJZw2zqh+frPEMHXDLVOFAmtXJiv+xtO/jxu+MAlu/tFZ21HO09hFalVqtFrNYsMZIJ2nk9tRWCY6fughY8U/W51kwzXHImcl1sDH7iutkx2dnZzEqVg6A1X2N37AmE9rAk5MTM/BVJH+SQMvqqT/LGmSe9H0eTKOvEBvTbDZjMTFMrGLFBFjJiEWi1gqLUtpKzp2VcCgPLAryvHhW1h+26KIsaD/WzqI/OeYVFihcYTksFAoxghGmFcc9+NlZEkNnBRM+AJjbsD4dHx+H16hnr9eLxWoxwYQmQeGYRfYywPWiLFKziometeUV92eyibRnsWXK8uTAb3QiVSZnwDOxPp+cnESsLiLDMZ6XnMHynmFyG1x1ihT20mErra6blpXNZjNGSMXP13NpHIIdnpfa68aqE1+Txrv1HcpfLpcjFOr4zKI9F4km3c4KtgTq2EmMmf39/RgRUq/Xi1nGOGWEtU4BaXHK+jdZ4RYph8PhcDgcDofD4ciJsVXqoyxMSVqNUfFWl60VzIMsZb2I+lhxShYzW5qPcJLFQ98/DybR9lnMM1n/pzUysFZ0Op1YHA/H0mg/ZktrxNqqvMnnpolnhdWSNb2a5ctitGNGJ/QLNEmcNBCvNVW2yFBjButKt9s1fblFBv2cR5OZNQ4vDZdpfbLAcY9oCx0ryMkp0R/s864t3Kz51LEv7XY7Nn/T/NCfFZl/2esPzx9ONSEy1MQ2Go2Idlhk0E+aDhx92el0YnGGsPp0Op0YQxnHqWSJc84KjlPC8zlej8skMhifOgk0J4jXMRAsi7X1iT0vLtsSNc74uoh5w54FabG/emwwk5/1W824yPGwev3ksaplNlso82BUjJQeK2yR0mXgumpLU6VSCfOSU2Noaze3X94xyDJbJ31nWczWRXyX1gaAtuJZLNBspUJfom/Pz89jsXBZ68QU7czWJzKwpMM6xZ4UHIMtMpQFHGNqybBpWqKAiX2Tkg5GaRvztM8c+THK1QjvsxKFpH2Whlls7rMepHhypJFN6LwRTCCgBRATUFjjeRJa4MvCtOcck5roBZg3hnoBLpfLkSBekaEgZKpsfaDiPuGxwSQRXC7eLGbBRY35SfphbGFP7qjaRYXp69MOobw4WxtWfJclV9O0xuIkbq7PElgWaXdmDmrXCqJ6vR7IF7CpQP+2Wq2wIcHBBRsVEYkpliwCImCcoGyLglznMzw9PY2UCeVBXXROmHq9Hr6zNtxa7ne73akQnGjMyo0v7f6z3jP1er3Q/1qm9vv9mAKmUCjE5DiXWx9KsNFmt1LOI6VdyXhOjJtuJOnwYCldu91ueKY+rHDba9INVqxiLqXtH4rFYu6+ZFd5LfO4LTXRWFI9gaQ5wQdNroc1v0QGfZaX/pzXDk0yYckrJsLR+wb+LmlNSJJhTjbhcDgcDofD4XA4HBeMwn+7Fs/hcDgcDofD4XA4LhpukXI4HA6Hw+FwOByO3qWXRwAAAE9JREFUnPCDlMPhcDgcDofD4XDkhB+kHA6Hw+FwOBwOhyMn/CDlcDgcDofD4XA4HDnhBymHw+FwOBwOh8PhyAk/SDkcDofD4XA4HA5HTvwvlvYc7V6NMJQAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "num_channels=256\n",
+ "max_columns = 16\n",
+ "\n",
+ "fig, ax = plt.subplots(nrows=num_channels//max_columns, ncols=max_columns)\n",
+ "\n",
+ "fig.set_size_inches(15,15)\n",
+ "for i in range(num_channels):\n",
+ " v1_k = v1_model.simple_conv_q0.weight[i,:,:,:].numpy().mean(axis=0)\n",
+ " v1_k = v1_k / np.amax(np.abs(v1_k))/2+0.5\n",
+ " im_h=ax[i//max_columns, np.mod(i,max_columns)].imshow(v1_k, cmap='gray')\n",
+ "# ax[i//num_channels, np.mod(i,num_channels)].set_xlim([0, 223])\n",
+ " im_h.set_clim([0, 1])\n",
+ " ax[i//max_columns, np.mod(i,max_columns)].set_axis_off()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 91,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "9.899494936611665\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Nyquist\n",
+ "\n",
+ "visual_degrees = 8\n",
+ "image_size = 224\n",
+ "\n",
+ "nyquist_f = 1/(visual_degrees/image_size)/2 / np.sqrt(2)\n",
+ "\n",
+ "print(nyquist_f)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/dm_networks.py b/dm_networks.py
new file mode 100644
index 0000000..61df2de
--- /dev/null
+++ b/dm_networks.py
@@ -0,0 +1,288 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""From https://github.com/deepmind/deepmind-research/blob/master/adversarial_robustness/pytorch/model_zoo.py"""
+from typing import Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
+CIFAR10_STD = (0.2471, 0.2435, 0.2616)
+CIFAR100_MEAN = (0.5071, 0.4865, 0.4409)
+CIFAR100_STD = (0.2673, 0.2564, 0.2762)
+
+
+class _Swish(torch.autograd.Function):
+ """Custom implementation of swish."""
+
+ @staticmethod
+ def forward(ctx, i):
+ result = i * torch.sigmoid(i)
+ ctx.save_for_backward(i)
+ return result
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ i = ctx.saved_variables[0]
+ sigmoid_i = torch.sigmoid(i)
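+    # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))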
+ return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
+
+
+class Swish(nn.Module):
+ """Module using custom implementation."""
+
+ def forward(self, input_tensor):
+ return _Swish.apply(input_tensor)
+
+
+class _Block(nn.Module):
+ """WideResNet Block."""
+
+ def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
+ super().__init__()
+ self.batchnorm_0 = nn.BatchNorm2d(in_planes)
+ self.relu_0 = activation_fn()
+ # We manually pad to obtain the same effect as `SAME` (necessary when
+ # `stride` is different than 1).
+ self.conv_0 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=0, bias=False)
+ self.batchnorm_1 = nn.BatchNorm2d(out_planes)
+ self.relu_1 = activation_fn()
+ self.conv_1 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
+ padding=1, bias=False)
+ self.has_shortcut = in_planes != out_planes
+ if self.has_shortcut:
+ self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
+ stride=stride, padding=0, bias=False)
+ else:
+ self.shortcut = None
+ self._stride = stride
+
+ def forward(self, x):
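+    # Pre-activation: when a 1x1 shortcut conv exists it consumes the activated tensor;
+    # otherwise the identity skip uses the raw input x.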
+ if self.has_shortcut:
+ x = self.relu_0(self.batchnorm_0(x))
+ else:
+ out = self.relu_0(self.batchnorm_0(x))
+ v = x if self.has_shortcut else out
+ if self._stride == 1:
+ v = F.pad(v, (1, 1, 1, 1))
+ elif self._stride == 2:
+ v = F.pad(v, (0, 1, 0, 1))
+ else:
+ raise ValueError('Unsupported `stride`.')
+ out = self.conv_0(v)
+ out = self.relu_1(self.batchnorm_1(out))
+ out = self.conv_1(out)
+ out = torch.add(self.shortcut(x) if self.has_shortcut else x, out)
+ return out
+
+
+class _BlockGroup(nn.Module):
+ """WideResNet block group."""
+
+ def __init__(self, num_blocks, in_planes, out_planes, stride,
+ activation_fn=nn.ReLU):
+ super().__init__()
+ block = []
+ for i in range(num_blocks):
+ block.append(
+ _Block(i == 0 and in_planes or out_planes,
+ out_planes,
+ i == 0 and stride or 1,
+ activation_fn=activation_fn))
+ self.block = nn.Sequential(*block)
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class WideResNet(nn.Module):
+ """WideResNet."""
+
+ def __init__(self,
+ num_classes: int = 10,
+ depth: int = 28,
+ width: int = 10,
+ activation_fn: nn.Module = nn.ReLU,
+ mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
+ std: Union[Tuple[float, ...], float] = CIFAR10_STD,
+ padding: int = 0,
+ num_input_channels: int = 3):
+ super().__init__()
+ self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
+ self.std = torch.tensor(std).view(num_input_channels, 1, 1)
+ self.mean_cuda = None
+ self.std_cuda = None
+ self.padding = padding
+ num_channels = [16, 16 * width, 32 * width, 64 * width]
+ assert (depth - 4) % 6 == 0
+ num_blocks = (depth - 4) // 6
+ self.init_conv = nn.Conv2d(num_input_channels, num_channels[0],
+ kernel_size=3, stride=1, padding=1, bias=False)
+ self.layer = nn.Sequential(
+ _BlockGroup(num_blocks, num_channels[0], num_channels[1], 1,
+ activation_fn=activation_fn),
+ _BlockGroup(num_blocks, num_channels[1], num_channels[2], 2,
+ activation_fn=activation_fn),
+ _BlockGroup(num_blocks, num_channels[2], num_channels[3], 2,
+ activation_fn=activation_fn))
+ self.batchnorm = nn.BatchNorm2d(num_channels[3])
+ self.relu = activation_fn()
+ self.logits = nn.Linear(num_channels[3], num_classes)
+ self.num_channels = num_channels[3]
+
+ def forward(self, x, features_only=False, features_and_logits=False):
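+    # Normalization with the dataset mean/std happens inside the forward pass,
+    # so inputs are expected as raw images in [0, 1].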
+ if self.padding > 0:
+ x = F.pad(x, (self.padding,) * 4)
+ if x.is_cuda:
+ if self.mean_cuda is None:
+ self.mean_cuda = self.mean.cuda()
+ self.std_cuda = self.std.cuda()
+ out = (x - self.mean_cuda) / self.std_cuda
+ else:
+ out = (x - self.mean) / self.std
+ out = self.init_conv(out)
+ out = self.layer(out)
+ out = self.relu(self.batchnorm(out))
+ out = F.avg_pool2d(out, 8)
+ features = out.view(-1, self.num_channels)
+ if features_only:
+ return features
+ logits = self.logits(features)
+ if features_and_logits:
+ return features, logits
+ return logits
+
+
+class _PreActBlock(nn.Module):
+ """Pre-activation ResNet Block."""
+
+ def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
+ super().__init__()
+ self._stride = stride
+ self.batchnorm_0 = nn.BatchNorm2d(in_planes)
+ self.relu_0 = activation_fn()
+ # We manually pad to obtain the same effect as `SAME` (necessary when
+ # `stride` is different than 1).
+ self.conv_2d_1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
+ stride=stride, padding=0, bias=False)
+ self.batchnorm_1 = nn.BatchNorm2d(out_planes)
+ self.relu_1 = activation_fn()
+ self.conv_2d_2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
+ padding=1, bias=False)
+ self.has_shortcut = stride != 1 or in_planes != out_planes
+ if self.has_shortcut:
+ self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=3,
+ stride=stride, padding=0, bias=False)
+
+ def _pad(self, x):
+ if self._stride == 1:
+ x = F.pad(x, (1, 1, 1, 1))
+ elif self._stride == 2:
+ x = F.pad(x, (0, 1, 0, 1))
+ else:
+ raise ValueError('Unsupported `stride`.')
+ return x
+
+ def forward(self, x):
+ out = self.relu_0(self.batchnorm_0(x))
+ shortcut = self.shortcut(self._pad(x)) if self.has_shortcut else x
+ out = self.conv_2d_1(self._pad(out))
+ out = self.conv_2d_2(self.relu_1(self.batchnorm_1(out)))
+ return out + shortcut
+
+
+class PreActResNet(nn.Module):
+ """Pre-activation ResNet."""
+
+ def __init__(self,
+ num_classes: int = 10,
+ depth: int = 18,
+ width: int = 0, # Used to make the constructor consistent.
+ activation_fn: nn.Module = nn.ReLU,
+ mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
+ std: Union[Tuple[float, ...], float] = CIFAR10_STD,
+ padding: int = 0,
+ num_input_channels: int = 3):
+ super().__init__()
+ if width != 0:
+ raise ValueError('Unsupported `width`.')
+ self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
+ self.std = torch.tensor(std).view(num_input_channels, 1, 1)
+ self.mean_cuda = None
+ self.std_cuda = None
+ self.padding = padding
+ self.conv_2d = nn.Conv2d(num_input_channels, 64, kernel_size=3, stride=1,
+ padding=1, bias=False)
+ if depth == 18:
+ num_blocks = (2, 2, 2, 2)
+ elif depth == 34:
+ num_blocks = (3, 4, 6, 3)
+ else:
+ raise ValueError('Unsupported `depth`.')
+ self.layer_0 = self._make_layer(64, 64, num_blocks[0], 1, activation_fn)
+ self.layer_1 = self._make_layer(64, 128, num_blocks[1], 2, activation_fn)
+ self.layer_2 = self._make_layer(128, 256, num_blocks[2], 2, activation_fn)
+ self.layer_3 = self._make_layer(256, 512, num_blocks[3], 2, activation_fn)
+ self.batchnorm = nn.BatchNorm2d(512)
+ self.relu = activation_fn()
+ self.logits = nn.Linear(512, num_classes)
+
+ def _make_layer(self, in_planes, out_planes, num_blocks, stride,
+ activation_fn):
+ layers = []
+ for i, stride in enumerate([stride] + [1] * (num_blocks - 1)):
+ layers.append(
+ _PreActBlock(i == 0 and in_planes or out_planes,
+ out_planes,
+ stride,
+ activation_fn))
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ if self.padding > 0:
+ x = F.pad(x, (self.padding,) * 4)
+ if x.is_cuda:
+ if self.mean_cuda is None:
+ self.mean_cuda = self.mean.cuda()
+ self.std_cuda = self.std.cuda()
+ out = (x - self.mean_cuda) / self.std_cuda
+ else:
+ out = (x - self.mean) / self.std
+ out = self.conv_2d(out)
+ out = self.layer_0(out)
+ out = self.layer_1(out)
+ out = self.layer_2(out)
+ out = self.layer_3(out)
+ out = self.relu(self.batchnorm(out))
+ out = F.avg_pool2d(out, 4)
+ features = out.view(out.size(0), -1)
+ if features_only:
+ return features
+ logits = self.logits(features)
+ if features_and_logits:
+ return features, logits
+ return logits
+
+
+def wideresnet_28_10(num_classes=10):
+ return WideResNet(num_classes, 28, 10, activation_fn=Swish, mean=CIFAR10_MEAN,
+ std=CIFAR10_STD)
+
+def preactresnet_18(num_classes=10):
+ return PreActResNet(num_classes=num_classes, depth=18, activation_fn=Swish,
+ mean=CIFAR10_MEAN, std=CIFAR10_STD)
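+
+# Minimal usage sketch (illustrative only, not part of the upstream DeepMind file):
+# both factories return an nn.Module that normalizes its inputs internally, so raw
+# images in [0, 1] can be fed directly; penultimate features are exposed via flags.
+#   model = wideresnet_28_10(num_classes=10)
+#   logits = model(torch.rand(4, 3, 32, 32))                     # class logits
+#   feats = model(torch.rand(4, 3, 32, 32), features_only=True)  # penultimate features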
\ No newline at end of file
diff --git a/jpeg/__init__.py b/jpeg/__init__.py
new file mode 100644
index 0000000..f5e9937
--- /dev/null
+++ b/jpeg/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Taken from
+# https://github.com/mlomnitz/DiffJPEG
+
+from jpeg.jpeg_module import DifferentiableJPEG
+
+__all__ = ["DifferentiableJPEG"]
\ No newline at end of file
diff --git a/jpeg/compression.py b/jpeg/compression.py
new file mode 100644
index 0000000..f9ff674
--- /dev/null
+++ b/jpeg/compression.py
@@ -0,0 +1,228 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Michael R Lomnitz
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import itertools
+
+import numpy as np
+# PyTorch
+import torch
+import torch.nn as nn
+
+# Local
+from . import utils
+
+
+class rgb_to_ycbcr_jpeg(nn.Module):
+ """Converts RGB image to YCbCr
+ Args:
+ image(tensor): batch x 3 x height x width
+ Returns:
+ result(tensor): batch x height x width x 3
+ """
+
+ def __init__(self):
+ super(rgb_to_ycbcr_jpeg, self).__init__()
+ matrix = np.array(
+ [[0.299, 0.587, 0.114], [-0.168736, -0.331264, 0.5],
+ [0.5, -0.418688, -0.081312]], dtype=np.float32).T
+ self.shift = nn.Parameter(torch.tensor([0., 128., 128.]),
+ requires_grad=False)
+ #
+ self.matrix = nn.Parameter(torch.from_numpy(matrix),
+ requires_grad=False)
+
+ def forward(self, image):
+ image = image.permute(0, 2, 3, 1)
+ result = torch.tensordot(image, self.matrix, dims=1) + self.shift
+ # result = torch.from_numpy(result)
+ result.view(image.shape)
+ return result
+
+
+class chroma_subsampling(nn.Module):
+ """Chroma subsampling on CbCv channels
+ Args:
+ image(tensor): batch x height x width x 3
+ Returns:
+ y(tensor): batch x height x width
+ cb(tensor): batch x height/2 x width/2
+ cr(tensor): batch x height/2 x width/2
+ """
+
+ def __init__(self):
+ super(chroma_subsampling, self).__init__()
+
+ def forward(self, image):
+ image_2 = image.permute(0, 3, 1, 2).clone()
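+        # 4:2:0-style chroma subsampling: average-pool Cb and Cr by a factor of 2
+        # in each spatial dimension while Y stays at full resolution.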
+ avg_pool = nn.AvgPool2d(kernel_size=2, stride=(2, 2),
+ count_include_pad=False)
+ cb = avg_pool(image_2[:, 1, :, :].unsqueeze(1))
+ cr = avg_pool(image_2[:, 2, :, :].unsqueeze(1))
+ cb = cb.permute(0, 2, 3, 1)
+ cr = cr.permute(0, 2, 3, 1)
+ return image[:, :, :, 0], cb.squeeze(3), cr.squeeze(3)
+
+
+class block_splitting(nn.Module):
+ """ Splitting image into patches
+ Input:
+ image(tensor): batch x height x width
+ Output:
+ patch(tensor): batch x h*w/64 x h x w
+ """
+
+ def __init__(self):
+ super(block_splitting, self).__init__()
+ self.k = 8
+
+ def forward(self, image):
+ height, width = image.shape[1:3]
+ batch_size = image.shape[0]
+ image_reshaped = image.view(batch_size, height // self.k, self.k, -1,
+ self.k)
+ image_transposed = image_reshaped.permute(0, 1, 3, 2, 4)
+ return image_transposed.contiguous().view(batch_size, -1, self.k, self.k)
+
+
+class dct_8x8(nn.Module):
+ """Discrete Cosine Transformation
+ Args:
+ image(tensor): batch x height x width
+ Returns:
+ dcp(tensor): batch x height x width
+ """
+
+ def __init__(self):
+ super(dct_8x8, self).__init__()
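+        # Precompute the 8x8 2-D DCT-II basis; alpha scales the DC row/column by
+        # 1/sqrt(2), matching the standard JPEG normalization.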
+ tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
+ for x, y, u, v in itertools.product(range(8), repeat=4):
+ tensor[x, y, u, v] = np.cos((2 * x + 1) * u * np.pi / 16) * np.cos(
+ (2 * y + 1) * v * np.pi / 16)
+ alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
+ #
+ self.tensor = nn.Parameter(torch.from_numpy(tensor).float(),
+ requires_grad=False)
+ self.scale = nn.Parameter(
+ torch.from_numpy(np.outer(alpha, alpha) * 0.25).float(),
+ requires_grad=False)
+
+ def forward(self, image):
+ image = image - 128
+ result = self.scale * torch.tensordot(image, self.tensor, dims=2)
+ result.view(image.shape)
+ return result
+
+
+class y_quantize(nn.Module):
+ """JPEG Quantization for Y channel
+ Args:
+ image(tensor): batch x height x width
+ rounding(function): rounding function to use
+ factor(float): Degree of compression
+ Returns:
+ image(tensor): batch x height x width
+ """
+
+ def __init__(self, rounding, factor=1):
+ super(y_quantize, self).__init__()
+ self.rounding = rounding
+ self.factor = factor
+ self.y_table = utils.y_table
+
+ def forward(self, image):
+ image = image.float() / (self.y_table * self.factor)
+ image = self.rounding(image)
+ return image
+
+
+class c_quantize(nn.Module):
+ """JPEG Quantization for CrCb channels
+ Args:
+ image(tensor): batch x height x width
+ rounding(function): rounding function to use
+ factor(float): Degree of compression
+ Returns:
+ image(tensor): batch x height x width
+ """
+
+ def __init__(self, rounding, factor=1):
+ super(c_quantize, self).__init__()
+ self.rounding = rounding
+ self.factor = factor
+ self.c_table = utils.c_table
+
+ def forward(self, image):
+ image = image.float() / (self.c_table * self.factor)
+ image = self.rounding(image)
+ return image
+
+
+class compress_jpeg(nn.Module):
+ """Full JPEG compression algortihm
+ Args:
+ imgs(tensor): batch x 3 x height x width
+ rounding(function): rounding function to use
+ factor(float): Compression factor
+ Returns:
+ compressed(dict(tensor)): batch x h*w/64 x 8 x 8
+ """
+
+ def __init__(self, rounding=torch.round, factor=1):
+ super(compress_jpeg, self).__init__()
+ self.l1 = nn.Sequential(
+ rgb_to_ycbcr_jpeg(),
+ chroma_subsampling()
+ )
+ self.l2 = nn.Sequential(
+ block_splitting(),
+ dct_8x8()
+ )
+ self.c_quantize = c_quantize(rounding=rounding, factor=factor)
+ self.y_quantize = y_quantize(rounding=rounding, factor=factor)
+
+ def forward(self, image):
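+        # Inputs are expected in [0, 1]; rescale to [0, 255] before the YCbCr
+        # conversion and chroma subsampling.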
+ y, cb, cr = self.l1(image * 255)
+ components = {'y': y, 'cb': cb, 'cr': cr}
+ for k in components.keys():
+ comp = self.l2(components[k])
+ if k in ('cb', 'cr'):
+ comp = self.c_quantize(comp)
+ else:
+ comp = self.y_quantize(comp)
+
+ components[k] = comp
+
+ return components['y'], components['cb'], components['cr']
diff --git a/jpeg/decompression.py b/jpeg/decompression.py
new file mode 100644
index 0000000..b3f3b50
--- /dev/null
+++ b/jpeg/decompression.py
@@ -0,0 +1,217 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Michael R Lomnitz
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import itertools
+
+import numpy as np
+# PyTorch
+import torch
+import torch.nn as nn
+
+# Local
+from . import utils
+
+
+class y_dequantize(nn.Module):
+ """Dequantize Y channel
+ Args:
+ image(tensor): batch x height x width
+ factor(float): compression factor
+ Returns:
+ batch x height x width
+ """
+
+ def __init__(self, factor=1):
+ super(y_dequantize, self).__init__()
+ self.y_table = utils.y_table
+ self.factor = factor
+
+ def forward(self, image):
+ return image * (self.y_table * self.factor)
+
+
+class c_dequantize(nn.Module):
+ """Dequantize CbCr channel
+ Args:
+ image(tensor): batch x height x width
+ factor(float): compression factor
+ Returns:
+ batch x height x width
+ """
+
+ def __init__(self, factor=1):
+ super(c_dequantize, self).__init__()
+ self.factor = factor
+ self.c_table = utils.c_table
+
+ def forward(self, image):
+ return image * (self.c_table * self.factor)
+
+
+class idct_8x8(nn.Module):
+ """Inverse discrete Cosine Transformation
+ Args:
+ dcp(tensor): batch x height x width
+ Returns:
+ batch x height x width
+ """
+
+ def __init__(self):
+ super(idct_8x8, self).__init__()
+ alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
+ self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).float(),
+ requires_grad=False)
+ tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
+ for x, y, u, v in itertools.product(range(8), repeat=4):
+ tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
+ (2 * v + 1) * y * np.pi / 16)
+ self.tensor = nn.Parameter(torch.from_numpy(tensor).float(),
+ requires_grad=False)
+
+ def forward(self, image):
+ image = image * self.alpha
+ result = 0.25 * torch.tensordot(image, self.tensor, dims=2) + 128
+ result.view(image.shape)
+ return result
+
+
+class block_merging(nn.Module):
+ """Merge pathces into image
+ Args:
+ patches(tensor) batch x height*width/64, height x width
+ height(int)
+ width(int)
+ Returns:
+ batch x height x width
+ """
+
+ def __init__(self):
+ super(block_merging, self).__init__()
+
+ def forward(self, patches, height, width):
+ k = 8
+ batch_size = patches.shape[0]
+ image_reshaped = patches.view(batch_size, height // k, width // k, k, k)
+ image_transposed = image_reshaped.permute(0, 1, 3, 2, 4)
+ return image_transposed.contiguous().view(batch_size, height, width)
+
+
+class chroma_upsampling(nn.Module):
+ """Upsample chroma layers
+ Args:
+ y(tensor): y channel image
+ cb(tensor): cb channel
+ cr(tensor): cr channel
+ Returns:
+ batch x height x width x 3
+ """
+
+ def __init__(self):
+ super(chroma_upsampling, self).__init__()
+
+ def forward(self, y, cb, cr):
+ def repeat(x, k=2):
+ height, width = x.shape[1:3]
+ x = x.unsqueeze(-1)
+ x = x.repeat(1, 1, k, k)
+ x = x.view(-1, height * k, width * k)
+ return x
+
+ cb = repeat(cb)
+ cr = repeat(cr)
+
+ return torch.cat([y.unsqueeze(3), cb.unsqueeze(3), cr.unsqueeze(3)], dim=3)
+
+
+class ycbcr_to_rgb_jpeg(nn.Module):
+ """Converts YCbCr image to RGB JPEG"""
+
+ def __init__(self):
+ super(ycbcr_to_rgb_jpeg, self).__init__()
+
+ matrix = np.array(
+ [[1., 0., 1.402], [1, -0.344136, -0.714136], [1, 1.772, 0]],
+ dtype=np.float32).T
+ self.shift = nn.Parameter(torch.tensor([0, -128., -128.]),
+ requires_grad=False)
+ self.matrix = nn.Parameter(torch.from_numpy(matrix), requires_grad=False)
+
+ def forward(self, image):
+ result = torch.tensordot(image + self.shift, self.matrix, dims=1)
+ # result = torch.from_numpy(result)
+ result.view(image.shape)
+ return result.permute(0, 3, 1, 2)
+
+
+class decompress_jpeg(nn.Module):
+ """Full JPEG decompression algortihm
+ Args:
+ compressed(dict(tensor)): batch x h*w/64 x 8 x 8
+ rounding(function): rounding function to use
+ factor(float): Compression factor
+ Returns:
+ batch x 3 x height x width
+ """
+
+ def __init__(self, height, width, rounding=torch.round, factor=1):
+ super(decompress_jpeg, self).__init__()
+ self.c_dequantize = c_dequantize(factor=factor)
+ self.y_dequantize = y_dequantize(factor=factor)
+ self.idct = idct_8x8()
+ self.merging = block_merging()
+ self.chroma = chroma_upsampling()
+ self.colors = ycbcr_to_rgb_jpeg()
+
+ self.height, self.width = height, width
+
+ def forward(self, y, cb, cr):
+ components = {'y': y, 'cb': cb, 'cr': cr}
+ for k in components.keys():
+ if k in ('cb', 'cr'):
+ comp = self.c_dequantize(components[k])
+ height, width = int(self.height / 2), int(self.width / 2)
+ else:
+ comp = self.y_dequantize(components[k])
+ height, width = self.height, self.width
+ comp = self.idct(comp)
+ components[k] = self.merging(comp, height, width)
+
+ image = self.chroma(components['y'], components['cb'], components['cr'])
+ image = self.colors(image)
+
+ image = torch.clamp(image, 0., 255.)
+ return image / 255
diff --git a/jpeg/jpeg_module.py b/jpeg/jpeg_module.py
new file mode 100644
index 0000000..ac88849
--- /dev/null
+++ b/jpeg/jpeg_module.py
@@ -0,0 +1,71 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Michael R Lomnitz
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import torch
+import torch.nn as nn
+# Local
+import torchvision.transforms
+
+from jpeg.compression import compress_jpeg
+from jpeg.decompression import decompress_jpeg
+from jpeg.utils import diff_round, quality_to_factor
+
+
+class DifferentiableJPEG(nn.Module):
+ def __init__(self, height, width, differentiable=True, quality=80):
+ """ Initialize the DiffJPEG layer
+ Args:
+ height: Original image height
+ width: Original image width
+ differentiable: If True, use the custom differentiable rounding
+ function; if False, use standard torch.round.
+ quality: Quality factor for the JPEG compression scheme.
+ """
+ super(DifferentiableJPEG, self).__init__()
+ if differentiable:
+ rounding = diff_round
+ else:
+ rounding = torch.round
+ factor = quality_to_factor(quality)
+ self.compress = compress_jpeg(rounding=rounding, factor=factor)
+ self.decompress = decompress_jpeg(height, width, rounding=rounding,
+ factor=factor)
+
+ def forward(self, x):
+ y, cb, cr = self.compress(x)
+ recovered = self.decompress(y, cb, cr)
+
+ return recovered
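+
+
+# Illustrative usage sketch (not part of the original reference
+# implementation): round-trip a float image batch in [0, 1] of shape
+# (N, 3, H, W) through the differentiable JPEG layer. The concrete sizes and
+# quality below are assumptions for this example.
+if __name__ == "__main__":
+  jpeg = DifferentiableJPEG(height=32, width=32, differentiable=True,
+                            quality=80)
+  x = torch.rand(4, 3, 32, 32)
+  x_jpeg = jpeg(x)  # JPEG-compressed and decompressed approximation of x
+  print(x.shape, x_jpeg.shape)  # both should be (4, 3, 32, 32)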
diff --git a/jpeg/utils.py b/jpeg/utils.py
new file mode 100644
index 0000000..69eeeb1
--- /dev/null
+++ b/jpeg/utils.py
@@ -0,0 +1,81 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Michael R Lomnitz
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import numpy as np
+# PyTorch
+import torch
+import torch.nn as nn
+
+y_table = np.array(
+ [[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60,
+ 55], [14, 13, 16, 24, 40, 57, 69, 56],
+ [14, 17, 22, 29, 51, 87, 80, 62], [18, 22, 37, 56, 68, 109, 103,
+ 77], [24, 35, 55, 64, 81, 104, 113, 92],
+ [49, 64, 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]],
+ dtype=np.float32).T
+
+y_table = nn.Parameter(torch.from_numpy(y_table))
+#
+c_table = np.empty((8, 8), dtype=np.float32)
+c_table.fill(99)
+c_table[:4, :4] = np.array([[17, 18, 24, 47], [18, 21, 26, 66],
+ [24, 26, 56, 99], [47, 66, 99, 99]]).T
+c_table = nn.Parameter(torch.from_numpy(c_table))
+
+
+def diff_round(x):
+ """
+ Differentiable rounding function
+ Args:
+ x: Tensor.
+ Returns:
+ Rounded tensor.
+ """
+ return torch.round(x) + (x - torch.round(x))**3
+
+
+def quality_to_factor(quality):
+ """Calculate factor corresponding to quality
+ Args:
+ quality: Quality for jpeg compression
+ Returns:
+ Compression factor
+ """
+ if quality < 50:
+ quality = 5000. / quality
+ else:
+ quality = 200. - quality*2
+ return quality / 100.
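+
+
+# Illustrative sanity check (not part of the original reference
+# implementation). quality_to_factor maps, e.g., quality=80 to
+# (200 - 2 * 80) / 100 = 0.4 and quality=25 to (5000 / 25) / 100 = 2.0;
+# diff_round stays close to torch.round in the forward pass (it deviates by
+# at most 0.125) while keeping a nonzero gradient.
+if __name__ == "__main__":
+  assert abs(quality_to_factor(80) - 0.4) < 1e-6
+  assert abs(quality_to_factor(25) - 2.0) < 1e-6
+  x = torch.tensor([0.3, 0.7, 1.2], requires_grad=True)
+  y = diff_round(x)
+  print(y)  # approximately [0.027, 0.973, 1.008], i.e. close to round(x)
+  y.sum().backward()
+  print(x.grad)  # 3 * (x - round(x)) ** 2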
diff --git a/networks.py b/networks.py
new file mode 100644
index 0000000..61c918e
--- /dev/null
+++ b/networks.py
@@ -0,0 +1,671 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from io import BytesIO
+from typing import Any
+from typing import Callable
+from typing import Optional
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torchvision.transforms.functional as torchvision_functional
+from PIL import Image
+from torch.nn import init
+from jpeg import DifferentiableJPEG
+from torchvision.models.resnet import resnet50
+from torchvision.models.inception import inception_v3
+
+
+class Lambda(nn.Module):
+ def __init__(self, function: Callable):
+ super().__init__()
+ self.function = function
+
+ def forward(self, x, **kwargs):
+ return self.function(x, **kwargs)
+
+
+class InputNormalization(nn.Module):
+ def __init__(self, module: nn.Module, mean: torch.Tensor, std: torch.Tensor):
+ super().__init__()
+ self.module = module
+ self.register_buffer("mean", mean[..., None, None])
+ self.register_buffer("std", std[..., None, None])
+
+ def forward(self, x, *args, **kwargs):
+ return self.module(
+ torchvision_functional.normalize(x, self.mean, self.std, False), *args,
+ **kwargs)
+
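+# Illustrative usage sketch (not from the original code base): wrap a
+# classifier so that inputs are normalized with dataset statistics before the
+# forward pass. The CIFAR-10 mean/std values below are assumptions for this
+# example.
+#
+#   model = InputNormalization(
+#       cifar_resnet18(),
+#       mean=torch.tensor([0.4914, 0.4822, 0.4465]),
+#       std=torch.tensor([0.2470, 0.2435, 0.2616]))
+#   logits = model(torch.rand(2, 3, 32, 32))
+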
+
+class Detector(nn.Module):
+ def __init__(self, encoder: Optional[nn.Module] = None,
+ n_features_encoder: int = 0, classifier: Optional[nn.Module] = None,
+ n_features_classifier: int = 0, ):
+ super().__init__()
+ assert encoder is not None or classifier is not None
+
+ self.encoder = encoder
+ self.classifier = classifier
+ n_features = n_features_encoder + n_features_classifier
+ self.head = nn.Sequential(
+ nn.Linear(n_features, n_features * 4),
+ nn.ReLU(),
+ nn.Linear(n_features * 4, n_features * 4),
+ nn.ReLU(),
+ nn.Linear(n_features * 4, n_features * 4),
+ nn.ReLU(),
+ nn.Dropout(0.2),
+ nn.Linear(n_features * 4, n_features),
+ nn.ReLU(),
+ nn.Linear(n_features, 2),
+ )
+
+ def train(self, mode: bool = True) -> nn.Module:
+ if self.encoder is not None:
+ self.encoder.train(mode)
+ self.head.train(mode)
+ self.training = mode
+ # keep classifier always in test mode
+ if self.classifier is not None:
+ self.classifier.train(False)
+
+ return self
+
+ def forward(self, x):
+ features = []
+ if self.encoder is not None:
+ features.append(self.encoder(x))
+ if self.classifier is not None:
+ features.append(self.classifier(x))
+ if len(features) > 1:
+ features = torch.cat(features, 1)
+ else:
+ features = features[0]
+
+ return self.head(features)
+
+
+class ScaledLogitsModule(nn.Module):
+ def __init__(self, module: nn.Module, scale: float):
+ super().__init__()
+ self.module = module
+ self.scale = scale
+
+ def forward(self, *args, **kwargs):
+ return self.module(*args, **kwargs) * self.scale
+
+
+class GaussianNoiseInputModule(nn.Module):
+ def __init__(self, module: nn.Module, stddev: float):
+ super().__init__()
+ self.stddev = stddev
+ self.module = module
+
+ def forward(self, x, *args, **kwargs):
+ x = x + torch.randn_like(x) * self.stddev
+ return self.module(x, *args, **kwargs)
+
+
+class __GaussianNoiseGradientFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, input, stddev):
+ ctx.intermediate_results = stddev
+ return input
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ stddev = ctx.intermediate_results
+ grad_input = grad_output + torch.randn_like(grad_output) * stddev
+ return grad_input, None
+
+
+gaussian_noise_gradient = __GaussianNoiseGradientFunction.apply
+
+
+class GaussianNoiseGradientModule(nn.Module):
+ def __init__(self, module: nn.Module, stddev: float):
+ super().__init__()
+ self.module = module
+ self.stddev = stddev
+
+ def forward(self, x, *args, **kwargs):
+ return gaussian_noise_gradient(self.module(x, *args, **kwargs), self.stddev)
+
+
+class __JPEGForwardIdentityBackwardFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx: Any, input: torch.Tensor, quality: int) -> torch.Tensor:
+ res = []
+ for x in input.permute(0, 2, 3, 1).detach().cpu().numpy():
+ output = BytesIO()
+ x = (np.clip(x, 0, 1) * 255).astype(np.uint8)
+ Image.fromarray(x).save(output, 'JPEG', quality=quality)
+ x = Image.open(output)
+ res.append(np.array(x).transpose(2, 0, 1) / 255.0)
+ res = torch.Tensor(np.array(res)).to(input.device)
+
+ return res
+
+ @staticmethod
+ def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
+ return grad_output, None
+
+
+jpeg_forward_identity_backward = __JPEGForwardIdentityBackwardFunction.apply
+
+
+class __LambdaForwardIdentityBackward(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx: Any, input: torch.Tensor,
+ function: Callable) -> torch.Tensor:
+ return function(input)
+
+ @staticmethod
+ def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
+ return grad_output, None, None
+
+
+lambda_forward_identity_backward = __LambdaForwardIdentityBackward.apply
+
+
+class JPEGForwardIdentityBackwardModule(nn.Module):
+ def __init__(self, module: nn.Module, quality: int, size: int, legacy=False):
+ super().__init__()
+ self.module = module
+
+ if legacy:
+ self.jpeg = lambda x: jpeg_forward_identity_backward(x, quality)
+ else:
+ self.jpeg_module = DifferentiableJPEG(size, size, True, quality=quality)
+ self.jpeg = lambda x: lambda_forward_identity_backward(x,
+ self.jpeg_module)
+
+ def forward(self, x, *args, **kwargs):
+ return self.module(self.jpeg(x), *args, **kwargs)
+
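+# Illustrative usage sketch (not from the original code base): this wrapper
+# acts as a BPDA-style preprocessor, applying JPEG compression in the forward
+# pass while treating it as the identity in the backward pass. The quality
+# and size below are assumptions for this example.
+#
+#   defended = JPEGForwardIdentityBackwardModule(cifar_resnet18(),
+#                                                quality=75, size=32)
+#   logits = defended(torch.rand(2, 3, 32, 32))
+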
+
+class DifferentiableJPEGModule(nn.Module):
+ def __init__(self, module: nn.Module, quality: int, size: int):
+ super().__init__()
+ self.module = module
+ self.jpeg = DifferentiableJPEG(size, size, True, quality=quality)
+
+ def forward(self, x, *args, **kwargs):
+ return self.module(self.jpeg(x), *args, **kwargs)
+
+
+class __GausianBlurForwardIdentityBackwardFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx: Any, input: torch.Tensor, kernel_size: int,
+ stddev: float) -> torch.Tensor:
+ return torchvision_functional.gaussian_blur(input, kernel_size, stddev)
+
+ @staticmethod
+ def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
+ return grad_output, None, None
+
+
+gaussian_blur_forward_identity_backward = __GausianBlurForwardIdentityBackwardFunction.apply
+
+
+class GausianBlurForwardIdentityBackwardModule(nn.Module):
+ def __init__(self, module: nn.Module, kernel_size: int, stddev: float):
+ super().__init__()
+ self.module = module
+ self.kernel_size = kernel_size
+ self.stddev = stddev
+
+ def forward(self, x, *args, **kwargs):
+ return self.module(
+ gaussian_blur_forward_identity_backward(x, self.kernel_size,
+ self.stddev), *args, **kwargs)
+
+
+class __UniversalSingularValueThresholding(torch.autograd.Function):
+ """Universal Singular Value Thresholding (USVT) """
+
+ @staticmethod
+ def forward(ctx: Any, input: torch.Tensor, me_channel_concat: bool = True,
+ maskp: float = 0.5, svdprob: float = 0.8):
+ device = input.device
+ batch_num, c, h, w = input.size()
+
+ output = torch.zeros_like(input).cpu().numpy()
+
+ for i in range(batch_num):
+ img = (input[i] * 2 - 1).cpu().numpy()
+
+ if me_channel_concat:
+ img = np.concatenate((np.concatenate((img[0], img[1]), axis=1), img[2]),
+ axis=1)
+ mask = np.random.binomial(1, maskp, h * w * c).reshape(h, w * c)
+ p_obs = len(mask[mask == 1]) / (h * w * c)
+
+ if svdprob is not None:
+ u, sigma, v = np.linalg.svd(img * mask)
+ S = np.zeros((h, w))
+ for j in range(int(svdprob * h)):
+ S[j][j] = sigma[j]
+ S = np.concatenate((S, np.zeros((h, w * 2))), axis=1)
+ W = np.dot(np.dot(u, S), v) / p_obs
+ W[W < -1] = -1
+ W[W > 1] = 1
+ est_matrix = (W + 1) / 2
+ for channel in range(c):
+ output[i, channel] = est_matrix[:, channel * h:(channel + 1) * h]
+ else:
+ est_matrix = ((img * mask) + 1) / 2
+ for channel in range(c):
+ output[i, channel] = est_matrix[:, channel * h:(channel + 1) * h]
+
+ else:
+ mask = np.random.binomial(1, maskp, h * w).reshape(h, w)
+ p_obs = len(mask[mask == 1]) / (h * w)
+ for channel in range(c):
+ u, sigma, v = np.linalg.svd(img[channel] * mask)
+ S = np.zeros((h, w))
+ for j in range(int(svdprob * h)):
+ S[j][j] = sigma[j]
+ W = np.dot(np.dot(u, S), v) / p_obs
+ W[W < -1] = -1
+ W[W > 1] = 1
+ output[i, channel] = (W + 1) / 2
+
+ output = torch.from_numpy(output).float().to(device)
+
+ return output
+
+ @staticmethod
+ def backward(ctx: Any, grad_output: torch.Tensor):
+ return grad_output, None, None, None
+
+
+universal_singular_value_thresholding = __UniversalSingularValueThresholding.apply
+
+
+class UVSTModule(nn.Module):
+ """Apply Universal Singular Value Thresholding as suggested in ME-Net:
+ Chatterjee, S. et al. Matrix estimation by universal singular value thresholding. 2015."""
+
+ def __init__(self, module: nn.Module, me_channel_concat: bool = True,
+ maskp: float = 0.5, svdprob: float = 0.8):
+ super().__init__()
+ self.module = module
+ self.me_channel_concat = me_channel_concat
+ self.maskp = maskp
+ self.svdprob = svdprob
+
+ def forward(self, x, *args, **kwargs):
+ x = universal_singular_value_thresholding(x, self.me_channel_concat,
+ self.maskp, self.svdprob)
+ return self.module(x, *args, **kwargs)
+
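+# Illustrative usage sketch (not from the original code base): USVT is
+# applied as a preprocessing step in the forward pass, while gradients are
+# passed through unchanged in the backward pass. The hyperparameters below
+# are the defaults and merely assumptions for this example.
+#
+#   defended = UVSTModule(cifar_resnet18(), me_channel_concat=True,
+#                         maskp=0.5, svdprob=0.8)
+#   logits = defended(torch.rand(2, 3, 32, 32))
+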
+
+class _ThermometerEncodingFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx: Any, input: torch.Tensor, l: int) -> torch.Tensor:
+ ctx.intermediate_results = input, l
+ return _ThermometerEncodingFunction.tau(input, l)
+
+ @staticmethod
+ def tau_hat(x, l):
+ x_hat = torch.unsqueeze(x, 2)
+ k = torch.arange(l, dtype=x.dtype, device=x.device)
+ k = k.view((1, 1, -1, 1, 1))
+ y = torch.minimum(torch.maximum(x_hat - k / l, torch.zeros_like(x_hat)),
+ torch.ones_like(x_hat))
+
+ shape = list(x.shape)
+ shape[1] = -1
+ y = y.view(shape)
+
+ return y
+
+ @staticmethod
+ def tau(x, l):
+ return torch.ceil(_ThermometerEncodingFunction.tau_hat(x, l))
+
+ @staticmethod
+ def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
+ input, l = ctx.intermediate_results
+ with torch.enable_grad():
+ value_input = _ThermometerEncodingFunction.tau_hat(input.requires_grad_(),
+ l)
+ grad_output = torch.autograd.grad(
+ (value_input,), (input,), (grad_output,))[0].detach()
+
+ return grad_output, None
+
+
+thermometer_encoding = _ThermometerEncodingFunction.apply
+
+
+class ThermometerEncodingModule(nn.Module):
+ def __init__(self, l: int, differentiable: bool):
+ super().__init__()
+ self._l = l
+ self.differentiable = differentiable
+ if differentiable:
+ self.apply_fn = lambda x: thermometer_encoding(x, l)
+ else:
+ # TODO
+ # self.apply_fn = lambda y: lambda_forward_identity_backward(
+ # y, lambda x: thermometer_encoding(x, l))
+ self.apply_fn = lambda x: thermometer_encoding(x, l)
+
+ @property
+ def l(self):
+ return self._l
+
+  def forward(self, x):
+    if self.differentiable:
+      return self.apply_fn(x)
+    else:
+      with torch.no_grad():
+        return self.apply_fn(x)
+
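+# Illustrative shape check (not from the original code base): thermometer
+# encoding with l levels maps an input of shape (N, C, H, W) to
+# (N, C * l, H, W), turning each pixel value v into the l binary indicators
+# ceil(clamp(v - k / l, 0, 1)) for k = 0, ..., l - 1.
+#
+#   enc = ThermometerEncodingModule(l=10, differentiable=True)
+#   x = torch.rand(2, 3, 32, 32)
+#   print(enc(x).shape)  # torch.Size([2, 30, 32, 32])
+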
+
+'''ResNet in PyTorch.
+For Pre-activation ResNet, see 'preact_resnet.py'.
+Reference:
+[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+ Deep Residual Learning for Image Recognition. arXiv:1512.03385
+'''
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class _CifarResNetBasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, in_planes, planes, stride=1):
+ super(_CifarResNetBasicBlock, self).__init__()
+ self.conv1 = nn.Conv2d(
+ in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
+ stride=1, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes,
+ kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = self.bn2(self.conv2(out))
+ out += self.shortcut(x)
+ out = F.relu(out)
+ return out
+
+
+class _CifarResNetBottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, in_planes, planes, stride=1):
+ super().__init__()
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
+ bias=False)
+ self.bn3 = nn.BatchNorm2d(self.expansion * planes)
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != self.expansion * planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
+ stride=stride, bias=False),
+ nn.BatchNorm2d(self.expansion * planes)
+ )
+
+ def forward(self, x, fake_relu=False):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = F.relu(self.bn2(self.conv2(out)))
+ out = self.bn3(self.conv3(out))
+ out += self.shortcut(x)
+ return F.relu(out)
+
+
+class _CifarResNet(nn.Module):
+ def __init__(self, block, num_blocks, num_classes=10, n_input_channels=3):
+ super(_CifarResNet, self).__init__()
+ self.in_planes = 64
+
+ self.conv1 = nn.Conv2d(n_input_channels, 64, kernel_size=3,
+ stride=1, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
+ self.linear = nn.Linear(512 * block.expansion, num_classes)
+
+ def _make_layer(self, block, planes, num_blocks, stride):
+ strides = [stride] + [1] * (num_blocks - 1)
+ layers = []
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, stride))
+ self.in_planes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only=False, features_and_logits=False):
+ out = F.relu(self.bn1(self.conv1(x)))
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = self.layer4(out)
+ out = F.avg_pool2d(out, 4)
+ out = out.view(out.size(0), -1)
+ if features_and_logits:
+ return out, self.linear(out)
+ if not features_only:
+ out = self.linear(out)
+ return out
+
+
+def cifar_resnet18(num_classes=10):
+ """Resnet18 architecture adapted for small resolutions
+ Taken from https://github.com/kuangliu/pytorch-cifar"""
+ return _CifarResNet(_CifarResNetBasicBlock, [2, 2, 2, 2],
+ num_classes=num_classes)
+
+
+def cifar_resnet50(num_classes=10):
+ """Resnet50 architecture adapted for small resolutions
+ Taken from https://github.com/kuangliu/pytorch-cifar"""
+ return _CifarResNet(_CifarResNetBottleneck, [3, 4, 6, 3],
+ num_classes=num_classes)
+
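+# Illustrative usage sketch (not from the original code base):
+#
+#   model = cifar_resnet18(num_classes=10)
+#   x = torch.rand(2, 3, 32, 32)
+#   logits = model(x)                         # shape (2, 10)
+#   feats = model(x, features_only=True)      # shape (2, 512)
+#   feats, logits = model(x, features_and_logits=True)
+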
+
+class _ThermometerCifarResNet(nn.Module):
+ def __init__(self, num_classes: int, l: int, differentiable: bool):
+ super().__init__()
+ self.encoder = ThermometerEncodingModule(l, differentiable)
+ self.model = _CifarResNet(_CifarResNetBasicBlock, [2, 2, 2, 2],
+ num_classes=num_classes, n_input_channels=l * 3)
+
+ @property
+ def l(self):
+ return self.encoder.l
+
+ def forward(self, x, features_only: bool = False, skip_encoder: bool = False):
+ if not skip_encoder:
+ x = self.encoder(x)
+ return self.model(x, features_only)
+
+
+# Taken from https://github.com/meliketoy/wide-resnet.pytorch
+class WideResNetBasicBlock(nn.Module):
+ def __init__(self, in_planes, planes, dropout_rate, stride=1):
+ super(WideResNetBasicBlock, self).__init__()
+ self.bn1 = nn.BatchNorm2d(in_planes)
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
+ bias=True)
+ self.dropout = nn.Dropout(p=dropout_rate)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=True)
+
+ self.shortcut = nn.Sequential()
+ if stride != 1 or in_planes != planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
+ )
+
+ def forward(self, x):
+ out = self.dropout(self.conv1(F.relu(self.bn1(x))))
+ out = self.conv2(F.relu(self.bn2(out)))
+ out += self.shortcut(x)
+
+ return out
+
+
+# Taken from https://github.com/meliketoy/wide-resnet.pytorch
+class WideResNet(nn.Module):
+ def __init__(self, depth, widen_factor, dropout_rate, num_classes=10,
+ n_input_channels=3):
+ super(WideResNet, self).__init__()
+ self.in_planes = 16
+
+ assert ((depth - 4) % 6 == 0), 'WideResNet depth should be 6n+4'
+ n = (depth - 4) / 6
+ k = widen_factor
+
+ nStages = [16, 16 * k, 32 * k, 64 * k]
+
+ self.conv1 = WideResNet.conv3x3(n_input_channels, nStages[0])
+ self.layer1 = self._wide_layer(WideResNetBasicBlock, nStages[1], n,
+ dropout_rate, stride=1)
+ self.layer2 = self._wide_layer(WideResNetBasicBlock, nStages[2], n,
+ dropout_rate, stride=2)
+ self.layer3 = self._wide_layer(WideResNetBasicBlock, nStages[3], n,
+ dropout_rate, stride=2)
+ self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
+ self.linear = nn.Linear(nStages[3], num_classes)
+
+ # initialize weights
+ self.apply(WideResNet.__conv_init)
+
+ @staticmethod
+ def conv3x3(in_planes, out_planes, stride=1):
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=1, bias=True)
+
+ @staticmethod
+ def __conv_init(m):
+ classname = m.__class__.__name__
+ if classname.find('Conv') != -1:
+ init.xavier_uniform_(m.weight, gain=np.sqrt(2))
+ init.constant_(m.bias, 0)
+ elif classname.find('BatchNorm') != -1:
+ init.constant_(m.weight, 1)
+ init.constant_(m.bias, 0)
+
+ def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
+ strides = [stride] + [1] * (int(num_blocks) - 1)
+ layers = []
+
+ for stride in strides:
+ layers.append(block(self.in_planes, planes, dropout_rate, stride))
+ self.in_planes = planes
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x, features_only: bool = False):
+ out = self.conv1(x)
+ out = self.layer1(out)
+ out = self.layer2(out)
+ out = self.layer3(out)
+ out = F.relu(self.bn1(out))
+ out = F.avg_pool2d(out, 8)
+ out = out.view(out.size(0), -1)
+
+ if not features_only:
+ out = self.linear(out)
+
+ return out
+
+
+class _ThermometerCifarWideResNet344(nn.Module):
+ def __init__(self, num_classes: int, l: int, differentiable: bool):
+ super().__init__()
+ self.encoder = ThermometerEncodingModule(l, differentiable)
+ self.model = WideResNet(depth=34, widen_factor=4, dropout_rate=0.3,
+ num_classes=num_classes, n_input_channels=l * 3)
+
+ @property
+ def l(self):
+ return self.encoder.l
+
+ def forward(self, x, features_only: bool = False, skip_encoder: bool = False):
+ if not skip_encoder:
+ x = self.encoder(x)
+ return self.model(x, features_only)
+
+
+def thermometer_encoding_cifar_resnet18(num_classes=10, l=10,
+ differentiable=True):
+ """Resnet18 architecture adapted for small resolutions
+ Taken from https://github.com/kuangliu/pytorch-cifar"""
+ return _ThermometerCifarResNet(num_classes=num_classes, l=l,
+ differentiable=differentiable)
+
+
+def thermometer_encoding_cifar_wideresnet344(num_classes=10, l=10,
+ differentiable=True):
+ """WideResnet architecture.
+ Taken from https://github.com/meliketoy/wide-resnet.pytorch"""
+ return _ThermometerCifarWideResNet344(num_classes=num_classes, l=l,
+ differentiable=differentiable)
+
+
+def non_differentiable_10_thermometer_encoding_cifar_resnet18(num_classes=10):
+ return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
+ l=10, differentiable=False)
+
+
+def differentiable_10_thermometer_encoding_cifar_resnet18(num_classes=10):
+ return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
+ l=10, differentiable=True)
+
+
+def non_differentiable_16_thermometer_encoding_cifar_resnet18(num_classes=10):
+ return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
+ l=16, differentiable=False)
+
+
+def differentiable_16_thermometer_encoding_cifar_resnet18(num_classes=10):
+ return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
+ l=16, differentiable=True)
+
+
+def non_differentiable_16_thermometer_encoding_cifar_wideresnet344(
+ num_classes=10):
+ return thermometer_encoding_cifar_wideresnet344(num_classes=num_classes,
+ l=16, differentiable=False)
+
+
+def differentiable_16_thermometer_encoding_cifar_wideresnet344(num_classes=10):
+ return thermometer_encoding_cifar_wideresnet344(num_classes=num_classes,
+ l=16, differentiable=True)
diff --git a/tqdm_utils.py b/tqdm_utils.py
new file mode 100644
index 0000000..2c29ffb
--- /dev/null
+++ b/tqdm_utils.py
@@ -0,0 +1,71 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Makes print and tqdm work better together.
+Based on the idea presented in https://stackoverflow.com/a/37243211
+"""
+
+import contextlib
+import sys
+import warnings
+
+from tqdm import tqdm
+
+__all__ = ["tqdm_print"]
+
+
+class __DummyFile(object):
+ file = None
+
+ def __init__(self, file):
+ self.file = file
+
+ def write(self, x):
+ # Avoid print() second call (useless \n)
+ if len(x.rstrip()) > 0:
+ with tqdm.external_write_mode():
+ tqdm.write(x, file=self.file)
+
+ def flush(self):
+ pass
+
+
+@contextlib.contextmanager
+def tqdm_print(include_warnings=True):
+ """Makes sure printing text/showing warnings does not interrupt a
+ progressbar but just moves it to the bottom by wrapping stdout and
+ passing all write statements through tqdm.write."""
+
+ save_stdout = sys.stdout
+ sys.stdout = __DummyFile(sys.stdout)
+
+ if include_warnings:
+ def redirected_showwarning(message, category, filename, lineno,
+ file=sys.stdout, line=None):
+ if file is None:
+ file = sys.stdout
+ save_showwarning(message, category, filename, lineno, file, line)
+
+ save_showwarning = warnings.showwarning
+ warnings.showwarning = redirected_showwarning
+
+ try:
+ yield
+ finally:
+ # restore stdout
+ sys.stdout = save_stdout
+ if include_warnings:
+ warnings.showwarning = save_showwarning
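+
+
+# Illustrative usage sketch (not part of the original module): print() calls
+# issued inside the context manager are routed through tqdm.write and thus do
+# not break an active progress bar.
+if __name__ == "__main__":
+  with tqdm_print():
+    for i in tqdm(range(3)):
+      print(f"processing step {i}")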
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000..2ab5df3
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,339 @@
+# Copyright 2022 The Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from typing import Dict
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import numpy as np
+import torch
+import torch.utils.data
+from typing_extensions import Literal
+
+NormType = Union[Literal["linf"], Literal["l2"], Literal["l1"]]
+LabelRandomization = Union[Literal["random"], Literal["systematically"], None]
+
+
+def clipping_aware_rescaling_l2_torch(
+ x0: torch.Tensor, delta: torch.Tensor, target_l2: Union[float, torch.Tensor]
+):
+ """Rescale delta such that it exactly lies target_l2 away in l2 from x0 after clipping.
+
+ Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
+
+ Args:
+ x0: Tensor containing the base samples.
+ delta: Tensor containing the perturbations to add to x0.
+ target_l2: Target l2 distance.
+
+ Returns:
+ Tensor containing required rescaling factors.
+ """
+ N = x0.shape[0]
+ assert delta.shape[0] == N
+
+ delta2 = delta.pow(2).reshape((N, -1))
+ space = torch.where(delta >= 0, 1 - x0, x0).reshape((N, -1)).type(delta.dtype)
+ f2 = space.pow(2) / torch.max(delta2, 1e-20 * torch.ones_like(delta2))
+ f2_sorted, ks = torch.sort(f2, dim=-1)
+ m = torch.cumsum(delta2.gather(dim=-1, index=ks.flip(dims=(1,))), dim=-1).flip(
+ dims=(1,)
+ )
+ dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
+ dx = torch.cat((f2_sorted[:, :1], dx), dim=-1)
+ dy = m * dx
+ y = torch.cumsum(dy, dim=-1)
+
+ if not issubclass(type(target_l2), torch.Tensor):
+ target_l2 = torch.ones(len(x0)).to(x0.device) * target_l2
+
+ assert len(target_l2) == len(
+ x0
+ ), f"Inconsistent length of `target_l2`. Must have length {len(x0)}."
+ assert len(target_l2.shape) == 1, "Inconsistent shape of `target_l2` (must be 1D)."
+
+ target_l2 = target_l2.view((-1, 1, 1, 1)).expand(*x0.shape)
+ target_l2 = target_l2.view(len(target_l2), -1)
+
+ target_l2 = target_l2.type(delta.dtype)
+
+ c = y >= target_l2**2
+
+ # work-around to get first nonzero element in each row
+ f = torch.arange(c.shape[-1], 0, -1, device=c.device)
+ v, j = torch.max(c.long() * f, dim=-1)
+
+ rows = torch.arange(0, N)
+
+ eps2 = f2_sorted[rows, j] - (y[rows, j] - target_l2[rows, j] ** 2) / m[rows, j]
+ # it can happen that for certain rows even the largest j is not large enough
+ # (i.e. v == 0), then we will just use it (without any correction) as it's
+ # the best we can do (this should also be the only cases where m[j] can be
+ # 0 and they are thus not a problem)
+ eps2 = torch.where(v == 0, f2_sorted[:, -1], eps2)
+
+ eps = torch.sqrt(eps2)
+ eps = eps.reshape((-1,) + (1,) * (len(x0.shape) - 1))
+
+ return eps
+
+
+def clipping_aware_rescaling_l1_torch(
+ x0: torch.Tensor, delta: torch.Tensor, target_l1: Union[float, torch.Tensor]
+):
+ """Rescale delta such that it exactly lies target_l1 away in l1 from x0 after clipping.
+
+ Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
+
+ Args:
+ x0: Tensor containing the base samples.
+ delta: Tensor containing the perturbations to add to x0.
+ target_l1: Target l1 distance.
+
+ Returns:
+ Tensor containing required rescaling factors.
+ """
+ N = x0.shape[0]
+ assert delta.shape[0] == N
+
+ delta2 = delta.abs().reshape((N, -1))
+ space = torch.where(delta >= 0, 1 - x0, x0).reshape((N, -1)).type(delta.dtype)
+ f2 = space.abs() / torch.max(delta2, 1e-20 * torch.ones_like(delta2))
+ f2_sorted, ks = torch.sort(f2, dim=-1)
+ m = torch.cumsum(delta2.gather(dim=-1, index=ks.flip(dims=(1,))), dim=-1).flip(
+ dims=(1,)
+ )
+ dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
+ dx = torch.cat((f2_sorted[:, :1], dx), dim=-1)
+ dy = m * dx
+ y = torch.cumsum(dy, dim=-1)
+
+ if not issubclass(type(target_l1), torch.Tensor):
+ target_l1 = torch.ones(len(x0)).to(x0.device) * target_l1
+
+ assert len(target_l1) == len(
+ x0
+ ), f"Inconsistent length of `target_l1`. Must have length {len(x0)}."
+ assert len(target_l1.shape) == 1, "Inconsistent shape of `target_l1` (must be 1D)."
+
+ target_l1 = target_l1.view((-1, 1, 1, 1)).expand(*x0.shape)
+ target_l1 = target_l1.view(len(target_l1), -1)
+
+ target_l1 = target_l1.type(delta.dtype)
+
+ c = y >= target_l1
+
+ # Work-around to get first nonzero element in each row.
+ f = torch.arange(c.shape[-1], 0, -1, device=c.device)
+ v, j = torch.max(c.long() * f, dim=-1)
+
+ rows = torch.arange(0, N)
+
+ eps2 = f2_sorted[rows, j] - (y[rows, j] - target_l1[rows, j]) / m[rows, j]
+ # It can happen that for certain rows even the largest j is not large enough
+ # (i.e. v == 0), then we will just use it (without any correction) as it's
+ # the best we can do (this should also be the only cases where m[j] can be
+ # 0 and they are thus not a problem).
+ eps = torch.where(v == 0, f2_sorted[:, -1], eps2)
+
+ eps = eps.reshape((-1,) + (1,) * (len(x0.shape) - 1))
+ return eps
+
+
+def clipping_aware_rescaling_linf_torch(
+ x0: torch.Tensor, delta: torch.Tensor, target_linf: Union[float, torch.Tensor]
+):
+ """Rescale delta such that it exactly lies target_linf away in l2inf from x0 after clipping.
+
+ Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
+
+ Args:
+ x0: Tensor containing the base samples.
+ delta: Tensor containing the perturbations to add to x0.
+ target_linf: Target linf distance.
+
+ Returns:
+ Tensor containing required rescaling factors.
+ """
+ N = x0.shape[0]
+ assert delta.shape[0] == N
+
+ if not issubclass(type(target_linf), torch.Tensor):
+ target_linf = torch.ones(len(x0)).to(x0.device) * target_linf
+
+ assert len(target_linf) == len(
+ x0
+ ), f"Inconsistent length of `target_linf`. Must have length {len(x0)}."
+ assert (
+ len(target_linf.shape) == 1
+ ), "Inconsistent shape of `target_linf` (must be 1D)."
+
+ target_linf = target_linf.view((-1, 1, 1, 1)).expand(*x0.shape)
+ target_linf = target_linf.view(len(target_linf), -1)
+
+ target_linf = target_linf.type(delta.dtype)
+
+ delta2 = delta.abs().reshape((N, -1))
+ space = torch.where(delta >= 0, 1 - x0, x0).reshape((N, -1)).type(delta.dtype)
+
+ space_mask = space < target_linf
+
+ if torch.any(torch.all(space_mask, dim=-1)):
+ print("Not possible to rescale delta yield set Linf distance")
+
+ delta2[space_mask] = 0
+
+ delta2_sorted, _ = torch.sort(delta2, dim=-1, descending=True)
+
+ eps = target_linf[:, 0] / delta2_sorted[:, 0]
+
+ eps = eps.view(-1, 1, 1, 1)
+
+ return eps
+
+
+def clipping_aware_rescaling(
+ x0: torch.Tensor,
+ delta: torch.Tensor,
+ target_distance: Union[float, torch.Tensor],
+ norm: NormType,
+ growing: bool = True,
+ shrinking: bool = True,
+ return_delta: bool = False,
+):
+ """Rescale delta such that it exactly lies target_distance away from x0 after clipping.
+
+ Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
+
+ Args:
+ x0: Tensor containing the base samples.
+ delta: Tensor containing the perturbations to add to x0.
+ target_distance: Target distance.
+ norm: Norm for measuring the distance between x0 and delta.
+ growing: If True, delta is allowed to grow.
+ shrinking: If True, delta is allowed to shrink.
+ return_delta: Return rescaled delta in addition to x0
+ plus rescaled delta.
+
+ Returns:
+ If return_delta, Tuple of (x0 plus rescaled delta, rescaled delta), otherwise
+ only x0 plus rescaled delta.
+ """
+ if norm == "linf":
+ eps = clipping_aware_rescaling_linf_torch(x0, delta, target_distance)
+ elif norm == "l2":
+ eps = clipping_aware_rescaling_l2_torch(x0, delta, target_distance)
+ elif norm == "l1":
+ eps = clipping_aware_rescaling_l1_torch(x0, delta, target_distance)
+ else:
+ raise ValueError("Invalid norm")
+
+ if not shrinking:
+ eps = torch.clamp_min(eps, 1.0)
+ if not growing:
+ eps = torch.clamp_max(eps, 1.0)
+
+ x = x0 + eps * delta
+ x = torch.clamp(x, 0, 1)
+
+ if return_delta:
+ return x, eps * delta
+ else:
+ return x
+
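+# Illustrative usage sketch (not from the original code base): rescale a
+# random perturbation so that the clipped perturbed input lies at exactly the
+# requested l2 distance from the clean input. The tensor sizes and the target
+# distance are assumptions for this example.
+#
+#   x0 = torch.rand(4, 3, 32, 32)
+#   delta = torch.randn_like(x0)
+#   x_adv, delta_rescaled = clipping_aware_rescaling(
+#       x0, delta, target_distance=0.5, norm="l2", return_delta=True)
+#   print((x_adv - x0).flatten(1).norm(dim=1))  # approximately 0.5 per sample
+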
+
+def normalize(x: torch.Tensor, norm: NormType):
+ """Normalize data to have unit norm.
+
+ Args:
+ x: Data to normalize.
+ norm: Norm to use.
+ Returns:
+ Normalized x.
+ """
+ if norm == "linf":
+ x = torch.sign(x)
+ elif norm in ("l2", "l1"):
+ x /= torch.norm(x, p=1 if norm == "l1" else 2, keepdim=True, dim=(1, 2, 3))
+ else:
+ raise ValueError("Invalid norm:", norm)
+ return x
+
+
+class RandomizeLabelsDataset(torch.utils.data.Dataset):
+ def __init__(
+ self,
+ base: torch.utils.data.Dataset,
+ mode: LabelRandomization,
+ label_map: Optional[Dict[int, int]] = None,
+ n_classes: int = 10,
+ ):
+ if not n_classes > 0:
+ raise ValueError("n_classes must be > 0.")
+ if mode is None and label_map is None:
+ raise ValueError("If mode is None, label_map must not be None.")
+ if mode not in (None, "random", "systematically"):
+ raise ValueError("mode must be one of None, random, systematically.")
+
+ self.base = base
+ self.mode = mode
+ if label_map is None:
+ if mode == "random":
+ labels = np.random.randint(low=0, high=n_classes, size=len(base))
+ elif mode == "systematically":
+ labels = [
+ (a + b) % n_classes for a, b in enumerate(list(range(n_classes)))
+ ]
+ random.shuffle(labels)
+ label_map = {i: labels[i] for i in range(len(labels))}
+ self.label_map = label_map
+
+ def __getitem__(self, item):
+ x, y = self.base[item]
+ if self.mode == "random":
+ y = self.label_map[item]
+ elif self.mode == "systematically":
+ y = self.label_map[y]
+ else:
+ raise ValueError(f"Unsupported mode: {self.mode}")
+ return x, y
+
+ def __len__(self):
+ return len(self.base)
+
+ def __repr__(self):
+ return f"RandomizeLabelsDataset(base_dataset: {repr(self.base)}, mode: {self.mode})"
+
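+# Illustrative usage sketch (not from the original code base): wrap an
+# existing dataset so that labels are replaced according to the chosen
+# randomization mode. The dummy tensors below are assumptions for this
+# example.
+#
+#   base = torch.utils.data.TensorDataset(torch.rand(100, 3, 32, 32),
+#                                         torch.randint(0, 10, (100,)))
+#   randomized = RandomizeLabelsDataset(base, mode="random", n_classes=10)
+#   x, y = randomized[0]
+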
+
+def build_dataloader_from_arrays(x: np.ndarray, y: np.ndarray, batch_size: int = 1):
+ """Wrap two arrays in a dataset and data loader.
+
+ Args:
+ x: Array containing input data.
+ y: Array containing target data.
+ batch_size: Batch size of the newly created data loader.
+
+ Returns:
+ Dataloader based on x,y.
+ """
+ x_tensor = torch.tensor(x, device="cpu", dtype=torch.float32)
+ y_tensor = torch.tensor(y, device="cpu", dtype=torch.long)
+
+ dataset = torch.utils.data.TensorDataset(x_tensor, y_tensor)
+ dataloader = torch.utils.data.DataLoader(dataset, batch_size)
+
+ return dataloader
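+
+
+# Illustrative usage sketch (not part of the original module): wrap two numpy
+# arrays in a data loader and iterate over mini-batches.
+if __name__ == "__main__":
+  x = np.random.rand(16, 3, 32, 32).astype(np.float32)
+  y = np.random.randint(0, 10, size=16)
+  loader = build_dataloader_from_arrays(x, y, batch_size=4)
+  for xb, yb in loader:
+    print(xb.shape, yb.shape)  # torch.Size([4, 3, 32, 32]) torch.Size([4])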