From 4cf7dd3ee8b0fee986c05dec763da109b31a0cde Mon Sep 17 00:00:00 2001
From: Thomas M Kehrenberg
Date: Fri, 23 Feb 2024 23:58:14 +0100
Subject: [PATCH] Ignore error in decorator

---
 src/algs/adv/base.py             | 6 +++---
 src/algs/adv/scorer.py           | 4 ++--
 src/algs/fs/lff.py               | 6 +++---
 src/arch/autoencoder/artifact.py | 4 ++--
 src/evaluation/metrics.py        | 2 +-
 src/labelling/encode.py          | 4 ++--
 src/labelling/encoder.py         | 4 ++--
 src/labelling/finetuning.py      | 4 ++--
 src/labelling/noise.py           | 6 +++---
 src/labelling/pipeline.py        | 2 +-
 src/models/classifier.py         | 8 ++++----
 11 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/src/algs/adv/base.py b/src/algs/adv/base.py
index 673e3bac..07f5edc9 100644
--- a/src/algs/adv/base.py
+++ b/src/algs/adv/base.py
@@ -42,7 +42,7 @@ class Components(DcModule, Generic[D]):
     pred_y: Optional[Classifier]
     pred_s: Optional[Classifier]
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def train_ae(self) -> None:
         self.ae.train()
         if self.pred_y is not None:
@@ -52,7 +52,7 @@ def train_ae(self) -> None:
         if isinstance(self.disc, nn.Module):
             self.disc.eval()
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def train_disc(self) -> None:
         self.ae.eval()
         if self.pred_y is not None:
@@ -172,7 +172,7 @@ def training_step(
             self.log_recons(x=x_dep, dm=dm, ae=comp.ae, itr=itr, split="deployment")
         return logging_dict
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def log_recons(
         self,
         x: Tensor,
diff --git a/src/algs/adv/scorer.py b/src/algs/adv/scorer.py
index 06d093e1..80f81996 100644
--- a/src/algs/adv/scorer.py
+++ b/src/algs/adv/scorer.py
@@ -25,7 +25,7 @@
 _PBAR_COL: Final[str] = "#ffe252"
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def _encode_and_score_recons(
     dl: CdtDataLoader[TernarySample],
     *,
@@ -60,7 +60,7 @@ def _encode_and_score_recons(
     return CdtDataset(x=zy, y=y, s=s), recon_score
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def balanced_accuracy(y_pred: Tensor, *, y_true: Tensor) -> Tensor:
     return cdtm.subclass_balanced_accuracy(y_pred=y_pred, y_true=y_true, s=y_true)
 
diff --git a/src/algs/fs/lff.py b/src/algs/fs/lff.py
index 87dd6121..56e13709 100644
--- a/src/algs/fs/lff.py
+++ b/src/algs/fs/lff.py
@@ -34,20 +34,20 @@ def __init__(self, labels: Tensor, *, alpha: float = 0.9) -> None:
         self.register_buffer("parameter", torch.zeros(len(labels)))
         self.register_buffer("updated", torch.zeros(len(labels)))
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def update(self, data: Tensor, *, index: Union[Tensor, int]) -> None:
         self.parameter[index] = (
             self.alpha * self.parameter[index] + (1 - self.alpha * self.updated[index]) * data
         )
         self.updated[index] = 1
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def max_loss(self, label: int) -> Tensor:
         label_index = self.labels == label
         return self.parameter[label_index].max()
 
     @override
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def __getitem__(self, index: IndexType) -> Tensor:
         return self.parameter[index].clone()
 
diff --git a/src/arch/autoencoder/artifact.py b/src/arch/autoencoder/artifact.py
index 00fd84cd..174dcdb9 100644
--- a/src/arch/autoencoder/artifact.py
+++ b/src/arch/autoencoder/artifact.py
@@ -20,7 +20,7 @@
 FILENAME: Final[str] = "model.pt"
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def save_ae_artifact(
     model: AePair, *, run: Union[Run, RunDisabled], factory_config: dict[str, Any], name: str
 ) -> None:
@@ -52,7 +52,7 @@ def _process_root_dir(root: Optional[Union[Path, str]]) -> Path:
     return root
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def load_ae_from_artifact(
     name: str,
     *,
diff --git a/src/evaluation/metrics.py b/src/evaluation/metrics.py
index 70941ace..b3d8921a 100644
--- a/src/evaluation/metrics.py
+++ b/src/evaluation/metrics.py
@@ -77,7 +77,7 @@ class SummaryMetric(Enum):
 )
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def compute_metrics(
     pair: EmEvalPair,
     *,
diff --git a/src/labelling/encode.py b/src/labelling/encode.py
index 08d0cada..840dd4dc 100644
--- a/src/labelling/encode.py
+++ b/src/labelling/encode.py
@@ -61,7 +61,7 @@ def from_npz(cls, fpath: Union[Path, str]) -> Self:
         return enc
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def generate_encodings(
     dm: DataModule,
     *,
@@ -102,7 +102,7 @@ def generate_encodings(
     return encodings
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def encode_with_group_ids(
     model: nn.Module, *, dl: CdtDataLoader[TernarySample[Tensor]], device: Union[str, torch.device]
 ) -> tuple[Tensor, Tensor]:
diff --git a/src/labelling/encoder.py b/src/labelling/encoder.py
index 16fad20e..b8232372 100644
--- a/src/labelling/encoder.py
+++ b/src/labelling/encoder.py
@@ -48,7 +48,7 @@ def __init__(
     def forward(self, x: Tensor) -> Tensor:  # type: ignore
         return self.encoder(x)
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def load_from_path(self, fpath: Union[Path, str]) -> None:
         fpath = Path(fpath)
         if fpath.exists():
@@ -58,7 +58,7 @@ def load_from_path(self, fpath: Union[Path, str]) -> None:
         else:
             raise RuntimeError(f"Checkpoint {fpath.resolve()} does not exist.")
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def encode(
         self,
         dm: DataModule,
diff --git a/src/labelling/finetuning.py b/src/labelling/finetuning.py
index eda6b301..4f0e58cd 100644
--- a/src/labelling/finetuning.py
+++ b/src/labelling/finetuning.py
@@ -125,7 +125,7 @@ def train_step(
         optimizer.step()
         return output, loss.item()
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def predict_loop(
         self,
         model: nn.Module,
@@ -159,7 +159,7 @@ def predict_loop(
         y = torch.cat(all_y)
         return preds, s, y
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def validate(
         self,
         model: nn.Module,
diff --git a/src/labelling/noise.py b/src/labelling/noise.py
index 5d8ce187..8895b648 100644
--- a/src/labelling/noise.py
+++ b/src/labelling/noise.py
@@ -8,7 +8,7 @@
 __all__ = ["ClnMetric", "centroidal_label_noise", "sample_noise_indices", "uniform_label_noise"]
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def sample_noise_indices(
     labels: Tensor,
     *,
@@ -27,7 +27,7 @@
     return torch.randperm(len(labels), generator=generator)[:num_to_flip]
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def uniform_label_noise(
     labels: Tensor,
     *,
@@ -50,7 +50,7 @@ class ClnMetric(Enum):
     EUCLIDEAN = "euclidean"
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def centroidal_label_noise(
     labels: Tensor,
     *,
diff --git a/src/labelling/pipeline.py b/src/labelling/pipeline.py
index bd87d0b0..98701fcb 100644
--- a/src/labelling/pipeline.py
+++ b/src/labelling/pipeline.py
@@ -121,7 +121,7 @@ class ClipClassifier(Labeller):
 
     # cache_encoder: bool = False
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def evaluate(
         self, g_pred: Tensor, *, g_true: Tensor, use_wandb: bool, prefix: Optional[str] = None
     ) -> dict[str, float]:
diff --git a/src/models/classifier.py b/src/models/classifier.py
index 7283a35a..0ef1190c 100644
--- a/src/models/classifier.py
+++ b/src/models/classifier.py
@@ -30,7 +30,7 @@
 __all__ = ["Classifier", "SetClassifier"]
 
 
-@torch.no_grad()
+@torch.no_grad()  # pyright: ignore
 def cat_cpu_flatten(*ls: list[Tensor], dim: int = 0) -> Iterator[Tensor]:
     for ls_ in ls:
         yield torch.cat(ls_, dim=dim).cpu().flatten()
@@ -58,7 +58,7 @@ def predict(
     ) -> EvalTuple[Tensor, Tensor]:
         ...
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def predict(
         self,
         data: CdtDataLoader[TernarySample],
@@ -174,7 +174,7 @@ class SetClassifier(Model):
     model: SetPredictor  # overriding the definition in `Model`
     criterion: Optional[Loss] = None
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def _fetch_train_data(
         self, *args: tuple[Iterator[S], int], device: torch.device
     ) -> Iterator[_ScSample]:
@@ -233,7 +233,7 @@ def fit(
         pbar.close()
         logger.info("Finished training")
 
-    @torch.no_grad()
+    @torch.no_grad()  # pyright: ignore
     def predict(
         self, *dls: CdtDataLoader[S], device: Union[torch.device, str], max_steps: int
     ) -> EvalTuple[None, None]:
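
Note: the change is identical at every site, so a single standalone sketch covers the whole patch. This is illustrative only, not part of the diff; the `normalize` function is a made-up example, and the exact diagnostic pyright raises depends on the installed torch stubs.

import torch
from torch import Tensor


# Some torch type stubs describe `no_grad` primarily as a context
# manager, so pyright may report an error when it is applied as a
# decorator; the trailing comment suppresses that diagnostic and has
# no effect at runtime.
@torch.no_grad()  # pyright: ignore
def normalize(x: Tensor) -> Tensor:
    # Runs with autograd disabled, exactly as if the body were wrapped
    # in `with torch.no_grad():`.
    return x / x.norm()

Where the specific rule is known, pyright also accepts the scoped form `# pyright: ignore[ruleName]`, which silences only that rule instead of every diagnostic on the line; the patch uses the blanket form throughout.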