
Commit 4ae9f09

remove PerplexityCallback in pydantic api (#636)
### Description

Fix CI by dropping `PerplexityLoggingCallback` from the pydantic API.

### Type of changes

- [x] Bug fix (non-breaking change which fixes an issue)

### Usage

```bash
pytest sub-packages/bionemo-esm2/tests/bionemo/esm2/scripts/test_pydantic_train.py::test_pretrain_pydantic_cli
```

Related to e553389

Signed-off-by: sichu <[email protected]>
Parent: 257e918

2 files changed: +8 −6 lines


sub-packages/bionemo-llm/src/bionemo/llm/run/config_models.py (+2 −1)

```diff
@@ -311,7 +311,8 @@ class TrainingConfig(BaseModel):
     accelerator: str = "gpu"
     # NOTE: VERY important for distributed training performance.
     gc_interval: int = 0
-    include_perplexity: bool = False
+    log_train_ppl: bool = False
+    log_val_ppl: bool = True
     enable_checkpointing: bool = True


```
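For context, a minimal sketch of how the renamed flags surface on the pydantic side (the field names come from the hunk above; it is an assumption here that the remaining `TrainingConfig` fields all carry defaults, since this hunk does not show them):

```python
from bionemo.llm.run.config_models import TrainingConfig

# Assumption: fields not shown in the diff also have defaults, so a bare
# construction succeeds. The two flags below replace `include_perplexity`.
cfg = TrainingConfig(log_train_ppl=True, log_val_ppl=True)

print(cfg.log_train_ppl)  # True
print(cfg.log_val_ppl)  # True
```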
sub-packages/bionemo-llm/src/bionemo/llm/train.py (+6 −5)

```diff
@@ -32,7 +32,7 @@
 from pydantic import BaseModel

 from bionemo.core.utils.dtypes import get_autocast_dtype
-from bionemo.llm.lightning import BionemoLightningModule, PerplexityLoggingCallback
+from bionemo.llm.lightning import BionemoLightningModule
 from bionemo.llm.model.biobert.lightning import biobert_lightning_module
 from bionemo.llm.model.lr_scheduler import WarmupAnnealDecayHoldScheduler
 from bionemo.llm.run.config_models import (
@@ -132,9 +132,6 @@ def setup_trainer(
         LearningRateMonitor(),
     ]

-    if training_config.include_perplexity:
-        callbacks.append(PerplexityLoggingCallback())
-
     if training_config.gc_interval > 0:
         callbacks.append(
             nl_callbacks.GarbageCollectionCallback(
@@ -252,7 +249,11 @@ def train(
     )

     model: BionemoLightningModule = biobert_lightning_module(
-        config=bionemo_model_config, tokenizer=data.tokenizer, optimizer=optimizer
+        config=bionemo_model_config,
+        tokenizer=data.tokenizer,
+        optimizer=optimizer,
+        log_train_ppl=training_config.log_train_ppl,
+        log_val_ppl=training_config.log_val_ppl,
     )
     trainer: nl.Trainer = setup_trainer(parallel_config, training_config, nsys_config=nsys_config)
     nemo_logger: nl.NeMoLogger = nemo_logger_factory(experiment_config, wandb_config=wandb_config)
```
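Moving the flags onto the lightning module implies perplexity is now logged inside the module rather than by a `Trainer` callback. As a hypothetical illustration only (not the BioNeMo implementation), perplexity follows from the mean per-token cross-entropy:

```python
import torch


def perplexity_from_loss(mean_nll: torch.Tensor) -> torch.Tensor:
    """Perplexity is exp of the mean per-token negative log-likelihood."""
    return torch.exp(mean_nll)


# Hypothetical use inside a training/validation step (names illustrative):
# if self.log_train_ppl:
#     self.log("train_ppl", perplexity_from_loss(loss), prog_bar=True)
```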
