config_deep.yaml
# pytorch_lightning==2.0.2
seed_everything: true
trainer:
  accelerator: cuda
  strategy: auto
  devices: auto
  num_nodes: 1
  precision: 32-true
  logger:
    class_path: CSVLogger
    init_args:
      save_dir: "lightning_logs"
      name: "lightning_logs"
      flush_logs_every_n_steps: 1
  callbacks: null
  fast_dev_run: false
  max_epochs: 100
  min_epochs: null
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: null
  limit_test_batches: null
  limit_predict_batches: null
  overfit_batches: 0.0
  val_check_interval: 18
  check_val_every_n_epoch: 1
  num_sanity_val_steps: null
  log_every_n_steps: 1
  enable_checkpointing: null
  enable_progress_bar: null
  enable_model_summary: null
  accumulate_grad_batches: 1
  gradient_clip_val: null
  gradient_clip_algorithm: null
  deterministic: null
  benchmark: null
  inference_mode: true
  use_distributed_sampler: true
  profiler: null
  detect_anomaly: false
  barebones: false
  plugins: null
  sync_batchnorm: false
  reload_dataloaders_every_n_epochs: 1
  default_root_dir: null
model:
  layers:
    - 784
    - 256
    - 10
  tau_mem: 20.0
  tau_stdp: 10.0
  controller_rate: 0.1
  max_val_steps: 10
  max_train_steps: 100
  positive_control: 0.1
  alpha_stdp: 1.0
  batch_size: 1000
data:
  data_dir: data/
  batch_size: 1000
ckpt_path: null
optimizer:
  class_path: torch.optim.Adam
  init_args:
    betas:
      - 0.9
      - 0.9
    lr: 1e-3
    maximize: false
    foreach: null
    differentiable: false
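
Usage note: the sketch below shows a hypothetical entry point that would consume this config through Lightning's CLI. The file name main.py and the class names DeepModel and DataModule are placeholders, not this repository's actual names; the real LightningModule and LightningDataModule must accept the keys listed under model: and data: above.

# main.py -- hypothetical entry point; class and file names are placeholders.
import pytorch_lightning as pl
from pytorch_lightning.cli import LightningCLI


class DeepModel(pl.LightningModule):
    """Placeholder for the repo's LightningModule; its __init__ signature
    mirrors the keys under `model:` in config_deep.yaml."""

    def __init__(self, layers, tau_mem=20.0, tau_stdp=10.0,
                 controller_rate=0.1, max_val_steps=10, max_train_steps=100,
                 positive_control=0.1, alpha_stdp=1.0, batch_size=1000):
        super().__init__()
        self.save_hyperparameters()


class DataModule(pl.LightningDataModule):
    """Placeholder for the repo's LightningDataModule; mirrors `data:`."""

    def __init__(self, data_dir="data/", batch_size=1000):
        super().__init__()


if __name__ == "__main__":
    # The top-level `optimizer:` key is handled by LightningCLI itself:
    # pytorch_lightning 2.0 registers optimizer/lr_scheduler arguments
    # automatically (auto_configure_optimizers=True by default).
    # Run as: python main.py fit --config config_deep.yaml
    LightningCLI(DeepModel, DataModule)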