Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev/brianreicher tasks #74

Draft
wants to merge 21 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 11 additions & 21 deletions .github/workflows/black.yaml
Original file line number Diff line number Diff line change
@@ -1,27 +1,17 @@
name: black-action
name: Python Black

on: [push, pull_request]

jobs:
linter_name:
name: runner / black
lint:
name: Python Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Check files using the black formatter
uses: rickstaa/action-black@v1
id: action_black
with:
black_args: "."
- name: Create Pull Request
if: steps.action_black.outputs.is_formatted == 'true'
uses: peter-evans/create-pull-request@v3
with:
token: ${{ secrets.GITHUB_TOKEN }}
title: "Format Python code with psf/black push"
commit-message: ":art: Format Python code with psf/black"
body: |
There appear to be some python formatting errors in ${{ github.sha }}. This pull request
uses the [psf/black](https://github.com/psf/black) formatter to fix these issues.
base: ${{ github.head_ref }} # Creates pull request onto pull request or commit branch
branch: actions/black
- name: Setup Python
uses: actions/setup-python@v1
- name: Setup checkout
uses: actions/checkout@master
- name: Lint with Black
run: |
pip install black
black -v --check dacapo tests
9 changes: 5 additions & 4 deletions .github/workflows/docs.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
name: Generate Pages

on: [push, pull_request]

name: Pages
on:
push:
branches:
- master
jobs:
docs:
runs-on: ubuntu-latest
Expand Down
34 changes: 34 additions & 0 deletions .github/workflows/publish.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Build the sdist + wheel with pypa/build and publish to PyPI via the
# official pypa publish action whenever a tag is pushed.
name: Publish

on:
  push:
    # NOTE(review): GitHub's branch/tag filters expect a sequence; the
    # original bare string `tags: "*"` is normalized to list form here.
    tags:
      - "*"

jobs:
  build-n-publish:
    name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Set up Python 3.10
        uses: actions/setup-python@v3
        with:
          python-version: "3.10"
      - name: Install pypa/build
        run: >-
          python -m
          pip install
          build
          --user
      - name: Build a binary wheel and a source tarball
        run: >-
          python -m
          build
          --sdist
          --wheel
          --outdir dist/
      # Guard is redundant with the tag-only trigger above, but harmless
      # defense in depth if the trigger is ever widened.
      - name: Publish distribution 📦 to PyPI
        if: startsWith(github.ref, 'refs/tags')
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}
7 changes: 4 additions & 3 deletions .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
name: Test

on: [push, pull_request]
on:
push:

jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
python-version: ["3.9", "3.10"]

steps:
- uses: actions/checkout@v2
Expand All @@ -22,4 +23,4 @@ jobs:
pip install -r requirements-dev.txt
- name: Test with pytest
run: |
pytest tests
pytest tests
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# DaCapo ![DaCapo](docs/source/_static/icon_dacapo.png)
![DaCapo](docs/source/_static/dacapo.svg)

[![tests](https://github.com/funkelab/dacapo/actions/workflows/tests.yaml/badge.svg)](https://github.com/funkelab/dacapo/actions/workflows/tests.yaml)
[![black](https://github.com/funkelab/dacapo/actions/workflows/black.yaml/badge.svg)](https://github.com/funkelab/dacapo/actions/workflows/black.yaml)
Expand Down
152 changes: 152 additions & 0 deletions care_train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
"""Training script for a CARE (content-aware restoration) run in dacapo.

Builds the datasplit, UNet architecture, CARE task, and gunpowder trainer
configs, stores them in the config store, and starts training.
"""

import dacapo
import logging
import math
import torch
from torchsummary import summary

# CARE task specific elements
from dacapo.experiments.datasplits.datasets.arrays import (
    ZarrArrayConfig,
    IntensitiesArrayConfig,
)
from dacapo.experiments.datasplits.datasets import RawGTDatasetConfig
from dacapo.experiments.datasplits import TrainValidateDataSplitConfig
from dacapo.experiments.architectures import CNNectomeUNetConfig
from dacapo.experiments.tasks import CARETaskConfig

from dacapo.experiments.trainers import GunpowderTrainerConfig
from dacapo.experiments.trainers.gp_augments import (
    SimpleAugmentConfig,
    ElasticAugmentConfig,
    IntensityAugmentConfig,
)
from funlib.geometry import Coordinate
from dacapo.experiments.run_config import RunConfig
from dacapo.experiments.run import Run
from dacapo.store.create_store import create_config_store
from dacapo.train import train


# set basic logging configs
logging.basicConfig(level=logging.INFO)

# Raw input volume (30 nm) read from an n5/zarr container.
raw_array_config_zarr = ZarrArrayConfig(
    name="raw",
    file_name="/n/groups/htem/users/br128/data/CBvBottom/CBxs_lobV_bottomp100um_training_0.n5",
    dataset="volumes/raw_30nm",
)

# Ground-truth target volume (interpolated 90 nm, aligned) from the same container.
gt_array_config_zarr = ZarrArrayConfig(
    name="gt",
    file_name="/n/groups/htem/users/br128/data/CBvBottom/CBxs_lobV_bottomp100um_training_0.n5",
    dataset="volumes/interpolated_90nm_aligned",
)

# Normalize both volumes' intensities into [0, 1].
raw_array_config_int = IntensitiesArrayConfig(
    name="raw_norm", source_array_config=raw_array_config_zarr, min=0.0, max=1.0
)

gt_array_config_int = IntensitiesArrayConfig(
    name="gt_norm", source_array_config=gt_array_config_zarr, min=0.0, max=1.0
)

# Pair the normalized raw and gt arrays into one dataset.
dataset_config = RawGTDatasetConfig(
    name="CBxs_lobV_bottomp100um_CARE_0",
    raw_config=raw_array_config_int,
    gt_config=gt_array_config_int,
)

# TODO: check datasplit config, this honestly might work
# NOTE(review): the same dataset is used for both train and validate —
# confirm that is intended and not a placeholder.
datasplit_config = TrainValidateDataSplitConfig(
    name="CBxs_lobV_bottomp100um_training_0.n5",
    train_configs=[dataset_config],
    validate_configs=[dataset_config],
)
"""
kernel size 3
2 conv passes per block

1 -- 100%, lose 4 pix - 286 pix
2 -- 50%, lose 8 pix - 142 pix
3 -- 25%, lose 16 pix - 32 pix
"""
# UNET config
architecture_config = CNNectomeUNetConfig(
    name="small_unet",
    input_shape=Coordinate(156, 156, 156),
    # eval_shape_increase=Coordinate(72, 72, 72),
    fmaps_in=1,
    num_fmaps=8,
    fmaps_out=32,
    fmap_inc_factor=4,
    downsample_factors=[(2, 2, 2), (2, 2, 2), (2, 2, 2)],
    constant_upsample=True,
)


# CARE task: single-channel, 3D restoration
task_config = CARETaskConfig(name="CAREModel", num_channels=1, dims=3)


# trainer: gunpowder pipeline with simple/elastic/intensity augments
trainer_config = GunpowderTrainerConfig(
    name="gunpowder",
    batch_size=2,
    learning_rate=0.0001,
    augments=[
        SimpleAugmentConfig(),
        ElasticAugmentConfig(
            control_point_spacing=(100, 100, 100),
            control_point_displacement_sigma=(10.0, 10.0, 10.0),
            rotation_interval=(0, math.pi / 2.0),
            subsample=8,
            uniform_3d_rotation=True,
        ),
        IntensityAugmentConfig(
            scale=(0.25, 1.75),
            shift=(-0.5, 0.35),
            clip=False,
        ),
    ],
    num_data_fetchers=20,
    snapshot_interval=10000,
    min_masked=0.15,
)


# run config: ties task, architecture, trainer, and datasplit together
run_config = RunConfig(
    name="CARE_train",
    task_config=task_config,
    architecture_config=architecture_config,
    trainer_config=trainer_config,
    datasplit_config=datasplit_config,
    repetition=0,
    num_iterations=100000,
    validation_interval=1000,
)

run = Run(run_config)

# run summary TODO create issue
print(summary(run.model, (1, 156, 156, 156)))


# store configs, then train
config_store = create_config_store()

config_store.store_datasplit_config(datasplit_config)
config_store.store_architecture_config(architecture_config)
config_store.store_task_config(task_config)
config_store.store_trainer_config(trainer_config)
config_store.store_run_config(run_config)

# Optional start training by config name:
train(run_config.name)

# CLI dacapo train -r {run_config.name}


"""
RuntimeError: Can not downsample shape torch.Size([1, 128, 47, 47, 47]) with factor (2, 2, 2), mismatch in spatial dimension 2
"""
1 change: 0 additions & 1 deletion dacapo/apply.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,3 @@ def apply(run_name: str, iteration: int, dataset_name: str):
iteration,
dataset_name,
)
raise NotImplementedError("This function is not yet implemented.")
2 changes: 1 addition & 1 deletion dacapo/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def validate(run_name, iteration):

@cli.command()
@click.option(
"-r", "--run-name", required=True, type=str, help="The name of the run to use."
"-r", "--run", required=True, type=str, help="The name of the run to use."
)
@click.option(
"-i",
Expand Down
16 changes: 10 additions & 6 deletions dacapo/experiments/architectures/cnnectome_unet.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,9 +273,11 @@ def __init__(
self.l_conv = nn.ModuleList(
[
ConvPass(
in_channels
if level == 0
else num_fmaps * fmap_inc_factor ** (level - 1),
(
in_channels
if level == 0
else num_fmaps * fmap_inc_factor ** (level - 1)
),
num_fmaps * fmap_inc_factor**level,
kernel_size_down[level],
activation=activation,
Expand Down Expand Up @@ -327,9 +329,11 @@ def __init__(
+ num_fmaps
* fmap_inc_factor
** (level + (1 - upsample_channel_contraction[level])),
num_fmaps * fmap_inc_factor**level
if num_fmaps_out is None or level != 0
else num_fmaps_out,
(
num_fmaps * fmap_inc_factor**level
if num_fmaps_out is None or level != 0
else num_fmaps_out
),
kernel_size_up[level],
activation=activation,
padding=padding,
Expand Down
79 changes: 79 additions & 0 deletions dacapo/experiments/architectures/nlayer_discriminator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
from .architecture import Architecture

import torch
import torch.nn as nn
import functools


class NLayerDiscriminator(Architecture):
    """Defines a PatchGAN discriminator.

    A stack of strided Conv2d -> norm -> LeakyReLU layers that maps an
    image to a 1-channel map of per-patch real/fake scores.
    """

    def __init__(self, architecture_config):
        """Construct a PatchGAN discriminator

        Parameters:
            architecture_config -- config object providing:
                input_nc (int) -- the number of channels in input images
                ngf (int)      -- the number of filters in the first conv layer
                n_layers (int) -- the number of conv layers in the discriminator
                norm_layer     -- normalization layer (class or functools.partial)
        """
        super().__init__()

        input_nc: int = architecture_config.input_nc
        ngf: int = architecture_config.ngf
        n_layers: int = architecture_config.n_layers
        norm_layer = architecture_config.norm_layer

        # BatchNorm2d has affine parameters, so a conv bias before it is
        # redundant; InstanceNorm2d does not, so keep the bias then.
        if isinstance(
            norm_layer, functools.partial
        ):  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw = 4  # 4x4 kernels throughout
        padw = 1
        # BUG FIX: the original body referenced an undefined name `ndf`
        # (NameError on construction); the filter count read from the
        # config is `ngf`, used consistently below.
        sequence = [
            nn.Conv2d(input_nc, ngf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)  # cap the channel multiplier at 8x
            sequence += [
                nn.Conv2d(
                    ngf * nf_mult_prev,
                    ngf * nf_mult,
                    kernel_size=kw,
                    stride=2,
                    padding=padw,
                    bias=use_bias,
                ),
                norm_layer(ngf * nf_mult),
                nn.LeakyReLU(0.2, True),
            ]

        # One more conv block at stride 1 before the prediction head.
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(
                ngf * nf_mult_prev,
                ngf * nf_mult,
                kernel_size=kw,
                stride=1,
                padding=padw,
                bias=use_bias,
            ),
            norm_layer(ngf * nf_mult),
            nn.LeakyReLU(0.2, True),
        ]

        sequence += [
            nn.Conv2d(ngf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward: return the per-patch score map for `input`."""
        return self.model(input)
Loading
Loading