From 1e0db7774b5bdb38615d444c881265ec7cec390e Mon Sep 17 00:00:00 2001 From: Hobofan Date: Mon, 22 Feb 2016 13:09:45 +0100 Subject: [PATCH] feat/everything: introduce most of the changes for 0.2.0 --- .travis.yml | 20 +- Cargo.toml | 31 +- README.md | 20 +- benches/network_benches.rs | 443 ++++++++ examples/benchmarks.rs | 529 ++++++++++ perf/README.md | 13 + perf/run_perf.sh | 14 + src/layer.rs | 1077 ++++++++++++-------- src/layers/activation/mod.rs | 22 +- src/layers/activation/relu.rs | 59 ++ src/layers/activation/sigmoid.rs | 71 +- src/layers/common/convolution.rs | 238 ++++- src/layers/common/linear.rs | 181 ++++ src/layers/common/log_softmax.rs | 57 ++ src/layers/common/mod.rs | 104 +- src/layers/common/pooling.rs | 170 +++ src/layers/common/softmax.rs | 58 ++ src/layers/loss/mod.rs | 21 +- src/layers/loss/negative_log_likelihood.rs | 121 +++ src/layers/loss/softmax.rs | 8 - src/layers/mod.rs | 58 +- src/layers/utility/flatten.rs | 3 +- src/layers/utility/mod.rs | 2 + src/layers/utility/reshape.rs | 89 ++ src/lib.rs | 21 +- src/network.rs | 346 +++---- src/shared_memory.rs | 18 - src/solver.rs | 110 +- src/solvers/mod.rs | 93 +- src/solvers/sgd/mod.rs | 20 +- src/solvers/sgd/momentum.rs | 69 +- src/util.rs | 87 ++ src/weight.rs | 185 ++++ tests/layer_specs.rs | 5 +- tests/network_specs.rs | 380 ++++++- tests/solver_specs.rs | 5 +- 36 files changed, 3883 insertions(+), 865 deletions(-) create mode 100644 benches/network_benches.rs create mode 100644 examples/benchmarks.rs create mode 100644 perf/README.md create mode 100755 perf/run_perf.sh create mode 100644 src/layers/activation/relu.rs create mode 100644 src/layers/common/linear.rs create mode 100644 src/layers/common/log_softmax.rs create mode 100644 src/layers/common/pooling.rs create mode 100644 src/layers/common/softmax.rs create mode 100644 src/layers/loss/negative_log_likelihood.rs delete mode 100644 src/layers/loss/softmax.rs create mode 100644 src/layers/utility/reshape.rs delete mode 100644 src/shared_memory.rs create mode 100644 src/util.rs create mode 100644 src/weight.rs diff --git a/.travis.yml b/.travis.yml index 1564ce7b..0981ba39 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,25 +6,23 @@ rust: - stable matrix: allow_failures: - - rust: beta - - rust: stable - rust: nightly - env: TRAVIS_CARGO_NIGHTLY_FEATURE=lint + env: FEATURES=lint exclude: - rust: beta - env: TRAVIS_CARGO_NIGHTLY_FEATURE=lint + env: FEATURES=lint - rust: stable - env: TRAVIS_CARGO_NIGHTLY_FEATURE=lint + env: FEATURES=lint before_script: - | pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH script: - | - travis-cargo build && - travis-cargo test && - travis-cargo bench && - travis-cargo --only stable doc + travis-cargo build -- --no-default-features --features $FEATURES && + travis-cargo test -- --no-default-features --features $FEATURES && + travis-cargo bench -- --no-default-features --features $FEATURES && + travis-cargo --only stable doc -- --no-default-features --features $FEATURES addons: apt: packages: @@ -47,5 +45,5 @@ env: global: - secure: QcJ9u0BrVpvjYnerd/3dukvM+GLFQNikIoDHhtKjVenuM2ozZtW6+/RyyXVC1YMh/SghwTnu4Kcnv1sdmwuiC5KWdPoppfalXdxafPkl5PGEfTOexe6L5UAJNW6BdA4lbRKM3xnaUg0Guq6x6tD/zdABIkh8nym/gRLGKT40e9Xitkf6wUQqPBHTGZimip59qg5Fty8lAD48pCBEXynJm+ihA2tz6EDhp0/7wvieHyEl/FqNwvUL5+Z9EeTzEJfKNF8PA5DTHkgeXgeCnWKLm8cCdPEziRZlgdQtvIW27oZBkNTQGHyqI9/tVYhaW4AeKstzE5BoJuyRzmerWYRQCNiz8bgyAjc5HnpWLJPmPSFaGBWTRzwYwUk/iOUP4YEZiN3p0Xj1sKgSB0TA2AjKWND7cufwjrW8NdPdZ3hURVOnM8DHYSQMm2HOfbUNnkw+P5M8n+flT2HKWFdnPhJ3n12rDlLYdHeg9PQ3emJ6kE8Y/jrNT+6yZRrSwLQnsV0uU8Ii44MFQHpdUOGuOIxZFGh9rjKsUwhruUpGtbwI4FWPOqiQJvIaBFY1IUjIVlVCZevvIG3fPXvPksIEKwK93hM/ThDi2PLq2qwBpA87RNfKxDG4S0aR2j19IG+ludbpPcP95mYFVnGCb4rpj44iZoCifC8c9tVqC4L85hEGzik= matrix: - - TRAVIS_CARGO_NIGHTLY_FEATURE=dev - - TRAVIS_CARGO_NIGHTLY_FEATURE=lint + - FEATURES=travis + - FEATURES=lint diff --git a/Cargo.toml b/Cargo.toml index 5808be9d..0bd3aae7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,12 +14,35 @@ keywords = ["deep-learning", "neural-networks", "machine-learning", "framework"] license = "MIT" [dependencies] -phloem = "~0.3.0" -collenchyma = "= 0.0.3" -log = "~0.3.2" +collenchyma = { version = "0.0.8", default-features = false } +collenchyma-blas = { version = "0.2.0", default-features = false } +collenchyma-nn = { version = "0.3.0", default-features = false } + +log = "0.3.2" +rand = "0.3.0" + clippy = { version = "0.0.41", optional = true } +timeit = "0.1.2" + +[dev-dependencies] +env_logger = "0.3" + [features] -default = [] +default = ["native", "cuda", "opencl"] +native = ["collenchyma/native", "collenchyma-blas/native", "collenchyma-nn/native"] +cuda = ["collenchyma/cuda", "collenchyma-blas/cuda", "collenchyma-nn/cuda"] +opencl = ["collenchyma/opencl", "collenchyma-blas/opencl", "collenchyma-nn/opencl"] + +travis = ["native"] dev = [] +unstable = [] # for travis-cargo lint = ["clippy"] + +[profile.bench] +opt-level = 3 +debug = false +rpath = false +lto = false +debug-assertions = false +codegen-units = 1 diff --git a/README.md b/README.md index d7d375ad..6c230022 100644 --- a/README.md +++ b/README.md @@ -22,8 +22,7 @@ such as feeding in data, logging, or returning results. You can use the layers that ship with Leaf (e.g. Convolutional, ReLU, RNN, SVM, etc.) or thanks to Rust, easily extend Leaf with your own layers. -Leaf strives for leading-edge performance -([benchmarks are next][benchmarks-issue]), while providing a clear and +Leaf strives for [leading-edge performance][benchmarks], while providing a clear and expressive architecture that creates - as we hope - an innovative and active community around machine intelligence and fuels future research. @@ -42,7 +41,7 @@ For more information, [rust]: https://www.rust-lang.org/ [autumn]: http://autumnai.com [tensorflow]: https://github.com/tensorflow/tensorflow -[benchmarks-issue]: https://github.com/autumnai/leaf/issues/26 +[benchmarks]: #benchmarks [documentation]: http://autumnai.github.io/leaf > Disclaimer: Leaf is currently in a very early and heavy stage of development. @@ -69,6 +68,21 @@ Leaf right from the command line. [cargo-edit]: https://github.com/killercup/cargo-edit [leaf-examples]: https://github.com/autumnai/leaf-examples +## Benchmarks + +| (in ms) | Leaf | Torch | Neon | Caffe | Tensorflow | +| ---------------- |---------|----------|---------|----------|------------| +| **Alexnet** +| *FORWARD* | 30.8 | 33.0 | 30.9 | 42.0 | 46.1 | +| *BACKWARD* | 70.6 | 66.1 | 67.2 | 85.3 | 156.0 | +| *TOTAL* | 101.4 | 99.1 | 98.1 | 127.3 | 202.1 | +| **Overfeat** +| *FORWARD* | 104.6 | 113.5 | | 142.3 | | +| *BACKWARD* | 216.7 | 213.7 | | 287.9 | | +| *TOTAL* | 321.3 | 327.2 | | 430.2 | | +| | | | | | | | + + ## Leaf Ecosystem and Extensions We design Leaf and all other crates for machine learning completely modular and diff --git a/benches/network_benches.rs b/benches/network_benches.rs new file mode 100644 index 00000000..c2cf015f --- /dev/null +++ b/benches/network_benches.rs @@ -0,0 +1,443 @@ +#![feature(test)] + +extern crate test; +#[macro_use] +extern crate timeit; +extern crate collenchyma as co; +extern crate leaf; + +use test::Bencher; +use co::prelude::*; + +use std::sync::{Arc, RwLock}; +use leaf::layers::*; +use leaf::layer::*; +use leaf::network::*; +use std::rc::Rc; + +#[cfg(feature = "native")] +fn native_backend() -> Rc> { + Rc::new(Backend::::default().unwrap()) +} + +#[cfg(feature = "cuda")] +fn cuda_backend() -> Rc> { + Rc::new(Backend::::default().unwrap()) +} + +#[cfg(feature = "opencl")] +#[allow(dead_code)] +fn opencl_backend() -> Rc> { + Rc::new(Backend::::default().unwrap()) +} + +#[inline(never)] +#[allow(unused_variables)] +fn bench_profile ()>( + b: &mut Bencher, + mut bench_func: F, + times: usize) { + timeit_loops!(times, { + bench_func(); + }); +} + +// #[inline(never)] +// fn sync_back_and_forth( +// b: &mut Bencher, +// n: usize, +// nt_device: &DeviceType, +// cl_device: &DeviceType, +// mem: &mut SharedTensor +// ) { +// b.iter(|| { +// for _ in 0..n { +// match mem.sync(&cl_device) { +// Ok(_) => assert!(true), +// Err(err) => { +// println!("{:?}", err); +// assert!(false); +// } +// } +// match mem.sync(&nt_device) { +// Ok(_) => assert!(true), +// Err(err) => { +// println!("{:?}", err); +// assert!(false); +// } +// } +// } +// }); +// } + +#[bench] +#[ignore] +#[cfg(feature = "cuda")] +fn bench_mnsit_forward_1(b: &mut Bencher) { + let mut cfg = NetworkConfig::default(); + // set up input + cfg.add_input("in", &vec![1, 30, 30]); + cfg.add_input("label", &vec![1, 1, 10]); + // set up sigmoid + let mut sig_cfg = LayerConfig::new("sig", LayerType::Sigmoid); + sig_cfg.add_input("in"); + sig_cfg.add_output("sig_out"); + cfg.add_layer(sig_cfg); + + let fc_layer_cfg = LinearConfig { output_size: 10 }; + let mut fc_cfg = LayerConfig::new("fully_connected", LayerType::Linear(fc_layer_cfg)); + fc_cfg.add_input("sig_out"); + fc_cfg.add_output("fc_out"); + cfg.add_layer(fc_cfg); + // set up softmax_loss + // let mut loss_cfg = LayerConfig::new("loss", LayerType::SoftmaxLoss); + // loss_cfg.add_input("fc_out"); + // loss_cfg.add_input("label"); + // cfg.add_layer(loss_cfg); + + let backend = cuda_backend(); + let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + let loss = &mut 0f32; + + let _ = timeit_loops!(10, { + let inp = SharedTensor::::new(backend.device(), &vec![1, 30, 30]).unwrap(); + let label = SharedTensor::::new(native_backend.device(), &vec![1, 1, 10]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + let label_lock = Arc::new(RwLock::new(label)); + + network.forward(&[inp_lock, label_lock], loss); + }); + // b.iter(|| { + // for _ in 0..1 { + // let inp = Blob::from_data(SharedTensor::::new(backend.device(), &vec![1, 30, 30]).unwrap()); + // let label = Blob::from_data(SharedTensor::::new(native_backend.device(), &vec![1, 1, 10]).unwrap()); + // + // let inp_lock = Arc::new(RwLock::new(inp)); + // let label_lock = Arc::new(RwLock::new(label)); + // + // network.forward(&[inp_lock, label_lock], loss); + // } + // }); +} + +#[bench] +// #[ignore] +#[cfg(feature = "cuda")] +fn alexnet_forward(b: &mut Bencher) { + let mut cfg = NetworkConfig::default(); + // Layer: data + cfg.add_input("data", &vec![128, 3, 224, 224]); + // Layer: conv1 + let conv1_layer_cfg = ConvolutionConfig { + num_output: 64, + filter_shape: vec![11], + padding: vec![2], + stride: vec![4], + axis: None + }; + let mut conv1_cfg = LayerConfig::new("conv1", LayerType::Convolution(conv1_layer_cfg)); + conv1_cfg.add_input("data"); + conv1_cfg.add_output("conv1_preac"); + cfg.add_layer(conv1_cfg); + // Layer: conv1/relu + let mut conv1_relu_cfg = LayerConfig::new("conv1/relu", LayerType::ReLU); + conv1_relu_cfg.add_input("conv1_preac"); + conv1_relu_cfg.add_output("conv1_out"); + cfg.add_layer(conv1_relu_cfg); + // Layer: pool1 + let pool1_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool1_cfg = LayerConfig::new("pool1", LayerType::Pooling(pool1_layer_cfg)); + pool1_cfg.add_input("conv1_out"); + pool1_cfg.add_output("pool1_out"); + cfg.add_layer(pool1_cfg); + // Layer: conv2 + let conv2_layer_cfg = ConvolutionConfig { + num_output: 192, + filter_shape: vec![5], + padding: vec![2], + stride: vec![1], + axis: None + }; + let mut conv2_cfg = LayerConfig::new("conv2", LayerType::Convolution(conv2_layer_cfg)); + conv2_cfg.add_input("pool1_out"); + conv2_cfg.add_output("conv2_preac"); + cfg.add_layer(conv2_cfg); + // Layer: conv2/relu + let mut conv2_relu_cfg = LayerConfig::new("conv2/relu", LayerType::ReLU); + conv2_relu_cfg.add_input("conv2_preac"); + conv2_relu_cfg.add_output("conv2_out"); + cfg.add_layer(conv2_relu_cfg); + // Layer: pool2 + let pool2_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool2_cfg = LayerConfig::new("pool2", LayerType::Pooling(pool2_layer_cfg)); + pool2_cfg.add_input("conv2_out"); + pool2_cfg.add_output("pool2_out"); + cfg.add_layer(pool2_cfg); + // Layer: conv3 + let conv3_layer_cfg = ConvolutionConfig { + num_output: 384, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv3_cfg = LayerConfig::new("conv3", LayerType::Convolution(conv3_layer_cfg)); + conv3_cfg.add_input("pool2_out"); + conv3_cfg.add_output("conv3_preac"); + cfg.add_layer(conv3_cfg); + // Layer: conv3/relu + let mut conv3_relu_cfg = LayerConfig::new("conv3/relu", LayerType::ReLU); + conv3_relu_cfg.add_input("conv3_preac"); + conv3_relu_cfg.add_output("conv3_out"); + cfg.add_layer(conv3_relu_cfg); + // Layer: conv4 + let conv4_layer_cfg = ConvolutionConfig { + num_output: 256, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv4_cfg = LayerConfig::new("conv4", LayerType::Convolution(conv4_layer_cfg)); + conv4_cfg.add_input("conv3_out"); + conv4_cfg.add_output("conv4_preac"); + cfg.add_layer(conv4_cfg); + // Layer: conv4/relu + let mut conv4_relu_cfg = LayerConfig::new("conv4/relu", LayerType::ReLU); + conv4_relu_cfg.add_input("conv4_preac"); + conv4_relu_cfg.add_output("conv4_out"); + cfg.add_layer(conv4_relu_cfg); + // Layer: conv5 + let conv5_layer_cfg = ConvolutionConfig { + num_output: 256, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv5_cfg = LayerConfig::new("conv5", LayerType::Convolution(conv5_layer_cfg)); + conv5_cfg.add_input("conv4_out"); + conv5_cfg.add_output("conv5_preac"); + cfg.add_layer(conv5_cfg); + // Layer: conv5/relu + let mut conv5_relu_cfg = LayerConfig::new("conv5/relu", LayerType::ReLU); + conv5_relu_cfg.add_input("conv5_preac"); + conv5_relu_cfg.add_output("conv5_out"); + cfg.add_layer(conv5_relu_cfg); + // Layer: pool3 + let pool3_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool3_cfg = LayerConfig::new("pool3", LayerType::Pooling(pool3_layer_cfg)); + pool3_cfg.add_input("conv5_out"); + pool3_cfg.add_output("pool3_out"); + cfg.add_layer(pool3_cfg); + // Layer: fc1 + let fc1_layer_cfg = LinearConfig { output_size: 4096 }; + let mut fc1_cfg = LayerConfig::new("fc1", LayerType::Linear(fc1_layer_cfg)); + fc1_cfg.add_input("pool3_out"); + fc1_cfg.add_output("fc1_out"); + cfg.add_layer(fc1_cfg); + // Layer: fc2 + let fc2_layer_cfg = LinearConfig { output_size: 4096 }; + let mut fc2_cfg = LayerConfig::new("fc2", LayerType::Linear(fc2_layer_cfg)); + fc2_cfg.add_input("fc1_out"); + fc2_cfg.add_output("fc2_out"); + cfg.add_layer(fc2_cfg); + // Layer: fc3 + let fc3_layer_cfg = LinearConfig { output_size: 1000 }; + let mut fc3_cfg = LayerConfig::new("fc3", LayerType::Linear(fc3_layer_cfg)); + fc3_cfg.add_input("fc2_out"); + fc3_cfg.add_output("fc3_out"); + cfg.add_layer(fc3_cfg); + + let backend = cuda_backend(); + // let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + + let func = || { + let forward_time = timeit_loops!(1, { + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![128, 3, 112, 112]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + network.forward(&[inp_lock], loss); + }); + println!("Forward step: {}", forward_time); + }; + { bench_profile(b, func, 10); } +} + +#[bench] +#[ignore] +#[cfg(feature = "cuda")] +fn small_alexnet_forward(b: &mut Bencher) { + // let _ = env_logger::init(); + let mut cfg = NetworkConfig::default(); + // Layer: data + cfg.add_input("data", &vec![128, 3, 112, 112]); + // Layer: conv1 + let conv1_layer_cfg = ConvolutionConfig { + num_output: 32, + filter_shape: vec![11], + padding: vec![2], + stride: vec![4], + axis: None + }; + let mut conv1_cfg = LayerConfig::new("conv1", LayerType::Convolution(conv1_layer_cfg)); + conv1_cfg.add_input("data"); + conv1_cfg.add_output("conv1_preac"); + cfg.add_layer(conv1_cfg); + // Layer: conv1/relu + let mut conv1_relu_cfg = LayerConfig::new("conv1/relu", LayerType::ReLU); + conv1_relu_cfg.add_input("conv1_preac"); + conv1_relu_cfg.add_output("conv1_out"); + cfg.add_layer(conv1_relu_cfg); + // Layer: pool1 + let pool1_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool1_cfg = LayerConfig::new("pool1", LayerType::Pooling(pool1_layer_cfg)); + pool1_cfg.add_input("conv1_out"); + pool1_cfg.add_output("pool1_out"); + cfg.add_layer(pool1_cfg); + // Layer: conv2 + let conv2_layer_cfg = ConvolutionConfig { + num_output: 96, + filter_shape: vec![5], + padding: vec![2], + stride: vec![1], + axis: None + }; + let mut conv2_cfg = LayerConfig::new("conv2", LayerType::Convolution(conv2_layer_cfg)); + conv2_cfg.add_input("pool1_out"); + conv2_cfg.add_output("conv2_preac"); + cfg.add_layer(conv2_cfg); + // Layer: conv2/relu + let mut conv2_relu_cfg = LayerConfig::new("conv2/relu", LayerType::ReLU); + conv2_relu_cfg.add_input("conv2_preac"); + conv2_relu_cfg.add_output("conv2_out"); + cfg.add_layer(conv2_relu_cfg); + // Layer: pool2 + let pool2_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool2_cfg = LayerConfig::new("pool2", LayerType::Pooling(pool2_layer_cfg)); + pool2_cfg.add_input("conv2_out"); + pool2_cfg.add_output("pool2_out"); + cfg.add_layer(pool2_cfg); + // Layer: conv3 + let conv3_layer_cfg = ConvolutionConfig { + num_output: 142, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv3_cfg = LayerConfig::new("conv3", LayerType::Convolution(conv3_layer_cfg)); + conv3_cfg.add_input("pool2_out"); + conv3_cfg.add_output("conv3_preac"); + cfg.add_layer(conv3_cfg); + // Layer: conv3/relu + let mut conv3_relu_cfg = LayerConfig::new("conv3/relu", LayerType::ReLU); + conv3_relu_cfg.add_input("conv3_preac"); + conv3_relu_cfg.add_output("conv3_out"); + cfg.add_layer(conv3_relu_cfg); + // Layer: conv4 + let conv4_layer_cfg = ConvolutionConfig { + num_output: 128, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv4_cfg = LayerConfig::new("conv4", LayerType::Convolution(conv4_layer_cfg)); + conv4_cfg.add_input("conv3_out"); + conv4_cfg.add_output("conv4_preac"); + cfg.add_layer(conv4_cfg); + // Layer: conv4/relu + let mut conv4_relu_cfg = LayerConfig::new("conv4/relu", LayerType::ReLU); + conv4_relu_cfg.add_input("conv4_preac"); + conv4_relu_cfg.add_output("conv4_out"); + cfg.add_layer(conv4_relu_cfg); + // Layer: conv5 + let conv5_layer_cfg = ConvolutionConfig { + num_output: 128, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv5_cfg = LayerConfig::new("conv5", LayerType::Convolution(conv5_layer_cfg)); + conv5_cfg.add_input("conv4_out"); + conv5_cfg.add_output("conv5_preac"); + cfg.add_layer(conv5_cfg); + // Layer: conv5/relu + let mut conv5_relu_cfg = LayerConfig::new("conv5/relu", LayerType::ReLU); + conv5_relu_cfg.add_input("conv5_preac"); + conv5_relu_cfg.add_output("conv5_out"); + cfg.add_layer(conv5_relu_cfg); + // Layer: pool3 + let pool3_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool3_cfg = LayerConfig::new("pool3", LayerType::Pooling(pool3_layer_cfg)); + pool3_cfg.add_input("conv5_out"); + pool3_cfg.add_output("pool3_out"); + cfg.add_layer(pool3_cfg); + // Layer: fc1 + let fc1_layer_cfg = LinearConfig { output_size: 2048 }; + let mut fc1_cfg = LayerConfig::new("fc1", LayerType::Linear(fc1_layer_cfg)); + fc1_cfg.add_input("pool3_out"); + fc1_cfg.add_output("fc1_out"); + cfg.add_layer(fc1_cfg); + // Layer: fc2 + let fc2_layer_cfg = LinearConfig { output_size: 2048 }; + let mut fc2_cfg = LayerConfig::new("fc2", LayerType::Linear(fc2_layer_cfg)); + fc2_cfg.add_input("fc1_out"); + fc2_cfg.add_output("fc2_out"); + cfg.add_layer(fc2_cfg); + // Layer: fc3 + let fc3_layer_cfg = LinearConfig { output_size: 500 }; + let mut fc3_cfg = LayerConfig::new("fc3", LayerType::Linear(fc3_layer_cfg)); + fc3_cfg.add_input("fc2_out"); + fc3_cfg.add_output("fc3_out"); + cfg.add_layer(fc3_cfg); + + let backend = cuda_backend(); + // let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + + let mut func = || { + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![128, 3, 112, 112]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + network.forward(&[inp_lock], loss); + }; + { func(); bench_profile(b, func, 10); } +} diff --git a/examples/benchmarks.rs b/examples/benchmarks.rs new file mode 100644 index 00000000..98df064e --- /dev/null +++ b/examples/benchmarks.rs @@ -0,0 +1,529 @@ +#[macro_use] +extern crate timeit; +extern crate collenchyma as co; +extern crate leaf; + +use co::prelude::*; + +use std::sync::{Arc, RwLock}; +use leaf::layers::*; +use leaf::layer::*; +use leaf::network::*; +use std::rc::Rc; + +fn main() { + // bench_mnsit_forward(); + bench_alexnet(); + bench_overfeat(); +} + +#[cfg(feature = "native")] +#[allow(dead_code)] +fn native_backend() -> Rc> { + let framework = Native::new(); + let hardwares = &framework.hardwares().to_vec(); + let backend_config = BackendConfig::new(framework, hardwares); + Rc::new(Backend::new(backend_config).unwrap()) +} + +#[cfg(feature = "cuda")] +#[allow(dead_code)] +fn cuda_backend() -> Rc> { + let framework = Cuda::new(); + let hardwares = &framework.hardwares()[0..1].to_vec(); + println!("Device: {:?}/{}", hardwares[0].hardware_type().unwrap(), hardwares[0].name().unwrap()); + let backend_config = BackendConfig::new(framework, hardwares); + Rc::new(Backend::new(backend_config).unwrap()) +} + +#[cfg(feature = "opencl")] +#[allow(dead_code)] +fn opencl_backend() -> Rc> { + let framework = OpenCL::new(); + let hardwares = &framework.hardwares()[1..2].to_vec(); + let backend_config = BackendConfig::new(framework, hardwares); + Rc::new(Backend::new(backend_config).unwrap()) +} + +#[inline(never)] +fn bench_profile ()>( + name: &str, + mut bench_func: F, + times: usize) +{ + println!("Running benchmark {}", name); + println!("----------"); + for _ in 0..10 { + bench_func(); + } + let average_time = timeit_loops!(times, { + bench_func(); + }); + println!("----------"); + println!("Average time {}", autoscale_time(average_time)); + println!(""); +} + +fn autoscale_time(sec: f64) -> String { + let (div, unit_str) = get_time_scale(sec); + format!("{:.5} {}", sec / div, unit_str) +} + +fn scale_time(sec: f64, unit: &str) -> String { + // let (div, unit_str) = get_time_scale(sec); + let div = match unit { + "s" => 1.0, + "ms" => 0.001, + "µs" => 0.000_001, + "ns" => 0.000_000_001, + _ => panic!() + }; + format!("{:.5} {}", sec / div, unit) +} + +// get fitting order of magnitude for a time measurement +fn get_time_scale<'a>(sec: f64) -> (f64, &'a str) { + if sec > 1.0 { + (1.0, "s") + } else if sec > 0.001 { + (0.001, "ms") + } else if sec > 0.000_001 { + (0.000_001, "µs") + } else { + (0.000_000_001, "ns") + } +} + + +// #[bench] +#[allow(dead_code)] +#[cfg(feature = "cuda")] +fn bench_mnsit_forward() { + let mut cfg = NetworkConfig::default(); + // set up input + cfg.add_input("in", &vec![1, 30, 30]); + cfg.add_input("label", &vec![1, 1, 10]); + // set up sigmoid + let mut sig_cfg = LayerConfig::new("sig", LayerType::Sigmoid); + sig_cfg.add_input("in"); + sig_cfg.add_output("sig_out"); + cfg.add_layer(sig_cfg); + + let fc_layer_cfg = LinearConfig { + output_size: 10, + }; + let mut fc_cfg = LayerConfig::new("fully_connected", LayerType::Linear(fc_layer_cfg)); + fc_cfg.add_input("sig_out"); + fc_cfg.add_output("fc_out"); + cfg.add_layer(fc_cfg); + // // set up softmax_loss + // let mut loss_cfg = LayerConfig::new("loss", LayerType::SoftmaxLoss); + // loss_cfg.add_input("fc_out"); + // loss_cfg.add_input("label"); + // cfg.add_layer(loss_cfg); + + let backend = cuda_backend(); + let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + let loss = &mut 0f32; + + let func = || { + let forward_time = timeit_loops!(1, { + let inp = SharedTensor::::new(backend.device(), &vec![1, 30, 30]).unwrap(); + let label = SharedTensor::::new(native_backend.device(), &vec![1, 1, 10]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + let label_lock = Arc::new(RwLock::new(label)); + + network.forward(&[inp_lock, label_lock], loss); + }); + println!("Forward step: {}", scale_time(forward_time, "ms")); + }; + { bench_profile("mnist_forward", func, 10); } +} + +#[cfg(not(feature = "cuda"))] +fn bench_alexnet() {} +#[cfg(feature = "cuda")] +fn bench_alexnet() { + let mut cfg = NetworkConfig::default(); + // Layer: data + cfg.add_input("data", &vec![128, 3, 224, 224]); + // Layer: conv1 + let conv1_layer_cfg = ConvolutionConfig { + num_output: 64, + filter_shape: vec![11], + padding: vec![2], + stride: vec![4], + axis: None + }; + let mut conv1_cfg = LayerConfig::new("conv1", LayerType::Convolution(conv1_layer_cfg)); + conv1_cfg.add_input("data"); + conv1_cfg.add_output("conv1_preac"); + cfg.add_layer(conv1_cfg); + // Layer: conv1/relu + let mut conv1_relu_cfg = LayerConfig::new("conv1/relu", LayerType::ReLU); + conv1_relu_cfg.add_input("conv1_preac"); + conv1_relu_cfg.add_output("conv1_out"); + cfg.add_layer(conv1_relu_cfg); + // Layer: pool1 + let pool1_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool1_cfg = LayerConfig::new("pool1", LayerType::Pooling(pool1_layer_cfg)); + pool1_cfg.add_input("conv1_out"); + pool1_cfg.add_output("pool1_out"); + cfg.add_layer(pool1_cfg); + // Layer: conv2 + let conv2_layer_cfg = ConvolutionConfig { + num_output: 192, + filter_shape: vec![5], + padding: vec![2], + stride: vec![1], + axis: None + }; + let mut conv2_cfg = LayerConfig::new("conv2", LayerType::Convolution(conv2_layer_cfg)); + conv2_cfg.add_input("pool1_out"); + conv2_cfg.add_output("conv2_preac"); + cfg.add_layer(conv2_cfg); + // Layer: conv2/relu + let mut conv2_relu_cfg = LayerConfig::new("conv2/relu", LayerType::ReLU); + conv2_relu_cfg.add_input("conv2_preac"); + conv2_relu_cfg.add_output("conv2_out"); + cfg.add_layer(conv2_relu_cfg); + // Layer: pool2 + let pool2_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool2_cfg = LayerConfig::new("pool2", LayerType::Pooling(pool2_layer_cfg)); + pool2_cfg.add_input("conv2_out"); + pool2_cfg.add_output("pool2_out"); + cfg.add_layer(pool2_cfg); + // Layer: conv3 + let conv3_layer_cfg = ConvolutionConfig { + num_output: 384, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv3_cfg = LayerConfig::new("conv3", LayerType::Convolution(conv3_layer_cfg)); + conv3_cfg.add_input("pool2_out"); + conv3_cfg.add_output("conv3_preac"); + cfg.add_layer(conv3_cfg); + // Layer: conv3/relu + let mut conv3_relu_cfg = LayerConfig::new("conv3/relu", LayerType::ReLU); + conv3_relu_cfg.add_input("conv3_preac"); + conv3_relu_cfg.add_output("conv3_out"); + cfg.add_layer(conv3_relu_cfg); + // Layer: conv4 + let conv4_layer_cfg = ConvolutionConfig { + num_output: 256, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv4_cfg = LayerConfig::new("conv4", LayerType::Convolution(conv4_layer_cfg)); + conv4_cfg.add_input("conv3_out"); + conv4_cfg.add_output("conv4_preac"); + cfg.add_layer(conv4_cfg); + // Layer: conv4/relu + let mut conv4_relu_cfg = LayerConfig::new("conv4/relu", LayerType::ReLU); + conv4_relu_cfg.add_input("conv4_preac"); + conv4_relu_cfg.add_output("conv4_out"); + cfg.add_layer(conv4_relu_cfg); + // Layer: conv5 + let conv5_layer_cfg = ConvolutionConfig { + num_output: 256, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv5_cfg = LayerConfig::new("conv5", LayerType::Convolution(conv5_layer_cfg)); + conv5_cfg.add_input("conv4_out"); + conv5_cfg.add_output("conv5_preac"); + cfg.add_layer(conv5_cfg); + // Layer: conv5/relu + let mut conv5_relu_cfg = LayerConfig::new("conv5/relu", LayerType::ReLU); + conv5_relu_cfg.add_input("conv5_preac"); + conv5_relu_cfg.add_output("conv5_out"); + cfg.add_layer(conv5_relu_cfg); + // Layer: pool3 + let pool3_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool3_cfg = LayerConfig::new("pool3", LayerType::Pooling(pool3_layer_cfg)); + pool3_cfg.add_input("conv5_out"); + pool3_cfg.add_output("pool3_out"); + cfg.add_layer(pool3_cfg); + // Layer: fc1 + let fc1_layer_cfg = LinearConfig { + output_size: 4096, + }; + let mut fc1_cfg = LayerConfig::new("fc1", LayerType::Linear(fc1_layer_cfg)); + fc1_cfg.add_input("pool3_out"); + fc1_cfg.add_output("fc1_out"); + cfg.add_layer(fc1_cfg); + // Layer: fc2 + let fc2_layer_cfg = LinearConfig { + output_size: 4096, + }; + let mut fc2_cfg = LayerConfig::new("fc2", LayerType::Linear(fc2_layer_cfg)); + fc2_cfg.add_input("fc1_out"); + fc2_cfg.add_output("fc2_out"); + cfg.add_layer(fc2_cfg); + // Layer: fc3 + let fc3_layer_cfg = LinearConfig { + output_size: 1000, + }; + let mut fc3_cfg = LayerConfig::new("fc3", LayerType::Linear(fc3_layer_cfg)); + fc3_cfg.add_input("fc2_out"); + fc3_cfg.add_output("fc3_out"); + cfg.add_layer(fc3_cfg); + + let backend = cuda_backend(); + // let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + + { + let func = || { + let forward_time = timeit_loops!(1, { + { + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![128, 3, 224, 224]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + network.forward(&[inp_lock.clone()], loss); + } + }); + println!("Forward step: {}", scale_time(forward_time, "ms")); + }; + { bench_profile("alexnet_forward", func, 10); } + } + { + let func = || { + let backward_time = timeit_loops!(1, { + { + network.backward_input(); + } + }); + println!("backward input step: {}", scale_time(backward_time, "ms")); + }; + { bench_profile("alexnet_backward_input", func, 10); } + } + { + let func = || { + let backward_time = timeit_loops!(1, { + { + network.backward_parameters(); + } + }); + println!("backward parameters step: {}", scale_time(backward_time, "ms")); + }; + { bench_profile("alexnet_backward_parameters", func, 10); } + } +} + +#[cfg(not(feature = "cuda"))] +fn bench_overfeat() {} +#[cfg(feature = "cuda")] +fn bench_overfeat() { + let mut cfg = NetworkConfig::default(); + // Layer: data + cfg.add_input("data", &vec![128, 3, 231, 231]); + // Layer: conv1 + let conv1_layer_cfg = ConvolutionConfig { + num_output: 96, + filter_shape: vec![11], + padding: vec![0], + stride: vec![4], + axis: None + }; + let mut conv1_cfg = LayerConfig::new("conv1", LayerType::Convolution(conv1_layer_cfg)); + conv1_cfg.add_input("data"); + conv1_cfg.add_output("conv1_preac"); + cfg.add_layer(conv1_cfg); + // Layer: conv1/relu + let mut conv1_relu_cfg = LayerConfig::new("conv1/relu", LayerType::ReLU); + conv1_relu_cfg.add_input("conv1_preac"); + conv1_relu_cfg.add_output("conv1_out"); + cfg.add_layer(conv1_relu_cfg); + // Layer: pool1 + let pool1_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![2], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool1_cfg = LayerConfig::new("pool1", LayerType::Pooling(pool1_layer_cfg)); + pool1_cfg.add_input("conv1_out"); + pool1_cfg.add_output("pool1_out"); + cfg.add_layer(pool1_cfg); + // Layer: conv2 + let conv2_layer_cfg = ConvolutionConfig { + num_output: 256, + filter_shape: vec![5], + padding: vec![0], + stride: vec![1], + axis: None + }; + let mut conv2_cfg = LayerConfig::new("conv2", LayerType::Convolution(conv2_layer_cfg)); + conv2_cfg.add_input("pool1_out"); + conv2_cfg.add_output("conv2_preac"); + cfg.add_layer(conv2_cfg); + // Layer: conv2/relu + let mut conv2_relu_cfg = LayerConfig::new("conv2/relu", LayerType::ReLU); + conv2_relu_cfg.add_input("conv2_preac"); + conv2_relu_cfg.add_output("conv2_out"); + cfg.add_layer(conv2_relu_cfg); + // Layer: pool2 + let pool2_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![2], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool2_cfg = LayerConfig::new("pool2", LayerType::Pooling(pool2_layer_cfg)); + pool2_cfg.add_input("conv2_out"); + pool2_cfg.add_output("pool2_out"); + cfg.add_layer(pool2_cfg); + // Layer: conv3 + let conv3_layer_cfg = ConvolutionConfig { + num_output: 512, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv3_cfg = LayerConfig::new("conv3", LayerType::Convolution(conv3_layer_cfg)); + conv3_cfg.add_input("pool2_out"); + conv3_cfg.add_output("conv3_preac"); + cfg.add_layer(conv3_cfg); + // Layer: conv3/relu + let mut conv3_relu_cfg = LayerConfig::new("conv3/relu", LayerType::ReLU); + conv3_relu_cfg.add_input("conv3_preac"); + conv3_relu_cfg.add_output("conv3_out"); + cfg.add_layer(conv3_relu_cfg); + // Layer: conv4 + let conv4_layer_cfg = ConvolutionConfig { + num_output: 1024, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv4_cfg = LayerConfig::new("conv4", LayerType::Convolution(conv4_layer_cfg)); + conv4_cfg.add_input("conv3_out"); + conv4_cfg.add_output("conv4_preac"); + cfg.add_layer(conv4_cfg); + // Layer: conv4/relu + let mut conv4_relu_cfg = LayerConfig::new("conv4/relu", LayerType::ReLU); + conv4_relu_cfg.add_input("conv4_preac"); + conv4_relu_cfg.add_output("conv4_out"); + cfg.add_layer(conv4_relu_cfg); + // Layer: conv5 + let conv5_layer_cfg = ConvolutionConfig { + num_output: 1024, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv5_cfg = LayerConfig::new("conv5", LayerType::Convolution(conv5_layer_cfg)); + conv5_cfg.add_input("conv4_out"); + conv5_cfg.add_output("conv5_preac"); + cfg.add_layer(conv5_cfg); + // Layer: conv5/relu + let mut conv5_relu_cfg = LayerConfig::new("conv5/relu", LayerType::ReLU); + conv5_relu_cfg.add_input("conv5_preac"); + conv5_relu_cfg.add_output("conv5_out"); + cfg.add_layer(conv5_relu_cfg); + // Layer: pool5 + let pool5_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![2], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool5_cfg = LayerConfig::new("pool5", LayerType::Pooling(pool5_layer_cfg)); + pool5_cfg.add_input("conv5_out"); + pool5_cfg.add_output("pool5_out"); + cfg.add_layer(pool5_cfg); + // Layer: fc1 + let fc1_layer_cfg = LinearConfig { + output_size: 3072, + }; + let mut fc1_cfg = LayerConfig::new("fc1", LayerType::Linear(fc1_layer_cfg)); + fc1_cfg.add_input("pool5_out"); + fc1_cfg.add_output("fc1_out"); + cfg.add_layer(fc1_cfg); + // Layer: fc2 + let fc2_layer_cfg = LinearConfig { + output_size: 4096, + }; + let mut fc2_cfg = LayerConfig::new("fc2", LayerType::Linear(fc2_layer_cfg)); + fc2_cfg.add_input("fc1_out"); + fc2_cfg.add_output("fc2_out"); + cfg.add_layer(fc2_cfg); + // Layer: fc3 + let fc3_layer_cfg = LinearConfig { + output_size: 1000, + }; + let mut fc3_cfg = LayerConfig::new("fc3", LayerType::Linear(fc3_layer_cfg)); + fc3_cfg.add_input("fc2_out"); + fc3_cfg.add_output("fc3_out"); + cfg.add_layer(fc3_cfg); + + let backend = cuda_backend(); + // let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + + { + let func = || { + let forward_time = timeit_loops!(1, { + { + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![128, 3, 231, 231]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + network.forward(&[inp_lock.clone()], loss); + } + }); + println!("Forward step: {}", scale_time(forward_time, "ms")); + }; + { bench_profile("overfeat_forward", func, 10); } + } + { + let func = || { + let backward_time = timeit_loops!(1, { + { + network.backward_input(); + } + }); + println!("backward input step: {}", scale_time(backward_time, "ms")); + }; + { bench_profile("overfeat_backward_input", func, 10); } + } + { + let func = || { + let backward_time = timeit_loops!(1, { + { + network.backward_parameters(); + } + }); + println!("backward parameters step: {}", scale_time(backward_time, "ms")); + }; + { bench_profile("overfeat_backward_parameters", func, 10); } + } +} diff --git a/perf/README.md b/perf/README.md new file mode 100644 index 00000000..e4924e39 --- /dev/null +++ b/perf/README.md @@ -0,0 +1,13 @@ +# Profiling + +Collenchyma comes with scripts to help with profiling performance problems. + +Run [perf](http://www.brendangregg.com/perf.html) on one of the benchmark test: + +```sh +# compile latest version of benchmarks with DWARF information +cargo rustc --bench rblas_overhead -- -g +# benchmark binary is at target/debug/shared_memory-54e69b24ec0c2d04 +# benchmark is called bench_256_sync_1mb_native_cuda +sudo ./perf/run_perf.sh target/debug/shared_memory-54e69b24ec0c2d04 bench_256_sync_1mb_native_cuda # perf needs sudo +``` diff --git a/perf/run_perf.sh b/perf/run_perf.sh new file mode 100755 index 00000000..5100217e --- /dev/null +++ b/perf/run_perf.sh @@ -0,0 +1,14 @@ +#! /bin/bash +set -e +if [ $# -lt 2 ] + then + echo "No binary name or benchmark name supplied" + exit 1 +fi +binaryname=$1 +benchname=$2 +mkdir -p target/perf +perf record -a -g --output target/perf/${benchname}.data ${binaryname} --bench ${benchname} +perf script -f -i target/perf/${benchname}.data > target/perf/${benchname}.scripted +stackcollapse-perf target/perf/${benchname}.scripted | grep ${benchname} > target/perf/${benchname}.folded +flamegraph target/perf/${benchname}.folded > target/perf/${benchname}.svg diff --git a/src/layer.rs b/src/layer.rs index 24bf5bd5..387fd655 100644 --- a/src/layer.rs +++ b/src/layer.rs @@ -1,97 +1,18 @@ //! Provides the generics and interfaces for the specific [Layers][layers]. //! [layers]: ../layers/index.html -use co::backend::IBackend; -use co::libraries::blas::IBlas; -use co::libraries::numeric_helpers::Float; -use phloem::Blob; -use shared_memory::{ArcLock, HeapBlob}; +use co::{IBackend, SharedTensor}; use layers::*; +use weight::WeightConfig; +use util::{ArcLock, native_backend, LayerOps}; use std::fmt; use std::cmp; use std::collections::{HashMap, HashSet}; use std::rc::Rc; use std::sync::{Arc, RwLock}; -use std::sync::{RwLockReadGuard, RwLockWriteGuard}; - -/// Secures sequential execution as bottom Blob for a forward and as top Blob for a backward -/// operation. -/// -/// Ensures that no layer is reading the HeapBlob, while the current layer is still writing. -/// The RwLockReadGuard unlocks automatically as soon as the {forward, backward} operation of -/// the layer is finished and allows for a quick operation transition to the following layer. -/// Is automatically created by the {forward, backward} method of a [Layer][1] and passed to the -/// specific [forward_{cpu, gpu}][2] implementation. -/// [1]: ./trait.ILayer.html#method.forward -/// [2]: ./trait.ILayer.html#tymethod.forward_cpu -/// -/// ## Example -/// -/// Creates a ReadBlob for seldom scenarios such as testing. -/// -/// ``` -/// extern crate phloem; -/// # extern crate leaf; -/// # extern crate collenchyma as co; -/// use phloem::Blob; -/// use std::sync::{RwLock, RwLockReadGuard}; -/// # use leaf::layer::ReadBlob; -/// # use co::backend::{Backend, BackendConfig}; -/// # use co::frameworks::Native; -/// # use co::framework::IFramework; -/// # use std::rc::Rc; -/// -/// # fn main() { -/// # let framework = Native::new(); -/// # let hardwares = framework.hardwares(); -/// # let backend_config = BackendConfig::new(framework, hardwares); -/// # let backend = Rc::new(Backend::new(backend_config).unwrap()); -/// let lock = RwLock::new(Box::new(Blob::::of_shape(Some(backend.device()), &[3, 2, 3]))); -/// let read_blob: ReadBlob = lock.read().unwrap(); -/// # } -/// ``` -pub type ReadBlob<'_> = RwLockReadGuard<'_, HeapBlob>; - -/// Secures sequential execution as top Blob for a forward and as bottom Blob for a backward -/// operation. -/// -/// Ensures that no layer is writing to the HeapBlob, while the current layer is still reading it. -/// The RwLockWriteGuard unlocks automatically as soon as the {forward, backward} operation of -/// the layer is finished and allows for a quick operation transition to the following layer. -/// Is automatically created by the {forward, backward} method of a [Layer][1] and passed to the -/// specific [forward_{cpu, gpu}][2] implementation. -/// [1]: ./trait.ILayer.html#method.forward -/// [2]: ./trait.ILayer.html#tymethod.forward_cpu -/// -/// ## Example -/// -/// Creates a ReadBlob for seldom scenarios such as testing. -/// -/// ``` -/// extern crate phloem; -/// # extern crate leaf; -/// # extern crate collenchyma as co; -/// use phloem::Blob; -/// use std::sync::{RwLock, RwLockWriteGuard}; -/// # use leaf::layer::WriteBlob; -/// # use co::backend::{Backend, BackendConfig}; -/// # use co::frameworks::Native; -/// # use co::framework::IFramework; -/// # use std::rc::Rc; -/// -/// # fn main() { -/// # let framework = Native::new(); -/// # let hardwares = framework.hardwares(); -/// # let backend_config = BackendConfig::new(framework, hardwares); -/// # let backend = Rc::new(Backend::new(backend_config).unwrap()); -/// let lock = RwLock::new(Box::new(Blob::::of_shape(Some(backend.device()), &[4, 2, 1]))); -/// let read_blob: WriteBlob = lock.write().unwrap(); -/// # } -/// ``` -pub type WriteBlob<'_> = RwLockWriteGuard<'_, HeapBlob>; #[derive(Debug)] /// The generic Layer -pub struct Layer> { +pub struct Layer> { /// Identifies the Network /// /// The name is mainly used for logging purposes. @@ -104,7 +25,7 @@ pub struct Layer> { /// This is the part that does most of the work ([forward][2]/[backward][3]). /// [2]: ./trait.ILayer.html#method.forward /// [3]: ./trait.ILayer.html#method.backward - pub worker: Box, + pub worker: Box>, backend: Rc, @@ -113,12 +34,17 @@ pub struct Layer> { needs_backward: bool, /// The vector that stores shared references to the weights in the form of blobs. - pub blobs: Vec>, - - /// The vector that indicates whether each top blob contributes to - /// the [loss][1] of the network and with which weight. - /// [1]: http://caffe.berkeleyvision.org/tutorial/loss.html - loss: Vec, + pub weights_data: Vec>>, + /// The vector that stores shared references to the weights in the form of blobs. + pub weights_gradient: Vec>>, + // contains all the learnable weights (does not include bias(?) and shared weights) + learnable_weights: Vec>>, + // learning rate for each weight + weights_lr: Vec>, + // weight decay for each weight + weights_weight_decay: Vec>, + // display name for each weight + weights_display_names: Vec, /// Vector indicating whether to compute the diff of each weight blob. /// @@ -129,22 +55,30 @@ pub struct Layer> { /// [1]: ../layers/index.html weight_propagate_down: Vec, - /// References to all the bottom blobs of the layer. - pub bottom_blobs: Vec>, - bottom_blob_names: HashMap)>, - bottom_need_backwards: Vec, - - /// References to all the top blobs of the layer. - pub top_blobs: Vec>, - top_blob_names: HashMap)>, + /// References to all the input blobs of the layer. + pub input_blobs_data: Vec>>, + /// References to all the input blobs of the layer. + pub input_blobs_gradient: Vec>>, + input_blob_names: Vec, + input_need_backwards: Vec, + + /// References to all the output blobs of the layer. + pub output_blobs_data: Vec>>, + /// References to all the output blobs of the layer. + pub output_blobs_gradient: Vec>>, + output_blob_names: Vec, + /// The vector that indicates whether each output blob contributes to + /// the [loss][1] of the network and with which weight. + /// [1]: http://caffe.berkeleyvision.org/tutorial/loss.html + loss: Vec, /// All the blobs of the layer that can be addressed by name. /// /// Does not contain anonymous blobs. - pub blob_names: HashMap>, + pub blob_names: HashMap>, ArcLock>)>, } -impl> Layer { +impl + 'static> Layer { /// Creates a new Layer from a [LayerConfig][1]. /// [1]: ./struct.LayerConfig.html /// @@ -159,16 +93,23 @@ impl> Layer { needs_backward: true, - blobs: Vec::new(), - loss: Vec::new(), + weights_data: Vec::new(), + weights_gradient: Vec::new(), + learnable_weights: Vec::new(), weight_propagate_down: Vec::new(), + weights_lr: Vec::new(), + weights_weight_decay: Vec::new(), + weights_display_names: Vec::new(), - bottom_blobs: Vec::new(), - bottom_blob_names: HashMap::new(), - bottom_need_backwards: Vec::new(), + input_blobs_data: Vec::new(), + input_blobs_gradient: Vec::new(), + input_blob_names: Vec::new(), + input_need_backwards: Vec::new(), - top_blobs: Vec::new(), - top_blob_names: HashMap::new(), + output_blobs_data: Vec::new(), + output_blobs_gradient: Vec::new(), + output_blob_names: Vec::new(), + loss: vec![1f32, 1f32, 1f32], blob_names: HashMap::new(), @@ -183,141 +124,269 @@ impl> Layer { /// [1]: #method.from_config /// [2]: ./enum.LayerType.html /// [3]: ../layers/index.html - fn worker_from_config(config: &LayerConfig) -> Box { - match config.layer_type { + fn worker_from_config(config: &LayerConfig) -> Box> { + match config.layer_type.clone() { + LayerType::Convolution(layer_config) => Box::new(Convolution::from_config(&layer_config)), + LayerType::Linear(layer_config) => Box::new(Linear::from_config(&layer_config)), + LayerType::LogSoftmax => Box::new(LogSoftmax::default()), + LayerType::Pooling(layer_config) => Box::new(Pooling::from_config(&layer_config)), + LayerType::Softmax => Box::new(Softmax::default()), + LayerType::ReLU => Box::new(ReLU), LayerType::Sigmoid => Box::new(Sigmoid), + LayerType::NegativeLogLikelihood => Box::new(NegativeLogLikelihood::default()), + LayerType::Reshape(layer_config) => Box::new(Reshape::from_config(&layer_config)), } } - /// Connect layer to the other layers in a [Network][1] and set up Blobs. + /// Connect the layer to the other layers in a [Network][1] and set up Blobs. /// [1]: ../network/struct.Network.html /// - /// Connects to the bottoms provided by other layers via the `registry`. - /// Adds top blobs to the layer and then adds them to the `registry`, so the next - /// layers can connect them as their bottoms. + /// Connects to the outputs provided by other layers via the `registry`. + /// Adds output blobs to the layer and then adds them to the `registry`, so the next + /// layers can connect them as their inputs. /// In the end it intializes the underlying [layer implementation][2]. /// /// [2]: ./trait.ILayer.html /// /// Called during [Network][1] initialization. - pub fn connect(&mut self, registry: &mut HashMap>) { - // connect to all required bottoms - for bottom_name in &self.config.bottoms.clone() { - self.connect_bottom(bottom_name, registry) + pub fn connect( + &mut self, + registry: &mut HashMap>, ArcLock>)>, + weight_registry: &mut HashMap>, ArcLock>, Option, Option)>) { + // connect to all required inputs + for input_name in &self.config.inputs.clone() { + self.connect_input(input_name, registry) } - // setup tops - for (top_id, _) in self.config.tops.clone().iter().rev().enumerate() { - self.append_top(top_id, registry); + // setup outputs + for (output_id, _) in self.config.outputs.clone().iter().rev().enumerate() { + self.append_output(output_id, registry); + } + let config = self.config.clone(); + for (output_id, _) in self.config.outputs.clone().iter().rev().enumerate() { + self.append_weight(&config, weight_registry, 0, output_id); } // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter // specified fewer than the required number (as specified by - // ExactNumTopBlobs() or MinTopBlobs()), allocate them here. - let auto_top_blobs = self.worker.auto_top_blobs(); - let min_top_blobs = self.worker.min_top_blobs(); - let exact_num_top_blobs = self.worker.exact_num_top_blobs(); - if auto_top_blobs { - let needed_num_top = cmp::max(min_top_blobs, exact_num_top_blobs); - for _ in 0..(needed_num_top - self.top_blobs.len()) { - // Add "anonymous" top blobs -- do not add to registry + // exact_num_top_blobs() or min_output_blobs()), allocate them here. + let auto_output_blobs = self.worker.auto_output_blobs(); + debug!("Layer {} - auto_output_blobs: {}", &self.name, &auto_output_blobs); + let min_output_blobs = self.worker.min_output_blobs(); + let exact_num_output_blobs = self.worker.exact_num_output_blobs().unwrap_or(0); + if auto_output_blobs { + let needed_num_outputs = cmp::max(min_output_blobs, exact_num_output_blobs); + for _ in 0..(needed_num_outputs - self.output_blobs_data.len()) { + // Add "anonymous" output blobs -- do not add to registry // as we don't want these blobs to be usable as input // to other layers. - info!("Adding anonymous top blob"); - self.create_anonymous_top(); + info!("Adding anonymous output blob for layer {}", &self.name); + self.create_anonymous_output(); } } - self.worker.init(); + self.worker.init(self.backend.clone()); + self.worker.reshape(self.backend.clone(), + &mut self.input_blobs_data, + &mut self.input_blobs_gradient, + &mut self.weights_data, + &mut self.weights_gradient, + &mut self.output_blobs_data, + &mut self.output_blobs_gradient); + for t in &self.output_blobs_data { + println!("{} output shape: {:?}", self.name, t.read().unwrap().desc()); + } } - /// Append blob as [bottom blob][1] to the Layer. + /// Append blob as [input blob][1] to the Layer. /// [1]: ../layer/index.html /// /// During network initalization the blobs will be appended to the Layers as per their - /// [LayerConfig][3]. It is also determined if a bottom blob skips backpropagation + /// [LayerConfig][3]. It is also determined if a output blob skips backpropagation /// from [LayerConfig.propagate_down][3] (see also [init_backprop][5]). /// /// [3]: ../layer/struct.LayerConfig.html /// [5]: #method.init_backprop - fn connect_bottom(&mut self, blob_name: &str, available_blobs: &mut HashMap>) { - let bottom_id = self.config.bottoms.iter().position(|bottom_name| bottom_name == blob_name).unwrap(); + fn connect_input(&mut self, blob_name: &str, available_blobs: &mut HashMap>, ArcLock>)>) { + let input_id = self.config.inputs.iter().position(|input_name| input_name == blob_name).unwrap(); if !available_blobs.contains_key(&*blob_name) { - error!("Unknown bottom blob {} (layer '{}', bottom_id: {})", + error!("Unknown input blob {} (layer '{}', input_id: {})", blob_name, self.name, - bottom_id); + input_id); } - info!("{} <- {}", self.name, blob_name); + info!("{:<15} -> {:>15}", blob_name, self.name); - self.bottom_blob_names.insert(blob_name.to_owned(), (self.bottom_blobs.len(), available_blobs[&*blob_name].clone())); - self.bottom_blobs.push(available_blobs[&*blob_name].clone()); - available_blobs.remove(&*blob_name); + self.input_blob_names.push(blob_name.to_owned()); + self.input_blobs_data.push(available_blobs[&*blob_name].0.clone()); + self.input_blobs_gradient.push(available_blobs[&*blob_name].1.clone()); + // available_blobs.remove(&*blob_name); let mut propagate_down = true; - // Check if the backpropagation on bottom_id should be skipped + // Check if the backpropagation on input_id should be skipped if !self.config.propagate_down.is_empty() { - propagate_down = self.config.propagate_down[bottom_id]; + propagate_down = self.config.propagate_down[input_id]; } let need_backward = propagate_down; - self.bottom_need_backwards.push(need_backward); + self.input_need_backwards.push(need_backward); } - /// Append blob as [top blob][1] to the Layer. + /// Append blob as [output blob][1] to the Layer. /// [1]: ../layer/index.html /// /// During network initalization the blobs will be appended to the Layers as per their /// [LayerConfig][2]. It is also determined if computations can be done in-place, in which /// no additional Blob will be allocated.
/// Finally, the new blob will be added to the registry, so that the other layers can - /// connect it as their bottom. + /// connect it as their input. /// [2]: ../layer/struct.LayerConfig.html - fn append_top(&mut self, - top_id: usize, - registry: &mut HashMap>) { + fn append_output(&mut self, + output_id: usize, + registry: &mut HashMap>, ArcLock>)>) { let layer_config = &self.config; - let blob_name = layer_config.top(top_id).unwrap().clone(); - let blob: ArcLock; + let blob_name = layer_config.output(output_id).unwrap().clone(); + let blob_data: ArcLock>; + let blob_gradient: ArcLock>; - if layer_config.bottom(top_id).is_some() && *layer_config.bottom(top_id).unwrap() == blob_name { + if layer_config.input(output_id).is_some() && *layer_config.input(output_id).unwrap() == blob_name { info!("{} -> {} (in-place)", layer_config.name, blob_name); - blob = registry[&blob_name].clone(); + blob_data = registry[&blob_name].0.clone(); + blob_gradient = registry[&blob_name].1.clone(); } else if registry.contains_key(&blob_name) { // If we are not doing in-place computation but have duplicated blobs, raise an // error. error!("Top blob {} produced by multiple sources.", blob_name); return } else { - // if (Caffe::root_solver()) { { - info!("{} -> {}", layer_config.name, blob_name); - info!("Input {} -> {}", top_id, blob_name); + info!("{:<15} -> {:>15}", self.name, blob_name); + info!("Input {} -> {}", output_id, blob_name); } - blob = Arc::new(RwLock::new(Box::new(Blob::new()))); + let backend: Rc> = self.backend.clone(); + blob_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA + blob_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA } - self.top_blob_names.insert(blob_name.clone(), (self.top_blobs.len(),blob.clone())); - self.top_blobs.push(blob.clone()); - self.blob_names.insert(blob_name.clone(), blob.clone()); - registry.insert(blob_name.clone(), blob.clone()); + self.output_blob_names.push(blob_name.clone()); + self.output_blobs_data.push(blob_data.clone()); + self.output_blobs_gradient.push(blob_gradient.clone()); + self.blob_names.insert(blob_name.clone(), (blob_data.clone(), blob_gradient.clone())); + registry.insert(blob_name.clone(), (blob_data.clone(), blob_gradient.clone())); } - /// Append anonymous blob as [top blob][1] to the Layer. + /// Append anonymous blob as [output blob][1] to the Layer. /// [1]: ../layer/index.html /// - /// [Layer implementations][2] may request creation of anonymous top blobs - /// via [auto_top_blobs][3]. Since the blobs are not named, other layers can - /// not use them as their bottom blobs. + /// [Layer implementations][2] may request creation of anonymous output blobs + /// via [auto_output_blobs][3]. Since the blobs are not named, other layers can + /// not use them as their input blobs. /// [2]: ./trait.ILayer.html - /// [3]: ./trait.ILayer.html#method.auto_top_blobs - fn create_anonymous_top(&mut self) { + /// [3]: ./trait.ILayer.html#method.auto_output_blobs + fn create_anonymous_output(&mut self) { let blob_name = "(automatic)".to_owned(); info!("{} -> {}", self.name, blob_name); - let blob: ArcLock = Arc::new(RwLock::new(Box::new(Blob::new()))); - self.top_blobs.push(blob); + let backend: Rc> = self.backend.clone(); + let output_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA + let output_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA + self.output_blobs_data.push(output_data); + self.output_blobs_gradient.push(output_gradient); + } + + fn append_weight(&mut self, layer_config: &LayerConfig, registry: &mut HashMap>, ArcLock>, Option, Option)>, layer_id: usize, weight_id: usize) { + info!("Appending weight to layer {}", &layer_config.name); + let weights_len = self.weights_data.len(); + let weight_name = if weights_len > weight_id { + layer_config.param(weight_id).unwrap().name.clone() + } else { + "".to_owned() + }; + + // use weight_name (or weight_id as a fallback) as display_name + let display_name = if !weight_name.is_empty() { + weight_name.clone() + } else { + format!("{}", weight_id) + }; + self.weights_display_names.push(display_name.clone()); + // create name for registry + let registry_name = format!("SHARED_WEIGHT_{}", display_name); + + // add to tracking vectors + let net_weight_id = weights_len; + let output_data = self.output_blobs_data[weight_id].read().unwrap(); + let weight_data = Arc::new(RwLock::new(SharedTensor::::new(output_data.latest_device(), output_data.desc()).unwrap())); + let weight_gradient = Arc::new(RwLock::new(SharedTensor::::new(output_data.latest_device(), output_data.desc()).unwrap())); + self.weights_data.push(weight_data.clone()); + self.weights_gradient.push(weight_gradient.clone()); + + let mut weight_config = &WeightConfig::default(); + if layer_config.params_len() > weight_id { + weight_config = layer_config.param(weight_id).unwrap(); + } + // This layer "owns" this weight blob -- it is either anonymous + // (i.e., not given a weight_name) or explicitly given a name that we + // haven't already seen. + if weight_name.is_empty() || !registry.contains_key(®istry_name) { + // self.weight_owners.push(None); + if !weight_name.is_empty() { + registry.insert(weight_name.clone(), + (weight_data.clone(), weight_gradient.clone(), weight_config.lr_mult, weight_config.decay_mult)); + } + let learnable_weight_id = self.learnable_weights.len(); + self.learnable_weights.push(weight_data.clone()); + // self.learnable_weight_ids.push(learnable_weight_id); + self.weights_lr.push(weight_config.lr_mult); + self.weights_weight_decay.push(weight_config.decay_mult); + } else { + // Named weight blob with name we've seen before: share weights + + let (shared_weight_data, shared_weight_gradient, shared_lr, shared_decay_mult) = registry.get(®istry_name).unwrap().clone(); + info!("Sharing weight blob '{}'", weight_name.clone()); + + // TODO: move shape checking into reshape? + // can only share weights if blobs match by shape or capacity + // if weights_len > weight_id { + // if let Err(e) = layer_config.param(weight_id) + // .unwrap() + // .check_dimensions(&this_blob.read().unwrap(), + // &owner_blob.read().unwrap(), + // weight_name.clone(), + // self.layers[owner_layer_id].name.clone(), + // self.layers[layer_id].name.clone()) { + // error!("{}", e) + // } + // } + + // can only share parameters if both have same lr_mult + if let Some(lr_mult) = weight_config.lr_mult { + if let Some(owner_lr_mult) = shared_lr { + if !lr_mult.eq(&owner_lr_mult) { + error!("Shared param '{}' has mismatched lr_mult.", + weight_name.clone()); + } + } else { + // this is the first shared instance that has a lr_mult value so we take that + registry.remove(®istry_name).unwrap(); + registry.insert(registry_name.clone(), (shared_weight_data.clone(), shared_weight_gradient.clone(), weight_config.lr_mult, shared_decay_mult)); + } + } + // can only share weights if both have same decay_mult + if let Some(decay_mult) = weight_config.decay_mult { + if let Some(owner_decay_mult) = shared_decay_mult { + if !decay_mult.eq(&owner_decay_mult) { + error!("Shared param '{}' has mismatched decay_mult.", + weight_name.clone()); + } + } else { + // this is the first shared instance that has a decay_mult value so we take that + registry.remove(®istry_name).unwrap(); + registry.insert(registry_name, (shared_weight_data.clone(), shared_weight_gradient.clone(), shared_lr, weight_config.decay_mult)); + } + } + } } /// Initializes layer for [backpropagation][1] @@ -333,15 +402,15 @@ impl> Layer { blobs_skip_backp: &mut HashSet) { let mut layer_contributes_loss = false; let mut layer_skip_propagate_down = true; - for (top_id, top_blob) in self.top_blobs.iter().enumerate() { - let blob_name = self.name_for_blob(top_blob); + for (output_id, _) in self.output_blobs_data.iter().enumerate() { + let blob_name = self.output_blob_names.get(output_id); // layer is a loss layer or under a loss layer - if self.loss(top_id).is_some() || blobs_under_loss.contains(blob_name) { + if self.loss(output_id).is_some() || blob_name.is_some() && blobs_under_loss.contains(blob_name.unwrap()) { layer_contributes_loss = true; } // layer is not marked to skip backpropagation - if !blobs_skip_backp.contains(blob_name) { + if blob_name.is_none() || blob_name.is_some() && !blobs_skip_backp.contains(blob_name.unwrap()) { layer_skip_propagate_down = false; } // layer contributes loss to some @@ -350,33 +419,32 @@ impl> Layer { } } - // If this layer can skip backward computation, also all his bottom blobs + // If this layer can skip backward computation, also all his input blobs // don't need backpropagation if self.needs_backward && layer_skip_propagate_down { self.needs_backward = false; - for (bottom_id, _) in self.bottom_blobs.iter().enumerate() { - self.bottom_need_backwards[bottom_id] = false; + for (input_id, _) in self.input_blobs_data.iter().enumerate() { + self.input_need_backwards[input_id] = false; } } // layer doesn't contribute loss so it does not need to be backpropagated if !layer_contributes_loss { self.needs_backward = false; } - // if (Caffe::root_solver()) { // Caffe { info!("{} needs backward computation: {}", self.name, self.needs_backward); } - for (bottom_name, (bottom_id, _)) in self.bottom_blob_names.clone() { + for (input_id, input_name) in self.input_blob_names.iter().enumerate() { if layer_contributes_loss { - blobs_under_loss.insert(bottom_name.clone()); + blobs_under_loss.insert(input_name.clone()); } else { - self.bottom_need_backwards[bottom_id] = false; + self.input_need_backwards[input_id] = false; } - if !self.bottom_need_backwards[bottom_id] { - blobs_skip_backp.insert(bottom_name.clone()); + if !self.input_need_backwards[input_id] { + blobs_skip_backp.insert(input_name.clone()); } } } @@ -388,13 +456,13 @@ impl> Layer { /// Forcing backpropagation is useful for debugging. pub fn init_force_backward(&mut self) { self.needs_backward = true; - for (bottom_id, _) in self.bottom_need_backwards.clone().iter().enumerate() { - self.bottom_need_backwards[bottom_id] = - *self.bottom_need_backwards - .get(bottom_id) - .unwrap_or(&self.worker.allow_force_backward(bottom_id)); + for (input_id, _) in self.input_need_backwards.clone().iter().enumerate() { + self.input_need_backwards[input_id] = + *self.input_need_backwards + .get(input_id) + .unwrap_or(&self.worker.allow_force_backward(input_id)); } - for (weight_id, _) in self.blobs.clone().iter().enumerate() { + for (weight_id, _) in self.weights_data.clone().iter().enumerate() { self.set_weight_propagate_down(weight_id, true); } } @@ -403,7 +471,26 @@ impl> Layer { /// /// See [ILayer.forward](./trait.ILayer.html#method.forward) pub fn forward(&mut self) -> f32 { - self.worker.forward(&self.bottom_blobs, &mut self.top_blobs) + debug!("LAYER: {:?}", &self.name); + self.worker.sync(&self.backend, + &mut self.input_blobs_data, &mut self.input_blobs_gradient, + &mut self.weights_data, &mut self.weights_gradient, + &mut self.output_blobs_data, &mut self.output_blobs_gradient); + let forward_time = timeit_loops!(1, { + // aquire all the locks + let btm: Vec<_> = self.input_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let wgts: Vec<_> = self.weights_data.iter().map(|w| w.read().unwrap()).collect(); + let out_ref = self.output_blobs_data.iter().cloned().collect::>(); + let mut out = &mut out_ref.iter().map(|b| b.write().unwrap()).collect::>(); + let mut output_w = &mut out.iter_mut().map(|a| a).collect::>(); + // extract SharedTensors from Blobs + let weights_data: Vec<&SharedTensor> = wgts.iter().enumerate().map(|(_, val)| &**val).collect(); + let input_data: Vec<&SharedTensor> = btm.iter().enumerate().map(|(_, val)| &**val).collect(); + let mut output_data: Vec<&mut SharedTensor> = output_w.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect(); + self.worker.forward(&self.backend, &input_data, &weights_data, &mut output_data); + }); + debug!("{:<15} - Forward time: {:.5} ms", &self.name, forward_time / 0.001); + Self::calculate_loss(&self.backend, &self.worker, &mut self.weights_data, &mut self.output_blobs_data) } /// Uses the underlying layer implementation to compute a backward step. @@ -411,10 +498,119 @@ impl> Layer { /// See [ILayer.backward](./trait.ILayer.html#method.backward) pub fn backward(&mut self) { if self.needs_backward { - self.worker.backward(&self.top_blobs, &self.bottom_need_backwards, &mut self.bottom_blobs) + debug!("LAYER: {:?}", &self.name); + self.worker.sync(&self.backend, + &mut self.input_blobs_data, &mut self.input_blobs_gradient, + &mut self.weights_data, &mut self.weights_gradient, + &mut self.output_blobs_data, &mut self.output_blobs_gradient); + let output_data: Vec<_> = self.output_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let output_blobs_data: Vec<&SharedTensor> = output_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let output_gradient: Vec<_> = self.output_blobs_gradient.iter().map(|b| b.read().unwrap()).collect(); + let output_blobs_gradient: Vec<&SharedTensor> = output_gradient.iter().enumerate().map(|(_, val)| &**val).collect(); + let wgts_data: Vec<_> = self.weights_data.iter().map(|b| b.read().unwrap()).collect(); + let weights_data: Vec<&SharedTensor> = wgts_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let input_data: Vec<_> = self.input_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let input_blobs_data: Vec<&SharedTensor> = input_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let inp_gradient_ref = self.input_blobs_gradient.iter().cloned().collect::>(); + let mut inp_gradient = &mut inp_gradient_ref.iter().map(|b| b.write().unwrap()).collect::>(); + let mut input_gradient = &mut inp_gradient.iter_mut().map(|a| a).collect::>(); + let mut input_blobs_gradient: Vec<&mut SharedTensor> = input_gradient.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect(); + let wgt_gradient_ref = self.weights_gradient.iter().cloned().collect::>(); + let mut wgt_gradient = &mut wgt_gradient_ref.iter().map(|b| b.write().unwrap()).collect::>(); + let mut weights_gradient = &mut wgt_gradient.iter_mut().map(|a| a).collect::>(); + let mut weights_blobs_gradient: Vec<&mut SharedTensor> = weights_gradient.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect(); + self.worker.backward(&self.backend, + &output_blobs_data, + &output_blobs_gradient, + &weights_data, + &mut weights_blobs_gradient, + &input_blobs_data, + &mut input_blobs_gradient) } } + /// Calculate the gradient w.r.t. input. + /// + /// This method is mostly used when doing backpropagation. + pub fn backward_input(&mut self) { + self.worker.sync(&self.backend, + &mut self.input_blobs_data, &mut self.input_blobs_gradient, + &mut self.weights_data, &mut self.weights_gradient, + &mut self.output_blobs_data, &mut self.output_blobs_gradient); + let output_data: Vec<_> = self.output_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let output_blobs_data: Vec<&SharedTensor> = output_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let output_gradient: Vec<_> = self.output_blobs_gradient.iter().map(|b| b.read().unwrap()).collect(); + let output_blobs_gradient: Vec<&SharedTensor> = output_gradient.iter().enumerate().map(|(_, val)| &**val).collect(); + let wgts_data: Vec<_> = self.weights_data.iter().map(|b| b.read().unwrap()).collect(); + let weights_data: Vec<&SharedTensor> = wgts_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let input_data: Vec<_> = self.input_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let input_blobs_data: Vec<&SharedTensor> = input_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let btm_gradient_ref = self.input_blobs_gradient.iter().cloned().collect::>(); + let mut btm_gradient = &mut btm_gradient_ref.iter().map(|b| b.write().unwrap()).collect::>(); + let mut input_gradient = &mut btm_gradient.iter_mut().map(|a| a).collect::>(); + let mut input_blobs_gradient: Vec<&mut SharedTensor> = input_gradient.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect(); + self.worker.compute_input_gradient(&self.backend, + &weights_data, + &output_blobs_data, + &output_blobs_gradient, + &input_blobs_data, + &mut input_blobs_gradient) + } + + /// Calculate the gradient w.r.t. parameters. + /// + /// "Parameters" here refers to weights and also possibly bias, depending on the layer. + /// + /// This method is mostly used when doing backpropagation. + pub fn backward_parameters(&mut self) { + self.worker.sync(&self.backend, + &mut self.input_blobs_data, &mut self.input_blobs_gradient, + &mut self.weights_data, &mut self.weights_gradient, + &mut self.output_blobs_data, &mut self.output_blobs_gradient); + let output_data: Vec<_> = self.output_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let output_blobs_data: Vec<&SharedTensor> = output_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let output_gradient: Vec<_> = self.output_blobs_gradient.iter().map(|b| b.read().unwrap()).collect(); + let output_blobs_gradient: Vec<&SharedTensor> = output_gradient.iter().enumerate().map(|(_, val)| &**val).collect(); + let wgts_data: Vec<_> = self.weights_data.iter().map(|b| b.read().unwrap()).collect(); + let weights_data: Vec<&SharedTensor> = wgts_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let input_data: Vec<_> = self.input_blobs_data.iter().map(|b| b.read().unwrap()).collect(); + let input_blobs_data: Vec<&SharedTensor> = input_data.iter().enumerate().map(|(_, val)| &**val).collect(); + let wgt_gradient_ref = self.weights_gradient.iter().cloned().collect::>(); + let mut wgt_gradient = &mut wgt_gradient_ref.iter().map(|b| b.write().unwrap()).collect::>(); + let mut weights_gradient = &mut wgt_gradient.iter_mut().map(|a| a).collect::>(); + let mut weights_blobs_gradient: Vec<&mut SharedTensor> = weights_gradient.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect(); + self.worker.compute_parameters_gradient(&self.backend, + &output_blobs_data, + &output_blobs_gradient, + &input_blobs_data, + &mut weights_blobs_gradient) + } + + fn calculate_loss(backend: &B, worker: &Box>, weights: &mut Vec>>, outputs: &mut Vec>>) -> f32 { + // get weight of the loss of each weight-output-pair + let loss_weights = outputs.iter().enumerate().map(|(output_id, _)| worker.loss_weight(output_id)).collect::>(); + // filter out all weights that are not contributing to the total loss + let mut contributing_weights = weights.iter().enumerate().filter_map(|(i, val)| { + match loss_weights[i].is_none() { + true => None, + false => Some(val.clone()) + } + }).collect::>(); + let mut contributing_outputs = outputs.iter().enumerate().filter_map(|(i, val)| { + match loss_weights[i].is_none() { + true => None, + false => Some(val.clone()) + } + }).collect::>(); + let filtered_native_weights = loss_weights.iter().filter_map(|&val| val).collect::>(); + worker.calculate_loss(backend, &filtered_native_weights, &mut contributing_weights, &mut contributing_outputs) + } + + /// Synchronize the layers backend. + pub fn synchronize(&self) { + self.backend.synchronize().unwrap(); + } + /// Sets whether the layer should compute gradients w.r.t. a /// weight at a particular index given by `weight_id`. /// @@ -428,145 +624,286 @@ impl> Layer { } + /// Returns the names of all the input blobs. + pub fn input_blob_names(&self) -> &[String] { + &self.input_blob_names + } + /// Returns the [loss weight][1] associated with the weight blob /// with id `weight_id`. /// [1]: http://caffe.berkeleyvision.org/tutorial/loss.html pub fn loss(&self, weight_id: usize) -> Option<&f32> { self.loss.get(weight_id) } - - /// Find the name for a supplied blob. - fn name_for_blob(&self, blob: &ArcLock) -> &str { - // let (res, _) = self.blob_names.iter().find(|&(_, b)| blob == b).unwrap(); - // - // res - unimplemented!(); - } } /// A Layer in a [Neural Network][1] that can handle forward and backward of a computation step. /// [1]: ../network/index.html -pub trait ILayer { +pub trait ILayer : ComputeOutput + ComputeInputGradient + ComputeParametersGradient { /// Initialize the layer for computation. /// /// Allows for layer-specific one time setup, e.g. precomputing constant values. /// - /// Is called during [Network][1] initalization + /// Is called during [Network][1] initalization. /// [1]: ../network/type.Network.html - fn init(&mut self) {} + fn init(&mut self, backend: Rc) {} - /// Compute the [feedforward][1] layer output. - /// [1]: https://en.wikipedia.org/wiki/Feedforward_neural_network - fn forward_layer(&self, bottom: &[ReadBlob], top: &mut Vec<&mut WriteBlob>); - /// Compute the gradients for the bottom blobs - /// if the corresponding value of `propagate_down` is true. - /// Uses the CPU. - fn backward_layer(&self, top: &[ReadBlob], propagate_down: &[bool], bottom: &mut Vec<&mut WriteBlob>); - - /// Compute the [feedforward][1] layer output using the currently set computation method. + /// Adjust to shapes of the output blobs to fit the shapes of the input blobs. + /// + /// Is called during [Network][1] initalization, after [init][2]. + /// + /// **Caution**: `input_data` should only be reshaped, but not resized. + /// + /// [1]: ../network/type.Network.html + /// [2]: #method.init + fn reshape(&mut self, + backend: Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) {} + + /// Compute the [feedforward][1] layer output using the provided Backend. /// [1]: https://en.wikipedia.org/wiki/Feedforward_neural_network /// - /// Aquires read locks for the bottom blobs ([ReadBlob][2]) - /// and write locks for the top blobs ([WriteBlob][3]) to ensure sequential computation, + /// Aquires read locks for the input blobs ([ReadBlob][2]) + /// and write locks for the output blobs ([WriteBlob][3]) to ensure sequential computation, /// and then passes them to computation method specific function ([forward_cpu][4]). /// /// [2]: ./type.ReadBlob.html /// [3]: ./type.WriteBlob.html /// [3]: #method.forward_cpu #[cfg_attr(lint, allow(map_clone))] - fn forward(&self, bottom: &[ArcLock], top: &mut Vec>) -> f32 { - // Lock(); - // Reshape(bottom, top); // Reshape the layer to fit top & bottom blob - let mut loss = 0f32; - - let btm: Vec<_> = bottom.iter().map(|b| b.read().unwrap()).collect(); - let tp_ref = top.iter().cloned().collect::>(); - let mut tp = &mut tp_ref.iter().map(|b| b.write().unwrap()).collect::>(); - let mut top_w = &mut tp.iter_mut().map(|a| a).collect::>(); - self.forward_layer(&btm, top_w); - - for (top_id, top_layer) in top.iter().enumerate() { - // if (!this->loss(top_id)) { continue; } // Caffe - // if !self.loss(top_id) { continue; } - - let top_blob = top_layer.read().unwrap(); + fn forward(&self, + backend: &B, + input_data: &[&SharedTensor], + weights_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor] + ) { + self.compute_output(backend, weights_data, input_data, output_data); + } - let data = top_blob.data(); - let loss_weights = top_blob.diff(); + // TODO: remove + /// Calculate the loss for the output blobs in the layer. + /// + /// If `loss_weight(i)` returns `NAN` for a blob, no loss will be calculated for that blob. + /// + /// `calculate_loss` is called at the end of the forward computation step. + fn calculate_loss(&self, backend: &B, loss_weights: &[f32], weights: &mut Vec>>, outputs: &mut Vec>>) -> f32 { + let mut loss = 0f32; - // TODO - // loss += leaf_cpu_dot(data, loss_weights); + let out_ref = outputs.iter().cloned().collect::>(); + let out = &mut out_ref.iter().map(|b| b.write().unwrap()).collect::>(); + let wgts_ref = weights.iter().cloned().collect::>(); + let wgts = &mut wgts_ref.iter().map(|b| b.write().unwrap()).collect::>(); + + for (_, (output_blob, _)) in out.iter_mut().zip(loss_weights).enumerate() { + let native_backend = native_backend(); + // setup loss weight + // let mut weight = SharedTensor::::new(native_backend.device(), &vec![1]).unwrap(); + // match weight.add_device(native_backend.device()) { _ => weight.sync(native_backend.device()).unwrap() } + // + // ::util::write_to_memory(weight.get_mut(native_backend.device()).unwrap(), &[1]); + // match output_blob.add_device(native_backend.device()) { _ => output_blob.sync(native_backend.device()).unwrap() } + // let mut shared_loss = SharedTensor::::new(native_backend.device(), &vec![1]).unwrap(); + // // calculate weighted loss + // native_backend.dot_plain(output_blob, &weight, &mut shared_loss).unwrap(); + + let native_output = output_blob.get(native_backend.device()).unwrap().as_native().unwrap(); + // let native_loss = shared_loss.get(native_backend.device()).unwrap().as_native().unwrap(); + loss += native_output.as_slice::()[0]; + // TODO: factor in loss_weights } - // Unlock(); - loss } - /// Compute the [backpropagation][1] layer output and gradient using the currently set computation method. + /// Compute the [backpropagation][1] layer output and gradient using the provided backend. /// [1]: https://en.wikipedia.org/wiki/Backpropagation /// - /// Aquires read locks for the top blobs ([ReadBlob][2]) - /// and write locks for the bottom blobs ([WriteBlob][3]) to ensure sequential computation, - /// and then passes them to computation method specific function ([backward_cpu][4]). + /// Aquires write locks for the input blobs to ensure sequential computation, + /// and then do a [compute_input_gradient][3] and [compute_parameters_gradient][4]. /// - /// [2]: ./type.ReadBlob.html - /// [3]: ./type.WriteBlob.html - /// [3]: #method.backward_cpu + /// [3]: ./trait.ComputeInputGradient.html#method.compute_input_gradient + /// [4]: ./trait.ComputeParametersGradient.html#method.compute_parameters_gradient #[cfg_attr(lint, allow(map_clone))] - fn backward(&self, top: &[ArcLock], propagate_down: &[bool], bottom: &mut Vec>) { - let tp: Vec<_> = top.iter().map(|b| b.read().unwrap()).collect(); - let bt_ref = bottom.iter().cloned().collect::>(); - let mut bt = &mut bt_ref.iter().map(|b| b.write().unwrap()).collect::>(); - let mut btm = &mut bt.iter_mut().map(|a| a).collect::>(); - self.backward_layer(&tp, propagate_down, btm); - } - - /// Return whether "anonymous" top blobs are created automatically for the layer. - /// - /// If this method returns true, Network::init will create enough "anonymous" top - /// blobs to fulfill the requirement specified by [exact_num_top_blobs][1] or - /// [min_top_blobs][2]. - /// [1]: #method.exact_num_top_blobs - /// [2]: #method.min_top_blobs - fn auto_top_blobs(&self) -> bool { + fn backward(&self, + backend: &B, + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + weights_data: &[&SharedTensor], + weights_gradients: &mut [&mut SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + self.compute_input_gradient(backend, weights_data, output_data, output_gradients, input_data, input_gradients); + self.compute_parameters_gradient(backend, output_data, output_gradients, input_data, weights_gradients); + } + + /// Synchronize the blobs before doing a forward or backward operation. + /// + /// This is necessary because the forward_layer and backward_layer methods only immutably + /// borrow the corresponding input blobs and weights which they are not supposed to change. + /// However synchronizing all blobs to the same device may be neccessary for some computations, + /// which can only be done with a mutable borrow. + fn sync(&self, + backend: &B, + input_data: &mut [ArcLock>], + input_gradients: &mut [ArcLock>], + weights_data: &mut [ArcLock>], + weights_gradients: &mut [ArcLock>], + output_data: &mut Vec>>, + output_gradients: &mut Vec>>) { + if self.sync_native() { + let backend = native_backend(); + for tensor in input_data { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in input_gradients { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in weights_data { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in weights_gradients { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in output_data { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in output_gradients { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + } else { + for tensor in input_data { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in input_gradients { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in weights_data { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in weights_gradients { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in output_data { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + for tensor in output_gradients { + let mut sync = tensor.write().unwrap(); + match sync.add_device(backend.device()) { _ => sync.sync(backend.device()).unwrap() } + } + } + } + + /// Return whether "anonymous" output blobs are created automatically for the layer. + /// + /// If this method returns true, Network::init will create enough "anonymous" output + /// blobs to fulfill the requirement specified by [exact_num_output_blobs][1] or + /// [min_output_blobs][2]. + /// [1]: #method.exact_num_output_blobs + /// [2]: #method.min_output_blobs + fn auto_output_blobs(&self) -> bool { false } - /// Returns the minimum number of top blobs required by the layer, + /// Returns the minimum number of output blobs required by the layer, /// or 0 if no minimum number is required. /// /// This method should be overridden to return a positive value if your - /// layer expects some minimum number of top blobs. - fn min_top_blobs(&self) -> usize { + /// layer expects some minimum number of output blobs. + fn min_output_blobs(&self) -> usize { 0 } - /// Returns the exact number of top blobs required by the layer, - /// or 0 if no exact number is required. + /// Returns the exact number of output blobs required by the layer, + /// or `None` if no exact number is required. /// /// This method should be overridden to return a positive value if your - /// layer expects some exact number of top blobs. - fn exact_num_top_blobs(&self) -> usize { - 0 + /// layer expects some exact number of output blobs. + fn exact_num_output_blobs(&self) -> Option { + None } - /// Returns the exact number of bottom blobs required by the layer, - /// or 0 if no exact number is required. + /// Returns the exact number of input blobs required by the layer, + /// or `None` if no exact number is required. /// /// This method should be overridden to return a positive value if your - /// layer expects some exact number of bottom blobs. - fn exact_num_bottom_blobs(&self) -> usize { - 0 + /// layer expects some exact number of input blobs. + fn exact_num_input_blobs(&self) -> Option { + None } - /// Return whether to allow force_backward for a given bottom blob index. + /// Return whether to allow force_backward for a given input blob index. /// - /// If AllowForceBackward(i) == false, we will ignore the force_backward + /// If allow_force_backward(i) == false, we will ignore the force_backward /// setting and backpropagate to blob i only if it needs gradient information /// (as is done when force_backward == false). - fn allow_force_backward(&self, bottom_id: usize) -> bool { + fn allow_force_backward(&self, input_id: usize) -> bool { true } + /// Return wether a simple native backend should be used to [sync][1] instead of the default backend. + /// [1]: #method.sync + /// + /// If `false` is returned the default backend will be used, otherwise a new native backend + /// will be created and provided as argument to `sync`. + fn sync_native(&self) -> bool { + false + } + + /// Return the associated loss weight for a given output blob index. + /// + /// If loss_weight(i) == `None`, no loss will be calculated for the output blob. + /// + /// This is usually overridden by loss layers. + fn loss_weight(&self, output_id: usize) -> Option { + None + } +} + +/// A Layer that can compute the output for a given input. +pub trait ComputeOutput { + /// Compute output for given input and write them into `output_data`. + fn compute_output(&self, + backend: &B, + weights_data: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]); +} + +/// A Layer that can compute the gradient with respect to its input. +pub trait ComputeInputGradient { + /// Compute gradients with respect to the inputs and write them into `input_gradients`. + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]); } -impl fmt::Debug for ILayer { +/// A Layer that can compute the gradient with respect to its parameters (= weights, bias, etc.). +pub trait ComputeParametersGradient { + /// Compute gradients with respect to the parameters and write them into `parameters_gradients`. + fn compute_parameters_gradient(&self, + backend: &B, + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + parameters_gradients: &mut [&mut SharedTensor]) {} +} + +impl fmt::Debug for ILayer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "({}, {})", "foo", "bar") } @@ -581,60 +918,90 @@ pub struct LayerConfig { /// The type of the Layer pub layer_type: LayerType, - /// The name for each top Blob - pub tops: Vec, + /// The name for each output Blob + pub outputs: Vec, - /// The name for each bottom Blob - pub bottoms: Vec, + /// The name for each input Blob + pub inputs: Vec, /// Specifies training configuration for each weight blob. pub params: Vec, - /// Specifies on which bottoms the backpropagation should be skipped. - /// The size must be either 0 or equal to the number of bottoms. + /// Specifies on which inputs the backpropagation should be skipped. + /// The size must be either 0 or equal to the number of inputs. pub propagate_down: Vec, } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] /// The Layer Types pub enum LayerType { + // Common layers + /// Convolution Layer + Convolution(ConvolutionConfig), + /// Linear Layer + Linear(LinearConfig), + /// LogSoftmax Layer + LogSoftmax, + /// Pooling Layer + Pooling(PoolingConfig), + /// Softmax Layer + Softmax, + // Activation layers + /// ReLU Layer + ReLU, /// Sigmoid Layer Sigmoid, + // Loss layers + /// NegativeLogLikelihood Layer + NegativeLogLikelihood, + // Utility layers + /// Reshape Layer + Reshape(ReshapeConfig), } impl LayerConfig { /// Creates a new LayerConfig - pub fn new(name: String, layer_type: LayerType) -> LayerConfig { + pub fn new(name: &str, layer_type: LayerType) -> LayerConfig { LayerConfig { - name: name, + name: name.to_owned(), layer_type: layer_type, - tops: Vec::new(), - bottoms: Vec::new(), + outputs: Vec::new(), + inputs: Vec::new(), params: Vec::new(), propagate_down: Vec::new(), } } - /// Returns the Name of the requested top Blob - pub fn top(&self, top_id: usize) -> Option<&String> { - self.tops.get(top_id) + /// Returns the Name of the requested output Blob + pub fn output(&self, output_id: usize) -> Option<&String> { + self.outputs.get(output_id) + } + + /// Returns the number of output Blobs + pub fn outputs_len(&self) -> usize { + self.outputs.len() + } + + /// Add a output by name + pub fn add_output(&mut self, output_name: &str) { + self.outputs.push(output_name.to_owned()); } - /// Returns the number of top Blobs - pub fn tops_len(&self) -> usize { - self.tops.len() + /// Returns the Name of the requested input Blob + pub fn input(&self, input_id: usize) -> Option<&String> { + self.inputs.get(input_id) } - /// Returns the Name of the requested bottom Blob - pub fn bottom(&self, bottom_id: usize) -> Option<&String> { - self.bottoms.get(bottom_id) + /// Returns the number of input Blobs + pub fn inputs_len(&self) -> usize { + self.inputs.len() } - /// Returns the number of bottom Blobs - pub fn bottoms_len(&self) -> usize { - self.bottoms.len() + /// Add a input by name + pub fn add_input(&mut self, input_name: &str) { + self.inputs.push(input_name.to_owned()); } /// Returns the requested WeightConfig @@ -655,122 +1022,10 @@ impl LayerConfig { /// Checks if propagate down length makes sense. fn validate_propagate_down_len(&self) -> Result<(), &'static str> { - if self.propagate_down.is_empty() || self.propagate_down.len() == self.bottoms.len() { + if self.propagate_down.is_empty() || self.propagate_down.len() == self.inputs_len() { Ok(()) } else { - Err("propagate_down config must be specified either 0 or bottom_size times") + Err("propagate_down config must be specified either 0 or inputs_len times") } } - - // /// Checks if propagate down length is sane - // pub fn check_propagate_down_len(&self) -> bool { - // self.propagate_down.is_empty() || self.propagate_down.len() == self.bottoms.len() - // } -} - - -#[derive(Debug, Clone)] -/// Specifies training configuration for a weight blob. -pub struct WeightConfig { - /// The name of the weight blob -- useful for sharing weights among - /// layers, but never required otherwise. To share a weight between two - /// layers, give it a (non-empty) name. - /// - /// Default: "" - pub name: String, - /// Whether to require shared weights to have the same shape, or just the same - /// count - /// - /// Default: DimCheckMode::Strict - pub share_mode: DimCheckMode, - - /// The multiplier on the global learning rate for this parameter. - /// - /// Default: 1.0f32 - pub lr_mult: Option, - - /// The multiplier on the global weight decay for this parameter. - /// - /// Default: 1.0f32 - pub decay_mult: Option, -} - -impl Default for WeightConfig { - fn default() -> WeightConfig { - WeightConfig { - name: "".to_owned(), - share_mode: DimCheckMode::Strict, - lr_mult: None, - decay_mult: None, - } - } -} - -impl WeightConfig { - /// Checks dimensions of two blobs according to the `share_mode`. - /// Returns an error if there is a count/shape mismatch. - pub fn check_dimensions(&self, - blob_one: &Blob, - blob_two: &Blob, - param_name: String, - owner_name: String, - layer_name: String) - -> Result<(), String> { - match self.share_mode { - // Permissive dimension checking -- only check counts are the same. - DimCheckMode::Permissive => { - if blob_one.capacity() != blob_two.capacity() { - return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}'; - count mismatch. - Owner layer weight shape is {}; - Sharing layer weight shape is {}", - param_name, - owner_name, - layer_name, - blob_two.shape_string(), - blob_one.shape_string())); - } - } - // Strict dimension checking -- all dims must be the same. - DimCheckMode::Strict => { - if blob_one.shape() != blob_two.shape() { - return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}'; - shape mismatch. - Owner layer weight shape is {}; - Sharing layer expects weight shape {}", - param_name, - owner_name, - layer_name, - blob_two.shape_string(), - blob_one.shape_string())); - } - } - } - Ok(()) - } - - /// The multiplier on the global learning rate for this weight blob. - pub fn lr_mult(&self) -> f32 { - match self.lr_mult { - Some(val) => val, - None => 1.0f32, - } - } - - /// The multiplier on the global weight decay for this weight blob. - pub fn decay_mult(&self) -> f32 { - match self.decay_mult { - Some(val) => val, - None => 1.0f32, - } - } -} - -#[derive(Debug, Copy, Clone)] -/// Enum for specifing the shared weights behaviour -pub enum DimCheckMode { - /// Strict requires that shapes match. - Strict, - /// Permissive requires only the count of weights to match. - Permissive, } diff --git a/src/layers/activation/mod.rs b/src/layers/activation/mod.rs index 78007a22..c60ab710 100644 --- a/src/layers/activation/mod.rs +++ b/src/layers/activation/mod.rs @@ -1,7 +1,7 @@ //! Provides nonlinear activation methods. //! -//! Activation Layers take a bottom Blob, provide the activation operation and -//! produce a top Blob. +//! Activation Layers take a input tensor, provide the activation operation and +//! produce a output tensor. //! Thanks to the nonlinearity of the activation methods, we can 'learn' and //! detect nonlinearities //! in our (complex) datasets. @@ -10,11 +10,23 @@ //! classification a //! step function might be very useful. For more complex tasks continious //! activation functions such -//! as Sigmoid, TanH, Softmax or ReLU should be used. In most cases ReLU might -//! prove the best -//! results. +//! as [Sigmoid][mod_sigmoid], TanH, [ReLU][mod_relu] should be used. In most cases ReLU might +//! provide the best results. //! //! The activation function is also sometimes called transfer function. +//! +//! [mod_sigmoid]: ./sigmoid/index.html +//! [mod_relu]: ./relu/index.html +#[macro_export] +macro_rules! impl_ilayer_activation { + () => ( + fn exact_num_output_blobs(&self) -> Option { Some(1) } + fn exact_num_input_blobs(&self) -> Option { Some(1) } + ) +} + +pub use self::relu::ReLU; pub use self::sigmoid::Sigmoid; +pub mod relu; pub mod sigmoid; diff --git a/src/layers/activation/relu.rs b/src/layers/activation/relu.rs new file mode 100644 index 00000000..341ee1b1 --- /dev/null +++ b/src/layers/activation/relu.rs @@ -0,0 +1,59 @@ +//! Applies the nonlinear Rectified Linear Unit. +//! +//! Non-linearity activation function: y = max(0, x) +//! +//! This is generally the preferred choice over Sigmod or TanH. +//! The max function used in ReLU is usually faster to compute than the exponentiation +//! needed in a Sigmoid layer. + +use co::{IBackend,SharedTensor}; +use conn::Relu; +use layer::*; +use util::ArcLock; + +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] +/// ReLU Activation Layer +pub struct ReLU; + +impl> ILayer for ReLU { + impl_ilayer_activation!(); + + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + let inp = input_data[0].read().unwrap(); + input_gradient[0].write().unwrap().resize(inp.desc()).unwrap(); + output_data[0].write().unwrap().resize(inp.desc()).unwrap(); + output_gradient[0].write().unwrap().resize(inp.desc()).unwrap(); + } +} + +impl> ComputeOutput for ReLU { + fn compute_output(&self, + backend: &B, + _weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + backend.relu_plain(input_data[0], output_data[0]).unwrap(); + } +} + +impl> ComputeInputGradient for ReLU { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + backend.relu_grad_plain(output_data[0], output_gradients[0], input_data[0], input_gradients[0]).unwrap(); + } +} + +impl> ComputeParametersGradient for ReLU {} diff --git a/src/layers/activation/sigmoid.rs b/src/layers/activation/sigmoid.rs index 8eb74c84..0d513d1c 100644 --- a/src/layers/activation/sigmoid.rs +++ b/src/layers/activation/sigmoid.rs @@ -12,52 +12,53 @@ //! * can be computed faster //! * is therefore the most popular activation function in DNNs as of this //! writing (2015). +use co::{IBackend, SharedTensor}; +use conn; use layer::*; +use util::ArcLock; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] /// Sigmoid Activation Layer pub struct Sigmoid; -impl ILayer for Sigmoid { +impl> ILayer for Sigmoid { impl_ilayer_activation!(); - fn forward_layer(&self, bottom: &[ReadBlob], top: &mut Vec<&mut WriteBlob>) { - let bottom_data = bottom[0].data(); - let top_data = top[0].mut_data(); - - // TODO - // for (i, _) in bottom_data.iter().enumerate() { - // top_data[i] = Sigmoid::sigmoid(bottom_data[i]) - // } - } - - fn backward_layer(&self, top: &[ReadBlob], propagate_down: &[bool], bottom: &mut Vec<&mut WriteBlob>) { - if propagate_down[0] { - let top_data = top[0].data(); - let top_diff = top[0].diff(); - let count = bottom[0].capacity(); - let bottom_diff = bottom[0].mut_diff(); - - - // for i in 0..count { - // TODO - // let sigmoid_x = top_data[i]; - // bottom_diff[i] = top_diff[i] * Sigmoid::sigmoid_prime_precalc(sigmoid_x) - // } - } + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + let inp = input_data[0].read().unwrap(); + output_data[0].write().unwrap().resize(inp.desc()).unwrap(); + output_gradient[0].write().unwrap().resize(inp.desc()).unwrap(); } } -impl Sigmoid { - fn sigmoid(z: f32) -> f32 { - 1f32 / (1f32 + (-z).exp()) - } - - fn sigmoid_prime(z: f32) -> f32 { - Sigmoid::sigmoid_prime_precalc(Sigmoid::sigmoid(z)) +impl> ComputeOutput for Sigmoid { + fn compute_output(&self, + backend: &B, + _weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + backend.sigmoid_plain(input_data[0], output_data[0]).unwrap(); } +} - fn sigmoid_prime_precalc(sigmoid_z: f32) -> f32 { - sigmoid_z * (1f32 - sigmoid_z) +impl> ComputeInputGradient for Sigmoid { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + backend.sigmoid_grad_plain(output_data[0], output_gradients[0], input_data[0], input_gradients[0]).unwrap(); } } + +impl> ComputeParametersGradient for Sigmoid {} diff --git a/src/layers/common/convolution.rs b/src/layers/common/convolution.rs index 03293365..e040cb4a 100644 --- a/src/layers/common/convolution.rs +++ b/src/layers/common/convolution.rs @@ -1,8 +1,238 @@ -//! Convolves the top Blob +//! Convolves the input tensor. //! //! Does this convolution with a set of learnable filters, each producing one -//! feature map in the top Blob. +//! feature map in the output tensor. +use std::rc::Rc; +use co::{IBackend, DeviceType, SharedTensor}; +use conn; +use layer::*; +use util::{ArcLock, native_backend, cast_vec_usize_to_i32}; +use super::FilterLayer; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] /// Convolution Layer -pub struct Convolution; +pub struct Convolution> { + axis: usize, + num_output: usize, + filter_shape: Vec, + stride: Vec, + padding: Vec, + + convolution_configs: Option>, +} + +impl> Convolution { + /// Create a Convolution layer from a ConvolutionConfig. + pub fn from_config(config: &ConvolutionConfig) -> Convolution { + Convolution { + num_output: config.num_output, + + filter_shape: config.filter_shape.clone(), + stride: config.stride.clone(), + padding: config.padding.clone(), + + axis: config.axis(), + + convolution_configs: None, + } + } + + fn calculate_filter_shape(&self, input_shape: &[usize]) -> Vec { + let num_spatial_dims = self.num_spatial_dims(input_shape); + let spatial_dims = self.spatial_filter_dims(num_spatial_dims); + let filter_n = self.num_output; // number of output feature maps + let filter_c = input_shape[self.axis]; // number of input feature maps + let filter_h = spatial_dims[0]; + let filter_w = spatial_dims[1]; + + vec![filter_n, filter_c, filter_h, filter_w] + } + + fn create_filter(&self, device: &DeviceType, input_shape: &[usize]) -> SharedTensor { + let filter_shape = self.calculate_filter_shape(input_shape); + + SharedTensor::::new(device, &filter_shape).unwrap() + } +} + +impl> FilterLayer for Convolution { + /// Calculates the number of spatial dimensions for the pooling operation. + fn num_spatial_dims(&self, input_shape: &[usize]) -> usize { + match input_shape.len() { + 4 => 2, + _ => panic!("Only 2D convolutions supported at the moment") + } + } + + fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec { + let num_spatial_dims = self.num_spatial_dims(input_shape); + let filter = self.spatial_filter_dims(num_spatial_dims); + let padding = self.padding_dims(num_spatial_dims); + let stride = self.stride_dims(num_spatial_dims); + let mut output_shape = Vec::new(); + for dim in &input_shape[0..self.axis].to_vec() { + output_shape.push(*dim); + } + output_shape.push(self.num_output); + for spatial_dim in Self::calculate_spatial_output_dims(&input_shape[(self.axis + 1)..], &filter, &padding, &stride) { + output_shape.push(spatial_dim); + } + + output_shape + } + + fn filter_shape(&self) -> &[usize] { + &self.filter_shape + } + + fn stride(&self) -> &[usize] { + &self.stride + } + + fn padding(&self) -> &[usize] { + &self.padding + } +} + +impl> ILayer for Convolution { + impl_ilayer_common!(); + + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + for i in 0..input_data.len() { + let inp = input_data[0].read().unwrap(); + let mut output_data = output_data[0].write().unwrap(); + let mut output_gradient = output_gradient[0].write().unwrap(); + let input_shape = inp.desc(); + let output_shape = self.calculate_output_shape(input_shape); + output_data.resize(&output_shape).unwrap(); + output_gradient.resize(&output_shape).unwrap(); + + let device = ::device(&backend); + let num_spatial_dims = self.num_spatial_dims(inp.desc()); + let mut filter = self.create_filter(device, input_shape); + let stride = cast_vec_usize_to_i32(self.stride_dims(num_spatial_dims)); + let padding = cast_vec_usize_to_i32(self.padding_dims(num_spatial_dims)); + + // add copy on native as workaround for bug in new_convolution_config + let native = native_backend(); + let _ = filter.add_device(native.device()); + let config = backend.new_convolution_config(&inp, &output_data, &mut filter, + conn::ConvForwardAlgo::Auto, conn::ConvBackwardFilterAlgo::Auto, conn::ConvBackwardDataAlgo::Auto, + &stride, &padding).unwrap(); + weights_data[0].write().unwrap().resize(filter.desc()).unwrap(); + weights_gradient[0].write().unwrap().resize(filter.desc()).unwrap(); + self.convolution_configs = Some(Rc::new(config)); + } + } +} + +impl> ComputeOutput for Convolution { + fn compute_output(&self, + backend: &B, + weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + let filter_data = weights[0]; + let conv_config = self.convolution_configs.as_ref().unwrap(); + backend.convolution_plain(filter_data, input_data[0], output_data[0], conv_config).unwrap(); + } +} + +impl> ComputeInputGradient for Convolution { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + _output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + let filter_data = weights_data[0]; + let conv_config = self.convolution_configs.as_ref().unwrap(); + // compute gradient w.r.t. input + backend.convolution_grad_data_plain(filter_data, output_gradients[0], input_gradients[0], conv_config).unwrap(); + } +} + +impl> ComputeParametersGradient for Convolution { + fn compute_parameters_gradient(&self, + backend: &B, + _output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + parameters_gradients: &mut [&mut SharedTensor]) { + // TODO: compute gradient w.r.t to bias + let filter_gradient = &mut parameters_gradients[0]; + let conv_config = self.convolution_configs.as_ref().unwrap(); + // compute gradient w.r.t. filter + backend.convolution_grad_filter_plain(input_data[0], output_gradients[0], filter_gradient, conv_config).unwrap(); + } +} + + +#[derive(Debug, Clone)] +/// Specifies configuration parameters for a Convolution Layer. +pub struct ConvolutionConfig { + /// The number of output values + pub num_output: usize, + /// The size of the kernel + pub filter_shape: Vec, + /// The stride size + pub stride: Vec, + /// The padding size + pub padding: Vec, + /// The axis to interpret as "channels" when performing convolution. + /// + /// Preceding dimensions are treated as independent inputs, and + /// succeeding dimensions are treated as "spatial". + /// + /// Defaults to `1` + pub axis: Option, +} + +impl ConvolutionConfig { + /// The axis to interpret as "channels" when performing convolution. + /// + /// Preceding dimensions are treated as independent inputs, and + /// succeeding dimensions are treated as "spatial". + /// + /// Defaults to `1` + pub fn axis(&self) -> usize { + self.axis.unwrap_or(1) + } +} + +#[cfg(test)] +mod tests { + use co::*; + use super::{Convolution, ConvolutionConfig}; + use super::super::FilterLayer; + + #[test] + #[cfg(feature="cuda")] + fn correct_shapes() { + let cfg = ConvolutionConfig { + num_output: 64, + + filter_shape: vec![11], + padding: vec![2], + stride: vec![4], + + axis: None, + }; + let layer = Convolution::>::from_config(&cfg); + let num_spatial_dims = layer.num_spatial_dims(&vec![1, 3, 224, 224]); + assert_eq!(2, num_spatial_dims); + assert_eq!(vec![11, 11], layer.spatial_filter_dims(2)); + assert_eq!(vec![2, 2], layer.padding_dims(2)); + assert_eq!(vec![4, 4], layer.stride_dims(2)); + assert_eq!(vec![64, 3, 11, 11], layer.calculate_filter_shape(&vec![1, 3, 224, 224])); + assert_eq!(vec![1, 64, 55, 55], layer.calculate_output_shape(&vec![1, 3, 224, 224])); + } +} diff --git a/src/layers/common/linear.rs b/src/layers/common/linear.rs new file mode 100644 index 00000000..33413e21 --- /dev/null +++ b/src/layers/common/linear.rs @@ -0,0 +1,181 @@ +//! Applies a linear transformation to the input data `y = a * x + b` +//! +//! The variables are: +//! +//! - `y`: output value +//! - `a`: weight (a trainable weight in a neural network) +//! - `x`: input value +//! - `b`: bias (not implemented yet) +//! +//! ## Input +//! +//! The input can either have one or two dimensions: +//! +//! - If the input has one dimension the transformation will just be applied to the input data. +//! - If the input has two dimensions **the first dimension is treated as batch size** (`N`) +//! and the transformation will be applied to every vector in the second dimension, using the +//! same weights and biases. +//! +//! In the context of convolutional neural networks this layer is also +//! called a "fully-connected layer" if it is used at the end of the network. +use std::rc::Rc; +use co::backend::IBackend; +use co::tensor::SharedTensor; +use coblas::transpose::Transpose; +use coblas::plugin::*; +use layer::*; +use util::{ArcLock, native_scalar, LayerOps}; +use weight::FillerType; + +#[derive(Debug)] +/// Linear Layer +pub struct Linear { + output_size: usize, + + one: SharedTensor, + zero: SharedTensor, +} + +impl Linear { + /// Create a Linear layer from a LinearConfig. + pub fn from_config(config: &LinearConfig) -> Linear { + let one = native_scalar(1f32); + let zero = native_scalar(0f32); + + Linear { + output_size: config.output_size, + + one: one, + zero: zero, + } + } + + // Calculates the input size by skipping the batch size. + fn calculate_input_size(input_shape: &[usize]) -> usize { + input_shape.iter().skip(1).fold(1, |prod, i| prod * i) + } + + fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec { + let n = input_shape[0]; // batch size + vec![n, self.output_size] + } + + fn calculate_weight_shape(&self, input_shape: &[usize]) -> Vec { + let m = Self::calculate_input_size(input_shape); + vec![self.output_size, m] + } +} + +impl> ILayer for Linear { + impl_ilayer_common!(); + + fn init(&mut self, backend: Rc) { + let device = ::device(&backend); + let _ = self.one.add_device(device); + self.one.sync(device).unwrap(); + let _ = self.zero.add_device(device); + self.zero.sync(device).unwrap(); + } + + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + let input = input_data[0].read().unwrap(); + // reshape top + let output_shape = self.calculate_output_shape(input.desc()); + output_data[0].write().unwrap().resize(&output_shape).unwrap(); + output_gradient[0].write().unwrap().resize(&output_shape).unwrap(); + // reshape weight + let weight_shape = self.calculate_weight_shape(input.desc()); + // TODO: change weight creation to not require this + if let Some(weight) = weights_data.get(0) { + weight.write().unwrap().resize(&weight_shape).unwrap(); + let filler = FillerType::Glorot { + input_size: Self::calculate_input_size(input.desc()), + output_size: self.output_size, + }; + filler.fill(&mut weight.write().unwrap()); + + let native_backend = ::util::native_backend(); + let bound_weight = weight.read().unwrap(); + let native_output = bound_weight.get(native_backend.device()).unwrap().as_native().unwrap(); + } + if let Some(weight) = weights_gradient.get(0) { + weight.write().unwrap().resize(&weight_shape).unwrap(); + } + } +} + +impl> ComputeOutput for Linear { + fn compute_output(&self, + backend: &B, + weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + backend.gemm_plain(&self.one, Transpose::NoTrans, input_data[0], Transpose::Trans, weights[0], &self.zero, output_data[0]).unwrap(); + let has_bias_term = false; // TODO: implement bias term + if has_bias_term { + let bias_multiplier = unimplemented!(); + let bias_data = unimplemented!(); + backend.gemm_plain(&self.one, Transpose::NoTrans, bias_multiplier, Transpose::NoTrans, bias_data, &self.one, output_data[0]).unwrap(); + } + } +} + +impl> ComputeInputGradient for Linear { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + // Gradient with respect to input data + backend.gemm_plain(&self.one, Transpose::NoTrans, output_gradients[0], Transpose::NoTrans, weights_data[0], &self.zero, input_gradients[0]).unwrap(); + } +} + +impl> ComputeParametersGradient for Linear { + fn compute_parameters_gradient(&self, + backend: &B, + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + parameters_gradients: &mut [&mut SharedTensor]) { + // gradient w.r.t. weights + backend.gemm_plain(&self.one, Transpose::Trans, output_gradients[0], Transpose::NoTrans, input_data[0], &self.zero, parameters_gradients[0]).unwrap(); + + // TODO: implement gradient w.r.t bias + // if (bias_term_ && this->param_propagate_down_[1]) { + // const Dtype* top_diff = top[0]->gpu_diff(); + // // Gradient with respect to bias + // caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + // bias_multiplier_.gpu_data(), (Dtype)1., + // this->blobs_[1]->mutable_gpu_diff()); + // } + } +} + +impl ::std::default::Default for Linear { + fn default() -> Linear { + let config = LinearConfig { + output_size: 10, + }; + + Self::from_config(&config) + } +} + + +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] +/// Specifies configuration parameters for a Linear Layer. +pub struct LinearConfig { + /// The number of output values + pub output_size: usize, +} diff --git a/src/layers/common/log_softmax.rs b/src/layers/common/log_softmax.rs new file mode 100644 index 00000000..476f2fb5 --- /dev/null +++ b/src/layers/common/log_softmax.rs @@ -0,0 +1,57 @@ +//! Computes the logarithmic softmax of its input. +//! +use co::{IBackend, SharedTensor}; +use conn; +use layer::*; +use util::ArcLock; + +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] +/// LogSoftmax Layer +pub struct LogSoftmax; + +impl> ILayer for LogSoftmax { + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + let input_desc = input_data[0].read().unwrap().desc().clone(); + input_gradient[0].write().unwrap().resize(&input_desc).unwrap(); + output_data[0].write().unwrap().resize(&input_desc).unwrap(); + output_gradient[0].write().unwrap().resize(&input_desc).unwrap(); + } +} + +impl> ComputeOutput for LogSoftmax { + fn compute_output(&self, + backend: &B, + _weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + backend.log_softmax_plain(input_data[0], output_data[0]).unwrap(); + } +} + +impl> ComputeInputGradient for LogSoftmax { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + backend.log_softmax_grad_plain(output_data[0], output_gradients[0], input_gradients[0]).unwrap(); + } +} + +impl> ComputeParametersGradient for LogSoftmax { } + +impl ::std::default::Default for LogSoftmax { + fn default() -> LogSoftmax { + LogSoftmax + } +} diff --git a/src/layers/common/mod.rs b/src/layers/common/mod.rs index 81b64bc8..4ed5bd73 100644 --- a/src/layers/common/mod.rs +++ b/src/layers/common/mod.rs @@ -2,6 +2,108 @@ //! //! For now the layers in common should be discribed as layers that are typical //! layers for building neural networks but are not activation or loss layers. -pub use self::convolution::Convolution; +#[macro_export] +macro_rules! impl_ilayer_common { + () => ( + fn exact_num_output_blobs(&self) -> Option { Some(1) } + fn exact_num_input_blobs(&self) -> Option { Some(1) } + ) +} + +pub use self::convolution::{Convolution, ConvolutionConfig}; +pub use self::linear::{Linear, LinearConfig}; +pub use self::log_softmax::LogSoftmax; +pub use self::pooling::{Pooling, PoolingConfig, PoolingMode}; +pub use self::softmax::Softmax; pub mod convolution; +pub mod linear; +pub mod log_softmax; +pub mod pooling; +pub mod softmax; + +/// Provides common utilities for Layers that utilize a filter with stride and padding. +/// +/// This is used by the Convolution and Pooling layers. +pub trait FilterLayer { + /// Computes the shape of the spatial dimensions. + fn calculate_spatial_output_dims(input_dims: &[usize], filter_dims: &[usize], padding: &[usize], stride: &[usize]) -> Vec { + let mut output_dims = Vec::with_capacity(input_dims.len()); + for (i, _) in input_dims.iter().enumerate() { + output_dims.push(((input_dims[i] + (2 * padding[i]) - filter_dims[i]) / stride[i]) + 1); + } + output_dims + } + + /// Calculate output shape based on the shape of filter, padding, stride and input. + fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec; + + /// Calculates the number of spatial dimensions for the pooling operation. + fn num_spatial_dims(&self, input_shape: &[usize]) -> usize; + + /// Retrievs the spatial dimensions for the filter based on `self.filter_shape()` + /// and the number of spatial dimensions. + /// + /// The spatial dimensions only make up part of the whole filter shape. The other parts are the + /// number of input and output feature maps. + fn spatial_filter_dims(&self, num_spatial_dims: usize) -> Vec { + let mut spatial_dims = Vec::with_capacity(num_spatial_dims); + let filter_shape = self.filter_shape(); + if filter_shape.len() == 1 { + for i in 0..num_spatial_dims { + spatial_dims.push(filter_shape[0]); + } + } else if filter_shape.len() == num_spatial_dims { + panic!("unimplemented: You can not yet specify one filter dimension per spatial dimension"); + } else { + panic!("Must either specify one filter_shape or one filter_shape per spatial dimension. Supplied {:?}", filter_shape.len()); + } + + spatial_dims + } + + /// Retrievs the stride for the convolution based on `self.stride` + /// and the number of spatial dimensions. + fn stride_dims(&self, num_spatial_dims: usize) -> Vec { + let mut stride_dims = Vec::with_capacity(num_spatial_dims); + let stride = self.stride(); + if stride.len() == 1 { + for i in 0..num_spatial_dims { + stride_dims.push(stride[0]); + } + } else if stride.len() == num_spatial_dims { + panic!("unimplemented: You can not yet specify one stride per spatial dimension"); + } else { + panic!("Must either specify one stride or one stride per spatial dimension. Supplied {:?}", stride.len()); + } + + stride_dims + } + + /// Retrievs the padding for the convolution based on `self.padding` + /// and the number of spatial dimensions. + fn padding_dims(&self, num_spatial_dims: usize) -> Vec { + let mut padding_dims = Vec::with_capacity(num_spatial_dims); + let padding = self.padding(); + if padding.len() == 1 { + for i in 0..num_spatial_dims { + padding_dims.push(padding[0]); + } + } else if padding.len() == num_spatial_dims { + panic!("unimplemented: You can not yet specify one padding per spatial dimension"); + } else { + panic!("Must either specify one padding or one padding per spatial dimension. Supplied {:?}", padding.len()); + } + + padding_dims + } + + /// The filter_shape that will be used by `spatial_filter_dims`. + fn filter_shape(&self) -> &[usize]; + + /// The stride that will be used by `stride_dims`. + fn stride(&self) -> &[usize]; + + /// The padding that will be used by `padding_dims`. + fn padding(&self) -> &[usize]; +} diff --git a/src/layers/common/pooling.rs b/src/layers/common/pooling.rs new file mode 100644 index 00000000..66c36811 --- /dev/null +++ b/src/layers/common/pooling.rs @@ -0,0 +1,170 @@ +//! Applies pooling to the input. +//! +//! This layers looks at adjectant values of the input and then computes a +//! simple pooling operation over them (e.g. taking their maximum or average value). +//! *See [PoolingMode][pooling_mode]* +//! +//! [pooling_mode]: ./enum.PoolingMode.html +//! +//! ## Input Data +//! +//! The layer expects the input to be in either 4D NCHW (2 spatial dimensions) +//! or 5D NCDHW (3 spatial dimensions) format. +use std::rc::Rc; +use co::{IBackend, SharedTensor}; +use co::plugin::Float; +use conn; +use layer::*; +use util::{ArcLock, cast_vec_usize_to_i32}; +use super::FilterLayer; + +#[derive(Debug, Clone)] +/// [Pooling](./index.html) Layer +pub struct Pooling> { + mode: PoolingMode, + + filter_shape: Vec, + stride: Vec, + padding: Vec, + + pooling_configs: Vec>, +} + +impl> Pooling { + /// Create a Pooling layer from a PoolingConfig. + pub fn from_config(config: &PoolingConfig) -> Pooling { + Pooling { + mode: config.mode, + + filter_shape: config.filter_shape.clone(), + stride: config.stride.clone(), + padding: config.padding.clone(), + + pooling_configs: vec![], + } + } +} + +impl> FilterLayer for Pooling { + /// Calculates the number of spatial dimensions for the pooling operation. + fn num_spatial_dims(&self, input_shape: &[usize]) -> usize { + match input_shape.len() { + 4 => 2, + 5 => 3, + _ => panic!("A pooling layer currently only supports 4D or 5D input.") + } + } + + fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec { + let num_spatial_dims = self.num_spatial_dims(input_shape); + let filter = self.spatial_filter_dims(num_spatial_dims); + let padding = self.padding_dims(num_spatial_dims); + let stride = self.stride_dims(num_spatial_dims); + let mut output_shape = Vec::new(); + for dim in &input_shape[0..2].to_vec() { + output_shape.push(*dim); + } + println!("FILTER SHAPE 1 {:?}", output_shape); + for spatial_dim in Self::calculate_spatial_output_dims(&input_shape[2..], &filter, &padding, &stride) { + output_shape.push(spatial_dim); + } + println!("FILTER OUT SHAPE {:?}", output_shape); + + output_shape + } + + fn filter_shape(&self) -> &[usize] { + &self.filter_shape + } + + fn stride(&self) -> &[usize] { + &self.stride + } + + fn padding(&self) -> &[usize] { + &self.padding + } +} + +impl> ILayer for Pooling { + impl_ilayer_common!(); + + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + for i in 0..input_data.len() { + let inp = input_data[0].read().unwrap(); + let input_shape = inp.desc(); + let output_shape = self.calculate_output_shape(input_shape); + output_data[0].write().unwrap().resize(&output_shape).unwrap(); + output_gradient[0].write().unwrap().resize(&output_shape).unwrap(); + + let num_spatial_dims = self.num_spatial_dims(inp.desc()); + let filter = cast_vec_usize_to_i32(self.spatial_filter_dims(num_spatial_dims)); + let stride = cast_vec_usize_to_i32(self.stride_dims(num_spatial_dims)); + let padding = cast_vec_usize_to_i32(self.padding_dims(num_spatial_dims)); + + let config = backend.new_pooling_config(&filter, &padding, &stride).unwrap(); + self.pooling_configs.push(Rc::new(config)); + } + } +} + +impl> ComputeOutput for Pooling { + fn compute_output(&self, + backend: &B, + weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + let config = &self.pooling_configs[0]; + match self.mode { + PoolingMode::Max => backend.pooling_max_plain(input_data[0], output_data[0], &*config).unwrap(), + // TODO: implement average pooling + // PoolingMode::Average => unimplemented!(), + } + } +} + +impl> ComputeInputGradient for Pooling { + fn compute_input_gradient(&self, + backend: &B, + _weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + let config = &self.pooling_configs[0]; + match self.mode { + PoolingMode::Max => backend.pooling_max_grad_plain(output_data[0], output_gradients[0], input_data[0], input_gradients[0], config).unwrap() + } + } +} + +impl> ComputeParametersGradient for Pooling { } + +#[derive(Debug, Clone)] +/// Specifies configuration parameters for a Pooling Layer. +pub struct PoolingConfig { + /// The PoolingMode to use + pub mode: PoolingMode, + /// The shape of the filter + pub filter_shape: Vec, + /// The stride size + pub stride: Vec, + /// The padding size + pub padding: Vec, +} + +#[derive(Debug, Copy, Clone)] +/// The different modes of pooling that can be calculated. +pub enum PoolingMode { + /// The maximum value inside the pooling window will be used as result. + Max, + // /// The average of all values inside the pooling window will be used as result. + // Average, +} diff --git a/src/layers/common/softmax.rs b/src/layers/common/softmax.rs new file mode 100644 index 00000000..0ab38c6e --- /dev/null +++ b/src/layers/common/softmax.rs @@ -0,0 +1,58 @@ +//! Computes the softmax of its input. +//! +//! For the logarithmic softmax see the `LogSoftmax` layer. +use co::{IBackend, SharedTensor}; +use conn; +use layer::*; +use util::ArcLock; + +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] +/// Softmax Layer +pub struct Softmax; + +impl> ILayer for Softmax { + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + let input_desc = input_data[0].read().unwrap().desc().clone(); + input_gradient[0].write().unwrap().resize(&input_desc).unwrap(); + output_data[0].write().unwrap().resize(&input_desc).unwrap(); + output_gradient[0].write().unwrap().resize(&input_desc).unwrap(); + } +} + +impl> ComputeOutput for Softmax { + fn compute_output(&self, + backend: &B, + _weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + backend.softmax_plain(input_data[0], output_data[0]).unwrap(); + } +} + +impl> ComputeInputGradient for Softmax { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + backend.softmax_grad_plain(output_data[0], output_gradients[0], input_gradients[0]).unwrap(); + } +} + +impl> ComputeParametersGradient for Softmax { } + +impl ::std::default::Default for Softmax { + fn default() -> Softmax { + Softmax + } +} diff --git a/src/layers/loss/mod.rs b/src/layers/loss/mod.rs index 9b50f90d..3a39dd6b 100644 --- a/src/layers/loss/mod.rs +++ b/src/layers/loss/mod.rs @@ -1,6 +1,23 @@ //! Provides methods to calculate the loss (cost) of some output. //! //! A loss function is also sometimes called cost function. -pub use self::softmax::Softmax; +#[macro_export] +macro_rules! impl_ilayer_loss { + () => ( + fn exact_num_output_blobs(&self) -> Option { Some(1) } + fn exact_num_input_blobs(&self) -> Option { Some(1) } + fn auto_output_blobs(&self) -> bool { true } -pub mod softmax; + fn loss_weight(&self, output_id: usize) -> Option { + if output_id == 0 { + Some(1f32) + } else { + None + } + } + ) +} + +pub use self::negative_log_likelihood::NegativeLogLikelihood; + +pub mod negative_log_likelihood; diff --git a/src/layers/loss/negative_log_likelihood.rs b/src/layers/loss/negative_log_likelihood.rs new file mode 100644 index 00000000..bb672aaa --- /dev/null +++ b/src/layers/loss/negative_log_likelihood.rs @@ -0,0 +1,121 @@ +//! TODO: DOC +//! +use co::{IBackend, ITensorDesc, SharedTensor}; +use layer::*; +use util::{ArcLock, native_backend}; + +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] +/// NegativeLogLikelihood Loss Layer +pub struct NegativeLogLikelihood; + +impl NegativeLogLikelihood { + fn calculate_outer_num(softmax_axis: usize, input_shape: &[usize]) -> usize { + input_shape.iter().take(softmax_axis + 1).fold(1, |prod, i| prod * i) + } + + fn calculate_inner_num(softmax_axis: usize, input_shape: &[usize]) -> usize { + input_shape.iter().skip(softmax_axis + 1).fold(1, |prod, i| prod * i) + } + + fn batch_size(input_shape: &[usize]) -> usize { + match input_shape.len() { + 1 => 1, + 2 => input_shape[0], + _ => panic!("NegativeLogLikelihood layer only supports 1D/2D inputs") + } + } + + fn num_classes(input_shape: &[usize]) -> usize { + match input_shape.len() { + 1 => input_shape[0], + 2 => input_shape[1], + _ => panic!("NegativeLogLikelihood layer only supports 1D/2D inputs"), + } + } +} + +impl ILayer for NegativeLogLikelihood { + impl_ilayer_loss!(); + + fn sync_native(&self) -> bool { + true + } + + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + let data = input_data[0].read().unwrap(); + let label = input_data[1].read().unwrap(); + + input_gradient[0].write().unwrap().resize(data.desc()).unwrap(); + output_data[0].write().unwrap().resize(label.desc()).unwrap(); + } +} + +impl ComputeOutput for NegativeLogLikelihood { + fn compute_output(&self, + backend: &B, + _weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) { + let probabilities = input_data[0]; + let labels = input_data[1]; + + let batch_size = Self::batch_size(labels.desc()); + + let native = native_backend(); + let native_labels = labels.get(native.device()).unwrap().as_native().unwrap().as_slice::(); + let native_probabilities = probabilities.get(native.device()).unwrap().as_native().unwrap().as_slice::(); + + let mut writable_loss = Vec::::new(); + for &label_value in native_labels { + let probability_value = native_probabilities[label_value as usize]; + writable_loss.push(-probability_value); + } + + let mut loss = writable_loss.iter().fold(0f32, |sum, &val| sum + val); + loss = loss / (batch_size as f32); + writable_loss = vec![loss]; + + ::util::write_to_memory(output_data[0].get_mut(native.device()).unwrap(), &writable_loss); + } +} + +impl ComputeInputGradient for NegativeLogLikelihood { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) { + let labels = input_data[1]; + let batch_size = Self::batch_size(input_data[0].desc()); + let num_classes = Self::num_classes(input_data[0].desc()); + + let native = native_backend(); + let native_labels = labels.get(native.device()).unwrap().as_native().unwrap().as_slice::(); + let mut writable_gradient = vec![0f32; input_gradients[0].desc().size()]; + + for (batch_n, &label_value) in native_labels.iter().enumerate() { + let index = (num_classes * batch_n) + label_value as usize; + writable_gradient[index] = -1f32; + } + input_gradients[0].sync(native.device()).unwrap(); + ::util::write_to_memory(input_gradients[0].get_mut(native.device()).unwrap(), &writable_gradient); + } +} + +impl ComputeParametersGradient for NegativeLogLikelihood { } + +impl ::std::default::Default for NegativeLogLikelihood { + fn default() -> NegativeLogLikelihood { + NegativeLogLikelihood + } +} diff --git a/src/layers/loss/softmax.rs b/src/layers/loss/softmax.rs deleted file mode 100644 index fcc048b8..00000000 --- a/src/layers/loss/softmax.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Computes the multinomial logistic loss of the softmax of its bottom Blob. -//! -//! This is conceptually identical to a softmax layer followed by a multinomial -//! logistic loss layer, but provides a more numerically stable gradient. - -#[derive(Debug, Copy, Clone)] -/// Softmax Loss Layer -pub struct Softmax; diff --git a/src/layers/mod.rs b/src/layers/mod.rs index c5282bc3..a65bbd1d 100644 --- a/src/layers/mod.rs +++ b/src/layers/mod.rs @@ -6,23 +6,23 @@ //! The operations provided by the layers can be //! roughly grouped into four categories: //! -//! * __Activation__
+//! * [__Activation__][mod_activation]
//! Activation Layers provide element-wise operations and produce one top Blob //! of the same size as the bottom Blob. //! It can be seen as a synonym to nonlinear [Activation Functions][2]. //! -//! * __Common__
+//! * [__Common__][mod_common]
//! Common Layers can differ in their connectivity and behavior and are //! typically all network layer //! types which are not covered by activation or loss layers. //! Examples would be fully connected //! layers, covolutional layers, pooling layers, etc. //! -//! * __Loss__
+//! * [__Loss__][mod_loss]
//! Loss Layers compare an output to a target value and assign cost to //! minimize. Loss Layers are often the last layer in a [Network][1]. //! -//! * __Utility__
+//! * [__Utility__][mod_utility]
//! Utility Layers provide all kind of helpful functionality, which might not //! be directly related //! to machine learning and neural nets. This could be operations for @@ -32,48 +32,46 @@ //! Utility Layers follow the general behavior of a layer, like the other types //! do. //! -//! For more information about how all these layers work specifically, see the +//! For more information about how these layers work together, see the //! documentation for the general [Layer module][3]. //! -//! ## Examples -//! -//! ``` -//! extern crate leaf; -//! use leaf::layers::*; -//! -//! # fn main() { -//! let _ = activation::Sigmoid; -//! let _ = common::Convolution; -//! let _ = loss::Softmax; -//! let _ = utility::Flatten; -//! # } -//! ``` -//! //! [2]: https://en.wikipedia.org/wiki/Activation_function //! [3]: ../layer/index.html +//! +//! [mod_activation]: ./activation/index.html +//! [mod_common]: ./common/index.html +//! [mod_loss]: ./loss/index.html +//! [mod_utility]: ./utility/index.html /// Implement [ILayer][1] for [activation layers][2]. /// [1]: ./layer/trait.ILayer.html /// [2]: ./layers/activation/index.html -#[macro_export] -macro_rules! impl_ilayer_activation { - () => ( - fn exact_num_top_blobs(&self) -> usize { 1 } - fn exact_num_bottom_blobs(&self) -> usize { 1 } - ) -} #[allow(unused_import_braces)] -pub use self::activation::{Sigmoid}; +pub use self::activation::{ + ReLU, + Sigmoid, +}; #[allow(unused_import_braces)] -pub use self::common::{Convolution}; +pub use self::common::{ + Convolution, ConvolutionConfig, + Linear, LinearConfig, + LogSoftmax, + Pooling, PoolingConfig, PoolingMode, + Softmax, +}; #[allow(unused_import_braces)] -pub use self::loss::{Softmax}; +pub use self::loss::{ + NegativeLogLikelihood, +}; #[allow(unused_import_braces)] -pub use self::utility::{Flatten}; +pub use self::utility::{ + Flatten, + Reshape, ReshapeConfig, +}; pub mod activation; pub mod common; diff --git a/src/layers/utility/flatten.rs b/src/layers/utility/flatten.rs index 2ae98d43..642574a6 100644 --- a/src/layers/utility/flatten.rs +++ b/src/layers/utility/flatten.rs @@ -3,6 +3,7 @@ //! Input of shape n * c * h * w becomes //! a simple vector output of shape n * (c*h*w). //! -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] +#[allow(missing_copy_implementations)] /// Flattening Utility Layer pub struct Flatten; diff --git a/src/layers/utility/mod.rs b/src/layers/utility/mod.rs index 0482eb71..05911187 100644 --- a/src/layers/utility/mod.rs +++ b/src/layers/utility/mod.rs @@ -10,5 +10,7 @@ //! //! [1]: ../../layer/index.html pub use self::flatten::Flatten; +pub use self::reshape::{Reshape, ReshapeConfig}; pub mod flatten; +pub mod reshape; diff --git a/src/layers/utility/reshape.rs b/src/layers/utility/reshape.rs new file mode 100644 index 00000000..6d16f6f0 --- /dev/null +++ b/src/layers/utility/reshape.rs @@ -0,0 +1,89 @@ +//! Utility layer to give the input another shape. +//! +//! Reshaping a input tensor is required so that it becomes +//! usable for Layers that interpret meaning into the shape of +//! the tensor. +//! +//! A lot of layers interpret the last dimensions as NCHW, +//! where the letters stand for: +//! +//! - `N` : number of batch samples +//! - `C` : number of feature maps +//! - `H` : height +//! - `W` : width +use co::{IBackend, SharedTensor}; +use layer::*; +use util::ArcLock; + +#[derive(Debug, Clone)] +/// Reshape Utility Layer +pub struct Reshape{ + shape: Vec, +} + +impl Reshape { + /// Create a Reshape layer from a ReshapeConfig. + pub fn from_config(config: &ReshapeConfig) -> Reshape { + Reshape { + shape: config.shape.clone(), + } + } +} + +impl ILayer for Reshape { + fn auto_output_blobs(&self) -> bool { + false + } + + fn reshape(&mut self, + backend: ::std::rc::Rc, + input_data: &mut Vec>>, + input_gradient: &mut Vec>>, + weights_data: &mut Vec>>, + weights_gradient: &mut Vec>>, + output_data: &mut Vec>>, + output_gradient: &mut Vec>>) { + input_data[0].write().unwrap().reshape(&self.shape).unwrap(); + input_gradient[0].write().unwrap().reshape(&self.shape).unwrap(); + } +} + +impl ComputeOutput for Reshape { + fn compute_output(&self, + backend: &B, + _weights: &[&SharedTensor], + input_data: &[&SharedTensor], + output_data: &mut [&mut SharedTensor]) {} +} + +impl ComputeInputGradient for Reshape { + fn compute_input_gradient(&self, + backend: &B, + weights_data: &[&SharedTensor], + output_data: &[&SharedTensor], + output_gradients: &[&SharedTensor], + input_data: &[&SharedTensor], + input_gradients: &mut [&mut SharedTensor]) {} +} + +impl ComputeParametersGradient for Reshape {} + +#[derive(Debug, Clone)] +/// Specifies configuration parameters for a Reshape Layer. +pub struct ReshapeConfig { + /// The target shape that the input should assume. + /// + /// Preceding dimensions are treated as independent inputs + /// + /// Defaults to `1` + pub shape: Vec, +} + +impl ReshapeConfig { + /// Create a ReshapeConfig that describes a Reshape layer with a provided shape. + pub fn of_shape(shape: &[usize]) -> ReshapeConfig { + ReshapeConfig { + shape: shape.to_owned() + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 0ed2fb8e..133ba2d3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,7 +23,7 @@ //! Layers, the building block of a Leaf Network, are small units, describing computation over //! numerical input data. Generally speaking Layers take input and produce an output, but //! essentially a Layer can describe any functionality e.g. logging as long as it obeys to the -//! general behaviour specifications of a Layer. Any Layer can be grouped in one of four +//! general behaviour specifications of a Layer. A Layer can be grouped in one of four //! Layer types which are closer defined at the [Layers page][layers]. Every //! layer serves a special purpose and can occur zero, one or many times inside a Network. //! @@ -105,9 +105,10 @@ //! [issue-loss]: https://github.com/autumnai/leaf/issues/18 //! [issue-activation]: https://github.com/autumnai/leaf/issues/19 //! [issue-common]: https://github.com/autumnai/leaf/issues/20 -#![cfg_attr(lint, feature(plugin))] -#![cfg_attr(lint, plugin(clippy))] -#![feature(augmented_assignments)] +#![cfg_attr(feature="lint", feature(plugin))] +#![cfg_attr(feature="lint", plugin(clippy))] +#![cfg_attr(feature="lint", allow(type_complexity))] + #![allow(dead_code)] #![allow(unused_variables)] #![deny(missing_docs, @@ -122,13 +123,21 @@ #![cfg_attr(feature="clippy", plugin(clippy))] #![cfg_attr(feature="clippy", deny(clippy, clippy_pedantic))] +#[macro_use] +extern crate timeit; #[macro_use] extern crate log; -extern crate phloem; +extern crate rand; extern crate collenchyma as co; -pub mod shared_memory; +extern crate collenchyma_blas as coblas; +extern crate collenchyma_nn as conn; pub mod layer; pub mod layers; +#[cfg(feature="cuda")] pub mod solver; +#[cfg(feature="cuda")] pub mod solvers; pub mod network; +pub mod weight; + +pub mod util; diff --git a/src/network.rs b/src/network.rs index 0f59c7bc..df9c7b11 100644 --- a/src/network.rs +++ b/src/network.rs @@ -25,22 +25,21 @@ //! ## Glossary //! ### Input Layers / Blobs //! -//! A input layer is the bottom-most layer of a network.
+//! A input layer is the first layer of a network.
//! During a forward step the data is put into the input layer, //! passed through all the intermediate (hidden) layers and generates a //! result in the output layer. //! //! The blobs in a input layer contain externally preprocessed data that has //! been brought into a form suitable for consumption by a neural network. -use co::backend::IBackend; -use co::libraries::blas::IBlas; +use std::rc::Rc; +use co::IBackend; +use co::tensor::*; use std::collections::{HashMap, HashSet}; use std::sync::{Arc, RwLock}; -use shared_memory::*; use layer::{ILayer, Layer}; -use layer::{LayerConfig, WeightConfig}; -use phloem::Blob; -use std::rc::Rc; +use layer::LayerConfig; +use util::{ArcLock, LayerOps, SolverOps}; #[derive(Debug)] /// Defines a [Network][1] that contains the [Layers][2] and [Blobs][3] that store @@ -55,18 +54,24 @@ use std::rc::Rc; /// A Network is usually used together with a [Solver][6] to optimize the networks' weights. /// /// [6]: ../solver/struct.Solver.html -pub struct Network> { +pub struct Network> { /// Identifies the Network /// /// The name is mainly used for logging purposes. pub name: String, layers: Vec>, - blobs: Vec>, // the blobs storing intermediate results between the layer. + blobs_data: Vec>>, // the blobs storing intermediate results between the layer. + blobs_gradient: Vec>>, // the blobs storing intermediate results between the layer. blob_names: Vec, - input_blobs: Vec>, - output_blobs: Vec>, + input_blobs_data: Vec>>, + input_blobs_gradient: Vec>>, + input_blob_names: Vec, + output_blobs_data: Vec>>, + output_blobs_gradient: Vec>>, + + registry: HashMap>, ArcLock>)>, weight_owners: Vec>, weight_display_names: Vec, @@ -78,25 +83,32 @@ pub struct Network> { /// /// Parameters are currently in the process of being renamed to weights throughout the codebase. /// [Issue #17](https://github.com/autumnai/leaf/issues/17) - weights: Vec>, - learnable_weights: Vec>, + weights: Vec>>, + learnable_weights_data: Vec>>, + learnable_weights_gradient: Vec>>, learnable_weight_ids: Vec, weights_lr: Vec>, weights_weight_decay: Vec>, } -impl> Default for Network { +impl> Default for Network { fn default() -> Network { Network { name: "".to_owned(), layers: vec![], - blobs: vec![], + blobs_data: vec![], + blobs_gradient: vec![], blob_names: vec![], - input_blobs: vec![], - output_blobs: vec![], + input_blobs_data: vec![], + input_blobs_gradient: vec![], + input_blob_names: vec![], + output_blobs_data: vec![], + output_blobs_gradient: vec![], + + registry: HashMap::new(), weight_owners: vec![], weight_display_names: vec![], @@ -104,7 +116,8 @@ impl> Default for Network { weight_names_index: HashMap::::new(), weights: vec![], - learnable_weights: vec![], + learnable_weights_data: vec![], + learnable_weights_gradient: vec![], learnable_weight_ids: vec![], weights_lr: vec![], @@ -113,7 +126,7 @@ impl> Default for Network { } } -impl> Network { +impl + 'static> Network { /// Creates a Network from a [NetworkConfig][1]. /// [1]: ./struct.NetworkConfig.html /// @@ -124,21 +137,19 @@ impl> Network { /// # extern crate leaf; /// /// # use leaf::network::*; - /// # use collenchyma::backend::{Backend, BackendConfig}; - /// # use collenchyma::frameworks::Native; - /// # use collenchyma::framework::IFramework; + /// # use collenchyma::prelude::*; /// # use std::rc::Rc; /// + /// # #[cfg(feature="cuda")] /// # fn main() { /// // create backend - /// let framework = Native::new(); - /// let hardwares = framework.hardwares(); - /// let backend_config = BackendConfig::new(framework, hardwares); - /// let backend = Rc::new(Backend::new(backend_config).unwrap()); + /// let backend = Rc::new(Backend::::default().unwrap()); /// // create network /// let cfg = NetworkConfig::default(); /// Network::from_config(backend, &cfg); /// # } + /// # #[cfg(not(feature="cuda"))] + /// # fn main() {} /// ``` pub fn from_config(backend: Rc, param: &NetworkConfig) -> Network { let mut network = Network::default(); @@ -155,14 +166,15 @@ impl> Network { /// [1]: ./struct.NetworkConfig.html fn init(&mut self, backend: Rc, in_config: &NetworkConfig) { let config = in_config.clone(); - let registry = &mut HashMap::>::new(); + let mut registry = HashMap::>, ArcLock>)>::new(); + let weight_registry = &mut HashMap::>, ArcLock>, Option, Option)>::new(); for (input_name, input_shape) in config.inputs.iter().zip(config.input_shapes.iter()) { - self.init_input_blob(&input_name, input_shape, registry); + self.init_input_blob(backend.clone(), &input_name, input_shape, &mut registry); } for layer_config in &config.layers { - self.init_layer(backend.clone(), &layer_config, registry); + self.init_layer(backend.clone(), &layer_config, &mut registry, weight_registry); } // Go through the net backwards to determine which blobs contribute to the @@ -173,7 +185,7 @@ impl> Network { // computation for the entire layer let blobs_under_loss = &mut HashSet::::new(); let blobs_skip_backp = &mut HashSet::::new(); - for layer in &mut self.layers { + for layer in &mut self.layers.iter_mut().rev() { layer.init_backprop( blobs_under_loss, blobs_skip_backp); } @@ -186,10 +198,12 @@ impl> Network { // In the end, all remaining blobs are considered output blobs. for (blob_name, blob) in registry.iter() { info!("This network produces output {}", blob_name); - self.output_blobs.push(blob.clone()); + self.output_blobs_data.push(blob.0.clone()); + self.output_blobs_gradient.push(blob.1.clone()); } self.share_weights(); + self.registry = registry; info!("Network initialization done."); } @@ -208,14 +222,8 @@ impl> Network { fn init_layer(&mut self, backend: Rc, layer_config: &LayerConfig, - registry: &mut HashMap>) { - // Caffe - // bool share_from_root = !Caffe::root_solver() - // && root_net_->layers_[layer_id]->ShareInParallel(); - // // Inherit mode from net if unset. - // if (!param.layer(layer_id).has_mode()) { - // param.mutable_layer(layer_id)->set_mode(mode_); - // } + registry: &mut HashMap>, ArcLock>)>, + weight_registry: &mut HashMap>, ArcLock>, Option, Option)>) { // Setup layer. if let Err(e) = layer_config.validate() { @@ -226,12 +234,12 @@ impl> Network { let mut layer = Layer::from_config(backend, &layer_config); // Figure out this layer's input and output - // self.layers.last_mut().unwrap().connect(registry); - layer.connect(registry); - - for (weight_id, _) in layer.blobs.iter().enumerate() { - let layer_id = self.layers.len(); - self.append_weight(layer_id, weight_id); + layer.connect(registry, weight_registry); + for weight_data in &layer.weights_data { + self.learnable_weights_data.push(weight_data.clone()); + } + for weight_gradient in &layer.weights_gradient { + self.learnable_weights_gradient.push(weight_gradient.clone()); } self.layers.push(layer); @@ -251,8 +259,8 @@ impl> Network { // } for (i, _) in self.weights.clone().iter().enumerate() { if let Some(j) = self.weight_owners[i] { - assert!(self.weights[i].read().unwrap().capacity() == - self.weights[j].read().unwrap().capacity()); + assert!(self.weights[i].read().unwrap().desc().size() == + self.weights[j].read().unwrap().desc().size()); self.weights[i] = self.weights[j].clone(); // sharing whole blob? } } @@ -268,9 +276,10 @@ impl> Network { /// [2]: ../layer/struct.Layer.html#method.connect #[cfg_attr(lint, allow(ptr_arg))] fn init_input_blob(&mut self, + backend: Rc, blob_name: &str, input_shape: &Vec, - registry: &mut HashMap>) { + registry: &mut HashMap>, ArcLock>)> ) { if registry.contains_key(blob_name) { // If we are not doing in-place computation but have duplicated blobs, raise an @@ -278,120 +287,18 @@ impl> Network { error!("Top blob {} produced by multiple sources.", blob_name); return } else { - // if (Caffe::root_solver()) { - { - info!("Input {} -> {}", self.input_blobs.len(), blob_name); - } + info!("Input {} -> {}", self.input_blobs_data.len(), blob_name); - let blob: ArcLock = Arc::new(RwLock::new(Box::new(Blob::new()))); - let blob_id = self.blobs.len(); - self.blobs.push(blob.clone()); + let ibackend: Rc> = backend; + let blob_data: ArcLock> = Arc::new(RwLock::new(SharedTensor::new(ibackend.device(), input_shape).unwrap())); + let blob_gradient: ArcLock> = Arc::new(RwLock::new(SharedTensor::new(ibackend.device(), input_shape).unwrap())); + let blob_id = self.blobs_data.len(); + self.blobs_data.push(blob_data.clone()); self.blob_names.push(blob_name.to_owned()); - // Set the (explicitly specified) dimensions of the input blob. - // let input_shape = config.input_shape(top_id).unwrap().clone(); - blob.write().unwrap().reshape(&input_shape.clone()); - - self.input_blobs.push(blob.clone()); - registry.insert(blob_name.to_owned(), blob); - } - } - - /// Append a weight blob to the network. - /// - /// During network initalization weight blobs are appended to the correct layers. - /// If a layer's [LayerConfig][1] states that the weights are shared, - /// this function also makes sure to set a reference to the other weight blob instead of - /// allocating a new one. - /// - /// [1]: ../layer/struct.LayerConfig.html - fn append_weight(&mut self, layer_id: usize, weight_id: usize) { - let layer_config = self.layers[layer_id].config.clone(); - let weights_len = self.weights.len(); - let weight_name = if weights_len > weight_id { - layer_config.param(weight_id).unwrap().name.clone() - } else { - "".to_owned() - }; - - // use weight_name (or weight_id as a fallback) as display_name - if !weight_name.is_empty() { - self.weight_display_names.push(weight_name.clone()); - } else { - self.weight_display_names.push(format!("{}", weight_id)); - } - - // add to tracking vectors - let net_weight_id = weights_len; - self.weights.push(self.layers[layer_id].blobs[weight_id].clone()); - self.weight_layer_indices.push((layer_id, weight_id)); - - let mut weight_config = &WeightConfig::default(); - if layer_config.params_len() > weight_id { - weight_config = layer_config.param(weight_id).unwrap(); - } - // This layer "owns" this weight blob -- it is either anonymous - // (i.e., not given a weight_name) or explicitly given a name that we - // haven't already seen. - if weight_name.is_empty() || !self.weight_names_index.contains_key(&weight_name) { - self.weight_owners.push(None); - if !weight_name.is_empty() { - self.weight_names_index.insert(weight_name.clone(), net_weight_id); - } - let learnable_weight_id = self.learnable_weights.len(); - self.learnable_weights.push(self.weights[net_weight_id].clone()); - self.learnable_weight_ids.push(learnable_weight_id); - self.weights_lr.push(weight_config.lr_mult.clone()); - self.weights_weight_decay.push(weight_config.decay_mult.clone()); - } else { - // Named weight blob with name we've seen before: share weights - - let owner_net_weight_id = *self.weight_names_index.get(&weight_name).unwrap(); - self.weight_owners.push(Some(owner_net_weight_id)); - let (owner_layer_id, owner_weight_id) = self.weight_layer_indices[owner_net_weight_id]; - info!("Sharing weights '{}' owned by layer '{}', weight index {}", - weight_name.clone(), - self.layers[owner_layer_id].name, - owner_weight_id); - let this_blob = self.layers[layer_id].blobs[weight_id].clone(); - let owner_blob = self.layers[owner_layer_id].blobs[owner_weight_id].clone(); - // can only share weights if blobs match by shape or capacity - if weights_len > weight_id { - if let Err(e) = layer_config.param(weight_id) - .unwrap() - .check_dimensions(&this_blob.read().unwrap(), - &owner_blob.read().unwrap(), - weight_name.clone(), - self.layers[owner_layer_id].name.clone(), - self.layers[layer_id].name.clone()) { - error!("{}", e) - } - } - - let learnable_weight_id = self.learnable_weight_ids[owner_net_weight_id]; - self.learnable_weight_ids.push(learnable_weight_id); - // can only share parameters if both have same lr_mult - if let Some(lr_mult) = weight_config.lr_mult { - if let Some(owner_lr_mult) = self.weights_lr[learnable_weight_id] { - if !lr_mult.eq(&owner_lr_mult) { - error!("Shared param '{}' has mismatched lr_mult.", - weight_name.clone()); - } - } else { - self.weights_lr[learnable_weight_id] = weight_config.lr_mult; - } - } - // can only share weights if both have same decay_mult - if let Some(decay_mult) = weight_config.decay_mult { - if let Some(owner_decay_mult) = self.weights_weight_decay[learnable_weight_id] { - if !decay_mult.eq(&owner_decay_mult) { - error!("Shared param '{}' has mismatched decay_mult.", - weight_name.clone()); - } - } else { - self.weights_weight_decay[learnable_weight_id] = weight_config.decay_mult; - } - } + self.input_blobs_data.push(blob_data.clone()); + self.input_blob_names.push(blob_name.to_owned()); + registry.insert(blob_name.to_owned(), (blob_data, blob_gradient)); } } @@ -405,7 +312,7 @@ impl> Network { /// /// [4]: ../solver/struct.Solver.html /// [5]: https://en.wikipedia.org/wiki/Backpropagation#Phase_1:_Propagation - pub fn forward_backward(&mut self, bottom: &[ArcLock]) -> f32 { + pub fn forward_backward(&mut self, bottom: &[ArcLock>]) -> f32 { let loss = &mut 0f32; self.forward(bottom, loss); @@ -424,9 +331,16 @@ impl> Network { /// /// This is the go-to if you just want to feed data to your network and get the corresponding /// output. - pub fn forward(&mut self, input: &[ArcLock], loss: &mut f32) -> &Vec> { + pub fn forward(&mut self, input: &[ArcLock>], loss: &mut f32) -> &Vec>> { for (i, inp) in input.iter().enumerate() { - self.input_blobs[i] = inp.clone(); + self.input_blobs_data[i] = inp.clone(); + for layer in &mut self.layers { + for (blob_index, blob_name) in layer.input_blob_names().to_owned().iter().enumerate() { + if blob_name == &self.input_blob_names[i] { + layer.input_blobs_data[blob_index] = inp.clone(); + } + } + } } self.forward_prefilled(Some(loss)) @@ -442,8 +356,8 @@ impl> Network { /// otherwise [forward][4] is the prefered method to forward through the whole network. /// /// [4]: #method.forward - pub fn forward_prefilled(&mut self, loss: Option<&mut f32>) -> &Vec> { - let end = self.layers.len() - 1; + pub fn forward_prefilled(&mut self, loss: Option<&mut f32>) -> &Vec>> { + let end = self.layers.len(); match loss { Some(loss_result) => { // not sure if loss_result will really be changed @@ -454,7 +368,7 @@ impl> Network { } } - &self.output_blobs + &self.output_blobs_data } /// Compute [forward step][1] for a part of (or the whole) network and returns the [total loss][2]. @@ -470,13 +384,18 @@ impl> Network { /// /// [3]: #method.forward_prefilled pub fn forward_from_to(&mut self, start: usize, end: usize) -> f32 { - assert!(end < self.layers.len()); + assert!(end <= self.layers.len()); let mut loss = 0f32; for i in start..end { loss += self.layers[i].forward(); + if i == (end - 1) { + // synchronize after last layer + self.layers[i].synchronize(); + } } + debug!("LOSS {:?}", loss); loss } @@ -491,8 +410,22 @@ impl> Network { /// Backpropagating a network is only useful during training and handled by a [Solver][3] /// [3]: ../solver/index.html pub fn backward(&mut self) { - let start = self.layers.len() - 1; - self.backward_from_to(start, 0); + let start = self.layers.len(); + debug!("BACKWARD NETWORK START: {:?}", &start); + self.backward_input_from_to(start, 0); + self.backward_parameters_from_to(start, 0); + } + + /// TODO: Docs + pub fn backward_input(&mut self) { + let start = self.layers.len(); + self.backward_input_from_to(start, 0); + } + + /// TODO: Docs + pub fn backward_parameters(&mut self) { + let start = self.layers.len(); + self.backward_parameters_from_to(start, 0); } /// Compute [backpropagation][1] step for a part of (or the whole) network. @@ -505,11 +438,29 @@ impl> Network { /// If you want to compute a foward step for the whole network you should use [backward][3]. /// Computing a backward on a part of the network is usually only done for debugging purposes. /// [3]: #method.backward - pub fn backward_from_to(&mut self, start: usize, end: usize) { - assert!(start < self.layers.len()); + pub fn backward_input_from_to(&mut self, start: usize, end: usize) { + // assert!(start < self.layers.len()); + debug!("BACKWARD NETWORK LAYERS"); + for i in (end..start).rev() { + debug!("BACKWARD NETWORK LAYER {:?}", &self.layers[i].name); + self.layers[i].backward_input(); + if i == end { + // synchronize after last layer + self.layers[i].synchronize(); + } + } + } - for i in start..end { - self.layers[i].backward(); + /// TODO: Docs + pub fn backward_parameters_from_to(&mut self, start: usize, end: usize) { + debug!("BACKWARD NETWORK LAYERS"); + for i in (end..start).rev() { + debug!("BACKWARD NETWORK LAYER {:?}", &self.layers[i].name); + self.layers[i].backward_parameters(); + if i == end { + // synchronize after last layer + self.layers[i].synchronize(); + } } } @@ -522,14 +473,16 @@ impl> Network { /// /// [2]: ../solver/struct.Solver.html pub fn clear_weight_diffs(&mut self) { - for weight_blob in &mut self.learnable_weights.iter() { - // TODO - // for p in weight_blob.write().unwrap().mut_diff().iter_mut() { - // *p = 0f32; - // } + for weight_gradient in &mut self.learnable_weights_gradient.iter() { + let filler = ::weight::FillerType::Constant { + value: 0f32 + }; + filler.fill(&mut weight_gradient.write().unwrap()); } } +} +impl> Network { /// Updates the [weights][1] with the weight update computed by the [Solver][2]. /// [1]: https://en.wikipedia.org/wiki/Synaptic_weight /// [2]: ../solver/struct.Solver.html @@ -538,15 +491,31 @@ impl> Network { /// The update value is computed in previous steps according to the [learning rate policy][3] /// /// [3]: ../solver/enum.LRPolicy.html - pub fn update_weights(&mut self) { - for weight_blob in &self.learnable_weights { - weight_blob.write().unwrap().apply_diff() + pub fn update_weights>(&mut self, backend: &SolverB) { + let mut shared_a = ::util::native_scalar(-1f32); + let _ = shared_a.add_device(backend.device()); + shared_a.sync(backend.device()).unwrap(); + for (weight_gradient, weight_data) in self.learnable_weights_gradient.iter().zip(&mut self.learnable_weights_data) { + weight_gradient.write().unwrap().sync(backend.device()).unwrap(); + weight_data.write().unwrap().sync(backend.device()).unwrap(); + backend.axpy_plain(&shared_a, &weight_gradient.read().unwrap(), &mut weight_data.write().unwrap()).unwrap(); + // weight_blob.write().unwrap().apply_diff(backend) // TODO: solver } } #[allow(missing_docs)] - pub fn learnable_weights(&self) -> &Vec> { - &self.learnable_weights + pub fn learnable_weight_data(&self) -> &Vec>> { + &self.learnable_weights_data + } + + #[allow(missing_docs)] + pub fn learnable_weight_gradients(&self) -> &Vec>> { + &self.learnable_weights_gradient + } + + /// get the data associated with the provided tensor name + pub fn get_data(&self, name: &str) -> ArcLock> { + self.registry.get(name).unwrap().0.clone() } #[allow(missing_docs)] @@ -574,7 +543,7 @@ pub struct NetworkConfig { /// Defines the names of the [input blobs][1]. /// [1]: ./index.html#input-layers--blobs /// - /// The input blobs are identified by name so they can be referenced as [bottom blobs][2] + /// The input blobs are identified by name so they can be referenced as [input blobs][2] /// in a [LayerConfig][3]. /// /// [2]: ../layer/index.html @@ -638,6 +607,11 @@ impl NetworkConfig { self.layers.get(layer_id) } + /// Add layer at the end of the network. + pub fn add_layer(&mut self, layer: LayerConfig) { + self.layers.push(layer); + } + #[allow(missing_docs)] pub fn input(&self, input_id: usize) -> Option<&String> { self.inputs.get(input_id) @@ -647,6 +621,12 @@ impl NetworkConfig { pub fn input_shape(&self, input_id: usize) -> Option<&Vec> { self.input_shapes.get(input_id) } + + /// Add a input to the network. + pub fn add_input(&mut self, input_name: &str, shape: &[usize]) { + self.inputs.push(input_name.to_owned()); + self.input_shapes.push(shape.to_owned()); + } } #[derive(Debug, Clone)] diff --git a/src/shared_memory.rs b/src/shared_memory.rs deleted file mode 100644 index 966c60fd..00000000 --- a/src/shared_memory.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! -//! -//! This is quite unimportant and might be refactored soon. -//! -//! See [Issue #22][issue] for more informations. -//! [issue]: https://github.com/autumnai/leaf/issues/22 -use std::sync::{Arc, RwLock}; -use phloem::Blob; - -/// shared Lock used for our memory blobs -pub type ArcLock = Arc>; -/// Blob allocated on the heap via Box -pub type HeapBlob = Box>; - -/// ... -pub fn new_shared_heapblob() -> ArcLock { - Arc::new(RwLock::new(Box::new(Blob::new()))) -} diff --git a/src/solver.rs b/src/solver.rs index 8218816c..57664eac 100644 --- a/src/solver.rs +++ b/src/solver.rs @@ -1,18 +1,17 @@ //! Provides the generics and interfaces for the specific [Solvers][solvers]. //! [solvers]: ../solvers/index.html -use co::backend::*; -use co::framework::*; -use co::frameworks::Native; -use co::libraries::blas::IBlas; -use shared_memory::*; +use co::prelude::*; use network::*; use solvers::*; +use util::{LayerOps, SolverOps}; use std::rc::Rc; +use std::sync::{Arc, RwLock}; +use std::marker::PhantomData; #[derive(Debug)] /// Solver that optimizes a [Network][1]. /// [1]: ../network/struct.Network.html -pub struct Solver> { +pub struct Solver, B: IBackend + LayerOps> { net: Network, /// The implementation of the Solver pub worker: S, @@ -20,9 +19,11 @@ pub struct Solver> { config: SolverConfig, /// The current iteration / number of times weights have been updated iter: usize, + + solver_backend: PhantomData, } -impl> Solver { +impl, B: IBackend + LayerOps> Solver { /// Create Solver from [SolverConfig][1] /// [1]: ./struct.SolverConfig.html /// @@ -44,43 +45,31 @@ impl> Solver { /// let solver = Solver::>>, Backend>::from_config(&cfg); /// # } /// ``` - pub fn from_config(config: &SolverConfig) -> Solver>>, Backend> { - let framework = Native::new(); - let hardwares = framework.hardwares(); - let backend_config = BackendConfig::new(framework, hardwares); - let backend = Rc::new(Backend::new(backend_config).unwrap()); + pub fn from_config(config: &SolverConfig) -> Solver, Backend>>, Backend, Backend> { + let native_backend = Rc::new(Backend::::default().unwrap()); + let cuda_backend = Rc::new(Backend::::default().unwrap()); - let worker = config.solver.with_config(backend.clone(), &config); + let worker = config.solver.with_config(native_backend.clone(), &config); Solver { worker: worker, - net: Network::from_config(backend, &config.train_net), + net: Network::from_config(cuda_backend, &config.train_net), iter: 0, config: config.clone(), + solver_backend: PhantomData::>, } } } -impl, B: IBackend + IBlas> Solver{ +impl, SolverB: IBackend + SolverOps, B: IBackend + LayerOps + 'static> Solver{ fn init(&mut self, backend: Rc, config: SolverConfig) { - // Caffe - // CHECK(Caffe::root_solver() || root_solver_) - // << "root_solver_ needs to be set for all non-root solvers"; info!("Initializing solver from configuration: {:?}", config); self.config = config; assert!(self.config.average_loss > 1); - // Caffe - // if (Caffe::root_solver() && param_.random_seed() >= 0) { - // Caffe::set_random_seed(param_.random_seed()); - // } - Solver::::init_train_net(backend, &mut self.config, &mut self.net); - // if (Caffe::root_solver()) { - { - // self.init_test_nets(); - info!("Solver scaffolding done."); - } + Solver::::init_train_net(backend, &mut self.config, &mut self.net); + info!("Solver scaffolding done."); } /// Initialize the training net @@ -108,8 +97,6 @@ impl, B: IBackend + IBlas> Solver{ // net_.reset(new Net(net_param, root_solver_->net_.get())); // } *net = Network::from_config(backend, ¶m.train_net); - - unimplemented!(); } // might take a solver state as argument in the future to resume a stopped @@ -126,7 +113,7 @@ impl, B: IBackend + IBlas> Solver{ let stop_iter = start_iter + iters; // int average_loss = this->param_.average_loss(); // Caffe let mut losses = Vec::::new(); - let mut smoothed_loss = 0f32; + let smoothed_loss = 0f32; while self.iter < stop_iter { let mut loss = 0f32; @@ -157,9 +144,11 @@ impl, B: IBackend + IBlas> Solver{ // const bool display = param_.display() && iter_ % param_.display() == 0; // net_->set_debug_info(display && param_.debug_info()); - let noop_bottom = vec![new_shared_heapblob()]; - for _ in 0..self.config.minibatch_size - 1 { - loss += self.net.forward_backward(&noop_bottom); + // let noop_bottom = vec![Blob::from_data(SharedTensor::new(self.net.backend.device(), ()).unwrap())]; + // let minibatch = self.minibatch(); + let minibatch: Vec> = unimplemented!(); + for batch_element in minibatch { + loss += self.net.forward_backward(&[Arc::new(RwLock::new(batch_element))]); } // average the loss across iterations of minibatch loss /= self.config.minibatch_size as f32; @@ -168,11 +157,11 @@ impl, B: IBackend + IBlas> Solver{ if losses.len() < self.config.average_loss { losses.push(loss); let size = losses.len() as f32; - smoothed_loss = (smoothed_loss * (size - 1f32) + loss) / size; + let _ = (smoothed_loss * (size - 1f32) + loss) / size; } else { let idx = (self.iter - start_iter) % self.config.average_loss; smoothed_loss += (loss - losses[idx]) / self.config.average_loss as f32; - losses[idx] = loss; + losses[idx] = smoothed_loss; } // Caffe @@ -203,33 +192,33 @@ impl, B: IBackend + IBlas> Solver{ // callbacks_[i]->on_gradients_ready(); // } - // Caffe / Display - // if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - // LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - // } self.worker.apply_update(&self.config, &mut self.net, self.iter); // Increment the internal iter counter -- its value should always indicate // the number of times the weights have been updated. self.iter += 1; + } + } - // Caffe - // SolverAction::Enum request = GetRequestedAction(); - // - // // Save a snapshot if needed. - // if ((param_.snapshot() - // && iter_ % param_.snapshot() == 0 - // && Caffe::root_solver()) || - // (request == SolverAction::SNAPSHOT)) { - // Snapshot(); - // } - // if (SolverAction::STOP == request) { - // requested_early_exit_ = true; - // // Break out of training loop. - // break; - // } + fn minibatch(&self) -> &Vec> { + unimplemented!(); + } - } + /// Returns the network trained by the solver. + /// + /// This is the recommended method to get a usable trained network. + pub fn network(&self) -> &Network { + &self.net + } + + /// Returns the network trained by the solver. + /// + /// This is the recommended method to get a trained network, + /// if you want to alter the network. Keep in mind that altering the network + /// might render the solver unusable and continuing training the network with it will yield + /// unexpected results. + pub fn mut_network(&mut self) -> &mut Network { + &mut self.net } } @@ -237,7 +226,7 @@ impl, B: IBackend + IBlas> Solver{ /// /// See [Solvers][1] /// [1]: ../solvers/index.html -pub trait ISolver { +pub trait ISolver> { /// Update the weights of the net with part of the gradient. /// /// The [second phase of backpropagation learning][1]. @@ -255,7 +244,7 @@ pub trait ISolver { fn apply_update(&mut self, param: &SolverConfig, network: &mut Network, iter: usize); /// TODO: [DOC] - fn backend(&self) -> &B; + fn backend(&self) -> &SolverB; } #[derive(Debug, Clone)] @@ -483,7 +472,7 @@ pub enum SolverKind { impl SolverKind { /// Create a Solver of the specified kind with the supplied SolverConfig. - pub fn with_config + 'static>(&self, backend: Rc, config: &SolverConfig) -> Box> { + pub fn with_config + 'static, NetB: IBackend + LayerOps>(&self, backend: Rc, config: &SolverConfig) -> Box> { match *self { SolverKind::SGD(sgd) => { sgd.with_config(backend, config) @@ -502,7 +491,8 @@ pub enum SGDKind { impl SGDKind { /// Create a Solver of the specified kind with the supplied SolverConfig. - pub fn with_config + 'static>(&self, backend: Rc, config: &SolverConfig) -> Box> { + pub fn with_config + 'static, NetB: IBackend + LayerOps>(&self, backend: Rc, config: &SolverConfig) -> Box> { + // pub fn with_config + 'static>(&self, backend: Rc, config: &SolverConfig) -> Box> { match *self { SGDKind::Momentum => { Box::new(Momentum::::new(backend)) diff --git a/src/solvers/mod.rs b/src/solvers/mod.rs index 0c179fe0..9703e91c 100644 --- a/src/solvers/mod.rs +++ b/src/solvers/mod.rs @@ -31,17 +31,16 @@ pub use self::sgd::{Momentum}; pub mod sgd; -use co::backend::IBackend; -use co::shared_memory::SharedMemory; -use co::libraries::blas::IBlas; -use shared_memory::*; +use co::{IBackend, MemoryType, SharedTensor}; +use conn::NN; use solver::*; use network::Network; +use util::{ArcLock, native_backend, LayerOps, SolverOps}; -trait SGDSolver> : ISolver { +trait SGDSolver, NetB: IBackend + LayerOps> : ISolver { fn compute_update_value(&mut self, config: &SolverConfig, - weight_blob: &ArcLock, + weight_blob: &ArcLock>, history_blob_id: usize, global_lr: &f32, blob_lr: &f32); @@ -58,22 +57,31 @@ trait SGDSolver> : ISolver { /// /// [3]: https://en.wikipedia.org/wiki/Recurrent_neural_network /// [4]: https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm - fn clip_gradients(&self, config: &SolverConfig, net: &mut Network) { + #[allow(unused_must_use)] + fn clip_gradients>(&self, config: &SolverConfig, net: &mut Network) { // skip clipping gradients if SolverConfig.clip_gradients is set to None if let Some(clip_threshold) = config.clip_gradients { - let net_weights = net.learnable_weights(); + let native = native_backend(); + + let net_gradients = net.learnable_weight_gradients(); let mut sumsq_diff = 0f32; let backend = self.backend(); - let mut result = SharedMemory::::new(backend.device(), 1); - for weight_blob in net_weights { - let mut blob = weight_blob.write().unwrap(); - // self.backend().nrm2(blob.mut_diff(), &mut result); - // TODO - // let blob_sumsq_diff = leaf_cpu_dot(blob.cpu_diff(), blob.cpu_diff()); - // sumsq_diff += blob_sumsq_diff; + for net_gradient in net_gradients { + let gradient = net_gradient.read().unwrap(); + let mut result = SharedTensor::::new(backend.device(), &1).unwrap(); + // gradient.sumsq_diff(self.backend(), &mut result); + self.backend().dot_plain(&gradient, &gradient, &mut result); + + let mut result = SharedTensor::::new(backend.device(), &1).unwrap(); + match result.add_device(native.device()) { _ => result.sync(native.device()).unwrap() } + if let &MemoryType::Native(ref sumsq_result) = result.get(native.device()).unwrap() { + let sumsq_diff_slice = sumsq_result.as_slice::(); + sumsq_diff += sumsq_diff_slice[0]; + } else { + panic!(); + } } let l2norm_diff = sumsq_diff.sqrt(); - unimplemented!(); // needs either simple devision or similar if l2norm_diff > clip_threshold { let scale_factor = clip_threshold / l2norm_diff; info!("Gradient clipping: scaling down gradients (L2 norm {} > {}) @@ -82,11 +90,17 @@ trait SGDSolver> : ISolver { clip_threshold, scale_factor); - for weight_blob in net_weights { - let mut blob = weight_blob.write().unwrap(); - let diff = blob.mut_diff(); - // TODO - // leaf_cpu_scal(&scale_factor, diff); + let mut scale_shared = SharedTensor::::new(native.device(), &1).unwrap(); + if let &mut MemoryType::Native(ref mut scale) = scale_shared.get_mut(native.device()).unwrap() { + let scale_slice = scale.as_mut_slice::(); + scale_slice[0] = scale_factor; + } else { + panic!(); + } + + for weight_gradient in net_gradients { + let mut gradient = weight_gradient.write().unwrap(); + backend.scal(&mut scale_shared, &mut gradient); } } } @@ -98,20 +112,26 @@ trait SGDSolver> : ISolver { /// To counteract that we are accumulating the gradients over multiple samples, /// we need to scale the gradients down to the equivalent of a single sample.
/// E.g. with a `minibatch_size` of 4 we need to scale the gradient by 0.25 (= 1/4). - fn normalize(&self, config: &SolverConfig, weight_blob: &ArcLock) { + fn normalize(&self, config: &SolverConfig, weight_blob: &ArcLock>) { if config.minibatch_size > 1 { let scale_factor = 1f32 / config.minibatch_size as f32; - let mut write_blob = weight_blob.write().unwrap(); - let mut shared_scale_factor = SharedMemory::::new(self.backend().device(), 1); - // let _ = self.backend().scale(&mut shared_scale_factor, write_blob.mut_diff()); - unimplemented!(); + let mut gradient = weight_blob.write().unwrap(); + let native = native_backend(); + let mut scale_factor_shared = SharedTensor::::new(native.device(), &1).unwrap(); + if let &mut MemoryType::Native(ref mut scale) = scale_factor_shared.get_mut(native.device()).unwrap() { + let scale_slice = scale.as_mut_slice::(); + scale_slice[0] = scale_factor; + } else { + panic!(); + } + self.backend().scal_plain(&scale_factor_shared, &mut gradient).unwrap(); } } /// [Regularize][1] the gradient according to the configured [RegularizationMethod][2]. /// [1]: https://cs231n.github.io/neural-networks-2/#reg /// [2]: ../solver/enum.RegularizationMethod.html - fn regularize(&self, config: &SolverConfig, weight_blob: &ArcLock, blob_weight_decay: Option) { + fn regularize(&self, config: &SolverConfig, weight_gradient: &ArcLock>, blob_weight_decay: Option) { if let Some(global_weight_decay) = config.weight_decay { if let Some(regularization_method) = config.regularization_method { match blob_weight_decay { @@ -119,15 +139,24 @@ trait SGDSolver> : ISolver { let local_decay = global_weight_decay * weight_decay_mult; match regularization_method { RegularizationMethod::L2 => { - // TODO - // leaf_cpu_axpy(&local_decay, - // weight_blob.read().unwrap().cpu_data(), - // weight_blob.write().unwrap().mutable_cpu_diff()); + let native = native_backend(); + let mut decay_shared = SharedTensor::::new(native.device(), &1).unwrap(); + if let &mut MemoryType::Native(ref mut decay) = decay_shared.get_mut(native.device()).unwrap() { + let decay_slice = decay.as_mut_slice::(); + decay_slice[0] = local_decay; + } else { + panic!(); + } + let gradient = &mut weight_gradient.write().unwrap(); + // gradient.regularize_l2(self.backend(), &decay_shared); + // backend.axpy_plain(&decay_shared, &self.data, &mut self.diff).unwrap(); + // TODO: solver + unimplemented!(); } } } None => { - error!("Weight decay multiplier for blob missing."); + error!("Weight decay multiplier for gradient missing."); } } } diff --git a/src/solvers/sgd/mod.rs b/src/solvers/sgd/mod.rs index 229936fe..bd7becde 100644 --- a/src/solvers/sgd/mod.rs +++ b/src/solvers/sgd/mod.rs @@ -25,26 +25,26 @@ #[macro_export] macro_rules! impl_isolver_sgd { ($t:ty) => ( - impl> ISolver for $t { - fn apply_update(&mut self, config: &SolverConfig, net: &mut Network, iter: usize) { + impl, NetB: IBackend + LayerOps> ISolver for $t { + fn apply_update(&mut self, config: &SolverConfig, net: &mut Network, iter: usize) { // CHECK(Caffe::root_solver()); // Caffe let rate = config.get_learning_rate(iter); - self.clip_gradients(config, net); - for (weight_id, weight_blob) in net.learnable_weights().iter().enumerate() { - self.normalize(config, weight_blob); - self.regularize(config, weight_blob, net.weights_weight_decay()[weight_id]); + SGDSolver::::clip_gradients(self, config, net); + for (weight_id, weight_gradient) in net.learnable_weight_gradients().iter().enumerate() { + SGDSolver::::normalize(self, config, weight_gradient); + SGDSolver::::regularize(self, config, weight_gradient, net.weights_weight_decay()[weight_id]); - self.compute_update_value(config, - weight_blob, + SGDSolver::::compute_update_value(self, config, + weight_gradient, weight_id, &rate, &net.weights_lr()[weight_id].unwrap()); } - net.update_weights(); + net.update_weights(ISolver::::backend(self)); } - fn backend(&self) -> &B { + fn backend(&self) -> &SolverB { &self.backend } } diff --git a/src/solvers/sgd/momentum.rs b/src/solvers/sgd/momentum.rs index c4dfbbf1..2ee288c0 100644 --- a/src/solvers/sgd/momentum.rs +++ b/src/solvers/sgd/momentum.rs @@ -13,26 +13,29 @@ //! into the same direction you will reach the optimum faster. //! It also makes solving more stable. use co::backend::*; -use co::libraries::blas::IBlas; -use shared_memory::*; +use co::tensor::*; +use co::memory::MemoryType; +// use shared_memory::*; use network::Network; use solver::*; use solvers::SGDSolver; use std::rc::Rc; +use std::sync::{Arc, RwLock}; +use util::*; #[derive(Debug, Clone)] /// Stochastic Gradient Descent with Momentum. /// /// See [module description][1] for more information. /// [1]: ./index.html -pub struct Momentum> { +pub struct Momentum> { /// The gradient update from the previous iteration for each blob. - history: Vec>, + history: Vec>>, /// The backend used for computing the gradient. - backend: Rc, + backend: Rc, } -impl> Momentum { +impl> Momentum { /// Create a new SGD Momentum solver. /// /// Should not be called directly. @@ -40,7 +43,7 @@ impl> Momentum { /// /// [1]: ../../../network/struct.Network.html#method.from_config /// [2]: ../../../solver/struct.Solver.html#method.from_config - pub fn new(backend: Rc) -> Momentum { + pub fn new(backend: Rc) -> Momentum { Momentum { history: Vec::new(), backend: backend @@ -48,38 +51,54 @@ impl> Momentum { } /// Initialize the SGD Momentum solver, allocating memory for its history. - fn init(&mut self, net: &Network) { - self.history = Vec::with_capacity(net.learnable_weights().len()); + fn init>(&mut self, net: &Network) { + self.history = Vec::with_capacity(net.learnable_weight_gradients().len()); - for weight_blob in net.learnable_weights() { - let shape = weight_blob.read().unwrap().shape(); - let history_blob = new_shared_heapblob(); - history_blob.write().unwrap().reshape(&shape); - self.history.push(history_blob); + for weight_gradient in net.learnable_weight_gradients() { + let shape = weight_gradient.read().unwrap().desc().clone(); + let history_tensor = Arc::new(RwLock::new(SharedTensor::new(self.backend.device(), &shape).unwrap())); + self.history.push(history_tensor); } } } -impl> SGDSolver for Momentum { +impl, NetB: IBackend + LayerOps> SGDSolver for Momentum { fn compute_update_value(&mut self, config: &SolverConfig, - weight_blob: &ArcLock, + weight_gradient: &ArcLock>, history_blob_id: usize, global_lr: &f32, blob_lr: &f32) { let history_blob = &self.history[history_blob_id]; - let momentum = config.momentum; + let local_momentum = config.momentum; let local_lr = global_lr * blob_lr; + let mut lr_shared = SharedTensor::::new(self.backend.device(), &1).unwrap(); + if let &mut MemoryType::Native(ref mut lr) = lr_shared.get_mut(self.backend.device()).unwrap() { + let lr_slice = lr.as_mut_slice::(); + lr_slice[0] = local_lr; + } else { + panic!(); + } + + let mut momentum_shared = SharedTensor::::new(self.backend.device(), &1).unwrap(); + if let &mut MemoryType::Native(ref mut momentum) = momentum_shared.get_mut(self.backend.device()).unwrap() { + let momentum_slice = momentum.as_mut_slice::(); + momentum_slice[0] = local_momentum; + } else { + panic!(); + } + // Compute the update to history, then copy it to the parameter diff. - // TODO - // leaf_cpu_axpby(&local_lr, - // weight_blob.read().unwrap().cpu_diff(), - // &momentum, - // history_blob.write().unwrap().mutable_cpu_data()); - // TODO - // *weight_blob.write().unwrap().mut_diff() = *history_blob.read().unwrap().data().clone(); + let _ = Axpby::::axpby_plain(ISolver::::backend(self), + &lr_shared, + &weight_gradient.read().unwrap(), + &momentum_shared, + &mut history_blob.write().unwrap()); + + let _ = ISolver::::backend(self).copy_plain( + &history_blob.read().unwrap(), &mut weight_gradient.write().unwrap()); } } -impl_isolver_sgd!(Momentum); +impl_isolver_sgd!(Momentum); diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 00000000..b54acebd --- /dev/null +++ b/src/util.rs @@ -0,0 +1,87 @@ +//! Provides common utility functions +use std::sync::{Arc, RwLock}; +use co::backend::{Backend, BackendConfig}; +use co::framework::IFramework; +use co::frameworks::Native; +use co::memory::MemoryType; +use co::tensor::SharedTensor; +use co::plugin::numeric_helpers::*; +use coblas::plugin::*; +use conn; + +/// Shared Lock used for our tensors +pub type ArcLock = Arc>; + +/// Create a simple native backend. +/// +/// This is handy when you need to sync data to host memory to read/write it. +pub fn native_backend() -> Backend { + let framework = Native::new(); + let hardwares = &framework.hardwares().to_vec(); + let backend_config = BackendConfig::new(framework, hardwares); + Backend::new(backend_config).unwrap() +} + +/// Write into a native Collenchyma Memory. +pub fn write_to_memory(mem: &mut MemoryType, data: &[T]) { + match mem { + &mut MemoryType::Native(ref mut mem) => { + let mut mem_buffer = mem.as_mut_slice::(); + for (index, datum) in data.iter().enumerate() { + mem_buffer[index] = *datum; + } + }, + #[cfg(any(feature = "opencl", feature = "cuda"))] + _ => {} + } +} + +/// Create a Collenchyma SharedTensor for a scalar value. +pub fn native_scalar(scalar: T) -> SharedTensor { + let native = native_backend(); + let mut shared_scalar = SharedTensor::::new(native.device(), &vec![1]).unwrap(); + write_to_memory(shared_scalar.get_mut(native.device()).unwrap(), &[scalar]); + + shared_scalar +} + +/// Casts a Vec to as Vec +pub fn cast_vec_usize_to_i32(input: Vec) -> Vec { + let mut out = Vec::new(); + for i in input.iter() { + out.push(*i as i32); + } + out +} + +/// Extends IBlas with Axpby +pub trait Axpby : Axpy + Scal { + /// Performs the operation y := a*x + b*y . + /// + /// Consists of a scal(b, y) followed by a axpby(a,x,y). + fn axpby_plain(&self, a: &SharedTensor, x: &SharedTensor, b: &SharedTensor, y: &mut SharedTensor) -> Result<(), ::co::error::Error> { + try!(self.scal_plain(b, y)); + try!(self.axpy_plain(a, x, y)); + Ok(()) + } +} + +impl + Scal> Axpby for T {} + +/// Encapsulates all traits required by Solvers. +pub trait SolverOps : Axpby + Dot + Copy {} + +impl + Dot + Copy> SolverOps for T {} + +/// Encapsulates all traits used in Layers. +pub trait LayerOps : conn::Convolution + conn::Pooling + conn::Relu + conn::Sigmoid + conn::Softmax + conn::LogSoftmax + + Gemm {} + +impl + conn::Pooling + conn::Relu + conn::Sigmoid + conn::Softmax + conn::LogSoftmax + + Gemm> LayerOps for T {} + +// pub trait LayerOps : conn::Relu + conn::Sigmoid + conn::Softmax + conn::LogSoftmax +// + Gemm {} +// +// impl + conn::Sigmoid + conn::Softmax + conn::LogSoftmax +// + Gemm> LayerOps for T {} diff --git a/src/weight.rs b/src/weight.rs new file mode 100644 index 00000000..c1c6be72 --- /dev/null +++ b/src/weight.rs @@ -0,0 +1,185 @@ +//! Provides configuration of weights and their initialization. +use rand; +use rand::distributions::{IndependentSample, Range}; +use co::{ITensorDesc, SharedTensor}; +use co::plugin::numeric_helpers::Float; +// use shared_memory::*; +use util::native_backend; + +#[derive(Debug, Clone)] +/// Specifies training configuration for a weight blob. +pub struct WeightConfig { + /// The name of the weight blob -- useful for sharing weights among + /// layers, but never required otherwise. To share a weight between two + /// layers, give it a (non-empty) name. + /// + /// Default: "" + pub name: String, + /// Whether to require shared weights to have the same shape, or just the same + /// count + /// + /// Default: DimCheckMode::Strict + pub share_mode: DimCheckMode, + + /// The multiplier on the global learning rate for this parameter. + /// + /// Default: 1.0f32 + pub lr_mult: Option, + + /// The multiplier on the global weight decay for this parameter. + /// + /// Default: 1.0f32 + pub decay_mult: Option, + + /// The filler that initializes the weights in the weight blob. + /// + /// Default: None + pub filler: Option, +} + +impl Default for WeightConfig { + fn default() -> WeightConfig { + WeightConfig { + name: "".to_owned(), + share_mode: DimCheckMode::Strict, + lr_mult: None, + decay_mult: None, + filler: None, + } + } +} + +impl WeightConfig { + /// Checks dimensions of two blobs according to the `share_mode`. + /// Returns an error if there is a count/shape mismatch. + pub fn check_dimensions(&self, + tensor_one: &SharedTensor, + tensor_two: &SharedTensor, + param_name: String, + owner_name: String, + layer_name: String) + -> Result<(), String> { + match self.share_mode { + // Permissive dimension checking -- only check counts are the same. + DimCheckMode::Permissive => { + if tensor_one.desc().size() != tensor_two.desc().size() { + return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}'; + count mismatch. + Owner layer weight shape is {:?}; + Sharing layer weight shape is {:?}", + param_name, + owner_name, + layer_name, + tensor_two.desc(), + tensor_one.desc())); + } + } + // Strict dimension checking -- all dims must be the same. + DimCheckMode::Strict => { + if tensor_one.desc().size() != tensor_two.desc().size() { + return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}'; + shape mismatch. + Owner layer weight shape is {:?}; + Sharing layer expects weight shape {:?}", + param_name, + owner_name, + layer_name, + tensor_two.desc(), + tensor_one.desc())); + } + } + } + Ok(()) + } + + /// The multiplier on the global learning rate for this weight blob. + pub fn lr_mult(&self) -> f32 { + match self.lr_mult { + Some(val) => val, + None => 1.0f32, + } + } + + /// The multiplier on the global weight decay for this weight blob. + pub fn decay_mult(&self) -> f32 { + match self.decay_mult { + Some(val) => val, + None => 1.0f32, + } + } +} + +#[derive(Debug, Copy, Clone)] +/// Enum for specifing the shared weights behaviour +pub enum DimCheckMode { + /// Strict requires that shapes match. + Strict, + /// Permissive requires only the count of weights to match. + Permissive, +} + +#[derive(Debug, Copy, Clone)] +/// Enum for specifing the type of Filler. +pub enum FillerType { + /// Fills the weight blob with a constant `value` (all values are the same). + Constant { + /// The value that will be used to fill the blob. + value: f32 + }, + /// Fills the weight blobs based on the paper: + /// + /// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.` + /// + /// Also known as Xavier filler. + Glorot { + /// Number of input nodes for each output. + input_size: usize, + /// Number of output nodes for each input. + output_size: usize, + }, +} + +impl FillerType { + /// Uses a filler as specified by this FillerType to fill the values in a SharedTensor + /// + /// This filling of weights is usually done directly after creation of the weight blob. + pub fn fill(&self, weight: &mut SharedTensor) { + let native = native_backend(); + let native_device = native.device(); + let actual_device = weight.latest_device().clone(); + // sync to native so we can fill + match weight.add_device(native_device) { _ => weight.sync(native_device).unwrap() } + + match *self { + FillerType::Constant { value } => Self::fill_constant(weight, value), + FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size), + } + + // sync back to the actual device + weight.sync(&actual_device).unwrap(); + } + + /// Directly use the [Constant Filler](#variant.Constant). + pub fn fill_constant(weight: &mut SharedTensor, value: f32) { + let native = native_backend(); + let native_weight = weight.get_mut(native.device()).unwrap().as_mut_native().unwrap(); + + for e in native_weight.as_mut_slice::() { + *e = value; + } + } + + /// Directly use the [Glorot Filler](#variant.Glorot). + pub fn fill_glorot(weight: &mut SharedTensor, num_inputs: usize, num_outputs: usize) { + let native = native_backend(); + let native_weight = weight.get_mut(native.device()).unwrap().as_mut_native().unwrap(); + + let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt(); + + let between = Range::new(-init_range, init_range); + let mut rng = rand::thread_rng(); + for e in native_weight.as_mut_slice::() { + *e = between.ind_sample(&mut rng); + } + } +} diff --git a/tests/layer_specs.rs b/tests/layer_specs.rs index 99529764..c0fe4a84 100644 --- a/tests/layer_specs.rs +++ b/tests/layer_specs.rs @@ -1,12 +1,11 @@ extern crate leaf; -extern crate phloem; extern crate collenchyma as co; -#[cfg(test)] +#[cfg(all(test, whatever))] +// #[cfg(test)] mod layer_spec { use leaf::layer::*; - use phloem::Blob; use std::rc::Rc; use co::backend::{Backend, BackendConfig}; use co::frameworks::Native; diff --git a/tests/network_specs.rs b/tests/network_specs.rs index c5d05a99..c25b2391 100644 --- a/tests/network_specs.rs +++ b/tests/network_specs.rs @@ -1,25 +1,383 @@ +#[macro_use] +extern crate log; +extern crate env_logger; extern crate leaf; -extern crate phloem; extern crate collenchyma as co; +extern crate collenchyma_nn as conn; #[cfg(test)] mod network_spec { use std::rc::Rc; - use co::backend::{Backend, BackendConfig}; - use co::framework::IFramework; - use co::frameworks::Native; + use std::sync::{Arc, RwLock}; + use co::prelude::*; use leaf::network::*; + use leaf::layers::*; + use leaf::layer::{LayerConfig, LayerType}; + use env_logger; - fn backend() -> Rc> { - let framework = Native::new(); - let hardwares = framework.hardwares(); - let backend_config = BackendConfig::new(framework, hardwares); - Rc::new(Backend::new(backend_config).unwrap()) + #[cfg(feature="cuda")] + fn cuda_backend() -> Rc> { + Rc::new(Backend::::default().unwrap()) } + fn native_backend() -> Rc> { + Rc::new(Backend::::default().unwrap()) + } + + #[cfg(feature="cuda")] #[test] - fn new_layer() { + fn new_network() { let cfg = NetworkConfig::default(); - Network::from_config(backend(), &cfg); + Network::from_config(cuda_backend(), &cfg); + } + + #[cfg(feature="cuda")] + #[test] + fn mnist_forward_backward() { + let _ = env_logger::init(); + let mut cfg = NetworkConfig::default(); + // set up input + cfg.add_input("in", &vec![1, 30, 30]); + cfg.add_input("label", &vec![1, 1, 10]); + // set up convolution + + // layer { + // name: "conv1" + // type: "Convolution" + // param { lr_mult: 1 } + // param { lr_mult: 2 } + // convolution_param { + // num_output: 20 + // kernel_size: 5 + // stride: 1 + // weight_filler { + // type: "xavier" + // } + // bias_filler { + // type: "constant" + // } + // } + // bottom: "data" + // top: "conv1" + // } + + // let conv_layer_cfg = ConvolutionConfig { + // num_output: 20, + // filter_shape: vec![5], + // padding: vec![0], // ? + // stride: vec![1], + // axis: None + // }; + // let mut conv_cfg = LayerConfig::new("conv", LayerType::Convolution(conv_layer_cfg)); + // conv_cfg.add_input("in"); + // conv_cfg.add_output("conv_out"); + // cfg.add_layer(conv_cfg); + + // set up sigmoid + let mut sig_cfg = LayerConfig::new("sig", LayerType::Sigmoid); + sig_cfg.add_input("in"); + sig_cfg.add_output("sig_out"); + cfg.add_layer(sig_cfg); + + let mut fc_layer_cfg = LinearConfig { + output_size: 10, + }; + let mut fc_cfg = LayerConfig::new("fully_connected", LayerType::Linear(fc_layer_cfg)); + fc_cfg.add_input("sig_out"); + fc_cfg.add_output("fc_out"); + cfg.add_layer(fc_cfg); + // // set up softmax_loss + // let mut loss_cfg = LayerConfig::new("loss", LayerType::SoftmaxLoss); + // loss_cfg.add_input("fc_out"); + // loss_cfg.add_input("label"); + // cfg.add_layer(loss_cfg); + + let backend = cuda_backend(); + let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![1, 30, 30]).unwrap(); + let label = SharedTensor::::new(native_backend.device(), &vec![1, 1, 10]).unwrap(); + + let inp_lock = Arc::new(RwLock::new(inp)); + let label_lock = Arc::new(RwLock::new(label)); + network.forward(&[inp_lock, label_lock], loss); + network.backward(); + } + + #[cfg(feature="cuda")] + #[test] + #[ignore] + // modeled after https://github.com/soumith/convnet-benchmarks/blob/c4dfa528cd7f2abd2e9abd91b294f91d01146c42/caffe/imagenet_winners/alexnet.prototxt + fn alexnet_forward() { + let _ = env_logger::init(); + let mut cfg = NetworkConfig::default(); + // Layer: data + cfg.add_input("data", &vec![128, 3, 224, 224]); + // Layer: conv1 + let conv1_layer_cfg = ConvolutionConfig { + num_output: 64, + filter_shape: vec![11], + padding: vec![2], + stride: vec![4], + axis: None + }; + let mut conv1_cfg = LayerConfig::new("conv1", LayerType::Convolution(conv1_layer_cfg)); + conv1_cfg.add_input("data"); + conv1_cfg.add_output("conv1_preac"); + cfg.add_layer(conv1_cfg); + // Layer: conv1/relu + let mut conv1_relu_cfg = LayerConfig::new("conv1/relu", LayerType::ReLU); + conv1_relu_cfg.add_input("conv1_preac"); + conv1_relu_cfg.add_output("conv1_out"); + cfg.add_layer(conv1_relu_cfg); + // Layer: pool1 + let pool1_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool1_cfg = LayerConfig::new("pool1", LayerType::Pooling(pool1_layer_cfg)); + pool1_cfg.add_input("conv1_out"); + pool1_cfg.add_output("pool1_out"); + cfg.add_layer(pool1_cfg); + // Layer: conv2 + let conv2_layer_cfg = ConvolutionConfig { + num_output: 192, + filter_shape: vec![5], + padding: vec![2], + stride: vec![1], + axis: None + }; + let mut conv2_cfg = LayerConfig::new("conv2", LayerType::Convolution(conv2_layer_cfg)); + conv2_cfg.add_input("pool1_out"); + conv2_cfg.add_output("conv2_preac"); + cfg.add_layer(conv2_cfg); + // Layer: conv2/relu + let mut conv2_relu_cfg = LayerConfig::new("conv2/relu", LayerType::ReLU); + conv2_relu_cfg.add_input("conv2_preac"); + conv2_relu_cfg.add_output("conv2_out"); + cfg.add_layer(conv2_relu_cfg); + // Layer: pool2 + let pool2_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool2_cfg = LayerConfig::new("pool2", LayerType::Pooling(pool2_layer_cfg)); + pool2_cfg.add_input("conv2_out"); + pool2_cfg.add_output("pool2_out"); + cfg.add_layer(pool2_cfg); + // Layer: conv3 + let conv3_layer_cfg = ConvolutionConfig { + num_output: 284, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv3_cfg = LayerConfig::new("conv3", LayerType::Convolution(conv3_layer_cfg)); + conv3_cfg.add_input("pool2_out"); + conv3_cfg.add_output("conv3_preac"); + cfg.add_layer(conv3_cfg); + // Layer: conv3/relu + let mut conv3_relu_cfg = LayerConfig::new("conv3/relu", LayerType::ReLU); + conv3_relu_cfg.add_input("conv3_preac"); + conv3_relu_cfg.add_output("conv3_out"); + cfg.add_layer(conv3_relu_cfg); + // // Layer: conv4 + // let conv4_layer_cfg = ConvolutionConfig { + // num_output: 256, + // filter_shape: vec![3], + // padding: vec![1], + // stride: vec![1], + // axis: None + // }; + // let mut conv4_cfg = LayerConfig::new("conv4", LayerType::Convolution(conv4_layer_cfg)); + // conv4_cfg.add_input("conv3_out"); + // conv4_cfg.add_output("conv4_preac"); + // cfg.add_layer(conv4_cfg); + // // Layer: conv4/relu + // let mut conv4_relu_cfg = LayerConfig::new("conv4/relu", LayerType::ReLU); + // conv4_relu_cfg.add_input("conv4_preac"); + // conv4_relu_cfg.add_output("conv4_out"); + // cfg.add_layer(conv4_relu_cfg); + + let backend = cuda_backend(); + let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![128, 3, 224, 224]).unwrap(); + // let label = Blob::from_data(SharedTensor::::new(native_backend.device(), &vec![1, 1, 10]).unwrap()); + + let inp_lock = Arc::new(RwLock::new(inp)); + network.forward(&[inp_lock], loss); + } + + #[cfg(feature="cuda")] + #[test] + // modeled after https://github.com/soumith/convnet-benchmarks/blob/c4dfa528cd7f2abd2e9abd91b294f91d01146c42/caffe/imagenet_winners/alexnet.prototxt + // smaller version to fit into 2GB of GPU memory + // - reduced the input dimensions from 224 to 112 + // - halfed all the convolution layers output dimensions + fn small_alexnet_forward() { + let _ = env_logger::init(); + let mut cfg = NetworkConfig::default(); + // Layer: data + cfg.add_input("data", &vec![128, 3, 112, 112]); + // Layer: conv1 + let conv1_layer_cfg = ConvolutionConfig { + num_output: 32, + filter_shape: vec![11], + padding: vec![2], + stride: vec![4], + axis: None + }; + let mut conv1_cfg = LayerConfig::new("conv1", LayerType::Convolution(conv1_layer_cfg)); + conv1_cfg.add_input("data"); + conv1_cfg.add_output("conv1_preac"); + cfg.add_layer(conv1_cfg); + // Layer: conv1/relu + let mut conv1_relu_cfg = LayerConfig::new("conv1/relu", LayerType::ReLU); + conv1_relu_cfg.add_input("conv1_preac"); + conv1_relu_cfg.add_output("conv1_out"); + cfg.add_layer(conv1_relu_cfg); + // Layer: pool1 + let pool1_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool1_cfg = LayerConfig::new("pool1", LayerType::Pooling(pool1_layer_cfg)); + pool1_cfg.add_input("conv1_out"); + pool1_cfg.add_output("pool1_out"); + cfg.add_layer(pool1_cfg); + // Layer: conv2 + let conv2_layer_cfg = ConvolutionConfig { + num_output: 96, + filter_shape: vec![5], + padding: vec![2], + stride: vec![1], + axis: None + }; + let mut conv2_cfg = LayerConfig::new("conv2", LayerType::Convolution(conv2_layer_cfg)); + conv2_cfg.add_input("pool1_out"); + conv2_cfg.add_output("conv2_preac"); + cfg.add_layer(conv2_cfg); + // Layer: conv2/relu + let mut conv2_relu_cfg = LayerConfig::new("conv2/relu", LayerType::ReLU); + conv2_relu_cfg.add_input("conv2_preac"); + conv2_relu_cfg.add_output("conv2_out"); + cfg.add_layer(conv2_relu_cfg); + // Layer: pool2 + let pool2_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool2_cfg = LayerConfig::new("pool2", LayerType::Pooling(pool2_layer_cfg)); + pool2_cfg.add_input("conv2_out"); + pool2_cfg.add_output("pool2_out"); + cfg.add_layer(pool2_cfg); + // Layer: conv3 + let conv3_layer_cfg = ConvolutionConfig { + num_output: 142, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv3_cfg = LayerConfig::new("conv3", LayerType::Convolution(conv3_layer_cfg)); + conv3_cfg.add_input("pool2_out"); + conv3_cfg.add_output("conv3_preac"); + cfg.add_layer(conv3_cfg); + // Layer: conv3/relu + let mut conv3_relu_cfg = LayerConfig::new("conv3/relu", LayerType::ReLU); + conv3_relu_cfg.add_input("conv3_preac"); + conv3_relu_cfg.add_output("conv3_out"); + cfg.add_layer(conv3_relu_cfg); + // Layer: conv4 + let conv4_layer_cfg = ConvolutionConfig { + num_output: 128, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv4_cfg = LayerConfig::new("conv4", LayerType::Convolution(conv4_layer_cfg)); + conv4_cfg.add_input("conv3_out"); + conv4_cfg.add_output("conv4_preac"); + cfg.add_layer(conv4_cfg); + // Layer: conv4/relu + let mut conv4_relu_cfg = LayerConfig::new("conv4/relu", LayerType::ReLU); + conv4_relu_cfg.add_input("conv4_preac"); + conv4_relu_cfg.add_output("conv4_out"); + cfg.add_layer(conv4_relu_cfg); + // Layer: conv5 + let conv5_layer_cfg = ConvolutionConfig { + num_output: 128, + filter_shape: vec![3], + padding: vec![1], + stride: vec![1], + axis: None + }; + let mut conv5_cfg = LayerConfig::new("conv5", LayerType::Convolution(conv5_layer_cfg)); + conv5_cfg.add_input("conv4_out"); + conv5_cfg.add_output("conv5_preac"); + cfg.add_layer(conv5_cfg); + // Layer: conv5/relu + let mut conv5_relu_cfg = LayerConfig::new("conv5/relu", LayerType::ReLU); + conv5_relu_cfg.add_input("conv5_preac"); + conv5_relu_cfg.add_output("conv5_out"); + cfg.add_layer(conv5_relu_cfg); + // Layer: pool3 + let pool3_layer_cfg = PoolingConfig { + mode: PoolingMode::Max, + filter_shape: vec![3], + stride: vec![2], + padding: vec![0], // TODO: make optional + }; + let mut pool3_cfg = LayerConfig::new("pool3", LayerType::Pooling(pool3_layer_cfg)); + pool3_cfg.add_input("conv5_out"); + pool3_cfg.add_output("pool3_out"); + cfg.add_layer(pool3_cfg); + // Layer: fc1 + let mut fc1_layer_cfg = LinearConfig { + output_size: 2048, + }; + let mut fc1_cfg = LayerConfig::new("fc1", LayerType::Linear(fc1_layer_cfg)); + fc1_cfg.add_input("pool3_out"); + fc1_cfg.add_output("fc1_out"); + cfg.add_layer(fc1_cfg); + // Layer: fc2 + let mut fc2_layer_cfg = LinearConfig { + output_size: 2048, + }; + let mut fc2_cfg = LayerConfig::new("fc2", LayerType::Linear(fc2_layer_cfg)); + fc2_cfg.add_input("fc1_out"); + fc2_cfg.add_output("fc2_out"); + cfg.add_layer(fc2_cfg); + // Layer: fc3 + let mut fc3_layer_cfg = LinearConfig { + output_size: 500, + }; + let mut fc3_cfg = LayerConfig::new("fc3", LayerType::Linear(fc3_layer_cfg)); + fc3_cfg.add_input("fc2_out"); + fc3_cfg.add_output("fc3_out"); + cfg.add_layer(fc3_cfg); + + let backend = cuda_backend(); + let native_backend = native_backend(); + let mut network = Network::from_config(backend.clone(), &cfg); + let loss = &mut 0f32; + let inp = SharedTensor::::new(backend.device(), &vec![128, 3, 112, 112]).unwrap(); + // let label = Blob::from_data(SharedTensor::::new(native_backend.device(), &vec![1, 1, 10]).unwrap()); + + let inp_lock = Arc::new(RwLock::new(inp)); + network.forward(&[inp_lock], loss); } } diff --git a/tests/solver_specs.rs b/tests/solver_specs.rs index ba6bbb83..10eabff4 100644 --- a/tests/solver_specs.rs +++ b/tests/solver_specs.rs @@ -1,8 +1,9 @@ extern crate leaf; extern crate collenchyma as co; -#[cfg(test)] -mod network_spec { +#[cfg(all(test, whatever))] +// #[cfg(test)] +mod solver_specs { use leaf::solver::*; use co::backend::Backend; use co::frameworks::Native;