diff --git a/include/caffe/layers/mkl_layers.hpp b/include/caffe/layers/mkl_layers.hpp index 32ce6e622..361b4b1b4 100644 --- a/include/caffe/layers/mkl_layers.hpp +++ b/include/caffe/layers/mkl_layers.hpp @@ -73,8 +73,8 @@ class MKLConvolutionLayer : public ConvolutionLayer { const vector& propagate_down, const vector*>& bottom); // Customized methods - void Init( const vector*>& bottom, - const vector*>& top); + void Init(const vector*>& bottom, + const vector*>& top); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); @@ -169,6 +169,9 @@ class MKLLRNLayer : public Layer { virtual void CrossChannelBackward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + void Init(const vector*>& bottom, + const vector*>& top); + int size_; int pre_pad_; Dtype alpha_; @@ -205,6 +208,9 @@ class MKLPoolingLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); + void Init(const vector*>& bottom, + const vector*>& top); + virtual inline const char* type() const { return "Pooling"; } virtual inline int ExactNumBottomBlobs() const { return 1; } virtual inline int MinTopBlobs() const { return 1; } @@ -270,6 +276,12 @@ class MKLReLULayer : public NeuronLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + void Init(const vector*>& bottom, + const vector*>& top); + virtual inline const char* type() const { return "ReLU"; } protected: @@ -291,6 +303,8 @@ class MKLReLULayer : public NeuronLayer { shared_ptr > bwd_top_diff_; shared_ptr > bwd_bottom_diff_; dnnPrimitive_t reluFwd_, reluBwd_; + vector sizes_; + vector strides_; }; template @@ -301,8 +315,8 @@ class MKLConcatLayer : public Layer { concatFwd_(static_cast(NULL)), concatBwd_(static_cast(NULL)), fwd_top_data_ (new MKLData()), - bwd_top_diff_ (new MKLDiff()) { - } + bwd_top_diff_ (new MKLDiff()), + split_channels_ (NULL) { } virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, @@ -323,6 +337,9 @@ class MKLConcatLayer : public Layer { const vector& propagate_down, const vector*>& bottom); + void Init(const vector*>& bottom, + const vector*>& top); + private: dnnPrimitive_t concatFwd_; dnnPrimitive_t concatBwd_; @@ -375,6 +392,9 @@ class MKLBatchNormLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + void Init(const vector*>& bottom, + const vector*>& top); + // Dtype moving_average_fraction_; Dtype eps_; bool use_weight_bias_; @@ -410,6 +430,9 @@ class MKLSplitLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); + void Init(const vector*>& bottom, + const vector*>& top); + virtual inline const char* type() const { return "Split"; } virtual inline int ExactNumBottomBlobs() const { return 1; } virtual inline int MinTopBlobs() const { return 1; } @@ -429,6 +452,8 @@ class MKLSplitLayer : public Layer { vector > > bwd_top_diff; vector coeffs_; size_t num_tops; + vector sizes_src_; + vector strides_src_; dnnPrimitive_t sumPrimitive; }; @@ -446,6 +471,9 @@ class MKLEltwiseLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); + void Init(const vector*>& bottom, + const vector*>& top); + virtual inline const char* type() const { return "Eltwise"; } virtual inline int MinBottomBlobs() const { return 2; } virtual inline int ExactNumTopBlobs() const { return 1; } @@ -472,6 +500,8 @@ 
class MKLEltwiseLayer : public Layer { vector coeffs_; Blob max_idx_; size_t num_bottoms; + int channels_, num_; + int height_, width_; bool stable_prod_grad_; }; diff --git a/include/caffe/mkl_memory.hpp b/include/caffe/mkl_memory.hpp index a785c1e29..52538b84c 100644 --- a/include/caffe/mkl_memory.hpp +++ b/include/caffe/mkl_memory.hpp @@ -96,11 +96,15 @@ struct MKLMemoryDescriptorBase : PrvMemDescr, void create_internal_layout(const dnnPrimitive_t primitive, dnnResourceType_t type); void create_user_layout(size_t dimension, const size_t size[], - const size_t strides[]); + const size_t strides[], + bool create_conversion_if_possible = true); void create_layouts( const dnnPrimitive_t primitive, dnnResourceType_t type, size_t dimension, const size_t size[], const size_t strides[]); + void remove_internal_layout(); + void remove_user_layout(); + virtual PrvDescrType get_descr_type() {return PRV_DESCR_MKL2017;} virtual size_t prv_size() { return dnnLayoutGetMemorySize(layout_int); @@ -112,6 +116,8 @@ struct MKLMemoryDescriptorBase : PrvMemDescr, virtual void convert_to_prv(void* cpu_ptr); virtual bool layout_compare(shared_ptr other); virtual void convert_from_other(shared_ptr other); + protected: + void remove_conversions(); protected: Dtype* internal_ptr; }; diff --git a/include/mkl_dnn_cppwrapper.h b/include/mkl_dnn_cppwrapper.h index 9e23a6155..9608dff33 100644 --- a/include/mkl_dnn_cppwrapper.h +++ b/include/mkl_dnn_cppwrapper.h @@ -101,28 +101,44 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. {return dnnAllocateBuffer_F64(pPtr, layout);} TEMPLATE_PREFIX dnnError_t dnnReleaseBuffer( - void *ptr); + void* ptr); SPEC_PREFIX dnnError_t dnnReleaseBuffer( - void *ptr) - {return dnnReleaseBuffer_F32(ptr);} + void* ptr) { + dnnError_t status = E_SUCCESS; + if( ptr != NULL) { + status = dnnReleaseBuffer_F32(ptr); + } + return status; + } SPEC_PREFIX dnnError_t dnnReleaseBuffer( - void *ptr) - {return dnnReleaseBuffer_F64(ptr);} + void* ptr) { + dnnError_t status = E_SUCCESS; + if( ptr != NULL) { + status = dnnReleaseBuffer_F64(ptr); + } + return status; + } TEMPLATE_PREFIX dnnError_t dnnLayoutDelete( dnnLayout_t& layout); SPEC_PREFIX dnnError_t dnnLayoutDelete( dnnLayout_t& layout) { - dnnError_t status = dnnLayoutDelete_F32(layout); - layout = NULL; + dnnError_t status = E_SUCCESS; + if( layout != NULL) { + status = dnnLayoutDelete_F32(layout); + layout = NULL; + } return status; } SPEC_PREFIX dnnError_t dnnLayoutDelete( dnnLayout_t& layout) { - dnnError_t status = dnnLayoutDelete_F64(layout); - layout = NULL; + dnnError_t status = E_SUCCESS; + if( layout != NULL) { + status = dnnLayoutDelete_F64(layout); + layout = NULL; + } return status; -} + } TEMPLATE_PREFIX dnnError_t dnnPrimitiveAttributesCreate( dnnPrimitiveAttributes_t *attributes); @@ -186,14 +202,20 @@ TEMPLATE_PREFIX dnnError_t dnnDelete( dnnPrimitive_t& primitive); SPEC_PREFIX dnnError_t dnnDelete( dnnPrimitive_t& primitive) { - dnnError_t status = dnnDelete_F32(primitive); - primitive = NULL; + dnnError_t status = E_SUCCESS; + if (primitive != NULL) { + status = dnnDelete_F32(primitive); + primitive = NULL; + } return status; } SPEC_PREFIX dnnError_t dnnDelete( dnnPrimitive_t& primitive) { - dnnError_t status = dnnDelete_F64(primitive); - primitive = NULL; + dnnError_t status = E_SUCCESS; + if (primitive != NULL) { + status = dnnDelete_F64(primitive); + primitive = NULL; + } return status; } diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index cb249d21c..6adafb679 100644 --- a/src/caffe/blob.cpp 
+++ b/src/caffe/blob.cpp @@ -68,19 +68,24 @@ void Blob::Reshape(const vector& shape) { } int* shape_data = static_cast(shape_data_->mutable_cpu_data()); #endif - + bool actual_reshaping = false; for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); if (count_ != 0) { CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; } count_ *= shape[i]; - shape_[i] = shape[i]; + if (shape_[i] != shape[i]) { + actual_reshaping = true; + shape_[i] = shape[i]; + } #ifndef CPU_ONLY shape_data[i] = shape[i]; #endif } - if (count_ > capacity_) { + // We restart sync objects when there was change of shape + // requested count is bgger than current capacity + if ( (actual_reshaping == true) || (count_ > capacity_) ) { capacity_ = count_; data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index e5c21f34a..8e6dff22c 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -183,6 +183,6 @@ STUB_GPU(InnerProductLayer); INSTANTIATE_CLASS(InnerProductLayer); -//REGISTER_LAYER_CLASS(InnerProduct); +// REGISTER_LAYER_CLASS(InnerProduct); } // namespace caffe diff --git a/src/caffe/layers/mkl_batch_norm_layer.cpp b/src/caffe/layers/mkl_batch_norm_layer.cpp index ed5d10141..9e3189f63 100644 --- a/src/caffe/layers/mkl_batch_norm_layer.cpp +++ b/src/caffe/layers/mkl_batch_norm_layer.cpp @@ -47,9 +47,9 @@ namespace caffe { template MKLBatchNormLayer::~MKLBatchNormLayer() { - if (batchNormFwd != NULL) dnnDelete(batchNormFwd); - if (batchNormBwdData != NULL) dnnDelete(batchNormBwdData); - if (batchNormBwdScaleShift != NULL) dnnDelete(batchNormBwdScaleShift); + dnnDelete(batchNormFwd); + dnnDelete(batchNormBwdData); + dnnDelete(batchNormBwdScaleShift); dnnLayoutDelete(layout_usr_); dnnReleaseBuffer(workspace_buffer_); @@ -57,7 +57,7 @@ MKLBatchNormLayer::~MKLBatchNormLayer() { } template -void MKLBatchNormLayer::LayerSetUp(const vector*>& bottom, +void MKLBatchNormLayer::Init(const vector*>& bottom, const vector*>& top) { eps_ = this->layer_param_.batch_norm_param().eps(); use_weight_bias_ = this->layer_param_.batch_norm_param().use_weight_bias(); @@ -93,24 +93,28 @@ void MKLBatchNormLayer::LayerSetUp(const vector*>& bottom, bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name(); bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name(); + // TODO: Make a cleanup routine to avoid + // copy of following code in the Destructor + dnnError_t e; + dnnLayoutDelete(layout_usr_); e = dnnLayoutCreate(&layout_usr_, dim, sizes, strides); CHECK_EQ(e, E_SUCCESS); - fwd_bottom_data->create_user_layout(dim, sizes, strides); - fwd_top_data ->create_user_layout(dim, sizes, strides); - bwd_bottom_diff->create_user_layout(dim, sizes, strides); - bwd_top_diff ->create_user_layout(dim, sizes, strides); + fwd_bottom_data->create_user_layout(dim, sizes, strides, false); + fwd_top_data ->create_user_layout(dim, sizes, strides, false); + bwd_bottom_diff->create_user_layout(dim, sizes, strides, false); + bwd_top_diff ->create_user_layout(dim, sizes, strides, false); - workspace_buffer_ = NULL; - scaleShift_buffer_ = NULL; + dnnReleaseBuffer(workspace_buffer_); + dnnReleaseBuffer(scaleShift_buffer_); // "Lazy" allocation because here we don't know // what layout is used by neighbours. 
// Primitives will be allocated during the first fwd pass - batchNormFwd = NULL; - batchNormBwdData = NULL; - batchNormBwdScaleShift = NULL; + dnnDelete(batchNormFwd); + dnnDelete(batchNormBwdData); + dnnDelete(batchNormBwdScaleShift); if (use_weight_bias_) { if ( bias_term_ ) { @@ -147,10 +151,23 @@ void MKLBatchNormLayer::LayerSetUp(const vector*>& bottom, } } +template +void MKLBatchNormLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); +} template void MKLBatchNormLayer::Reshape(const vector*>& bottom, const vector*>& top) { + bool reshaping = true; + if ((num_ == bottom[0]->num()) && + channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width()) { + reshaping = false; + } + if (bottom[0] == top[0]) { // in-place computation temp_.ReshapeLike(*bottom[0]); } else { @@ -160,6 +177,10 @@ void MKLBatchNormLayer::Reshape(const vector*>& bottom, num_ = bottom[0]->num(); top[0]->Reshape(num_, channels_, height_, width_); } + + if (reshaping == true) { + Init(bottom, top); + } } template @@ -302,13 +323,16 @@ void MKLBatchNormLayer::Backward_cpu( const vector*>& bottom) { void *bottom_data = NULL; if (bottom[0] == top[0]) { - bottom_data = reinterpret_cast(const_cast(temp_.cpu_data())); + bottom_data = reinterpret_cast( + const_cast(temp_.cpu_data())); } else { bottom_data = - reinterpret_cast(const_cast(bottom[0]->prv_data())); + reinterpret_cast( + const_cast(bottom[0]->prv_data())); if (NULL == bottom_data) bottom_data = - reinterpret_cast(const_cast(bottom[0]->cpu_data())); + reinterpret_cast( + const_cast(bottom[0]->cpu_data())); } dnnError_t e; diff --git a/src/caffe/layers/mkl_concat_layer.cpp b/src/caffe/layers/mkl_concat_layer.cpp index 59238cd39..e2929ca84 100644 --- a/src/caffe/layers/mkl_concat_layer.cpp +++ b/src/caffe/layers/mkl_concat_layer.cpp @@ -46,10 +46,11 @@ namespace caffe { template MKLConcatLayer::~MKLConcatLayer() { dnnDelete(concatFwd_); dnnDelete(concatBwd_); + delete[] split_channels_; } template -void MKLConcatLayer::LayerSetUp(const vector*>& bottom, +void MKLConcatLayer::Init(const vector*>& bottom, const vector*>& top) { size_t dim_src = bottom[0]->shape().size(); size_t dim_dst = dim_src; @@ -63,6 +64,8 @@ void MKLConcatLayer::LayerSetUp(const vector*>& bottom, CHECK_EQ(bottom[0]->width(), bottom[i]->width()); } + + delete[] split_channels_; split_channels_ = new size_t[num_concats_]; for (size_t i = 0; i < num_concats_; ++i) { CHECK_EQ(dim_src, bottom[i]->shape().size()); @@ -81,8 +84,14 @@ void MKLConcatLayer::LayerSetUp(const vector*>& bottom, split_channels_[i] = bottom[i]->channels(); channels_ += split_channels_[i]; - fwd_bottom_data_[i]->create_user_layout(dim_src, sizes_src, strides_src); - bwd_bottom_diff_[i]->create_user_layout(dim_src, sizes_src, strides_src); + fwd_bottom_data_[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); + bwd_bottom_diff_[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); } // XXX: almost the same computations as above for src @@ -94,21 +103,37 @@ void MKLConcatLayer::LayerSetUp(const vector*>& bottom, sizes_dst[d] = bottom[0]->shape()[dim_dst - 1 - d]; strides_dst[d] = (d == 0) ? 
1 : strides_dst[d - 1] * sizes_dst[d - 1]; } - bwd_top_diff_->create_user_layout(dim_dst, sizes_dst, strides_dst); - fwd_top_data_->create_user_layout(dim_dst, sizes_dst, strides_dst); + bwd_top_diff_->create_user_layout(dim_dst, sizes_dst, strides_dst, false); + fwd_top_data_->create_user_layout(dim_dst, sizes_dst, strides_dst, false); - concatFwd_ = NULL; - concatBwd_ = NULL; + dnnDelete(concatFwd_); + dnnDelete(concatBwd_); +} + +template +void MKLConcatLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + num_ = 0; + height_ = 0; + width_ = 0; + Init(bottom,top); } template void MKLConcatLayer::Reshape(const vector*>& bottom, const vector*>& top) { + if ((num_ == bottom[0]->num()) && + height_ == bottom[0]->height() && + width_ == bottom[0]->width()) { + top[0]->Reshape(num_, channels_, height_, width_); + return; + } + num_ = bottom[0]->num(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); - top[0]->Reshape(num_, channels_, height_, width_); + Init(bottom, top); } template @@ -118,10 +143,8 @@ void MKLConcatLayer::Forward_cpu(const vector *>& bottom, vector bottom_data; bool isFirstPass = (concatFwd_ == NULL); dnnLayout_t *layouts = NULL; - bool *isBottomDataFilled = NULL; if (isFirstPass) { layouts = new dnnLayout_t[num_concats_]; - isBottomDataFilled = new bool[num_concats_](); } for (size_t n = 0; n < num_concats_; n++) { @@ -133,7 +156,6 @@ void MKLConcatLayer::Forward_cpu(const vector *>& bottom, reinterpret_cast(const_cast(bottom[n]->cpu_data())); if (isFirstPass) { layouts[n] = fwd_bottom_data_[n]->layout_usr; - isBottomDataFilled[n] = true; } } else if (isFirstPass) { CHECK((bottom[n]->get_prv_data_descriptor())->get_descr_type() == @@ -160,17 +182,12 @@ void MKLConcatLayer::Forward_cpu(const vector *>& bottom, CHECK_EQ(e, E_SUCCESS); for (size_t n = 0; n < num_concats_; ++n) { - if (isBottomDataFilled[n]) continue; - - fwd_bottom_data_[n]->create_internal_layout(concatFwd_, - (dnnResourceType_t)(dnnResourceMultipleSrc + n)); bwd_bottom_diff_[n]->create_internal_layout(concatBwd_, (dnnResourceType_t)(dnnResourceMultipleDst + n)); } } delete[] layouts; - delete[] isBottomDataFilled; void *concat_res[dnnResourceNumber]; for (int n = 0; n < num_concats_; ++n) { diff --git a/src/caffe/layers/mkl_convolution_layer.cpp b/src/caffe/layers/mkl_convolution_layer.cpp index 18068d852..98daab326 100644 --- a/src/caffe/layers/mkl_convolution_layer.cpp +++ b/src/caffe/layers/mkl_convolution_layer.cpp @@ -284,6 +284,7 @@ void MKLConvolutionLayer::Init( // layout_usr = internal layout of weight data on forward convolution bwdf2fwd_filter_diff->create_internal_layout(convolutionBwdFilter, dnnResourceDiffFilter); + bwdf2fwd_filter_diff->remove_user_layout(); status = dnnLayoutCreateFromPrimitive( &bwdf2fwd_filter_diff->layout_usr, convolutionFwd, dnnResourceFilter); CHECK_EQ(status, 0) << "Failed dnnLayoutCreateFromPrimitive with status " @@ -316,17 +317,15 @@ void MKLConvolutionLayer::Init( bwdb_bias_diff_iter->create_layouts(convolutionBwdBias, dnnResourceDiffBias, 1, bias_sizes, bias_strides); } - } - template void MKLConvolutionLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { ConvolutionLayer::LayerSetUp(bottom, top); - Init(bottom,top); + Init(bottom, top); } template @@ -340,7 +339,7 @@ void MKLConvolutionLayer::Reshape(const vector*>& bottom, this->num_ == bottom[0]->num()) return; - Init(bottom,top); + Init(bottom, top); } template diff --git a/src/caffe/layers/mkl_eltwise_layer.cpp b/src/caffe/layers/mkl_eltwise_layer.cpp index 
1c653cf2d..83e60a0ad 100644 --- a/src/caffe/layers/mkl_eltwise_layer.cpp +++ b/src/caffe/layers/mkl_eltwise_layer.cpp @@ -50,19 +50,12 @@ MKLEltwiseLayer::~MKLEltwiseLayer() { } template -void MKLEltwiseLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - CHECK(this->layer_param().eltwise_param().coeff_size() == 0 - || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << - "MKLEltwise Layer takes one coefficient per bottom blob."; - CHECK(!(this->layer_param().eltwise_param().operation() - == EltwiseParameter_EltwiseOp_PROD - && this->layer_param().eltwise_param().coeff_size())) << - "MKLEltwise layer only takes coefficients for summation."; - - CHECK(this->layer_param().eltwise_param().operation() == - EltwiseParameter_EltwiseOp_SUM) - << "MKLEltwise Layer only process summation."; +void MKLEltwiseLayer::Init(const vector*>& bottom, + const vector*>& top) { + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); op_ = this->layer_param_.eltwise_param().operation(); // Blob-wise coefficients for the elementwise operation. @@ -88,16 +81,44 @@ void MKLEltwiseLayer::LayerSetUp(const vector*>& bottom, bwd_bottom_diff.push_back( shared_ptr >(new MKLDiff)); CHECK_EQ(dim_src, bottom[i]->shape().size()); - fwd_bottom_data[i]->create_user_layout(dim_src, sizes_src, strides_src); - bwd_bottom_diff[i]->create_user_layout(dim_src, sizes_src, strides_src); + fwd_bottom_data[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); + bwd_bottom_diff[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); } - fwd_top_data->create_user_layout(dim_src, sizes_src, strides_src); + fwd_top_data->create_user_layout(dim_src, sizes_src, strides_src,false); + + dnnDelete(sumPrimitive); +} + + +template +void MKLEltwiseLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK(this->layer_param().eltwise_param().coeff_size() == 0 + || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << + "MKLEltwise Layer takes one coefficient per bottom blob."; + CHECK(!(this->layer_param().eltwise_param().operation() + == EltwiseParameter_EltwiseOp_PROD + && this->layer_param().eltwise_param().coeff_size())) << + "MKLEltwise layer only takes coefficients for summation."; + + CHECK(this->layer_param().eltwise_param().operation() == + EltwiseParameter_EltwiseOp_SUM) + << "MKLEltwise Layer only process summation."; + + Init(bottom,top); } template void MKLEltwiseLayer::Reshape(const vector*>& bottom, const vector*>& top) { + for (int i = 1; i < bottom.size(); ++i) { CHECK(bottom[i]->shape() == bottom[0]->shape()); } @@ -107,6 +128,16 @@ void MKLEltwiseLayer::Reshape(const vector*>& bottom, EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->shape()); } + + if (channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width() && + num_ == bottom[0]->num() && + num_bottoms == bottom.size()) { + return; + } + + Init(bottom,top); } template diff --git a/src/caffe/layers/mkl_lrn_layer.cpp b/src/caffe/layers/mkl_lrn_layer.cpp index 36ae14948..db7e361cb 100644 --- a/src/caffe/layers/mkl_lrn_layer.cpp +++ b/src/caffe/layers/mkl_lrn_layer.cpp @@ -52,7 +52,7 @@ MKLLRNLayer::~MKLLRNLayer() { } template -void MKLLRNLayer::LayerSetUp(const vector*>& bottom, +void MKLLRNLayer::Init(const vector*>& bottom, const vector*>& top) { size_ = this->layer_param_.lrn_param().local_size(); CHECK_EQ(size_ % 2, 1) << "LRN only 
supports odd values for local_size"; @@ -83,14 +83,24 @@ void MKLLRNLayer::LayerSetUp(const vector*>& bottom, bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name(); bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name(); - fwd_bottom_data->create_user_layout(dim, sizes, strides); - fwd_top_data ->create_user_layout(dim, sizes, strides); - bwd_bottom_diff->create_user_layout(dim, sizes, strides); - bwd_top_diff ->create_user_layout(dim, sizes, strides); + fwd_bottom_data->create_user_layout(dim, sizes, strides, false); + fwd_top_data ->create_user_layout(dim, sizes, strides, false); + bwd_bottom_diff->create_user_layout(dim, sizes, strides, false); + bwd_top_diff ->create_user_layout(dim, sizes, strides, false); // Fwd, Bwd primitives and lrn_buffer_ are allocated in "Lazy" // mode, because here we don't know // what layout is used by neighbours. + dnnDelete(lrnFwd); + dnnDelete(lrnBwd); + dnnReleaseBuffer(lrn_buffer_); + lrn_buffer_ = NULL; +} + +template +void MKLLRNLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); } template @@ -98,6 +108,15 @@ void MKLLRNLayer::Reshape(const vector*>& bottom, const vector*>& top) { CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " << "corresponding to (num, channels, height, width)"; + + bool reshaping = true; + if ((num_ == bottom[0]->num()) && + channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width()) { + reshaping = false; + } + channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); @@ -112,6 +131,10 @@ void MKLLRNLayer::Reshape(const vector*>& bottom, default: LOG(FATAL) << "Unknown normalization region."; } + + if (reshaping == true) { + Init(bottom, top); + } } template diff --git a/src/caffe/layers/mkl_pooling_layer.cpp b/src/caffe/layers/mkl_pooling_layer.cpp index 4dd3c0723..55a1f92e6 100644 --- a/src/caffe/layers/mkl_pooling_layer.cpp +++ b/src/caffe/layers/mkl_pooling_layer.cpp @@ -55,7 +55,8 @@ MKLPoolingLayer::~MKLPoolingLayer() { } template -void MKLPoolingLayer::LayerSetUp(const vector*>& bottom, +void MKLPoolingLayer::Init( + const vector*>& bottom, const vector*>& top) { PoolingParameter pool_param = this->layer_param_.pooling_param(); @@ -127,18 +128,36 @@ void MKLPoolingLayer::LayerSetUp(const vector*>& bottom, pooled_height_ = static_cast(ceil(static_cast( bottom[0]->height() + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; pooled_width_ = static_cast(ceil(static_cast( - bottom[0]->height() + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; + bottom[0]->width() + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; if (pad_h_ || pad_w_) { // If we have padding, ensure that the last pooling starts strictly // inside the image (instead of at the padding); otherwise clip the last. 
if ((pooled_height_ - 1) * stride_h_ >= bottom[0]->height() + pad_h_) { --pooled_height_; } - if ((pooled_width_ - 1) * stride_w_ >= bottom[0]->height() + pad_w_) { + if ((pooled_width_ - 1) * stride_w_ >= bottom[0]->width() + pad_w_) { --pooled_width_; } CHECK_LT((pooled_height_ - 1) * stride_h_, bottom[0]->height() + pad_h_); - CHECK_LT((pooled_width_ - 1) * stride_w_, bottom[0]->height() + pad_w_); + CHECK_LT((pooled_width_ - 1) * stride_w_, bottom[0]->width() + pad_w_); + } + + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + if (top.size() > 1) { + (reinterpret_cast* > (top[1]) )->Reshape(bottom[0]->num(), + channels_, pooled_height_, pooled_width_); + } + // If max/min/avg pooling, we will initialize the vector index part. + if (top.size() == 1) { + max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } + // If stochastic pooling, we will initialize the random index part. + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_STOCHASTIC) { + rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); } size_t dim = 4; @@ -180,13 +199,19 @@ void MKLPoolingLayer::LayerSetUp(const vector*>& bottom, bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name(); bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name(); - fwd_bottom_data->create_user_layout(dim, src_sizes, src_strides); - fwd_top_data ->create_user_layout(dim, dst_sizes, dst_strides); - bwd_bottom_diff->create_user_layout(dim, src_sizes, src_strides); - bwd_top_diff ->create_user_layout(dim, dst_sizes, dst_strides); + fwd_bottom_data->create_user_layout(dim, src_sizes, src_strides, false); + fwd_top_data ->create_user_layout(dim, dst_sizes, dst_strides, false); + bwd_bottom_diff->create_user_layout(dim, src_sizes, src_strides, false); + bwd_top_diff ->create_user_layout(dim, dst_sizes, dst_strides, false); // Primitives will be allocated during the first fwd pass - poolingFwd = NULL; - poolingBwd = NULL; + dnnDelete(poolingFwd); + dnnDelete(poolingBwd); +} + +template +void MKLPoolingLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); } template @@ -195,77 +220,16 @@ void MKLPoolingLayer::Reshape(const vector*>& bottom, CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " << "corresponding to (num, channels, height, width)"; - bool shape_changed = true; if (channels_ == bottom[0]->channels() && height_ == bottom[0]->height() && width_ == bottom[0]->width() && - num_ == bottom[0]->num()) - shape_changed = false; - - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); - num_ = bottom[0]->num(); - - if (global_pooling_) { - kernel_h_ = bottom[0]->height(); - kernel_w_ = bottom[0]->width(); - } - pooled_height_ = static_cast(ceil(static_cast( - height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; - pooled_width_ = static_cast(ceil(static_cast( - width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; - if (pad_h_ || pad_w_) { - // If we have padding, ensure that the last pooling starts strictly - // inside the image (instead of at the padding); otherwise clip the last. 
- if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) { - --pooled_height_; - } - if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) { - --pooled_width_; - } - CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_); - CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); - } - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); - if (top.size() > 1) { - (reinterpret_cast* > (top[1]) )->Reshape(bottom[0]->num(), - channels_, pooled_height_, pooled_width_); - } - // If max/min/avg pooling, we will initialize the vector index part. - if (top.size() == 1) { - max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); - } - // If stochastic pooling, we will initialize the random index part. - if (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_STOCHASTIC) { - rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + num_ == bottom[0]->num()) { + return; } - if (shape_changed) { - // Recreate MKL layout - size_t dim = 4; - size_t src_sizes[4], src_strides[4]; - - src_sizes[0] = bottom[0]->width(); - src_sizes[1] = bottom[0]->height(); - src_sizes[2] = bottom[0]->channels(); - src_sizes[3] = bottom[0]->num(); - - src_strides[0] = 1; - src_strides[1] = src_sizes[0]; - src_strides[2] = src_sizes[0]*src_sizes[1]; - src_strides[3] = src_sizes[0]*src_sizes[1]*src_sizes[2]; - - fwd_bottom_data->create_user_layout(dim, src_sizes, src_strides); - } + Init(bottom, top); } -// TODO(Yangqing): Is there a faster way to do pooling in the channel-first -// case? template void MKLPoolingLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { diff --git a/src/caffe/layers/mkl_relu_layer.cpp b/src/caffe/layers/mkl_relu_layer.cpp index e967af430..71d811f81 100644 --- a/src/caffe/layers/mkl_relu_layer.cpp +++ b/src/caffe/layers/mkl_relu_layer.cpp @@ -49,33 +49,83 @@ MKLReLULayer::~MKLReLULayer() { } template -void MKLReLULayer::LayerSetUp(const vector*>& bottom, +void MKLReLULayer::Init( + const vector*>& bottom, const vector*>& top) { -// CHECK_EQ(top[0]->shape(), bottom[0]->shape()); size_t dim = bottom[0]->shape().size(); - size_t sizes[dim], strides[dim]; + this->sizes_.resize(dim); + this->strides_.resize(dim); for (size_t d = 0; d < dim; ++d) { - sizes[d] = bottom[0]->shape()[dim - 1 - d]; - strides[d] = (d == 0) ? 1 : strides[d-1]*sizes[d-1]; + this->sizes_[d] = bottom[0]->shape()[dim - 1 - d]; + this->strides_[d] = (d == 0) ? 
1 : this->strides_[d-1]*this->sizes_[d-1]; } // Names are for debugging only - fwd_bottom_data_->name = "fwd_bottom_data @ " + this->layer_param_.name(); - fwd_top_data_->name = "fwd_top_data @ " + this->layer_param_.name(); - bwd_bottom_diff_->name = "bwd_bottom_diff @ " + this->layer_param_.name(); - bwd_top_diff_->name = "bwd_top_diff @ " + this->layer_param_.name(); + this->fwd_bottom_data_->name = "fwd_bottom_data @ " + + this->layer_param_.name(); + this->fwd_top_data_->name = "fwd_top_data @ " + + this->layer_param_.name(); + this->bwd_bottom_diff_->name = "bwd_bottom_diff @ " + + this->layer_param_.name(); + this->bwd_top_diff_->name = "bwd_top_diff @ " + + this->layer_param_.name(); - fwd_bottom_data_->create_user_layout(dim, sizes, strides); - fwd_top_data_ ->create_user_layout(dim, sizes, strides); - bwd_bottom_diff_->create_user_layout(dim, sizes, strides); - bwd_top_diff_ ->create_user_layout(dim, sizes, strides); + this->fwd_bottom_data_->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + this->fwd_top_data_ ->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + this->bwd_bottom_diff_->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + this->bwd_top_diff_ ->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); // "Lazy" allocation because here we don't know // what layout is used by neighbours. - reluFwd_ = NULL; // Will be allocated in a "lazy" way in first forward pass - reluBwd_ = NULL; // Will be allocated in a "lazy" way in first backward pass + dnnDelete(reluFwd_); + dnnDelete(reluBwd_); +} + +template +void MKLReLULayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { +// CHECK_EQ(top[0]->shape(), bottom[0]->shape()); + Init(bottom, top); +} + +template +void MKLReLULayer::Reshape(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::Reshape(bottom, top); + + // Here I check for sizes whther to destroy primitives + size_t dim = bottom[0]->shape().size(); + + // If dimensions of blobs are the same as they were then + // do not really destroy primitives + if (dim == this->sizes_.size()) { + // .. check for strides and size dims if they corresspond each other + + // TODO: speedup comparison? + bool is_match = true; + for (size_t d = 0; d < dim; ++d) { + is_match = is_match && (this->sizes_[d] == + bottom[0]->shape()[dim - 1 - d]); + is_match = is_match && (this->strides_[d] == ((d == 0) ? 1 : + this->strides_[d-1]*this->sizes_[d-1])); + } + + // If no new modification was done to layout sizes, + // strides realtivly to previous iteration then + // no primitives recreation is needed + if (is_match) { + return; + } + } + + Init(bottom, top); } + template void MKLReLULayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { diff --git a/src/caffe/layers/mkl_split_layer.cpp b/src/caffe/layers/mkl_split_layer.cpp index e5307a982..7a568a129 100644 --- a/src/caffe/layers/mkl_split_layer.cpp +++ b/src/caffe/layers/mkl_split_layer.cpp @@ -49,26 +49,43 @@ MKLSplitLayer::~MKLSplitLayer() { } template -void MKLSplitLayer::LayerSetUp(const vector*>& bottom, +void MKLSplitLayer::Init( + const vector*>& bottom, const vector*>& top) { num_tops = top.size(); size_t dim_src = bottom[0]->shape().size(); - - size_t sizes_src[dim_src], strides_src[dim_src]; + this->sizes_src_.resize(dim_src); + this->strides_src_.resize(dim_src); for (size_t d = 0; d < dim_src; ++d) { - sizes_src[d] = bottom[0]->shape()[dim_src - d - 1]; - strides_src[d] = (d == 0) ? 
1 : strides_src[d-1]*sizes_src[d-1]; + this->sizes_src_[d] = bottom[0]->shape()[dim_src - d - 1]; + this->strides_src_[d] = (d == 0) ? + 1 : this->strides_src_[d-1]*this->sizes_src_[d-1]; } for (size_t i = 0; i < num_tops; ++i) { bwd_top_diff.push_back(shared_ptr >(new MKLDiff)); - bwd_top_diff[i]->create_user_layout(dim_src, sizes_src, strides_src); + bwd_top_diff[i]->create_user_layout(dim_src, + &(this->sizes_src_[0]), + &(this->strides_src_[0]), + false); } // Blob-wise coefficients for the elementwise operation. coeffs_ = vector(top.size(), 1); - bwd_bottom_diff->create_user_layout(dim_src, sizes_src, strides_src); + bwd_bottom_diff->create_user_layout(dim_src, + &(this->sizes_src_[0]), + &(this->strides_src_[0]), + false); + + // Primitive will be created at first time it is to be used + dnnDelete(sumPrimitive); +} + +template +void MKLSplitLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); } template @@ -86,6 +103,34 @@ void MKLSplitLayer::Reshape(const vector*>& bottom, top[i]->ReshapeLike(*bottom[0]); CHECK_EQ(count_, top[i]->count()); } + + // Here we check + // Here I check for sizes whther to destroy primitives + size_t dim_src = bottom[0]->shape().size(); + + // If dimensions of blobs are the same as they were then + // do not really destroy primitives + if (dim_src == this->sizes_src_.size()) { + // .. check for strides and size dims if they corresspond each other + + // TODO: speedup comparison? + bool is_match = true; + for (size_t d = 0; d < dim_src; ++d) { + is_match = is_match && (this->sizes_src_[d] == + bottom[0]->shape()[dim_src - 1 - d]); + is_match = is_match && (this->strides_src_[d] == ((d == 0) ? 1 : + this->strides_src_[d-1]*this->sizes_src_[d-1])); + } + + // If no new modification was done to layout sizes, + // strides realtivly to previous iteration then + // no primitives recreation is needed + if (is_match) { + return; + } + } + + Init(bottom, top); } template diff --git a/src/caffe/layers/mkldnn_lrn_layer.cpp b/src/caffe/layers/mkldnn_lrn_layer.cpp index ede8cde1f..25b37bfd0 100644 --- a/src/caffe/layers/mkldnn_lrn_layer.cpp +++ b/src/caffe/layers/mkldnn_lrn_layer.cpp @@ -45,8 +45,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace caffe { template -void MKLDNNLRNLayer::LayerSetUp(const vector*>& bottom - ,const vector*>& top) +void MKLDNNLRNLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { VLOG(1) << "MKLDNNLRNLayer::LayerSetUp: " << this->layer_param_.name(); diff --git a/src/caffe/mkl_memory.cpp b/src/caffe/mkl_memory.cpp index 08727b367..ab44c9821 100644 --- a/src/caffe/mkl_memory.cpp +++ b/src/caffe/mkl_memory.cpp @@ -49,18 +49,7 @@ namespace caffe { template void MKLMemoryDescriptorBase::create_conversions() { int status; - if (this->convert_from_int) { - DLOG(INFO) << "convert_from_int layout already created, recreating for" - << this->name; - status = dnnDelete(this->convert_from_int); - CHECK_EQ(status, E_SUCCESS); - } - if (this->convert_to_int) { - DLOG(INFO) << "convert_to_int layout already created, recreating for" - << this->name; - status = dnnDelete(this->convert_to_int); - CHECK_EQ(status, E_SUCCESS); - } + this->remove_conversions(); if (layout_int && !dnnLayoutCompare(layout_usr, layout_int)) { CHECK(layout_usr); @@ -78,15 +67,27 @@ void MKLMemoryDescriptorBase::create_conversions() { } template -void MKLMemoryDescriptorBase::create_internal_layout( - const dnnPrimitive_t primitive, dnnResourceType_t type) { +void MKLMemoryDescriptorBase::remove_conversions() { int status; - if (this->layout_int) { - DLOG(INFO) << "Internal layout already created, recreating for" + if (this->convert_from_int) { + DLOG(INFO) << "convert_from_int layout already created, recreating for" << this->name; - status = dnnLayoutDelete(this->layout_int); + status = dnnDelete(this->convert_from_int); CHECK_EQ(status, E_SUCCESS); } + if (this->convert_to_int) { + DLOG(INFO) << "convert_to_int layout already created, recreating for" + << this->name; + status = dnnDelete(this->convert_to_int); + CHECK_EQ(status, E_SUCCESS); + } +} + +template +void MKLMemoryDescriptorBase::create_internal_layout( + const dnnPrimitive_t primitive, dnnResourceType_t type) { + int status; + this->remove_internal_layout(); status = dnnLayoutCreateFromPrimitive( &this->layout_int, primitive, type); CHECK_EQ(status, E_SUCCESS) @@ -99,29 +100,70 @@ void MKLMemoryDescriptorBase::create_internal_layout( template void MKLMemoryDescriptorBase::create_user_layout( - size_t dimension, const size_t size[], const size_t strides[]) { + size_t dimension, + const size_t size[], + const size_t strides[], + bool create_conversion_if_possible) { int status; - if (this->layout_usr) { - DLOG(INFO) << "User layout already created, recreating for" - << this->name; - status = dnnLayoutDelete(this->layout_usr); - CHECK_EQ(status, E_SUCCESS); - } - + this->remove_user_layout(); status = dnnLayoutCreate( &this->layout_usr, dimension, size, strides); CHECK_EQ(status, E_SUCCESS) << "Failed dnnLayoutCreate with status " << status << " for buffer: " << this->name << "\n"; - if (this->layout_int) - this->create_conversions(); + // If conversion creation is to happen + // then if only we have internal layout already in place + // we can proceed with conversion creation. 
+ // Otherwise we make sure that existing conversions are deleted + // as with new layout creation they are being instantly invalidated + if (create_conversion_if_possible) { + if (this->layout_int) { + this->create_conversions(); + } + } else { + this->remove_conversions(); + } +} + +template +void MKLMemoryDescriptorBase::remove_internal_layout() { + int status; + if (this->layout_int) { + DLOG(INFO) << "Internal layout already created, recreating for" + << this->name; + status = dnnLayoutDelete(this->layout_int); + CHECK_EQ(status, E_SUCCESS); + + // with layout invalidated we should also remove Allocation + // as next layout may declare diffrent sizes of allocation + status = dnnReleaseBuffer(this->internal_ptr); + this->internal_ptr = NULL; + CHECK_EQ(status, E_SUCCESS); + } +} + +template +void MKLMemoryDescriptorBase::remove_user_layout() { + int status; + if (this->layout_usr) { + DLOG(INFO) << "Internal layout already created, recreating for" + << this->name; + status = dnnLayoutDelete(this->layout_usr); + CHECK_EQ(status, E_SUCCESS); + + // with layout invalidated we should also remove Allocation + // as next layout may declare diffrent sizes of allocation + status = dnnReleaseBuffer(this->internal_ptr); + this->internal_ptr = NULL; + CHECK_EQ(status, E_SUCCESS); + } } template void MKLMemoryDescriptorBase::create_layouts( const dnnPrimitive_t primitive, dnnResourceType_t type, size_t dimension, const size_t size[], const size_t strides[]) { - // To avoid creating conversion among potentialiy diffrent + // To avoid creating conversion among potentialiy diffrent // (in terms of size) layouts we need to destroy existing layouts here if (this->layout_usr) { @@ -137,6 +179,11 @@ void MKLMemoryDescriptorBase::create_layouts( template void MKLMemoryDescriptorBase::convert_from_prv(void* cpu_ptr) { CHECK(cpu_ptr); + // When no conversion is available then + // recreate them if layouts are available + if (this-> convert_from_int == NULL) { + this->create_conversions(); + } CHECK(this->convert_from_int); int status; void *convert_resources[dnnResourceNumber]; diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 48559cf86..38a3d02d9 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -2411,8 +2411,6 @@ TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) { this->RunFilterNetTest(input_proto_test, output_proto_test); } - - TYPED_TEST(NetTest, TestReshape) { typedef typename TypeParam::Dtype Dtype; // We set up bottom blobs of two different sizes, switch between @@ -2485,6 +2483,132 @@ TYPED_TEST(NetTest, TestReshape) { EXPECT_FALSE(same_spatial_shape); } +// TODO: this test should work for Caffe Engine as well +// but there were problems visible on Intel OpenMP +// that need to be investigated +#ifdef MKL2017_SUPPORTED +// This test is just checking if this +// configuration does not explode +TYPED_TEST(NetTest, TestForwardReshapeForward) { + typedef typename TypeParam::Dtype Dtype; + const string& proto = + "name: 'TestNetwork' " + " layer {" + " top: 'data'" + " top: 'label'" + " name: 'data'" + " type: 'DummyData'" + " dummy_data_param {" + " shape: { dim: 32 dim: 3 dim: 227 dim: 227 }" + " data_filler {" + " type: 'constant'" + " value: 0.01" + " }" + " }" + " transform_param {" + " mirror: true" + " crop_size: 224" + " mean_value: 104" + " mean_value: 117" + " mean_value: 123" + " }" + " }" + " layer {" + " bottom: 'data'" + " top: 'conv'" + " name: 'conv1'" + " type: 'Convolution'" + " param {" + " lr_mult: 1" + " decay_mult: 1" + 
" }" + " convolution_param {" + " " + " num_output: 64" + " engine: MKL2017 " + " pad: 3" + " kernel_size: 7" + " stride: 2" + " weight_filler {" + " type: 'xavier'" + " }" + " bias_term: false" + " }" + " }" + " layer {" + " bottom: 'conv'" + " top: 'relu1'" + " name: 'relu1'" + " type: 'ReLU'" + " relu_param {" + " engine: MKL2017 " + " " + " }" + " }" + " layer {" + " bottom: 'conv'" + " top: 'relu2'" + " name: 'relu2'" + " type: 'ReLU'" + " relu_param {" + " engine: MKL2017 " + " " + " }" + " }" + " layer {" + " bottom: 'relu1'" + " bottom: 'relu2'" + " top: 'concat'" + " name: 'concat'" + " type: 'Concat'" + " concat_param {" + " engine: MKL2017 " + " " + " }" + " } " + " layer {" + " bottom: 'concat'" + " top: 'lrn'" + " name: 'LRN'" + " type: 'LRN'" + " lrn_param {" + " engine: MKL2017 " + " local_size: 5" + " alpha: 0.0001" + " beta: 0.75" + " }" + " }" + " layer {" + " bottom: 'lrn'" + " top: 'pooling'" + " name: 'Pooling'" + " type: 'Pooling'" + " pooling_param {" + " engine: MKL2017 " + " kernel_size: 5" + " stride: 2" + " pool: MAX" + " }" + " }" + " layer {" + " bottom: 'pooling'" + " top: 'bn'" + " name: 'BatchNorm'" + " type: 'BatchNorm'" + " batch_norm_param {" + " engine: MKL2017 " + " }" + " }"; + this->InitNetFromProtoString(proto); + this->net_->Forward(); + shared_ptr > input_blob = this->net_->blob_by_name("data"); + input_blob->Reshape(1, 3, 1280, 720); + this->net_->Forward(); +} +#endif + + + TYPED_TEST(NetTest, TestSkipPropagateDown) { // check bottom_need_backward if propagate_down is true this->InitSkipPropNet(false);