This repository has been archived by the owner on Aug 5, 2022. It is now read-only.

Commit

Merge remote-tracking branch 'internal/release_1.1.3'
daisyden committed Dec 11, 2018
2 parents e94b3ff + 9d10d6f commit c0408ba
Showing 33 changed files with 69,290 additions and 109,381 deletions.
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -10,8 +10,8 @@ endif()
project(Caffe C CXX)

# ---[ Caffe version
set(CAFFE_TARGET_VERSION "1.1.2" CACHE STRING "Caffe logical version")
set(CAFFE_TARGET_SOVERSION "1.1.2" CACHE STRING "Caffe soname version")
set(CAFFE_TARGET_VERSION "1.1.3" CACHE STRING "Caffe logical version")
set(CAFFE_TARGET_SOVERSION "1.1.3" CACHE STRING "Caffe soname version")
add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION})

# ---[ Using cmake scripts and modules
4 changes: 3 additions & 1 deletion INSTALL.md
@@ -1,7 +1,9 @@
# Installation
#### Installation


See http://caffe.berkeleyvision.org/installation.html for the latest
installation instructions.


Check the users group in case you need help:
https://groups.google.com/forum/#!forum/caffe-users
2 changes: 1 addition & 1 deletion Makefile
@@ -102,7 +102,7 @@ LIB_BUILD_DIR := $(BUILD_DIR)/lib
STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a
DYNAMIC_VERSION_MAJOR := 1
DYNAMIC_VERSION_MINOR := 1
DYNAMIC_VERSION_REVISION := 2
DYNAMIC_VERSION_REVISION := 3
DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
#DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR)
DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
2 changes: 1 addition & 1 deletion docker/README.md
@@ -17,7 +17,7 @@ docker run -ti caffe:cpu caffe --version
```
which should show a message like:
```
caffe version 1.1.2
caffe version 1.1.3
```

One can also build and run the Caffe tests in the image using:
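The test command itself lies outside the changed lines and is therefore not shown in this diff. As a sketch only — not a reproduction of the README, and assuming the image keeps its build tree under /opt/caffe/build — running the test suite inside the image would look roughly like:

```
# Sketch: run the Caffe unit tests inside the container.
# Not taken from this diff; the /opt/caffe/build path is an assumption about the image layout.
docker run -ti caffe:cpu bash -c "cd /opt/caffe/build; make runtest"
```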
8 changes: 8 additions & 0 deletions examples/faster-rcnn/license.txt
@@ -0,0 +1,8 @@
The MIT License
Copyright (c) <year> <copyright holders>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
68 changes: 17 additions & 51 deletions examples/pycaffe/tune_model.py
@@ -1,12 +1,14 @@
import os
import sys
import argparse

sys.path.insert(0, "python")
import caffe
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import copy
import utils


def genOptimalModel(net, mkldnn_direct_time_map, mkldnn_winograd_time_map, optimal_model):
for index in range(0, len(net.layer)):
l = net.layer[index]
@@ -17,37 +19,21 @@ def genOptimalModel(net, mkldnn_direct_time_map, mkldnn_winograd_time_map, optim
stride = 1
if len(l.convolution_param.stride) != 0:
stride = l.convolution_param.stride[0]
if mkldnn_winograd_time_map[l.name] < mkldnn_direct_time_map[l.name] and kernel_size == 3 and stride == 1 and l.convolution_param.num_output % 16 == 0:
if kernel_size == 3 and stride == 1 and l.convolution_param.num_output % 16 ==0:
l.convolution_param.conv_algorithm = "winograd"
else:
l.convolution_param.conv_algorithm = "direct"

with open(optimal_model, "w") as f:
f.write(txtf.MessageToString(net, float_format=".17g"))


def tuneModelDefinition(model_path, iteration, is_test_phase, core_num, socket_num):
working_dir = sys.path[0]
caffe_path = os.path.join(working_dir, "..", "..",
"build", "tools", "caffe")
def tuneModelDefinition(model_path, iteration):
working_dir = sys.argv[0]

caffe_path = os.path.join(os.path.dirname(os.path.abspath(working_dir)), "../..", "build", "tools", "caffe")
if not os.path.exists(caffe_path):
print "Caffe binary does not exist; please build Caffe binary first."
sys.exit(1)
core_num_per_socket = int([i for i in os.popen('lscpu').readlines(
) if i.startswith('Core(s) per socket:')][0].strip().split(':')[-1].strip())
core_num_per_socket *= socket_num
if is_test_phase:
caffe_path += ' time -phase TEST -forward_only'
if core_num != 0:
core_num = core_num_per_socket if core_num > core_num_per_socket else core_num
env_prefix = 'OMP_NUM_THREADS={} KMP_HW_SUBSET=1t KMP_AFFINITY=compact,granularity=fine numactl -C 0-{} -m 0 '.format(
core_num, core_num)
elif socket_num == 1:
env_prefix = 'OMP_NUM_THREADS={} KMP_HW_SUBSET=1t KMP_AFFINITY=compact,granularity=fine numactl -C 0-{} -m 0 '.format(
core_num_per_socket, core_num_per_socket)
else:
env_prefix = 'numactl -l '
caffe_path = env_prefix + caffe_path

base_model_name = os.path.basename(model_path)
model_dir = os.path.dirname(model_path)
@@ -83,30 +69,21 @@ def tuneModelDefinition(model_path, iteration, is_test_phase, core_num, socket_n
mkldnn_winograd_log = "mkldnn_winograd.log"
mkldnn_direct_log_path = os.path.join(model_dir, mkldnn_direct_log)
mkldnn_winograd_log_path = os.path.join(model_dir, mkldnn_winograd_log)
mkldnn_direct_command = caffe_path + " -model " + direct_model_path + \
" -engine MKLDNN -iterations " + \
str(iteration) + " >& " + mkldnn_direct_log_path
mkldnn_direct_command = caffe_path + " time -model " + direct_model_path + " -engine MKLDNN -iterations " + str(iteration) + " >& " + mkldnn_direct_log_path
os.system(mkldnn_direct_command)
mkldnn_winograd_command = caffe_path + " -model " + winograd_model_path + \
" -engine MKLDNN -iterations " + \
str(iteration) + " >& " + mkldnn_winograd_log_path
mkldnn_winograd_command = caffe_path + " time -model " + winograd_model_path + " -engine MKLDNN -iterations " + str(iteration) + " >& " + mkldnn_winograd_log_path
os.system(mkldnn_winograd_command)

(model_str, mkldnn_direct_time_lines) = utils.parseLog(mkldnn_direct_log_path)
mkldnn_direct_layer_time_map = utils.parseTimeLines(
mkldnn_direct_time_lines)
(model_str, mkldnn_winograd_time_lines) = utils.parseLog(
mkldnn_winograd_log_path)
mkldnn_winograd_layer_time_map = utils.parseTimeLines(
mkldnn_winograd_time_lines)
mkldnn_direct_layer_time_map = utils.parseTimeLines(mkldnn_direct_time_lines)
(model_str, mkldnn_winograd_time_lines) = utils.parseLog(mkldnn_winograd_log_path)
mkldnn_winograd_layer_time_map = utils.parseTimeLines(mkldnn_winograd_time_lines)

hybrid_model_name = base_model_name.split(".")[0] + "_hybrid.prototxt"
hybrid_model_path = os.path.join(model_dir, hybrid_model_name)
genOptimalModel(base_net, mkldnn_direct_layer_time_map,
mkldnn_winograd_layer_time_map, hybrid_model_path)
genOptimalModel(base_net, mkldnn_direct_layer_time_map, mkldnn_winograd_layer_time_map, hybrid_model_path)
print '{} has been generated.'.format(hybrid_model_path)


if __name__ == '__main__':
parser = argparse.ArgumentParser()

@@ -116,17 +93,7 @@ def tuneModelDefinition(model_path, iteration, is_test_phase, core_num, socket_n
parser.add_argument('-i', '--iteration', action='store', dest='iterations', type=int, default=10,
help='require iterations number to run the model')

parser.add_argument('-t', '--phase', action='store', dest='is_test_phase', type=bool, default=False,
help='Train or Test phase')

parser.add_argument('-c', '--core_num', action='store', dest='core_num', type=int, default=0,
help='core number for inference')

parser.add_argument('-s', '--socket', action='store', dest='socket_num', type=int, default=2,
help='socket number for inference')

parser.add_argument('-v', '--version', action='version',
version='%(prog)s 1.0')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')

params = parser.parse_args()

@@ -135,5 +102,4 @@ def tuneModelDefinition(model_path, iteration, is_test_phase, core_num, socket_n
print "[ERROR] Please specify the model definition file with -m"
exit(1)

tuneModelDefinition(params.model, params.iterations,
params.is_test_phase, params.core_num, params.socket_num)
tuneModelDefinition(params.model, params.iterations)
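
After this simplification the script accepts only the model definition (-m) and the iteration count (-i); the -t/--phase, -c/--core_num and -s/--socket flags removed above no longer exist. A hypothetical invocation — the prototxt path below is an illustrative assumption, not taken from this commit — would be:

```
# Hypothetical invocation of the simplified tuning script; the model path is an
# assumption for illustration. -i defaults to 10 iterations if omitted.
python examples/pycaffe/tune_model.py -m models/bvlc_googlenet/train_val.prototxt -i 10
```

As shown in the diff, the script runs `caffe time -engine MKLDNN` on a direct and a Winograd variant of the model and writes the resulting per-layer choices to `<model>_hybrid.prototxt`.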
2 changes: 2 additions & 0 deletions examples/pycaffe/utils.py
@@ -1,5 +1,7 @@
import os
import sys
sys.path.insert(0,"python")
import caffe
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf

8 changes: 8 additions & 0 deletions examples/rfcn/license.txt
@@ -0,0 +1,8 @@
The MIT License
Copyright (c) <year> <copyright holders>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 changes: 5 additions & 5 deletions external/mkl/prepare_mkl.sh
@@ -34,8 +34,8 @@
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
FindLibrary()
{
# Find all the instances of the MKL libraries present in Caffe
@@ -74,11 +74,11 @@ echo $VERSION_LINE # Return Version Line
# MKL
DST=`dirname $0`
OMP=0
VERSION_MATCH=20180710
ARCHIVE_BASENAME=mklml_lnx_2019.0.20180710.tgz
VERSION_MATCH=20180928
ARCHIVE_BASENAME=mklml_lnx_2019.0.1.20180928.tgz
MKL_CONTENT_DIR=`echo $ARCHIVE_BASENAME | rev | cut -d "." -f 2- | rev`

MKLURL="https://github.com/intel/mkl-dnn/releases/download/v0.16/$ARCHIVE_BASENAME"
MKLURL="https://github.com/intel/mkl-dnn/releases/download/v0.17-rc/$ARCHIVE_BASENAME"
# there are different MKL libs to be used for GCC and for ICC
reg='^[0-9]+$'
VERSION_LINE=`GetVersionName $MKLROOT`
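
The two hunks above move the bundled MKLML package from the 2019.0 (20180710) drop hosted under the MKL-DNN v0.16 release to the 2019.0.1 (20180928) drop hosted under v0.17-rc. A manual equivalent of the updated download step, sketched only from the variables visible in the diff, would be:

```
# Sketch of the updated download step, built only from values shown in the diff above.
ARCHIVE_BASENAME=mklml_lnx_2019.0.1.20180928.tgz
MKLURL="https://github.com/intel/mkl-dnn/releases/download/v0.17-rc/$ARCHIVE_BASENAME"
wget "$MKLURL" && tar -xzf "$ARCHIVE_BASENAME"
```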
17 changes: 10 additions & 7 deletions include/caffe/mkldnn_memory.hpp
@@ -64,7 +64,7 @@ class MKLDNNMemoryDescriptorBase : public PrvMemDescr
, std::vector<float>scale=std::vector<float>(1,1.)
, int mask=0
, bool is_sum=false
, bool is_wino=false);
, bool is_wino=false
, bool is_weight=false);


~MKLDNNMemoryDescriptorBase() {}
@@ -142,12 +143,12 @@ class MKLDNNMemoryDescriptorBase : public PrvMemDescr
// TODO: may need initialize memory by 0
}
}
void set_prv_memory_pd(shared_ptr<memory::primitive_desc> memory_pd, std::vector<float> scale, int mask, bool is_wino) {
void set_prv_memory_pd(shared_ptr<memory::primitive_desc> memory_pd, std::vector<float> scale, int mask, bool is_wino, bool is_weight) {
_prv_memory_pd = memory_pd;
if (_prv_memory_pd && _usr_memory_pd) {
check_usr_with_prv_descriptors();
std::vector<float>scale_ext = std::vector<float>(1,1.);
this->create_reorder_descriptors(scale, mask, scale_ext, false, is_wino);
this->create_reorder_descriptors(scale, mask, scale_ext, false, is_wino, is_weight);
}
}

@@ -163,7 +164,7 @@ class MKLDNNMemoryDescriptorBase : public PrvMemDescr
_usr_memory_pd = memory_pd;
}

void create_reorder_descriptors(std::vector<float> scale, int mask=0, std::vector<float>scale_ext=std::vector<float>(1,1.), bool is_sum=false, bool is_wino=false);
void create_reorder_descriptors(std::vector<float> scale, int mask=0, std::vector<float>scale_ext=std::vector<float>(1,1.), bool is_sum=false, bool is_wino=false, bool is_weight=false);

shared_ptr<memory::primitive_desc> _usr_memory_pd;
shared_ptr<memory::primitive_desc> _prv_memory_pd;
@@ -205,7 +206,8 @@ class MKLDNNMemoryDescriptor : public MKLDNNMemoryDescriptorBase<Dtype> {
, std::vector<float> scale=std::vector<float>(1,1.)
, int mask=0
, bool is_sum=false
, bool is_wino=false);
, bool is_wino=false
, bool is_weight=false);

virtual void convert_from_prv(void* cpu_ptr);
virtual void convert_to_prv(void* cpu_ptr);
@@ -253,8 +255,9 @@ class MKLDNNData : public MKLDNNMemoryDescriptor<Dtype, false>
, std::vector<float> scale=std::vector<float>(1,1.)
, int mask=0
, bool is_sum=false
, bool is_wino=false)
: MKLDNNMemoryDescriptor<Dtype, false>(usr_memory_pd, prv_memory_pd, blob, mkldnn_layer, scale, mask, is_sum, is_wino) {}
, bool is_wino=false
, bool is_weight=false)
: MKLDNNMemoryDescriptor<Dtype, false>(usr_memory_pd, prv_memory_pd, blob, mkldnn_layer, scale, mask, is_sum, is_wino, is_weight) {}
};

template <typename Dtype>
2 changes: 1 addition & 1 deletion mkldnn.commit
@@ -1 +1 @@
4e333787e0d66a1dca1218e99a891d493dbc8ef1
830a10059a018cd2634d94195140cf2d8790a75a
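
mkldnn.commit pins the MKL-DNN revision that the build scripts fetch, and this commit moves that pin to the new hash shown above. Assuming the usual clone-and-checkout flow — the external/mkldnn destination below is an assumption, not part of this commit — the new pin corresponds to roughly:

```
# Sketch only: check out the MKL-DNN revision pinned by mkldnn.commit.
# The clone destination external/mkldnn is an assumed path, not taken from this diff.
git clone https://github.com/intel/mkl-dnn.git external/mkldnn
cd external/mkldnn
git checkout "$(cat ../../mkldnn.commit)"   # resolves to 830a10059a018cd2634d94195140cf2d8790a75a
```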