This repository has been archived by the owner on Aug 5, 2022. It is now read-only.

Commit

Merge remote-tracking branch 'remotes/internal/release_1.0.5'
daisyden committed Oct 25, 2017
2 parents ce08100 + 1f7ce1f commit 7a35532
Showing 250 changed files with 58,746 additions and 995 deletions.
8 changes: 6 additions & 2 deletions CMakeLists.txt
@@ -29,8 +29,8 @@ include(cmake/ConfigGen.cmake)
caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA
caffe_option(USE_OPENMP "Build Caffe with OpenMP support" ON )
caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY)
caffe_option(USE_MKL2017_AS_DEFAULT_ENGINE "Use MKL2017 primitives for supported layers" ON)
caffe_option(USE_MKLDNN_AS_DEFAULT_ENGINE "Use MKL-DNN primitives for supported layers" OFF)
caffe_option(USE_MKL2017_AS_DEFAULT_ENGINE "Use MKL2017 primitives for supported layers" OFF)
caffe_option(USE_MKLDNN_AS_DEFAULT_ENGINE "Use MKL-DNN primitives for supported layers" ON)
caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON)
caffe_option(BUILD_python "Build Python wrapper" ON)
set(python_version "2" CACHE STRING "Specify which Python version to use")
@@ -68,6 +68,8 @@ if(UNIX OR APPLE)
# GCC specific flags.
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector-strong")
# auto enable SGD FUSION if gcc version >= 4.9
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_SGD_FUSION")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector")
endif()
@@ -79,6 +81,8 @@ if(UNIX OR APPLE)
# though it uses -pie linker option that require -fPIE during compilation. Checksec
# shows that it generates correct PIE anyway if only -pie is provided.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector")
# Auto enable SGD Fusion if use intel compiler
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_SGD_FUSION")
endif()

# Generic flags.
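The two `caffe_option` defaults above flip the default engine from MKL2017 to MKL-DNN, and `-DENABLE_SGD_FUSION` is now added automatically for GCC >= 4.9 and for the Intel compiler. As a minimal sketch (the option names come from the diff above; grepping the verbose build log is just one way to confirm the define reached the compile line), the new defaults can still be overridden at configure time:

```
# Hypothetical configure-time override of the new engine defaults (cmake build).
mkdir build && cd build
cmake -DCPU_ONLY=1 \
      -DUSE_MKLDNN_AS_DEFAULT_ENGINE=OFF \
      -DUSE_MKL2017_AS_DEFAULT_ENGINE=ON ..
# With GCC >= 4.9 the SGD fusion define should show up on the compile lines:
make VERBOSE=1 2>&1 | grep -m1 ENABLE_SGD_FUSION
```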
25 changes: 14 additions & 11 deletions Makefile
@@ -103,6 +103,7 @@ DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
DYNAMIC_NAME := $(LIB_BUILD_DIR)/$(DYNAMIC_VERSIONED_NAME_SHORT)
COMMON_FLAGS += -DCAFFE_VERSION=$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
COMMON_FLAGS += -std=c++11

##############################
# Get all source files
@@ -318,6 +319,11 @@ else ifeq ($(UNAME), Darwin)
OSX_MINOR_VERSION := $(shell sw_vers -productVersion | cut -f 2 -d .)
endif

# Custom compiler
ifdef CUSTOM_CXX
CXX := $(CUSTOM_CXX)
endif

# Linux
ifeq ($(LINUX), 1)
CXX ?= /usr/bin/g++
@@ -365,19 +371,19 @@ else
ORIGIN := \$$ORIGIN
endif

# Custom compiler
ifdef CUSTOM_CXX
CXX := $(CUSTOM_CXX)
endif

# Compiler flags
ifneq (,$(findstring icpc,$(CXX)))
CXX_HARDENING_FLAGS += -fstack-protector
#Enable SGD FUSION if use intel compiler
COMMON_FLAGS += -DENABLE_SGD_FUSION

else ifneq (,$(findstring clang++,$(CXX)))
CXX_HARDENING_FLAGS += -fPIE -fstack-protector
else ifneq (,$(findstring g++,$(CXX)))
ifeq ($(shell echo | awk '{exit $(GCCVERSION) >= 4.9;}'), 1)
ifeq ($(shell echo | awk '{ print $(GCCVERSION) >= 4.9 }'), 1)
CXX_HARDENING_FLAGS += -fPIE -fstack-protector-strong
#Enable SGD FUSION if gcc version >= 4.9
COMMON_FLAGS += -DENABLE_SGD_FUSION
else
CXX_HARDENING_FLAGS += -fPIE -fstack-protector
endif
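For reference, the reworked `awk` version test above can be reproduced by hand. This is only an illustrative sketch: `GCCVERSION` is normally computed by the Makefile itself, and deriving it from `g++ -dumpversion` here is an assumption.

```
# Reproducing the Makefile's GCC version test in a shell (illustrative only).
GCCVERSION=$(g++ -dumpversion | cut -f1,2 -d.)
echo | awk "{ print $GCCVERSION >= 4.9 }"   # prints 1 when the strong-stack-protector/SGD-fusion branch is taken
```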
@@ -500,7 +506,7 @@ ifeq ($(MKL_EXTERNAL), 1)
MKL_LDFLAGS+=-Wl,-rpath,$(MKLROOT)/lib
endif

COMMON_FLAGS += -DUSE_MKL
COMMON_FLAGS += -DUSE_MKL -DMKL_ILP64
BLAS_INCLUDE ?= $(MKLROOT)/include
BLAS_LIB ?= $(MKLROOT)/lib $(MKLROOT)/lib/intel64

@@ -547,10 +553,6 @@ LIBRARY_DIRS += $(LIB_BUILD_DIR)
# Automatic dependency generation (nvcc is handled separately)
CXXFLAGS += -MMD -MP

##########SGD FUSION#######################
ifeq ($(ENABLE_SGD_FUSION), 1)
COMMON_FLAGS += -DENABLE_SGD_FUSION
endif
###########################################
#
# Complete build flags.
@@ -827,6 +829,7 @@ $(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)
touch $(PY_PROTO_INIT)

clean: mkldnn_clean
@echo "Will download the new version of MKL2017 and MLSL when clean and prepare the environment."
@- $(RM) -rf $(ALL_BUILD_DIRS)
@- $(RM) -rf $(OTHER_BUILD_DIR)
@- $(RM) -rf $(BUILD_DIR_LINK)
7 changes: 2 additions & 5 deletions Makefile.config.example
@@ -43,13 +43,13 @@
# CPU-only switch (uncomment to build without GPU support).
CPU_ONLY := 1

USE_MKL2017_AS_DEFAULT_ENGINE := 1
# USE_MKL2017_AS_DEFAULT_ENGINE := 1
# or put this at the top of your train_val.prototxt or solver.prototxt file:
# engine: "MKL2017"
# or use this option with caffe tool:
# -engine "MKL2017"

# USE_MKLDNN_AS_DEFAULT_ENGINE flag is OBSOLETE
USE_MKLDNN_AS_DEFAULT_ENGINE := 1
# Put this at the top of your train_val.prototxt or solver.prototxt file:
# engine: "MKLDNN"
# or use this option with caffe tool:
@@ -170,8 +170,5 @@ DISTRIBUTE_DIR := distribute
# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0

# Uncomment for enabling SGD fusion
# ENABLE_SGD_FUSION := 1

# enable pretty build (comment to see full commands)
Q ?= @
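With the build-time engine switches above commented out, the engine is selected at run time instead. A minimal sketch of the two mechanisms described in the comments (model and solver paths are placeholders):

```
# 1) Select the engine on the caffe tool command line:
caffe train --solver=models/bvlc_alexnet/solver.prototxt -engine "MKLDNN"

# 2) Or add this line at the top of train_val.prototxt / solver.prototxt:
#      engine: "MKLDNN"
```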
2 changes: 1 addition & 1 deletion cmake/Cuda.cmake
@@ -145,7 +145,7 @@ macro(caffe_cuda_compile objlist_variable)
endforeach()

if(UNIX OR APPLE)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC -std=c++11)
endif()

if(APPLE)
98 changes: 49 additions & 49 deletions docker/README.md
@@ -1,49 +1,49 @@
# Building Caffe using standalone Dockerfile

The `standalone` subfolder contains docker files for generating both CPU and GPU executable images for Caffe. The images can be built using make, or by running:

```
docker build -t caffe:cpu standalone/cpu-ubuntu
```
for example. (Here `centos` can be substituted for `ubuntu` and `gpu` for `cpu`, but to keep the readme simple, only the `cpu` case will be discussed in detail.)

Note that the GPU standalone image requires a CUDA 7.5-capable driver to be installed on the host system and [nvidia-docker] for running the Docker containers. In that case it is generally sufficient to use `nvidia-docker` instead of `docker` in any of the commands mentioned.

# Running Caffe using the docker image

In order to test the Caffe image, run:
```
docker run -ti caffe:cpu caffe --version
```
which should show a message like:
```
caffe version 1.0.0-rc3
```

One can also build and run the Caffe tests in the image using:
```
docker run -ti caffe:cpu bash -c "cd /opt/caffe/build; make runtest"
```

In order to get the most out of the caffe image, some more advanced `docker run` options could be used. For example, running:
```
docker run -ti caffe:cpu caffe time -model /opt/caffe/models/bvlc_alexnet/deploy.prototxt -engine MKLDNN
```
will measure the performance of AlexNet. You can run `caffe train` as well. Note that docker runs all commands as root by default, and thus any output files (e.g. snapshots) generated will be owned by the root user. In order to ensure that the current user is used instead, the following command can be used:
```
docker run -ti --volume=$(pwd):/workspace -u $(id -u):$(id -g) caffe:cpu caffe train --solver=/opt/caffe/models/bvlc_alexnet/solver.prototxt -engine MKLDNN
```
where the `-u` Docker command line option runs the commands in the container as the specified user, and the shell command `id` is used to determine the user and group ID of the current user. Note that the Caffe docker images have `/workspace` defined as the default working directory; this can be overridden using the `--workdir=` Docker command line option. Also note that you need to prepare the dataset before training.

# Other use-cases

Although running the `caffe` command in the docker containers as described above serves many purposes, the container can also be used for more interactive use cases. For example, specifying `bash` as the command instead of `caffe` yields a shell that can be used for interactive tasks. (Since the caffe build requirements are included in the container, this can also be used to build and run local versions of caffe).

Another use case is to run python scripts that depend on `caffe`'s Python modules. Using the `python` command instead of `bash` or `caffe` will allow this, and an interactive interpreter can be started by running:
```
docker run -ti caffe:cpu python
```
(`ipython` is also available in the container).

Since the `caffe/python` folder is also added to the path, the utility executable scripts defined there can also be used as executables. This includes `draw_net.py`, `classify.py`, and `detect.py`.
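For example, a hypothetical invocation of `draw_net.py` from inside the container might look like the following (the output filename is a placeholder, and rendering the graph may additionally require graphviz in the image):

```
docker run -ti --volume=$(pwd):/workspace caffe:cpu \
    python /opt/caffe/python/draw_net.py /opt/caffe/models/bvlc_alexnet/deploy.prototxt alexnet.png
```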

112 changes: 56 additions & 56 deletions docker/standalone/cpu-centos/Dockerfile
@@ -1,56 +1,56 @@
FROM centos:7
MAINTAINER [email protected]

#ENV http_proxy proxy:port
#ENV https_proxy proxy:port

RUN rpm -iUvh http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm

RUN yum install -y \
redhat-rpm-config \
tar \
findutils \
make \
gcc-c++ \
cmake \
git \
wget \
atlas-devel \
boost-devel \
gflags-devel \
glog-devel \
hdf5-devel \
leveldb-devel \
lmdb-devel \
opencv-devel \
protobuf-devel \
snappy-devel \
protobuf-compiler \
freetype-devel \
libpng-devel \
python-devel \
python-numpy \
python-pip \
python-scipy \
gcc-gfortran \
libjpeg-turbo-devel

RUN yum clean all
ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/intel/caffe.git . && \
for req in $(cat python/requirements.txt) pydot; do pip --no-cache-dir install $req; done && \
mkdir build && cd build && \
cmake -DCPU_ONLY=1 -DCMAKE_BUILD_TYPE=Release .. && \
make all -j"$(nproc)"

ENV PYCAFFE_ROOT $CAFFE_ROOT/python
ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig

WORKDIR /workspace
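As with the Ubuntu image in the README above, this CentOS variant can be built and smoke-tested from the `docker` directory; a minimal sketch (the image tag is arbitrary):

```
docker build -t caffe:cpu-centos standalone/cpu-centos
docker run -ti caffe:cpu-centos caffe --version
```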