[REVIEW] Update to latest version of RAPIDS 0.13 (#81)
* make the code 0.13 compatible
* fixed the unit test bugs
* fixed portfolio notebook
* fixed xgboost node
* get rid of the warning
* fixed the flake8 bugs
* cupy is installed by default
* fixed the remaining notebooks
* remove the default jupyter-lab entrypoint
* activate rapids
* added unit test to cover the return feature
* added 3 node tests
* using the series
* fixed the groupby
* fixed the port bug
yidong72 authored May 19, 2020
1 parent 27ae2c9 commit 273d50f
Showing 35 changed files with 1,022 additions and 1,362 deletions.
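The change repeated across the cuindicator modules below is the cuDF Series API update: under RAPIDS 0.13 a cudf.Series converts to a numba device array directly, with no detour through the .data column buffer. A minimal sketch of the pattern, assuming cuDF 0.13 (the helper name to_device_array and the sample data are illustrative, not from this commit):

import cudf
import numba.cuda

def to_device_array(input_arr):
    """Accept a numba device array or a cudf.Series (cuDF 0.13)."""
    if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
        return input_arr
    # cuDF 0.13: call to_gpu_array() on the Series itself,
    # not on input_arr.data as the 0.11-era code did.
    return input_arr.to_gpu_array()

series = cudf.Series([1.0, 2.0, 3.0, 4.0])
gpu_in = to_device_array(series)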
8 changes: 2 additions & 6 deletions docker/build.sh
@@ -32,24 +32,21 @@ echo -e "\nPlease, select your cuda version:\n" \

 read -p "Enter your option and hit return [1]-3: " CUDA_VERSION

-RAPIDS_VERSION="0.11"
+RAPIDS_VERSION="0.13"

 CUDA_VERSION=${CUDA_VERSION:-1}
 case $CUDA_VERSION in
 2)
     echo "cuda 10.0 selected."
     CONTAINER_VER='10.0'
-    CUPY='cupy-cuda100'
     ;;
 3)
     echo "cuda 10.1.2 selected."
     CONTAINER_VER='10.1'
-    CUPY='cupy-cuda101'
     ;;
 *)
     echo "cuda 9.2 selected."
     CONTAINER_VER='9.2'
-    CUPY='cupy-cuda92'
     ;;
 esac
@@ -82,8 +82,6 @@ SHELL ["bash","-c"]
 #
 # Additional python libs
 #
-RUN source activate rapids \
-    && pip install $CUPY
 RUN source activate rapids \
     && cd /rapids/gQuant \
@@ -106,6 +101,7 @@ EXPOSE 8888
 EXPOSE 8787
 EXPOSE 8786
 WORKDIR /rapids
+ENTRYPOINT /bin/bash -c 'source activate rapids; /bin/bash'
 EOF

 docker build -f $D_FILE -t $D_CONT .
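Since the RAPIDS 0.13 image already ships cupy (per the commit message above), the per-CUDA-version pip install is gone from the Dockerfile. A quick sanity check to run inside the built container is sketched below; it only assumes that cupy imports and that a GPU is visible:

# run inside the container after `source activate rapids`
import cupy

# cupy now comes bundled with the RAPIDS 0.13 environment, so no
# cupy-cudaXX pip package is selected at build time any more.
x = cupy.arange(5) ** 2
print(cupy.__version__, int(x.sum()))  # expect something like "7.x.x 30"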
6 changes: 3 additions & 3 deletions gquant/cuindicator/ewm.py
@@ -49,8 +49,8 @@ def kernel(in_arr, out_arr, average_length, span, arr_len, thread_tile,
     for j in range(0, average_length - 1, block_size):
         if (((tx + j) < average_length - 1) and
                 (starting_id - average_length + 1 + tx + j >= 0)):
-                shared[tx + j] = \
-                    in_arr[starting_id - average_length + 1 + tx + j]
+            shared[tx + j] = \
+                in_arr[starting_id - average_length + 1 + tx + j]
     cuda.syncthreads()
     # slice the shared memory for each threads
     start_shared = tx * thread_tile
@@ -95,7 +95,7 @@ def __init__(self, span, input_arr, min_periods=None, thread_tile=48,
         if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
             self.gpu_in = input_arr
         else:
-            self.gpu_in = input_arr.data.to_gpu_array()
+            self.gpu_in = input_arr.to_gpu_array()
         if min_periods is None:
             self.min_periods = span
         else:
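With that change the Ewm helper takes a cudf.Series directly. A minimal usage sketch, assuming the import path from this repository and a mean() accessor (the method name is an assumption, not shown in this diff):

import cudf
from gquant.cuindicator.ewm import Ewm

prices = cudf.Series([10.0, 10.5, 10.2, 10.8, 11.0])
# The Series is handed over as-is; under cuDF 0.13 Ewm calls
# to_gpu_array() on it internally (see the else branch above).
ewm = Ewm(3, prices)     # span=3
smoothed = ewm.mean()    # assumed API; yields a device array of EWM values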
4 changes: 2 additions & 2 deletions gquant/cuindicator/frac_diff.py
@@ -189,7 +189,7 @@ def fractional_diff(input_arr, d=0.5, floor=1e-3, min_periods=None,
     if isinstance(input_arr, numba.cuda.cudadrv.devicearray.DeviceNDArray):
         gpu_in = input_arr
     else:
-        gpu_in = input_arr.data.to_gpu_array()
+        gpu_in = input_arr.to_gpu_array()

     # compute the weights for the fractional difference
     weights = get_weights_floored(d=d,
@@ -269,6 +269,6 @@ def port_fractional_diff(asset_indicator, input_arr, d=0.5, floor=1e-3,
                                  min_periods=min_periods,
                                  thread_tile=thread_tile,
                                  number_of_threads=number_of_threads)
-    port_mask_nan(asset_indicator.data.to_gpu_array(), out, 0,
+    port_mask_nan(asset_indicator.to_gpu_array(), out, 0,
                   len(weights) - 1)
     return out, weights
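The same Series-level conversion applies here. A sketch of calling fractional_diff with a cudf.Series; that it returns an (output, weights) pair mirrors port_fractional_diff above and is an assumption for the top-level function:

import cudf
from gquant.cuindicator.frac_diff import fractional_diff

close = cudf.Series([100.0, 101.2, 100.7, 102.3, 103.0, 102.1])
# d sets the fractional-difference order; floor truncates the weight
# series. A cudf.Series is accepted directly under cuDF 0.13.
out, weights = fractional_diff(close, d=0.5, floor=1e-3)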