SmallSortTestBindings.cu
// Copyright 2004-present Facebook. All Rights Reserved.

#include "cuda/SmallSortTestBindings.cuh"
#include "cuda/DeviceTensor.cuh"
#include "cuda/SmallSort.cuh"

#include <utility>
#include <vector>

using namespace std;

namespace facebook { namespace cuda {
// Kernel wrapper: warp-level sort of values only, using the GreaterThan comparator.
__global__ void
sortDevice(DeviceTensor<float, 1> data, DeviceTensor<float, 1> out) {
  warpSort<float, GreaterThan<float> >(data, out);
}

// Kernel wrapper: warp-level sort of (value, index) pairs, writing sorted
// values and their original indices to separate outputs.
__global__ void
sortDevice(DeviceTensor<float, 1> data,
           DeviceTensor<float, 1> out,
           DeviceTensor<int, 1> indices) {
  warpSort<float, int, GreaterThan<Pair<float, int> > >(data, out, indices);
}
// Host-side test binding: copies `data` to the device, runs the warp sort
// kernel, and returns the sorted values.
vector<float>
sort(const vector<float>& data) {
  const size_t sizeBytes = data.size() * sizeof(float);

  // Device input buffer.
  float* devFloat = NULL;
  cudaMalloc(&devFloat, sizeBytes);
  cudaMemcpy(devFloat, data.data(), sizeBytes, cudaMemcpyHostToDevice);

  // Device output buffer, zero-initialized.
  float* devResult = NULL;
  cudaMalloc(&devResult, sizeBytes);
  cudaMemset(devResult, 0, sizeBytes);

  // Launch a single block of 32 threads (one warp).
  dim3 grid(1);
  dim3 block(32);

  int dataSizes[] = { (int) data.size() };
  int outSizes[] = { (int) data.size() };

  sortDevice<<<grid, block>>>(
    DeviceTensor<float, 1>(devFloat, dataSizes),
    DeviceTensor<float, 1>(devResult, outSizes));

  // Copy the sorted result back and release device memory.
  vector<float> vals(data.size());
  cudaMemcpy(vals.data(), devResult, sizeBytes, cudaMemcpyDeviceToHost);

  cudaFree(devFloat);
  cudaFree(devResult);

  return vals;
}
// Host-side test binding: like sort(), but also returns each sorted value's
// original position as (value, index) pairs.
vector<pair<float, int> >
sortWithIndices(const std::vector<float>& data) {
  const size_t sizeBytes = data.size() * sizeof(float);
  const size_t sizeIndicesBytes = data.size() * sizeof(int);

  // Device input buffer.
  float* devFloat = NULL;
  cudaMalloc(&devFloat, sizeBytes);
  cudaMemcpy(devFloat, data.data(), sizeBytes, cudaMemcpyHostToDevice);

  // Device output buffers for sorted values and their indices, zero-initialized.
  float* devResult = NULL;
  cudaMalloc(&devResult, sizeBytes);
  cudaMemset(devResult, 0, sizeBytes);

  int* devIndices = NULL;
  cudaMalloc(&devIndices, sizeIndicesBytes);
  cudaMemset(devIndices, 0, sizeIndicesBytes);

  // Launch a single block of 32 threads (one warp).
  dim3 grid(1);
  dim3 block(32);

  int dataSizes[] = { (int) data.size() };
  int outSizes[] = { (int) data.size() };

  sortDevice<<<grid, block>>>(
    DeviceTensor<float, 1>(devFloat, dataSizes),
    DeviceTensor<float, 1>(devResult, outSizes),
    DeviceTensor<int, 1>(devIndices, outSizes));

  // Copy both results back and release device memory.
  vector<float> vals(data.size());
  cudaMemcpy(vals.data(), devResult, sizeBytes, cudaMemcpyDeviceToHost);

  vector<int> indices(data.size());
  cudaMemcpy(indices.data(), devIndices, sizeIndicesBytes, cudaMemcpyDeviceToHost);

  cudaFree(devFloat);
  cudaFree(devResult);
  cudaFree(devIndices);

  // Zip values and indices into (value, index) pairs.
  vector<pair<float, int> > result;
  for (size_t i = 0; i < data.size(); ++i) {
    result.push_back(make_pair(vals[i], indices[i]));
  }

  return result;
}
} } // namespace
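
A minimal sketch of how these bindings might be driven from a host-side test, assuming the declarations in cuda/SmallSortTestBindings.cuh and a CUDA-capable device. The main() harness and the sample input values are illustrative assumptions; only sort() and sortWithIndices() come from the file above, and supported input sizes depend on what warpSort accepts.

// Hypothetical usage sketch (illustrative only, not part of the original file).
#include "cuda/SmallSortTestBindings.cuh"

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Sample input; size constraints depend on warpSort's supported sizes.
  std::vector<float> input = { 3.0f, 1.0f, 4.0f, 1.5f, 9.0f };

  // Sort values only.
  std::vector<float> sorted = facebook::cuda::sort(input);

  // Sort values and recover each value's original position.
  std::vector<std::pair<float, int> > sortedWithIdx =
    facebook::cuda::sortWithIndices(input);

  for (size_t i = 0; i < sorted.size(); ++i) {
    std::printf("%f (from index %d)\n",
                sortedWithIdx[i].first, sortedWithIdx[i].second);
  }
  return 0;
}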