In one of my projects, I'm seeing incorrect results from CUB's DeviceReduce::ReduceByKey, while thrust::reduce_by_key with the same inputs and outputs produces the expected results. Here is a minimal reproducer:
#include "cub/cub.cuh"
#include <vector>
#include <iostream>
#include <cuda.h>
struct AddFunctor {
__host__ __device__ __forceinline__
float operator()(const float & a, const float & b) const {
return a + b;
}
} reduction_op;
int main() {
int n = 7680;
std::vector < uint64_t > keys_h(n);
for (int i = 0; i < 4000; i++) keys_h[i] = 1;
for (int i = 4000; i < 5000; i++) keys_h[i] = 2;
for (int i = 5000; i < 7680; i++) keys_h[i] = 3;
uint64_t * keys;
cudaMalloc(&keys, sizeof(uint64_t) * n);
cudaMemcpy(keys, &keys_h[0], sizeof(uint64_t) * n, cudaMemcpyDefault);
uint64_t * unique_keys;
cudaMalloc(&unique_keys, sizeof(uint64_t) * n);
std::vector < float > values_h(n);
for (int i = 0; i < n; i++) values_h[i] = 1.0;
float * values;
cudaMalloc(&values, sizeof(float) * n);
cudaMemcpy(values, &values_h[0], sizeof(float) * n, cudaMemcpyDefault);
float * aggregates;
cudaMalloc(&aggregates, sizeof(float) * n);
int * remaining;
cudaMalloc(&remaining, sizeof(int));
size_t size = 0;
void * buffer = NULL;
cub::DeviceReduce::ReduceByKey(
buffer,
size,
keys,
unique_keys,
values,
aggregates,
remaining,
reduction_op,
n);
cudaMalloc(&buffer, sizeof(char) * size);
cub::DeviceReduce::ReduceByKey(
buffer,
size,
keys,
unique_keys,
values,
aggregates,
remaining,
reduction_op,
n);
int remaining_h;
cudaMemcpy(&remaining_h, remaining, sizeof(int), cudaMemcpyDefault);
std::vector < float > aggregates_h(remaining_h);
cudaMemcpy(&aggregates_h[0], aggregates, sizeof(float) * remaining_h, cudaMemcpyDefault);
for (int i = 0; i < remaining_h; i++) {
std::cout << i << ", " << aggregates_h[i] << std::endl;
}
cudaFree(buffer);
cudaFree(keys);
cudaFree(unique_keys);
cudaFree(values);
cudaFree(aggregates);
cudaFree(remaining);
}
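For reference, the Thrust version that gives me the expected sums looks roughly like this (a sketch, assuming the same device arrays as above; thrust::reduce_by_key returns a pair of iterators marking the ends of the two output ranges, and its default binary op is addition, matching AddFunctor):

#include <thrust/device_ptr.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>

// Wrap the raw device pointers so Thrust treats them as device iterators.
thrust::device_ptr<uint64_t> keys_d(keys);
thrust::device_ptr<uint64_t> unique_keys_d(unique_keys);
thrust::device_ptr<float> values_d(values);
thrust::device_ptr<float> aggregates_d(aggregates);

thrust::pair<thrust::device_ptr<uint64_t>, thrust::device_ptr<float> > ends =
    thrust::reduce_by_key(keys_d, keys_d + n, values_d,
                          unique_keys_d, aggregates_d);
int num_segments = ends.first - unique_keys_d;  // expect 3 segments here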
When I compile with "-gencode arch=compute_35,code=sm_35" (for a Kepler GTX Titan), I get wrong results, but when I leave the flag out entirely, it works:
$ nvcc cub_test.cu
$ ./a.out
0, 4000
1, 1000
2, 2680
$ nvcc cub_test.cu -gencode arch=compute_35,code=sm_35
$ ./a.out
0, 4000
1, 1000
2, 768
I use a handful of other CUB calls without issue; only this one misbehaves. I've also run this code on a GTX 1080 Ti (built with compute_61, sm_61) and see the same behavior.
Is the right solution to omit these compiler flags?
Sounds like you should file a bug report at the CUB repository issues page.
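Before filing, it may also be worth ruling out a plain API error: the repro ignores the cudaError_t that cub::DeviceReduce::ReduceByKey (and the cudaMalloc/cudaMemcpy calls) return. A minimal checking sketch, using a hypothetical CUDA_CHECK macro:

#include <cstdio>
#include <cstdlib>

// Hypothetical helper, not part of CUB: abort with a message if a
// CUDA runtime (or CUB) call reports an error.
#define CUDA_CHECK(call)                                          \
  do {                                                            \
    cudaError_t err_ = (call);                                    \
    if (err_ != cudaSuccess) {                                    \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
              cudaGetErrorString(err_));                          \
      exit(1);                                                    \
    }                                                             \
  } while (0)

// Example use:
// CUDA_CHECK(cub::DeviceReduce::ReduceByKey(buffer, size, keys, unique_keys,
//                                           values, aggregates, remaining,
//                                           reduction_op, n));

If every call returns cudaSuccess, that strengthens the case that this is a CUB bug rather than a usage error.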
Edit: I can reproduce this issue:
[joeuser@myhost:/tmp]$ nvcc -I/opt/cub -o a a.cu
nvcc warning : The 'compute_20', 'sm_20', and 'sm_21' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[joeuser@myhost:/tmp]$ ./a
0, 4000
1, 1000
2, 2680
[joeuser@myhost:/tmp]$ nvcc -I/opt/cub -o a a.cu -gencode arch=compute_30,code=sm_30
[joeuser@myhost:/tmp]$ ./a
0, 4000
1, 1000
2, 512
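For completeness, you can confirm what the device actually is (and hence which -gencode target matches it) with cudaGetDeviceProperties; a small sketch:

#include <cstdio>
#include <cuda_runtime.h>

// Print the name and compute capability of device 0, e.g. "sm_30" or "sm_35".
int main() {
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  printf("%s: sm_%d%d\n", prop.name, prop.major, prop.minor);
  return 0;
}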