cuda, tesla

CUDA does not modify input if grid dimension is too large


Consider the following code:

#include <iostream>
#include <vector>
#include <cstdint>   // uint64_t
#include <cstdio>    // fprintf
#include <cstdlib>   // exit

#include <cuda.h>
#include <cuda_runtime_api.h>

using namespace std;

// Thread 0 of each block writes a 1 into that block's slot of A.
__global__ void reduce_or(char* A) {
    if(threadIdx.x == 0) {
      A[blockIdx.x] = 1;
    }
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess) 
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

int main(int argc, char** argv) {
  const uint64_t group_size = 1 << 16; //1 << 15 would work
  char *dr;

  std::vector<char> result;
  result.resize(group_size, 0);

  gpuErrchk(cudaMalloc((void **)&dr, group_size));
  gpuErrchk(cudaMemcpy(dr, result.data(), group_size, cudaMemcpyHostToDevice));

  reduce_or<<<group_size, 32>>>(dr);

  gpuErrchk(cudaDeviceSynchronize());
  gpuErrchk(cudaMemcpy(result.data(), dr, group_size, cudaMemcpyDeviceToHost));

  for(uint64_t kk = 0; kk < group_size; ++kk) {
    if(result[kk]) {
      cout << std::dec << kk << std::hex << "  " << (unsigned long) result[kk] << endl;
    }
  }
}

If the number of blocks is greater than or equal to 65536, the input array is not modified, although deviceQuery from the CUDA code samples says that the maximum grid size in the x dimension is much larger than 65536:

Device 0: "Tesla K20Xm"
  CUDA Driver Version / Runtime Version          6.5 / 6.5
  CUDA Capability Major/Minor version number:    3.5
  Total amount of global memory:                 5760 MBytes (6039339008 bytes)
  [...]
  Maximum number of threads per multiprocessor:  2048
  Maximum number of threads per block:           1024
  Max dimension size of a thread block (x,y,z): (1024, 1024, 64)
  Max dimension size of a grid size    (x,y,z): (2147483647, 65535, 65535)
  [...]

Am I doing anything wrong, or does the hardware lie about its capabilities? Is this a known bug? Shouldn't an error be thrown?


Solution

  • Check your compiler options: to get a grid x-dimension larger than 65535, you must compile for a minimum compute capability of 3.0, by passing the option compute_30,sm_30 (see the example nvcc invocation sketched below). If you do not specify an architecture, nvcc compiles for a low default compute capability (2.0 or lower), and the 2.x grid limit then applies even though your K20Xm is a compute-capability 3.5 device.

    Look at Table 13 in the CUDA C Programming Guide (http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities). As you can see, for 2.x devices:

    Maximum x-dimension of a grid of thread blocks: 65535
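
    For example (a sketch; the source file name reduce_or.cu is just a placeholder), you could compile for compute capability 3.0, or directly for the K20Xm's native 3.5:

    nvcc -gencode arch=compute_30,code=sm_30 -o reduce_or reduce_or.cu
    nvcc -gencode arch=compute_35,code=sm_35 -o reduce_or reduce_or.cu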
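
  • Regarding "Shouldn't an error be thrown?": a kernel launch that fails its configuration check does not abort the program; it only sets an error status that has to be queried right after the launch. In the code above, cudaDeviceSynchronize() returned success because the kernel was never actually launched. A minimal sketch, reusing the gpuErrchk macro from the question, that should surface the error (typically "invalid configuration argument"):

    reduce_or<<<group_size, 32>>>(dr);
    gpuErrchk(cudaPeekAtLastError());   // reports launch errors such as "invalid configuration argument"
    gpuErrchk(cudaDeviceSynchronize()); // reports errors that occur while the kernel runs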