I'm trying to process a video frame by frame. To do this I want to create a texture containing the current frame and pass it to the kernel. The frames are 1440x1080 pixels, with each pixel represented by an unsigned char, i.e. 8 bits.
I followed the instructions, but my program always fails at the point where the texture object is created, with error code 0x11: "invalid argument".
Here is my code:
// allocate cuda array in device memory
cudaChannelFormatDesc channelDesc =
    cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
cudaArray* cuArray;
cudaMallocArray(&cuArray, &channelDesc, width, height);
// copy frame_in to device memory
int size = width * height * sizeof(char);
cudaMemcpyToArray(cuArray, 0, 0, frame_in.data, size, cudaMemcpyHostToDevice);
// specify texture
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
// specify texture object parameters
cudaTextureDesc texDesc;
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
// !FAILS! create texture object
cudaTextureObject_t texObj = NULL;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
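As a side note, wrapping the runtime calls in an error check makes this kind of failure much easier to localize, since it prints the human-readable error string at the exact call that failed. A minimal sketch (the CHECK macro is my own helper name, not part of the CUDA API):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message if a CUDA runtime call fails.
#define CHECK(call)                                               \
    do {                                                          \
        cudaError_t err = (call);                                 \
        if (err != cudaSuccess) {                                 \
            fprintf(stderr, "%s failed: %s\n", #call,             \
                    cudaGetErrorString(err));                     \
            exit(EXIT_FAILURE);                                   \
        }                                                         \
    } while (0)

// e.g.
// CHECK(cudaMallocArray(&cuArray, &channelDesc, width, height));
// CHECK(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL));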
Got it: I forgot the memset() for the cudaTextureDesc. It should read:
// specify texture object parameters
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
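For completeness, here is a sketch of how the texture object could then be sampled in a kernel. The kernel name, the 16x16 block size and the device output buffer d_out are placeholders of mine, not taken from the question; since normalizedCoords is 1 the texture is addressed with coordinates in [0, 1), and with cudaReadModeElementType tex2D<unsigned char>() returns the raw 8-bit value:

// Kernel sketch: read each pixel from the texture and write it to a
// device buffer (stand-in for the actual per-frame processing).
__global__ void processFrame(cudaTextureObject_t tex, unsigned char* out,
                             int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;

    // normalizedCoords = 1: sample at the texel center, mapped into [0, 1)
    float u = (x + 0.5f) / width;
    float v = (y + 0.5f) / height;
    unsigned char pixel = tex2D<unsigned char>(tex, u, v);

    out[y * width + x] = pixel;
}

// Launch and cleanup (d_out is assumed to be a device allocation of
// width * height bytes made elsewhere).
dim3 block(16, 16);
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
processFrame<<<grid, block>>>(texObj, d_out, width, height);

cudaDestroyTextureObject(texObj);
cudaFreeArray(cuArray);

Unrelated to the crash: on recent toolkits cudaMemcpyToArray is deprecated; cudaMemcpy2DToArray with an explicit source pitch is the usual replacement.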