Hello, I'm working on a CUDA kernel for a matrix-vector product, and I want to improve its performance with tiling and shared memory.
The problem is that with this code neither the matrix M nor the vector N is loaded correctly.
Do you have any idea how to load a tile from M and a tile from N into the shared memory arrays?
Here M is the matrix, N is the vector, and P is the result of the matrix-vector product.
__global__ void matrixMul(float* P, float* M, float* N, int Mw, int Nw)
{
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    __shared__ float Ms[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Ns[BLOCK_SIZE];

    // ===================================================================
    // Code segment 1
    // Determine the update values for the tile indices in the loop
    // ===================================================================
    int mBegin = Mw * BLOCK_SIZE * by;
    int mEnd   = mBegin + Mw - 1;
    int mStep  = BLOCK_SIZE;
    int nBegin = BLOCK_SIZE * bx;
    //int nStep = BLOCK_SIZE*Nw;
    int nStep  = 1;

    float Psub = 0.0f;

    // ===================================================================
    // Code segment 2
    // Do matrix-matrix multiplication inside a tile
    // ===================================================================
    for (int m = mBegin, n = nBegin; m <= mEnd; m += mStep, n += nStep) {
        // Load a tile from M and N into the shared memory arrays
        Ms[ty][tx] = M[bx*mStep*Mw + m];
        Ns[ty]     = N[by*nStep*Nw + n];

        // Synchronize the threads
        __syncthreads();

        // Multiply the two tiles together, each thread accumulating
        // the partial sum of a single dot product.
        for (int i = 0; i < BLOCK_SIZE; i++) {
            Psub += Ms[i][tx] * Ns[i];
        }

        // Synchronize again.
        __syncthreads();
    }

    // ===================================================================
    // Code segment 3
    // Store the data back to global memory
    // ===================================================================
    int p = Nw * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    P[p + nStep] = Psub;
}
I found a similar example (dealing with square matrices of identical sizes, mind you) that also loads parts of the matrix into shared memory. It seems your declarations are right, and it probably just comes down to the algebra you are using to determine which elements go where.
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];   // Shared memory
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];   // declarations

    int bx = blockIdx.x; int by = blockIdx.y;       // Block and thread IDs
    int tx = threadIdx.x; int ty = threadIdx.y;

    // Identify the row and column of the Pd element to work on
    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;

    float Pvalue = 0;                               // REGISTER!

    // Loop over the Md and Nd tiles required to compute the Pd element
    for (int m = 0; m < Width/TILE_WIDTH; ++m) {
        // Collaborative loading of Md and Nd tiles into shared memory
        Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
        Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += Mds[ty][k] * Nds[k][tx];
        __syncthreads();
    }

    Pd[Row*Width+Col] = Pvalue;
}
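For the matrix-vector case you are actually asking about, only the vector really needs to be staged in shared memory, since each thread reads its own row of M exactly once. Below is a minimal sketch under my own assumptions, not your exact setup: the names matVecMul, rows and cols are mine, M is row-major with rows x cols elements, blocks are one-dimensional with BLOCK_SIZE threads, and each thread produces one element of P.

#define BLOCK_SIZE 16   // assumed tile / block width

__global__ void matVecMul(float* P, const float* M, const float* N, int rows, int cols)
{
    __shared__ float Ns[BLOCK_SIZE];                      // one tile of the vector N

    int row = blockIdx.x * blockDim.x + threadIdx.x;      // row of M handled by this thread
    float sum = 0.0f;

    // Walk over N (and the matching columns of M) one BLOCK_SIZE-wide tile at a time.
    for (int tileStart = 0; tileStart < cols; tileStart += BLOCK_SIZE) {
        int col = tileStart + threadIdx.x;
        Ns[threadIdx.x] = (col < cols) ? N[col] : 0.0f;   // cooperative load of one tile of N
        __syncthreads();

        if (row < rows) {
            for (int i = 0; i < BLOCK_SIZE && tileStart + i < cols; ++i)
                sum += M[row * cols + tileStart + i] * Ns[i];
        }
        __syncthreads();                                  // uniform: every thread reaches this
    }

    if (row < rows)
        P[row] = sum;
}

Launched with something like matVecMul<<<(rows + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(P, M, N, rows, cols), each block loads a tile of N into shared memory once and reuses it for all of its rows, instead of every thread fetching those elements separately from global memory.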