Tags: c++, opengl, glew, glfw

OpenGL shader doesn't work correctly on an AMD machine


Problem:

What I've tried so far:

Video Cards:

Duplicated?

Code:

I'm using GLFW/GLEW.

GLFW and GLEW init

if (!glfwInit())
    return;
GLFWwindow * window = glfwCreateWindow(wndWidth, wndHeight, "Window", NULL, NULL);
if (!window){
    glfwTerminate();
    return;
}
glfwMakeContextCurrent(window);
if (glewInit() != GLEW_OK){
    std::cout << "ERROR: glewInit failed!" << std::endl;
    return;
}
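One thing worth noting: there are no glfwWindowHint calls before glfwCreateWindow, so each driver picks its own default context version and profile, and AMD and Nvidia defaults can differ. A sketch pinning the context to something that matches the #version 120 shaders used further down (my assumption, not part of the original code):

// Sketch: request a fixed context version before glfwCreateWindow so
// both vendors create the same kind of context (GLSL 120 shaders).
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);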

Main loop: render() first draws using the fixed pipeline and then draws using the shader.

while (!glfwWindowShouldClose(window))
{
    render();
    glfwSwapBuffers(window);
    glfwPollEvents();
    parseEvents();
    glCheckError("initWindow@MapViewer");
}
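glCheckError is not shown in the question; a minimal sketch of what such a helper might look like, assuming it just drains the GL error queue and logs each pending code with the supplied tag:

// Hypothetical helper (the original is not shown): drain the GL
// error queue and log every pending error with the caller's tag.
void glCheckError(const char * tag)
{
    GLenum err;
    while ((err = glGetError()) != GL_NO_ERROR)
        std::cout << "GL error 0x" << std::hex << err << std::dec << " at " << tag << std::endl;
}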

Initializing the VBOs

glGenBuffers(3, buffers);
//Vertices (glBufferData takes a size in bytes, hence the sizeof(GLfloat))
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glBufferData(GL_ARRAY_BUFFER, num_vertices*3*sizeof(GLfloat), attr_vertices.data(), GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
//Color
glBindBuffer(GL_ARRAY_BUFFER, buffers[1]);
glBufferData(GL_ARRAY_BUFFER, num_vertices*4*sizeof(GLfloat), attr_colors.data(), GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0);
//Size
glBindBuffer(GL_ARRAY_BUFFER, buffers[2]);
glBufferData(GL_ARRAY_BUFFER, num_vertices*sizeof(GLfloat), attr_size.data(), GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, 0, 0);
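Also note that no vertex array object is bound here. A compatibility context supplies a default VAO, but on a strict core-profile context (AMD drivers tend to be stricter about this) calling glVertexAttribPointer with no VAO bound is an error. A minimal sketch, assuming a GL 3.0+ context is available:

// Sketch: create and bind a VAO before setting attribute pointers;
// core-profile contexts reject glVertexAttribPointer on VAO 0.
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// ...then run the glGenBuffers / glVertexAttribPointer code above...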

Shaders

std::string vertexSRC = "#version 120\n"
    "attribute vec3 vertex;\n"
    "attribute vec4 color;\n"
    "attribute float size;\n"
    "uniform mat4 matrix;\n"
    "uniform vec4 bb;\n"
    "uniform float size_mod;\n"
    "varying vec4 vColor;\n"        
    "void main(void)\n"
    "{\n"
    "    vec3 w = vec3((vertex.x-bb.x)/bb.z*2-1, (vertex.y-bb.y)/bb.w*2-1, vertex.z);\n"
    "    gl_Position = vec4(w, 1.0);\n"
    "    vColor = color;\n"
    "    gl_PointSize = size*size_mod;\n"
    "}\n";
std::string fragmentSRC = "#version 120\n"
    "varying vec4 vColor;\n"
    "uniform sampler2D texture;\n"
    "void main()\n"
    "{\n"
    "    vec4 w = texture2D(texture, vec2(gl_PointCoord.x, 1.0-gl_PointCoord.y)) * vColor;\n"
    "    gl_FragColor = w;\n"
    "}\n";

Initializing Shaders

vertexShader = glCreateShader(GL_VERTEX_SHADER);
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
const GLchar * vtx = vertexSRC.c_str();
const GLchar * frg = fragmentSRC.c_str();
glShaderSource(vertexShader, 1, &vtx, NULL);
glShaderSource(fragmentShader, 1, &frg, NULL);
GLint vst, fst;
glCompileShader(vertexShader);
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &vst);
glCompileShader(fragmentShader);
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &fst);
if (vst != GL_TRUE || fst != GL_TRUE){
    std::cout << "ERROR compiling shaders: " << vst << " " << fst << std::endl;
    return;
}
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
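Only the compile status flags are checked above, and the link result is not checked at all; the info logs usually contain the driver's actual error message. A sketch of fetching the link log (fixed-size buffer for brevity):

// Sketch: check the link status and dump the info log, which holds
// the driver's human-readable error message.
GLint linked;
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &linked);
if (linked != GL_TRUE) {
    GLchar log[1024];
    glGetProgramInfoLog(shaderProgram, sizeof(log), NULL, log);
    std::cout << "ERROR linking program: " << log << std::endl;
}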

Getting Uniform and Attribute Locations

glUseProgram(shaderProgram);
l_matrix = glGetUniformLocation(shaderProgram, "matrix");
l_bbLocation = glGetUniformLocation(shaderProgram, "bb");
l_texture = glGetUniformLocation(shaderProgram, "texture");
l_sizeMod = glGetUniformLocation(shaderProgram, "size_mod");
l_vertex = glGetAttribLocation(shaderProgram, "vertex");
l_color = glGetAttribLocation(shaderProgram, "color");
l_size = glGetAttribLocation(shaderProgram, "size");
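Note that the attribute locations queried here are never used by the render call below, which hard-codes indices 0, 1 and 2; a driver is free to assign different indices (this turns out to be the AMD problem, see the Solution). An alternative sketch that pins the indices before linking, so the hard-coded values stay valid:

// Sketch: bind the attribute indices explicitly *before* linking,
// so the hard-coded 0/1/2 in the render call are guaranteed valid.
glBindAttribLocation(shaderProgram, 0, "vertex");
glBindAttribLocation(shaderProgram, 1, "color");
glBindAttribLocation(shaderProgram, 2, "size");
glLinkProgram(shaderProgram);  // bindings take effect at link time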

Render Call

glEnable(GL_POINT_SPRITE);
glEnable(GL_PROGRAM_POINT_SIZE);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glUseProgram(shaderProgram);

//Vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
//Color
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, buffers[1]);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0);
//Size
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, buffers[2]);
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, 0, 0);

glUniformMatrix4fv(l_matrix, 1, GL_TRUE, m);  // l_matrix may be -1 if the unused "matrix" uniform was optimized away; glUniform* silently ignores -1
glUniform4fv(l_bbLocation, 1, latlng_bb);
glUniform1f(l_sizeMod, GLfloat(zoom*size_mod/9.0));

//RENDER
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[face]);
glUniform1i(l_texture, 0);

glDrawArrays(GL_POINTS, 0, num_vertices);
glUseProgram(0);

//DISABLE
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);

glDisable(GL_POINT_SPRITE);
glDisable(GL_PROGRAM_POINT_SIZE); // same enum that was enabled above
glDisable(GL_TEXTURE_2D);
glDisable(GL_BLEND);
glBindTexture(GL_TEXTURE_2D, 0);

Any ideas, tips, or anything else that can help me trace the problem are welcome.

Edit 0

Added more information about the Video Cards.

Edit 1

I commented out the #version 120 line in the vertex shader, and I got warnings for one line:

0(10) : warning C7011: implicit cast from "int" to "float"

I changed that line to the following. The warnings are gone, but the problem remains the same.

vec3 w = vec3((vertex.x-bb.x)/bb.z*2.0-1.0, (vertex.y-bb.y)/bb.w*2.0-1.0, vertex.z);


Solution

  • I had to do two things to solve the problem.

    First, to make it work on the AMD card as well, I had to query the attribute locations with glGetAttribLocation and use those instead of the hard-coded indices.

    Changed from this:

    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    

    To this:

    l_vertex = glGetAttribLocation(shaderProgram, "vertex");
    glEnableVertexAttribArray(l_vertex);
    glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
    glVertexAttribPointer(l_vertex, 3, GL_FLOAT, GL_FALSE, 0, 0);
    

    It wasn't showing the data because the size/vertex/color attributes were getting mixed up: the AMD driver had assigned them different locations than the hard-coded 0/1/2.

    After that, it stopped working on my Nvidia card. There were no errors when compiling the shaders, but glGetError reported an error every time I used the program. I went back and realized that I had to remove all the unused variables from the shaders.
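    A plausible explanation for that error (my reading, not stated above): the Nvidia compiler optimizes unused variables away, so glGetAttribLocation returns -1 for them, and -1 cast to GLuint becomes an enormous index that makes glEnableVertexAttribArray raise GL_INVALID_VALUE. A defensive sketch that tolerates optimized-out attributes:

    // Sketch: guard against attributes the compiler optimized away.
    // glGetAttribLocation returns -1 for those, and feeding -1 to
    // glEnableVertexAttribArray raises GL_INVALID_VALUE.
    l_vertex = glGetAttribLocation(shaderProgram, "vertex");
    if (l_vertex >= 0) {
        glEnableVertexAttribArray(l_vertex);
        glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
        glVertexAttribPointer(l_vertex, 3, GL_FLOAT, GL_FALSE, 0, 0);
    }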