c++ opengl opengl-3 glad

How to implement 3D model rendering with different index buffers per attribute?


I have a problem rendering 3D models in OpenGL: I can't work out how to render with a separate index buffer for each attribute

(I mean rendering vertices by vertex indices, normals by normal indices, and texcoords by texcoord indices)

Because of this, my program currently gains nothing from indexed rendering: glDrawElements implies that one ElementBufferObject is used for all VertexBufferObjects in a VertexArrayObject (an index buffer is created, but it is always just 0, 1, 2, 3, 4, ...)

Although this method works, it wastes a lot of video memory compared to using several index buffers
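In other words, the element buffer I upload is effectively just the identity sequence, so the indexing buys nothing. Roughly (vertex_count stands for the total vertex count after duplication):

    std::vector<uint32_t> indices(vertex_count);
    std::iota(indices.begin(), indices.end(), 0u);   // 0, 1, 2, 3, ... (<numeric>)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER,
                 indices.size() * sizeof(uint32_t),
                 indices.data(), GL_STATIC_DRAW);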

I have attached a picture of the desired result and a picture showing the problem


Question: how can I make sure that different indices are used for each attribute when drawing the model?

I tried a lot of things but didn't get the desired result😥


Solution

  • You have 3 options.

    1. Flatten all 3 arrays out and use glDrawArrays. This is usually the simplest option.
    #include <cstdint>
    #include <vector>

    struct Vertex {
       float v[3];
       float n[3];
       float t[2];
    };
    std::vector<Vertex> flatten(
       float V[], float N[], float T[],
       uint32_t VI[], uint32_t NI[], uint32_t TI[],
       uint32_t num_triangles)
    {
       std::vector<Vertex> combined(num_triangles * 3);
       for(uint32_t i = 0; i < num_triangles * 3; ++i) {
          combined[i].v[0] = V[ 3 * VI[i] ];
          combined[i].v[1] = V[ 3 * VI[i] + 1 ];
          combined[i].v[2] = V[ 3 * VI[i] + 2 ];
          combined[i].n[0] = N[ 3 * NI[i] ];
          combined[i].n[1] = N[ 3 * NI[i] + 1 ];
          combined[i].n[2] = N[ 3 * NI[i] + 2 ];
          combined[i].t[0] = T[ 2 * TI[i] ];
          combined[i].t[1] = T[ 2 * TI[i] + 1 ];
       }
       return combined;
    }
    
    void draw(const std::vector<Vertex>& verts) {
       // client-side arrays: this only works in a compatibility context;
       // see the core-profile sketch below
       glVertexAttribPointer(VERTS_IDX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), verts[0].v);
       glVertexAttribPointer(NORMS_IDX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), verts[0].n);
       glVertexAttribPointer(UVS_IDX, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), verts[0].t);
       glDrawArrays(GL_TRIANGLES, 0, (GLsizei)verts.size());
    }
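    On a core profile (the question mentions glad / OpenGL 3), client-side attribute pointers are not available, so the flattened vertices have to live in a VBO. A minimal sketch, assuming VERTS_IDX/NORMS_IDX/UVS_IDX match your shader's attribute locations:

    void upload_and_draw(const std::vector<Vertex>& verts) {
       GLuint vao = 0, vbo = 0;
       glGenVertexArrays(1, &vao);
       glGenBuffers(1, &vbo);
       glBindVertexArray(vao);
       glBindBuffer(GL_ARRAY_BUFFER, vbo);
       glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex),
                    verts.data(), GL_STATIC_DRAW);

       // in core profile the last argument is a byte offset into the bound VBO
       // (offsetof needs <cstddef>)
       glEnableVertexAttribArray(VERTS_IDX);
       glVertexAttribPointer(VERTS_IDX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                             (void*)offsetof(Vertex, v));
       glEnableVertexAttribArray(NORMS_IDX);
       glVertexAttribPointer(NORMS_IDX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                             (void*)offsetof(Vertex, n));
       glEnableVertexAttribArray(UVS_IDX);
       glVertexAttribPointer(UVS_IDX, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                             (void*)offsetof(Vertex, t));

       glDrawArrays(GL_TRIANGLES, 0, (GLsizei)verts.size());
    }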
    
    
    2. Generate a new set of indices by hashing the existing indices.
    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <vector>

    struct Vertex {
       float v[3];
       float n[3];
       float t[2];
    };
    
    struct IndexedMesh {
       std::vector<Vertex> verts;
       std::vector<uint32_t> indices;
    };
    
    // pack the three indices into one 64-bit key; 21 bits each is plenty here
    uint64_t make_index_hash(uint32_t vi, uint32_t ni, uint32_t ti) {
       assert(vi < (1u << 21));
       assert(ni < (1u << 21));
       assert(ti < (1u << 21));
       return ((uint64_t)vi) |
              ((uint64_t)ni) << 21 |
              ((uint64_t)ti) << 42;
    }
    
    IndexedMesh flatten(
       float V[], float N[], float T[],
       uint32_t VI[], uint32_t NI[], uint32_t TI[],
       uint32_t num_triangles)
    {
       std::map<uint64_t, uint32_t> index_remap;
       std::vector<Vertex> combined;
       std::vector<uint32_t> combined_indices(num_triangles * 3);
    
       for(uint32_t i = 0; i < num_triangles * 3; ++i) {
    
          uint64_t hash = make_index_hash(VI[i], NI[i], TI[i]);
    
          // check to see if this combination of indices has been used before
          auto iter = index_remap.find(hash);
          if (iter != index_remap.end()) {
             combined_indices[i] = iter->second;
          } else {
             // first time this combination appears: append a new vertex
             combined_indices[i] = (uint32_t)combined.size();
             index_remap.emplace(hash, (uint32_t)combined.size());
             Vertex v = {
                 { V[ 3 * VI[i] ], V[ 3 * VI[i] + 1 ], V[ 3 * VI[i] + 2 ] },
                 { N[ 3 * NI[i] ], N[ 3 * NI[i] + 1 ], N[ 3 * NI[i] + 2 ] },
                 { T[ 2 * TI[i] ], T[ 2 * TI[i] + 1 ] }
             };
             combined.push_back(v);
          }
       }
       }
       return { combined, combined_indices };
    }
    
    void draw(const IndexedMesh& mesh) {
       glVertexAttribPointer(VERTS_IDX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), mesh.verts[0].v);
       glVertexAttribPointer(NORMS_IDX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), mesh.verts[0].n);
       glVertexAttribPointer(UVS_IDX, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), mesh.verts[0].t);
       glDrawElements(GL_TRIANGLES, (GLsizei)mesh.indices.size(), GL_UNSIGNED_INT, mesh.indices.data());
    }
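    As in option 1, on a core profile the de-duplicated mesh would be uploaded once into a VBO plus an element buffer; the VAO remembers the GL_ELEMENT_ARRAY_BUFFER binding. A hedged sketch (index_count below stands for the stored mesh.indices.size()):

    void upload(const IndexedMesh& mesh, GLuint& vao) {
       GLuint vbo = 0, ebo = 0;
       glGenVertexArrays(1, &vao);
       glGenBuffers(1, &vbo);
       glGenBuffers(1, &ebo);
       glBindVertexArray(vao);
       glBindBuffer(GL_ARRAY_BUFFER, vbo);
       glBufferData(GL_ARRAY_BUFFER, mesh.verts.size() * sizeof(Vertex),
                    mesh.verts.data(), GL_STATIC_DRAW);
       glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);   // recorded in the VAO
       glBufferData(GL_ELEMENT_ARRAY_BUFFER, mesh.indices.size() * sizeof(uint32_t),
                    mesh.indices.data(), GL_STATIC_DRAW);
       // ... same glEnableVertexAttribArray/glVertexAttribPointer calls as option 1 ...
    }

    // per frame:
    //    glBindVertexArray(vao);
    //    glDrawElements(GL_TRIANGLES, (GLsizei)index_count, GL_UNSIGNED_INT, nullptr);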
    
    3. The final option is to do this in a shader using shader storage buffers. The basic idea is that you load your vertex/normal/UV arrays into shader storage buffers, and you load your index sets into traditional per-vertex attributes.

    Within the shader, you can then index those arrays directly. (It's been a while since I used GLSL, so this is more a general hint than workable code.)

    I have used this approach before, but can't say what the performance impact is, if any (I imagine option 2 is the most performant, to be honest).

    layout(std430, binding = 0) buffer VertsSSBO
    {
        vec3 verts[];
    };
    layout(std430, binding = 1) buffer NormsSSBO
    {
        vec3 norms[];
    };
    layout(std430, binding = 2) buffer UVsSSBO
    {
        vec2 uvcoords[];
    };
    
    // the indices are stored in vertex buffers,
    // and bound with glVertexAttribPointer
    in uint vs_vertex_index;
    in uint vs_normal_index;
    in uint vs_uv_index;
    
    // outputs to fragment shader
    out vec4 fs_normal;
    out vec2 fs_uv;
    
    uniform mat4 vs_mvp;  ///< the modelview-projection matrix
    
    void main() {
    
       // grab the elements from the shader storage buffers
       vec4 v = vec4(verts[vs_vertex_index], 1.0);
       vec4 n = vec4(norms[vs_normal_index], 0.0);
       vec2 t = uvcoords[vs_uv_index];
    
       // now transform and output as you would usually (strictly, normals
       // should be transformed by the inverse-transpose of the model matrix
       // rather than the full MVP, but that's incidental here)
       gl_Position = vs_mvp * v;
       fs_normal = vs_mvp * n;
       fs_uv = t;
    }
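    For completeness, a hedged sketch of the C++ side: one SSBO per attribute array, bound to the binding points declared above. Two caveats: SSBOs require GL 4.3 or ARB_shader_storage_buffer_object (so not a plain GL 3 context), and vec3 arrays under std430 are padded to 16 bytes, so it is safest to upload vec4-padded data (padded_positions, num_positions, and vertex_index_vbo below are assumed names, not real API):

    GLuint ssbo[3];
    glGenBuffers(3, ssbo);

    // positions -> binding = 0 (repeat for normals and UVs at bindings 1 and 2)
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo[0]);
    glBufferData(GL_SHADER_STORAGE_BUFFER, num_positions * 4 * sizeof(float),
                 padded_positions, GL_STATIC_DRAW);
    glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo[0]);

    // the three index arrays become ordinary integer vertex attributes;
    // note the "I" variant, which keeps the attribute as an integer
    glBindBuffer(GL_ARRAY_BUFFER, vertex_index_vbo);
    glVertexAttribIPointer(VERTS_IDX, 1, GL_UNSIGNED_INT, 0, nullptr);
    glEnableVertexAttribArray(VERTS_IDX);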