I am trying to render a 3D cude on the window and observe the perspective projection. In the vertex shader when I dont multiply the projection matrix, the cube gets rendered (but you cant observe the perspective projection). When I multiply the perspective projection matrix the cube seems to be too enlarged or just a black window appears.
I have given the code of vertex shader and main.cpp below;
//Vertex Shader
#version 420 core
layout (location = 0) in vec3 pos;
out vec4 col;
uniform mat4 model;
uniform mat4 projection;
void main(){
gl_Position = model *projection* vec4(pos,1.0f);
col = vec4(clamp(pos, 0.1f, 100.0f), 1.0f);
}
#include <iostream>
#include <vector>
#include <stdio.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <GLM/glm.hpp>
#include <GLM/gtc/matrix_transform.hpp>
#include <GLM/gtc/type_ptr.hpp>
#include "Window.h"
#include "Shader.h"
#include "Buffers.h"
#include "Camera.h"
Window window;
std::vector<Buffers*> bufferList;
std::vector<Shader> shaderList;
static const char* vShader = "Shaders/shader.vert";
static const char* fShader = "Shaders/shader.frag";
void CreateObjects() {
GLfloat Cubevertices[] = {
1.0f,1.0f,1.0f, //0
-1.0f,1.0f,1.0f, //1
1.0f,-1.0f,1.0f, //2
-1.0f,-1.0f,1.0f, //3
1.0f,1.0f,-1.0f, //4
-1.0f,1.0f,-1.0f, //5
-1.0f,-1.0f,-1.0f, //6
1.0f,-1.0f,-1.0f, //7
};
unsigned int Cubeindices[] = {
0,1,3,
3,0,2,
3,2,6,
6,7,2,
1,3,5,
5,6,3,
0,1,4,
4,5,1,
2,0,7,
7,4,0,
5,6,4,
4,7,6
};
Buffers* obj1 = new Buffers();
obj1->CreateBuffer(Cubevertices, Cubeindices, 24, 36); //24 36
bufferList.push_back(obj1);
}
void CreateShaders() {
Shader* shader1 = new Shader();
shader1->createFromFiles(vShader, fShader);
shaderList.push_back(*shader1);
}
GLfloat toRad = 3.14159265359f/180.0f;
GLfloat r_angle = 0.0f;
bool direction = true;
float offset = 0.0f;
float maxoffset = 0.7f;
float inc = 0.0005f;
float cursize = 0.5f;
bool sizedir = true;
int main() {
window = Window(500, 500, "Cube");
window.Initialize();
CreateObjects();
CreateShaders();
GLuint u_model = 0, u_project = 0, u_view = 0;
glm::mat4 projection = glm::perspective(glm::radians(10.0f), (GLfloat)window.getBufferWidth() / window.getBufferHeight(), 1.0f, 100.0f);
while (!window.windowClose()) {
glfwPollEvents();
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glEnable(GL_DEPTH_TEST);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
shaderList[0].UseShader();
u_model = shaderList[0].getUniformModelLocation();
u_project = shaderList[0].getProjectionModelLocation();
glm::mat4 model(1.0f);
//TRANSLATION
if (direction) {
offset += inc;
}
else {
offset -= inc;
}
if (abs(offset) >= maxoffset) {
direction = !direction;
}
model = glm::scale(model, glm::vec3(0.3f, 0.3f, 0.3f));
model = glm::translate(model, glm::vec3(0.0f, offset, 0.0f));
model = glm::rotate(model, toRad*45.0f, glm::vec3(1.0f, 1.0f, 1.0f));
glUniformMatrix4fv(u_project, 1, GL_FALSE, glm::value_ptr(projection));
glUniformMatrix4fv(u_model, 1, GL_FALSE, glm::value_ptr(model));
bufferList[0]->Render();
window.swapBuffers();
}
return 0;
}
UPDATE (SOLVED): After taking insights from all the other solutions, this is my understanding. It turns out that in the perspective projection since the camera takes the position of the origin, we need to translate the cube towards the -ve Z-axis in order to view the rendered cube. I have attached the updated shaders and main.cpp code and the output.
#version 330
layout (location = 0) in vec3 pos;
out vec4 vCol;
uniform mat4 model;
uniform mat4 projection;
void main(){
gl_Position = projection * model * vec4(pos,1.0);
vCol = vec4(clamp(pos,0.0f,1.0f),1.0f);
}
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <vector>
#include <GL\glew.h>
#include <GLFW\glfw3.h>
#include <glm\glm.hpp>
#include <glm\gtc\matrix_transform.hpp>
#include <glm\gtc\type_ptr.hpp>
#include "Window.h"
#include "Mesh.h"
#include "Shader.h"
Window mainWindow;
std::vector<Mesh*> meshList;
std::vector<Shader> shaderList;
static const char* vShader = "Shaders/shader.vert";
static const char* fShader = "Shaders/shader.frag";
void CreateObjects() {
unsigned int indices[] = {
0,1,2,
2,3,0,
4,5,6,
6,7,4,
1,6,5,
5,2,1,
7,0,3,
3,4,7,
3,2,5,
5,4,3,
7,6,1,
1,0,7
};
GLfloat vertices[] = {
-0.5f,-0.5f,-0.5f,
-0.5f,0.5f,-0.5f,
0.5f,0.5f,-0.5f,
0.5f,-0.5f,-0.5f,
0.5f,-0.5f,0.5f,
0.5f,0.5f,0.5f,
-0.5f,0.5f,0.5f,
-0.5f,-0.5f,0.5f
};
Mesh* obj = new Mesh();
obj->CreateMesh(vertices, indices, 24, 36);
meshList.push_back(obj);
}
void CreateShader() {
Shader* shader = new Shader();
shader->CreateFromFiles(vShader, fShader);
shaderList.push_back(*shader);
}
GLfloat r_angle = 0.0f;
int main() {
mainWindow = Window(800, 600);
mainWindow.Initialize();
CreateObjects();
CreateShader();
GLuint uniformProjection = 0, uniformModel = 0;
glm::mat4 projection = glm::perspective(glm::radians(45.0f), (GLfloat)mainWindow.getBufferWidth() / mainWindow.getBufferHeight(), 0.1f, 100.0f);
while (!mainWindow.getShouldClose()) {
glfwPollEvents();
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
shaderList[0].UseShader();
uniformModel = shaderList[0].GetModelLocation();
uniformProjection = shaderList[0].GetProjectionLocation();
r_angle += 0.05f;
if (r_angle >= 360)
{
r_angle -= 360;
}
glm::mat4 model(1.0f);
model = glm::translate(model, glm::vec3(0.0f, 0.0f, -2.0f));
model = glm::rotate(model, glm::radians(r_angle), glm::vec3(0.0f, 1.0f, 0.0f));
glUniformMatrix4fv(uniformModel, 1, GL_FALSE, glm::value_ptr(model));
glUniformMatrix4fv(uniformProjection, 1, GL_FALSE, glm::value_ptr(projection));
meshList[0]->RenderMesh();
glUseProgram(0);
mainWindow.swapBuffers();
}
return 0;
}
In OpenGL, combining matrices goes from right to left:
projection * model * vec4(pos,1.0f)
this is the proper order.
In the formula above, one thing is missing, it is a view matrix. Or in other words, the view matrix occurs implicitly in this formula as the identity matrix:
1 0 0 0
0 1 0 0
0 0 1 0
0 0 0 1
This matrix defines the camera with following properties:
(0,0,-1)
point, along negative values of z-axisBy using glm::perspective
, the projection matrix is created which defines a frustum that has a near and a far plane. Distances to the near/far planes are defined relative the camera position. So if you stay with default camera position, your model needs to have z-coord which is less than -1
. It doesn't happen in your example, because model is rotated around its origin, then translated along y-axis and finally scaled by 0.3 factor along all axis.
Solutions:
also the value of FOV may be greater, ~ 45 degrees.