OpenGL學習 延遲著色 Deferred Shading
這個操作看了看介紹是為了減少正向渲染(就是一個模型完了再渲染下一個)的開銷
介紹
正向渲染(Forward Rendering)或者正向著色法(Forward Shading),在場景中我們根據所有光源照亮一個物體,之後再渲染下一個物體,以此類推。它非常容易理解,也很容易實現,但是同時它對程式效能的影響也很大,因為對於每一個需要渲染的物體,程式都要對每一個光源每一個需要渲染的片段進行迭代,這是非常多的
延遲著色法基於我們延遲(Defer)或推遲(Postpone)大部分計算量非常大的渲染(像是光照)到後期進行處理的想法。它包含兩個處理階段(Pass):在第一個幾何處理階段(Geometry Pass)中,我們先渲染場景一次,之後獲取物件的各種幾何資訊,並儲存在一系列叫做G緩衝(G-buffer)的紋理中;想想位置向量(Position Vector)、顏色向量(Color Vector)、法向量(Normal Vector)和/或鏡面值(Specular Value)。場景中這些儲存在G緩衝中的幾何資訊將會在之後用來做(更復雜的)光照計算。
會有一個G-Buffer 先用來儲存position normal albedo specular
第二次操作 就有點像後期了 處理的是quad
GLuint gBuffer; glGenFramebuffers(1, &gBuffer); glBindFramebuffer(GL_FRAMEBUFFER, gBuffer); GLuint gPosition, gNormal, gAlbedoSpec; // - Position color buffer glGenTextures(1, &gPosition); glBindTexture(GL_TEXTURE_2D, gPosition); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, gPosition, 0); // - Normal color buffer glGenTextures(1, &gNormal); glBindTexture(GL_TEXTURE_2D, gNormal); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, gNormal, 0); // - Color + Specular color buffer glGenTextures(1, &gAlbedoSpec); glBindTexture(GL_TEXTURE_2D, gAlbedoSpec); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT2, GL_TEXTURE_2D, gAlbedoSpec, 0); // - Tell OpenGL which color attachments we'll use (of this framebuffer) for rendering GLuint attachments[3] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2 }; glDrawBuffers(3, attachments); // - Create and attach depth buffer (renderbuffer) GLuint rboDepth; glGenRenderbuffers(1, &rboDepth); glBindRenderbuffer(GL_RENDERBUFFER, rboDepth); glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, SCR_WIDTH, SCR_HEIGHT); glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rboDepth); // - Finally check if framebuffer is complete if 
(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) std::cout << "Framebuffer not complete!" << std::endl; glBindFramebuffer(GL_FRAMEBUFFER, 0); glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
第一次看見這個glDrawBuffers:它是告訴OpenGL片段著色器的各個輸出(layout location 0/1/2)分別寫入這個幀緩衝的哪幾個顏色附件;不呼叫的話預設只會寫入第一個附件
//vertex #version 330 core layout (location = 0) in vec3 position; layout (location = 1) in vec3 normal; layout (location = 2) in vec2 texCoords; out vec3 FragPos; out vec2 TexCoords; out vec3 Normal; uniform mat4 model; uniform mat4 view; uniform mat4 projection; void main() { vec4 worldPos = model * vec4(position, 1.0f); FragPos = worldPos.xyz; gl_Position = projection * view * worldPos; TexCoords = texCoords; mat3 normalMatrix = transpose(inverse(mat3(model))); Normal = normalMatrix * normal; } //fragment #version 330 core layout (location = 0) out vec3 gPosition; layout (location = 1) out vec3 gNormal; layout (location = 2) out vec4 gAlbedoSpec; in vec2 TexCoords; in vec3 FragPos; in vec3 Normal; uniform sampler2D texture_diffuse1; uniform sampler2D texture_specular1; void main() { // Store the fragment position vector in the first gbuffer texture gPosition = FragPos; // Also store the per-fragment normals into the gbuffer gNormal = normalize(Normal); // And the diffuse per-fragment color gAlbedoSpec.rgb = texture(texture_diffuse1, TexCoords).rgb; // Store specular intensity in gAlbedoSpec's alpha component gAlbedoSpec.a = texture(texture_specular1, TexCoords).r; }
這樣把位置和法線糾正一下 操作過後 position normal albedospec就存了該有的資訊 rbodepth存的深度
然後就繪製quad
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
shaderLightingPass.Use();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gPosition);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, gNormal);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, gAlbedoSpec);
// Also send light relevant uniforms
for (GLuint i = 0; i < lightPositions.size(); i++)
{
glUniform3fv(glGetUniformLocation(shaderLightingPass.Program, ("lights[" + std::to_string(i) + "].Position").c_str()), 1, &lightPositions[i][0]);
glUniform3fv(glGetUniformLocation(shaderLightingPass.Program, ("lights[" + std::to_string(i) + "].Color").c_str()), 1, &lightColors[i][0]);
// Update attenuation parameters and calculate radius
const GLfloat constant = 1.0; // Note that we don't send this to the shader, we assume it is always 1.0 (in our case)
const GLfloat linear = 0.7;
const GLfloat quadratic = 1.8;
glUniform1f(glGetUniformLocation(shaderLightingPass.Program, ("lights[" + std::to_string(i) + "].Linear").c_str()), linear);
glUniform1f(glGetUniformLocation(shaderLightingPass.Program, ("lights[" + std::to_string(i) + "].Quadratic").c_str()), quadratic);
}
glUniform3fv(glGetUniformLocation(shaderLightingPass.Program, "viewPos"), 1, &camera.Position[0]);
// Finally render quad
RenderQuad();
用於計算的資料都已經在紋理的rgba裡面,vec4 value = texture(sampler, uv) 就拿到了(注意參數順序是先取樣器後紋理座標;core profile 下用 texture 而不是已棄用的 texture2D)。
//vertex
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoords;

out vec2 TexCoords;

// Fullscreen quad: vertices are already in NDC, so no matrices are needed.
void main()
{
    TexCoords = texCoords;
    gl_Position = vec4(position, 1.0);
}
//fragment
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;

uniform sampler2D gPosition;
uniform sampler2D gNormal;
uniform sampler2D gAlbedoSpec;

struct Light {
    vec3 Position;
    vec3 Color;
    float Linear;
    float Quadratic;
};
const int NR_LIGHTS = 32;
uniform Light lights[NR_LIGHTS];
uniform vec3 viewPos;

// Lighting pass: reconstruct each fragment's geometry from the G-buffer and
// accumulate Blinn-Phong shading from all lights.
void main()
{
    // Fetch the data the geometry pass wrote for this screen pixel.
    vec3 fragPos = texture(gPosition, TexCoords).rgb;
    vec3 normal = texture(gNormal, TexCoords).rgb;
    vec4 albedoSpec = texture(gAlbedoSpec, TexCoords);
    vec3 albedo = albedoSpec.rgb;
    float specStrength = albedoSpec.a; // specular intensity packed in alpha

    vec3 viewDir = normalize(viewPos - fragPos);
    vec3 result = albedo * 0.1; // hard-coded ambient component

    for (int i = 0; i < NR_LIGHTS; ++i)
    {
        vec3 toLight = lights[i].Position - fragPos;
        float dist = length(toLight);
        vec3 lightDir = toLight / dist; // normalized direction to the light

        // Diffuse (Lambert)
        vec3 diffuse = max(dot(normal, lightDir), 0.0) * albedo * lights[i].Color;
        // Specular (Blinn-Phong half-vector)
        vec3 halfwayDir = normalize(lightDir + viewDir);
        vec3 specular = lights[i].Color * pow(max(dot(normal, halfwayDir), 0.0), 16.0) * specStrength;

        // Quadratic attenuation; the constant term is assumed to be 1.0
        float attenuation = 1.0 / (1.0 + lights[i].Linear * dist + lights[i].Quadratic * dist * dist);
        result += (diffuse + specular) * attenuation;
    }
    FragColor = vec4(result, 1.0);
}
前面提取出那些資料,後面的計算就是phong光照計算了
到這裡 純 延遲渲染就完了 但是 還有正向和延遲混合啊
記得前面有個rboDepth嗎。。存了深度還沒用吶。
在上面的 RenderQuad(); 呼叫之後接著執行:
// 2.5. Copy the geometry pass's depth buffer into the default framebuffer so
// that forward-rendered objects are depth-tested against the deferred scene.
glBindFramebuffer(GL_READ_FRAMEBUFFER, gBuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); // Write to default framebuffer
// blit to default framebuffer. Note that this may or may not work as the internal formats of both the FBO and default framebuffer have to match.
// the internal formats are implementation defined. This works on all of my systems, but if it doesn't on yours you'll likely have to write to the
// depth buffer in another shader stage (or somehow see to match the default framebuffer's internal format with the FBO's internal format).
glBlitFramebuffer(0, 0, SCR_WIDTH, SCR_HEIGHT, 0, 0, SCR_WIDTH, SCR_HEIGHT, GL_DEPTH_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// 3. Forward-render the light sources as small cubes on top of the scene
shaderLightBox.Use();
glUniformMatrix4fv(glGetUniformLocation(shaderLightBox.Program, "projection"), 1, GL_FALSE, glm::value_ptr(projection));
glUniformMatrix4fv(glGetUniformLocation(shaderLightBox.Program, "view"), 1, GL_FALSE, glm::value_ptr(view));
for (GLuint i = 0; i < lightPositions.size(); i++)
{
    // glm::mat4() is NOT guaranteed to be the identity in GLM >= 0.9.9
    // (the default constructor leaves the matrix uninitialized unless
    // GLM_FORCE_CTOR_INIT is defined) — construct the identity explicitly.
    model = glm::mat4(1.0f);
    model = glm::translate(model, lightPositions[i]);
    model = glm::scale(model, glm::vec3(0.25f));
    glUniformMatrix4fv(glGetUniformLocation(shaderLightBox.Program, "model"), 1, GL_FALSE, glm::value_ptr(model));
    glUniform3fv(glGetUniformLocation(shaderLightBox.Program, "lightColor"), 1, &lightColors[i][0]);
    RenderCube();
}
glBlitFramebuffer這幾步就是把G緩衝裡的深度複製到預設幀緩衝(螢幕)的深度緩衝裡 這樣 深度就不會有問題了 可以愉快的在最後繪製正向渲染的物體了
這個基於phong的。
這個是看learnopengl的學習記錄。
原文 https://learnopengl-cn.github.io/05%20Advanced%20Lighting/08%20Deferred%20Shading/