WebGPU Rendering, Part 13: Shadow Mapping
Introduction
I have been reading through this online book on WebGPU. In this series of articles, I will work through the book and implement its lessons with a more structured TypeScript class approach. Eventually, we will build three types of WebGPU renderers: Gaussian splatting, ray tracing, and rasterization.
In this article, we will cover shadow mapping, a technique used in computer graphics to determine which areas of a scene are in shadow relative to a light source.
The following link is the commit in my GitHub repo that matches the code we will go over.
Shadow Mapping
To generate a shadow map, we render the scene from the light’s point of view. This initial render pass produces depth information: for each texel, the distance from the light to the nearest surface in that direction.
Then, when we render from the camera’s perspective, we take an extra step to find where the fragment we are shading sits from the light’s point of view. If it is farther from the light than the depth stored in the shadow map, it is behind some other object and should be shadowed.
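In pseudocode, the per-fragment test boils down to a single comparison. Here is a conceptual sketch in TypeScript (the real test runs in the object fragment shader later in this article, with filtering on top; the bias term fights self-shadowing artifacts):
// Conceptual sketch of the shadow test. depthFromLight is this fragment's
// depth in the light's clip space; shadowMapDepth is the nearest depth the
// light recorded in that direction.
function isShadowed(depthFromLight: number, shadowMapDepth: number, bias: number = 0.0003): boolean {
    return depthFromLight - bias > shadowMapDepth;
}
First, here is the shader for the light’s render pass, which generates the shadow map: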
@group(0) @binding(0)
var<uniform> modelView: mat4x4<f32>;
@group(0) @binding(1)
var<uniform> projection: mat4x4<f32>;
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) depth: f32
}
@vertex
fn vs_main(@location(0) inPos: vec3<f32>) -> VertexOutput {
var out: VertexOutput;
var wldLoc:vec4<f32> = modelView * vec4<f32>(inPos, 1.0);
out.clip_position = projection * wldLoc;
out.depth = out.clip_position.z / out.clip_position.w;
return out;
}
struct FragOutputs {
@builtin(frag_depth) depth: f32,
@location(0) color: vec4<f32>
}
@fragment
fn fs_main(in: VertexOutput, @builtin(front_facing) isFront: bool) -> FragOutputs {
var out: FragOutputs;
if (isFront) {
out.depth = in.depth;
} else {
out.depth = in.depth - 0.001;
}
out.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
return out;
}
In this shader, we manually write the depth value to the depth output, and we pull back-facing fragments slightly closer to the light to help avoid flickering (shadow acne). This gives us the shadow map, rendered from the light, which we will use in our object shader. The fragment color doesn’t really matter, since we will be discarding the color attachment anyway.
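As an aside, WebGPU can also apply this kind of offset in hardware through the depth-bias fields of the pipeline’s depth-stencil state, a common alternative to writing frag_depth manually. A sketch of what that might look like (the values are illustrative, not tuned for this scene):
// Hypothetical alternative: let the rasterizer apply the offset during the
// shadow pass instead of offsetting in the fragment shader.
const depthStencilWithBias: GPUDepthStencilState = {
    format: 'depth32float',
    depthWriteEnabled: true,
    depthCompare: 'less',
    depthBias: 1,            // constant offset, in depth-buffer units
    depthBiasSlopeScale: 1.0 // extra offset proportional to the polygon's depth slope
};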
Our OBJ shader is very similar to the ones we have seen in previous articles, but it now takes in additional information about our light source’s position, plus a texture and a comparison sampler for the shadow map.
@group(0) @binding(0)
var<uniform> modelView: mat4x4<f32>;
@group(0) @binding(1)
var<uniform> projection: mat4x4<f32>;
@group(0) @binding(2)
var<uniform> normalMatrix: mat4x4<f32>;
@group(0) @binding(3)
var<uniform> lightDirection: vec3<f32>;
@group(0) @binding(4)
var<uniform> viewDirection: vec3<f32>;
@group(0) @binding(5)
var<uniform> ambientColor:vec4<f32>;
@group(0) @binding(6)
var<uniform> diffuseColor:vec4<f32>;
@group(0) @binding(7)
var<uniform> specularColor:vec4<f32>;
@group(0) @binding(8)
var<uniform> shininess:f32;
const diffuseConstant:f32 = 1.0;
const specularConstant:f32 = 1.0;
const ambientConstant: f32 = 1.0;
fn specular(lightDir:vec3<f32>, viewDir:vec3<f32>, normal:vec3<f32>, specularColor:vec3<f32>,
shininess:f32) -> vec3<f32> {
let reflectDir:vec3<f32> = reflect(-lightDir, normal);
let specDot:f32 = max(dot(reflectDir, viewDir), 0.0);
return pow(specDot, shininess) * specularColor;
}
fn diffuse(lightDir:vec3<f32>, normal:vec3<f32>, diffuseColor:vec3<f32>) -> vec3<f32>{
return max(dot(lightDir, normal), 0.0) * diffuseColor;
}
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) viewDir: vec3<f32>,
@location(1) normal: vec3<f32>,
@location(2) lightDir: vec3<f32>,
@location(3) wldLoc: vec3<f32>,
@location(4) lightLoc: vec3<f32>,
@location(5) inPos: vec3<f32>
};
@vertex
fn vs_main(
@location(0) inPos: vec3<f32>,
@location(1) inNormal: vec3<f32>
) -> VertexOutput {
var out: VertexOutput;
out.viewDir = normalize((normalMatrix * vec4<f32>(-viewDirection, 0.0)).xyz);
out.lightDir = normalize((normalMatrix * vec4<f32>(-lightDirection, 0.0)).xyz);
out.normal = normalize(normalMatrix * vec4<f32>(inNormal, 0.0)).xyz;
var wldLoc:vec4<f32> = modelView * vec4<f32>(inPos, 1.0);
out.clip_position = projection * wldLoc;
out.wldLoc = wldLoc.xyz / wldLoc.w;
out.inPos = inPos;
var lightLoc:vec4<f32> = modelView * vec4<f32>(lightDirection, 1.0);
out.lightLoc = lightLoc.xyz / lightLoc.w;
return out;
}
@group(0) @binding(9)
var t_depth: texture_depth_2d;
@group(0) @binding(10)
var s_depth: sampler_comparison;
@group(0) @binding(11)
var<uniform> lightModelViewMatrix: mat4x4<f32>;
@group(0) @binding(12)
var<uniform> lightProjectionMatrix: mat4x4<f32>;
@fragment
fn fs_main(in: VertexOutput, @builtin(front_facing) face: bool) -> @location(0) vec4<f32> {
var lightLoc:vec3<f32> = in.lightLoc;
var lightDir:vec3<f32> = normalize(in.lightDir);
var n:vec3<f32> = normalize(in.normal);
var viewDir: vec3<f32> = in.viewDir;
var fragmentPosInShadowMapSpace: vec4<f32> = lightProjectionMatrix * lightModelViewMatrix * vec4(in.inPos, 1.0);
fragmentPosInShadowMapSpace = fragmentPosInShadowMapSpace / fragmentPosInShadowMapSpace.w;
var depth: f32 = fragmentPosInShadowMapSpace.z;
var uv: vec2<f32> = 0.5*(fragmentPosInShadowMapSpace.xy + vec2(1.0, 1.0));
var visibility = 0.0;
let oneOverShadowDepthTextureSize = 1.0 / 1024.0;
for (var y = -2; y <= 2; y++) {
for (var x = -2; x <= 2; x++) {
let offset = vec2<f32>(vec2(x,y)) * oneOverShadowDepthTextureSize;
visibility += textureSampleCompare(t_depth, s_depth, vec2(uv.x, 1.0 - uv.y) + offset, depth - 0.0003); // small offset to avoid self shadowing
}
}
visibility /= 25.0;
if (face) {
var wldLoc2Light:vec3<f32> = in.wldLoc - lightLoc;
var align:f32 = dot(normalize(wldLoc2Light), lightDir);
if (align > 0.9) {
var radiance:vec3<f32> = ambientColor.rgb * ambientConstant +
diffuse(-lightDir, n, diffuseColor.rgb)* diffuseConstant +
specular(-lightDir, viewDir, n, specularColor.rgb, shininess) * specularConstant;
return vec4<f32>(radiance * visibility,1.0);
}
}
return vec4<f32>( 0.0,0.0,0.0,1.0);
}
We need to take in information about the light’s placement in order to use the shadow map properly. One naming quirk to be aware of: the lightDirection uniform actually holds the light’s position, and the shaders derive both a direction and a location from it. In the fragment shader we transform the fragment’s position into the light’s clip space, which gives us its depth as seen from the light, and from the same coordinates we compute the UV at which to sample the shadow map.
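The mapping from the light’s clip space to shadow-map UVs is worth spelling out. After the perspective divide, x and y land in [-1, 1]; we remap them to [0, 1] and flip v, because texture coordinates run top-down while NDC y runs bottom-up. A small sketch of the same arithmetic the shader performs:
// Mirrors the shader's uv computation: 0.5 * (xy + 1), followed by a v flip.
function ndcToShadowUV(ndcX: number, ndcY: number): [number, number] {
    const u = 0.5 * (ndcX + 1.0);
    const v = 0.5 * (ndcY + 1.0);
    return [u, 1.0 - v];
}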
We then accumulate visibility using textureSampleCompare: each tap returns 1.0 where the fragment’s biased depth is closer to the light than the value stored in the shadow texture. We take samples in a 5x5 grid around the fragment (a simple form of percentage-closer filtering) and divide by 25, producing a softer shadow edge, and we scale the final color by this accumulated visibility. The align > 0.9 check additionally restricts lighting to a spotlight cone around the light direction; fragments outside the cone (and back faces) are drawn black.
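For reference, the comparison sampler bound at @binding(10) is created later, in the rendering setup. With compare: 'less', each textureSampleCompare tap returns 1.0 where the biased fragment depth is less than the stored depth (the fragment is lit), and the linear filters add some hardware filtering within each tap:
const sampler: GPUSampler = device!.createSampler({
    addressModeU: 'clamp-to-edge',
    addressModeV: 'clamp-to-edge',
    magFilter: 'linear',
    minFilter: 'linear',
    compare: 'less'
});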
OBJ Models
Our OBJ classes will function very similarly to the ones we have seen in the past; the only difference is that they also take in buffers for the light’s position and the shadow depth texture.
Plane
export class Plane {
private _pipeline: GPURenderPipeline;
private _positionBuffer: GPUBuffer;
private _normalBuffer: GPUBuffer;
private _uniformBindGroup: GPUBindGroup;
public static async init(device: GPUDevice, modelViewMatrixUniformBuffer: GPUBuffer,
projectionMatrixUniformBuffer: GPUBuffer, normalMatrixUniformBuffer: GPUBuffer,
viewDirectionUniformBuffer: GPUBuffer, lightDirectionUniformBuffer: GPUBuffer,
depthTexture: GPUTexture, sampler: GPUSampler, lightModelViewMatrixUniformBuffer: GPUBuffer,
lightProjectionMatrixUniformBuffer: GPUBuffer, shaderCode: string): Promise<Plane> {
const shaderModule = device.createShaderModule({ code: shaderCode });
const positions = new Float32Array([
-100, -100, 0,
100, -100, 0,
-100, 100, 0,
100, 100, 0
]);
const normals = new Float32Array([
0, 0, 1,
0, 0, 1,
0, 0, 1,
0, 0, 1
]);
const positionBuffer = createGPUBuffer(device, positions, GPUBufferUsage.VERTEX);
const normalBuffer = createGPUBuffer(device, normals, GPUBufferUsage.VERTEX);
const uniformBindGroupLayout = device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 1,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 2,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 3,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 4,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 5,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 6,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 7,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 8,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 9,
visibility: GPUShaderStage.FRAGMENT,
texture: {
sampleType: "depth"
}
},
{
binding: 10,
visibility: GPUShaderStage.FRAGMENT,
sampler: {
type: "comparison"
}
},
{
binding: 11,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 12,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
}
]
});
const ambientUniformBuffer = createGPUBuffer(device, new Float32Array([0.15, 0.10, 0.10, 1.0]), GPUBufferUsage.UNIFORM);
const diffuseUniformBuffer = createGPUBuffer(device, new Float32Array([0.55, 0.55, 0.55, 1.0]), GPUBufferUsage.UNIFORM);
const specularUniformBuffer = createGPUBuffer(device, new Float32Array([0.0, 0.0, 0.0, 1.0]), GPUBufferUsage.UNIFORM);
const shininessUniformBuffer = createGPUBuffer(device, new Float32Array([0.0]), GPUBufferUsage.UNIFORM);
const uniformBindGroup = device.createBindGroup({
layout: uniformBindGroupLayout,
entries: [
{
binding: 0,
resource: {
buffer: modelViewMatrixUniformBuffer
}
},
{
binding: 1,
resource: {
buffer: projectionMatrixUniformBuffer
}
},
{
binding: 2,
resource: {
buffer: normalMatrixUniformBuffer
}
},
{
binding: 3,
resource: {
buffer: lightDirectionUniformBuffer
}
},
{
binding: 4,
resource: {
buffer: viewDirectionUniformBuffer
}
},
{
binding: 5,
resource: {
buffer: ambientUniformBuffer
}
},
{
binding: 6,
resource: {
buffer: diffuseUniformBuffer
}
},
{
binding: 7,
resource: {
buffer: specularUniformBuffer
}
},
{
binding: 8,
resource: {
buffer: shininessUniformBuffer
}
},
{
binding: 9,
resource: depthTexture.createView()
},
{
binding: 10,
resource: sampler
},
{
binding: 11,
resource: {
buffer: lightModelViewMatrixUniformBuffer
}
},
{
binding: 12,
resource: {
buffer: lightProjectionMatrixUniformBuffer
}
}
]
});
const positionAttribDesc: GPUVertexAttribute = {
shaderLocation: 0,
offset: 0,
format: 'float32x3'
}
const positionBufferLayout: GPUVertexBufferLayout = {
attributes: [positionAttribDesc],
arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
stepMode: 'vertex'
}
const normalAttribDesc: GPUVertexAttribute = {
shaderLocation: 1,
offset: 0,
format: 'float32x3'
}
const normalBufferLayout: GPUVertexBufferLayout = {
attributes: [normalAttribDesc],
arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
stepMode: 'vertex'
}
const pipelineLayoutDesc: GPUPipelineLayoutDescriptor = { bindGroupLayouts: [uniformBindGroupLayout] };
const pipelineLayout = device.createPipelineLayout(pipelineLayoutDesc);
const colorState: GPUColorTargetState = {
format: 'bgra8unorm'
}
const pipelineDesc: GPURenderPipelineDescriptor = {
layout: pipelineLayout,
vertex: {
module: shaderModule,
entryPoint: 'vs_main',
buffers: [positionBufferLayout, normalBufferLayout]
},
fragment: {
module: shaderModule,
entryPoint: 'fs_main',
targets: [colorState]
},
primitive: {
topology: 'triangle-strip',
frontFace: 'ccw',
cullMode: 'none'
},
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth32float'
}
}
const pipeline = device.createRenderPipeline(pipelineDesc);
return new Plane(pipeline, positionBuffer, normalBuffer, uniformBindGroup);
}
private constructor(pipeline: GPURenderPipeline, positionBuffer: GPUBuffer, normalBuffer: GPUBuffer, uniformBindGroup: GPUBindGroup) {
this._pipeline = pipeline;
this._positionBuffer = positionBuffer;
this._normalBuffer = normalBuffer;
this._uniformBindGroup = uniformBindGroup;
}
public encodeRenderPass(renderPassEncoder: GPURenderPassEncoder) {
renderPassEncoder.setPipeline(this._pipeline);
renderPassEncoder.setBindGroup(0, this._uniformBindGroup);
renderPassEncoder.setVertexBuffer(0, this._positionBuffer);
renderPassEncoder.setVertexBuffer(1, this._normalBuffer);
renderPassEncoder.draw(4, 1);
}
}
We create the buffers and build out our Plane class very similarly to the other OBJ classes in past articles. The main difference is the expanded bind group layout and bind group, which account for the lighting and shadow mapping resources.
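The createGPUBuffer helper comes from earlier articles in the series. For completeness, here is a minimal sketch of what such a helper typically looks like; this is an assumption about its shape, not the repo’s exact code:
// Allocate a buffer mapped at creation, copy the typed array in, and unmap.
// Mapped ranges must be a multiple of 4 bytes, hence the rounded-up size.
function createGPUBuffer(device: GPUDevice, data: Float32Array | Uint16Array, usage: GPUBufferUsageFlags): GPUBuffer {
    const buffer = device.createBuffer({
        size: Math.ceil(data.byteLength / 4) * 4,
        usage,
        mappedAtCreation: true
    });
    // Copy raw bytes so the same helper serves both element types.
    new Uint8Array(buffer.getMappedRange()).set(new Uint8Array(data.buffer, data.byteOffset, data.byteLength));
    buffer.unmap();
    return buffer;
}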
Teapot
Since our teapot will cast the shadow, we will need an extra shader module and render pipeline to generate the shadow map.
export class Teapot {
private _pipeline: GPURenderPipeline;
private _lightPipeline: GPURenderPipeline;
private _positionBuffer: GPUBuffer;
private _normalBuffer: GPUBuffer;
private _uniformBindGroup: GPUBindGroup;
private _uniformBindGroupLight: GPUBindGroup;
private _indexBuffer?: GPUBuffer;
private _indexSize?: number;
public static async init(device: GPUDevice, modelViewMatrixUniformBuffer: GPUBuffer,
projectionMatrixUniformBuffer: GPUBuffer, normalMatrixUniformBuffer: GPUBuffer,
viewDirectionUniformBuffer: GPUBuffer, lightDirectionUniformBuffer: GPUBuffer,
depthTexture: GPUTexture, sampler: GPUSampler, lightModelViewMatrixUniformBuffer: GPUBuffer,
lightProjectionMatrixUniformBuffer: GPUBuffer, shaderCode: string, lightViewShaderCode: string): Promise<Teapot> {
const shaderModule = device.createShaderModule({ code: shaderCode });
const shaderModuleLight = device.createShaderModule({code: lightViewShaderCode});
const objResponse = await fetch("./objs/teapot.obj");
const objBlob = await objResponse.blob();
const objText = await objBlob.text();
const objDataExtractor = new ObjDataExtractor(objText);
const positions = objDataExtractor.vertexPositions;
const positionBuffer = createGPUBuffer(device, positions, GPUBufferUsage.VERTEX);
const normals = objDataExtractor.normals;
const normalBuffer = createGPUBuffer(device, normals, GPUBufferUsage.VERTEX);
const indices = objDataExtractor.indices;
const indexBuffer = createGPUBuffer(device, indices, GPUBufferUsage.INDEX);
const indexSize = indices.length;
const uniformBindGroupLayoutLight = device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 1,
visibility: GPUShaderStage.VERTEX,
buffer: {}
}
]
});
const uniformBindGroupLayout = device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 1,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 2,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 3,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 4,
visibility: GPUShaderStage.VERTEX,
buffer: {}
},
{
binding: 5,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 6,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 7,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 8,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 9,
visibility: GPUShaderStage.FRAGMENT,
texture: {
sampleType: "depth"
}
},
{
binding: 10,
visibility: GPUShaderStage.FRAGMENT,
sampler: {
type: "comparison"
}
},
{
binding: 11,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
},
{
binding: 12,
visibility: GPUShaderStage.FRAGMENT,
buffer: {}
}
]
});
const ambientUniformBuffer = createGPUBuffer(device, new Float32Array([0.15, 0.10, 0.10, 1.0]), GPUBufferUsage.UNIFORM);
const diffuseUniformBuffer = createGPUBuffer(device, new Float32Array([0.55, 0.55, 0.55, 1.0]), GPUBufferUsage.UNIFORM);
const specularUniformBuffer = createGPUBuffer(device, new Float32Array([1.0, 1.0, 1.0, 1.0]), GPUBufferUsage.UNIFORM);
const shininessUniformBuffer = createGPUBuffer(device, new Float32Array([20.0]), GPUBufferUsage.UNIFORM);
const uniformBindGroupLight = device.createBindGroup({
layout: uniformBindGroupLayoutLight,
entries: [
{
binding: 0,
resource: {
buffer: lightModelViewMatrixUniformBuffer
}
},
{
binding: 1,
resource: {
buffer: lightProjectionMatrixUniformBuffer
}
}
]
});
const uniformBindGroup = device.createBindGroup({
layout: uniformBindGroupLayout,
entries: [
{
binding: 0,
resource: {
buffer: modelViewMatrixUniformBuffer
}
},
{
binding: 1,
resource: {
buffer: projectionMatrixUniformBuffer
}
},
{
binding: 2,
resource: {
buffer: normalMatrixUniformBuffer
}
},
{
binding: 3,
resource: {
buffer: lightDirectionUniformBuffer
}
},
{
binding: 4,
resource: {
buffer: viewDirectionUniformBuffer
}
},
{
binding: 5,
resource: {
buffer: ambientUniformBuffer
}
},
{
binding: 6,
resource: {
buffer: diffuseUniformBuffer
}
},
{
binding: 7,
resource: {
buffer: specularUniformBuffer
}
},
{
binding: 8,
resource: {
buffer: shininessUniformBuffer
}
},
{
binding: 9,
resource: depthTexture.createView()
},
{
binding: 10,
resource: sampler
},
{
binding: 11,
resource: {
buffer: lightModelViewMatrixUniformBuffer
}
},
{
binding: 12,
resource: {
buffer: lightProjectionMatrixUniformBuffer
}
}
]
});
const positionAttribDesc: GPUVertexAttribute = {
shaderLocation: 0,
offset: 0,
format: 'float32x3'
}
const positionBufferLayout: GPUVertexBufferLayout = {
attributes: [positionAttribDesc],
arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
stepMode: 'vertex'
}
const normalAttribDesc: GPUVertexAttribute = {
shaderLocation: 1,
offset: 0,
format: 'float32x3'
}
const normalBufferLayout: GPUVertexBufferLayout = {
attributes: [normalAttribDesc],
arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
stepMode: 'vertex'
}
const pipelineLayoutDesc: GPUPipelineLayoutDescriptor = { bindGroupLayouts: [uniformBindGroupLayout] };
const pipelineLayout = device.createPipelineLayout(pipelineLayoutDesc);
const colorState: GPUColorTargetState = {
format: 'bgra8unorm'
};
const pipelineDesc: GPURenderPipelineDescriptor = {
layout: pipelineLayout,
vertex: {
module: shaderModule,
entryPoint: 'vs_main',
buffers: [positionBufferLayout, normalBufferLayout]
},
fragment: {
module: shaderModule,
entryPoint: 'fs_main',
targets: [colorState]
},
primitive: {
topology: 'triangle-list',
frontFace: 'ccw',
cullMode: 'none'
},
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth32float'
}
};
const pipeline = device.createRenderPipeline(pipelineDesc);
const lightPipelineDesc: GPURenderPipelineDescriptor = {
layout: device.createPipelineLayout({bindGroupLayouts: [uniformBindGroupLayoutLight]}),
vertex: {
module: shaderModuleLight,
entryPoint: "vs_main",
buffers: [positionBufferLayout]
},
fragment: {
module: shaderModuleLight,
entryPoint: "fs_main",
targets: [colorState]
},
primitive: {
topology: "triangle-list",
frontFace: "ccw",
cullMode: "none"
},
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth32float'
}
}
const lightPipeline = device.createRenderPipeline(lightPipelineDesc);
return new Teapot(pipeline, lightPipeline, positionBuffer, normalBuffer, uniformBindGroup, uniformBindGroupLight, indexBuffer, indexSize);
}
public encodeRenderPass(renderPassEncoder: GPURenderPassEncoder) {
renderPassEncoder.setPipeline(this._pipeline);
renderPassEncoder.setVertexBuffer(0, this._positionBuffer);
renderPassEncoder.setVertexBuffer(1, this._normalBuffer);
renderPassEncoder.setBindGroup(0, this._uniformBindGroup);
renderPassEncoder.setIndexBuffer(this._indexBuffer!, 'uint16');
renderPassEncoder.drawIndexed(this._indexSize!);
}
public encodeLightRenderPass(renderPassEncoder: GPURenderPassEncoder) {
renderPassEncoder.setPipeline(this._lightPipeline);
renderPassEncoder.setBindGroup(0, this._uniformBindGroupLight);
renderPassEncoder.setVertexBuffer(0, this._positionBuffer);
renderPassEncoder.setIndexBuffer(this._indexBuffer!, 'uint16');
renderPassEncoder.drawIndexed(this._indexSize!);
}
private constructor(pipeline: GPURenderPipeline, lightPipeline: GPURenderPipeline, positionBuffer: GPUBuffer, normalBuffer: GPUBuffer,
uniformBindGroup: GPUBindGroup, uniformBindGroupLight: GPUBindGroup, indexBuffer: GPUBuffer, indexSize: number) {
this._pipeline = pipeline;
this._lightPipeline = lightPipeline;
this._positionBuffer = positionBuffer;
this._normalBuffer = normalBuffer;
this._uniformBindGroup = uniformBindGroup;
this._uniformBindGroupLight = uniformBindGroupLight;
this._indexBuffer = indexBuffer;
this._indexSize = indexSize;
}
}
We set up the two pipelines very similarly to one another, the only differences being the shader modules and the bind groups used. Our teapot exposes two render pass functions: one for generating the shadow map and one for actually rendering the teapot.
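Per frame, the two passes run back to back on the same command encoder, shadow pass first. We will see this in full in the rendering section below; in outline:
// Sketch of the per-frame call order: the light pass fills the shadow map,
// and the main pass that follows samples it.
const lightPassEncoder = commandEncoder.beginRenderPass(lightRenderPassDesc);
teapot.encodeLightRenderPass(lightPassEncoder);
lightPassEncoder.end();

const passEncoder = commandEncoder.beginRenderPass(renderPassDesc);
teapot.encodeRenderPass(passEncoder);
plane.encodeRenderPass(passEncoder);
passEncoder.end();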
Rendering
When we render, we first create our shadow map by running the light render pass, which fills the depth texture used for the shadow. The main render pass that follows samples this shadow map texture while drawing our OBJs.
const renderShadowExample = async () => {
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter!.requestDevice();
const canvas = document.getElementById("canvas") as HTMLCanvasElement;
const context = canvas.getContext("webgpu");
const canvasConfig: GPUCanvasConfiguration = {
device: device!,
format: navigator.gpu.getPreferredCanvasFormat() as GPUTextureFormat,
usage: GPUTextureUsage.RENDER_ATTACHMENT,
alphaMode: "opaque",
}
context!.configure(canvasConfig);
let angle = 0.0;
const sampler: GPUSampler = device!.createSampler({
addressModeU: 'clamp-to-edge',
addressModeV: 'clamp-to-edge',
magFilter: 'linear',
minFilter: 'linear',
compare: 'less'
});
const lightDepthTextureDesc: GPUTextureDescriptor = {
size: [1024, 1024, 1],
dimension: '2d',
format: "depth32float",
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC | GPUTextureUsage.TEXTURE_BINDING
}
const lightDepthTexture: GPUTexture = device!.createTexture(lightDepthTextureDesc);
const arcball = new Arcball(15.0);
const modelViewMatrix = arcball.getMatrices();
const modelViewMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(modelViewMatrix), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);
const viewDir = glMatrix.vec3.fromValues(-10.0, -10.0, -10.0);
const viewDirectionUniformBuffer = createGPUBuffer(device!, new Float32Array(viewDir), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);
const lightDirectionBuffer = createGPUBuffer(device!, new Float32Array(viewDir), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);
const modelViewMatrixInverse = glMatrix.mat4.invert(glMatrix.mat4.create(), modelViewMatrix)!;
const normalMatrix = glMatrix.mat4.transpose(glMatrix.mat4.create(), modelViewMatrixInverse);
const normalMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(normalMatrix), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);
const projectionMatrix = glMatrix.mat4.perspective(glMatrix.mat4.create(), 1.4, canvas.width / canvas.height, 0.1, 1000.0);
const projectionMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(projectionMatrix), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);
const lightProjectionMatrix = glMatrix.mat4.perspective(glMatrix.mat4.create(), Math.acos(0.9) * 2.0, 1.0, 1.0, 100.0);
const lightProjectionMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(lightProjectionMatrix), GPUBufferUsage.UNIFORM);
const lightModelViewMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(modelViewMatrix), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);
const teapot = await ShadowTeapot.init(device!, modelViewMatrixUniformBuffer, projectionMatrixUniformBuffer, normalMatrixUniformBuffer, viewDirectionUniformBuffer, lightDirectionBuffer, lightDepthTexture, sampler,
lightModelViewMatrixUniformBuffer, lightProjectionMatrixUniformBuffer, shadowObjModelWgsl, shadowLightWgsl);
const plane = await ShadowPlane.init(device!, modelViewMatrixUniformBuffer, projectionMatrixUniformBuffer, normalMatrixUniformBuffer, viewDirectionUniformBuffer, lightDirectionBuffer, lightDepthTexture,
sampler, lightModelViewMatrixUniformBuffer, lightProjectionMatrixUniformBuffer, shadowObjModelWgsl);
const lightDepthAttachment: GPURenderPassDepthStencilAttachment = {
view: lightDepthTexture.createView(),
depthClearValue: 1,
depthLoadOp: 'clear',
depthStoreOp: 'store'
};
const lightColorTextureDesc: GPUTextureDescriptor = {
size: [1024, 1024, 1],
dimension: "2d",
format: 'bgra8unorm',
usage: GPUTextureUsage.RENDER_ATTACHMENT
}
const lightColorTexture: GPUTexture = device!.createTexture(lightColorTextureDesc);
const lightColorAttachment: GPURenderPassColorAttachment = {
view: lightColorTexture.createView(),
clearValue: {r: 1, g: 0, b: 0, a: 1},
loadOp: "load",
storeOp: "store"
}
const lightRenderPassDesc: GPURenderPassDescriptor = {
colorAttachments: [lightColorAttachment],
depthStencilAttachment: lightDepthAttachment
}
let depthTexture: GPUTexture | null = null;
let depthStencilAttachment: GPURenderPassDepthStencilAttachment | undefined = undefined;
async function render() {
const devicePixelRatio = window.devicePixelRatio || 1;
let currentCanvasWidth = canvas.clientWidth * devicePixelRatio;
let currentCanvasHeight = canvas.clientHeight * devicePixelRatio;
let projectionMatrixUniformBufferUpdate = null;
if (currentCanvasWidth != canvas.width || currentCanvasHeight != canvas.height) {
canvas.width = currentCanvasWidth;
canvas.height = currentCanvasHeight;
if (depthTexture !== null) {
depthTexture.destroy();
}
const depthTextureDesc: GPUTextureDescriptor = {
size: [canvas.width, canvas.height, 1],
dimension: '2d',
format: 'depth32float',
usage: GPUTextureUsage.RENDER_ATTACHMENT
};
depthTexture = device!.createTexture(depthTextureDesc);
let depthTextureView = depthTexture.createView();
depthStencilAttachment = {
view: depthTextureView,
depthClearValue: 1,
depthLoadOp: 'clear',
depthStoreOp: 'store'
};
let projectionMatrix = glMatrix.mat4.perspective(glMatrix.mat4.create(),
1.4, canvas.width / canvas.height, 0.1, 1000.0);
projectionMatrixUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(projectionMatrix), GPUBufferUsage.COPY_SRC);
}
const modelViewMatrix = arcball.getMatrices();
const modelViewMatrixUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(modelViewMatrix), GPUBufferUsage.COPY_SRC);
const modelViewMatrixInverse = glMatrix.mat4.invert(glMatrix.mat4.create(), modelViewMatrix)!;
const normalMatrix = glMatrix.mat4.transpose(glMatrix.mat4.create(), modelViewMatrixInverse);
const normalMatrixUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(normalMatrix), GPUBufferUsage.COPY_SRC);
const viewDir = glMatrix.vec3.fromValues(-arcball.forward[0], -arcball.forward[1], -arcball.forward[2]);
const viewDirectionUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(viewDir), GPUBufferUsage.COPY_SRC);
const lightDir = glMatrix.vec3.fromValues(Math.cos(angle) * 8.0, Math.sin(angle) * 8.0, 10);
const lightDirectionBufferUpdate = createGPUBuffer(device!, new Float32Array(lightDir), GPUBufferUsage.COPY_SRC);
let lightModelViewMatrix = glMatrix.mat4.lookAt(glMatrix.mat4.create(),
glMatrix.vec3.fromValues(Math.cos(angle) * 8.0, Math.sin(angle) * 8.0, 10),
glMatrix.vec3.fromValues(0, 0, 0), glMatrix.vec3.fromValues(0.0, 0.0, 1.0));
const lightModelViewMatrixUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(lightModelViewMatrix), GPUBufferUsage.COPY_SRC);
const colorTexture = context!.getCurrentTexture();
const colorTextureView = colorTexture.createView();
const colorAttachment: GPURenderPassColorAttachment = {
view: colorTextureView,
clearValue: { r: 1, g: 0, b: 0, a: 1 },
loadOp: 'clear',
storeOp: 'store'
};
const renderPassDesc: GPURenderPassDescriptor = {
colorAttachments: [colorAttachment],
depthStencilAttachment: depthStencilAttachment
};
const commandEncoder = device!.createCommandEncoder();
if (projectionMatrixUniformBufferUpdate != null) {
commandEncoder.copyBufferToBuffer(projectionMatrixUniformBufferUpdate, 0, projectionMatrixUniformBuffer, 0, 16 * Float32Array.BYTES_PER_ELEMENT);
}
commandEncoder.copyBufferToBuffer(lightModelViewMatrixUniformBufferUpdate, 0, lightModelViewMatrixUniformBuffer, 0, 16 * Float32Array.BYTES_PER_ELEMENT);
commandEncoder.copyBufferToBuffer(modelViewMatrixUniformBufferUpdate, 0, modelViewMatrixUniformBuffer, 0, 16 * Float32Array.BYTES_PER_ELEMENT);
commandEncoder.copyBufferToBuffer(normalMatrixUniformBufferUpdate, 0, normalMatrixUniformBuffer, 0, 16 * Float32Array.BYTES_PER_ELEMENT);
commandEncoder.copyBufferToBuffer(viewDirectionUniformBufferUpdate, 0, viewDirectionUniformBuffer, 0, 3 * Float32Array.BYTES_PER_ELEMENT);
commandEncoder.copyBufferToBuffer(lightDirectionBufferUpdate, 0, lightDirectionBuffer, 0, 3 * Float32Array.BYTES_PER_ELEMENT);
const lightPassEncoder = commandEncoder.beginRenderPass(lightRenderPassDesc);
lightPassEncoder.setViewport(0, 0, 1024, 1024, 0, 1);
teapot.encodeLightRenderPass(lightPassEncoder);
lightPassEncoder.end();
const passEncoder = commandEncoder.beginRenderPass(renderPassDesc);
passEncoder.setViewport(0, 0, canvas.width, canvas.height, 0, 1);
teapot.encodeRenderPass(passEncoder);
plane.encodeRenderPass(passEncoder);
passEncoder.end();
device!.queue.submit([commandEncoder.finish()]);
await device!.queue.onSubmittedWorkDone();
if (projectionMatrixUniformBufferUpdate) {
projectionMatrixUniformBufferUpdate.destroy();
}
modelViewMatrixUniformBufferUpdate.destroy();
normalMatrixUniformBufferUpdate.destroy();
viewDirectionUniformBufferUpdate.destroy();
lightDirectionBufferUpdate.destroy();
lightModelViewMatrixUniformBufferUpdate.destroy();
angle += 0.01;
requestAnimationFrame(render);
}
new Controls(canvas, arcball, render);
requestAnimationFrame(render);
}
In our setup we create a light color attachment (which we will throw away) and a light depth attachment (which becomes our shadow map).
Our render logic first performs the light render pass to fill lightDepthTexture, then renders the actual OBJ geometry in the main render pass. One detail worth noting: the light’s projection uses a field of view of Math.acos(0.9) * 2.0, so its frustum exactly covers the align > 0.9 spotlight cone tested in the object fragment shader.
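One possible simplification worth noting: WebGPU allows a render pass with no color attachments at all, and a render pipeline may omit its fragment stage entirely, so the throwaway color texture could be dropped for a depth-only shadow pass. The trade-off is losing the manual frag_depth offset for back faces, which the hardware depth bias mentioned earlier could stand in for. A hypothetical sketch, reusing names from the teapot class above as if they were in scope:
// Hypothetical depth-only light pass: no color attachment to create or store.
const depthOnlyLightPassDesc: GPURenderPassDescriptor = {
    colorAttachments: [],
    depthStencilAttachment: lightDepthAttachment
};

// The matching pipeline simply omits the fragment stage, so only depth is written.
const depthOnlyPipelineDesc: GPURenderPipelineDescriptor = {
    layout: device!.createPipelineLayout({ bindGroupLayouts: [uniformBindGroupLayoutLight] }),
    vertex: { module: shaderModuleLight, entryPoint: 'vs_main', buffers: [positionBufferLayout] },
    primitive: { topology: 'triangle-list', frontFace: 'ccw', cullMode: 'none' },
    depthStencil: { depthWriteEnabled: true, depthCompare: 'less', format: 'depth32float' }
};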
Conclusion
In this article we learned about shadow mapping: a preliminary render pass produces a depth map from the perspective of the light, and the camera render pass then uses that depth map to determine which fragments are occluded from the light and should therefore be shadowed.