WebGPU Rendering: Part 14 Equirectangular Rendering
Introduction
I have been reading through this online book on WebGPU. In this series of articles, I will be going through the book and implementing its lessons with a more structured TypeScript class approach, and eventually we will build three types of WebGPU renderers: Gaussian splatting, ray tracing, and rasterization.
In this article we will look at equirectangular rendering, a technique used in 3D computer graphics to project a 3D scene onto a flat 2D image in an equirectangular projection format, where the horizontal axis spans the full 360 degrees around the viewer and the vertical axis spans 180 degrees from straight up to straight down. This type of rendering is commonly used for creating 360-degree panoramic images or videos that can be viewed interactively in VR (Virtual Reality) environments or 360-degree video players.
The following link is the commit in my GitHub repo that matches the code we will go over.
Equirectangular Rendering
To accomplish this rendering technique, we first project the 3D scene onto a virtual unit sphere surrounding the camera. Then we flatten the virtual sphere onto a rectangular 2D image, which gives us the UV coordinates we use to color each fragment.
@group(0) @binding(0)
var<uniform> modelView: mat4x4<f32>;
@group(0) @binding(1)
var<uniform> projection: mat4x4<f32>;

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) worldPos: vec3<f32>
}

@vertex
fn vs_main(@location(0) inPos: vec3<f32>) -> VertexOutput {
    var out: VertexOutput;
    out.worldPos = inPos;
    var worldLoc: vec4<f32> = modelView * vec4<f32>(inPos, 1.0);
    out.clip_position = projection * worldLoc;
    return out;
}

const pi: f32 = 3.141592654;

@group(0) @binding(2)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(3)
var s_diffuse: sampler;

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    // Project the fragment position onto the unit sphere.
    var n: vec3<f32> = normalize(in.worldPos);
    // Length of the projection onto the horizontal XY plane.
    var len: f32 = sqrt(n.x * n.x + n.y * n.y);
    // Horizontal (azimuth) angle; the branch extends it to the full [0, 2*pi) range.
    var s: f32 = acos(n.x / len);
    if (n.y < 0.0) {
        s = 2.0 * pi - s;
    }
    // Normalize the azimuth to [0, 1] and derive the vertical coordinate from asin(n.z).
    s = s / (2.0 * pi);
    var tex_coord: vec2<f32> = vec2(s, ((asin(n.z) * -2.0 / pi) + 1.0) * 0.5);
    return textureSampleLevel(t_diffuse, s_diffuse, tex_coord, 0);
}
The vertex shader takes in the world-space positions we want to draw the environment on (in our case, we render a set of large squares around the camera to fully surround it) and passes them along to the fragment shader.
In our fragment shader, we first normalize the position so it lies on the unit sphere. Then we compute the length of its projection onto the horizontal XY plane. From this we recover the horizontal (azimuth) angle using acos, with a special case for the negative y direction, where we flip the angle so it covers the full 2π range. We then normalize the angle to the [0, 1] range and pair it with a vertical coordinate derived from asin of the z component, giving UV coordinates that sample the correct spot in our equirectangular texture. Since the texture only has a single mip level, we sample it explicitly at level 0 with textureSampleLevel.
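To make the mapping concrete, here is a small CPU-side TypeScript sketch of the same direction-to-UV math. This is purely illustrative and not part of the renderer; it uses atan2 instead of the acos-plus-branch form in the shader, which computes the same azimuth.

// Maps a direction vector (z up) to equirectangular UV coordinates in [0, 1].
// u is the azimuth around the z axis, v comes from the elevation angle asin(z).
const dirToEquirectUV = (x: number, y: number, z: number): [number, number] => {
    const len = Math.hypot(x, y, z);
    const nx = x / len, ny = y / len, nz = z / len;
    // atan2 returns a value in (-pi, pi]; wrap it into [0, 1) for the horizontal coordinate.
    let u = Math.atan2(ny, nx) / (2.0 * Math.PI);
    if (u < 0) { u += 1.0; }
    // asin(nz) is in [-pi/2, pi/2]; remap so nz = 1 (straight up) maps to v = 0 (top of the image).
    const v = (1.0 - (2.0 / Math.PI) * Math.asin(nz)) * 0.5;
    return [u, v];
};

// For example, looking along +x at the horizon lands in the middle row of the image:
// dirToEquirectUV(1, 0, 0) returns [0, 0.5].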
Skybox
We will need a small helper function to load our image in and turn it into a texture.
export const img2Texture = async (device: GPUDevice, filePath: string): Promise<GPUTexture> => {
    const response = await fetch(filePath);
    const blob = await response.blob();
    const imageBitmap = await createImageBitmap(blob);

    const textureDescriptor: GPUTextureDescriptor = {
        size: { width: imageBitmap.width, height: imageBitmap.height },
        format: "rgba8unorm",
        usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
    };

    const texture = device.createTexture(textureDescriptor);
    device.queue.copyExternalImageToTexture({ source: imageBitmap }, { texture }, textureDescriptor.size);
    return texture;
};
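As a quick sanity check (purely illustrative, using the same path the Skybox loads below), the returned texture should be roughly twice as wide as it is tall, since an equirectangular panorama covers 360 degrees horizontally and 180 degrees vertically:

// GPUTexture exposes its dimensions directly; expect a ratio close to 2 for a panorama.
const envTexture = await img2Texture(device, "./data/parking_lot.jpg");
console.log(envTexture.width / envTexture.height);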
Our Skybox is composed of six square planes encasing the camera, which we then use to render our parking lot environment.
export class Skybox {
    private _pipeline: GPURenderPipeline;
    private _positionsBuffer: GPUBuffer;
    private _uniformBindGroup: GPUBindGroup;

    public static async init(device: GPUDevice, modelViewMatrixUniformBuffer: GPUBuffer, projectionMatrixUniformBuffer: GPUBuffer, shaderCode: string) {
        const shaderModule: GPUShaderModule = device.createShaderModule({ code: shaderCode });

        // Six square faces, two triangles each: 6 * 2 * 3 = 36 vertices.
        const positions = new Float32Array([
            100.0, -100.0, 50.0, -100.0, -100.0, 50.0, -100.0, 100.0, 50.0,
            -100.0, 100.0, 50.0, 100.0, 100.0, 50.0, 100.0, -100.0, 50.0,
            -100.0, 100.0, -50.0, -100.0, -100.0, -50.0, 100.0, -100.0, -50.0,
            100.0, -100.0, -50.0, 100.0, 100.0, -50.0, -100.0, 100.0, -50.0,
            -100.0, 50.0, -100.0, 100.0, 50.0, -100.0, -100.0, 50.0, 100.0,
            100.0, 50.0, 100.0, -100.0, 50.0, 100.0, 100.0, 50.0, -100.0,
            -100.0, -50.0, -100.0, -100.0, -50.0, 100.0, 100.0, -50.0, -100.0,
            100.0, -50.0, 100.0, 100.0, -50.0, -100.0, -100.0, -50.0, 100.0,
            50.0, 100.0, -100.0, 50.0, -100.0, -100.0, 50.0, -100.0, 100.0,
            50.0, -100.0, 100.0, 50.0, 100.0, 100.0, 50.0, 100.0, -100.0,
            -50.0, -100.0, 100.0, -50.0, -100.0, -100.0, -50.0, 100.0, -100.0,
            -50.0, 100.0, -100.0, -50.0, 100.0, 100.0, -50.0, -100.0, 100.0,
        ]);
        const positionsBuffer = createGPUBuffer(device, positions, GPUBufferUsage.VERTEX);

        const sampler = device.createSampler({
            addressModeU: 'repeat',
            addressModeV: 'repeat',
            magFilter: 'linear',
            minFilter: 'linear',
            mipmapFilter: 'linear'
        });

        const texture = await img2Texture(device, "./data/parking_lot.jpg");

        const uniformBindGroupLayout = device.createBindGroupLayout({
            entries: [
                {
                    binding: 0,
                    visibility: GPUShaderStage.VERTEX,
                    buffer: {}
                },
                {
                    binding: 1,
                    visibility: GPUShaderStage.VERTEX,
                    buffer: {}
                },
                {
                    binding: 2,
                    visibility: GPUShaderStage.FRAGMENT,
                    texture: {}
                },
                {
                    binding: 3,
                    visibility: GPUShaderStage.FRAGMENT,
                    sampler: {}
                }
            ]
        });

        const uniformBindGroup = device.createBindGroup({
            layout: uniformBindGroupLayout,
            entries: [
                {
                    binding: 0,
                    resource: {
                        buffer: modelViewMatrixUniformBuffer
                    }
                },
                {
                    binding: 1,
                    resource: {
                        buffer: projectionMatrixUniformBuffer
                    }
                },
                {
                    binding: 2,
                    resource: texture.createView()
                },
                {
                    binding: 3,
                    resource: sampler
                }
            ]
        });

        const positionAttribDesc: GPUVertexAttribute = {
            shaderLocation: 0,
            offset: 0,
            format: 'float32x3'
        };
        const positionBufferLayoutDesc: GPUVertexBufferLayout = {
            attributes: [positionAttribDesc],
            arrayStride: Float32Array.BYTES_PER_ELEMENT * 3,
            stepMode: 'vertex'
        };

        const pipelineLayout: GPUPipelineLayout = device.createPipelineLayout({
            bindGroupLayouts: [uniformBindGroupLayout]
        });

        const pipelineDesc: GPURenderPipelineDescriptor = {
            layout: pipelineLayout,
            vertex: {
                module: shaderModule,
                entryPoint: 'vs_main',
                buffers: [positionBufferLayoutDesc]
            },
            fragment: {
                module: shaderModule,
                entryPoint: 'fs_main',
                // This must match the format the canvas context is configured with.
                targets: [{ format: 'bgra8unorm' }]
            },
            primitive: {
                topology: 'triangle-list',
                frontFace: 'ccw',
                cullMode: 'back'
            },
            depthStencil: {
                depthWriteEnabled: true,
                depthCompare: 'less',
                format: 'depth32float'
            }
        };
        const pipeline = device.createRenderPipeline(pipelineDesc);

        return new Skybox(pipeline, positionsBuffer, uniformBindGroup);
    }

    public encodeRenderPass(renderPassEncoder: GPURenderPassEncoder) {
        renderPassEncoder.setPipeline(this._pipeline);
        renderPassEncoder.setBindGroup(0, this._uniformBindGroup);
        renderPassEncoder.setVertexBuffer(0, this._positionsBuffer);
        renderPassEncoder.draw(36, 1);
    }

    private constructor(pipeline: GPURenderPipeline, positionsBuffer: GPUBuffer, uniformBindGroup: GPUBindGroup) {
        this._pipeline = pipeline;
        this._positionsBuffer = positionsBuffer;
        this._uniformBindGroup = uniformBindGroup;
    }
}
Drawing the Skybox works the same way as pretty much every other item we have rendered: we construct the pipeline, bind the uniforms, texture, and sampler through a bind group, and expose a function that encodes the draw call during the render pass.
Rendering
Our actual rendering code is very simple and nothing we haven't seen before. Since most of the logic lives in the shader code and the Skybox class, the render loop is pretty standard.
const renderSkyboxExample = async () => {
    const adapter = await navigator.gpu.requestAdapter();
    const device = await adapter!.requestDevice();

    const canvas = document.getElementById("canvas") as HTMLCanvasElement;
    const context = canvas.getContext("webgpu");
    const canvasConfig: GPUCanvasConfiguration = {
        device: device!,
        format: navigator.gpu.getPreferredCanvasFormat() as GPUTextureFormat,
        usage: GPUTextureUsage.RENDER_ATTACHMENT,
        alphaMode: "opaque",
    };
    context!.configure(canvasConfig);

    const arcball = new Arcball(15.0);
    const modelViewMatrix = arcball.getMatrices();
    const modelViewMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(modelViewMatrix), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);

    const projectionMatrix = glMatrix.mat4.perspective(glMatrix.mat4.create(), 1.4, canvas.width / canvas.height, 0.1, 1000.0);
    const projectionMatrixUniformBuffer = createGPUBuffer(device!, new Float32Array(projectionMatrix), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST);

    const skybox = await Skybox.init(device!, modelViewMatrixUniformBuffer, projectionMatrixUniformBuffer, skyBoxShader);

    let depthTexture: GPUTexture | null = null;
    let depthStencilAttachment: GPURenderPassDepthStencilAttachment | undefined = undefined;

    async function render() {
        const devicePixelRatio = window.devicePixelRatio || 1;
        const currentCanvasWidth = canvas.clientWidth * devicePixelRatio;
        const currentCanvasHeight = canvas.clientHeight * devicePixelRatio;

        let projectionMatrixUniformBufferUpdate = null;
        // Recreate the depth texture and projection matrix whenever the canvas is resized.
        if (currentCanvasWidth != canvas.width || currentCanvasHeight != canvas.height) {
            canvas.width = currentCanvasWidth;
            canvas.height = currentCanvasHeight;

            if (depthTexture !== null) {
                depthTexture.destroy();
            }
            const depthTextureDesc: GPUTextureDescriptor = {
                size: [canvas.width, canvas.height, 1],
                dimension: '2d',
                format: 'depth32float',
                usage: GPUTextureUsage.RENDER_ATTACHMENT
            };
            depthTexture = device!.createTexture(depthTextureDesc);
            const depthTextureView = depthTexture.createView();
            depthStencilAttachment = {
                view: depthTextureView,
                depthClearValue: 1,
                depthLoadOp: 'clear',
                depthStoreOp: 'store'
            };

            const projectionMatrix = glMatrix.mat4.perspective(glMatrix.mat4.create(),
                1.4, canvas.width / canvas.height, 0.1, 1000.0);
            projectionMatrixUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(projectionMatrix), GPUBufferUsage.COPY_SRC);
        }

        const modelViewMatrix = arcball.getMatrices();
        const modelViewMatrixUniformBufferUpdate = createGPUBuffer(device!, new Float32Array(modelViewMatrix), GPUBufferUsage.COPY_SRC);

        const colorTexture = context!.getCurrentTexture();
        const colorTextureView = colorTexture.createView();
        const colorAttachment: GPURenderPassColorAttachment = {
            view: colorTextureView,
            clearValue: { r: 1, g: 0, b: 0, a: 1 },
            loadOp: 'clear',
            storeOp: 'store'
        };
        const renderPassDesc: GPURenderPassDescriptor = {
            colorAttachments: [colorAttachment],
            depthStencilAttachment: depthStencilAttachment
        };

        const commandEncoder = device!.createCommandEncoder();
        if (projectionMatrixUniformBufferUpdate != null) {
            commandEncoder.copyBufferToBuffer(projectionMatrixUniformBufferUpdate, 0, projectionMatrixUniformBuffer, 0, 16 * Float32Array.BYTES_PER_ELEMENT);
        }
        commandEncoder.copyBufferToBuffer(modelViewMatrixUniformBufferUpdate, 0, modelViewMatrixUniformBuffer, 0, 16 * Float32Array.BYTES_PER_ELEMENT);

        const passEncoder = commandEncoder.beginRenderPass(renderPassDesc);
        passEncoder.setViewport(0, 0, canvas.width, canvas.height, 0, 1);
        skybox.encodeRenderPass(passEncoder);
        passEncoder.end();
        device!.queue.submit([commandEncoder.finish()]);
        await device!.queue.onSubmittedWorkDone();

        if (projectionMatrixUniformBufferUpdate) {
            projectionMatrixUniformBufferUpdate.destroy();
        }
        modelViewMatrixUniformBufferUpdate.destroy();

        requestAnimationFrame(render);
    }

    new Controls(canvas, arcball, render);
    requestAnimationFrame(render);
}
Conclusion
In this article, we saw how to render an environment view from a single image using equirectangular rendering. Most of the interesting work is done in the fragment shader, which projects the fragment's 3D position onto a unit sphere and then uses the projected point to sample from the provided equirectangular image.