{
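// requestAdapter() can resolve to null and requestDevice() can fail on browsers
// without WebGPU support; both are assumed to succeed here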
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
const context = util.ctx(512, 512);
const format = navigator.gpu.getPreferredCanvasFormat();
context.configure({ device, format });
const module = device.createShaderModule({
code: `
struct Vertex {
@location(0) position: vec2f,
@location(1) color: vec4f,
@location(2) offset: vec2f,
@location(3) scale: vec2f,
@location(4) perVertexColor: vec3f,
};
struct VertexOut {
@builtin(position) position: vec4f,
@location(0) color: vec4f,
};
@vertex
fn vs(vertex: Vertex) -> VertexOut {
var vsOut: VertexOut;
vsOut.position = vec4f(vertex.position * vertex.scale + vertex.offset, 0.0, 1.0);
vsOut.color = vertex.color * vec4f(vertex.perVertexColor, 1);
return vsOut;
}
@fragment
fn fs(vsOut: VertexOut) -> @location(0) vec4f {
return vsOut.color;
}
`
});
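// the pipeline pulls from three vertex buffers: buffer 0 holds per-vertex geometry
// (position + perVertexColor), buffer 1 holds per-instance static values (color + offset),
// and buffer 2 holds the per-instance scale, rewritten every frame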
const pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
buffers: [
{
arrayStride: 2 * 4 + 4, // (2 floats * 4 bytes each) + 4 bytes
attributes: [
{ shaderLocation: 0, offset: 0, format: 'float32x2' }, // position
// colors are stored as unorm8x4 instead of 32-bit floats to save space:
// WebGPU normalizes the 8-bit values from 0..255 to 0.0..1.0 before they reach the shader.
// there is no 8-bit 3-value format, so `unorm8x4` is used (the 4th value is unused by the vec3f attribute)
{ shaderLocation: 4, offset: 8, format: 'unorm8x4' }, // perVertexColor
],
},
// static
{
arrayStride: 4 + 2 * 4, // 4 bytes + (2 floats * 4 bytes each)
// attr advances one per instance (vs one per vert, starting over at instance)
stepMode: 'instance',
attributes: [
{ shaderLocation: 1, offset: 0, format: 'unorm8x4' }, // color (rgba -> 4 bytes)
{ shaderLocation: 2, offset: 0 + 4, format: 'float32x2' }, // offset (xy -> 2x4)
],
},
// dynamic
{
arrayStride: 2 * 4, // 2 floats * 4 bytes each
stepMode: 'instance',
attributes: [
{ shaderLocation: 3, offset: 0, format: 'float32x2' }, // scale (xy)
]
}
]
},
fragment: {
module,
entryPoint: 'fs',
targets: [{ format }]
}
});
// janky (several passes over the data), but we're not focused on perf right now...
const xExtent = [Math.min(...data.map(d => d.len)), Math.max(...data.map(d => d.len))];
const yExtent = [Math.min(...data.map(d => d.depth)), Math.max(...data.map(d => d.depth))];
const kNumObjects = data.length;
const dynamicData = [];
// create 2 per-instance vertex buffers: one static (color + offset), one rewritten every frame (scale)
const staticUnitSize =
4 + // color = 4 bytes
2 * 4; // offset = 2x4 (2 32-bit-floats x 4 bytes)
const dynamicUnitSize =
2 * 4; // scale = 2x4 (2 32-bit-floats x 4 bytes)
const staticBufferSize = staticUnitSize * kNumObjects;
const dynamicBufferSize = dynamicUnitSize * kNumObjects;
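// one 12-byte block per object in the static buffer (4-byte color + 8-byte offset)
// and one 8-byte block per object in the dynamic buffer (float32x2 scale)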
const staticBuffer = device.createBuffer({
size: staticBufferSize,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
const dynamicBuffer = device.createBuffer({
size: dynamicBufferSize,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
// offsets to the values within each per-object block
const kColorOffset = 0; // byte index into the u8 view
const kOffsetOffset = 1; // float32 index: the 4 color bytes occupy one float32 slot
const kScaleOffset = 0; // float32 index into the dynamic block
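// two typed-array views over the same ArrayBuffer: the u8 view writes the unorm8x4
// color bytes, the f32 view writes the float32x2 offset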
const staticValuesU8 = new Uint8Array(staticBufferSize);
const staticValuesF32 = new Float32Array(staticValuesU8.buffer);
for (let i = 0; i < kNumObjects; i++) {
const d = data[i];
const c = species.indexOf(d.species);
const staticOffsetU8 = i * staticUnitSize;
const staticOffsetF32 = staticOffsetU8 / 4;
// color: one-hot by species index (r, g, or b channel at 255), opaque alpha
staticValuesU8.set(
[+(c === 0) * 255, +(c === 1) * 255, +(c === 2) * 255, 255],
staticOffsetU8 + kColorOffset
);
// offset
staticValuesF32.set(
// lerp to clip space
// in subsequent notebooks, we will do these calculations in shaders
[
-1 + ((d.len - xExtent[0]) / (xExtent[1] - xExtent[0])) * (1 - (-1)),
-1 + ((d.depth - yExtent[0]) / (yExtent[1] - yExtent[0])) * (1 - (-1))
],
staticOffsetF32 + kOffsetOffset
);
// scale
dynamicData.push({ scale: 0.025 });
}
// write to gpu; will write dynamic values during render
device.queue.writeBuffer(staticBuffer, 0, staticValuesF32);
// used to update dynamicBuffer
const dynamicValues = new Float32Array(dynamicBufferSize / 4);
// position (`@location(0)`)
const { vertices, indices, count: vertexCount } = util.geom.circle({ r: 1, innerR: 0 });
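// note: `count` (renamed vertexCount here) is assumed to be the number of indices,
// since it is passed as the index count to drawIndexed() below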
const vertexBuffer = device.createBuffer({
size: vertices.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(vertexBuffer, 0, vertices);
// optimization!
// create an index buffer (see util.geom.circle to see the logic behind this)
const indexBuffer = device.createBuffer({
size: indices.byteLength,
usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(indexBuffer, 0, indices);
const renderPassDescriptor = {
colorAttachments: [
{
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
view: undefined, // to be filled out when we render
},
],
};
function render() {
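// get the canvas's current texture; it changes every frame, so the view must be re-created each render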
renderPassDescriptor.colorAttachments[0].view = context.getCurrentTexture().createView();
const encoder = device.createCommandEncoder();
const pass = encoder.beginRenderPass(renderPassDescriptor);
pass.setPipeline(pipeline);
pass.setVertexBuffer(0, vertexBuffer);
pass.setVertexBuffer(1, staticBuffer);
pass.setVertexBuffer(2, dynamicBuffer);
// optimization! index buffer for deduped triangle vertices
pass.setIndexBuffer(indexBuffer, 'uint32');
// could cache this and only recompute when the canvas resizes
const aspect = context.canvas.width / context.canvas.height;
// set scales for each object
dynamicData.forEach(({ scale }, i) => {
const offset = i * (dynamicUnitSize / 4);
dynamicValues.set([scale / aspect, scale], offset + kScaleOffset); // set scale
});
// upload all scales at once
device.queue.writeBuffer(dynamicBuffer, 0, dynamicValues);
// pass.draw(vertexCount, kNumObjects);
// optimization!
pass.drawIndexed(vertexCount, kNumObjects);
pass.end();
device.queue.submit([encoder.finish()]);
}
render();
return htl.html`
<figure>
${context.canvas}
<figcaption>
x = penguin culmen length (mm); y = penguin culmen depth (mm)
<br><br>
${legend}
<br>
</figcaption>
</figure>
`;
}