I am trying to code a simple paint example with WebGPU, so on every mousemove, arrayData.length changes. I keep an array that stores all the points (a vec2 array), and I create a new GPUBuffer from this points array every frame, because I need to change the buffer size and gpuBuffer.size is read-only.
Eventually I get an error: RangeError: Failed to execute 'createBuffer' on 'GPUDevice': createBuffer failed, size is too large for the implementation when mappedAtCreation == true
// Fixed test data: two 2D points, stored as interleaved (x, y) pairs.
const data = new Float32Array( [ 0.3, 0.3, 0.4, 0.4 ] );
// Per-frame loop: a brand-new GPUBuffer is created each frame because
// GPUBuffer.size is read-only once the buffer exists.
async function frame () {
const commandEncoder = device.createCommandEncoder();
... ...
// NOTE(review): the RangeError quoted above points at this call — with
// mappedAtCreation: true the implementation validates `size` strictly
// (it must be a multiple of 4 and within device limits); verify what
// data.byteLength actually is on the failing frame.
const vertexBuffer = device.createBuffer( {
size: data.byteLength,
usage: GPUBufferUsage.VERTEX,
mappedAtCreation: true
} );
//@ts-ignore
// Copy the CPU-side points into the mapped GPU memory, then unmap so the
// buffer can be used by the GPU.
const dst = new data.constructor( vertexBuffer.getMappedRange() );
dst.set( data );
vertexBuffer.unmap();
const passEncoder = commandEncoder.beginRenderPass( renderPassDescriptor );
passEncoder.setPipeline( pipeline );
passEncoder.setVertexBuffer( 0, vertexBuffer );
// One vertex per (x, y) pair.
passEncoder.draw( data.length / 2 );
passEncoder.end();
device.queue.submit( [ commandEncoder.finish() ] );
// Destroying the buffer after submit did not make the error go away.
vertexBuffer.destroy();
requestAnimationFrame( frame );
}
I want to update the vertexBuffer on every mousemove (i.e. every frame). At present I create a new GPUBuffer each time, but it doesn't seem to work.
It's not entirely clear what you're trying to do, but if it were me, I wouldn't create a new buffer on every mousemove. Instead, I'd create one large buffer and just write data into it on each mousemove. If the buffer gets full, I'd allocate an additional buffer.
/* global GPUBufferUsage */
/* global GPUTextureUsage */

/**
 * Paint demo: points are stored in a growable chain of fixed-size GPU
 * vertex buffers ("pages"). Each mousemove appends one point via
 * queue.writeBuffer; render() draws every page each animation frame.
 * No buffer is ever resized or recreated — when a page fills up, a new
 * one is allocated and pushed onto the list.
 */
async function main() {
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    alert('need a browser that supports WebGPU');
    return;
  }

  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device,
    format: presentationFormat,
    alphaMode: 'premultiplied',
  });

  const code = `
  struct MyVSInput {
      @location(0) position: vec4f,
  };

  @vertex
  fn myVSMain(v: MyVSInput) -> @builtin(position) vec4f {
    return v.position;
  }

  @fragment
  fn myFSMain() -> @location(0) vec4f {
    return vec4f(1, 1, 0, 1);
  }
  `;

  const module = device.createShaderModule({code});
  const pipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: {
      module,
      buffers: [
        {
          arrayStride: 2 * 4, // one vec2f (2 floats, 4 bytes each) per vertex
          attributes: [
            { shaderLocation: 0, offset: 0, format: 'float32x2' },
          ],
        },
      ],
    },
    fragment: {
      module,
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'point-list',
    },
  });

  const buffers = [];
  const pointsPerBuffer = 128; // should make this much larger (16k) but keeping it small for testing.
  let numPoints = 0;
  // Scratch array reused for every write. queue.writeBuffer copies its data
  // synchronously at call time, so reusing one array is safe and avoids a
  // per-point allocation.
  const scratch = new Float32Array(2);

  // Append one point (clip-space x, y) to the current page, allocating a
  // fresh page when the previous one is full.
  const addPoint = (x, y) => {
    const bufferNdx = numPoints / pointsPerBuffer | 0;
    const ndx = numPoints % pointsPerBuffer;
    if (ndx === 0) {
      const buffer = device.createBuffer({
        size: pointsPerBuffer * 2 * 4,
        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
      });
      buffers.push(buffer);
    }
    scratch[0] = x;
    scratch[1] = y;
    device.queue.writeBuffer(buffers[bufferNdx], ndx * 2 * 4, scratch);
    ++numPoints;
  };

  const renderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned each frame from the current canvas texture
        clearValue: [ 0.2, 0.2, 0.2, 1.0 ],
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
  };

  function render() {
    const canvasTexture = context.getCurrentTexture();
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    const encoder = device.createCommandEncoder();
    const pass = encoder.beginRenderPass(renderPassDescriptor);
    pass.setPipeline(pipeline);
    // Draw every page; only the last page can be partially filled.
    buffers.forEach((buffer, i) => {
      pass.setVertexBuffer(0, buffer);
      const base = i * pointsPerBuffer;
      const numToDraw = Math.min(numPoints - base, pointsPerBuffer);
      pass.draw(numToDraw);
    });
    pass.end();
    device.queue.submit([encoder.finish()]);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

  window.addEventListener('mousemove', (e) => {
    // Convert client coordinates to clip space: x in [-1, 1], y flipped
    // so that up is positive.
    const rect = canvas.getBoundingClientRect();
    const x = (e.clientX - rect.left) / rect.width * 2 - 1;
    const y = (e.clientY - rect.top) / rect.height * -2 + 1;
    addPoint(x, y);
  });

  // Keep the canvas's pixel size in sync with its CSS size, clamped to the
  // device's maximum texture dimension so configure/getCurrentTexture
  // never fails on oversized canvases.
  const observer = new ResizeObserver((entries) => {
    for (const entry of entries) {
      const canvas = entry.target;
      const width = entry.contentBoxSize[0].inlineSize;
      const height = entry.contentBoxSize[0].blockSize;
      canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
      canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
    }
  });
  observer.observe(canvas);
}
main();
/* Make the canvas fill the whole viewport; display:block removes the
   inline-element baseline gap below the canvas. */
html, body { margin: 0; height: 100% }
canvas { width: 100%; height: 100%; display: block; }
<canvas></canvas>
The example above uses 128 points per buffer just to verify that it still works when the number of points goes past 128. In a real app I'd set pointsPerBuffer to something like 16k or 128k if I wanted to handle lots of points.