webgpu, matrix-engine

WebGPU: I can't draw two objects in the same scene


I'm a beginner here. I took wgpu-matrix from Greggman as the best glMatrix alternative for WebGPU (comfortable for me at the usage level).

The code itself is taken from https://webgpu.github.io/webgpu-samples/samples/renderBundles. In this example I got a shader with textures/UVs and even shadows; rendering many elements at once looks perfect for me.

Now I am limited to the "same shader" and "same pipeline". Generally I want to have a scene with object identity (per-object transformation, changing shaders at runtime, ...).

import {vec3, mat4} from "wgpu-matrix";

export default class MatrixEngineGPURender {
  engine = null;

  constructor(engine) {
    this.engine = engine;

    requestAnimationFrame(this.render);
  }

  resizeToDisplaySize(device, canvasInfo) {
    const {
      canvas,
      renderTarget,
      presentationFormat,
      depthTexture,
      sampleCount,
    } = canvasInfo;
    const width = Math.max(1, Math.min(device.limits.maxTextureDimension2D, canvas.clientWidth));
    const height = Math.max(1, Math.min(device.limits.maxTextureDimension2D, canvas.clientHeight));

    const needResize = !canvasInfo.renderTarget || width !== canvas.width || height !== canvas.height;
    if(needResize) {
      if(renderTarget) {renderTarget.destroy()}
      if(depthTexture) {depthTexture.destroy()}
      canvas.width = width;
      canvas.height = height;

      if(sampleCount > 1) {
        const newRenderTarget = device.createTexture({
          size: [canvas.width, canvas.height],
          format: presentationFormat,
          sampleCount,
          usage: GPUTextureUsage.RENDER_ATTACHMENT,
        });
        canvasInfo.renderTarget = newRenderTarget;
        canvasInfo.renderTargetView = newRenderTarget.createView();
      }

      const newDepthTexture = device.createTexture({
        size: [canvas.width, canvas.height],
        format: "depth24plus",
        sampleCount,
        usage: GPUTextureUsage.RENDER_ATTACHMENT,
      });
      canvasInfo.depthTexture = newDepthTexture;
      canvasInfo.depthTextureView = newDepthTexture.createView();
    }
    return needResize;
  }

  render = (t) => {
    var time = t;
    time *= 0.001;
    this.resizeToDisplaySize(this.engine.device, this.engine.canvasInfo);
    let clientWidth = this.engine.canvas.clientWidth;
    let clientHeight = this.engine.canvas.clientHeight;
    const projection = mat4.perspective((30 * Math.PI) / 180,
      clientWidth / clientHeight, 0.5, 10)
    var eye = [1, 4, -6];
    var target = [0, 0, -2];
    var up = [0, 1, 0];

    var view = mat4.lookAt(eye, target, up);
    var viewProjection = mat4.multiply(projection, view);
    var world = mat4.rotationY(time);
    mat4.transpose(mat4.inverse(world), this.engine.worldInverseTranspose);
    mat4.multiply(viewProjection, world, this.engine.worldViewProjection);
    vec3.normalize([1, 8, -10], this.engine.lightDirection);

    this.engine.device.queue.writeBuffer(this.engine.vsUniformBuffer, 0, this.engine.vsUniformValues);
    this.engine.device.queue.writeBuffer(this.engine.fsUniformBuffer, 0, this.engine.fsUniformValues);

    if(this.engine.canvasInfo.sampleCount === 1) {
      const colorTexture = this.engine.context.getCurrentTexture();
      this.engine.renderPassDescriptor.colorAttachments[0].view = colorTexture.createView();
    } else {
      this.engine.renderPassDescriptor.colorAttachments[0].view = this.engine.canvasInfo.renderTargetView;
      this.engine.renderPassDescriptor.colorAttachments[0].resolveTarget = this.engine.context.getCurrentTexture().createView();
    }
    this.engine.renderPassDescriptor.depthStencilAttachment.view = this.engine.canvasInfo.depthTextureView;

    const commandEncoder = this.engine.device.createCommandEncoder();
    const passEncoder = commandEncoder.beginRenderPass(this.engine.renderPassDescriptor);

    passEncoder.setPipeline(this.engine.pipeline);
    passEncoder.setBindGroup(0, this.engine.bindGroup);
    passEncoder.setVertexBuffer(0, this.engine.buffersManager.MY_GPU_BUFFER.positionBuffer);
    passEncoder.setVertexBuffer(1, this.engine.buffersManager.MY_GPU_BUFFER.normalBuffer);
    passEncoder.setVertexBuffer(2, this.engine.buffersManager.MY_GPU_BUFFER.texcoordBuffer);
    passEncoder.setIndexBuffer(this.engine.buffersManager.MY_GPU_BUFFER.indicesBuffer, "uint16");
    passEncoder.drawIndexed(this.engine.buffersManager.MY_GPU_BUFFER.indices.length);

    this.engine.systemScene.forEach((matrixEnginePipline) => {
      matrixEnginePipline.draw(passEncoder)
    })

    passEncoder.end();
    this.engine.device.queue.submit([commandEncoder.finish()]);
    requestAnimationFrame(this.render);
  }
}

Now I want a second cube with a different texture, separated into its own class:


import {vec3, mat4} from "wgpu-matrix";

// CUBE TEXTURE BASE OBJECT
export default class MECubeTexPipline {

  depthTexture;
  cubeTexPipeline = null;
  renderPassDescriptor = null;
  context = null;
  device = null;

  constructor(device, presentationFormat, moduleCubeTex, context, canvas) {
    this.canvas = canvas;
    this.context = context;
    this.device = device;
    this.cubeTexPipeline = device.createRenderPipeline({
      label: '2 attributes',
      layout: 'auto',
      vertex: {
        module: moduleCubeTex,
        entryPoint: 'vs',
        buffers: [
          {
            arrayStride: (3 + 2) * 4, // (3+2) floats 4 bytes each
            attributes: [
              {shaderLocation: 0, offset: 0, format: 'float32x3'},  // position
              {shaderLocation: 1, offset: 12, format: 'float32x2'},  // texcoord
            ],
          },
        ],
      },
      fragment: {
        module: moduleCubeTex,
        entryPoint: 'fs',
        targets: [{format: presentationFormat}],
      },
      primitive: {
        cullMode: 'back',
      },
      depthStencil: {
        depthWriteEnabled: true,
        depthCompare: 'less',
        format: 'depth24plus',
      },
    });
  }

  loadObjProgram(device, bufferManager, sampler, texture) {
    // matrix
    const uniformBufferSize = (16) * 4;
    this.uniformBuffer = device.createBuffer({
      label: 'uniforms',
      size: uniformBufferSize,
      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
    });

    this.uniformValues = new Float32Array(uniformBufferSize / 4);

    // offsets to the various uniform values in float32 indices
    const kMatrixOffset = 0;

    this.matrixValue = this.uniformValues.subarray(kMatrixOffset, kMatrixOffset + 16);

    const {vertexData, indexData, numVertices} = bufferManager.createCubeVertices();
    this.numVertices = numVertices;
    this.vertexBuffer = device.createBuffer({
      label: 'vertex buffer vertices',
      size: vertexData.byteLength,
      usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
    });
    device.queue.writeBuffer(this.vertexBuffer, 0, vertexData);

    this.indexBuffer = device.createBuffer({
      label: 'index buffer',
      size: indexData.byteLength,
      usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST,
    });
    device.queue.writeBuffer(this.indexBuffer, 0, indexData);

    this.bindGroup = device.createBindGroup({
      label: 'bind group for object',
      layout: this.cubeTexPipeline.getBindGroupLayout(0),
      entries: [
        {binding: 0, resource: {buffer: this.uniformBuffer}},
        {binding: 1, resource: sampler},
        {binding: 2, resource: texture.createView()},
      ],
    });

    this.renderPassDescriptor = {
      label: 'our basic canvas renderPass',
      colorAttachments: [
        {
          // view: <- to be filled out when we render
          loadOp: 'clear',
          storeOp: 'store',
        },
      ],
      depthStencilAttachment: {
        // view: <- to be filled out when we render
        depthClearValue: 1.0,
        depthLoadOp: 'clear',
        depthStoreOp: 'store',
      },
    };

    const degToRad = d => d * Math.PI / 180;
    this.settings = {
      rotation: [degToRad(20), degToRad(25), degToRad(0)],
    };
    // const radToDegOptions = {min: -360, max: 360, step: 1, converters: GUI.converters.radToDeg};
  }

  draw(passEncoder) {
    // Get the current texture from the canvas context and
    // set it as the texture to render to.
    const canvasTexture = this.context.getCurrentTexture();
    this.renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    // If we don't have a depth texture OR if its size is different
    // from the canvasTexture, then make a new depth texture.

    if(!this.depthTexture ||
      this.depthTexture.width !== canvasTexture.width ||
      this.depthTexture.height !== canvasTexture.height) {
      if(this.depthTexture) {
        this.depthTexture.destroy();
      }
      this.depthTexture = this.device.createTexture({
        size: [canvasTexture.width, canvasTexture.height],
        format: 'depth24plus',
        usage: GPUTextureUsage.RENDER_ATTACHMENT,
      });
    }
    this.renderPassDescriptor.depthStencilAttachment.view = this.depthTexture.createView();
    // const encoder = this.device.createCommandEncoder();
    // const passEncoder = encoder.beginRenderPass(this.renderPassDescriptor);
    // passEncoder.setPipeline(this.cubeTexPipeline);
    passEncoder.setVertexBuffer(0, this.vertexBuffer);
    passEncoder.setIndexBuffer(this.indexBuffer, 'uint16');

    const aspect = this.canvas.clientWidth / this.canvas.clientHeight;
    mat4.perspective(
      60 * Math.PI / 180,
      aspect,
      0.1,      // zNear
      10,      // zFar
      this.matrixValue,
    );
    const view = mat4.lookAt(
      [0, 1, 5],  // camera position
      [0, 0, 0],  // target
      [0, 1, 0],  // up
    );
    mat4.multiply(this.matrixValue, view, this.matrixValue);
    mat4.rotateX(this.matrixValue, this.settings.rotation[0], this.matrixValue);
    mat4.rotateY(this.matrixValue, this.settings.rotation[1], this.matrixValue);
    mat4.rotateZ(this.matrixValue, this.settings.rotation[2], this.matrixValue);

    // upload the uniform values to the uniform buffer
    this.device.queue.writeBuffer(this.uniformBuffer, 0, this.uniformValues);
    passEncoder.setBindGroup(0, this.bindGroup);
    passEncoder.drawIndexed(this.numVertices);
    // passEncoder.end();
    // const commandBuffer = encoder.finish();
    // this.device.queue.submit([commandBuffer]);
  }
}

Warning:


> The current pipeline ([RenderPipeline]) was created with a default
> layout, and is not compatible with the [BindGroup "bind group for
> object"] set at group index 0 which uses a [BindGroupLayout] that was
> not created by the pipeline. Either use the bind group layout returned
> by calling getBindGroupLayout(0) on the pipeline when creating the
> bind group, or provide an explicit pipeline layout when creating the
> pipeline.
>  - While encoding [RenderPassEncoder].DrawIndexed(36, 1, 0, 0, 0).

Another type of error:

Command cannot be recorded while [CommandEncoder] is locked and [RenderPassEncoder] is currently open.

How can I draw two cubes, each with a different renderPassDescriptor? Is it OK to call beginRenderPass or setPipeline twice?

Any suggestions?

Open source if anyone is interested: project source


Solution

  • You have a class called MECubeTexPipline which appears to be designed to render a single cube, in a single render pass, in a single command buffer.

    Let's assume you have only one pipeline and two things you want to draw with that pipeline: a cube and a sphere. In pseudo code you might do something like this:

    at init time
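
    (a minimal sketch; the pipeline options, buffer contents, and bind group entries are placeholders for whatever your engine actually creates)

       // one pipeline, created once (shader module, vertex layout, etc. elided)
       const pipeline = device.createRenderPipeline({ ... });

       // per-object data: each object gets its own buffers and bind group,
       // but every bind group uses a layout taken from this one pipeline
       const cube = {
         vertexBuffer: cubeVertexBuffer,   // made with device.createBuffer(...)
         indexBuffer: cubeIndexBuffer,
         numIndices: cubeIndexCount,
         bindGroup: device.createBindGroup({
           layout: pipeline.getBindGroupLayout(0),
           entries: [/* cube's uniform buffer, sampler, texture view */],
         }),
       };
       const sphere = { /* same shape: its own buffers, count, and bindGroup */ };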

    at render time
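
    (again a sketch: a single command encoder, a single render pass, a single submit)

       const encoder = device.createCommandEncoder();
       const pass = encoder.beginRenderPass(renderPassDescriptor);

       pass.setPipeline(pipeline);   // set once; both objects share it

       // draw the cube
       pass.setVertexBuffer(0, cube.vertexBuffer);
       pass.setIndexBuffer(cube.indexBuffer, 'uint16');
       pass.setBindGroup(0, cube.bindGroup);   // cube's uniforms/texture
       pass.drawIndexed(cube.numIndices);

       // draw the sphere: same pipeline, its own buffers and bind group
       pass.setVertexBuffer(0, sphere.vertexBuffer);
       pass.setIndexBuffer(sphere.indexBuffer, 'uint16');
       pass.setBindGroup(0, sphere.bindGroup);
       pass.drawIndexed(sphere.numIndices);

       pass.end();
       device.queue.submit([encoder.finish()]);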

    Things to notice: there is only one command buffer, and there is only one render pass.

    You can have different pipelines if the things you want to draw actually need different pipelines, but if possible try to have fewer pipelines rather than more. In the example above, if your cube had a different pipeline than the sphere, you'd call setPipeline with the sphere's pipeline after drawing the cube and before drawing the sphere.

    You can also have different render passes if you need them. It's common to need several, but not just for drawing two cubes. Examples might be rendering a shadow map, or rendering the scene to a texture for post-processing.
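
    If you do need two passes, a minimal sketch of two render passes recorded into one command buffer (assuming both target the same canvas texture view) might look like this:

       const encoder = device.createCommandEncoder();

       // render pass 1: clears the canvas, then draws
       const pass1 = encoder.beginRenderPass({
         colorAttachments: [{view, clearValue: [0, 0, 0, 1], loadOp: 'clear', storeOp: 'store'}],
       });
       // ... setPipeline / setBindGroup / draw ...
       pass1.end();

       // render pass 2: loads what pass 1 stored instead of clearing it
       const pass2 = encoder.beginRenderPass({
         colorAttachments: [{view, loadOp: 'load', storeOp: 'store'}],
       });
       // ... setPipeline / setBindGroup / draw ...
       pass2.end();

       device.queue.submit([encoder.finish()]);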

    For each render pass, you need to set loadOp and storeOp correctly. Most examples that have one render pass set loadOp: 'clear', but in the example above, if render pass 2 had loadOp: 'clear' it would erase the results of render pass 1. Instead it needs to be loadOp: 'load'.

    You mentioned two errors. The first:

    Command cannot be recorded while [CommandEncoder] is locked and [RenderPassEncoder] is currently open.

    Once you call pass = encoder.beginRenderPass() you have to call pass.end() before you can call any other methods of the encoder:

       const encoder = device.createCommandEncoder();
       const pass = encoder.beginRenderPass(renderPassDescriptor);
       ...
       pass.end();  // stop encoding render pass commands
       // now you can call some other `encoder.xxx` method
    

    The other error:

    > The current pipeline ([RenderPipeline]) was created with a default
    > layout, and is not compatible with the [BindGroup "bind group for
    > object"] set at group index 0 which uses a [BindGroupLayout] that was
    > not created by the pipeline. Either use the bind group layout returned
    > by calling getBindGroupLayout(0) on the pipeline when creating the
    > bind group, or provide an explicit pipeline layout when creating the
    > pipeline.
    >  - While encoding [RenderPassEncoder].DrawIndexed(36, 1, 0, 0, 0).

    When you create a pipeline you pass in a pipeline layout OR 'auto':

      const pipeline = device.createRenderPipeline({
        layout: 'auto', // <------
        ...
      });
    

    And when you create a bindGroup you pass a bind group layout:

      const bindGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),  // <------
        ...
      });
    

    BindGroups are, in general, only compatible with specific bind group layouts (more details here). In the case of a bind group layout made from a pipeline that used layout: 'auto', bind groups made with that type of bind group layout are incompatible with all other bind group layouts from different pipelines.

    If you want the bind group layouts to be compatible then you have to create them manually. Whether that's needed for your example is unclear. If you were only using a single pipeline then all bindGroups would be compatible with that pipeline. If you're using multiple pipelines, then, if you need bind groups that can be used with both pipelines, you'd need to create your bind group layouts manually.

    Looking at your bindGroup creation code, it looks like, if you wanted to make bind group layouts manually, you'd do this:

    const bindGroupLayout = device.createBindGroupLayout({
        entries: [
          // the uniform buffer binding
          {
            binding: 0,
            visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
            buffer: {},
          },
          // the sampler binding
          {
            binding: 1,
            visibility: GPUShaderStage.FRAGMENT,
            sampler: {
              type: "filtering",
            },
          },
          // the texture binding
          {
            binding: 2,
            visibility: GPUShaderStage.FRAGMENT,
            texture: {
              sampleType: "float",
              viewDimension: "2d",
              multisampled: false,
            },
          },
        ],
    });
    
    const pipelineLayout = device.createPipelineLayout({
      bindGroupLayouts: [
        bindGroupLayout,   // group(0)'s layout
      ],
    });
    
    const pipeline = device.createRenderPipeline({
      layout: pipelineLayout,  // used to be 'auto'
      ...  // vertex, fragment, etc. as before
    });
    

    Note: this page will calculate bind group layouts for you the same way layout: 'auto' does it.

    Making bind group layouts manually means that any bind groups made with that bind group layout are compatible. It also means you can specify resources that are not used in every pipeline. For example, above we created a bind group layout with one texture, but we could have made it with two textures and it would still work with our pipeline that happens to use only one texture.
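
    For instance, a hypothetical extension of the layout above with a second texture binding that the current shader never references would still be valid:

       const bindGroupLayout = device.createBindGroupLayout({
         entries: [
           {binding: 0, visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT, buffer: {}},
           {binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: {type: 'filtering'}},
           {binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: {}},
           // extra texture the current shader doesn't use; the pipeline still works
           {binding: 3, visibility: GPUShaderStage.FRAGMENT, texture: {}},
         ],
       });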

    To be honest, I suspect your example only needs a single pipeline, and so you don't need to manually make bind group layouts. Even if it had multiple pipelines, it might not need manually made bind group layouts if the bind groups used with each pipeline are not used with other pipelines.

    Disclosure: I'm a contributor to the site above that computes bind group layouts, to the WebGPU spec, and to an implementation itself, so apparently even talking about either on S.O. without disclosure is frowned upon.