I have two ID3D11Texture2D textures, each created from an array of bytes:
D3D11_SUBRESOURCE_DATA initData = { 0 };
initData.pSysMem = (const void*)pArrayOfBytes1;
initData.SysMemPitch = desktop.right * sizeof(DWORD);   // bytes per row (4 bytes per R8G8B8A8 pixel)
initData.SysMemSlicePitch = desktop.right * desktop.bottom * sizeof(DWORD); // ignored for 2D textures
D3D11_TEXTURE2D_DESC desc = {};   // zero-init so MiscFlags etc. aren't left undefined
desc.Width = desktop.right;
desc.Height = desktop.bottom;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
HRESULT hr1 = pDevice->CreateTexture2D(&desc, &initData, &pMyTexture1);
initData.pSysMem = (const void*)pArrayOfBytes2;
HRESULT hr2 = pDevice->CreateTexture2D(&desc, &initData, &pMyTexture2);
[image: Texture1]
[image: Texture2]
I need to combine these two textures so that the result looks like this:
[image: desired combined result]
What is the best way to do this? Which DirectX features are best suited for it?
The question is what you want to do with the result. If you are just loading two images and want to blend them and write the result out as a new image, you can do that entirely on the CPU using traditional image-blending math.
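For example, here is a minimal sketch of that CPU path, assuming both source arrays are R8G8B8A8 (4 bytes per pixel, straight alpha) as in your texture descriptions; the BlendOver helper name is just illustrative:

#include <cstdint>
#include <cstddef>

// Straight-alpha "over" blend of src onto dst, per pixel.
// Each pixel is 4 bytes: R, G, B, A in the 0-255 range.
void BlendOver(uint8_t* dst, const uint8_t* src, size_t pixelCount)
{
    for (size_t i = 0; i < pixelCount; ++i)
    {
        const uint8_t* s = src + i * 4;
        uint8_t* d = dst + i * 4;
        unsigned a = s[3]; // source alpha, 0..255
        for (int c = 0; c < 3; ++c)
        {
            // out = src * a + dst * (1 - a), in integer math
            d[c] = static_cast<uint8_t>((s[c] * a + d[c] * (255 - a)) / 255);
        }
        // destination alpha is left as-is (one common convention; adjust to taste)
    }
}

You would call this with pixelCount = desktop.right * desktop.bottom and then save the blended buffer with whatever image writer you already use.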
If you want to do some kind of real-time dynamic rendering with them, this is typically done as a 'DualTexture' rendering:
Texture2D<float4> Texture : register(t0);
Texture2D<float4> Texture2 : register(t1);
sampler Sampler : register(s0);
sampler Sampler2 : register(s1);

struct PSInput
{
    float2 TexCoord : TEXCOORD0;
    float2 TexCoord2 : TEXCOORD1;
};

float4 PS(PSInput pin) : SV_Target0
{
    float4 color = Texture.Sample(Sampler, pin.TexCoord);
    float4 color2 = Texture2.Sample(Sampler2, pin.TexCoord2);

    // Use the first texture's alpha to blend the second texture over it.
    float4 output;
    output.rgb = color.rgb * (1 - color.a) + color2.rgb * color.a;
    output.a = color.a;
    return output;
}
It's unclear from your question if you are only rendering these two textures as 'fullscreen'. If so, then you can use 'fullscreen-quad' rendering for the vertex shader.
struct VSOutput
{
    float4 Position : SV_Position;
    float2 TexCoord : TEXCOORD0;
    float2 TexCoord2 : TEXCOORD1;
};

VSOutput VSQuad(uint vI : SV_VertexId)
{
    VSOutput vout;

    // We use the 'big triangle' optimization so you only Draw 3 vertices instead of 4.
    float2 texcoord = float2((vI << 1) & 2, vI & 2);
    vout.TexCoord = texcoord;
    vout.Position = float4(texcoord.x * 2 - 1, -texcoord.y * 2 + 1, 0, 1);

    vout.TexCoord2 = texcoord;
    // You may not need two UV coordinates if they are always the same, but for
    // demonstration I left it here.

    return vout;
}
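To see how the bit-twiddling covers the screen: for vI = 0, 1, 2 the texcoords come out as (0,0), (2,0), (0,2), which map to clip-space positions (-1,1), (3,1), (-1,-3). That single oversized triangle fully covers the [-1,1] viewport, the UVs run from (0,0) to (1,1) across the visible region, and the GPU clips away the rest.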
auto vertexShaderBlob = DX::ReadData(L"FullScreenQuadVS.cso");
DX::ThrowIfFailed(
    d3dDevice->CreateVertexShader(vertexShaderBlob.data(), vertexShaderBlob.size(), nullptr,
        &vertexShader));

auto pixelShaderBlob = DX::ReadData(L"FullScreenQuadPS.cso");
DX::ThrowIfFailed(
    d3dDevice->CreatePixelShader(pixelShaderBlob.data(), pixelShaderBlob.size(), nullptr,
        &pixelShader));
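One detail worth calling out: PSSetShaderResources takes ID3D11ShaderResourceView pointers, not the ID3D11Texture2D objects themselves. A minimal sketch of creating the two views from the textures in your question (texture and texture2 here are the view variables used in the rendering code below):

ID3D11ShaderResourceView* texture = nullptr;
ID3D11ShaderResourceView* texture2 = nullptr;

// A null view description means "view the whole resource with its own format/mips".
DX::ThrowIfFailed(d3dDevice->CreateShaderResourceView(pMyTexture1, nullptr, &texture));
DX::ThrowIfFailed(d3dDevice->CreateShaderResourceView(pMyTexture2, nullptr, &texture2));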
and rendering would look something like this (I'm using DirectX Tool Kit helpers here for all the state and sampler objects):
// Set the textures.
ID3D11ShaderResourceView* textures[2] = { texture, texture2 };
d3dContext->PSSetShaderResources(0, 2, textures);

// Bind the same sampler to both slots since the shader declares two.
auto sampler = states.LinearClamp();
ID3D11SamplerState* samplers[2] = { sampler, sampler };
d3dContext->PSSetSamplers(0, 2, samplers);

// Set state objects.
d3dContext->OMSetBlendState(states.Opaque(), nullptr, 0xffffffff);
d3dContext->OMSetDepthStencilState(states.DepthNone(), 0);
d3dContext->RSSetState(states.CullNone());

// Set shaders.
d3dContext->VSSetShader(vertexShader, nullptr, 0);
d3dContext->PSSetShader(pixelShader, nullptr, 0);

// Draw the 'big triangle' (no vertex buffer or input layout needed).
d3dContext->IASetInputLayout(nullptr);
d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3dContext->Draw(3, 0);
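If the goal is a combined texture rather than output to the screen, the same draw works against an offscreen render target. A minimal sketch, assuming the same size and format as your sources (pCombined and combinedRTV are illustrative names):

// Create a texture we can render into and later read as a shader resource.
D3D11_TEXTURE2D_DESC rtDesc = desc;   // reuse the description from your question
rtDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;

ID3D11Texture2D* pCombined = nullptr;
DX::ThrowIfFailed(d3dDevice->CreateTexture2D(&rtDesc, nullptr, &pCombined));

ID3D11RenderTargetView* combinedRTV = nullptr;
DX::ThrowIfFailed(d3dDevice->CreateRenderTargetView(pCombined, nullptr, &combinedRTV));

// Bind it (plus a matching viewport) before the Draw above.
d3dContext->OMSetRenderTargets(1, &combinedRTV, nullptr);

D3D11_VIEWPORT vp = { 0.f, 0.f, float(rtDesc.Width), float(rtDesc.Height), 0.f, 1.f };
d3dContext->RSSetViewports(1, &vp);

After the draw, pCombined holds the blended result and can be sampled by later passes or copied back for readback.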