I'm trying to replace every RGB channel value of 0 with 1 (out of the max value of 255).
Here is my code on StackBlitz.
You can see that after I reassign the buffer with the new pixels, some of the pixels are back to 0. On Firefox it actually works with this image:
But it won't work with the full-size image:
It's like the browser won't allow a certain contrast or something like that.
Sharing my code here as well:
const getBase64FromFile = async (file: File): Promise<string> => {
return new Promise((resolve: Function, reject: Function) => {
let reader = new FileReader();
reader.addEventListener(
'load',
(arg) => {
resolve(reader.result);
},
false
);
reader.readAsDataURL(file);
});
};
// Returns the number of RGB channel values equal to 0
const howManyZeros = async (src: string): Promise<number> => {
return new Promise((resolve: Function, reject: Function) => {
const image = new Image();
image.onload = () => {
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
canvas.width = image.naturalWidth;
canvas.height = image.naturalHeight;
ctx.drawImage(image, 0, 0);
const data = ctx.getImageData(
0,
0,
image.naturalWidth,
image.naturalHeight
).data;
let zeros = 0;
for (var i = 0; i < data.length; i += 4) {
if (data[i] === 0) zeros++;
if (data[i + 1] === 0) zeros++;
if (data[i + 2] === 0) zeros++;
}
resolve(zeros);
};
image.src = src;
});
};
const onFinish = async (src: string) => {
document.querySelector(
'p#after'
).textContent = `number of zeros after: ${await howManyZeros(src)}`;
(document.querySelector('img#after-img') as HTMLImageElement).src = src;
const a = document.querySelector('a');
a.setAttribute('href', src);
a.setAttribute('download', 'image.png');
a.style.display = '';
};
const onFileChange = async (e: Event | any) => {
const image = new Image();
image.onload = async () => {
const canvas = document.createElement('canvas');
canvas.width = image.naturalWidth;
canvas.height = image.naturalHeight;
const ctx = canvas.getContext('2d');
ctx.drawImage(image, 0, 0);
let data = ctx.getImageData(
0,
0,
image.naturalWidth,
image.naturalHeight
).data;
let buffer = new Uint8ClampedArray(
image.naturalWidth * image.naturalHeight * 4
);
// Iterate over all the pixels and increase all RGB 0 values to 1
for (var i = 0; i < data.length; i += 4) {
if (data[i] === 0) buffer[i] = 1;
else buffer[i] = data[i];
if (data[i + 1] === 0) buffer[i + 1] = 1;
else buffer[i + 1] = data[i + 1];
if (data[i + 2] === 0) buffer[i + 2] = 1;
else buffer[i + 2] = data[i + 2];
buffer[i + 3] = data[i + 3];
}
const iData = ctx.createImageData(image.naturalWidth, image.naturalHeight);
iData.data.set(buffer);
ctx.putImageData(iData, 0, 0);
onFinish(canvas.toDataURL('image/png', 1));
};
let src = await getBase64FromFile(e.target.files[0]);
document.querySelector(
'p#before'
).textContent = `number of zeros before: ${await howManyZeros(src)}`;
(document.querySelector('img#before-img') as HTMLImageElement).src = src;
image.src = src;
};
const input: HTMLInputElement = document.querySelector('input');
input.addEventListener('change', onFileChange, false);
Appreciate any help with this, and I'm hoping it's not a browser issue but something in my code.
Due to the canvas spec, which doesn't guarantee that the pixels stay the same as you set them, you can't use the browser's built-in image manipulation functions for this.
Due to the lossy nature of converting between color spaces and converting to and from premultiplied alpha color values, pixels that have just been set using putImageData(), and are not completely opaque, might be returned to an equivalent getImageData() as different values.
In your case, the channel values of highly transparent pixels get turned back to 0.
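You can see the loss in isolation with a tiny round trip through a 2D canvas (a minimal sketch; the exact values that come back vary by browser and image):
const c = document.createElement('canvas');
c.width = c.height = 1;
const cctx = c.getContext('2d');
const px = cctx.createImageData(1, 1);
px.data.set([1, 1, 1, 2]); // RGB of 1 with a nearly transparent alpha
cctx.putImageData(px, 0, 0);
// Premultiplying 1 by 2/255 rounds to 0, so the read-back is typically [0, 0, 0, 2]
console.log(cctx.getImageData(0, 0, 1, 1).data);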
This doesn't happen in a WebGL context with the premultipliedAlpha context attribute set to false, but the solution involves a lot of code.
The following code is based on the example from WebGL2 Fundamentals:
const vertexShaderSource = `#version 300 es
// an attribute is an input (in) to a vertex shader.
// It will receive data from a buffer
in vec2 a_position;
in vec2 a_texCoord;
// Used to pass in the resolution of the canvas
uniform vec2 u_resolution;
// Used to pass the texture coordinates to the fragment shader
out vec2 v_texCoord;
// all shaders have a main function
void main() {
// convert the position from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}`;
const fragmentShaderSource = `#version 300 es
// fragment shaders don't have a default precision so we need
// to pick one. highp is a good default. It means "high precision"
precision highp float;
// our texture
uniform sampler2D u_image;
// the texCoords passed in from the vertex shader.
in vec2 v_texCoord;
// we need to declare an output for the fragment shader
out vec4 outColor;
void main() {
vec4 inColor = texture(u_image, v_texCoord);
outColor = vec4(
inColor.r != 0.0 ? inColor.r : 1.0/255.0,
inColor.g != 0.0 ? inColor.g : 1.0/255.0,
inColor.b != 0.0 ? inColor.b : 1.0/255.0,
inColor.a
);
}`;
function readFile(file) {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = function() {
resolve(this.result);
}
reader.onerror = reject;
reader.readAsArrayBuffer(file);
});
}
function loadImage(url) {
return new Promise((resolve, reject) => {
const image = new Image();
image.onload = function() {
resolve(this);
}
image.onerror = reject;
image.src = url;
})
}
function canvasToBlob(canvas) {
return new Promise((resolve, reject) => {
canvas.toBlob(blob => blob ? resolve(blob) : reject(canvas), "image/png");
});
}
function howManyZeros(gl) {
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
let data = new Uint8Array(gl.canvas.width * gl.canvas.height * 4);
gl.readPixels(0, 0, gl.canvas.width, gl.canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, data);
let zeros = 0;
for (let i=0; i<data.length; i++) {
if (i % 4 == 3) continue; // ignore alpha
if (data[i] == 0) zeros++;
}
return zeros;
}
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2,
]), gl.STATIC_DRAW);
}
function render(image) {
const canvas = document.createElement("canvas");
canvas.width = image.width;
canvas.height = image.height;
const gl = canvas.getContext("webgl2", {
premultipliedAlpha: false
});
if (!gl) {
console.error("No WebGL2");
return;
}
canvas.gl = gl;
// setup GLSL program
const program = webglUtils.createProgramFromSources(gl, [vertexShaderSource, fragmentShaderSource]);
// look up where the vertex data needs to go.
const positionAttributeLocation = gl.getAttribLocation(program, "a_position");
const texCoordAttributeLocation = gl.getAttribLocation(program, "a_texCoord");
// lookup uniforms
const resolutionLocation = gl.getUniformLocation(program, "u_resolution");
const imageLocation = gl.getUniformLocation(program, "u_image");
// Create a vertex array object (attribute state)
const vao = gl.createVertexArray();
// and make it the one we're currently working with
gl.bindVertexArray(vao);
// Create a buffer and put a single pixel space rectangle in
// it (2 triangles)
const positionBuffer = gl.createBuffer();
// Turn on the attribute
gl.enableVertexAttribArray(positionAttributeLocation);
// Bind it to ARRAY_BUFFER (think of it as ARRAY_BUFFER = positionBuffer)
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Tell the attribute how to get data out of positionBuffer (ARRAY_BUFFER)
let size = 2; // 2 components per iteration
let type = gl.FLOAT; // the data is 32bit floats
let normalize = false; // don't normalize the data
let stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
let offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
positionAttributeLocation, size, type, normalize, stride, offset);
// provide texture coordinates for the rectangle.
const texCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0,
]), gl.STATIC_DRAW);
// Turn on the attribute
gl.enableVertexAttribArray(texCoordAttributeLocation);
// Tell the attribute how to get data out of texCoordBuffer (ARRAY_BUFFER)
size = 2; // 2 components per iteration
type = gl.FLOAT; // the data is 32bit floats
normalize = false; // don't normalize the data
stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
texCoordAttributeLocation, size, type, normalize, stride, offset);
// Create a texture.
const texture = gl.createTexture();
// make unit 0 the active texture unit
// (i.e. the unit all other texture commands will affect)
gl.activeTexture(gl.TEXTURE0 + 0);
// Bind it to texture unit 0's 2D bind point
gl.bindTexture(gl.TEXTURE_2D, texture);
// Set the parameters so we don't need mips and so we're not filtering
// and we don't repeat
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
// Upload the image into the texture.
const mipLevel = 0; // the largest mip
const internalFormat = gl.RGBA; // format we want in the texture
const srcFormat = gl.RGBA; // format of data we are supplying
const srcType = gl.UNSIGNED_BYTE; // type of data we are supplying
gl.texImage2D(gl.TEXTURE_2D,
mipLevel,
internalFormat,
srcFormat,
srcType,
image);
// Tell WebGL how to convert from clip space to pixels
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
// Clear the canvas
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Tell it to use our program (pair of shaders)
gl.useProgram(program);
// Bind the attribute/buffer set we want.
gl.bindVertexArray(vao);
// Pass in the canvas resolution so we can convert from
// pixels to clipspace in the shader
gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);
// Tell the shader to get the texture from texture unit 0
gl.uniform1i(imageLocation, 0);
// Bind the position buffer so gl.bufferData that will be called
// in setRectangle puts data in the position buffer
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Set a rectangle the same size as the image.
setRectangle(gl, 0, 0, image.width, image.height);
// Draw the rectangle.
const primitiveType = gl.TRIANGLES;
offset = 0;
const count = 6;
gl.drawArrays(primitiveType, offset, count);
return canvas;
}
async function onFileChange(e) {
const png_data = await readFile(e.target.files[0]);
const png_blob = new Blob([png_data], {
type: 'image/png'
});
const png_url = URL.createObjectURL(png_blob);
document.querySelector("#before-img").src = png_url;
const image = await loadImage(png_url);
let canvas = render(image);
document.querySelector("#after").textContent = `nunber of zeros after: ${howManyZeros(canvas.gl)}`
const new_png_blob = await canvasToBlob(canvas);
const new_png_url = URL.createObjectURL(new_png_blob);
const dl_link = document.querySelector("a");
dl_link.href = new_png_url;
dl_link.style.display = "";
document.querySelector("#after-img").src = new_png_url;
};
const input = document.querySelector('input[type=file]');
input.addEventListener('change', onFileChange, false);
<input type="file" />
<div>
<a style="display: none" download="image.png">download</a>
</div>
<div>
<p id="before"></p>
<img id="before-img" src="" />
</div>
<div>
<p id="after"></p>
<img id="after-img" src="" />
</div>
<script src="https://webgl2fundamentals.org/webgl/resources/webgl-utils.js"></script>
Alternatively, you could do it with a third-party image manipulation library.
Here's an example using the UPNG.js library:
function readFile(file) {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = function() {
resolve(this.result);
}
reader.onerror = reject;
reader.readAsArrayBuffer(file);
});
}
async function onFileChange(e) {
const png_data = await readFile(e.target.files[0]);
const png_blob = new Blob([png_data], {
type: 'image/png'
});
const png_url = URL.createObjectURL(png_blob);
document.querySelector("#before-img").src = png_url;
const png = UPNG.decode(png_data);
const png_frames = UPNG.toRGBA8(png);
const png_frame = new Uint8Array(png_frames[0]);
for (var i = 0; i < png_frame.length; i += 4) {
png_frame[i + 0] = png_frame[i + 0] == 0 ? 1 : png_frame[i + 0];
png_frame[i + 1] = png_frame[i + 1] == 0 ? 1 : png_frame[i + 1];
png_frame[i + 2] = png_frame[i + 2] == 0 ? 1 : png_frame[i + 2];
//png_frame[i+3] = 255; // remove transparency
}
const new_png_data = UPNG.encode([png_frame.buffer], png.width, png.height, 0);
const new_png_blob = new Blob([new_png_data], {
type: 'image/png'
});
const new_png_url = URL.createObjectURL(new_png_blob);
/*const new_png_url = "data:image/png;base64," + btoa(String.fromCharCode.apply(null, new Uint8Array(new_png_data)));*/
const dl_link = document.querySelector("a");
dl_link.href = new_png_url;
dl_link.style.display = "";
document.querySelector("#after-img").src = new_png_url;
};
const input = document.querySelector('input[type=file]');
input.addEventListener('change', onFileChange, false);
<input type="file" />
<div>
<a style="display: none" download="image.png">download</a>
</div>
<div>
<p id="before"></p>
<img id="before-img" src="" />
</div>
<div>
<p id="after"></p>
<img id="after-img" src="" />
</div>
<script type="module">
import UPNG from "https://cdn.skypack.dev/upng-js@2.1.0"; window.UPNG = UPNG;
</script>