I am trying to render the key points to a canvas that I can getting from Blaze Pose, but I can't seem to get them drawing the canvas. I know the x and y for each key point and being retrieved but I can't get them showing up on the canvas. I have tried changing the styling but no luck so far. Thanks for the help.
const video = document.getElementById('webcam');
const canvas = document.getElementById('output')
const liveView = document.getElementById('liveView');
const demosSection = document.getElementById('demos');
const enableWebcamButton = document.getElementById('webcamButton');
const ctx = canvas.getContext("2d");
let poses;
function getUserMediaSupported() {
return !!(navigator.mediaDevices &&
navigator.mediaDevices.getUserMedia);
}
if (getUserMediaSupported()) {
enableWebcamButton.addEventListener('click', enableCam);
} else {
console.warn('getUserMedia() is not supported by your browser');
}
// Enable the live webcam view and start classification.
function enableCam(event) {
if (!model) {
return;
}
// Hide the button once clicked.
event.target.classList.add('removed');
// getUsermedia parameters to force video but not audio.
const constraints = {
video: true
};
document.getElementById('output').style.zIndex = "6";
// Activate the webcam stream.
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
video.srcObject = stream;
video.addEventListener('loadeddata', predictWebcam);
});
}
async function predictWebcam() {
const videoHeight = video.videoHeight;
const videoWidth = video.videoWidth;
video.width = videoWidth;
video.height = videoHeight;
canvas.width = videoWidth;
canvas.height = videoHeight;
poses = await detector.estimatePoses(video)
//ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
if(poses && poses.length > 0){
for(const pose of poses){
if(pose.keypoints != null){
drawKeypoints(pose.keypoints);
}
}
}
window.requestAnimationFrame(predictWebcam);
}
function drawKeypoints(keypoints){
for(let i = 0; i < keypoints.length; i++){
drawKeypoint(keypoints[i]);
}
}
function drawKeypoint(keypoint){
ctx.fillStyle = 'Orange';
ctx.strokeStyle = 'Green';
ctx.lineWidth = 2;
const radius = 4;
const circle = new Path2D();
circle.arc(keypoint.x, keypoint.y, radius, 0, 2 * Math.PI)
ctx.fill(circle)
ctx.stroke(circle)
}
// Store the resulting model in the global scope of our app.
let model = undefined;
let detector = undefined;
// Before we can use BlazePose class we must wait for it to finish
async function loadModel(){
model = poseDetection.SupportedModels.BlazePose;
const detectorConfig = {
runtime: 'tfjs',
enableSmoothing: true,
modelType: 'full'
};
detector = await poseDetection.createDetector(model, detectorConfig);
demosSection.classList.remove('invisible');
}
loadModel();
<!DOCTYPE html>
<html lang="en">
<head>
<title>Measuring App</title>
<meta charset="utf-8">
<!-- Import the webpage's stylesheet -->
<link rel="stylesheet" href="style.css">
</head>
<body>
<h1>Measuring App</h1>
<p>Wait for the model to load before clicking the button to enable the webcam - at which point it will become visible to use.</p>
<section id="demos" class="invisible">
<div id="liveView" class="camView">
<button id="webcamButton">Enable Webcam</button>
<canvas id="output"></canvas>
<video id="webcam" autoplay muted width="640" height="480"></video>
</div>
</section>
<!-- Import TensorFlow.js library -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js" type="text/javascript"></script>
<!-- Load the coco-ssd model to use to recognize things in images -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/pose-detection"></script>
<!-- Import the page's JavaScript to do some stuff -->
<script src="script.js" defer></script>
</body>
</html>
body {
font-family: helvetica, arial, sans-serif;
margin: 2em;
color: #3D3D3D;
}
h1 {
font-style: italic;
color: #FF6F00;
}
video {
display: block;
}
section {
opacity: 1;
transition: opacity 500ms ease-in-out;
}
.removed {
display: none;
z-index: -10;
}
.invisible {
opacity: 0.2;
}
.camView {
position: relative;
float: left;
width: calc(100% - 20px);
margin: 10px;
cursor: pointer;
}
.camView p {
position: absolute;
padding: 5px;
background-color: rgba(255, 111, 0, 0.85);
color: #FFF;
border: 1px dashed rgba(255, 255, 255, 0.7);
z-index: 1;
font-size: 12px;
}
#output {
position: absolute;
z-index: -100;
top: 0;
bottom: 0;
left: 0;
}
You should do this fixings:
<canvas id="output" width="640" height="480"></canvas>
predictCam
function remove lines setting canvas size: ...
video.width = videoWidth;
video.height = videoHeight;
// canvas.width = videoWidth; <----
// canvas.height = videoHeight; <----
...
drawKeypoints
add this line at start: ctx.clearRect(0, 0, canvas.width, canvas.height);
Actually I also added this line at the beginning of loadModel function to make it completely working.
await tf.ready();