這裡使用的是 BodyPix（Person Segmentation in the Browser）模型。
以下同樣附上完整程式碼，
不過目前的程式碼還有一些問題需要修正。
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <title>Document</title>
  <!-- TensorFlow.js runtime plus the BodyPix person-segmentation model (pinned to 1.0.0). -->
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/body-pix@1.0.0"></script>
  <style>
    /* The raw camera feed is kept in the document (it must keep playing so
       frames can be sampled) but is not shown; frames are drawn onto the
       canvas by index.js instead. */
    video {
      visibility: hidden;
    }
  </style>
</head>
<body>
  <!-- Segmentation output is rendered here. -->
  <canvas id="detect_result"></canvas>
  <!-- muted + playsinline let autoplay work on mobile browsers. -->
  <video autoplay playsinline muted id="webcam"></video>
  <script src="index.js"> </script>
</body>
</html>
index.js
/**
 * Loads the BodyPix model, starts the webcam, and repeatedly draws the
 * current frame to the canvas while logging the person-segmentation result.
 * Relies on the page providing #webcam (video) and #detect_result (canvas),
 * and on the bodyPix global from the body-pix@1.0.0 script tag.
 */
async function app() {
  const model = await bodyPix.load();
  const webcamElement = document.getElementById('webcam');
  const canvas = document.getElementById('detect_result');
  const context = canvas.getContext('2d');

  // Draw the current frame, run segmentation on it, then schedule the next
  // pass. Runs roughly every 300 ms (plus inference time).
  const showResult = async function () {
    canvas.width = webcamElement.videoWidth;
    canvas.height = webcamElement.videoHeight;
    context.drawImage(webcamElement, 0, 0);
    // body-pix@1.0.0 signature: estimatePersonSegmentation(input,
    // outputStride, segmentationThreshold). The original code passed a
    // v2-style {flipHorizontal} config object, which this version does not
    // accept; use the documented positional defaults instead.
    const body = await model.estimatePersonSegmentation(webcamElement, 16, 0.5);
    console.log(body);
    setTimeout(function () {
      showResult();
    }, 300);
  };

  // Resolves once the webcam stream is attached and its metadata
  // (videoWidth/videoHeight) is available; rejects if the camera is
  // unavailable or permission is denied.
  const setupWebcam = async function () {
    // navigator.getUserMedia (with vendor prefixes) is deprecated; the
    // standard promise-based API lives on navigator.mediaDevices.
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
      throw new Error('getUserMedia is not supported in this browser');
    }
    const stream = await navigator.mediaDevices.getUserMedia({ video: true });
    webcamElement.srcObject = stream;
    await new Promise((resolve) => {
      // once:true removes the listener after the first fire.
      webcamElement.addEventListener('loadedmetadata', () => resolve(), { once: true });
    });
  };

  setupWebcam().then(
    () => {
      console.log("shoq");
      showResult();
    },
    (err) => {
      console.log(err);
    }
  );
}
app();