kalidokit
kalidokit copied to clipboard
Run kalidokit for input video
How can I run this repository on a pre-recorded input video file instead of a real-time camera stream?
Instead of using the Mediapipe Camera tools, use your own html video element and call the predict function on every frame.
// Process one frame, then re-register: requestVideoFrameCallback fires only
// ONCE per registration, so the callback must schedule itself for the next frame.
// (The original snippet also had a syntax error — the callback's closing `}` was missing.)
const onFrame = async () => {
  await holistic.send({ image: video });
  video.requestVideoFrameCallback(onFrame);
};
video.requestVideoFrameCallback(onFrame);
@yeemachine Hi! Thanks for your cool project! I followed the instructions in the README, but I found that it always outputs 1 in the eye blendshape fields when using the code below. Can you give me some suggestions?
<!DOCTYPE html>
<html lang="en-us">
<head>
<!-- NOTE(review): the "[email protected]" fragments in these URLs are Cloudflare
     email-obfuscation artifacts that mangled the original `package@version`
     specifiers — restore real pinned versions (e.g. @mediapipe/face_mesh@0.4.x,
     kalidokit@1.x) before running. -->
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/[email protected]/face_mesh.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/kalidokit.umd.js" crossorigin="anonymous"></script>
</head>
<body>
<div class="panel-block">
<video class="input_video2"></video>
</div>
<script id="rendered-js">
const video2 = document.getElementsByClassName('input_video2')[0];

const faceMesh = new FaceMesh({
  locateFile: (file) => {
    return `https://cdn.jsdelivr.net/npm/@mediapipe/[email protected]/${file}`;
  },
});

// FIX for "eye blendshapes always 1": Kalidokit's eye/blink solver needs the
// iris landmarks, which FaceMesh only emits when refineLandmarks is enabled.
// Without them the blink value saturates at 1.
faceMesh.setOptions({
  refineLandmarks: true,
});

/**
 * Per-frame FaceMesh result handler: solves Kalidokit face rig values for
 * each detected face and logs the eye (blink) output.
 * @param {Object} results - FaceMesh results; multiFaceLandmarks is an array
 *   of landmark lists, one per detected face.
 */
function onResults(results) {
  if (results.multiFaceLandmarks) {
    for (const landmarks of results.multiFaceLandmarks) {
      const solvedOutput = Kalidokit.Face.solve(landmarks, {
        runtime: "mediapipe", // `mediapipe` or `tfjs`
        video: video2,
        imageSize: { height: 480, width: 480 },
        smoothBlink: false, // smooth left and right eye blink delays
        blinkSettings: [0.25, 0.75], // adjust upper and lower bound blink sensitivity
      });
      console.log(solvedOutput['eye']);
    }
  }
}

faceMesh.onResults(onResults);

// Feed webcam frames to FaceMesh via the MediaPipe camera helper.
const camera = new Camera(video2, {
  onFrame: async () => {
    await faceMesh.send({ image: video2 });
  },
  width: 480,
  height: 480,
});
camera.start();
</script>
</body>
</html>