
tfjs cannot detect a video

gshoanganh opened this issue 2 years ago • 1 comment

This example cannot detect objects in a video (or a live camera stream). I checked my logs while running it and nothing was detected, but it can detect objects in a still image.

import React from "react";
import ReactDOM from "react-dom";
import * as tf from '@tensorflow/tfjs';
import { loadGraphModel } from '@tensorflow/tfjs-converter';
import "./style-cam.css";
tf.setBackend('webgl');

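// Path to the converted YOLOv5 web model; threshold is the minimum
// confidence score intended for filtering detections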
const weights = '/web_model/model.json';
const threshold = 0.75;


class App extends React.Component {
  videoRef = React.createRef();
  canvasRef = React.createRef();
  
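  // Load the YOLOv5 graph model converted with the TensorFlow.js converter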
  load_model = async () => {  
    const model = await loadGraphModel(weights);   
    return model;
  }

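  // Request the front (user-facing) camera and pipe the stream into the <video> element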
  componentDidMount() {
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      const webCamPromise = navigator.mediaDevices
        .getUserMedia({
          audio: false,
          video: {
            facingMode: "user"
          }
        })
        .then(stream => {
          window.stream = stream;
          this.videoRef.current.srcObject = stream;
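          // Resolve once the video metadata (dimensions) has loaded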
          return new Promise((resolve, reject) => {
            this.videoRef.current.onloadedmetadata = () => {
              resolve();
            };
          });
        });

      this.beginDetect(webCamPromise);
    }
  }

  beginDetect = async (webCamPromise) => {
    // load_model() is awaited here, so this is the resolved model, not a promise
    const model = await this.load_model();
    console.log('model: ', model);

    // Wait for the webcam stream to be ready, then start the detection loop
    Promise.all([model, webCamPromise])
      .then(values => {
        this.detectFrame(this.videoRef.current, values[0]);
      })
      .catch(error => {
        console.error(error);
      });
  }

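  // Run one inference per animation frame. startScope()/endScope() dispose
  // the tensors allocated during each frame so the loop doesn't leak memory.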
  detectFrame = (video, model) => {
    tf.engine().startScope();
    model.executeAsync(this.process_input(video)).then(predictions => {
      this.renderPredictions(predictions);
      requestAnimationFrame(() => {
        this.detectFrame(video, model);
      });
      tf.engine().endScope();
    });
  };

  process_input(video_frame) {
    // Resize the frame to the model's 640x640 input, normalize pixel values
    // to [0, 1], and add a batch dimension
    const input = tf.tidy(() => {
      return tf.image.resizeBilinear(tf.browser.fromPixels(video_frame), [640, 640])
        .div(255.0).expandDims(0);
    });

    this.preview = input;
    console.log('input: ', this.preview);
    return input;
  };
 
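  // The exported model runs NMS internally and returns four tensors:
  // [boxes, scores, classes, valid_detections]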
  renderPredictions = predictions => {
    const ctx = this.canvasRef.current.getContext("2d");
    ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);

    // Font options.
    const font = "16px sans-serif";
    ctx.font = font;
    ctx.textBaseline = "top";

    const [boxes, scores, classes, valid_detections] = predictions;
    const boxes_data = boxes.dataSync();
    const scores_data = scores.dataSync();
    const classes_data = classes.dataSync();
    const valid_detections_data = valid_detections.dataSync()[0];
    // Debug: log raw detection scores and class ids for this frame
    console.log('detect: ', scores_data, classes_data);
  };

  render() {
    return (
      <div>
        <h1>Real-Time Object Detection: Mask</h1>
        <h3>MobileNetV2</h3>
        <video
          style={{ height: '600px', width: "500px" }}
          className="size"
          autoPlay
          playsInline
          muted
          ref={this.videoRef}
          width="600"
          height="500"
          id="frame"
        />
        <canvas
          className="size"
          ref={this.canvasRef}
          width="600"
          height="500"
        />
      </div>
    );
  }
}

export default App;

gshoanganh avatar Apr 14 '22 02:04 gshoanganh

This repo does not support video input at the moment; there is a discussion about live-streaming input in https://github.com/zldrobit/tfjs-yolov5-example/issues/10.

zldrobit avatar Apr 18 '22 07:04 zldrobit