Passing local video through tf instead of using webcam

Hi everyone! I’m not sure if this is the right place to ask, but I’m trying to use a local video file instead of the webcam for this Teachable Machine/TensorFlow.js script. File 2 is functional code that uses the webcam and passes the video through the model. File 1 is my attempt at using a local video instead of the webcam, but it doesn’t currently work. I’m seeking help with passing the video through the model in File 1.

I’m still a JS beginner, so any help is greatly appreciated!

Thanks! :slight_smile:

File 1:

 <!-- Local-file picker: selecting a file triggers onChange() below. -->
 <input type="file" name="file" id="fileItem" onchange="onChange()">

 <button type="button" onclick="init()">Start</button>
 <!-- The selected local video plays here (id kept from the webcam demo). -->
 <video id="webcam-container" controls></video>
 <div id="label-container"></div>
 <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
 <script src="https://cdn.jsdelivr.net/npm/@teachablemachine/image@0.8/dist/teachablemachine-image.min.js"></script>
 <script type="text/javascript">
  // Base URL of the exported Teachable Machine model.
  // NOTE: this constant shadows the global URL constructor inside this
  // script, which is why the object-URL API is aliased as URLA below.
  const URL = "https://teachablemachine.withgoogle.com/models/fakemodel/";

  var URLA = window.URL || window.webkitURL;
  // First (and only) <video> element on the page.
  var video1 = document.getElementsByTagName('video')[0];

  // Wires the chosen local file into the <video> element: builds an
  // object URL for it, assigns it as the video source, then plays the
  // video once the first frame is available.
  function onChange() {
    const picker = document.getElementById('fileItem');
    const chosen = picker.files[0];
    const objectUrl = URLA.createObjectURL(chosen);
    console.log(chosen);
    console.log(objectUrl);
    document.querySelector('#webcam-container').src = objectUrl;
    video1.load();
    video1.onloadeddata = function() {
      video1.play();
    }
  }


  let model, webcam, labelContainer, maxPredictions;

  // Loads the Teachable Machine model, builds the label slots, and starts
  // the prediction loop.
  // Fix: the original appended `webcam.canvas` to the container, but no
  // webcam object is ever created in this file (a local <video> is used
  // instead), so init() threw a ReferenceError — and it never scheduled
  // loop(), so no predictions would have run either way.
  async function init() {
    const modelURL = URL + "model.json";
    const metadataURL = URL + "metadata.json";

    // model.json holds the topology; metadata.json holds the class labels.
    model = await tmImage.load(modelURL, metadataURL);
    maxPredictions = model.getTotalClasses();

    // One <div> per class to display its prediction text.
    labelContainer = document.getElementById("label-container");
    for (let i = 0; i < maxPredictions; i++) {
      labelContainer.appendChild(document.createElement("div"));
    }

    // Begin classifying frames of the local video.
    window.requestAnimationFrame(loop);
  }

  // Promise-based sleep: resolves after `ms` milliseconds; used to pace
  // the prediction loop.
  function delay(ms) {
    return new Promise(function (resolve) {
      setTimeout(resolve, ms);
    });
  }

  // Repeatedly classifies the current video frame (roughly every 2s).
  // Fix: removed `webcam.update()` — there is no webcam object in this
  // file, so that call threw a ReferenceError; the playing <video>
  // element advances its own frames without any update step.
  async function loop() {
    await delay(2000);
    await predict();
    window.requestAnimationFrame(loop);
  }

  // Runs the model on the current frame of the local video and writes one
  // "Prediction: …" line per class into the label container.
  // Fix: predicts on the <video> element itself rather than
  // `webcam.canvas`, which does not exist in this file — tmImage's
  // predict() accepts an HTMLVideoElement directly.
  async function predict() {
    //sample user
    var sampleusr = "Liam656656"
    //add date
    var now = new Date();
    const prediction = await model.predict(video1);
    for (let i = 0; i < maxPredictions; i++) {
      const classPrediction =
        "Prediction" + ":" + prediction[i].className + "-" + prediction[i].probability.toFixed(2) + "; " + "Timestamp:" + now.toUTCString() + "; " + "User:" + sampleusr;
      labelContainer.childNodes[i].innerHTML = classPrediction;
      console.log(classPrediction);
    }

  }
 </script>

File 2: (Functional)

<button type="button" onclick="init()">Start</button>
 <div id="webcam-container"></div>
 <div id="label-container"></div>
 <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
 <!-- Fixed: the src URL below was split by stray whitespace inside
      "teachablemachine-image.min.js", so the library would never load. -->
 <script src="https://cdn.jsdelivr.net/npm/@teachablemachine/image@0.8/dist/teachablemachine-image.min.js"></script>
 <script type="text/javascript">
   // Base URL of the exported Teachable Machine model (shadows the
   // global URL constructor inside this script).
   const URL = "https://teachablemachine.withgoogle.com/models/fakemodel/";

  let model, webcam, labelContainer, maxPredictions;

  // Loads the model, starts the webcam, and kicks off the predict loop.
  async function init() {
    const modelURL = `${URL}model.json`;
    const metadataURL = `${URL}metadata.json`;

    model = await tmImage.load(modelURL, metadataURL);
    maxPredictions = model.getTotalClasses();

    // Mirror the camera image (natural for front-facing webcams).
    webcam = new tmImage.Webcam(200, 200, /* flip */ true);
    await webcam.setup();
    await webcam.play();
    window.requestAnimationFrame(loop);

    // Show the live canvas and create one label slot per class.
    document.getElementById("webcam-container").appendChild(webcam.canvas);
    labelContainer = document.getElementById("label-container");
    let slot = 0;
    while (slot < maxPredictions) {
      labelContainer.appendChild(document.createElement("div"));
      slot += 1;
    }
  }

  // Promise-based sleep: resolves once `ms` milliseconds have elapsed.
  function delay(ms) {
    return new Promise((done) => setTimeout(done, ms));
  }

  // Grabs a fresh webcam frame, waits ~2s, classifies it, then re-arms
  // itself via requestAnimationFrame.
  async function loop() {
    // Refresh the hidden canvas with the latest webcam frame first.
    webcam.update();
    await delay(2000);
    await predict();
    // Schedule the next iteration.
    requestAnimationFrame(loop);
  }

  // Classifies the current webcam canvas and renders one line per class
  // (class name, probability, timestamp, and a sample user tag).
  async function predict() {
    const sampleusr = "Liam656656"; // sample user appended to each line
    const now = new Date();         // timestamp for this prediction batch
    const prediction = await model.predict(webcam.canvas);
    for (let i = 0; i < maxPredictions; i++) {
      const p = prediction[i];
      const classPrediction =
        "Prediction" + ":" + p.className + "-" + p.probability.toFixed(2) +
        "; " + "Timestamp:" + now.toUTCString() + "; " + "User:" + sampleusr;
      labelContainer.childNodes[i].innerHTML = classPrediction;
      console.log(classPrediction);
    }
  }
 </script>

Welcome to the community. This is more a question about web programming vs TensorFlow.js itself but I shall try to point you in the right direction. You may want to use some code like this to grab a local file and play it in a video tag (this pen also contains code to load a random TensorFlow.js model and use it to classify a frame of the video when you click the classify button below the video).

Once you have video rendered to canvas you can then pass the canvas to TensorFlow.js pre-made models for classification as usual (or grab a frame from the canvas and convert to Tensor if running on a custom model yourself).

Key things to note:

I believe your webpage must be served over HTTPS (secure) to access the webcam and local files without issues. You can use CodePen like I did to get a free HTTPS page to test on, or:

  1. Glitch.com offers free projects hosted over HTTPS — we even have a presence there ourselves with example code.
  2. You could sign up for something like a Google Cloud Storage bucket, which also serves over HTTPS. However, this is a paid service (though it will scale to as many users as you need).