Files
jitsi-meet/react/features/facial-recognition/facialExpressionsWorker.js
Gabriel Borlea 61684b1071 feat(facial-expressions): add the facial expression feature and display them in speakerstats (#10006)
* Initial implementation; Happy flow

* Maybe revert this

* Functional prototype

* feat(facial-expressions): get stream when changing background effect and use presenter effect with camera

* add(facial-expressions): array that stores the expressions during the meeting

* refactor(facial-expressions): capture imagebitmap from stream with imagecapture api

* add(speaker-stats): expression label

* fix(facial-expression): expression store

* revert: expression label on speaker stats

* add(facial-expressions): broadcast of expression when it changes

* feat: facial expression handling on prosody

* fix(facial-expressions): get the right track when opening and closing camera

* add(speaker-stats): facial expression column

* fix(facial-expressions): allow facial recognition to start only after joining the conference

* fix(mod_speakerstats_component): storing last emotion in speaker stats component and sending it

* chore(facial-expressions): change detection from 2000ms to 1000ms

* add(facial-expressions): send expression to server when there is only one participant

* feat(facial-expressions): store expressions as a timeline

* feat(mod_speakerstats_component): store facial expressions as a timeline

* fix(facial-expressions): stop facial recognition only when muting video track

* fix(facial-expressions): presenter mode get right track to detect face

* add: polyfills for image capture for firefox and safari

* refactor(facial-expressions): store expressions by counting them in a map

* chore(facial-expressions): remove manually assigning the backend for tensorflowjs

* feat(facial-expressions): move face-api from main thread to web worker

* fix(facial-expressions): make feature work on firefox and safari

* feat(facial-expressions): camera time tracker

* feat(facial-expressions): camera time tracker in prosody

* add(facial-expressions): expressions time as TimeElapsed object in speaker stats

* fix(facial-expressions): lower the frequency of detection when tf uses cpu backend

* add(facial-expressions): duration to the expression and send it with the duration when it is done

* fix(facial-expressions): prosody speaker stats convert from string to number and bool values set by xmpp

* refactor(facial-expressions): change expressions labels from text to emoji

* refactor(facial-expressions): remove camera time tracker

* add(facial-expressions): detection time interval

* chore(facial-expressions): add docs and minor refactor of the code

* refactor(facial-expressions): put timeout in worker and remove set interval in main thread

* feat(facial-expressions): disable feature in the config

* add(facial-expressions): tooltips of labels in speaker stats

* refactor(facial-expressions): send facial expressions function and remove some unused functions and console logs

* refactor(facial-expressions): rename action type when a change is done to the track by the virtual backgrounds to be used in facial expressions middleware

* chore(facial-expressions): order imports and format some code

* fix(facial-expressions): rebase issues with newer master

* fix(facial-expressions): package-lock.json

* fix(facial-expression): add commented default value of disableFacialRecognition flag and short description

* fix(facial-expressions): change disableFacialRecognition to enableFacialRecognition flag in config

* fix: resources load-test package-lock.json

* fix(facial-expressions): set and get facial expressions only if facial recognition enabled

* add: facial recognition resources folder in .eslintignore

* chore: package-lock update

* fix: package-lock.json

* fix(facial-expressions): gpu memory leak in the web worker

* fix(facial-expressions): set cpu time interval for detection to 6000ms

* chore(speaker-stats): fix indentation

* chore(facial-expressions): remove empty lines between comments and type declarations

* fix(facial-expressions): remove camera time tracker

* fix(facial-expressions): remove facialRecognitionAllowed flag

* fix(facial-expressions): remove sending interval time to worker

* refactor(facial-expression): middleware

* fix(facial-expression): end tensor scope after setting backend

* fix(facial-expressions): sending info back to worker only on facial expression message

* fix: lint errors

* refactor(facial-expressions): bundle web worker using webpack

* fix: deploy-facial-expressions command in makefile

* chore: fix load test package-lock.json and package.json

* chore: sync package-lock.json

Co-authored-by: Mihai-Andrei Uscat <mihai.uscat@8x8.com>
2021-11-17 16:33:03 +02:00

117 lines
2.8 KiB
JavaScript

// @flow
import './faceApiPatch';

import * as faceapi from 'face-api.js';

/**
 * A flag that indicates whether the tensorflow models were loaded or not.
 */
let modelsLoaded = false;

/**
 * A flag that indicates whether the tensorflow backend is set or not.
 */
let backendSet = false;

/**
 * A timer variable for the setTimeout that paces the detections.
 */
let timer;

/**
 * The duration of the setTimeout.
 */
let timeoutDuration = -1;

/**
 * Time used for the detection interval when the facial expressions worker uses the webgl backend.
 */
const WEBGL_TIME_INTERVAL = 1000;

/**
 * Time used for the detection interval when the facial expressions worker uses the cpu backend.
 */
const CPU_TIME_INTERVAL = 6000;

// Dummy window object with screen dimensions, since face-api.js expects one
// to exist and web workers do not have a real window.
// eslint-disable-next-line no-unused-vars
const window = {
    screen: {
        width: 1280,
        height: 720
    }
};

onmessage = async function(message) {
    // Receives the image data and runs a detection on it.
    if (message.data.id === 'SET_TIMEOUT') {
        if (message.data.imageData === null || message.data.imageData === undefined) {
            return;
        }

        // Load the models if they were not loaded yet.
        if (!modelsLoaded) {
            await faceapi.loadTinyFaceDetectorModel('.');
            await faceapi.loadFaceExpressionModel('.');
            modelsLoaded = true;
        }

        faceapi.tf.engine().startScope();
        const tensor = faceapi.tf.browser.fromPixels(message.data.imageData);
        const detections = await faceapi.detectSingleFace(
            tensor,
            new faceapi.TinyFaceDetectorOptions()
        ).withFaceExpressions();

        // Set the backend flag and pick the detection interval based on the
        // backend, if that was not done already.
        if (!backendSet) {
            const backend = faceapi.tf.getBackend();

            if (backend !== undefined) {
                if (backend === 'webgl') {
                    timeoutDuration = WEBGL_TIME_INTERVAL;
                } else if (backend === 'cpu') {
                    timeoutDuration = CPU_TIME_INTERVAL;
                }
                self.postMessage({
                    type: 'tf-backend',
                    value: backend
                });
                backendSet = true;
            }
        }
        faceapi.tf.engine().endScope();

        let facialExpression;

        if (detections) {
            // Report the expression with the highest score.
            facialExpression = detections.expressions.asSortedArray()[0].expression;
        }

        if (timeoutDuration === -1) {
            self.postMessage({
                type: 'facial-expression',
                value: facialExpression
            });
        } else {
            // Delay the result so the main thread does not request the next
            // frame more often than the backend can handle.
            timer = setTimeout(() => {
                self.postMessage({
                    type: 'facial-expression',
                    value: facialExpression
                });
            }, timeoutDuration);
        }
    } else if (message.data.id === 'CLEAR_TIMEOUT') {
        // Clear the pending timeout, e.g. when the video track gets muted.
        if (timer) {
            clearTimeout(timer);
            timer = null;
        }
    }
};
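
For context, a minimal sketch of how a main-thread driver for this worker could look. This is not the actual jitsi-meet middleware; the worker path, the startDetection/stopDetection names, and the plain getUserMedia stream are illustrative assumptions. It only demonstrates the message protocol the worker expects: a SET_TIMEOUT message carrying an imageData frame in, facial-expression and tf-backend messages out, and CLEAR_TIMEOUT to cancel a pending result.

// Illustrative only: drives facialExpressionsWorker.js from the main thread.
// The worker path and function names are assumptions, not jitsi-meet code.
const worker = new Worker('facialExpressionsWorker.js');

let imageCapture;

worker.onmessage = async ({ data }) => {
    if (data.type === 'facial-expression' && data.value) {
        console.log('Detected expression:', data.value); // e.g. 'happy'
    }

    // The worker delays each result by timeoutDuration, so replying to every
    // result with the next frame is what paces the detection loop; there is
    // no setInterval on the main thread.
    if (data.type === 'facial-expression' && imageCapture) {
        worker.postMessage({
            id: 'SET_TIMEOUT',
            imageData: await imageCapture.grabFrame()
        });
    }
};

async function startDetection() {
    const stream = await navigator.mediaDevices.getUserMedia({ video: true });

    // ImageCapture needs a polyfill on Firefox and Safari (see the commit log).
    imageCapture = new ImageCapture(stream.getVideoTracks()[0]);
    worker.postMessage({
        id: 'SET_TIMEOUT',
        imageData: await imageCapture.grabFrame()
    });
}

function stopDetection() {
    imageCapture = undefined;
    worker.postMessage({ id: 'CLEAR_TIMEOUT' });
}

This matches the "put timeout in worker and remove set interval in main thread" commit above: the detection cadence lives entirely in the worker, and the main thread simply answers each result with a fresh ImageBitmap grabbed via the ImageCapture API.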