mirror of
https://gitcode.com/GitHub_Trending/ji/jitsi-meet.git
synced 2026-05-14 23:27:47 +00:00
* Initial implementation; Happy flow * Maybe revert this * Functional prototype * feat(facial-expressions): get stream when changing background effect and use presenter effect with camera * add(facial-expressions): array that stores the expressions durin the meeting * refactor(facial-expressions): capture imagebitmap from stream with imagecapture api * add(speaker-stats): expression label * fix(facial-expression): expression store * revert: expression leabel on speaker stats * add(facial-expressions): broadcast of expression when it changes * feat: facial expression handling on prosody * fix(facial-expressions): get the right track when opening and closing camera * add(speaker-stats): facial expression column * fix(facial-expressions): allow to start facial recognition only after joining conference * fix(mod_speakerstats_component): storing last emotion in speaker stats component and sending it * chore(facial-expressions): change detection from 2000ms to 1000ms * add(facial-expressions): send expression to server when there is only one participant * feat(facial-expressions): store expresions as a timeline * feat(mod_speakerstats_component): store facial expresions as a timeline * fix(facial-expressions): stop facial recognition only when muting video track * fix(facial-expressions): presenter mode get right track to detect face * add: polyfils for image capture for firefox and safari * refactor(facial-expressions): store expressions by counting them in a map * chore(facial-expressions): remove manually assigning the backend for tenserflowjs * feat(facial-expressions): move face-api from main thread to web worker * fix(facial-expressions): make feature work on firefox and safari * feat(facial-expressions): camera time tracker * feat(facial-expressions): camera time tracker in prosody * add(facial-expressions): expressions time as TimeElapsed object in speaker stats * fix(facial-expresions): lower the frequency of detection when tf uses cpu backend * 
add(facial-expressions): duration to the expression and send it with durantion when it is done * fix(facial-expressions): prosody speaker stats covert fro string to number and bool values set by xmpp * refactor(facial-expressions): change expressions labels from text to emoji * refactor(facial-expressions): remove camera time tracker * add(facial-expressions): detection time interval * chore(facial-expressions): add docs and minor refactor of the code * refactor(facial-expressions): put timeout in worker and remove set interval in main thread * feat(facial-expressions): disable feature in the config * add(facial-expressions): tooltips of labels in speaker stats * refactor(facial-expressions): send facial expressions function and remove some unused functions and console logs * refactor(facial-expressions): rename action type when a change is done to the track by the virtual backgrounds to be used in facial expressions middleware * chore(facial-expressions): order imports and format some code * fix(facial-expressions): rebase issues with newer master * fix(facial-expressions): package-lock.json * fix(facial-expression): add commented default value of disableFacialRecognition flag and short description * fix(facial-expressions): change disableFacialRecognition to enableFacialRecognition flag in config * fix: resources load-test package-lock.json * fix(facial-expressions): set and get facial expressions only if facial recognition enabled * add: facial recognition resources folder in .eslintignore * chore: package-lock update * fix: package-lock.json * fix(facial-expressions): gpu memory leak in the web worker * fix(facial-expressions): set cpu time interval for detection to 6000ms * chore(speaker-stats): fix indentation * chore(facial-expressions): remove empty lines between comments and type declarations * fix(facial-expressions): remove camera timetracker * fix(facial-expressions): remove facialRecognitionAllowed flag * fix(facial-expressions): remove sending 
interval time to worker * refactor(facial-expression): middleware * fix(facial-expression): end tensor scope after setting backend * fix(facial-expressions): sending info back to worker only on facial expression message * fix: lint errors * refactor(facial-expressions): bundle web worker using webpack * fix: deploy-facial-expressions command in makefile * chore: fix load test package-lock.json and package.json * chore: sync package-lock.json Co-authored-by: Mihai-Andrei Uscat <mihai.uscat@8x8.com>
107 lines
3.0 KiB
JavaScript
107 lines
3.0 KiB
JavaScript
/* eslint-disable */
|
|
// From: https://github.com/justadudewhohacks/face-api.js/issues/47
|
|
// This is needed because face-api.js does not support working in a WebWorker natively
|
|
// Updated Dec 1 2020 to work on latest Chrome (tested in WebWorkers on Chrome Mobile on Android / Google Pixel 3 as well)
|
|
if (!self.OffscreenCanvas) {
    // Stub for environments whose worker scope lacks OffscreenCanvas.
    // face-api.js only needs the constructor to exist; it never draws here.
    class OffscreenCanvas {
        constructor() {
            // Intentionally empty — identity only.
        }
    }

    self.OffscreenCanvas = OffscreenCanvas;
}
|
|
|
|
if (!self.OffscreenCanvasRenderingContext2D) {
    // Matching stub for the 2D rendering context type; again, only the
    // constructor's existence matters for face-api's feature probes.
    class OffscreenCanvasRenderingContext2D {
        constructor() {
            // Intentionally empty — identity only.
        }
    }

    self.OffscreenCanvasRenderingContext2D = OffscreenCanvasRenderingContext2D;
}
|
|
|
|
// Publish the (possibly shimmed) OffscreenCanvas types under the DOM names
// that face-api.js expects to find on a browser window.
self.HTMLCanvasElement = OffscreenCanvas;
self.Canvas = OffscreenCanvas;

// Kept from the original, commented out there as well — presumably abandoned
// because Function#name cannot be changed by plain assignment (confirm):
// self.HTMLCanvasElement.name = 'HTMLCanvasElement';
// self.Canvas.name = 'Canvas';

self.CanvasRenderingContext2D = OffscreenCanvasRenderingContext2D;
|
|
|
|
// Bare constructor functions standing in for the DOM's HTMLImageElement and
// HTMLVideoElement, which do not exist inside a WebWorker. The typeof checks
// further down only require the names to be defined.
function HTMLImageElement(){}

function HTMLVideoElement(){}

// Also expose them under the legacy `Image` / `Video` global aliases.
self.Image = HTMLImageElement;

self.Video = HTMLVideoElement;
|
|
|
|
/**
 * Minimal in-memory stand-in for the `localStorage` Storage object,
 * backed by a plain object held in a closure.
 *
 * Return values intentionally mirror the underlying expressions:
 * `setItem` returns the stringified stored value, `removeItem` returns the
 * result of `delete`, and `clear` returns the fresh empty store.
 */
function Storage () {
    let store = {};

    this.clear = function () {
        return store = {};
    };

    this.getItem = function (key) {
        // Own-property check so inherited names (e.g. 'toString') stay hidden.
        return Object.prototype.hasOwnProperty.call(store, key) ? store[key] : undefined;
    };

    this.removeItem = function (key) {
        return delete store[key];
    };

    this.setItem = function (key, value) {
        // Real Web Storage coerces values to strings; do the same here.
        return store[key] = String(value);
    };
}
|
|
// A bare EventTarget is enough of a `document` for face-api's probes.
class Document extends EventTarget {}

self.document = new Document();

// Inside a worker, the global scope itself plays the role of `window`.
self.Window = self;
self.window = self;

// Back the `localStorage` API with the in-memory Storage shim above.
self.localStorage = new Storage();
|
|
|
|
/**
 * Minimal `document.createElement` replacement. Only 'canvas' is supported:
 * it returns a new Canvas (OffscreenCanvas) decorated with the DOM element
 * properties face-api reads. Any other tag is logged and yields undefined.
 *
 * @param {string} element - Tag name being requested.
 * @returns {Object|undefined} A canvas-like object, or undefined for
 * unsupported tags.
 */
function createElement(element) {
    if (element === 'canvas') {
        const canvas = new Canvas(1, 1);

        // Dress the canvas up as a DOM <canvas> element.
        canvas.localName = 'canvas';
        canvas.nodeName = 'CANVAS';
        canvas.tagName = 'CANVAS';
        canvas.nodeType = 1;
        canvas.innerHTML = '';
        canvas.remove = () => {
            console.log('nope');
        };

        return canvas;
    }

    // Unsupported tag: log it so missing cases show up during development.
    console.log('arg', element);
}
|
|
|
|
// Route element creation through the shim above (only 'canvas' is supported)
// and mirror the worker's own location onto the fake document.
document.createElement = createElement;

document.location = self.location;
|
|
|
|
// These are the same checks face-api.js/isBrowser does.
// Each failure warns (rather than throws) so every missing global is
// reported in one pass before the hard check at the bottom of the file.
// BUG FIX: the first check originally read `if(!typeof window == 'object')`,
// which parses as `(!typeof window) == 'object'` and is always false, so a
// missing `window` was never reported.
if (typeof window !== 'object') {
    console.warn("Check failed: window");
}

if (typeof document === 'undefined') {
    console.warn("Check failed: document");
}

if (typeof HTMLImageElement === 'undefined') {
    console.warn("Check failed: HTMLImageElement");
}

if (typeof HTMLCanvasElement === 'undefined') {
    console.warn("Check failed: HTMLCanvasElement");
}

if (typeof HTMLVideoElement === 'undefined') {
    console.warn("Check failed: HTMLVideoElement");
}

if (typeof ImageData === 'undefined') {
    console.warn("Check failed: ImageData");
}

if (typeof CanvasRenderingContext2D === 'undefined') {
    console.warn("Check failed: CanvasRenderingContext2D");
}
|
|
|
|
// Re-publish the patched globals. These are no-ops when the assignments
// above already took effect, but are kept to mirror the original setup.
self.window = window;
self.document = document;
self.HTMLImageElement = HTMLImageElement;
self.HTMLVideoElement = HTMLVideoElement;

// These are the same checks face-api.js/isBrowser does.
// All probes are side-effect-free typeof tests, so evaluating them eagerly
// in a list is equivalent to the original short-circuited && chain.
const requiredGlobals = [
    typeof window === 'object',
    typeof document !== 'undefined',
    typeof HTMLImageElement !== 'undefined',
    typeof HTMLCanvasElement !== 'undefined',
    typeof HTMLVideoElement !== 'undefined',
    typeof ImageData !== 'undefined',
    typeof CanvasRenderingContext2D !== 'undefined'
];
const isBrowserCheck = requiredGlobals.every(Boolean);

// Fail fast: if any monkey patch above did not take, face-api cannot work.
if (!isBrowserCheck) {
    throw new Error("Failed to monkey patch for face-api, face-api will fail");
}
|