jitsi-meet/react/features/virtual-background/actions.js
Gabriel Borlea 61684b1071 feat(facial-expressions): add the facial expression feature and display them in speakerstats (#10006)
* Initial implementation; Happy flow

* Maybe revert this

* Functional prototype

* feat(facial-expressions): get stream when changing background effect and use presenter effect with camera

* add(facial-expressions): array that stores the expressions during the meeting

* refactor(facial-expressions): capture ImageBitmap from stream with ImageCapture API

* add(speaker-stats): expression label

* fix(facial-expression): expression store

* revert: expression label on speaker stats

* add(facial-expressions): broadcast of expression when it changes

* feat: facial expression handling on prosody

* fix(facial-expressions): get the right track when opening and closing camera

* add(speaker-stats): facial expression column

* fix(facial-expressions): allow starting facial recognition only after joining the conference

* fix(mod_speakerstats_component): storing last emotion in speaker stats component and sending it

* chore(facial-expressions): change detection from 2000ms to 1000ms

* add(facial-expressions): send expression to server when there is only one participant

* feat(facial-expressions): store expressions as a timeline

* feat(mod_speakerstats_component): store facial expressions as a timeline

* fix(facial-expressions): stop facial recognition only when muting video track

* fix(facial-expressions): get the right track for face detection in presenter mode

* add: polyfills for image capture for firefox and safari

* refactor(facial-expressions): store expressions by counting them in a map

* chore(facial-expressions): remove manually assigning the backend for tensorflowjs

* feat(facial-expressions): move face-api from main thread to web worker

* fix(facial-expressions): make feature work on firefox and safari

* feat(facial-expressions): camera time tracker

* feat(facial-expressions): camera time tracker in prosody

* add(facial-expressions): expressions time as TimeElapsed object in speaker stats

* fix(facial-expressions): lower the frequency of detection when tf uses cpu backend

* add(facial-expressions): duration to the expression and send it with duration when it is done

* fix(facial-expressions): prosody speaker stats convert string values set by xmpp to number and bool

* refactor(facial-expressions): change expressions labels from text to emoji

* refactor(facial-expressions): remove camera time tracker

* add(facial-expressions): detection time interval

* chore(facial-expressions): add docs and minor refactor of the code

* refactor(facial-expressions): put timeout in worker and remove set interval in main thread

* feat(facial-expressions): disable feature in the config

* add(facial-expressions): tooltips of labels in speaker stats

* refactor(facial-expressions): send facial expressions function and remove some unused functions and console logs

* refactor(facial-expressions): rename action type when a change is done to the track by the virtual backgrounds to be used in facial expressions middleware

* chore(facial-expressions): order imports and format some code

* fix(facial-expressions): rebase issues with newer master

* fix(facial-expressions): package-lock.json

* fix(facial-expression): add commented default value of disableFacialRecognition flag and short description

* fix(facial-expressions): change disableFacialRecognition to enableFacialRecognition flag in config

* fix: resources load-test package-lock.json

* fix(facial-expressions): set and get facial expressions only if facial recognition enabled

* add: facial recognition resources folder in .eslintignore

* chore: package-lock update

* fix: package-lock.json

* fix(facial-expressions): gpu memory leak in the web worker

* fix(facial-expressions): set cpu time interval for detection to 6000ms

* chore(speaker-stats): fix indentation

* chore(facial-expressions): remove empty lines between comments and type declarations

* fix(facial-expressions): remove camera timetracker

* fix(facial-expressions): remove facialRecognitionAllowed flag

* fix(facial-expressions): remove sending interval time to worker

* refactor(facial-expression): middleware

* fix(facial-expression): end tensor scope after setting backend

* fix(facial-expressions): sending info back to worker only on facial expression message

* fix: lint errors

* refactor(facial-expressions): bundle web worker using webpack

* fix: deploy-facial-expressions command in makefile

* chore: fix load test package-lock.json and package.json

* chore: sync package-lock.json

Co-authored-by: Mihai-Andrei Uscat <mihai.uscat@8x8.com>
2021-11-17 16:33:03 +02:00

87 lines · 2.7 KiB · JavaScript

// @flow

import { createVirtualBackgroundEffect } from '../stream-effects/virtual-background';

import { BACKGROUND_ENABLED, SET_VIRTUAL_BACKGROUND, VIRTUAL_BACKGROUND_TRACK_CHANGED } from './actionTypes';
import logger from './logger';
/**
 * Signals the local participant to activate the virtual background video effect or not.
 *
 * @param {Object} options - Represents the virtual background options that were set.
 * @param {Object} jitsiTrack - Represents the jitsi track that will have the background effect applied.
 * @returns {Promise}
 */
export function toggleBackgroundEffect(options: Object, jitsiTrack: Object) {
    return async function(dispatch: Object => Object, getState: () => any) {
        await dispatch(backgroundEnabled(options.enabled));
        await dispatch(setVirtualBackground(options));

        const state = getState();
        const virtualBackground = state['features/virtual-background'];

        if (jitsiTrack) {
            try {
                if (options.enabled) {
                    await jitsiTrack.setEffect(await createVirtualBackgroundEffect(virtualBackground, dispatch));
                } else {
                    await jitsiTrack.setEffect(undefined);
                    dispatch(backgroundEnabled(false));
                }
            } catch (error) {
                dispatch(backgroundEnabled(false));
                logger.error('Error on applying background effect:', error);
            }
        }
    };
}
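
// Usage sketch (illustrative, not part of this file): dispatching the thunk
// from UI code to apply a blur effect to the local video track. The
// `localVideoTrack` variable and the option values below are assumptions for
// the example, not values defined in this module.
//
// dispatch(toggleBackgroundEffect({
//     backgroundType: 'blur',
//     blurValue: 25,
//     enabled: true,
//     selectedThumbnail: 'blur'
// }, localVideoTrack));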
/**
 * Sets the selected virtual background image object.
 *
 * @param {Object} options - Represents the virtual background options that were set.
 * @returns {{
 *     type: SET_VIRTUAL_BACKGROUND,
 *     virtualSource: string,
 *     blurValue: number,
 *     backgroundType: string,
 *     selectedThumbnail: string
 * }}
 */
export function setVirtualBackground(options: Object) {
    return {
        type: SET_VIRTUAL_BACKGROUND,
        virtualSource: options?.url,
        blurValue: options?.blurValue,
        backgroundType: options?.backgroundType,
        selectedThumbnail: options?.selectedThumbnail
    };
}
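
// For illustration (hypothetical values): calling
// setVirtualBackground({ url: 'bg.jpg', backgroundType: 'image', selectedThumbnail: 'image-1' })
// produces the plain action
// { type: SET_VIRTUAL_BACKGROUND, virtualSource: 'bg.jpg', blurValue: undefined,
//   backgroundType: 'image', selectedThumbnail: 'image-1' }.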
/**
 * Signals the local participant that the background effect has been enabled.
 *
 * @param {boolean} backgroundEffectEnabled - Indicates whether the virtual background effect is activated.
 * @returns {{
 *     type: BACKGROUND_ENABLED,
 *     backgroundEffectEnabled: boolean
 * }}
 */
export function backgroundEnabled(backgroundEffectEnabled: boolean) {
    return {
        type: BACKGROUND_ENABLED,
        backgroundEffectEnabled
    };
}
/**
 * Signals that the local track was changed due to a change of the virtual background.
 *
 * @returns {{
 *     type: VIRTUAL_BACKGROUND_TRACK_CHANGED
 * }}
 */
export function virtualBackgroundTrackChanged() {
    return {
        type: VIRTUAL_BACKGROUND_TRACK_CHANGED
    };
}
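
The commit message above notes that the track-changed action type was renamed so it could be consumed by the facial-expressions middleware. A minimal sketch of such a listener, assuming jitsi-meet's MiddlewareRegistry from react/features/base/redux; the handler body is an illustrative placeholder, not the actual implementation:

// Hypothetical middleware sketch; not part of actions.js.
import { MiddlewareRegistry } from '../base/redux';

import { VIRTUAL_BACKGROUND_TRACK_CHANGED } from './actionTypes';

MiddlewareRegistry.register(store => next => action => {
    const result = next(action);

    if (action.type === VIRTUAL_BACKGROUND_TRACK_CHANGED) {
        // E.g. restart face detection on the new local video track here.
    }

    return result;
});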