From 42703fed474c72606f21e92214a24a0b1b5aaed7 Mon Sep 17 00:00:00 2001 From: Avram Tudor Date: Wed, 6 Apr 2022 12:10:31 +0300 Subject: [PATCH] ref(face-landmarks) refactor namings (#11307) * ref(face-landmarks) refactor namings * code review --- .eslintignore | 2 +- Makefile | 14 +-- conference.js | 9 -- config.js | 20 ++-- react/features/app/middlewares.web.js | 2 +- react/features/app/reducers.web.js | 2 +- react/features/base/config/configWhitelist.js | 3 +- react/features/face-landmarks/actionTypes.js | 60 ++++++++++ .../actions.js | 108 +++++++++--------- .../constants.js | 4 +- .../createImageBitmap.js | 0 .../faceApiPatch.js | 0 .../faceLandmarksWorker.js} | 0 .../functions.js | 48 ++++---- .../index.js | 0 .../logger.js | 2 +- .../middleware.js | 34 +++--- .../reducer.js | 42 ++++--- .../resources/README.md | 4 +- ...ace_expression_model-weights_manifest.json | 0 .../resources/face_expression_model.bin | Bin ..._face_detector_model-weights_manifest.json | 0 .../resources/tiny_face_detector_model.bin | Bin .../facial-recognition/actionTypes.js | 59 ---------- .../filmstrip/components/web/Thumbnail.js | 2 +- react/features/rtcstats/RTCStats.js | 8 +- react/features/rtcstats/middleware.js | 10 +- react/features/speaker-stats/actionTypes.js | 6 +- react/features/speaker-stats/actions.any.js | 8 +- .../components/AbstractSpeakerStatsList.js | 18 +-- ...ionsSwitch.js => FaceExpressionsSwitch.js} | 14 +-- .../components/web/SpeakerStats.js | 28 ++--- .../components/web/SpeakerStatsItem.js | 22 ++-- .../components/web/SpeakerStatsLabels.js | 16 +-- react/features/speaker-stats/reducer.js | 8 +- .../mod_speakerstats_component.lua | 24 ++-- webpack.config.js | 4 +- 37 files changed, 288 insertions(+), 293 deletions(-) create mode 100644 react/features/face-landmarks/actionTypes.js rename react/features/{facial-recognition => face-landmarks}/actions.js (62%) rename react/features/{facial-recognition => face-landmarks}/constants.js (88%) rename react/features/{facial-recognition => face-landmarks}/createImageBitmap.js (100%) rename react/features/{facial-recognition => face-landmarks}/faceApiPatch.js (100%) rename react/features/{facial-recognition/facialExpressionsWorker.js => face-landmarks/faceLandmarksWorker.js} (100%) rename react/features/{facial-recognition => face-landmarks}/functions.js (77%) rename react/features/{facial-recognition => face-landmarks}/index.js (100%) rename react/features/{facial-recognition => face-landmarks}/logger.js (53%) rename react/features/{facial-recognition => face-landmarks}/middleware.js (72%) rename react/features/{facial-recognition => face-landmarks}/reducer.js (50%) rename react/features/{facial-recognition => face-landmarks}/resources/README.md (87%) rename react/features/{facial-recognition => face-landmarks}/resources/face_expression_model-weights_manifest.json (100%) rename react/features/{facial-recognition => face-landmarks}/resources/face_expression_model.bin (100%) rename react/features/{facial-recognition => face-landmarks}/resources/tiny_face_detector_model-weights_manifest.json (100%) rename react/features/{facial-recognition => face-landmarks}/resources/tiny_face_detector_model.bin (100%) delete mode 100644 react/features/facial-recognition/actionTypes.js rename react/features/speaker-stats/components/web/{FacialExpressionsSwitch.js => FaceExpressionsSwitch.js} (87%) diff --git a/.eslintignore b/.eslintignore index 2cfc20c5b8..eab8d9621d 100644 --- a/.eslintignore +++ b/.eslintignore @@ -8,7 +8,7 @@ libs/* resources/* 
react/features/stream-effects/virtual-background/vendor/* load-test/* -react/features/facial-recognition/resources/* +react/features/face-landmarks/resources/* # ESLint will by default ignore its own configuration file. However, there does # not seem to be a reason why we will want to risk being inconsistent with our diff --git a/Makefile b/Makefile index c600069df7..390c2452c3 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ TF_WASM_DIR = node_modules/@tensorflow/tfjs-backend-wasm/dist/ RNNOISE_WASM_DIR = node_modules/rnnoise-wasm/dist TFLITE_WASM = react/features/stream-effects/virtual-background/vendor/tflite MEET_MODELS_DIR = react/features/stream-effects/virtual-background/vendor/models -FACIAL_MODELS_DIR = react/features/facial-recognition/resources +FACE_MODELS_DIR = react/features/face-landmarks/resources NODE_SASS = ./node_modules/.bin/sass NPM = npm OUTPUT_DIR = . @@ -30,7 +30,7 @@ clean: rm -fr $(BUILD_DIR) .NOTPARALLEL: -deploy: deploy-init deploy-appbundle deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-css deploy-local deploy-facial-expressions +deploy: deploy-init deploy-appbundle deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-css deploy-local deploy-face-landmarks deploy-init: rm -fr $(DEPLOY_DIR) @@ -53,8 +53,8 @@ deploy-appbundle: $(OUTPUT_DIR)/analytics-ga.js \ $(BUILD_DIR)/analytics-ga.min.js \ $(BUILD_DIR)/analytics-ga.min.js.map \ - $(BUILD_DIR)/facial-expressions-worker.min.js \ - $(BUILD_DIR)/facial-expressions-worker.min.js.map \ + $(BUILD_DIR)/face-landmarks-worker.min.js \ + $(BUILD_DIR)/face-landmarks-worker.min.js.map \ $(DEPLOY_DIR) cp \ $(BUILD_DIR)/close3.min.js \ @@ -101,9 +101,9 @@ deploy-meet-models: $(MEET_MODELS_DIR)/*.tflite \ $(DEPLOY_DIR) -deploy-facial-expressions: +deploy-face-landmarks: cp \ - $(FACIAL_MODELS_DIR)/* \ + $(FACE_MODELS_DIR)/* \ $(DEPLOY_DIR) deploy-css: @@ -115,7 +115,7 @@ deploy-local: ([ ! -x deploy-local.sh ] || ./deploy-local.sh) .NOTPARALLEL: -dev: deploy-init deploy-css deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-facial-expressions +dev: deploy-init deploy-css deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-face-landmarks $(WEBPACK_DEV_SERVER) source-package: diff --git a/conference.js b/conference.js index 5a6d561659..71e83b7a9b 100644 --- a/conference.js +++ b/conference.js @@ -3116,15 +3116,6 @@ export default { room.sendEndpointMessage(to, payload); }, - /** - * Sends a facial expression as a string and its duration as a number - * @param {object} payload - Object containing the {string} facialExpression - * and {number} duration - */ - sendFacialExpression(payload) { - room.sendFacialExpression(payload); - }, - /** * Adds new listener. 
 * @param {String} eventName the name of the event
diff --git a/config.js b/config.js
index 238f927305..8e4c5bc238 100644
--- a/config.js
+++ b/config.js
@@ -752,18 +752,18 @@ var config = {
     // Enables sending participants' emails (if available) to callstats and other analytics
     // enableEmailInStats: false,
 
-    // Enables detecting faces of participants and get their expression and send it to other participants
-    // enableFacialRecognition: true,
-
-    // Enables displaying facial expressions in speaker stats
-    // enableDisplayFacialExpressions: true,
-
-    // faceCoordinatesSharing: {
+    // faceLandmarks: {
    //     // Enables sharing your face coordinates. Used for centering faces within a video.
-    //     enabled: false,
+    //     enableFaceCentering: false,
 
-    //     // Minimum required face movement percentage threshold for sending new face coordinates data.
-    //     threshold: 10,
+    //     // Enables detecting face expressions and sharing data with other participants.
+    //     enableFaceExpressionsDetection: false,
+
+    //     // Enables displaying face expressions in speaker stats.
+    //     enableDisplayFaceExpressions: false,
+
+    //     // Minimum required face movement percentage threshold for sending new face centering coordinates data.
+    //     faceCenteringThreshold: 10,
 
    //     // Milliseconds for processing a new image capture in order to detect face coordinates if they exist.
    //     captureInterval: 100
diff --git a/react/features/app/middlewares.web.js b/react/features/app/middlewares.web.js
index eb42a5f0bd..beb0b1e330 100644
--- a/react/features/app/middlewares.web.js
+++ b/react/features/app/middlewares.web.js
@@ -20,7 +20,7 @@ import '../shared-video/middleware';
 import '../settings/middleware';
 import '../talk-while-muted/middleware';
 import '../virtual-background/middleware';
-import '../facial-recognition/middleware';
+import '../face-landmarks/middleware';
 import '../gifs/middleware';
 import './middlewares.any';
diff --git a/react/features/app/reducers.web.js b/react/features/app/reducers.web.js
index b7f0450069..74e7667079 100644
--- a/react/features/app/reducers.web.js
+++ b/react/features/app/reducers.web.js
@@ -2,7 +2,7 @@ import '../base/devices/reducer';
 
 import '../e2ee/reducer';
-import '../facial-recognition/reducer';
+import '../face-landmarks/reducer';
 import '../feedback/reducer';
 import '../local-recording/reducer';
 import '../no-audio-signal/reducer';
diff --git a/react/features/base/config/configWhitelist.js b/react/features/base/config/configWhitelist.js
index b46416651a..2191a8a17e 100644
--- a/react/features/base/config/configWhitelist.js
+++ b/react/features/base/config/configWhitelist.js
@@ -136,7 +136,6 @@ export default [
     'enableDisplayNameInStats',
     'enableEmailInStats',
     'enableEncodedTransformSupport',
-    'enableFacialRecognition',
     'enableIceRestart',
     'enableInsecureRoomNameWarning',
     'enableLayerSuspension',
@@ -152,7 +151,7 @@ export default [
     'enableTcc',
     'enableAutomaticUrlCopy',
     'etherpad_base',
-    'faceCoordinatesSharing',
+    'faceLandmarks',
     'failICE',
     'feedbackPercentage',
     'fileRecordingsEnabled',
diff --git a/react/features/face-landmarks/actionTypes.js b/react/features/face-landmarks/actionTypes.js
new file mode 100644
index 0000000000..d8ad6c952a
--- /dev/null
+++ b/react/features/face-landmarks/actionTypes.js
@@ -0,0 +1,60 @@
+// @flow
+
+/**
+ * Redux action type dispatched in order to add a face expression.
+ *
+ * {
+ *     type: ADD_FACE_EXPRESSION,
+ *     faceExpression: string,
+ *     duration: number
+ * }
+ */
+export const ADD_FACE_EXPRESSION = 'ADD_FACE_EXPRESSION';
+
+/**
+ * Redux action type dispatched in order to add an expression to the face expressions buffer.
+ *
+ * {
+ *     type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
+ *     faceExpression: string
+ * }
+ */
+export const ADD_TO_FACE_EXPRESSIONS_BUFFER = 'ADD_TO_FACE_EXPRESSIONS_BUFFER';
+
+/**
+ * Redux action type dispatched in order to clear the face expressions buffer in the state.
+ *
+ * {
+ *     type: CLEAR_FACE_EXPRESSIONS_BUFFER
+ * }
+ */
+export const CLEAR_FACE_EXPRESSIONS_BUFFER = 'CLEAR_FACE_EXPRESSIONS_BUFFER';
+
+/**
+ * Redux action type dispatched in order to set recognition active in the state.
+ *
+ * {
+ *     type: START_FACE_LANDMARKS_DETECTION
+ * }
+ */
+export const START_FACE_LANDMARKS_DETECTION = 'START_FACE_LANDMARKS_DETECTION';
+
+/**
+ * Redux action type dispatched in order to set recognition inactive in the state.
+ *
+ * {
+ *     type: STOP_FACE_LANDMARKS_DETECTION
+ * }
+ */
+export const STOP_FACE_LANDMARKS_DETECTION = 'STOP_FACE_LANDMARKS_DETECTION';
+
+/**
+ * Redux action type dispatched in order to update coordinates of a detected face.
+ *
+ * {
+ *     type: UPDATE_FACE_COORDINATES,
+ *     faceBox: Object({ left, bottom, right, top }),
+ *     participantId: string
+ * }
+ */
+export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';
diff --git a/react/features/facial-recognition/actions.js b/react/features/face-landmarks/actions.js
similarity index 62%
rename from react/features/facial-recognition/actions.js
rename to react/features/face-landmarks/actions.js
index 7f16f57114..2c85fc8dca 100644
--- a/react/features/facial-recognition/actions.js
+++ b/react/features/face-landmarks/actions.js
@@ -8,11 +8,11 @@ import { getLocalVideoTrack } from '../base/tracks';
 import { getBaseUrl } from '../base/util';
 
 import {
-    ADD_FACIAL_EXPRESSION,
-    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
-    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
-    START_FACIAL_RECOGNITION,
-    STOP_FACIAL_RECOGNITION,
+    ADD_FACE_EXPRESSION,
+    ADD_TO_FACE_EXPRESSIONS_BUFFER,
+    CLEAR_FACE_EXPRESSIONS_BUFFER,
+    START_FACE_LANDMARKS_DETECTION,
+    STOP_FACE_LANDMARKS_DETECTION,
     UPDATE_FACE_COORDINATES
 } from './actionTypes';
 import {
@@ -24,7 +24,7 @@ import {
     getDetectionInterval,
     sendDataToWorker,
     sendFaceBoxToParticipants,
-    sendFacialExpressionsWebhook
+    sendFaceExpressionsWebhook
 } from './functions';
 import logger from './logger';
 
@@ -34,19 +34,19 @@ import logger from './logger';
 let imageCapture;
 
 /**
- * Object where the facial expression worker is stored.
+ * Object where the face landmarks worker is stored.
  */
 let worker;
 
 /**
- * The last facial expression received from the worker.
+ * The last face expression received from the worker.
  */
-let lastFacialExpression;
+let lastFaceExpression;
 
 /**
- * The last facial expression timestamp.
+ * The last face expression timestamp.
  */
-let lastFacialExpressionTimestamp;
+let lastFaceExpressionTimestamp;
 
 /**
  * How many duplicate consecutive expression occurred.
@@ -65,7 +65,7 @@ let webhookSendInterval;
 let detectionInterval;
 
 /**
- * Loads the worker that predicts the facial expression.
+ * Loads the worker that detects the face landmarks.
* * @returns {void} */ @@ -84,7 +84,7 @@ export function loadWorker() { } const baseUrl = `${getBaseUrl()}libs/`; - let workerUrl = `${baseUrl}facial-expressions-worker.min.js`; + let workerUrl = `${baseUrl}face-landmarks-worker.min.js`; const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' }); @@ -94,18 +94,18 @@ export function loadWorker() { const { faceExpression, faceBox } = e.data; if (faceExpression) { - if (faceExpression === lastFacialExpression) { + if (faceExpression === lastFaceExpression) { duplicateConsecutiveExpressions++; } else { - if (lastFacialExpression && lastFacialExpressionTimestamp) { - dispatch(addFacialExpression( - lastFacialExpression, + if (lastFaceExpression && lastFaceExpressionTimestamp) { + dispatch(addFaceExpression( + lastFaceExpression, duplicateConsecutiveExpressions + 1, - lastFacialExpressionTimestamp + lastFaceExpressionTimestamp )); } - lastFacialExpression = faceExpression; - lastFacialExpressionTimestamp = Date.now(); + lastFaceExpression = faceExpression; + lastFaceExpressionTimestamp = Date.now(); duplicateConsecutiveExpressions = 0; } } @@ -127,10 +127,10 @@ export function loadWorker() { } }; - const { enableFacialRecognition, faceCoordinatesSharing } = getState()['features/base/config']; + const { faceLandmarks } = getState()['features/base/config']; const detectionTypes = [ - faceCoordinatesSharing?.enabled && DETECTION_TYPES.FACE_BOX, - enableFacialRecognition && DETECTION_TYPES.FACE_EXPRESSIONS + faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX, + faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS ].filter(Boolean); worker.postMessage({ @@ -139,7 +139,7 @@ export function loadWorker() { detectionTypes }); - dispatch(startFacialRecognition()); + dispatch(startFaceLandmarksDetection()); }; } @@ -149,14 +149,14 @@ export function loadWorker() { * @param {Track | undefined} track - Track for which to start detecting faces. 
 * @returns {Function}
 */
-export function startFacialRecognition(track) {
+export function startFaceLandmarksDetection(track) {
     return async function(dispatch: Function, getState: Function) {
         if (!worker) {
             return;
         }
 
         const state = getState();
-        const { recognitionActive } = state['features/facial-recognition'];
+        const { recognitionActive } = state['features/face-landmarks'];
 
         if (recognitionActive) {
             logger.log('Face recognition already active.');
@@ -167,18 +167,18 @@ export function startFacialRecognition(track) {
         const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
 
         if (localVideoTrack === undefined) {
-            logger.warn('Facial recognition is disabled due to missing local track.');
+            logger.warn('Face landmarks detection is disabled due to missing local track.');
 
             return;
         }
 
         const stream = localVideoTrack.jitsiTrack.getOriginalStream();
 
-        dispatch({ type: START_FACIAL_RECOGNITION });
+        dispatch({ type: START_FACE_LANDMARKS_DETECTION });
         logger.log('Start face recognition');
 
         const firstVideoTrack = stream.getVideoTracks()[0];
-        const { enableFacialRecognition, faceCoordinatesSharing } = state['features/base/config'];
+        const { faceLandmarks } = state['features/base/config'];
 
         imageCapture = new ImageCapture(firstVideoTrack);
 
@@ -186,16 +186,16 @@ export function startFacialRecognition(track) {
             sendDataToWorker(
                 worker,
                 imageCapture,
-                faceCoordinatesSharing?.threshold
+                faceLandmarks?.faceCenteringThreshold
             );
         }, getDetectionInterval(state));
 
-        if (enableFacialRecognition) {
+        if (faceLandmarks?.enableFaceExpressionsDetection) {
             webhookSendInterval = setInterval(async () => {
-                const result = await sendFacialExpressionsWebhook(getState());
+                const result = await sendFaceExpressionsWebhook(getState());
 
                 if (result) {
-                    dispatch(clearFacialExpressionBuffer());
+                    dispatch(clearFaceExpressionBuffer());
                 }
             }, WEBHOOK_SEND_TIME_INTERVAL);
         }
@@ -207,14 +207,14 @@ export function stopFacialRecognition() {
  *
  * @returns {void}
  */
-export function stopFacialRecognition() {
+export function stopFaceLandmarksDetection() {
     return function(dispatch: Function) {
-        if (lastFacialExpression && lastFacialExpressionTimestamp) {
+        if (lastFaceExpression && lastFaceExpressionTimestamp) {
             dispatch(
-                addFacialExpression(
-                    lastFacialExpression,
+                addFaceExpression(
+                    lastFaceExpression,
                     duplicateConsecutiveExpressions + 1,
-                    lastFacialExpressionTimestamp
+                    lastFaceExpressionTimestamp
                 )
             );
         }
@@ -227,26 +227,26 @@ export function stopFacialRecognition() {
         clearInterval(webhookSendInterval);
         clearInterval(detectionInterval);
 
         webhookSendInterval = null;
         detectionInterval = null;
         imageCapture = null;
 
-        dispatch({ type: STOP_FACIAL_RECOGNITION });
+        dispatch({ type: STOP_FACE_LANDMARKS_DETECTION });
         logger.log('Stop face recognition');
     };
 }
 
 /**
- * Adds a new facial expression and its duration.
+ * Adds a new face expression and its duration.
  *
- * @param {string} facialExpression - Facial expression to be added.
- * @param {number} duration - Duration in seconds of the facial expression.
- * @param {number} timestamp - Duration in seconds of the facial expression.
+ * @param {string} faceExpression - Face expression to be added.
+ * @param {number} duration - Duration in seconds of the face expression.
+ * @param {number} timestamp - Timestamp of when the face expression was first detected.
* @returns {Object} */ -function addFacialExpression(facialExpression: string, duration: number, timestamp: number) { +function addFaceExpression(faceExpression: string, duration: number, timestamp: number) { return function(dispatch: Function, getState: Function) { const finalDuration = duration * getDetectionInterval(getState()) / 1000; dispatch({ - type: ADD_FACIAL_EXPRESSION, - facialExpression, + type: ADD_FACE_EXPRESSION, + faceExpression, duration: finalDuration, timestamp }); @@ -254,25 +254,25 @@ function addFacialExpression(facialExpression: string, duration: number, timesta } /** - * Adds a facial expression with its timestamp to the facial expression buffer. + * Adds a face expression with its timestamp to the face expression buffer. * - * @param {Object} facialExpression - Object containing facial expression string and its timestamp. + * @param {Object} faceExpression - Object containing face expression string and its timestamp. * @returns {Object} */ -export function addToFacialExpressionsBuffer(facialExpression: Object) { +export function addToFaceExpressionsBuffer(faceExpression: Object) { return { - type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER, - facialExpression + type: ADD_TO_FACE_EXPRESSIONS_BUFFER, + faceExpression }; } /** - * Clears the facial expressions array in the state. + * Clears the face expressions array in the state. * * @returns {Object} */ -function clearFacialExpressionBuffer() { +function clearFaceExpressionBuffer() { return { - type: CLEAR_FACIAL_EXPRESSIONS_BUFFER + type: CLEAR_FACE_EXPRESSIONS_BUFFER }; } diff --git a/react/features/facial-recognition/constants.js b/react/features/face-landmarks/constants.js similarity index 88% rename from react/features/facial-recognition/constants.js rename to react/features/face-landmarks/constants.js index e62813a004..5cb86ee631 100644 --- a/react/features/facial-recognition/constants.js +++ b/react/features/face-landmarks/constants.js @@ -1,6 +1,6 @@ // @flow -export const FACIAL_EXPRESSION_EMOJIS = { +export const FACE_EXPRESSIONS_EMOJIS = { happy: '😊', neutral: '😐', sad: '🙁', @@ -11,7 +11,7 @@ export const FACIAL_EXPRESSION_EMOJIS = { // disgusted: '🤢' }; -export const FACIAL_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ]; +export const FACE_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ]; /** * Time is ms used for sending expression. 
diff --git a/react/features/facial-recognition/createImageBitmap.js b/react/features/face-landmarks/createImageBitmap.js
similarity index 100%
rename from react/features/facial-recognition/createImageBitmap.js
rename to react/features/face-landmarks/createImageBitmap.js
diff --git a/react/features/facial-recognition/faceApiPatch.js b/react/features/face-landmarks/faceApiPatch.js
similarity index 100%
rename from react/features/facial-recognition/faceApiPatch.js
rename to react/features/face-landmarks/faceApiPatch.js
diff --git a/react/features/facial-recognition/facialExpressionsWorker.js b/react/features/face-landmarks/faceLandmarksWorker.js
similarity index 100%
rename from react/features/facial-recognition/facialExpressionsWorker.js
rename to react/features/face-landmarks/faceLandmarksWorker.js
diff --git a/react/features/facial-recognition/functions.js b/react/features/face-landmarks/functions.js
similarity index 77%
rename from react/features/facial-recognition/functions.js
rename to react/features/face-landmarks/functions.js
index 2b3986a229..bd8e2ca9b4 100644
--- a/react/features/facial-recognition/functions.js
+++ b/react/features/face-landmarks/functions.js
@@ -14,26 +14,26 @@ if (typeof OffscreenCanvas === 'undefined') {
 }
 
 /**
- * Sends the facial expression with its duration to all the other participants.
+ * Sends the face expression with its duration to all the other participants.
  *
  * @param {Object} conference - The current conference.
- * @param {string} facialExpression - Facial expression to be sent.
- * @param {number} duration - The duration of the facial expression in seconds.
+ * @param {string} faceExpression - Face expression to be sent.
+ * @param {number} duration - The duration of the face expression in seconds.
  * @returns {void}
  */
-export function sendFacialExpressionToParticipants(
+export function sendFaceExpressionToParticipants(
    conference: Object,
-    facialExpression: string,
+    faceExpression: string,
    duration: number
 ): void {
     try {
         conference.sendEndpointMessage('', {
-            type: 'facial_expression',
-            facialExpression,
+            type: 'face_landmark',
+            faceExpression,
             duration
         });
     } catch (err) {
-        logger.warn('Could not broadcast the facial expression to the other participants', err);
+        logger.warn('Could not broadcast the face expression to the other participants', err);
     }
 }
 
@@ -60,44 +60,44 @@ export function sendFaceBoxToParticipants(
 }
 
 /**
- * Sends the facial expression with its duration to xmpp server.
+ * Sends the face expression with its duration to the xmpp server.
  *
  * @param {Object} conference - The current conference.
- * @param {string} facialExpression - Facial expression to be sent.
- * @param {number} duration - The duration of the facial expression in seconds.
+ * @param {string} faceExpression - Face expression to be sent.
+ * @param {number} duration - The duration of the face expression in seconds.
  * @returns {void}
  */
-export function sendFacialExpressionToServer(
+export function sendFaceExpressionToServer(
    conference: Object,
-    facialExpression: string,
+    faceExpression: string,
    duration: number
 ): void {
     try {
-        conference.sendFacialExpression({
-            facialExpression,
+        conference.sendFaceLandmarks({
+            faceExpression,
             duration
         });
     } catch (err) {
-        logger.warn('Could not send the facial expression to xmpp server', err);
+        logger.warn('Could not send the face expression to xmpp server', err);
     }
 }
 
 /**
- * Sends facial expression to backend.
+ * Sends the buffered face expressions to the backend.
  *
  * @param {Object} state - Redux state.
* @returns {boolean} - True if sent, false otherwise. */ -export async function sendFacialExpressionsWebhook(state: Object) { +export async function sendFaceExpressionsWebhook(state: Object) { const { webhookProxyUrl: url } = state['features/base/config']; const { conference } = state['features/base/conference']; const { jwt } = state['features/base/jwt']; const { connection } = state['features/base/connection']; const jid = connection.getJid(); const localParticipant = getLocalParticipant(state); - const { facialExpressionsBuffer } = state['features/facial-recognition']; + const { faceExpressionsBuffer } = state['features/face-landmarks']; - if (facialExpressionsBuffer.length === 0) { + if (faceExpressionsBuffer.length === 0) { return false; } @@ -110,7 +110,7 @@ export async function sendFacialExpressionsWebhook(state: Object) { meetingFqn: extractFqnFromPath(), sessionId: conference.sessionId, submitted: Date.now(), - emotions: facialExpressionsBuffer, + emotions: faceExpressionsBuffer, participantId: localParticipant.jwtId, participantName: localParticipant.name, participantJid: jid @@ -192,7 +192,7 @@ export async function sendDataToWorker( * @returns {Object} */ function getFaceBoxForId(id: string, state: Object) { - return state['features/facial-recognition'].faceBoxes[id]; + return state['features/face-landmarks'].faceBoxes[id]; } /** @@ -221,7 +221,7 @@ export function getVideoObjectPosition(state: Object, id: string) { * @returns {number} - Number of miliseconds for doing face detection. */ export function getDetectionInterval(state: Object) { - const { faceCoordinatesSharing } = state['features/base/config']; + const { faceLandmarks } = state['features/base/config']; - return Math.min(faceCoordinatesSharing?.captureInterval || SEND_IMAGE_INTERVAL_MS); + return Math.max(faceLandmarks?.captureInterval || SEND_IMAGE_INTERVAL_MS); } diff --git a/react/features/facial-recognition/index.js b/react/features/face-landmarks/index.js similarity index 100% rename from react/features/facial-recognition/index.js rename to react/features/face-landmarks/index.js diff --git a/react/features/facial-recognition/logger.js b/react/features/face-landmarks/logger.js similarity index 53% rename from react/features/facial-recognition/logger.js rename to react/features/face-landmarks/logger.js index 01412d76f9..d2bec748e7 100644 --- a/react/features/facial-recognition/logger.js +++ b/react/features/face-landmarks/logger.js @@ -2,4 +2,4 @@ import { getLogger } from '../base/logging/functions'; -export default getLogger('features/facial-recognition'); +export default getLogger('features/face-landmarks'); diff --git a/react/features/facial-recognition/middleware.js b/react/features/face-landmarks/middleware.js similarity index 72% rename from react/features/facial-recognition/middleware.js rename to react/features/face-landmarks/middleware.js index 989ca54e1e..a4f21a1393 100644 --- a/react/features/facial-recognition/middleware.js +++ b/react/features/face-landmarks/middleware.js @@ -10,20 +10,20 @@ import { getParticipantCount } from '../base/participants'; import { MiddlewareRegistry } from '../base/redux'; import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks'; -import { ADD_FACIAL_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes'; +import { ADD_FACE_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes'; import { - addToFacialExpressionsBuffer, + addToFaceExpressionsBuffer, loadWorker, - stopFacialRecognition, - startFacialRecognition + stopFaceLandmarksDetection, + 
startFaceLandmarksDetection } from './actions'; import { FACE_BOX_EVENT_TYPE } from './constants'; -import { sendFacialExpressionToParticipants, sendFacialExpressionToServer } from './functions'; +import { sendFaceExpressionToParticipants, sendFaceExpressionToServer } from './functions'; MiddlewareRegistry.register(({ dispatch, getState }) => next => action => { - const { enableFacialRecognition, faceCoordinatesSharing } = getState()['features/base/config']; - const isEnabled = enableFacialRecognition || faceCoordinatesSharing?.enabled; + const { faceLandmarks } = getState()['features/base/config']; + const isEnabled = faceLandmarks?.enableFaceCentering || faceLandmarks?.enableFaceExpressionsDetection; if (action.type === CONFERENCE_JOINED) { if (isEnabled) { @@ -57,7 +57,7 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => { switch (action.type) { case CONFERENCE_WILL_LEAVE : { - dispatch(stopFacialRecognition()); + dispatch(stopFaceLandmarksDetection()); return next(action); } @@ -66,7 +66,7 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => { if (videoType === 'camera' && isLocal()) { // need to pass this since the track is not yet added in the store - dispatch(startFacialRecognition(action.track)); + dispatch(startFaceLandmarksDetection(action.track)); } return next(action); @@ -83,9 +83,9 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => { if (muted !== undefined) { // addresses video mute state changes if (muted) { - dispatch(stopFacialRecognition()); + dispatch(stopFaceLandmarksDetection()); } else { - dispatch(startFacialRecognition()); + dispatch(startFaceLandmarksDetection()); } } @@ -95,21 +95,21 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => { const { jitsiTrack: { isLocal, videoType } } = action.track; if (videoType === 'camera' && isLocal()) { - dispatch(stopFacialRecognition()); + dispatch(stopFaceLandmarksDetection()); } return next(action); } - case ADD_FACIAL_EXPRESSION: { + case ADD_FACE_EXPRESSION: { const state = getState(); const conference = getCurrentConference(state); if (getParticipantCount(state) > 1) { - sendFacialExpressionToParticipants(conference, action.facialExpression, action.duration); + sendFaceExpressionToParticipants(conference, action.faceExpression, action.duration); } - sendFacialExpressionToServer(conference, action.facialExpression, action.duration); - dispatch(addToFacialExpressionsBuffer({ - emotion: action.facialExpression, + sendFaceExpressionToServer(conference, action.faceExpression, action.duration); + dispatch(addToFaceExpressionsBuffer({ + emotion: action.faceExpression, timestamp: action.timestamp })); diff --git a/react/features/facial-recognition/reducer.js b/react/features/face-landmarks/reducer.js similarity index 50% rename from react/features/facial-recognition/reducer.js rename to react/features/face-landmarks/reducer.js index a7f8d11923..d1c89ee13f 100644 --- a/react/features/facial-recognition/reducer.js +++ b/react/features/face-landmarks/reducer.js @@ -3,17 +3,17 @@ import { ReducerRegistry } from '../base/redux'; import { - ADD_FACIAL_EXPRESSION, - ADD_TO_FACIAL_EXPRESSIONS_BUFFER, - CLEAR_FACIAL_EXPRESSIONS_BUFFER, - START_FACIAL_RECOGNITION, - STOP_FACIAL_RECOGNITION, + ADD_FACE_EXPRESSION, + ADD_TO_FACE_EXPRESSIONS_BUFFER, + CLEAR_FACE_EXPRESSIONS_BUFFER, + START_FACE_LANDMARKS_DETECTION, + STOP_FACE_LANDMARKS_DETECTION, UPDATE_FACE_COORDINATES } from './actionTypes'; const defaultState = { faceBoxes: {}, - 
facialExpressions: {
+    faceExpressions: {
         happy: 0,
         neutral: 0,
         surprised: 0,
@@ -22,36 +22,40 @@ const defaultState = {
         disgusted: 0,
         sad: 0
     },
-    facialExpressionsBuffer: [],
+    faceExpressionsBuffer: [],
     recognitionActive: false
 };
 
-ReducerRegistry.register('features/facial-recognition', (state = defaultState, action) => {
+ReducerRegistry.register('features/face-landmarks', (state = defaultState, action) => {
 switch (action.type) {
-    case ADD_FACIAL_EXPRESSION: {
-        state.facialExpressions[action.facialExpression] += action.duration;
-
-        return state;
-    }
-    case ADD_TO_FACIAL_EXPRESSIONS_BUFFER: {
+    case ADD_FACE_EXPRESSION: {
         return {
             ...state,
-            facialExpressionsBuffer: [ ...state.facialExpressionsBuffer, action.facialExpression ]
+            faceExpressions: {
+                ...state.faceExpressions,
+                [action.faceExpression]: state.faceExpressions[action.faceExpression] + action.duration
+            }
         };
     }
-    case CLEAR_FACIAL_EXPRESSIONS_BUFFER: {
+    case ADD_TO_FACE_EXPRESSIONS_BUFFER: {
         return {
             ...state,
-            facialExpressionsBuffer: []
+            faceExpressionsBuffer: [ ...state.faceExpressionsBuffer, action.faceExpression ]
         };
     }
-    case START_FACIAL_RECOGNITION: {
+    case CLEAR_FACE_EXPRESSIONS_BUFFER: {
+        return {
+            ...state,
+            faceExpressionsBuffer: []
+        };
+    }
+    case START_FACE_LANDMARKS_DETECTION: {
         return {
             ...state,
             recognitionActive: true
         };
     }
-    case STOP_FACIAL_RECOGNITION: {
+    case STOP_FACE_LANDMARKS_DETECTION: {
         return {
             ...state,
             recognitionActive: false
diff --git a/react/features/facial-recognition/resources/README.md b/react/features/face-landmarks/resources/README.md
similarity index 87%
rename from react/features/facial-recognition/resources/README.md
rename to react/features/face-landmarks/resources/README.md
index c0fda2873d..5f6b85038d 100644
--- a/react/features/facial-recognition/resources/README.md
+++ b/react/features/face-landmarks/resources/README.md
@@ -1,4 +1,4 @@
-# Facial Recognition and Facial Expression Models and Weights
+# Face Landmarks Detection and Face Expression Models and Weights
 
 ### Tiny Face Detector
 
@@ -6,7 +6,7 @@ It is a realtime face detector.
 
 ### Face Expression Recognition Model
 
-It is a models that recognizes facial expressions.
+It is a model that recognizes face expressions.
### Usage diff --git a/react/features/facial-recognition/resources/face_expression_model-weights_manifest.json b/react/features/face-landmarks/resources/face_expression_model-weights_manifest.json similarity index 100% rename from react/features/facial-recognition/resources/face_expression_model-weights_manifest.json rename to react/features/face-landmarks/resources/face_expression_model-weights_manifest.json diff --git a/react/features/facial-recognition/resources/face_expression_model.bin b/react/features/face-landmarks/resources/face_expression_model.bin similarity index 100% rename from react/features/facial-recognition/resources/face_expression_model.bin rename to react/features/face-landmarks/resources/face_expression_model.bin diff --git a/react/features/facial-recognition/resources/tiny_face_detector_model-weights_manifest.json b/react/features/face-landmarks/resources/tiny_face_detector_model-weights_manifest.json similarity index 100% rename from react/features/facial-recognition/resources/tiny_face_detector_model-weights_manifest.json rename to react/features/face-landmarks/resources/tiny_face_detector_model-weights_manifest.json diff --git a/react/features/facial-recognition/resources/tiny_face_detector_model.bin b/react/features/face-landmarks/resources/tiny_face_detector_model.bin similarity index 100% rename from react/features/facial-recognition/resources/tiny_face_detector_model.bin rename to react/features/face-landmarks/resources/tiny_face_detector_model.bin diff --git a/react/features/facial-recognition/actionTypes.js b/react/features/facial-recognition/actionTypes.js deleted file mode 100644 index e61f9a2c23..0000000000 --- a/react/features/facial-recognition/actionTypes.js +++ /dev/null @@ -1,59 +0,0 @@ -// @flow - -/** - * Redux action type dispatched in order to add a facial expression. - * - * { - * type: ADD_FACIAL_EXPRESSION, - * facialExpression: string, - * duration: number - * } - */ -export const ADD_FACIAL_EXPRESSION = 'ADD_FACIAL_EXPRESSION'; - -/** - * Redux action type dispatched in order to set recognition active in the state. - * - * { - * type: START_FACIAL_RECOGNITION - * } - */ -export const START_FACIAL_RECOGNITION = 'START_FACIAL_RECOGNITION'; - -/** - * Redux action type dispatched in order to set recognition inactive in the state. - * - * { - * type: STOP_FACIAL_RECOGNITION - * } - */ -export const STOP_FACIAL_RECOGNITION = 'STOP_FACIAL_RECOGNITION'; - -/** - * Redux action type dispatched in order to clear the facial expressions buffer in the state. - * - * { - * type: CLEAR_FACIAL_EXPRESSIONS_BUFFER - * } -*/ -export const CLEAR_FACIAL_EXPRESSIONS_BUFFER = 'CLEAR_FACIAL_EXPRESSIONS_BUFFER'; - -/** - * Redux action type dispatched in order to add a expression to the facial expressions buffer. - * - * { - * type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER - * } -*/ -export const ADD_TO_FACIAL_EXPRESSIONS_BUFFER = 'ADD_TO_FACIAL_EXPRESSIONS_BUFFER '; - -/** - * Redux action type dispatched in order to update coordinates of a detected face. 
- * - * { - * type: UPDATE_FACE_COORDINATES, - * faceBox: Object({ left, bottom, right, top }), - * participantId: string - * } - */ - export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES'; diff --git a/react/features/filmstrip/components/web/Thumbnail.js b/react/features/filmstrip/components/web/Thumbnail.js index 009215928a..96ef4963fd 100644 --- a/react/features/filmstrip/components/web/Thumbnail.js +++ b/react/features/filmstrip/components/web/Thumbnail.js @@ -25,7 +25,7 @@ import { getFakeScreenshareParticipantTrack, updateLastTrackVideoMediaEvent } from '../../../base/tracks'; -import { getVideoObjectPosition } from '../../../facial-recognition/functions'; +import { getVideoObjectPosition } from '../../../face-landmarks/functions'; import { hideGif, showGif } from '../../../gifs/actions'; import { getGifDisplayMode, getGifForParticipant } from '../../../gifs/functions'; import { PresenceLabel } from '../../../presence-status'; diff --git a/react/features/rtcstats/RTCStats.js b/react/features/rtcstats/RTCStats.js index f5a2fad383..941bb5de8b 100644 --- a/react/features/rtcstats/RTCStats.js +++ b/react/features/rtcstats/RTCStats.js @@ -106,13 +106,13 @@ class RTCStats { } /** - * Send facial expression data, the data will be processed by rtcstats-server and saved in the dump file. + * Send face expression data, the data will be processed by rtcstats-server and saved in the dump file. * - * @param {Object} facialExpressionData - Facial expression data to be saved in the rtcstats dump. + * @param {Object} faceExpressionData - Face expression data to be saved in the rtcstats dump. * @returns {void} */ - sendFacialExpressionData(facialExpressionData) { - this.trace && this.trace.statsEntry('facialExpression', null, facialExpressionData); + sendFaceExpressionData(faceExpressionData) { + this.trace && this.trace.statsEntry('faceExpression', null, faceExpressionData); } /** diff --git a/react/features/rtcstats/middleware.js b/react/features/rtcstats/middleware.js index 861bc638d7..eb28651ec6 100644 --- a/react/features/rtcstats/middleware.js +++ b/react/features/rtcstats/middleware.js @@ -7,7 +7,7 @@ import { CONFERENCE_UNIQUE_ID_SET, E2E_RTT_CHANGED, getConferenceOptions, getRoo import { LIB_WILL_INIT } from '../base/lib-jitsi-meet'; import { DOMINANT_SPEAKER_CHANGED, getLocalParticipant } from '../base/participants'; import { MiddlewareRegistry } from '../base/redux'; -import { ADD_FACIAL_EXPRESSION } from '../facial-recognition/actionTypes'; +import { ADD_FACE_EXPRESSION } from '../face-landmarks/actionTypes'; import RTCStats from './RTCStats'; import { canSendRtcstatsData, isRtcstatsEnabled } from './functions'; @@ -117,13 +117,13 @@ MiddlewareRegistry.register(store => next => action => { } break; } - case ADD_FACIAL_EXPRESSION: { + case ADD_FACE_EXPRESSION: { if (canSendRtcstatsData(state)) { - const { duration, facialExpression } = action; + const { duration, faceExpression } = action; - RTCStats.sendFacialExpressionData({ + RTCStats.sendFaceExpressionData({ duration, - facialExpression + faceExpression }); } break; diff --git a/react/features/speaker-stats/actionTypes.js b/react/features/speaker-stats/actionTypes.js index 75aca2f7f0..06dfb54e23 100644 --- a/react/features/speaker-stats/actionTypes.js +++ b/react/features/speaker-stats/actionTypes.js @@ -48,10 +48,10 @@ export const INIT_REORDER_STATS = 'INIT_REORDER_STATS'; export const RESET_SEARCH_CRITERIA = 'RESET_SEARCH_CRITERIA' /** - * Action type to toggle the facial expressions grid. 
+ * Action type to toggle the face expressions grid. * { - * type: TOGGLE_FACIAL_EXPRESSIONS + * type: TOGGLE_FACE_EXPRESSIONS * } */ -export const TOGGLE_FACIAL_EXPRESSIONS = 'SHOW_FACIAL_EXPRESSIONS'; +export const TOGGLE_FACE_EXPRESSIONS = 'SHOW_FACE_EXPRESSIONS'; diff --git a/react/features/speaker-stats/actions.any.js b/react/features/speaker-stats/actions.any.js index 205af93030..480fc71ac1 100644 --- a/react/features/speaker-stats/actions.any.js +++ b/react/features/speaker-stats/actions.any.js @@ -6,7 +6,7 @@ import { UPDATE_STATS, INIT_REORDER_STATS, RESET_SEARCH_CRITERIA, - TOGGLE_FACIAL_EXPRESSIONS + TOGGLE_FACE_EXPRESSIONS } from './actionTypes'; /** @@ -71,12 +71,12 @@ export function resetSearchCriteria() { } /** - * Toggles the facial expressions grid. + * Toggles the face expressions grid. * * @returns {Object} */ -export function toggleFacialExpressions() { +export function toggleFaceExpressions() { return { - type: TOGGLE_FACIAL_EXPRESSIONS + type: TOGGLE_FACE_EXPRESSIONS }; } diff --git a/react/features/speaker-stats/components/AbstractSpeakerStatsList.js b/react/features/speaker-stats/components/AbstractSpeakerStatsList.js index 30ce320c09..a003b5d969 100644 --- a/react/features/speaker-stats/components/AbstractSpeakerStatsList.js +++ b/react/features/speaker-stats/components/AbstractSpeakerStatsList.js @@ -21,13 +21,13 @@ const abstractSpeakerStatsList = (speakerStatsItem: Function, itemStyles?: Objec const dispatch = useDispatch(); const { t } = useTranslation(); const conference = useSelector(state => state['features/base/conference'].conference); - const { stats: speakerStats, showFacialExpressions } = useSelector(state => state['features/speaker-stats']); + const { stats: speakerStats, showFaceExpressions } = useSelector(state => state['features/speaker-stats']); const localParticipant = useSelector(getLocalParticipant); const { defaultRemoteDisplayName } = useSelector( state => state['features/base/config']) || {}; - const { enableDisplayFacialExpressions } = useSelector(state => state['features/base/config']) || {}; - const { facialExpressions: localFacialExpressions } = useSelector( - state => state['features/facial-recognition']) || {}; + const { faceLandmarks } = useSelector(state => state['features/base/config']) || {}; + const { faceExpressions: localFaceExpressions } = useSelector( + state => state['features/face-landmarks']) || {}; /** * Update the internal state with the latest speaker stats. @@ -48,8 +48,8 @@ const abstractSpeakerStatsList = (speakerStatsItem: Function, itemStyles?: Objec ? 
`${localParticipant.name} (${meString})` : meString ); - if (enableDisplayFacialExpressions) { - stats[userId].setFacialExpressions(localFacialExpressions); + if (faceLandmarks?.enableDisplayFaceExpressions) { + stats[userId].setFaceExpressions(localFaceExpressions); } } @@ -87,11 +87,11 @@ const abstractSpeakerStatsList = (speakerStatsItem: Function, itemStyles?: Objec props.dominantSpeakerTime = statsModel.getTotalDominantSpeakerTime(); props.participantId = userId; props.hasLeft = statsModel.hasLeft(); - if (showFacialExpressions) { - props.facialExpressions = statsModel.getFacialExpressions(); + if (showFaceExpressions) { + props.faceExpressions = statsModel.getFaceExpressions(); } props.hidden = statsModel.hidden; - props.showFacialExpressions = showFacialExpressions; + props.showFaceExpressions = showFaceExpressions; props.displayName = statsModel.getDisplayName() || defaultRemoteDisplayName; if (itemStyles) { props.styles = itemStyles; diff --git a/react/features/speaker-stats/components/web/FacialExpressionsSwitch.js b/react/features/speaker-stats/components/web/FaceExpressionsSwitch.js similarity index 87% rename from react/features/speaker-stats/components/web/FacialExpressionsSwitch.js rename to react/features/speaker-stats/components/web/FaceExpressionsSwitch.js index 0372594099..c9eea382a9 100644 --- a/react/features/speaker-stats/components/web/FacialExpressionsSwitch.js +++ b/react/features/speaker-stats/components/web/FaceExpressionsSwitch.js @@ -55,7 +55,7 @@ const useStyles = makeStyles(theme => { }); /** - * The type of the React {@code Component} props of {@link ToggleFacialExpressionsButton}. + * The type of the React {@code Component} props of {@link ToggleFaceExpressionsButton}. */ type Props = { @@ -67,16 +67,16 @@ type Props = { /** * The state of the button. */ - showFacialExpressions: boolean, + showFaceExpressions: boolean, }; /** - * React component for toggling facial expressions grid. + * React component for toggling face expressions grid. * * @returns {React$Element} */ -export default function FacialExpressionsSwitch({ onChange, showFacialExpressions }: Props) { +export default function FaceExpressionsSwitch({ onChange, showFaceExpressions }: Props) { const classes = useStyles(); const { t } = useTranslation(); @@ -84,14 +84,14 @@ export default function FacialExpressionsSwitch({ onChange, showFacialExpression
+ value = { showFaceExpressions } />
); } diff --git a/react/features/speaker-stats/components/web/SpeakerStats.js b/react/features/speaker-stats/components/web/SpeakerStats.js index e9a1ccc5ec..9f12a46c3f 100644 --- a/react/features/speaker-stats/components/web/SpeakerStats.js +++ b/react/features/speaker-stats/components/web/SpeakerStats.js @@ -6,14 +6,14 @@ import { useSelector, useDispatch } from 'react-redux'; import { Dialog } from '../../../base/dialog'; import { escapeRegexp } from '../../../base/util'; -import { resetSearchCriteria, toggleFacialExpressions, initSearch } from '../../actions'; +import { resetSearchCriteria, toggleFaceExpressions, initSearch } from '../../actions'; import { DISPLAY_SWITCH_BREAKPOINT, MOBILE_BREAKPOINT, RESIZE_SEARCH_SWITCH_CONTAINER_BREAKPOINT } from '../../constants'; -import FacialExpressionsSwitch from './FacialExpressionsSwitch'; +import FaceExpressionsSwitch from './FaceExpressionsSwitch'; import SpeakerStatsLabels from './SpeakerStatsLabels'; import SpeakerStatsList from './SpeakerStatsList'; import SpeakerStatsSearch from './SpeakerStatsSearch'; @@ -88,16 +88,16 @@ const useStyles = makeStyles(theme => { }); const SpeakerStats = () => { - const { enableDisplayFacialExpressions } = useSelector(state => state['features/base/config']); - const { showFacialExpressions } = useSelector(state => state['features/speaker-stats']); + const { faceLandmarks } = useSelector(state => state['features/base/config']); + const { showFaceExpressions } = useSelector(state => state['features/speaker-stats']); const { clientWidth } = useSelector(state => state['features/base/responsive-ui']); - const displaySwitch = enableDisplayFacialExpressions && clientWidth > DISPLAY_SWITCH_BREAKPOINT; + const displaySwitch = faceLandmarks?.enableDisplayFaceExpressions && clientWidth > DISPLAY_SWITCH_BREAKPOINT; const displayLabels = clientWidth > MOBILE_BREAKPOINT; const dispatch = useDispatch(); const classes = useStyles(); - const onToggleFacialExpressions = useCallback(() => - dispatch(toggleFacialExpressions()) + const onToggleFaceExpressions = useCallback(() => + dispatch(toggleFaceExpressions()) , [ dispatch ]); const onSearch = useCallback((criteria = '') => { @@ -106,7 +106,7 @@ const SpeakerStats = () => { , [ dispatch ]); useEffect(() => { - showFacialExpressions && !displaySwitch && dispatch(toggleFacialExpressions()); + showFaceExpressions && !displaySwitch && dispatch(toggleFaceExpressions()); }, [ clientWidth ]); useEffect(() => () => dispatch(resetSearchCriteria()), []); @@ -117,12 +117,12 @@ const SpeakerStats = () => { hideCancelButton = { true } submitDisabled = { true } titleKey = 'speakerStats.speakerStats' - width = { showFacialExpressions ? '664px' : 'small' }> + width = { showFaceExpressions ? '664px' : 'small' }>
{
{ displaySwitch - && + && }
{ displayLabels && (
+ showFaceExpressions = { showFaceExpressions ?? false } />
)} diff --git a/react/features/speaker-stats/components/web/SpeakerStatsItem.js b/react/features/speaker-stats/components/web/SpeakerStatsItem.js index d3b85bbee0..a705f5685f 100644 --- a/react/features/speaker-stats/components/web/SpeakerStatsItem.js +++ b/react/features/speaker-stats/components/web/SpeakerStatsItem.js @@ -5,7 +5,7 @@ import React from 'react'; import { Avatar, StatelessAvatar } from '../../../base/avatar'; import { getInitials } from '../../../base/avatar/functions'; import BaseTheme from '../../../base/ui/components/BaseTheme'; -import { FACIAL_EXPRESSIONS } from '../../../facial-recognition/constants.js'; +import { FACE_EXPRESSIONS } from '../../../face-landmarks/constants.js'; import TimeElapsed from './TimeElapsed'; @@ -20,15 +20,15 @@ type Props = { displayName: string, /** - * The object that has as keys the facial expressions of the + * The object that has as keys the face expressions of the * participant and as values a number that represents the count . */ - facialExpressions: Object, + faceExpressions: Object, /** - * True if the facial recognition is not disabled. + * True if the face expressions detection is not disabled. */ - showFacialExpressions: boolean, + showFaceExpressions: boolean, /** * The total milliseconds the participant has been dominant speaker. @@ -71,22 +71,22 @@ const SpeakerStatsItem = (props: Props) => { const rowDisplayClass = `row ${hasLeftClass} ${props.styles.item}`; const expressionClass = 'expression'; const nameTimeClass = `name-time${ - props.showFacialExpressions ? ' name-time_expressions-on' : '' + props.showFaceExpressions ? ' name-time_expressions-on' : '' }`; const timeClass = `${props.styles.time} ${props.isDominantSpeaker ? props.styles.dominant : ''}`; - const FacialExpressions = () => FACIAL_EXPRESSIONS.map( + const FaceExpressions = () => FACE_EXPRESSIONS.map( expression => (
- { props.facialExpressions[expression] } + { props.faceExpressions[expression] }
) ); @@ -123,10 +123,10 @@ const SpeakerStatsItem = (props: Props) => { time = { props.dominantSpeakerTime } />
- { props.showFacialExpressions + { props.showFaceExpressions && (
- +
)} diff --git a/react/features/speaker-stats/components/web/SpeakerStatsLabels.js b/react/features/speaker-stats/components/web/SpeakerStatsLabels.js index e681c2bae5..e04f4aa897 100644 --- a/react/features/speaker-stats/components/web/SpeakerStatsLabels.js +++ b/react/features/speaker-stats/components/web/SpeakerStatsLabels.js @@ -4,7 +4,7 @@ import React from 'react'; import { useTranslation } from 'react-i18next'; import { Tooltip } from '../../../base/tooltip'; -import { FACIAL_EXPRESSION_EMOJIS } from '../../../facial-recognition/constants.js'; +import { FACE_EXPRESSIONS_EMOJIS } from '../../../face-landmarks/constants.js'; const useStyles = makeStyles(theme => { return { @@ -26,15 +26,15 @@ const useStyles = makeStyles(theme => { type Props = { /** - * True if the facial recognition is not disabled. + * True if the face expressions detection is not disabled. */ - showFacialExpressions: boolean, + showFaceExpressions: boolean, }; const SpeakerStatsLabels = (props: Props) => { const { t } = useTranslation(); const classes = useStyles(); - const FacialExpressionsLabels = () => Object.keys(FACIAL_EXPRESSION_EMOJIS).map( + const FaceExpressionsLabels = () => Object.keys(FACE_EXPRESSIONS_EMOJIS).map( expression => (
{ content = { t(`speakerStats.${expression}`) } position = { 'top' } >
- { FACIAL_EXPRESSION_EMOJIS[expression] } + { FACE_EXPRESSIONS_EMOJIS[expression] }
@@ -51,7 +51,7 @@ const SpeakerStatsLabels = (props: Props) => { ) ); const nameTimeClass = `name-time${ - props.showFacialExpressions ? ' name-time_expressions-on' : '' + props.showFaceExpressions ? ' name-time_expressions-on' : '' }`; return ( @@ -67,9 +67,9 @@ const SpeakerStatsLabels = (props: Props) => {
{ - props.showFacialExpressions + props.showFaceExpressions &&
- +
} diff --git a/react/features/speaker-stats/reducer.js b/react/features/speaker-stats/reducer.js index 3df0f6bd2d..a98fd1851a 100644 --- a/react/features/speaker-stats/reducer.js +++ b/react/features/speaker-stats/reducer.js @@ -9,7 +9,7 @@ import { UPDATE_STATS, INIT_REORDER_STATS, RESET_SEARCH_CRITERIA, - TOGGLE_FACIAL_EXPRESSIONS + TOGGLE_FACE_EXPRESSIONS } from './actionTypes'; /** @@ -22,7 +22,7 @@ const INITIAL_STATE = { isOpen: false, pendingReorder: true, criteria: null, - showFacialExpressions: false + showFaceExpressions: false }; ReducerRegistry.register('features/speaker-stats', (state = _getInitialState(), action) => { @@ -35,10 +35,10 @@ ReducerRegistry.register('features/speaker-stats', (state = _getInitialState(), return _initReorderStats(state); case RESET_SEARCH_CRITERIA: return _updateCriteria(state, { criteria: null }); - case TOGGLE_FACIAL_EXPRESSIONS: { + case TOGGLE_FACE_EXPRESSIONS: { return { ...state, - showFacialExpressions: !state.showFacialExpressions + showFaceExpressions: !state.showFaceExpressions }; } } diff --git a/resources/prosody-plugins/mod_speakerstats_component.lua b/resources/prosody-plugins/mod_speakerstats_component.lua index a832046e27..4bb326d11e 100644 --- a/resources/prosody-plugins/mod_speakerstats_component.lua +++ b/resources/prosody-plugins/mod_speakerstats_component.lua @@ -77,10 +77,10 @@ function on_message(event) room.speakerStats['dominantSpeakerId'] = occupant.jid; end - local facialExpression = event.stanza:get_child('facialExpression', 'http://jitsi.org/jitmeet'); + local faceExpression = event.stanza:get_child('faceExpression', 'http://jitsi.org/jitmeet'); - if facialExpression then - local roomAddress = facialExpression.attr.room; + if faceExpression then + local roomAddress = faceExpression.attr.room; local room = get_room_from_jid(room_jid_match_rewrite(roomAddress)); if not room then @@ -98,9 +98,9 @@ function on_message(event) log("warn", "No occupant %s found for %s", from, roomAddress); return false; end - local facialExpressions = room.speakerStats[occupant.jid].facialExpressions; - facialExpressions[facialExpression.attr.expression] = - facialExpressions[facialExpression.attr.expression] + tonumber(facialExpression.attr.duration); + local faceExpressions = room.speakerStats[occupant.jid].faceExpressions; + faceExpressions[faceExpression.attr.expression] = + faceExpressions[faceExpression.attr.expression] + tonumber(faceExpression.attr.duration); end return true @@ -117,7 +117,7 @@ function new_SpeakerStats(nick, context_user) nick = nick; context_user = context_user; displayName = nil; - facialExpressions = { + faceExpressions = { happy = 0, neutral = 0, surprised = 0, @@ -186,9 +186,9 @@ function occupant_joined(event) -- and skip focus if sneaked into the table if values.nick ~= nil and values.nick ~= 'focus' then local totalDominantSpeakerTime = values.totalDominantSpeakerTime; - local facialExpressions = values.facialExpressions; + local faceExpressions = values.faceExpressions; if totalDominantSpeakerTime > 0 or room:get_occupant_jid(jid) == nil or values:isDominantSpeaker() - or get_participant_expressions_count(facialExpressions) > 0 then + or get_participant_expressions_count(faceExpressions) > 0 then -- before sending we need to calculate current dominant speaker state if values:isDominantSpeaker() then local timeElapsed = math.floor(socket.gettime()*1000 - values._dominantSpeakerStart); @@ -198,7 +198,7 @@ function occupant_joined(event) users_json[values.nick] = { displayName = values.displayName, 
totalDominantSpeakerTime = totalDominantSpeakerTime, - facialExpressions = facialExpressions + faceExpressions = faceExpressions }; end end @@ -285,9 +285,9 @@ else process_host(muc_component_host); end -function get_participant_expressions_count(facialExpressions) +function get_participant_expressions_count(faceExpressions) local count = 0; - for expression, value in pairs(facialExpressions) do + for expression, value in pairs(faceExpressions) do count = count + value; end diff --git a/webpack.config.js b/webpack.config.js index bdaf1d0fa0..e12a7c8849 100644 --- a/webpack.config.js +++ b/webpack.config.js @@ -386,11 +386,11 @@ module.exports = (_env, argv) => { }), Object.assign({}, config, { entry: { - 'facial-expressions-worker': './react/features/facial-recognition/facialExpressionsWorker.js' + 'face-landmarks-worker': './react/features/face-landmarks/faceLandmarksWorker.js' }, plugins: [ ...config.plugins, - ...getBundleAnalyzerPlugin(analyzeBundle, 'facial-expressions-worker') + ...getBundleAnalyzerPlugin(analyzeBundle, 'face-landmarks-worker') ], performance: getPerformanceHints(perfHintOptions, 1024 * 1024 * 1.5) })
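
Note for deployers picking up this change: the three old top-level options (`enableFacialRecognition`, `enableDisplayFacialExpressions`, `faceCoordinatesSharing`) are consolidated into the single whitelisted `faceLandmarks` object. A minimal migration sketch for a custom config.js override, using only option names that appear in this patch (the values are illustrative, not defaults):

```js
// Before this patch:
// config.enableFacialRecognition = true;
// config.enableDisplayFacialExpressions = true;
// config.faceCoordinatesSharing = { enabled: true, threshold: 10, captureInterval: 100 };

// After this patch, everything lives under one key:
config.faceLandmarks = {
    enableFaceCentering: true,            // was faceCoordinatesSharing.enabled
    enableFaceExpressionsDetection: true, // was enableFacialRecognition
    enableDisplayFaceExpressions: true,   // was enableDisplayFacialExpressions
    faceCenteringThreshold: 10,           // was faceCoordinatesSharing.threshold
    captureInterval: 100                  // same name and meaning as before
};
```

Self-hosted deployments that copy bundles by hand should also note the renamed worker artifact: the Makefile now deploys `face-landmarks-worker.min.js` (built from the `face-landmarks-worker` webpack entry) in place of `facial-expressions-worker.min.js`.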