jitsi-meet/react/features/speaker-stats/components/SpeakerStatsLabels.js
Gabriel Borlea 61684b1071 feat(facial-expressions): add the facial expressions feature and display them in speaker stats (#10006)
* Initial implementation; Happy flow

* Maybe revert this

* Functional prototype

* feat(facial-expressions): get stream when changing background effect and use presenter effect with camera

* add(facial-expressions): array that stores the expressions during the meeting

* refactor(facial-expressions): capture imagebitmap from stream with imagecapture api (see the sketch after this commit log)

* add(speaker-stats): expression label

* fix(facial-expression): expression store

* revert: expression label on speaker stats

* add(facial-expressions): broadcast of expression when it changes

* feat: facial expression handling on prosody

* fix(facial-expressions): get the right track when opening and closing camera

* add(speaker-stats): facial expression column

* fix(facial-expressions): allow to start facial recognition only after joining conference

* fix(mod_speakerstats_component): storing last emotion in speaker stats component and sending it

* chore(facial-expressions): change detection from 2000ms to 1000ms

* add(facial-expressions): send expression to server when there is only one participant

* feat(facial-expressions): store expressions as a timeline

* feat(mod_speakerstats_component): store facial expressions as a timeline

* fix(facial-expressions): stop facial recognition only when muting video track

* fix(facial-expressions): presenter mode get right track to detect face

* add: polyfills for image capture for firefox and safari

* refactor(facial-expressions): store expressions by counting them in a map

* chore(facial-expressions): remove manually assigning the backend for tensorflowjs

* feat(facial-expressions): move face-api from main thread to web worker

* fix(facial-expressions): make feature work on firefox and safari

* feat(facial-expressions): camera time tracker

* feat(facial-expressions): camera time tracker in prosody

* add(facial-expressions): expressions time as TimeElapsed object in speaker stats

* fix(facial-expressions): lower the frequency of detection when tf uses cpu backend

* add(facial-expressions): duration to the expression and send it with duration when it is done

* fix(facial-expressions): prosody speaker stats convert from string to number and bool values set by xmpp

* refactor(facial-expressions): change expressions labels from text to emoji

* refactor(facial-expressions): remove camera time tracker

* add(facial-expressions): detection time interval

* chore(facial-expressions): add docs and minor refactor of the code

* refactor(facial-expressions): put timeout in worker and remove set interval in main thread

* feat(facial-expressions): disable feature in the config

* add(facial-expressions): tooltips of labels in speaker stats

* refactor(facial-expressions): send facial expressions function and remove some unused functions and console logs

* refactor(facial-expressions): rename the action type dispatched when virtual backgrounds change the track, so it can be used in the facial expressions middleware

* chore(facial-expressions): order imports and format some code

* fix(facial-expressions): rebase issues with newer master

* fix(facial-expressions): package-lock.json

* fix(facial-expression): add commented default value of disableFacialRecognition flag and short description

* fix(facial-expressions): change disableFacialRecognition to enableFacialRecognition flag in config

* fix: resources load-test package-lock.json

* fix(facial-expressions): set and get facial expressions only if facial recognition enabled

* add: facial recognition resources folder in .eslintignore

* chore: package-lock update

* fix: package-lock.json

* fix(facial-expressions): gpu memory leak in the web worker

* fix(facial-expressions): set cpu time interval for detection to 6000ms

* chore(speaker-stats): fix indentation

* chore(facial-expressions): remove empty lines between comments and type declarations

* fix(facial-expressions): remove camera time tracker

* fix(facial-expressions): remove facialRecognitionAllowed flag

* fix(facial-expressions): remove sending interval time to worker

* refactor(facial-expression): middleware

* fix(facial-expression): end tensor scope after setting backend

* fix(facial-expressions): sending info back to worker only on facial expression message

* fix: lint errors

* refactor(facial-expressions): bundle web worker using webpack

* fix: deploy-facial-expressions command in makefile

* chore: fix load test package-lock.json and package.json

* chore: sync package-lock.json

Co-authored-by: Mihai-Andrei Uscat <mihai.uscat@8x8.com>
2021-11-17 16:33:03 +02:00
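For orientation, the flow those commits describe is: grab an ImageBitmap from the local video track with the ImageCapture API (polyfilled on Firefox and Safari), transfer it to a web worker, and let face-api.js classify the expression there so the main thread stays responsive. A minimal sketch, assuming a hypothetical worker file name and message shape (not the repo's actual API):

// main thread: capture frames and hand them to the detection worker
// ('stream' is the local MediaStream, assumed to be in scope)
const track = stream.getVideoTracks()[0];
const imageCapture = new ImageCapture(track);
const worker = new Worker('facial-expressions-worker.js');

async function sendFrame() {
    // grabFrame() resolves with an ImageBitmap of the current video frame;
    // ImageBitmaps are transferable, so they move to the worker without a copy
    const image = await imageCapture.grabFrame();

    worker.postMessage({ type: 'DETECT', image }, [ image ]);
}

worker.onmessage = ({ data }) => {
    if (data.type === 'RESULT') {
        // request the next frame only after the previous detection finishes;
        // the worker can stretch this cadence with its own timeout, as in the
        // "put timeout in worker" commit above
        console.log('detected expression:', data.expression);
        sendFrame();
    }
};

// facial-expressions-worker.js: run face-api.js off the main thread
// (assumes the face-api script and its tiny face detector model have
// already been loaded in the worker, e.g. via importScripts)
onmessage = async ({ data }) => {
    if (data.type !== 'DETECT') {
        return;
    }

    const detection = await faceapi
        .detectSingleFace(data.image, new faceapi.TinyFaceDetectorOptions())
        .withFaceExpressions();

    if (detection) {
        // detection.expressions maps each expression class to a probability;
        // report the most likely one back to the main thread
        const [ expression ] = Object.entries(detection.expressions)
            .reduce((max, entry) => (entry[1] > max[1] ? entry : max));

        postMessage({ type: 'RESULT', expression });
    }
};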


/* @flow */

import React, { Component } from 'react';

import { translate } from '../../base/i18n';
import { Tooltip } from '../../base/tooltip';
import { FACIAL_EXPRESSION_EMOJIS } from '../../facial-recognition/constants.js';

/**
 * The type of the React {@code Component} props of {@link SpeakerStatsLabels}.
 */
type Props = {

    /**
     * True if the client width is less than 750.
     */
    reduceExpressions: boolean,

    /**
     * True if facial recognition is not disabled.
     */
    showFacialExpressions: boolean,

    /**
     * The function to translate human-readable text.
     */
    t: Function,
};

/**
 * React component for labeling speaker stats column items.
 *
 * @augments Component
 */
class SpeakerStatsLabels extends Component<Props> {
    /**
     * Implements React's {@link Component#render()}.
     *
     * @inheritdoc
     * @returns {ReactElement}
     */
    render() {
        const { t } = this.props;

        return (
            <div className = 'speaker-stats-item__labels'>
                <div className = 'speaker-stats-item__status' />
                <div
                    className = { `speaker-stats-item__name${
                        this.props.showFacialExpressions ? '_expressions_on' : ''
                    }` }>
                    { t('speakerStats.name') }
                </div>
                <div
                    className = { `speaker-stats-item__time${
                        this.props.showFacialExpressions ? '_expressions_on' : ''
                    }` }>
                    { t('speakerStats.speakerTime') }
                </div>
                {this.props.showFacialExpressions
                    && (this.props.reduceExpressions

                        // On narrow clients, drop the negative expressions to save space.
                        ? Object.keys(FACIAL_EXPRESSION_EMOJIS)
                            .filter(expression => ![ 'angry', 'fearful', 'disgusted' ].includes(expression))
                        : Object.keys(FACIAL_EXPRESSION_EMOJIS)
                    ).map(
                        expression => (
                            <div
                                className = 'speaker-stats-item__expression'
                                key = { expression }>
                                <Tooltip
                                    content = { t(`speakerStats.${expression}`) }
                                    position = { 'top' }>
                                    <div
                                        // eslint-disable-next-line react-native/no-inline-styles
                                        style = {{ fontSize: 17 }}>
                                        { FACIAL_EXPRESSION_EMOJIS[expression] }
                                    </div>
                                </Tooltip>
                            </div>
                        ))
                }
            </div>
        );
    }
}

export default translate(SpeakerStatsLabels);
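The emoji column headers above are keyed off FACIAL_EXPRESSION_EMOJIS from ../../facial-recognition/constants.js. A plausible shape for that constant, assuming the seven expression classes face-api.js reports (the specific emoji are illustrative, not copied from the repo):

// hypothetical reconstruction of facial-recognition/constants.js;
// the keys match the expression classes produced by face-api.js
export const FACIAL_EXPRESSION_EMOJIS = {
    happy: '😊',
    neutral: '😐',
    surprised: '😮',
    sad: '🙁',
    angry: '😠',
    fearful: '😨',
    disgusted: '🤢'
};

With reduceExpressions set (client width under 750), the render above filters out angry, fearful and disgusted, leaving four expression columns on narrow screens.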