mirror of
https://gitcode.com/GitHub_Trending/ji/jitsi-meet.git
synced 2025-12-30 03:12:29 +00:00
* Initial implementation; Happy flow * Maybe revert this * Functional prototype * feat(facial-expressions): get stream when changing background effect and use presenter effect with camera * add(facial-expressions): array that stores the expressions durin the meeting * refactor(facial-expressions): capture imagebitmap from stream with imagecapture api * add(speaker-stats): expression label * fix(facial-expression): expression store * revert: expression leabel on speaker stats * add(facial-expressions): broadcast of expression when it changes * feat: facial expression handling on prosody * fix(facial-expressions): get the right track when opening and closing camera * add(speaker-stats): facial expression column * fix(facial-expressions): allow to start facial recognition only after joining conference * fix(mod_speakerstats_component): storing last emotion in speaker stats component and sending it * chore(facial-expressions): change detection from 2000ms to 1000ms * add(facial-expressions): send expression to server when there is only one participant * feat(facial-expressions): store expresions as a timeline * feat(mod_speakerstats_component): store facial expresions as a timeline * fix(facial-expressions): stop facial recognition only when muting video track * fix(facial-expressions): presenter mode get right track to detect face * add: polyfils for image capture for firefox and safari * refactor(facial-expressions): store expressions by counting them in a map * chore(facial-expressions): remove manually assigning the backend for tenserflowjs * feat(facial-expressions): move face-api from main thread to web worker * fix(facial-expressions): make feature work on firefox and safari * feat(facial-expressions): camera time tracker * feat(facial-expressions): camera time tracker in prosody * add(facial-expressions): expressions time as TimeElapsed object in speaker stats * fix(facial-expresions): lower the frequency of detection when tf uses cpu backend * 
add(facial-expressions): duration to the expression and send it with durantion when it is done * fix(facial-expressions): prosody speaker stats covert fro string to number and bool values set by xmpp * refactor(facial-expressions): change expressions labels from text to emoji * refactor(facial-expressions): remove camera time tracker * add(facial-expressions): detection time interval * chore(facial-expressions): add docs and minor refactor of the code * refactor(facial-expressions): put timeout in worker and remove set interval in main thread * feat(facial-expressions): disable feature in the config * add(facial-expressions): tooltips of labels in speaker stats * refactor(facial-expressions): send facial expressions function and remove some unused functions and console logs * refactor(facial-expressions): rename action type when a change is done to the track by the virtual backgrounds to be used in facial expressions middleware * chore(facial-expressions): order imports and format some code * fix(facial-expressions): rebase issues with newer master * fix(facial-expressions): package-lock.json * fix(facial-expression): add commented default value of disableFacialRecognition flag and short description * fix(facial-expressions): change disableFacialRecognition to enableFacialRecognition flag in config * fix: resources load-test package-lock.json * fix(facial-expressions): set and get facial expressions only if facial recognition enabled * add: facial recognition resources folder in .eslintignore * chore: package-lock update * fix: package-lock.json * fix(facial-expressions): gpu memory leak in the web worker * fix(facial-expressions): set cpu time interval for detection to 6000ms * chore(speaker-stats): fix indentation * chore(facial-expressions): remove empty lines between comments and type declarations * fix(facial-expressions): remove camera timetracker * fix(facial-expressions): remove facialRecognitionAllowed flag * fix(facial-expressions): remove sending 
interval time to worker * refactor(facial-expression): middleware * fix(facial-expression): end tensor scope after setting backend * fix(facial-expressions): sending info back to worker only on facial expression message * fix: lint errors * refactor(facial-expressions): bundle web worker using webpack * fix: deploy-facial-expressions command in makefile * chore: fix load test package-lock.json and package.json * chore: sync package-lock.json Co-authored-by: Mihai-Andrei Uscat <mihai.uscat@8x8.com>
284 lines
8.1 KiB
JavaScript
// @flow
|
|
|
|
import React, { Component } from 'react';
|
|
import type { Dispatch } from 'redux';
|
|
|
|
import { Dialog } from '../../base/dialog';
|
|
import { translate } from '../../base/i18n';
|
|
import { getLocalParticipant } from '../../base/participants';
|
|
import { connect } from '../../base/redux';
|
|
import { escapeRegexp } from '../../base/util';
|
|
import { initUpdateStats, initSearch } from '../actions';
|
|
import { SPEAKER_STATS_RELOAD_INTERVAL } from '../constants';
|
|
import { getSpeakerStats, getSearchCriteria } from '../functions';
|
|
|
|
import SpeakerStatsItem from './SpeakerStatsItem';
|
|
import SpeakerStatsLabels from './SpeakerStatsLabels';
|
|
import SpeakerStatsSearch from './SpeakerStatsSearch';
|
|
|
|
declare var interfaceConfig: Object;
|
|
|
|
declare var APP;
|
|
|
|
/**
|
|
* The type of the React {@code Component} props of {@link SpeakerStats}.
|
|
*/
|
|
type Props = {
|
|
|
|
/**
|
|
* The display name for the local participant obtained from the redux store.
|
|
*/
|
|
_localDisplayName: string,
|
|
|
|
/**
|
|
* The flag which shows if the facial recognition is enabled, obtained from the redux store.
|
|
* if enabled facial expressions are shown
|
|
*/
|
|
_enableFacialRecognition: boolean,
|
|
|
|
/**
|
|
* The facial expressions for the local participant obtained from the redux store.
|
|
*/
|
|
_localFacialExpressions: Array<Object>,
|
|
|
|
/**
|
|
* The flag which shows if all the facial expressions are shown or only 4
|
|
* if true show only 4, if false show all
|
|
*/
|
|
_reduceExpressions: boolean,
|
|
|
|
/**
|
|
* The speaker paricipant stats.
|
|
*/
|
|
_stats: Object,
|
|
|
|
/**
|
|
* The search criteria.
|
|
*/
|
|
_criteria: string | null,
|
|
|
|
/**
|
|
* The JitsiConference from which stats will be pulled.
|
|
*/
|
|
conference: Object,
|
|
|
|
/**
|
|
* Redux store dispatch method.
|
|
*/
|
|
dispatch: Dispatch<any>,
|
|
|
|
/**
|
|
* The function to translate human-readable text.
|
|
*/
|
|
t: Function,
|
|
stats: Object,
|
|
|
|
lastFacialExpression: string,
|
|
};
|
|
|
|
/**
|
|
* React component for displaying a list of speaker stats.
|
|
*
|
|
* @augments Component
|
|
*/
|
|
class SpeakerStats extends Component<Props> {
|
|
_updateInterval: IntervalID;
|
|
|
|
/**
|
|
* Initializes a new SpeakerStats instance.
|
|
*
|
|
* @param {Object} props - The read-only React Component props with which
|
|
* the new instance is to be initialized.
|
|
*/
|
|
constructor(props) {
|
|
super(props);
|
|
|
|
// Bind event handlers so they are only bound once per instance.
|
|
this._updateStats = this._updateStats.bind(this);
|
|
this._onSearch = this._onSearch.bind(this);
|
|
|
|
this._updateStats();
|
|
}
|
|
|
|
/**
|
|
* Begin polling for speaker stats updates.
|
|
*
|
|
* @inheritdoc
|
|
*/
|
|
componentDidMount() {
|
|
this._updateInterval = setInterval(() => this._updateStats(), SPEAKER_STATS_RELOAD_INTERVAL);
|
|
}
|
|
|
|
/**
|
|
* Stop polling for speaker stats updates.
|
|
*
|
|
* @inheritdoc
|
|
* @returns {void}
|
|
*/
|
|
componentWillUnmount() {
|
|
clearInterval(this._updateInterval);
|
|
}
|
|
|
|
/**
|
|
* Implements React's {@link Component#render()}.
|
|
*
|
|
* @inheritdoc
|
|
* @returns {ReactElement}
|
|
*/
|
|
render() {
|
|
const userIds = Object.keys(this.props._stats);
|
|
const items = userIds.map(userId => this._createStatsItem(userId));
|
|
|
|
return (
|
|
<Dialog
|
|
cancelKey = 'dialog.close'
|
|
submitDisabled = { true }
|
|
titleKey = 'speakerStats.speakerStats'
|
|
width = { this.props._enableFacialRecognition ? 'large' : 'medium' }>
|
|
<div className = 'speaker-stats'>
|
|
<SpeakerStatsSearch onSearch = { this._onSearch } />
|
|
<SpeakerStatsLabels
|
|
reduceExpressions = { this.props._reduceExpressions }
|
|
showFacialExpressions = { this.props._enableFacialRecognition } />
|
|
{ items }
|
|
</div>
|
|
</Dialog>
|
|
);
|
|
}
|
|
|
|
/**
|
|
* Create a SpeakerStatsItem instance for the passed in user id.
|
|
*
|
|
* @param {string} userId - User id used to look up the associated
|
|
* speaker stats from the jitsi library.
|
|
* @returns {SpeakerStatsItem|null}
|
|
* @private
|
|
*/
|
|
_createStatsItem(userId) {
|
|
const statsModel = this.props._stats[userId];
|
|
|
|
if (!statsModel || statsModel.hidden) {
|
|
return null;
|
|
}
|
|
|
|
const isDominantSpeaker = statsModel.isDominantSpeaker();
|
|
const dominantSpeakerTime = statsModel.getTotalDominantSpeakerTime();
|
|
const hasLeft = statsModel.hasLeft();
|
|
let facialExpressions;
|
|
|
|
if (this.props._enableFacialRecognition) {
|
|
facialExpressions = statsModel.getFacialExpressions();
|
|
}
|
|
|
|
return (
|
|
<SpeakerStatsItem
|
|
displayName = { statsModel.getDisplayName() }
|
|
dominantSpeakerTime = { dominantSpeakerTime }
|
|
facialExpressions = { facialExpressions }
|
|
hasLeft = { hasLeft }
|
|
isDominantSpeaker = { isDominantSpeaker }
|
|
key = { userId }
|
|
reduceExpressions = { this.props._reduceExpressions }
|
|
showFacialExpressions = { this.props._enableFacialRecognition } />
|
|
);
|
|
}
|
|
|
|
_onSearch: () => void;
|
|
|
|
/**
|
|
* Search the existing participants by name.
|
|
*
|
|
* @returns {void}
|
|
* @param {string} criteria - The search parameter.
|
|
* @protected
|
|
*/
|
|
_onSearch(criteria = '') {
|
|
this.props.dispatch(initSearch(escapeRegexp(criteria)));
|
|
}
|
|
|
|
_updateStats: () => void;
|
|
|
|
/**
|
|
* Update the internal state with the latest speaker stats.
|
|
*
|
|
* @returns {void}
|
|
* @private
|
|
*/
|
|
_updateStats() {
|
|
this.props.dispatch(initUpdateStats(() => this._getSpeakerStats()));
|
|
}
|
|
|
|
/**
|
|
* Update the internal state with the latest speaker stats.
|
|
*
|
|
* @returns {Object}
|
|
* @private
|
|
*/
|
|
_getSpeakerStats() {
|
|
const stats = { ...this.props.conference.getSpeakerStats() };
|
|
|
|
for (const userId in stats) {
|
|
if (stats[userId]) {
|
|
if (stats[userId].isLocalStats()) {
|
|
const { t } = this.props;
|
|
const meString = t('me');
|
|
|
|
stats[userId].setDisplayName(
|
|
this.props._localDisplayName
|
|
? `${this.props._localDisplayName} (${meString})`
|
|
: meString
|
|
);
|
|
if (this.props._enableFacialRecognition) {
|
|
stats[userId].setFacialExpressions(this.props._localFacialExpressions);
|
|
}
|
|
}
|
|
|
|
if (!stats[userId].getDisplayName()) {
|
|
stats[userId].setDisplayName(
|
|
interfaceConfig.DEFAULT_REMOTE_DISPLAY_NAME
|
|
);
|
|
}
|
|
}
|
|
}
|
|
|
|
return stats;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Maps (parts of) the redux state to the associated SpeakerStats's props.
|
|
*
|
|
* @param {Object} state - The redux state.
|
|
* @private
|
|
* @returns {{
|
|
* _localDisplayName: ?string,
|
|
* _stats: Object,
|
|
* _criteria: string,
|
|
* }}
|
|
*/
|
|
function _mapStateToProps(state) {
|
|
const localParticipant = getLocalParticipant(state);
|
|
const { enableFacialRecognition } = state['features/base/config'];
|
|
const { facialExpressions: localFacialExpressions } = state['features/facial-recognition'];
|
|
const { cameraTimeTracker: localCameraTimeTracker } = state['features/facial-recognition'];
|
|
const { clientWidth } = state['features/base/responsive-ui'];
|
|
|
|
return {
|
|
/**
|
|
* The local display name.
|
|
*
|
|
* @private
|
|
* @type {string|undefined}
|
|
*/
|
|
_localDisplayName: localParticipant && localParticipant.name,
|
|
_stats: getSpeakerStats(state),
|
|
_criteria: getSearchCriteria(state),
|
|
_enableFacialRecognition: enableFacialRecognition,
|
|
_localFacialExpressions: localFacialExpressions,
|
|
_localCameraTimeTracker: localCameraTimeTracker,
|
|
_reduceExpressions: clientWidth < 750
|
|
};
|
|
}
|
|
|
|
export default translate(connect(_mapStateToProps)(SpeakerStats));
|