feat(settings): Add advanced audio settings checkboxes (#16316)

* Add checkboxes to toggle audio settings

* Sync checkboxes with audio mixer effect

* Add tooltips

* Move previewAudioTrack to redux

* Add translation

* Add audio settings state to redux

* Update docs

* Apply review comments

* Create local track with audio constraints when unmuting

* Refactor functions and naming

* Add enableAdvancedAudioSettings config

* Fix mobile imports

* Add tooltips content

* Update react/features/base/config/functions.any.ts

* Layout checkboxes in a two-column grid

* Fix web imports

* Sort translation alphabetically

* Separate audio mute implementation for mobile and web

* Apply review comments

* squash: Add imports for middleware.any

* squash: fix linter errors

* Remove tooltips

* Lint

* Refactor setting of audio constraints in createLocalTracksF with checks for feature flag and desktop

---------

Co-authored-by: Jaya Allamsetty <54324652+jallamsetty1@users.noreply.github.com>
Co-authored-by: Jaya Allamsetty <jaya.allamsetty@8x8.com>
Author: Hugo Lavernhe
Date: 2025-09-05 22:52:35 +02:00
Committed by: GitHub
Parent: 9252bbb036
Commit: 1e15d9421b
21 changed files with 516 additions and 84 deletions
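
The commit gates everything behind a new `audioQuality.enableAdvancedAudioSettings` option and accepts default constraints under `constraints.audio`. A hedged sketch of the relevant config.js keys (the key names come from the IConfig changes below; the concrete values are illustrative assumptions):

```ts
// Sketch of the config.js keys touched by this commit (values are illustrative).
const configSketch = {
    audioQuality: {
        enableAdvancedAudioSettings: true, // gates the new checkboxes; ignored on WebKit-based browsers
        stereo: false
    },
    constraints: {
        audio: {
            autoGainControl: true,
            channelCount: 1,
            echoCancellation: true,
            noiseSuppression: true
        }
    }
};
```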

View File

@@ -134,6 +134,7 @@ import {
isLocalTrackMuted,
isUserInteractionRequiredForUnmute
} from './react/features/base/tracks/functions';
import { getLocalJitsiAudioTrackSettings } from './react/features/base/tracks/functions.web';
import { downloadJSON } from './react/features/base/util/downloadJSON';
import { getJitsiMeetGlobalNSConnectionTimes } from './react/features/base/util/helpers';
import { openLeaveReasonDialog } from './react/features/conference/actions.web';
@@ -158,6 +159,7 @@ import { disableReceiver, stopReceiver } from './react/features/remote-control/a
import { setScreenAudioShareState } from './react/features/screen-share/actions.web';
import { isScreenAudioShared } from './react/features/screen-share/functions';
import { toggleScreenshotCaptureSummary } from './react/features/screenshot-capture/actions';
import { setAudioSettings } from './react/features/settings/actions.web';
import { AudioMixerEffect } from './react/features/stream-effects/audio-mixer/AudioMixerEffect';
import { createRnnoiseProcessor } from './react/features/stream-effects/rnnoise';
import { handleToggleVideoMuted } from './react/features/toolbox/actions.any';
@@ -566,7 +568,15 @@ export default {
if (browser.isWebKitBased()) {
this.muteAudio(true, true);
} else {
localTracks = localTracks.filter(track => track.getType() !== MEDIA_TYPE.AUDIO);
localTracks = localTracks.filter(track => {
if (track.getType() === MEDIA_TYPE.AUDIO) {
track.stopStream();
return false;
}
return true;
});
}
}
@@ -1763,7 +1773,11 @@ export default {
return this.useAudioStream(stream);
})
.then(() => {
const localAudio = getLocalJitsiAudioTrack(APP.store.getState());
const state = APP.store.getState();
const localAudio = getLocalJitsiAudioTrack(state);
const settings = getLocalJitsiAudioTrackSettings(state);
APP.store.dispatch(setAudioSettings(settings));
if (localAudio && isDefaultMicSelected) {
// workaround for the default device to be shown as selected in the

View File

@@ -1380,6 +1380,20 @@
"videounmute": "Start camera"
},
"addPeople": "Add people to your call",
"advancedAudioSettings": {
"aec": {
"label": "Acoustic echo cancellation"
},
"agc": {
"label": "Automatic gain control"
},
"ns": {
"label": "Noise suppression"
},
"stereo": {
"label": "Stereo"
}
},
"audioOnlyOff": "Disable low bandwidth mode",
"audioOnlyOn": "Enable low bandwidth mode",
"audioRoute": "Select the sound device",

View File

@@ -1,5 +1,6 @@
import { ToolbarButton } from '../../toolbox/types';
import { ILoggingConfig } from '../logging/types';
import { IAudioSettings } from '../settings/reducer';
import { DesktopSharingSourceType } from '../tracks/types';
type ButtonsWithNotifyClick = 'camera' |
@@ -191,6 +192,7 @@ export interface IConfig {
appId?: string;
audioLevelsInterval?: number;
audioQuality?: {
enableAdvancedAudioSettings?: boolean;
opusMaxAverageBitrate?: number | null;
stereo?: boolean;
};
@@ -237,6 +239,7 @@ export interface IConfig {
inactiveDisabled?: boolean;
};
constraints?: {
audio?: IAudioSettings;
video?: {
height?: {
ideal?: number;

View File

@@ -6,6 +6,7 @@ import { safeJsonParse } from '@jitsi/js-utils/json';
import { isEmpty, mergeWith, pick } from 'lodash-es';
import { IReduxState } from '../../app/types';
import { browser } from '../lib-jitsi-meet';
import { getLocalParticipant } from '../participants/functions';
import { isEmbedded } from '../util/embedUtils';
import { parseURLParams } from '../util/parseURLParams';
@@ -256,6 +257,17 @@ export function isDisplayNameVisible(state: IReduxState): boolean {
return !state['features/base/config'].hideDisplayName;
}
/**
* Selector for determining if the advanced audio settings are enabled.
*
* @param {Object} state - The state of the app.
* @returns {boolean}
*/
export function isAdvancedAudioSettingsEnabled(state: IReduxState): boolean {
return !browser.isWebKitBased() && Boolean(state['features/base/config']?.audioQuality?.enableAdvancedAudioSettings);
}
/**
* Restores a Jitsi Meet config.js from {@code localStorage} if it was
* previously downloaded from a specific {@code baseURL} and stored with

View File

@@ -51,8 +51,15 @@ const DEFAULT_STATE: ISettingsState = {
userSelectedMicDeviceLabel: undefined
};
export interface IAudioSettings {
autoGainControl?: boolean;
channelCount?: 1 | 2;
echoCancellation?: boolean;
noiseSuppression?: boolean;
}
export interface ISettingsState {
audioOutputDeviceId?: string;
audioSettings?: IAudioSettings;
audioSettingsVisible?: boolean;
avatarURL?: string;
cameraDeviceId?: string | boolean;
@@ -66,6 +73,7 @@ export interface ISettingsState {
localFlipX?: boolean;
maxStageParticipants?: number;
micDeviceId?: string | boolean;
previewAudioTrack?: any | null;
serverURL?: string;
showSubtitlesOnStage?: boolean;
soundsIncomingMessage?: boolean;

View File

@@ -164,6 +164,7 @@ export function createLocalTracksA(options: ITrackOptions = {}) {
= createLocalTracksF(
{
cameraDeviceId: options.cameraDeviceId,
constraints: options?.constraints,
devices: [ device ],
facingMode:
options.facingMode || getCameraFacingMode(state),

View File

@@ -12,6 +12,7 @@ import { setScreenAudioShareState, setScreenshareAudioTrack } from '../../screen
import { isAudioOnlySharing, isScreenVideoShared } from '../../screen-share/functions';
import { toggleScreenshotCaptureSummary } from '../../screenshot-capture/actions';
import { isScreenshotCaptureEnabled } from '../../screenshot-capture/functions';
import { setAudioSettings } from '../../settings/actions.web';
import { AudioMixerEffect } from '../../stream-effects/audio-mixer/AudioMixerEffect';
import { getCurrentConference } from '../conference/functions';
import { notifyCameraError, notifyMicError } from '../devices/actions.web';
@@ -27,6 +28,7 @@ import {
} from '../media/constants';
import { IGUMPendingState } from '../media/types';
import { updateSettings } from '../settings/actions';
import { IAudioSettings } from '../settings/reducer';
import { addLocalTrack, replaceLocalTrack } from './actions.any';
import AllowToggleCameraDialog from './components/web/AllowToggleCameraDialog';
@@ -37,6 +39,7 @@ import {
getLocalVideoTrack,
isToggleCameraEnabled
} from './functions';
import { applyAudioConstraints, getLocalJitsiAudioTrackSettings } from './functions.web';
import logger from './logger';
import { ICreateInitialTracksOptions, IInitialTracksErrors, IShareOptions, IToggleScreenSharingOptions } from './types';
@@ -329,7 +332,6 @@ export function setGUMPendingStateOnFailedTracks(tracks: Array<any>, dispatch: I
export function createAndAddInitialAVTracks(devices: Array<MediaType>) {
return async (dispatch: IStore['dispatch']) => {
dispatch(gumPending(devices, IGUMPendingState.PENDING_UNMUTE));
const { tracks, errors } = await dispatch(createInitialAVTracks({ devices }));
setGUMPendingStateOnFailedTracks(tracks, dispatch);
@@ -541,3 +543,21 @@ export function toggleCamera() {
await dispatch(replaceLocalTrack(null, newVideoTrack));
};
}
/**
* Applies the given audio settings to the local audio track and syncs them to redux.
*
* @param {IAudioSettings} settings - The settings to apply.
* @returns {Function}
*/
export function toggleUpdateAudioSettings(settings: IAudioSettings) {
return async (dispatch: IStore['dispatch'], getState: IStore['getState']) => {
const state = getState();
await applyAudioConstraints(state, settings);
const updatedSettings = getLocalJitsiAudioTrackSettings(state) as IAudioSettings;
dispatch(setAudioSettings(updatedSettings));
};
}
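
A minimal usage sketch for the new thunk; the dispatch site and the concrete values are assumptions, while the settings shape follows IAudioSettings:

```ts
// Sketch: apply new constraints to the local mic and sync the result back to redux.
// `dispatch` is assumed to be the app's redux dispatch (IStore['dispatch']).
dispatch(toggleUpdateAudioSettings({
    autoGainControl: true,
    channelCount: 2, // stereo
    echoCancellation: false,
    noiseSuppression: true
}));
```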

View File

@@ -201,6 +201,45 @@ export function getLocalJitsiAudioTrack(state: IReduxState) {
return track?.jitsiTrack;
}
/**
* Returns audio settings from the local Jitsi audio track.
*
* @param {IReduxState} state - The Redux state.
* @returns {IAudioSettings} The extracted audio settings.
*/
export function getLocalJitsiAudioTrackSettings(state: IReduxState) {
const jitsiTrack = getLocalJitsiAudioTrack(state);
if (!jitsiTrack) {
const config = state['features/base/config'];
const disableAP = Boolean(config?.disableAP);
const disableAGC = Boolean(config?.disableAGC);
const disableAEC = Boolean(config?.disableAEC);
const disableNS = Boolean(config?.disableNS);
const stereo = Boolean(config?.audioQuality?.stereo);
return {
autoGainControl: !disableAP && !disableAGC,
channelCount: stereo ? 2 : 1,
echoCancellation: !disableAP && !disableAEC,
noiseSuppression: !disableAP && !disableNS
};
}
const hasAudioMixerEffect = Boolean(typeof jitsiTrack._streamEffect?.setMuted === 'function' && jitsiTrack._streamEffect?._originalTrack);
const track = hasAudioMixerEffect ? jitsiTrack._streamEffect._originalTrack : jitsiTrack.getTrack();
const { autoGainControl, channelCount, echoCancellation, noiseSuppression } = track.getSettings();
return {
autoGainControl,
channelCount,
echoCancellation,
noiseSuppression
};
}
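
When there is no local audio track yet, the fallback branch above derives the settings from the config flags; a worked example of that mapping with illustrative flag values:

```ts
// Assuming disableAP = false, disableAGC = true, disableAEC = false, disableNS = false
// and audioQuality.stereo = true, the fallback branch returns:
const fallbackSettings = {
    autoGainControl: false, // !disableAP && !disableAGC -> false because AGC is disabled
    channelCount: 2,        // stereo ? 2 : 1
    echoCancellation: true, // !disableAP && !disableAEC
    noiseSuppression: true  // !disableAP && !disableNS
};
```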
/**
* Returns track of specified media type for specified participant.
*

View File

@@ -1,5 +1,6 @@
import { IStore } from '../../app/types';
import { IStateful } from '../app/types';
import { isAdvancedAudioSettingsEnabled } from '../config/functions.any';
import { isMobileBrowser } from '../environment/utils';
import JitsiMeetJS, { JitsiTrackErrors, browser } from '../lib-jitsi-meet';
import { gumPending, setAudioMuted } from '../media/actions';
@@ -11,9 +12,10 @@ import {
getUserSelectedCameraDeviceId,
getUserSelectedMicDeviceId
} from '../settings/functions.web';
import { IAudioSettings } from '../settings/reducer';
import { getJitsiMeetGlobalNSConnectionTimes } from '../util/helpers';
import { getCameraFacingMode } from './functions.any';
import { getCameraFacingMode, getLocalJitsiAudioTrack, getLocalJitsiAudioTrackSettings } from './functions.any';
import loadEffects from './loadEffects';
import logger from './logger';
import { ITrackOptions } from './types';
@@ -62,7 +64,13 @@ export function createLocalTracksF(options: ITrackOptions = {}, store?: IStore,
desktopSharingFrameRate,
resolution
} = state['features/base/config'];
const constraints = options.constraints ?? state['features/base/config'].constraints;
const constraints = options.constraints ?? state['features/base/config'].constraints ?? {};
if (isAdvancedAudioSettingsEnabled(state) && typeof APP !== 'undefined') {
constraints.audio = state['features/settings'].audioSettings ?? getLocalJitsiAudioTrackSettings(state);
}
return (
loadEffects(store).then((effectsArray: Object[]) => {
@@ -214,3 +222,32 @@ export function isToggleCameraEnabled(stateful: IStateful) {
return isMobileBrowser() && Number(videoInput?.length) > 1;
}
/**
* Applies audio constraints to the local Jitsi audio track.
*
* @param {Function|Object} stateful - The redux store or {@code getState} function.
* @param {IAudioSettings} settings - The audio settings to apply.
* @returns {Promise<void>}
*/
export async function applyAudioConstraints(stateful: IStateful, settings: IAudioSettings) {
const state = toState(stateful);
const track = getLocalJitsiAudioTrack(state);
if (!track) {
logger.debug('No local audio track found');
return;
}
if (!isAdvancedAudioSettingsEnabled(state)) {
logger.debug('Advanced audio settings disabled');
return;
}
try {
await track.applyConstraints(settings);
} catch (error) {
logger.error('Failed to apply audio constraints ', error);
}
}
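
The `track.applyConstraints(settings)` call above presumably maps onto the standard MediaStreamTrack.applyConstraints(); a standalone sketch of that browser-level call (the MediaStream argument and values are assumptions, for illustration only):

```ts
// Plain WebRTC equivalent: reconfigure the capture track with the same IAudioSettings keys.
async function applyMicConstraints(stream: MediaStream): Promise<void> {
    const [ micTrack ] = stream.getAudioTracks();

    await micTrack.applyConstraints({
        autoGainControl: true,
        channelCount: 2,
        echoCancellation: true,
        noiseSuppression: true
    });
}
```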

View File

@@ -4,7 +4,6 @@ import { IStore } from '../../app/types';
import { _RESET_BREAKOUT_ROOMS } from '../../breakout-rooms/actionTypes';
import { getCurrentConference } from '../conference/functions';
import {
SET_AUDIO_MUTED,
SET_CAMERA_FACING_MODE,
SET_SCREENSHARE_MUTED,
SET_VIDEO_MUTED,
@@ -46,15 +45,6 @@ import './subscriber';
*/
MiddlewareRegistry.register(store => next => action => {
switch (action.type) {
case SET_AUDIO_MUTED:
if (!action.muted
&& isUserInteractionRequiredForUnmute(store.getState())) {
return;
}
_setMuted(store, action, MEDIA_TYPE.AUDIO);
break;
case SET_CAMERA_FACING_MODE: {
// XXX The camera facing mode of a MediaStreamTrack can be specified
// only at initialization time and then it can only be toggled. So in

View File

@@ -1,3 +1,5 @@
import { IStore } from '../../app/types';
import { SET_AUDIO_MUTED } from '../media/actionTypes';
import {
MEDIA_TYPE,
VIDEO_TYPE
@@ -8,8 +10,11 @@ import {
TRACK_UPDATED
} from './actionTypes';
import {
toggleScreensharing
createLocalTracksA,
toggleScreensharing,
trackMuteUnmuteFailed
} from './actions.native';
import { getLocalTrack, setTrackMuted } from './functions.any';
import './middleware.any';
@@ -23,11 +28,15 @@ import './middleware.any';
*/
MiddlewareRegistry.register(store => next => action => {
switch (action.type) {
case SET_AUDIO_MUTED: {
_setMuted(store, action);
break;
}
case TRACK_UPDATED: {
const { jitsiTrack, local } = action.track;
if (local && jitsiTrack.isMuted()
&& jitsiTrack.type === MEDIA_TYPE.VIDEO && jitsiTrack.videoType === VIDEO_TYPE.DESKTOP) {
&& jitsiTrack.type === MEDIA_TYPE.VIDEO && jitsiTrack.videoType === VIDEO_TYPE.DESKTOP) {
store.dispatch(toggleScreensharing(false));
}
break;
@@ -36,3 +45,32 @@ MiddlewareRegistry.register(store => next => action => {
return next(action);
});
/**
* Mutes or unmutes a local track with a specific media type.
*
* @param {Store} store - The redux store in which the specified action is dispatched.
* @param {Action} action - The redux action dispatched in the specified store.
* @private
* @returns {void}
*/
function _setMuted(store: IStore, { ensureTrack, muted }: {
ensureTrack: boolean; muted: boolean; }) {
const { dispatch, getState } = store;
const state = getState();
const localTrack = getLocalTrack(state['features/base/tracks'], MEDIA_TYPE.AUDIO, /* includePending */ true);
if (localTrack) {
// The `jitsiTrack` property will have a value only for a localTrack for which `getUserMedia` has already
// completed. If there's no `jitsiTrack`, then the `muted` state will be applied once the `jitsiTrack` is
// created.
const { jitsiTrack } = localTrack;
if (jitsiTrack) {
setTrackMuted(jitsiTrack, muted, state, dispatch)
.catch(() => dispatch(trackMuteUnmuteFailed(localTrack, muted)));
}
} else if (!muted && ensureTrack) {
dispatch(createLocalTracksA({ devices: [ MEDIA_TYPE.AUDIO ] }));
}
}

View File

@@ -3,12 +3,15 @@ import { AnyAction } from 'redux';
import { IStore } from '../../app/types';
import { hideNotification } from '../../notifications/actions';
import { isPrejoinPageVisible } from '../../prejoin/functions';
import { setAudioSettings } from '../../settings/actions.web';
import { getAvailableDevices } from '../devices/actions.web';
import { setScreenshareMuted } from '../media/actions';
import { SET_AUDIO_MUTED } from '../media/actionTypes';
import { gumPending, setScreenshareMuted } from '../media/actions';
import {
MEDIA_TYPE,
VIDEO_TYPE
} from '../media/constants';
import { IGUMPendingState } from '../media/types';
import MiddlewareRegistry from '../redux/MiddlewareRegistry';
import {
@@ -20,14 +23,19 @@ import {
TRACK_UPDATED
} from './actionTypes';
import {
createLocalTracksA,
showNoDataFromSourceVideoError,
toggleScreensharing,
trackMuteUnmuteFailed,
trackNoDataFromSourceNotificationInfoChanged
} from './actions.web';
import {
getTrackByJitsiTrack, logTracksForParticipant
getLocalJitsiAudioTrackSettings,
getLocalTrack,
getTrackByJitsiTrack, isUserInteractionRequiredForUnmute, logTracksForParticipant,
setTrackMuted
} from './functions.web';
import { ITrack } from './types';
import { ITrack, ITrackOptions } from './types';
import './middleware.any';
@@ -138,7 +146,15 @@ MiddlewareRegistry.register(store => next => action => {
return result;
}
case SET_AUDIO_MUTED: {
if (!action.muted
&& isUserInteractionRequiredForUnmute(store.getState())) {
return;
}
_setMuted(store, action);
break;
}
}
return next(action);
@@ -207,3 +223,47 @@ function _removeNoDataFromSourceNotification({ getState, dispatch }: IStore, tra
dispatch(trackNoDataFromSourceNotificationInfoChanged(jitsiTrack, undefined));
}
}
/**
* Mutes or unmutes a local track with a specific media type.
*
* @param {Store} store - The redux store in which the specified action is
* dispatched.
* @param {Action} action - The redux action dispatched in the specified store.
* @private
* @returns {void}
*/
function _setMuted(store: IStore, { ensureTrack, muted }: {
ensureTrack: boolean; muted: boolean; }) {
const { dispatch, getState } = store;
const state = getState();
const localTrack = getLocalTrack(state['features/base/tracks'], MEDIA_TYPE.AUDIO, /* includePending */ true);
if (localTrack) {
// The `jitsiTrack` property will have a value only for a localTrack for which `getUserMedia` has already
// completed. If there's no `jitsiTrack`, then the `muted` state will be applied once the `jitsiTrack` is
// created.
const { jitsiTrack } = localTrack;
if (jitsiTrack) {
setTrackMuted(jitsiTrack, muted, state, dispatch)
.catch(() => {
dispatch(trackMuteUnmuteFailed(localTrack, muted));
});
}
} else if (!muted && ensureTrack) {
// TODO(saghul): reconcile these 2 types.
dispatch(gumPending([ MEDIA_TYPE.AUDIO ], IGUMPendingState.PENDING_UNMUTE));
const createTrackOptions: ITrackOptions = {
devices: [ MEDIA_TYPE.AUDIO ],
};
dispatch(createLocalTracksA(createTrackOptions)).then(() => {
dispatch(gumPending([ MEDIA_TYPE.AUDIO ], IGUMPendingState.NONE));
const updatedSettings = getLocalJitsiAudioTrackSettings(getState());
dispatch(setAudioSettings(updatedSettings));
});
}
}
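
The web middleware now owns the audio unmute path; a sketch of what feeds it (assuming the setAudioMuted action creator from base/media/actions, imported elsewhere in this commit, has the shape setAudioMuted(muted, ensureTrack)):

```ts
// Unmuting with no existing mic track goes through the new branch above:
// gumPending(PENDING_UNMUTE) -> createLocalTracksA -> gumPending(NONE) -> setAudioSettings.
dispatch(setAudioMuted(false, /* ensureTrack */ true));
```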

View File

@@ -1,8 +1,10 @@
import { MediaType } from '../media/constants';
import { IAudioSettings } from '../settings/reducer';
export interface ITrackOptions {
cameraDeviceId?: string | null;
constraints?: {
audio?: IAudioSettings;
video?: {
height?: {
ideal?: number;

View File

@@ -1,3 +1,5 @@
import { isEqual } from 'lodash-es';
import { createDeviceChangedEvent } from '../analytics/AnalyticsEvents';
import { sendAnalytics } from '../analytics/functions';
import { IStore } from '../app/types';
@@ -7,8 +9,12 @@ import {
} from '../base/devices/actions';
import { getDeviceLabelById, setAudioOutputDeviceId } from '../base/devices/functions';
import { updateSettings } from '../base/settings/actions';
import { toggleUpdateAudioSettings } from '../base/tracks/actions.web';
import { getLocalJitsiAudioTrack } from '../base/tracks/functions.any';
import { toggleNoiseSuppression } from '../noise-suppression/actions';
import { setScreenshareFramerate } from '../screen-share/actions';
import { setScreenshareFramerate } from '../screen-share/actions.web';
import { setAudioSettings } from '../settings/actions.web';
import { disposePreviewAudioTrack } from '../settings/functions.web';
import { getAudioDeviceSelectionDialogProps, getVideoDeviceSelectionDialogProps } from './functions';
import logger from './logger';
@@ -22,16 +28,21 @@ import logger from './logger';
* @returns {Function}
*/
export function submitAudioDeviceSelectionTab(newState: any, isDisplayedOnWelcomePage: boolean) {
return (dispatch: IStore['dispatch'], getState: IStore['getState']) => {
return async (dispatch: IStore['dispatch'], getState: IStore['getState']) => {
const currentState = getAudioDeviceSelectionDialogProps(getState(), isDisplayedOnWelcomePage);
const isSelectedAudioInputIdChanged = newState.selectedAudioInputId
&& newState.selectedAudioInputId !== currentState.selectedAudioInputId;
if (newState.selectedAudioInputId && newState.selectedAudioInputId !== currentState.selectedAudioInputId) {
if (isSelectedAudioInputIdChanged) {
dispatch(updateSettings({
userSelectedMicDeviceId: newState.selectedAudioInputId,
userSelectedMicDeviceLabel:
getDeviceLabelById(getState(), newState.selectedAudioInputId, 'audioInput')
}));
await disposePreviewAudioTrack(getState());
dispatch(setAudioSettings(newState.audioSettings));
dispatch(setAudioInputDevice(newState.selectedAudioInputId));
}
@@ -58,6 +69,21 @@ export function submitAudioDeviceSelectionTab(newState: any, isDisplayedOnWelcom
if (newState.noiseSuppressionEnabled !== currentState.noiseSuppressionEnabled) {
dispatch(toggleNoiseSuppression());
}
if (!isEqual(newState.audioSettings, currentState.audioSettings) && !isSelectedAudioInputIdChanged && !newState.noiseSuppressionEnabled) {
const state = getState();
const jitsiTrack = getLocalJitsiAudioTrack(state);
if (!jitsiTrack) {
logger.debug('No local audio track found');
dispatch(setAudioSettings(newState.audioSettings));
return;
}
await disposePreviewAudioTrack(getState());
dispatch(toggleUpdateAudioSettings(newState.audioSettings));
}
};
}

View File

@@ -1,30 +1,41 @@
import { Theme } from '@mui/material';
import clsx from 'clsx';
import React from 'react';
import { WithTranslation } from 'react-i18next';
import { connect } from 'react-redux';
import { withStyles } from 'tss-react/mui';
import { IReduxState, IStore } from '../../app/types';
import { isAdvancedAudioSettingsEnabled } from '../../base/config/functions.any';
import { getAvailableDevices } from '../../base/devices/actions.web';
import AbstractDialogTab, {
type IProps as AbstractDialogTabProps
} from '../../base/dialog/components/web/AbstractDialogTab';
import { translate } from '../../base/i18n/functions';
import { createLocalTrack } from '../../base/lib-jitsi-meet/functions.web';
import { IAudioSettings } from '../../base/settings/reducer';
import Checkbox from '../../base/ui/components/web/Checkbox';
import { setPreviewAudioTrack } from '../../settings/actions.web';
import { disposeTrack } from '../../settings/functions.web';
import { iAmVisitor as iAmVisitorCheck } from '../../visitors/functions';
import logger from '../logger';
import AudioInputPreview from './AudioInputPreview';
import AudioOutputPreview from './AudioOutputPreview';
import AudioInputPreview from './AudioInputPreview.web';
import AudioOutputPreview from './AudioOutputPreview.web';
import DeviceHidContainer from './DeviceHidContainer.web';
import DeviceSelector from './DeviceSelector.web';
/**
* The type of the React {@code Component} props of {@link AudioDevicesSelection}.
*/
interface IProps extends AbstractDialogTabProps, WithTranslation {
/**
* The audio local track settings.
*/
audioSettings?: IAudioSettings;
/**
* All known audio and video devices split by type. This prop comes from
* the app state.
@@ -98,11 +109,21 @@ interface IProps extends AbstractDialogTabProps, WithTranslation {
*/
iAmVisitor: boolean;
/**
* Whether the advanced audio settings are enabled from config.
*/
isAdvancedAudioSettingsConfigEnabled: boolean;
/**
* Whether noise suppression is on or not.
*/
noiseSuppressionEnabled: boolean;
/**
* The audio track that is used for previewing the audio input.
*/
previewAudioTrack: any | null;
/**
* The id of the audio input device to preview.
*/
@@ -114,19 +135,20 @@ interface IProps extends AbstractDialogTabProps, WithTranslation {
selectedAudioOutputId: string;
}
/**
* The type of the React {@code Component} state of {@link AudioDevicesSelection}.
*/
interface IState {
/**
* The JitsiTrack to use for previewing audio input.
*/
previewAudioTrack?: any | null;
}
const styles = (theme: Theme) => {
return {
checkbox: {
width: 'max-content'
},
checkboxGrid: {
display: 'grid',
gridTemplateColumns: 'auto auto',
gap: theme.spacing(3),
margin: `${theme.spacing(3)} 0`
},
container: {
display: 'flex',
flexDirection: 'column' as const,
@@ -134,32 +156,33 @@ const styles = (theme: Theme) => {
width: '100%'
},
hidden: {
display: 'none'
},
inputContainer: {
marginBottom: theme.spacing(3)
},
outputContainer: {
margin: `${theme.spacing(5)} 0`,
display: 'flex',
alignItems: 'flex-end'
alignItems: 'flex-end',
margin: `${theme.spacing(5)} 0`
},
outputButton: {
marginLeft: theme.spacing(3)
},
noiseSuppressionContainer: {
marginBottom: theme.spacing(5)
}
};
};
/**
* React {@code Component} for previewing audio and video input/output devices.
*
* @augments Component
*/
class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
class AudioDevicesSelection extends AbstractDialogTab<IProps, {}> {
/**
* Whether current component is mounted or not.
@@ -181,9 +204,6 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
constructor(props: IProps) {
super(props);
this.state = {
previewAudioTrack: null
};
this._unMounted = true;
}
@@ -224,9 +244,29 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
*/
override componentWillUnmount() {
this._unMounted = true;
this._disposeAudioInputPreview();
disposeTrack(this.props.previewAudioTrack);
}
/**
* Toggles the audio settings based on the input change event and updates the state.
*
* @param {Object} e - The change event from the checkbox input.
*
* @returns {void}
*/
_onToggleAudioSettings = ({ target: { checked, name } }: React.ChangeEvent<HTMLInputElement>) => {
const { audioSettings } = this.props;
const newValue = name === 'channelCount' ? (checked ? 2 : 1) : checked;
super._onChange({
audioSettings: {
...audioSettings,
[name]: newValue
}
});
};
/**
* Implements React's {@link Component#render()}.
*
@@ -234,11 +274,13 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
*/
override render() {
const {
audioSettings,
hasAudioPermission,
hideAudioInputPreview,
hideAudioOutputPreview,
hideDeviceHIDContainer,
hideNoiseSuppression,
isAdvancedAudioSettingsConfigEnabled,
iAmVisitor,
noiseSuppressionEnabled,
selectedAudioOutputId,
@@ -248,6 +290,13 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
const classes = withStyles.getClasses(this.props);
const isAudioSettingsEnabled = Boolean(audioSettings?.autoGainControl || audioSettings?.channelCount === 2 || audioSettings?.echoCancellation || audioSettings?.noiseSuppression);
const shouldDisplayNoiseSuppressionCheckbox = !hideNoiseSuppression && !iAmVisitor;
const shouldDisplayAdvancedAudioSettingsCheckboxes = !hideNoiseSuppression && !iAmVisitor && isAdvancedAudioSettingsConfigEnabled && Boolean(audioSettings);
const shouldDisabledNoiseSupressionCheckbox = shouldDisplayAdvancedAudioSettingsCheckboxes && (isAudioSettingsEnabled && !noiseSuppressionEnabled);
return (
<div className = { classes.container }>
{!iAmVisitor && <div
@@ -255,9 +304,60 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
className = { classes.inputContainer }>
{this._renderSelector(audioInput)}
</div>}
{!hideAudioInputPreview && hasAudioPermission && !iAmVisitor
&& <AudioInputPreview
track = { this.state.previewAudioTrack } />}
&& <AudioInputPreview
track = { this.props.previewAudioTrack } />}
<fieldset className = { classes.checkboxGrid }>
<Checkbox
checked = { Boolean(audioSettings?.echoCancellation) }
className = { clsx(classes.checkbox, {
[classes.hidden]: !shouldDisplayAdvancedAudioSettingsCheckboxes,
}) }
disabled = { noiseSuppressionEnabled }
label = { t('toolbar.advancedAudioSettings.aec.label') }
name = { 'echoCancellation' }
onChange = { this._onToggleAudioSettings } />
<Checkbox
checked = { Boolean(audioSettings?.channelCount === 2) }
className = { clsx(classes.checkbox, {
[classes.hidden]: !shouldDisplayAdvancedAudioSettingsCheckboxes,
}) }
disabled = { noiseSuppressionEnabled }
label = { t('toolbar.advancedAudioSettings.stereo.label') }
name = { 'channelCount' }
onChange = { this._onToggleAudioSettings } />
<Checkbox
checked = { Boolean(audioSettings?.autoGainControl) }
className = { clsx(classes.checkbox, {
[classes.hidden]: !shouldDisplayAdvancedAudioSettingsCheckboxes,
}) }
disabled = { noiseSuppressionEnabled }
label = { t('toolbar.advancedAudioSettings.agc.label') }
name = { 'autoGainControl' }
onChange = { this._onToggleAudioSettings } />
<Checkbox
checked = { Boolean(audioSettings?.noiseSuppression) }
className = { clsx(classes.checkbox, {
[classes.hidden]: !shouldDisplayAdvancedAudioSettingsCheckboxes,
}) }
disabled = { noiseSuppressionEnabled }
label = { t('toolbar.advancedAudioSettings.ns.label') }
name = { 'noiseSuppression' }
onChange = { this._onToggleAudioSettings } />
<Checkbox
checked = { noiseSuppressionEnabled }
className = { clsx(classes.checkbox, {
[classes.hidden]: !shouldDisplayNoiseSuppressionCheckbox,
}) }
disabled = { shouldDisabledNoiseSupressionCheckbox }
label = { t('toolbar.enableNoiseSuppression') }
// eslint-disable-next-line react/jsx-no-bind
onChange = { () => super._onChange({
noiseSuppressionEnabled: !noiseSuppressionEnabled
}) } />
</fieldset>
<div
aria-live = 'polite'
className = { classes.outputContainer }>
@@ -267,17 +367,7 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
className = { classes.outputButton }
deviceId = { selectedAudioOutputId } />}
</div>
{!hideNoiseSuppression && !iAmVisitor && (
<div className = { classes.noiseSuppressionContainer }>
<Checkbox
checked = { noiseSuppressionEnabled }
label = { t('toolbar.enableNoiseSuppression') }
// eslint-disable-next-line react/jsx-no-bind
onChange = { () => super._onChange({
noiseSuppressionEnabled: !noiseSuppressionEnabled
}) } />
</div>
)}
{!hideDeviceHIDContainer && !iAmVisitor
&& <DeviceHidContainer />}
</div>
@@ -292,13 +382,13 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
* @returns {void}
*/
_createAudioInputTrack(deviceId: string) {
const { hideAudioInputPreview } = this.props;
const { hideAudioInputPreview, previewAudioTrack } = this.props;
if (hideAudioInputPreview) {
return;
}
return this._disposeAudioInputPreview()
return disposeTrack(previewAudioTrack)
.then(() => createLocalTrack('audio', deviceId, 5000))
.then(jitsiLocalTrack => {
if (this._unMounted) {
@@ -306,29 +396,14 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
return;
}
this.props.dispatch(setPreviewAudioTrack(jitsiLocalTrack));
this.setState({
previewAudioTrack: jitsiLocalTrack
});
})
.catch(() => {
this.setState({
previewAudioTrack: null
});
this.props.dispatch(setPreviewAudioTrack(null));
});
}
/**
* Utility function for disposing the current audio input preview.
*
* @private
* @returns {Promise}
*/
_disposeAudioInputPreview(): Promise<any> {
return this.state.previewAudioTrack
? this.state.previewAudioTrack.dispose() : Promise.resolve();
}
/**
* Creates a DeviceSelector instance based on the passed in configuration.
*
@@ -362,8 +437,8 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
id: 'audioInput',
label: 'settings.selectMic',
onSelect: (selectedAudioInputId: string) => super._onChange({ selectedAudioInputId }),
selectedDeviceId: this.state.previewAudioTrack
? this.state.previewAudioTrack.getDeviceId() : this.props.selectedAudioInputId
selectedDeviceId: this.props.previewAudioTrack
? this.props.previewAudioTrack.getDeviceId() : this.props.selectedAudioInputId
};
let audioOutput;
@@ -389,7 +464,9 @@ class AudioDevicesSelection extends AbstractDialogTab<IProps, IState> {
const mapStateToProps = (state: IReduxState) => {
return {
availableDevices: state['features/base/devices'].availableDevices ?? {},
iAmVisitor: iAmVisitorCheck(state)
isAdvancedAudioSettingsConfigEnabled: isAdvancedAudioSettingsEnabled(state),
iAmVisitor: iAmVisitorCheck(state),
previewAudioTrack: state['features/settings'].previewAudioTrack
};
};

View File

@@ -22,6 +22,7 @@ import {
getUserSelectedMicDeviceId,
getUserSelectedOutputDeviceId
} from '../base/settings/functions.web';
import { getLocalJitsiAudioTrackSettings } from '../base/tracks/functions.any';
import { isNoiseSuppressionEnabled } from '../noise-suppression/functions';
import { isPrejoinPageVisible } from '../prejoin/functions';
import { SS_DEFAULT_FRAME_RATE, SS_SUPPORTED_FRAMERATES } from '../settings/constants';
@@ -50,6 +51,7 @@ export function getAudioDeviceSelectionDialogProps(stateful: IStateful, isDispla
const deviceHidSupported = isDeviceHidSupported() && getWebHIDFeatureConfig(state);
const noiseSuppressionEnabled = isNoiseSuppressionEnabled(state);
const hideNoiseSuppression = isPrejoinPageVisible(state) || isDisplayedOnWelcomePage;
const audioSettings = state['features/settings'].audioSettings ?? getLocalJitsiAudioTrackSettings(state);
// When the previews are disabled we don't need multiple audio input support in order to change the mic. This is the
// case for Safari on iOS.
@@ -71,6 +73,7 @@ export function getAudioDeviceSelectionDialogProps(stateful: IStateful, isDispla
// we fill the device selection dialog with the devices that are currently
// used or if none are currently used with what we have in settings(user selected)
return {
audioSettings,
disableAudioInputChange,
disableDeviceChange: !JitsiMeetJS.mediaDevices.isDeviceChangeAvailable(),
hasAudioPermission: permissions.audio,

View File

@@ -1,3 +1,13 @@
/**
* The type of (redux) action which sets the audio settings.
*/
export const SET_AUDIO_SETTINGS = 'SET_AUDIO_SETTINGS';
/**
* The type of (redux) action which sets the preview audio track.
*/
export const SET_PREVIEW_AUDIO_TRACK = 'SET_PREVIEW_AUDIO_TRACK';
/**
* The type of (redux) action which sets the visibility of the audio settings popup.
*/

View File

@@ -16,6 +16,7 @@ import i18next from '../base/i18n/i18next';
import { browser } from '../base/lib-jitsi-meet';
import { getNormalizedDisplayName } from '../base/participants/functions';
import { updateSettings } from '../base/settings/actions';
import { IAudioSettings } from '../base/settings/reducer';
import { getLocalVideoTrack } from '../base/tracks/functions.web';
import { appendURLHashParam } from '../base/util/uri';
import { disableKeyboardShortcuts, enableKeyboardShortcuts } from '../keyboard-shortcuts/actions';
@@ -23,7 +24,9 @@ import { toggleBackgroundEffect } from '../virtual-background/actions';
import virtualBackgroundLogger from '../virtual-background/logger';
import {
SET_AUDIO_SETTINGS,
SET_AUDIO_SETTINGS_VISIBILITY,
SET_PREVIEW_AUDIO_TRACK,
SET_VIDEO_SETTINGS_VISIBILITY
} from './actionTypes';
import LogoutDialog from './components/web/LogoutDialog';
@@ -36,7 +39,6 @@ import {
getShortcutsTabProps
} from './functions.web';
/**
* Opens {@code LogoutDialog}.
*
@@ -340,3 +342,35 @@ export function submitVirtualBackgroundTab(newState: any, isCancel = false) {
}
};
}
/**
* Sets the audio preview track.
*
* @param {any} track - The track to set.
* @returns {{
* type: SET_PREVIEW_AUDIO_TRACK,
* track: any
* }}
*/
export function setPreviewAudioTrack(track: any) {
return {
type: SET_PREVIEW_AUDIO_TRACK,
track
};
}
/**
* Sets the audio settings.
*
* @param {IAudioSettings} settings - The settings to set.
* @returns {{
* type: SET_AUDIO_SETTINGS,
* settings: IAudioSettings
* }}
*/
export function setAudioSettings(settings: IAudioSettings) {
return {
type: SET_AUDIO_SETTINGS,
settings
};
}
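
A short usage sketch for the two new action creators (the surrounding dispatch context and values are assumptions):

```ts
// Keep redux in sync after creating a preview mic track and reading its settings.
dispatch(setPreviewAudioTrack(jitsiLocalTrack));
dispatch(setAudioSettings({
    autoGainControl: true,
    channelCount: 1,
    echoCancellation: true,
    noiseSuppression: true
}));
```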

View File

@@ -17,8 +17,8 @@ import {
import DialogWithTabs, { IDialogTab } from '../../../base/ui/components/web/DialogWithTabs';
import { isCalendarEnabled } from '../../../calendar-sync/functions.web';
import { submitAudioDeviceSelectionTab, submitVideoDeviceSelectionTab } from '../../../device-selection/actions.web';
import AudioDevicesSelection from '../../../device-selection/components/AudioDevicesSelection';
import VideoDeviceSelection from '../../../device-selection/components/VideoDeviceSelection';
import AudioDevicesSelection from '../../../device-selection/components/AudioDevicesSelection.web';
import VideoDeviceSelection from '../../../device-selection/components/VideoDeviceSelection.web';
import {
getAudioDeviceSelectionDialogProps,
getVideoDeviceSelectionDialogProps
@@ -160,6 +160,7 @@ function _mapStateToProps(state: IReduxState, ownProps: any) {
return {
...newProps,
audioSettings: tabState.audioSettings,
noiseSuppressionEnabled: tabState.noiseSuppressionEnabled,
selectedAudioInputId: tabState.selectedAudioInputId,
selectedAudioOutputId: tabState.selectedAudioOutputId

View File

@@ -145,3 +145,31 @@ export function shouldShowModeratorSettings(stateful: IStateful) {
return hasModeratorRights && !hideModeratorSettingsTab;
}
/**
* Disposes a track.
*
* @param {Object} track - The track to dispose.
* @returns {Promise<void>}
*/
export async function disposeTrack(track: any) {
if (!track) {
return;
}
await track.dispose();
}
/**
* Disposes the audio input preview track from Redux state.
*
* @param {(Function|Object)} stateful - The (whole) redux state, or redux's
* {@code getState} function to be used to retrieve the state.
* @returns {Promise<void>}
*/
export async function disposePreviewAudioTrack(stateful: IStateful) {
const state = toState(stateful);
const previewTrack = state['features/settings']?.previewAudioTrack;
await disposeTrack(previewTrack);
}
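
Usage sketch for the two helpers above (the call sites are assumptions):

```ts
// Dispose whatever preview mic track is currently stored under features/settings.
await disposePreviewAudioTrack(getState());

// Or dispose a specific track instance directly.
await disposeTrack(previewAudioTrack);
```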

View File

@@ -1,12 +1,17 @@
import ReducerRegistry from '../base/redux/ReducerRegistry';
import { IAudioSettings } from '../base/settings/reducer';
import {
SET_AUDIO_SETTINGS,
SET_AUDIO_SETTINGS_VISIBILITY,
SET_PREVIEW_AUDIO_TRACK,
SET_VIDEO_SETTINGS_VISIBILITY
} from './actionTypes';
export interface ISettingsState {
audioSettings?: IAudioSettings;
audioSettingsVisible?: boolean;
previewAudioTrack?: any | null;
videoSettingsVisible?: boolean;
}
@@ -22,6 +27,16 @@ ReducerRegistry.register('features/settings', (state: ISettingsState = {}, actio
...state,
videoSettingsVisible: action.value
};
case SET_PREVIEW_AUDIO_TRACK:
return {
...state,
previewAudioTrack: action.track
};
case SET_AUDIO_SETTINGS:
return {
...state,
audioSettings: action.settings
};
}
return state;