[mirotalksfu] - #215 integrate RNNoise for noise suppression
@@ -76,7 +76,7 @@ let BRAND = {
    },
    about: {
        imageUrl: '../images/mirotalk-logo.gif',
        title: '<strong>WebRTC SFU v1.8.92</strong>',
        title: '<strong>WebRTC SFU v1.9.00</strong>',
        html: `
            <button
                id="support-button"

@@ -32,14 +32,7 @@ class LocalStorage {
    moderator_media_cant_sharing: false, // Everyone can't share media
    moderator_disconnect_all_on_leave: false, // Disconnect all participants on leave room
    dominant_speaker_focus: false, // Focus on dominant speaker
    mic_auto_gain_control: false, // Automatic gain control
    mic_echo_cancellations: true, // Echo cancellation
    mic_noise_suppression: true, // Noise suppression
    mic_sample_rate: 0, // 0: 48000 Hz 1: 44100 Hz
    mic_sample_size: 0, // 0: 16 bits 1: 32 bits
    mic_channel_count: 0, // 0: 1(mono) 1: 2 (stereo)
    mic_latency: 50, // ms
    mic_volume: 100, // %
    mic_noise_suppression: false, // Noise suppression using RNNoise
    video_fps: 0, // default 1280x768 30fps
    screen_optimization: 1, // default detail (1): For high fidelity (screen sharing with text/graphics) || motion (2): For high frame rate (video playback, game streaming)
    screen_fps: 3, // default 1920x1080 15fps
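Editor's note: the persisted `mic_noise_suppression` flag changes meaning in this commit. A hedged sketch of the before/after behavior, inferred from the hunks in this diff (illustrative only, not part of the commit):

// Before: the flag fed the browser's built-in constraint, roughly:
//   navigator.mediaDevices.getUserMedia({ audio: { noiseSuppression: true } });
// After: it defaults to false and instead gates the RNNoise AudioWorklet path
// (NodeProcessor.js / NoiseSuppressionProcessor.js below); the simplified
// getAudioConstraints() further down no longer sets noiseSuppression at all.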
public/js/NodeProcessor.js (new file, 221 lines)
@@ -0,0 +1,221 @@
'use strict';

// Handle UI updates and interactions
class UIManager {
    constructor(elements) {
        this.elements = elements;
    }

    updateStatus(message, type = 'info') {
        const timestamp = new Date().toLocaleTimeString();
        const printMessage = `[${timestamp}] ${message}`;
        switch (type) {
            case 'error':
                console.error(printMessage);
                break;
            case 'success':
                console.info(printMessage);
                break;
            case 'warning':
                console.warn(printMessage);
                break;
            default:
                console.log(printMessage);
        }
    }

    updateUI(isProcessing, noiseSuppressionEnabled) {
        this.updateStatus(
            `Audio processing ${isProcessing ? 'started' : 'stopped'}`,
            isProcessing ? 'success' : 'info'
        );
        if (noiseSuppressionEnabled) {
            this.elements.labelNoiseSuppression.textContent = '🔊 Noise Suppression';
            this.elements.labelNoiseSuppression.style.color = 'lime';
        } else {
            this.elements.labelNoiseSuppression.textContent = '🔇 Noise Suppression';
            this.elements.labelNoiseSuppression.style.color = 'white';
        }
    }
}

// Handle audio worklet message processing
class MessageHandler {
    constructor(uiManager, wasmLoader) {
        this.uiManager = uiManager;
        this.wasmLoader = wasmLoader;
    }

    handleMessage(event) {
        if (event.data.type === 'request-wasm') {
            this.wasmLoader.loadWasmBuffer();
        } else if (event.data.type === 'wasm-ready') {
            this.uiManager.updateStatus('✅ RNNoise WASM initialized successfully', 'success');
        } else if (event.data.type === 'wasm-error') {
            this.uiManager.updateStatus('❌ RNNoise WASM error: ' + event.data.error, 'error');
        } else if (event.data.type === 'vad') {
            if (event.data.isSpeech) {
                //this.uiManager.updateStatus(`🗣️ Speech detected (VAD: ${event.data.probability.toFixed(2)})`, 'info');
            }
        }
    }
}

// Handle only WASM module loading
class WasmLoader {
    constructor(uiManager, getWorkletNode) {
        this.uiManager = uiManager;
        this.getWorkletNode = getWorkletNode;
    }

    async loadWasmBuffer() {
        try {
            this.uiManager.updateStatus('📦 Loading RNNoise sync module...', 'info');

            const jsResponse = await fetch('../js/RnnoiseSync.js');

            if (!jsResponse.ok) {
                throw new Error('Failed to load rnnoise-sync.js');
            }

            const jsContent = await jsResponse.text();
            this.uiManager.updateStatus('📦 Sending sync module to worklet...', 'info');

            this.getWorkletNode().port.postMessage({
                type: 'sync-module',
                jsContent: jsContent,
            });

            this.uiManager.updateStatus('📦 Sync module sent to worklet', 'info');
        } catch (error) {
            this.uiManager.updateStatus('❌ Failed to load sync module: ' + error.message, 'error');
            console.error('Sync module loading error:', error);
        }
    }
}

// Handle RNNoise processing
class RNNoiseProcessor {
    constructor() {
        this.audioContext = null;
        this.workletNode = null;
        this.mediaStream = null;
        this.sourceNode = null;
        this.destinationNode = null;
        this.isProcessing = false;
        this.noiseSuppressionEnabled = false;

        this.initializeUI();
        this.initializeDependencies();
    }

    initializeUI() {
        this.elements = {
            labelNoiseSuppression: document.getElementById('labelNoiseSuppression'),
            switchNoiseSuppression: document.getElementById('switchNoiseSuppression'),
        };

        this.elements.switchNoiseSuppression.onchange = (e) => {
            localStorageSettings.mic_noise_suppression = e.currentTarget.checked;
            lS.setSettings(localStorageSettings);
            userLog(
                'info',
                localStorageSettings.mic_noise_suppression ? 'Noise suppression enabled' : 'Noise suppression disabled',
                'top-end',
                3000
            );
            this.toggleNoiseSuppression();
        };
    }

    initializeDependencies() {
        this.uiManager = new UIManager(this.elements);
        this.wasmLoader = new WasmLoader(this.uiManager, () => this.workletNode);
        this.messageHandler = new MessageHandler(this.uiManager, this.wasmLoader);
    }

    async toggleProcessing(mediaStream = null) {
        this.isProcessing ? this.stopProcessing() : await this.startProcessing(mediaStream);
    }

    async startProcessing(mediaStream = null) {
        if (!mediaStream) {
            throw new Error('No media stream provided to startProcessing');
        }
        try {
            this.uiManager.updateStatus('🎤 Starting audio processing...', 'info');

            this.audioContext = new AudioContext();
            const sampleRate = this.audioContext.sampleRate;
            this.uiManager.updateStatus(`🎵 Audio context created with sample rate: ${sampleRate}Hz`, 'info');

            this.mediaStream = mediaStream;
            if (!this.mediaStream.getAudioTracks().length) {
                throw new Error('No audio tracks found in the provided media stream');
            }

            await this.audioContext.audioWorklet.addModule('../js/NoiseSuppressionProcessor.js');

            this.workletNode = new AudioWorkletNode(this.audioContext, 'NoiseSuppressionProcessor', {
                numberOfInputs: 1,
                numberOfOutputs: 1,
                outputChannelCount: [1],
            });

            this.workletNode.port.onmessage = (event) => this.messageHandler.handleMessage(event);

            this.sourceNode = this.audioContext.createMediaStreamSource(this.mediaStream);
            this.destinationNode = this.audioContext.createMediaStreamDestination();

            this.sourceNode.connect(this.workletNode);
            this.workletNode.connect(this.destinationNode);

            this.isProcessing = true;
            this.uiManager.updateUI(this.isProcessing, this.noiseSuppressionEnabled);
            this.uiManager.updateStatus('🎤 Audio processing started', 'success');

            // Return the processed MediaStream (with noise suppression)
            return this.destinationNode.stream;
        } catch (error) {
            this.uiManager.updateStatus('❌ Error: ' + error.message, 'error');
        }
    }

    stopProcessing() {
        if (this.mediaStream) {
            this.mediaStream.getTracks().forEach((track) => track.stop());
            this.mediaStream = null;
        }

        if (this.audioContext && this.audioContext.state !== 'closed') {
            this.audioContext.close();
            this.audioContext = null;
        }

        this.workletNode = null;
        this.sourceNode = null;
        this.destinationNode = null;
        this.isProcessing = false;
        this.noiseSuppressionEnabled = false;

        this.uiManager.updateUI(this.isProcessing, this.noiseSuppressionEnabled);
        this.uiManager.updateStatus('🛑 Audio processing stopped', 'info');
    }

    toggleNoiseSuppression() {
        this.noiseSuppressionEnabled = !this.noiseSuppressionEnabled;

        if (this.workletNode) {
            this.workletNode.port.postMessage({
                type: 'enable',
                enabled: this.noiseSuppressionEnabled,
            });
        }

        this.noiseSuppressionEnabled
            ? this.uiManager.updateStatus('🔊 RNNoise enabled - background noise will be suppressed', 'success')
            : this.uiManager.updateStatus('🔇 RNNoise disabled - audio passes through unchanged', 'info');

        this.uiManager.updateUI(this.isProcessing, this.noiseSuppressionEnabled);
    }
}
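Editor's note: a minimal usage sketch of NodeProcessor.js from the main thread, using only the names defined above. RoomClient.js (further down in this diff) does the actual wiring inside produce(); the getUserMedia call and the surrounding async context here are illustrative, not part of the commit.

// Inside an async function, illustrative only:
const rnnoise = new RNNoiseProcessor(); // binds to #switchNoiseSuppression / #labelNoiseSuppression

// Capture a raw microphone stream (illustrative constraints).
const micStream = await navigator.mediaDevices.getUserMedia({ audio: true });

// Route mic -> AudioWorklet('NoiseSuppressionProcessor') -> MediaStreamDestination
// and use the returned, denoised stream instead of the raw one.
const processedStream = await rnnoise.startProcessing(micStream);

// Flip RNNoise on/off at runtime without rebuilding the audio graph...
rnnoise.toggleNoiseSuppression();

// ...and tear everything down when the audio producer is closed.
rnnoise.stopProcessing();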
public/js/NoiseSuppressionProcessor.js (new file, 252 lines)
@@ -0,0 +1,252 @@
'use strict';

const RNNOISE_FRAME_SIZE = 480;
const SHIFT_16_BIT_NR = 32768;

// Handle WASM module initialization
class WasmModuleInitializer {
    constructor(messagePort) {
        this.messagePort = messagePort;
        this.Module = null;
    }

    async initSyncModule(jsContent) {
        try {
            if (!jsContent) throw new Error('Missing sync module JS content');

            const createFunction = new Function(jsContent + '; return createRNNWasmModuleSync;')();
            this.Module = await createFunction();

            if (this.Module.ready) {
                await this.Module.ready;
            }
            this.messagePort.postMessage({ type: 'wasm-ready' });
            return this.Module;
        } catch (error) {
            console.error('Sync module initialization error:', error);
            this.messagePort.postMessage({ type: 'wasm-error', error: error.message });
            throw error;
        }
    }

    getModule() {
        return this.Module;
    }
}

// Handle RNNoise context and buffer management
class RNNoiseContextManager {
    constructor(module) {
        this.module = module;
        this.rnnoiseContext = null;
        this.wasmPcmInput = null;
        this.wasmPcmInputF32Index = null;
        this.setupWasm();
    }

    setupWasm() {
        this.wasmPcmInput = this.module._malloc(RNNOISE_FRAME_SIZE * 4);
        this.wasmPcmInputF32Index = this.wasmPcmInput >> 2;
        if (!this.wasmPcmInput) throw new Error('Failed to allocate WASM buffer');

        this.rnnoiseContext = this.module._rnnoise_create();
        if (!this.rnnoiseContext) throw new Error('Failed to create RNNoise context');

        console.log('WASM setup complete:', {
            wasmPcmInput: this.wasmPcmInput,
            rnnoiseContext: this.rnnoiseContext,
            heapF32Available: !!this.module.HEAPF32,
        });
    }

    processFrame(frameBuffer, processedBuffer, messagePort) {
        if (!this.rnnoiseContext || !this.module || !this.module.HEAPF32) return;

        try {
            for (let i = 0; i < RNNOISE_FRAME_SIZE; i++) {
                this.module.HEAPF32[this.wasmPcmInputF32Index + i] = frameBuffer[i] * SHIFT_16_BIT_NR;
            }

            const vadScore = this.module._rnnoise_process_frame(
                this.rnnoiseContext,
                this.wasmPcmInput,
                this.wasmPcmInput
            );

            for (let i = 0; i < RNNOISE_FRAME_SIZE; i++) {
                processedBuffer[i] = this.module.HEAPF32[this.wasmPcmInputF32Index + i] / SHIFT_16_BIT_NR;
            }

            messagePort.postMessage({
                type: 'vad',
                probability: vadScore,
                isSpeech: vadScore > 0.5,
            });
        } catch (error) {
            console.error('Frame processing failed:', error);
            for (let i = 0; i < RNNOISE_FRAME_SIZE; i++) {
                processedBuffer[i] = frameBuffer[i];
            }
        }
    }

    destroy() {
        if (this.wasmPcmInput && this.module?._free) {
            this.module._free(this.wasmPcmInput);
            this.wasmPcmInput = null;
        }
        if (this.rnnoiseContext && this.module?._rnnoise_destroy) {
            this.module._rnnoise_destroy(this.rnnoiseContext);
            this.rnnoiseContext = null;
        }
    }
}

// Handle audio frame buffering
class AudioFrameBuffer {
    constructor() {
        this.frameBuffer = new Float32Array(RNNOISE_FRAME_SIZE);
        this.bufferIndex = 0;
        this.hasProcessedFrame = false;
        this.processedBuffer = new Float32Array(RNNOISE_FRAME_SIZE);
        this.processedIndex = 0;
    }

    addSample(sample) {
        this.frameBuffer[this.bufferIndex++] = sample;
        return this.bufferIndex === RNNOISE_FRAME_SIZE;
    }

    resetBuffer() {
        this.bufferIndex = 0;
        this.hasProcessedFrame = true;
        this.processedIndex = 0;
    }

    getProcessedSample() {
        return this.processedBuffer[this.processedIndex++];
    }

    getFrameBuffer() {
        return this.frameBuffer;
    }

    getProcessedBuffer() {
        return this.processedBuffer;
    }

    hasProcessed() {
        return this.hasProcessedFrame;
    }
}

// Handle volume analysis
class VolumeAnalyzer {
    calculateVolume(input, output, messagePort) {
        const originalVolume = Math.sqrt(input.reduce((sum, v) => sum + v * v, 0) / input.length);
        const processedVolume = Math.sqrt(output.reduce((sum, v) => sum + v * v, 0) / output.length);

        messagePort.postMessage({
            type: 'volume',
            original: originalVolume,
            processed: processedVolume,
        });
    }
}

// Handle audio worklet processing
class RNNoiseProcessor extends AudioWorkletProcessor {
    constructor() {
        super();
        this.initialized = false;
        this.enabled = false;
        this._destroyed = false;
        this.sampleRate = sampleRate || 48000;

        console.log('AudioWorklet processor initialized with sample rate:', this.sampleRate);

        this.wasmInitializer = new WasmModuleInitializer(this.port);
        this.contextManager = null;
        this.frameBuffer = new AudioFrameBuffer();
        this.volumeAnalyzer = new VolumeAnalyzer();

        this.setupMessageHandler();
        this.port.postMessage({ type: 'request-wasm' });
    }

    setupMessageHandler() {
        this.port.onmessage = async (event) => {
            const { type, jsContent, enabled } = event.data;
            switch (type) {
                case 'sync-module':
                    try {
                        const module = await this.wasmInitializer.initSyncModule(jsContent);
                        this.contextManager = new RNNoiseContextManager(module);
                        this.initialized = true;
                    } catch (error) {
                        console.error('Failed to initialize sync module:', error);
                    }
                    break;
                case 'enable':
                    this.enabled = enabled;
                    break;
                default:
                    console.warn('Unknown message type:', type);
                    break;
            }
        };
    }

    process(inputs, outputs, parameters) {
        const input = inputs[0]?.[0];
        const output = outputs[0]?.[0];
        if (!output) return true;

        // Always fill output with something valid
        if (!input || input.length === 0) {
            output.fill(0); // Silence if no input
            return true;
        }

        // If not initialized or not enabled, just pass through input safely
        if (!this.initialized || !this.enabled) {
            for (let i = 0; i < output.length; i++) {
                output[i] = Number.isFinite(input[i]) ? input[i] : 0;
            }
            return true;
        }

        for (let i = 0; i < input.length; i++) {
            const isFrameReady = this.frameBuffer.addSample(input[i]);

            if (isFrameReady) {
                this.contextManager.processFrame(
                    this.frameBuffer.getFrameBuffer(),
                    this.frameBuffer.getProcessedBuffer(),
                    this.port
                );
                this.frameBuffer.resetBuffer();
            }

            // Output processed sample if available, else fallback to input (with safety)
            let sample = this.frameBuffer.hasProcessed() ? this.frameBuffer.getProcessedSample() : input[i];
            output[i] = Number.isFinite(sample) ? sample : 0;
        }

        this.volumeAnalyzer.calculateVolume(input, output, this.port);
        return true;
    }

    destroy() {
        if (this._destroyed) return;

        if (this.contextManager) {
            this.contextManager.destroy();
            this.contextManager = null;
        }

        this._destroyed = true;
    }
}

registerProcessor('NoiseSuppressionProcessor', RNNoiseProcessor);
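Editor's note: the constants in this worklet follow directly from how RNNoise consumes audio. A back-of-the-envelope sketch of the numbers used above (not part of the commit; the 128-sample render quantum is the current Web Audio default):

const sampleRate = 48000;                                   // typical AudioContext rate
const RNNOISE_FRAME_SIZE = 480;                             // samples per RNNoise frame
const frameMs = (RNNOISE_FRAME_SIZE / sampleRate) * 1000;   // = 10 ms of audio per frame

const renderQuantum = 128;                                  // samples handed to process() per call
const callsPerFrame = RNNOISE_FRAME_SIZE / renderQuantum;   // = 3.75 calls to fill one frame

// Float samples in [-1, 1] are scaled into the 16-bit PCM range before
// _rnnoise_process_frame() and scaled back afterwards:
const SHIFT_16_BIT_NR = 32768;
const toPcm = (x) => x * SHIFT_16_BIT_NR;
const fromPcm = (x) => x / SHIFT_16_BIT_NR;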
public/js/RnnoiseSync.js (new file, 553 lines)
File diff omitted because one or more lines are too long.
@@ -11,7 +11,7 @@ if (location.href.substr(0, 5) !== 'https') location.href = 'https' + location.h
 * @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
 * @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
 * @author Miroslav Pejic - miroslav.pejic.85@gmail.com
 * @version 1.8.92
 * @version 1.9.00
 *
 */

@@ -1881,9 +1881,6 @@ function handleButtons() {
    await refreshMyAudioDevices();
    userLog('info', 'Refreshed audio devices', 'top-end');
};
applyAudioOptionsButton.onclick = () => {
    rc.closeThenProduce(RoomClient.mediaType.audio, microphoneSelect.value);
};
speakerTestBtn.onclick = () => {
    sound('ring', true);
};

@@ -2703,49 +2700,6 @@ function handleSelects() {
    userLog('info', `Buttons always visible ${status}`, 'top-end');
    e.target.blur();
};
// audio options
switchAutoGainControl.onchange = (e) => {
    localStorageSettings.mic_auto_gain_control = e.currentTarget.checked;
    lS.setSettings(localStorageSettings);
    e.target.blur();
};
switchEchoCancellation.onchange = (e) => {
    localStorageSettings.mic_echo_cancellations = e.currentTarget.checked;
    lS.setSettings(localStorageSettings);
    e.target.blur();
};
switchNoiseSuppression.onchange = (e) => {
    localStorageSettings.mic_noise_suppression = e.currentTarget.checked;
    lS.setSettings(localStorageSettings);
    e.target.blur();
};
sampleRateSelect.onchange = (e) => {
    localStorageSettings.mic_sample_rate = e.currentTarget.selectedIndex;
    lS.setSettings(localStorageSettings);
    e.target.blur();
};
sampleSizeSelect.onchange = (e) => {
    localStorageSettings.mic_sample_size = e.currentTarget.selectedIndex;
    lS.setSettings(localStorageSettings);
    e.target.blur();
};
channelCountSelect.onchange = (e) => {
    localStorageSettings.mic_channel_count = e.currentTarget.selectedIndex;
    lS.setSettings(localStorageSettings);
    e.target.blur();
};
micLatencyRange.oninput = (e) => {
    localStorageSettings.mic_latency = e.currentTarget.value;
    lS.setSettings(localStorageSettings);
    micLatencyValue.innerText = e.currentTarget.value;
    e.target.blur();
};
micVolumeRange.oninput = (e) => {
    localStorageSettings.mic_volume = e.currentTarget.value;
    lS.setSettings(localStorageSettings);
    micVolumeValue.innerText = e.currentTarget.value;
    e.target.blur();
};
// recording
switchHostOnlyRecording.onchange = (e) => {
    hostOnlyRecording = e.currentTarget.checked;

@@ -3343,18 +3297,7 @@ function loadSettingsFromLocalStorage() {
    themeCustom.input.value = themeCustom.color;

    switchDominantSpeakerFocus.checked = localStorageSettings.dominant_speaker_focus;

    switchAutoGainControl.checked = localStorageSettings.mic_auto_gain_control;
    switchEchoCancellation.checked = localStorageSettings.mic_echo_cancellations;
    switchNoiseSuppression.checked = localStorageSettings.mic_noise_suppression;
    sampleRateSelect.selectedIndex = localStorageSettings.mic_sample_rate;
    sampleSizeSelect.selectedIndex = localStorageSettings.mic_sample_size;
    channelCountSelect.selectedIndex = localStorageSettings.mic_channel_count;

    micLatencyRange.value = localStorageSettings.mic_latency || 50;
    micLatencyValue.innerText = localStorageSettings.mic_latency || 50;
    micVolumeRange.value = localStorageSettings.mic_volume || 100;
    micVolumeValue.innerText = localStorageSettings.mic_volume || 100;

    screenOptimization.selectedIndex = localStorageSettings.screen_optimization;
    videoFps.selectedIndex = localStorageSettings.video_fps;

@@ -5517,7 +5460,7 @@ function showAbout() {
    position: 'center',
    imageUrl: BRAND.about?.imageUrl && BRAND.about.imageUrl.trim() !== '' ? BRAND.about.imageUrl : image.about,
    customClass: { image: 'img-about' },
    title: BRAND.about?.title && BRAND.about.title.trim() !== '' ? BRAND.about.title : 'WebRTC SFU v1.8.92',
    title: BRAND.about?.title && BRAND.about.title.trim() !== '' ? BRAND.about.title : 'WebRTC SFU v1.9.00',
    html: `
        <br />
        <div id="about">
@@ -9,7 +9,7 @@
 * @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
 * @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
 * @author Miroslav Pejic - miroslav.pejic.85@gmail.com
 * @version 1.8.92
 * @version 1.9.00
 *
 */

@@ -402,6 +402,9 @@ class RoomClient {
    this.myAudioEl = null;
    this.showPeerInfo = false; // on peerName mouse hover show additional info

    // Noise Suppression
    this.RNNoiseProcessor = null;

    this.videoProducerId = null;
    this.screenProducerId = null;
    this.audioProducerId = null;

@@ -1728,6 +1731,16 @@ class RoomClient {
        }
    }

    if (audio) {
        /*
         * Initialize RNNoise Suppression if enabled and supported
         * This will only apply to audio tracks
         * and will not affect video tracks.
         */
        this.initRNNoiseSuppression();
        stream = await this.getRNNoiseSuppressionStream(stream);
    }

    console.log('Supported Constraints', navigator.mediaDevices.getSupportedConstraints());

    const track = audio ? stream.getAudioTracks()[0] : stream.getVideoTracks()[0];

@@ -2114,23 +2127,67 @@ class RoomClient {
    saveVirtualBackgroundSettings(blurLevel, backgroundImage, backgroundTransparent);
}

// ####################################################
// NOISE SUPPRESSION
// ####################################################

initRNNoiseSuppression() {
    if (typeof RNNoiseProcessor === 'undefined') {
        console.warn('RNNoiseProcessor is not available.');
        return;
    }

    this.disableRNNoiseSuppression();

    this.RNNoiseProcessor = new RNNoiseProcessor();
}

async getRNNoiseSuppressionStream(stream) {
    if (!this.RNNoiseProcessor) {
        console.warn('RNNoiseProcessor not initialized.');
        return stream;
    }

    try {
        const processedStream = await this.RNNoiseProcessor.startProcessing(stream);

        if (localStorageSettings.mic_noise_suppression) {
            this.RNNoiseProcessor.toggleNoiseSuppression();
            switchNoiseSuppression.checked = this.RNNoiseProcessor.noiseSuppressionEnabled;
        }

        if (typeof labelNoiseSuppression !== 'undefined') {
            labelNoiseSuppression.textContent = this.RNNoiseProcessor.noiseSuppressionEnabled
                ? '🔊 Noise Suppression'
                : '🔇 Noise Suppression';
        }

        return processedStream;
    } catch (err) {
        console.warn('RNNoiseProcessor failed, using original stream:', err);
        return stream;
    }
}

disableRNNoiseSuppression() {
    if (this.RNNoiseProcessor) {
        this.RNNoiseProcessor.stopProcessing();
        this.RNNoiseProcessor = null;
        console.warn('RNNoiseProcessor already initialized, stopping previous instance.');
    }
}
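Editor's note: taken together with NodeProcessor.js above, enabling or disabling RNNoise at runtime is a small message exchange rather than a renegotiation. A hedged summary of the flow, using only the message names that appear in this diff (illustrative, not part of the commit):

// 1. User flips the switch: NodeProcessor.js persists mic_noise_suppression and
//    calls RNNoiseProcessor.toggleNoiseSuppression().
// 2. toggleNoiseSuppression() notifies the worklet, which flips this.enabled and
//    either denoises frames or passes audio through:
workletNode.port.postMessage({ type: 'enable', enabled: true });
// 3. Worklet startup: the worklet posts { type: 'request-wasm' }, the main thread
//    replies with { type: 'sync-module', jsContent }, and the worklet answers
//    { type: 'wasm-ready' } or { type: 'wasm-error', error }.
// 4. During processing the worklet also reports { type: 'vad', probability, isSpeech }
//    and { type: 'volume', original, processed }.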
// ####################################################
// AUDIO/VIDEO/SCREEN CONSTRAINTS
// ####################################################

getAudioConstraints(deviceId) {
    const audioConstraints = {};
    if (deviceId) {
        audioConstraints.deviceId = deviceId;
    }
    return {
        audio: {
            autoGainControl: switchAutoGainControl.checked,
            echoCancellation: switchNoiseSuppression.checked,
            noiseSuppression: switchEchoCancellation.checked,
            sampleRate: parseInt(sampleRateSelect.value),
            sampleSize: parseInt(sampleSizeSelect.value),
            channelCount: parseInt(channelCountSelect.value),
            latency: parseInt(micLatencyRange.value),
            volume: parseInt(micVolumeRange.value / 100),
            deviceId: deviceId,
        },
        audio: audioConstraints,
        video: false,
    };
}
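Editor's note: the hunk above replaces the detailed browser-level constraints with a minimal object carrying only the device id, since denoising now happens in the worklet path. An illustrative sketch of how the simplified constraints would be consumed (not part of the commit; `rc` is the RoomClient instance name used in Room.js):

const constraints = rc.getAudioConstraints(microphoneSelect.value);
// -> { audio: { deviceId: <selected device id> }, video: false }

let stream = await navigator.mediaDevices.getUserMedia(constraints);
// produce() then runs the raw stream through RNNoise before creating the track:
// stream = await rc.getRNNoiseSuppressionStream(stream);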
@@ -3390,6 +3447,7 @@ class RoomClient {
    if (VideoAI.active) this.stopSession();
    if (this.rtmpFilestreamer) this.stopRTMP();
    if (this.rtmpUrlstreamer) this.stopRTMPfromURL();
    if (this.RNNoiseProcessor) this.disableRNNoiseSuppression();

    const clean = () => {
        this._isConnected = false;

@@ -119,6 +119,7 @@
<script defer src="../js/Transcription.js"></script>
<script defer src="../js/VideoGrid.js"></script>
<script defer src="../js/VirtualBackground.js"></script>
<script defer src="../js/NodeProcessor.js"></script>
<script defer src="../js/RoomClient.js"></script>
<script defer src="../js/Room.js"></script>
<script defer src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>

@@ -530,181 +531,28 @@ access to use this app.
<div class="volume-bar"></div>
</div>
<br />
<div class="dropdown">
    <button
        class="dropdown-toggle btn-custom"
        type="button"
        id="micOptionsButton"
        data-bs-toggle="dropdown"
        aria-expanded="false"
    >
        <i class="fas fa-screwdriver-wrench"></i>
        <p>Advanced options</p>
    </button>
    <div class="dropdown-menu text-start" aria-labelledby="micOptionsButton">
        <!-- onclick="event.stopPropagation()" -->
        <table class="settingsTable">
            <tr id="autoGainControlButton">
                <td>
                    <div class="title">
                        <i class="fas fa-ear-listen"></i>
                        <p>Auto gain control</p>
                    </div>
                </td>
                <td>
                    <div class="form-check form-switch form-switch-md">
                        <input
                            id="switchAutoGainControl"
                            class="form-check-input"
                            type="checkbox"
                        />
                    </div>
                </td>
            </tr>
            <tr id="echoCancellationButton">
                <td>
                    <div class="title">
                        <i class="fas fa-ear-listen"></i>
                        <p>Echo cancellation</p>
                    </div>
                </td>
                <td>
                    <div class="form-check form-switch form-switch-md">
                        <input
                            id="switchEchoCancellation"
                            class="form-check-input"
                            type="checkbox"
                            checked
                        />
                    </div>
                </td>
            </tr>
            <tr id="noiseSuppressionButton">
                <td>
                    <div class="title">
                        <i class="fas fa-ear-listen"></i>
                        <p>Noise suppression</p>
                    </div>
                </td>
                <td>
                    <div class="form-check form-switch form-switch-md">
                        <input
                            id="switchNoiseSuppression"
                            class="form-check-input"
                            type="checkbox"
                            checked
                        />
                    </div>
                </td>
            </tr>
            <tr id="sampleRateButton">
                <td>
                    <div class="title">
                        <i class="fas fa-microphone-lines"></i>
                        <p>Sample rate</p>
                    </div>
                </td>
                <td>
                    <select
                        id="sampleRateSelect"
                        class="form-select form-select-sm text-light bg-dark"
                    >
                        <option value="48000">48000 Hz</option>
                        <option value="44100">44100 Hz</option>
                    </select>
                </td>
            </tr>
            <tr id="sampleSizeButton">
                <td>
                    <div class="title">
                        <i class="fas fa-microphone-lines"></i>
                        <p>Sample size</p>
                    </div>
                </td>
                <td>
                    <select
                        id="sampleSizeSelect"
                        class="form-select form-select-sm text-light bg-dark"
                    >
                        <option value="16">16 bits</option>
                        <option value="32">32 bits</option>
                    </select>
                </td>
            </tr>
            <tr id="channelCountButton">
                <td>
                    <div class="title">
                        <i class="fas fa-microphone-lines"></i>
                        <p>Channel count</p>
                    </div>
                </td>
                <td>
                    <select
                        id="channelCountSelect"
                        class="form-select form-select-sm text-light bg-dark"
                    >
                        <option value="1">1 (mono)</option>
                        <option value="2">2 (stereo)</option>
                    </select>
                </td>
            </tr>
            <tr id="micLatencyButton">
                <td>
                    <div class="title">
                        <i class="fab fa-audible"></i>
                        <p>Latency</p>
                    </div>
                </td>
                <td>
                    <div class="title">
                        <input
                            type="range"
                            class="form-range"
                            id="micLatencyRange"
                            name="latency"
                            min="10"
                            max="1000"
                            value="50"
                            step="10"
                        />
                        <p><span id="micLatencyValue">50</span> ms</p>
                    </div>
                </td>
            </tr>
            <tr id="micVolumeButton">
                <td>
                    <div class="title">
                        <i class="fas fa-volume-high"></i>
                        <p>Volume</p>
                    </div>
                </td>
                <td>
                    <div>
                        <input
                            type="range"
                            class="form-range"
                            id="micVolumeRange"
                            name="volume"
                            min="0"
                            max="100"
                            value="100"
                            step="10"
                        />
                        <p><span id="micVolumeValue">100</span> %</p>
                    </div>
                </td>
            </tr>
            <tr>
                <td>
                    <button id="applyAudioOptionsButton" class="fas fa-check">
                        <p>Apply options</p>
                    </button>
                </td>
                <td></td>
            </tr>
        </table>
    </div>
<div id="micOptionsButton">
    <table class="settingsTable">
        <tr id="noiseSuppressionButton">
            <td>
                <div id="labelNoiseSuppression" class="title">
                    <i class="fas fa-ear-listen"></i>
                    <p>Noise Suppression</p>
                </div>
            </td>
            <td>
                <div class="form-check form-switch form-switch-md">
                    <input
                        id="switchNoiseSuppression"
                        class="form-check-input"
                        type="checkbox"
                    />
                </div>
            </td>
        </tr>
    </table>
</div>
<br />
<div id="speakerSelectDiv">
    <hr />
    <div class="title">