[mirotalksfu] - #194 Refactoring
@@ -58,7 +58,7 @@ dev dependencies: {
  * @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
  * @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
  * @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.7.49
+ * @version 1.7.50
  *
  */

@@ -1,6 +1,6 @@
 {
     "name": "mirotalksfu",
-    "version": "1.7.49",
+    "version": "1.7.50",
     "description": "WebRTC SFU browser-based video calls",
     "main": "Server.js",
     "scripts": {
@@ -79,7 +79,7 @@
     "mediasoup-client": "3.8.1",
     "ngrok": "^5.0.0-beta.2",
     "nodemailer": "^6.10.0",
-    "openai": "^4.85.1",
+    "openai": "^4.85.2",
     "qs": "6.14.0",
     "sanitize-filename": "^1.6.3",
     "socket.io": "4.8.1",

@@ -64,7 +64,7 @@ let BRAND = {
     },
     about: {
         imageUrl: '../images/mirotalk-logo.gif',
-        title: '<strong>WebRTC SFU v1.7.49</strong>',
+        title: '<strong>WebRTC SFU v1.7.50</strong>',
         html: `
             <button
                 id="support-button"

@@ -11,7 +11,7 @@ if (location.href.substr(0, 5) !== 'https') location.href = 'https' + location.h
  * @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
  * @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
  * @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.7.49
+ * @version 1.7.50
  *
  */

@@ -190,10 +190,11 @@ const speakerSelect = getId('speakerSelect');
 const initSpeakerSelect = getId('initSpeakerSelect');
 
 // ####################################################
-// VIRTUAL BACKGROUND DEFAULT IMAGES
+// VIRTUAL BACKGROUND DEFAULT IMAGES AND INIT CLASS
 // ####################################################
 
 const virtualBackgrounds = Object.values(image.virtualBackground);
+const virtualBackground = new VirtualBackground();
 
 // ####################################################
 // DYNAMIC SETTINGS
@@ -5129,7 +5130,6 @@ function showImageSelector() {
 
 async function applyVirtualBackground(videoElement, stream, blurLevel, backgroundImage) {
     const videoTrack = stream.getVideoTracks()[0];
-    const virtualBackground = new VirtualBackground();
 
     if (blurLevel) {
         videoElement.srcObject = await virtualBackground.applyBlurToWebRTCStream(videoTrack, blurLevel);
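The two client.js hunks above move VirtualBackground construction out of the per-call path: a single module-level instance is created once, and (as the VirtualBackground.js hunk further down shows) the constructor hands back the existing instance on every later `new`. A minimal sketch of that guard, with an illustrative class name:

    'use strict';

    class SharedService {
        static instance = null;

        constructor() {
            // Later `new SharedService()` calls reuse the first instance,
            // so state like an initialized segmentation model is shared
            if (SharedService.instance) return SharedService.instance;
            SharedService.instance = this;
        }
    }

    console.log(new SharedService() === new SharedService()); // true
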
@@ -5269,7 +5269,7 @@ function showAbout() {
         position: 'center',
         imageUrl: BRAND.about?.imageUrl && BRAND.about.imageUrl.trim() !== '' ? BRAND.about.imageUrl : image.about,
         customClass: { image: 'img-about' },
-        title: BRAND.about?.title && BRAND.about.title.trim() !== '' ? BRAND.about.title : 'WebRTC SFU v1.7.49',
+        title: BRAND.about?.title && BRAND.about.title.trim() !== '' ? BRAND.about.title : 'WebRTC SFU v1.7.50',
         html: `
         <br />
         <div id="about">

@@ -9,7 +9,7 @@
  * @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
  * @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
  * @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.7.49
+ * @version 1.7.50
  *
  */

@@ -1490,7 +1490,6 @@ class RoomClient {
         // Handle Virtual Background and Blur using MediaPipe
         if (video && MediaStreamTrackProcessorSupported) {
             const videoTrack = stream.getVideoTracks()[0];
-            const virtualBackground = new VirtualBackground();
 
             if (virtualBackgroundBlurLevel) {
                 // Apply blur before sending it to WebRTC stream

@@ -1,166 +1,215 @@
 'use strict';
 
 class VirtualBackground {
+    static instance = null;
 
     constructor() {
+        if (VirtualBackground.instance) {
+            return VirtualBackground.instance;
+        }
+        VirtualBackground.instance = this;
+
         this.segmentation = null;
         this.initialized = false;
-        this.gifCanvas = null;
-        this.gifCtx = null;
+        this.pendingFrames = [];
+        this.activeProcessor = null;
+        this.activeGenerator = null;
+        this.isProcessing = false;
+        this.gifAnimation = null;
+        this.currentGifFrame = null;
+        this.gifCanvas = null;
+        this.gifContext = null;
     }
 
     async initializeSegmentation() {
-        if (!this.segmentation) {
+        if (this.initialized) return;
+
+        try {
             this.segmentation = new SelfieSegmentation({
                 locateFile: (file) => `https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation/${file}`,
             });
             this.segmentation.setOptions({ modelSelection: 1 });
-        }
-
-        if (!this.initialized) {
-            try {
+            this.segmentation.onResults(this.handleSegmentationResults.bind(this));
+
             await this.segmentation.initialize();
             this.initialized = true;
             console.log('✅ Segmentation initialized successfully.');
         } catch (error) {
             console.error('❌ Error initializing segmentation:', error);
+            throw error;
         }
     }
 
+    handleSegmentationResults(results) {
+        const pending = this.pendingFrames.shift();
+        if (!pending || !results?.segmentationMask) return;
+
+        const { videoFrame, controller, imageBitmap, maskHandler } = pending;
+
+        try {
+            const canvas = new OffscreenCanvas(videoFrame.displayWidth, videoFrame.displayHeight);
+            const ctx = canvas.getContext('2d');
+
+            // Apply original frame
+            ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
+
+            // Apply mask processing
+            maskHandler(ctx, canvas, results.segmentationMask, imageBitmap);
+
+            // Create new video frame
+            const processedFrame = new VideoFrame(canvas, {
+                timestamp: videoFrame.timestamp,
+                alpha: 'keep',
+            });
+
+            controller.enqueue(processedFrame);
+        } catch (error) {
+            console.error('❌ Frame processing error:', error);
+        } finally {
+            // Close frames after processing
+            if (videoFrame) videoFrame.close();
+            if (imageBitmap) imageBitmap.close();
+        }
+    }
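handleSegmentationResults exists because MediaPipe's SelfieSegmentation reports results through one shared onResults callback rather than returning a promise per send(). The refactor therefore queues each frame in pendingFrames before calling send(), and the callback pairs every result with the oldest queued frame. A stripped-down sketch of that FIFO request/response pairing, with illustrative names:

    const pending = [];

    // Single global callback, like segmentation.onResults(...)
    function onResult(result) {
        const job = pending.shift();          // pair with the oldest queued frame
        if (job) job.resolve(result);
    }

    // Each request is queued before being sent, like pendingFrames.push(...)
    function process(frame) {
        return new Promise((resolve) => {
            pending.push({ frame, resolve });
            setTimeout(() => onResult(`mask for ${frame}`), 0); // stand-in for the engine
        });
    }

    process('frame-1').then(console.log); // → "mask for frame-1"
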
     async processStreamWithSegmentation(videoTrack, maskHandler) {
+        // Stop any existing processor
+        await this.stopCurrentProcessor();
+
+        // Initialize segmentation if not already done
+        await this.initializeSegmentation();
+
+        // Create new processor and generator
         const processor = new MediaStreamTrackProcessor({ track: videoTrack });
         const generator = new MediaStreamTrackGenerator({ kind: 'video' });
 
-        await this.initializeSegmentation(); // Ensure segmentation is initialized
-
-        if (!this.segmentation) {
-            console.error('❌ Segmentation is still undefined after initialization!');
-            return new MediaStream([videoTrack]); // Fallback to original video stream
-        }
-
-        const transformer = new TransformStream({
-            transform: async (videoFrame, controller) => {
-                try {
-                    const imageBitmap = await createImageBitmap(videoFrame);
-
-                    // Process segmentation
-                    this.segmentation.onResults(async (results) => {
-                        if (!results || !results.segmentationMask) {
-                            console.warn('⚠️ Segmentation mask is missing, skipping frame.');
-                            videoFrame.close();
-                            imageBitmap.close();
-                            return;
-                        }
-
-                        const mask = results.segmentationMask;
-                        const canvas = new OffscreenCanvas(videoFrame.displayWidth, videoFrame.displayHeight);
-                        const ctx = canvas.getContext('2d');
-
-                        // Draw the original video frame
-                        ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
-
-                        // Call the mask handler (either for blur or background)
-                        await maskHandler(ctx, canvas, mask, imageBitmap);
-
-                        // Convert back to VideoFrame
-                        const newFrame = new VideoFrame(canvas, { timestamp: videoFrame.timestamp });
-                        controller.enqueue(newFrame);
-
-                        // Close VideoFrame & imageBitmap after processing
-                        videoFrame.close();
-                        imageBitmap.close();
-                    });
-
-                    // Send frame to MediaPipe
-                    await this.segmentation.send({ image: imageBitmap }).catch((err) => {
-                        console.error('❌ Segmentation processing failed:', err);
-                        return null;
-                    });
-                } catch (error) {
-                    console.error('❌ Error in processSegmentation:', error);
-                    videoFrame.close();
-                }
-            },
-        });
-
-        processor.readable.pipeThrough(transformer).pipeTo(generator.writable);
+        const processSegmentation = async (videoFrame, controller) => {
+            try {
+                const imageBitmap = await createImageBitmap(videoFrame);
+                this.pendingFrames.push({
+                    videoFrame,
+                    controller,
+                    imageBitmap,
+                    maskHandler,
+                });
+                await this.segmentation.send({ image: imageBitmap });
+            } catch (error) {
+                console.error('❌ Frame transformation error:', error);
+                videoFrame.close();
+                return; // Ensure the frame is closed and processing stops
+            }
+        };
+
+        const transformer = new TransformStream({
+            async transform(videoFrame, controller) {
+                await processSegmentation(videoFrame, controller);
+            },
+            flush: () => {
+                // Cleanup any remaining resources
+                this.pendingFrames.forEach((frame) => {
+                    if (frame.videoFrame) frame.videoFrame.close();
+                    if (frame.imageBitmap) frame.imageBitmap.close();
+                });
+                this.pendingFrames = [];
+            },
+        });
+
+        // Store active streams
+        this.activeProcessor = processor;
+        this.activeGenerator = generator;
+        this.isProcessing = true;
+
+        // Setup error handling
+        const cleanup = () => {
+            this.stopCurrentProcessor().catch(() => {});
+        };
+
+        // Start processing pipeline
+        processor.readable
+            .pipeThrough(transformer)
+            .pipeTo(generator.writable)
+            .catch((error) => {
+                //console.error('❌ Processing pipeline error:', error);
+                cleanup();
+            });
 
         return new MediaStream([generator]);
     }
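processStreamWithSegmentation is built on the insertable-streams API (MediaStreamTrackProcessor / MediaStreamTrackGenerator, currently Chromium-only): the processor exposes the camera track as a ReadableStream of VideoFrame objects, a TransformStream rewrites each frame, and the generator turns the transformed frames back into a regular track. A minimal pass-through pipeline, assuming support has already been checked (as the client does with MediaStreamTrackProcessorSupported):

    function passthroughTrack(videoTrack) {
        const processor = new MediaStreamTrackProcessor({ track: videoTrack });
        const generator = new MediaStreamTrackGenerator({ kind: 'video' });

        const transformer = new TransformStream({
            transform(videoFrame, controller) {
                // A real handler would paint videoFrame to a canvas and enqueue
                // a new VideoFrame; here every frame passes through untouched
                controller.enqueue(videoFrame);
            },
        });

        processor.readable.pipeThrough(transformer).pipeTo(generator.writable).catch(() => {});
        return new MediaStream([generator]); // a generator is itself a MediaStreamTrack
    }
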
+    async stopCurrentProcessor() {
+        if (!this.activeProcessor) return;
+
+        try {
+            // Abort the writable stream first
+            if (this.activeGenerator?.writable) {
+                await this.activeGenerator.writable.abort('Processing stopped').catch(() => {});
+            }
+
+            // Cancel the readable stream if not locked
+            if (this.activeProcessor?.readable && !this.activeProcessor.readable.locked) {
+                await this.activeProcessor.readable.cancel('Processing stopped').catch(() => {});
+            }
+
+            // Cleanup pending frames
+            this.pendingFrames.forEach((frame) => {
+                frame.videoFrame?.close();
+                frame.imageBitmap?.close();
+            });
+            this.pendingFrames = [];
+
+            console.log('✅ Processor successfully stopped');
+        } catch (error) {
+            console.error('❌ Processor shutdown error:', error);
+        } finally {
+            this.activeProcessor = null;
+            this.activeGenerator = null;
+            this.isProcessing = false;
+        }
+    }
 
     async applyBlurToWebRTCStream(videoTrack, blurLevel = 10) {
         const maskHandler = async (ctx, canvas, mask, imageBitmap) => {
-            // Apply the mask to keep the person in focus
-            ctx.save();
-            ctx.globalCompositeOperation = 'destination-in';
-            ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
-            ctx.restore();
-
-            // Save the focused person area
-            const personImageBitmap = await createImageBitmap(canvas);
-
-            // Draw the original video frame again
-            ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
-
-            // Invert the mask to apply the blur to the background
-            ctx.save();
-            ctx.globalCompositeOperation = 'destination-out';
-            ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
-            ctx.restore();
-
-            // Apply the blur effect to the background
-            ctx.filter = `blur(${blurLevel}px)`;
-            ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height);
-
-            // Draw the person back on top of the blurred background
-            ctx.filter = 'none';
-            ctx.globalCompositeOperation = 'destination-over';
-            ctx.drawImage(personImageBitmap, 0, 0, canvas.width, canvas.height);
+            try {
+                // Step 1: Apply the mask to keep the person in focus
+                ctx.save();
+                ctx.globalCompositeOperation = 'destination-in';
+                ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
+                ctx.restore();
+
+                // Step 2: Draw the background (it will be blurred later)
+                ctx.save();
+                ctx.globalCompositeOperation = 'destination-over';
+                ctx.filter = `blur(${blurLevel}px)`; // Apply blur to the entire canvas
+                ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
+                ctx.restore();
+
+                // Step 3: Redraw the person in focus on top of the blurred background
+                ctx.save();
+                ctx.globalCompositeOperation = 'destination-over'; // Ensure the person is on top
+                ctx.filter = 'none'; // Reset filter to remove blur on the person
+                ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
+                ctx.restore();
+            } catch (error) {
+                console.error('❌ Error in maskHandler:', error);
+            }
         };
 
         return this.processStreamWithSegmentation(videoTrack, maskHandler);
     }
 
-    async applyVirtualBackgroundToWebRTCStream(videoTrack, image = 'https://i.postimg.cc/t9PJw5P7/forest.jpg') {
-        const isGif = image.endsWith('.gif') || image.startsWith('data:image/gif');
-
-        const backgroundImage = isGif ? await this.loadGifImage(image) : await this.loadImage(image);
-
-        const maskHandler = async (ctx, canvas, mask, imageBitmap) => {
-            // Apply the mask to keep the person in focus
-            ctx.save();
-            ctx.globalCompositeOperation = 'destination-in';
-            ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
-            ctx.restore();
-
-            // Save the focused person area
-            const personImageBitmap = await createImageBitmap(canvas);
-
-            // If GIF is detected, draw the current animated frame
-            if (isGif) {
-                if (this.currentGifFrame) {
-                    ctx.drawImage(this.currentGifFrame, 0, 0, canvas.width, canvas.height);
-                }
-            } else {
-                // Draw the background image
-                ctx.drawImage(backgroundImage, 0, 0, canvas.width, canvas.height);
-            }
-
-            // Draw the person back on top of the background image
-            ctx.globalCompositeOperation = 'source-over';
-            ctx.drawImage(personImageBitmap, 0, 0, canvas.width, canvas.height);
-        };
-
-        if (isGif) this.animateGifBackground();
-        return this.processStreamWithSegmentation(videoTrack, maskHandler);
-    }
+    async applyVirtualBackgroundToWebRTCStream(videoTrack, imageUrl) {
+        const isGif = imageUrl.endsWith('.gif') || imageUrl.startsWith('data:image/gif');
+        let background;
+
+        isGif ? (background = await this.loadGifImage(imageUrl)) : (background = await this.loadImage(imageUrl));
+
+        const maskHandler = (ctx, canvas, mask) => {
+            // Apply person mask
+            ctx.globalCompositeOperation = 'destination-in';
+            ctx.drawImage(mask, 0, 0);
+
+            // Draw background
+            ctx.globalCompositeOperation = 'destination-over';
+            isGif && this.currentGifFrame
+                ? ctx.drawImage(this.currentGifFrame, 0, 0, canvas.width, canvas.height)
+                : ctx.drawImage(background, 0, 0, canvas.width, canvas.height);
+        };
+
+        return this.processStreamWithSegmentation(videoTrack, maskHandler);
+    }
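With the singleton in place, callers only touch the two apply* helpers; each call runs stopCurrentProcessor() first, so switching from blur to an image background tears down the previous pipeline automatically. A hypothetical usage sketch (the video element id and blur level are illustrative; the forest URL is the old default from the diff; top-level await assumes an ES module context):

    const stream = await navigator.mediaDevices.getUserMedia({ video: true });
    const [videoTrack] = stream.getVideoTracks();
    const vb = new VirtualBackground(); // always the same shared instance

    // Blur the background behind the person
    const preview = document.getElementById('preview'); // hypothetical element
    preview.srcObject = await vb.applyBlurToWebRTCStream(videoTrack, 10);

    // Switch to an image background; the blur pipeline is stopped first
    preview.srcObject = await vb.applyVirtualBackgroundToWebRTCStream(videoTrack, 'https://i.postimg.cc/t9PJw5P7/forest.jpg');

    // Revert to the raw camera track
    await vb.stopCurrentProcessor();
    preview.srcObject = stream;
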
@@ -178,11 +227,11 @@ class VirtualBackground {
         return new Promise((resolve, reject) => {
             try {
                 this.gifCanvas = document.createElement('canvas');
-                this.gifCtx = this.gifCanvas.getContext('2d');
+                this.gifContext = this.gifCanvas.getContext('2d');
 
-                gifler(src).get((a) => {
-                    this.gifAnimation = a;
-                    a.animateInCanvas(this.gifCanvas); // Start the animation
+                gifler(src).get((animation) => {
+                    this.gifAnimation = animation;
+                    animation.animateInCanvas(this.gifCanvas); // Start the animation
                     console.log('✅ GIF loaded and animation started.');
                     resolve(this.gifCanvas);
                 });

@@ -198,7 +247,9 @@ class VirtualBackground {
 
         // Updates the current GIF frame at each animation step
         const updateFrame = () => {
+            if (this.gifAnimation && this.gifCanvas) {
                 this.currentGifFrame = this.gifCanvas;
+            }
             requestAnimationFrame(updateFrame);
         };
         updateFrame();
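Both GIF hunks lean on the gifler library: loadGifImage resolves with a canvas that gifler keeps repainting, and the widened requestAnimationFrame loop now publishes that canvas to currentGifFrame only once an animation is actually attached, so the mask handler never samples a canvas that gifler has not started painting. A reduced sketch of the load-then-poll flow, using only the gifler calls that appear above (the GIF URL is illustrative):

    const gifCanvas = document.createElement('canvas');
    let gifAnimation = null;
    let currentGifFrame = null;

    gifler('../images/background.gif').get((animation) => {
        gifAnimation = animation;
        animation.animateInCanvas(gifCanvas); // gifler repaints gifCanvas on every GIF frame
    });

    const updateFrame = () => {
        if (gifAnimation) currentGifFrame = gifCanvas; // publish only once the animation exists
        requestAnimationFrame(updateFrame); // keep polling in sync with the display refresh
    };
    updateFrame();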