[mirotalksfu] - #194 Improvements

This commit is contained in:
Miroslav Pejic
2025-02-19 12:21:19 +01:00
parent f9b89fe212
commit 2cd92e6ac1
6 changed files with 103 additions and 90 deletions

View file

@@ -58,7 +58,7 @@ dev dependencies: {
* @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
* @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
* @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.7.50
+ * @version 1.7.51
*
*/

View file

@@ -1,6 +1,6 @@
{
"name": "mirotalksfu",
"version": "1.7.50",
"version": "1.7.51",
"description": "WebRTC SFU browser-based video calls",
"main": "Server.js",
"scripts": {

View file

@@ -64,7 +64,7 @@ let BRAND = {
},
about: {
imageUrl: '../images/mirotalk-logo.gif',
- title: '<strong>WebRTC SFU v1.7.50</strong>',
+ title: '<strong>WebRTC SFU v1.7.51</strong>',
html: `
<button
id="support-button"

View file

@@ -11,7 +11,7 @@ if (location.href.substr(0, 5) !== 'https') location.href = 'https' + location.h
* @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
* @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
* @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.7.50
+ * @version 1.7.51
*
*/
@@ -5219,15 +5219,15 @@ async function loadVirtualBackgroundSettings() {
if (!MediaStreamTrackProcessorSupported) return;
const savedSettings = localStorage.getItem('virtualBackgroundSettings');
- if (savedSettings) {
- const { blurLevel, imageUrl } = JSON.parse(savedSettings);
- if (blurLevel) {
- console.log('-------> Apply Blur');
- await applyVirtualBackground(initVideo, initStream, blurLevel);
- } else if (imageUrl) {
- console.log('-------> Apply Virtual background');
- await applyVirtualBackground(initVideo, initStream, null, imageUrl);
- }
+ if (!savedSettings) return;
+ const { blurLevel, imageUrl } = JSON.parse(savedSettings);
+ if (blurLevel) {
+ await applyVirtualBackground(initVideo, initStream, blurLevel);
+ } else if (imageUrl) {
+ await applyVirtualBackground(initVideo, initStream, null, imageUrl);
+ }
}
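For context, the settings object being parsed here has the shape { blurLevel, imageUrl } under the localStorage key 'virtualBackgroundSettings'. The diff only shows the read side; a hypothetical write-side counterpart that would produce that shape (not part of this commit, the function name is illustrative):

    // Hypothetical helper: persist the virtual background choice so that
    // loadVirtualBackgroundSettings() can restore it on the next join.
    function saveVirtualBackgroundSettings({ blurLevel = null, imageUrl = null } = {}) {
        localStorage.setItem('virtualBackgroundSettings', JSON.stringify({ blurLevel, imageUrl }));
    }

    // Usage:
    // saveVirtualBackgroundSettings({ blurLevel: 10 });        // blur effect
    // saveVirtualBackgroundSettings({ imageUrl: 'bg.png' });   // image background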
@@ -5269,7 +5269,7 @@ function showAbout() {
position: 'center',
imageUrl: BRAND.about?.imageUrl && BRAND.about.imageUrl.trim() !== '' ? BRAND.about.imageUrl : image.about,
customClass: { image: 'img-about' },
- title: BRAND.about?.title && BRAND.about.title.trim() !== '' ? BRAND.about.title : 'WebRTC SFU v1.7.50',
+ title: BRAND.about?.title && BRAND.about.title.trim() !== '' ? BRAND.about.title : 'WebRTC SFU v1.7.51',
html: `
<br />
<div id="about">

View file

@@ -9,7 +9,7 @@
* @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
* @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
* @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.7.50
+ * @version 1.7.51
*
*/

View file

@@ -2,11 +2,17 @@ class VirtualBackground {
static instance = null;
constructor() {
+ // Ensure only one instance of VirtualBackground exists
if (VirtualBackground.instance) {
return VirtualBackground.instance;
}
VirtualBackground.instance = this;
+ this.resetState();
+ }
+ resetState() {
+ // Reset all necessary state variables
this.segmentation = null;
this.initialized = false;
this.pendingFrames = [];
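The constructor refactor above is the classic JavaScript singleton-with-reset shape: returning an object from a constructor makes `new` yield that object, so every `new VirtualBackground()` resolves to the same instance, and all mutable state now funnels through resetState(). A runnable sketch of the same pattern (field list trimmed to the fields visible in this hunk):

    class Singleton {
        static instance = null;

        constructor() {
            // Returning an existing object from a constructor makes `new` yield it.
            if (Singleton.instance) return Singleton.instance;
            Singleton.instance = this;
            this.resetState();
        }

        resetState() {
            // Single place to (re)initialize all mutable state.
            this.segmentation = null;
            this.initialized = false;
            this.pendingFrames = [];
        }
    }

    console.log(new Singleton() === new Singleton()); // true, same shared instance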
@@ -20,6 +26,7 @@ class VirtualBackground {
}
async initializeSegmentation() {
+ // Initialize the segmentation model if not already done
if (this.initialized) return;
try {
@@ -40,11 +47,15 @@ class VirtualBackground {
}
handleSegmentationResults(results) {
+ // Handle the segmentation results by processing the next frame
const pending = this.pendingFrames.shift();
if (!pending || !results?.segmentationMask) return;
const { videoFrame, controller, imageBitmap, maskHandler } = pending;
this.processFrame(videoFrame, controller, imageBitmap, maskHandler, results.segmentationMask);
}
processFrame(videoFrame, controller, imageBitmap, maskHandler, segmentationMask) {
try {
const canvas = new OffscreenCanvas(videoFrame.displayWidth, videoFrame.displayHeight);
const ctx = canvas.getContext('2d');
@@ -53,60 +64,67 @@ class VirtualBackground {
ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
// Apply mask processing
- maskHandler(ctx, canvas, results.segmentationMask, imageBitmap);
+ maskHandler(ctx, canvas, segmentationMask, imageBitmap);
- // Create new video frame
+ // Create new video frame with the processed content
const processedFrame = new VideoFrame(canvas, {
timestamp: videoFrame.timestamp,
alpha: 'keep',
});
+ // Enqueue the processed frame to continue the stream
controller.enqueue(processedFrame);
} catch (error) {
console.error('❌ Frame processing error:', error);
} finally {
- // Close frames after processing
- if (videoFrame) videoFrame.close();
- if (imageBitmap) imageBitmap.close();
+ // Close frames after processing to release resources
+ videoFrame?.close();
+ imageBitmap?.close();
}
}
async processStreamWithSegmentation(videoTrack, maskHandler) {
- // Stop any existing processor
+ // Stop any existing processor before starting a new one
await this.stopCurrentProcessor();
// Initialize segmentation if not already done
await this.initializeSegmentation();
- // Create new processor and generator
+ // Create new processor and generator for stream transformation
const processor = new MediaStreamTrackProcessor({ track: videoTrack });
const generator = new MediaStreamTrackGenerator({ kind: 'video' });
const transformer = new TransformStream({
transform: async (videoFrame, controller) => {
+ if (!this.segmentation || !this.initialized) {
+ console.warn('⚠️ Segmentation is not initialized, skipping frame.');
+ videoFrame?.close();
+ return;
+ }
try {
// Create image bitmap from video frame
const imageBitmap = await createImageBitmap(videoFrame);
- this.pendingFrames.push({
- videoFrame,
- controller,
- imageBitmap,
- maskHandler,
- });
+ if (!imageBitmap) {
+ console.warn('⚠️ Failed to create imageBitmap, skipping frame.');
+ videoFrame?.close();
+ return;
+ }
+ // Queue the frame for segmentation processing
+ this.pendingFrames.push({ videoFrame, controller, imageBitmap, maskHandler });
+ // Send the image to the segmentation model for processing
await this.segmentation.send({ image: imageBitmap });
} catch (error) {
console.error('❌ Frame transformation error:', error);
videoFrame.close();
+ return; // Ensure the frame is closed and processing stops
+ } finally {
+ // Close the video frame after processing
+ videoFrame?.close();
+ }
},
- flush: () => {
- // Cleanup any remaining resources
- this.pendingFrames.forEach((frame) => {
- if (frame.videoFrame) frame.videoFrame.close();
- if (frame.imageBitmap) frame.imageBitmap.close();
- });
- this.pendingFrames = [];
- },
+ flush: () => this.cleanPendingFrames(), // Clean up any pending frames when the stream ends
});
// Store active streams
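For orientation, processStreamWithSegmentation() builds an insertable-streams pipeline: MediaStreamTrackProcessor exposes the track's decoded VideoFrames as a ReadableStream, the TransformStream above rewrites each frame, and MediaStreamTrackGenerator turns the result back into a video track. A stripped-down sketch with a trivial pass-through transform instead of segmentation (these are Chromium-only APIs, hence the MediaStreamTrackProcessorSupported guard seen elsewhere in the code):

    function pipeTrackThroughTransform(videoTrack) {
        const processor = new MediaStreamTrackProcessor({ track: videoTrack });
        const generator = new MediaStreamTrackGenerator({ kind: 'video' });

        const transformer = new TransformStream({
            transform(videoFrame, controller) {
                // The real code redraws the frame onto a canvas here;
                // this sketch just forwards it untouched.
                controller.enqueue(videoFrame);
            },
        });

        // Frames flow: camera track -> transform -> generated track.
        processor.readable
            .pipeThrough(transformer)
            .pipeTo(generator.writable)
            .catch(() => { /* tear down on pipeline failure */ });

        return new MediaStream([generator]); // generator is itself a MediaStreamTrack
    }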
@@ -114,106 +132,95 @@ class VirtualBackground {
this.activeGenerator = generator;
this.isProcessing = true;
- // Setup error handling
- const cleanup = () => {
- this.stopCurrentProcessor().catch(() => {});
- };
- // Start processing pipeline
+ // Start the processing pipeline
processor.readable
.pipeThrough(transformer)
.pipeTo(generator.writable)
- .catch((error) => {
- //console.error('❌ Processing pipeline error:', error);
- cleanup();
- });
+ .catch(() => this.stopCurrentProcessor());
return new MediaStream([generator]);
}
+ cleanPendingFrames() {
+ // Close all pending frames to release resources
+ while (this.pendingFrames.length) {
+ const { videoFrame } = this.pendingFrames.pop();
+ videoFrame?.close();
+ }
+ }
async stopCurrentProcessor() {
+ // Stop any ongoing processor and clean up resources
if (!this.activeProcessor) return;
+ this.isProcessing = false;
+ this.cleanPendingFrames();
try {
- // Abort the writable stream first
- if (this.activeGenerator?.writable) {
- await this.activeGenerator.writable.abort('Processing stopped').catch(() => {});
+ // Abort the writable stream if it's not locked
+ if (this.activeGenerator?.writable && !this.activeGenerator.writable.locked) {
+ await this.activeGenerator.writable.abort('Processing stopped');
}
- // Cancel the readable stream if not locked
+ // Cancel the readable stream if it's not locked
if (this.activeProcessor?.readable && !this.activeProcessor.readable.locked) {
- await this.activeProcessor.readable.cancel('Processing stopped').catch(() => {});
+ await this.activeProcessor.readable.cancel('Processing stopped');
}
- // Cleanup pending frames
- this.pendingFrames.forEach((frame) => {
- frame.videoFrame?.close();
- frame.imageBitmap?.close();
- });
- this.pendingFrames = [];
console.log('✅ Processor successfully stopped');
} catch (error) {
console.error('❌ Processor shutdown error:', error);
} finally {
+ // Reset active processor and generator
this.activeProcessor = null;
this.activeGenerator = null;
this.isProcessing = false;
}
}
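The .locked checks introduced above matter because abort() and cancel() reject with a TypeError while a stream is locked to an active pipe, which is exactly the state pipeThrough()/pipeTo() leave the streams in. A minimal sketch of the guarded shutdown, using a hypothetical processor/generator pair as in the pipeline sketch earlier:

    async function stopPipeline(processor, generator) {
        // Only abort/cancel streams that are not locked to a pipe;
        // calling these on a locked stream rejects with a TypeError.
        if (generator?.writable && !generator.writable.locked) {
            await generator.writable.abort('Processing stopped');
        }
        if (processor?.readable && !processor.readable.locked) {
            await processor.readable.cancel('Processing stopped');
        }
    }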
async applyBlurToWebRTCStream(videoTrack, blurLevel = 10) {
- const maskHandler = async (ctx, canvas, mask, imageBitmap) => {
- try {
- // Step 1: Apply the mask to keep the person in focus
- ctx.save();
- ctx.globalCompositeOperation = 'destination-in';
- ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
- ctx.restore();
+ // Handler for applying blur effect to the background
+ const maskHandler = (ctx, canvas, mask, imageBitmap) => {
+ // Keep only the person using the segmentation mask
+ ctx.save();
+ ctx.globalCompositeOperation = 'destination-in';
+ ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
+ ctx.restore();
- // Step 2: Draw the background (it will be blurred later)
- ctx.save();
- ctx.globalCompositeOperation = 'destination-over';
- ctx.filter = `blur(${blurLevel}px)`; // Apply blur to the entire canvas
- ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
- ctx.restore();
- // Step 3: Redraw the person in focus on top of the blurred background
- ctx.save();
- ctx.globalCompositeOperation = 'destination-over'; // Ensure the person is on top
- ctx.filter = 'none'; // Reset filter to remove blur on the person
- ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
- ctx.restore();
- } catch (error) {
- console.error('❌ Error in maskHandler:', error);
- }
+ // Apply blur to background and draw image behind the person
+ ctx.save();
+ ctx.globalCompositeOperation = 'destination-over';
+ ctx.filter = `blur(${blurLevel}px)`;
+ ctx.drawImage(imageBitmap, 0, 0, canvas.width, canvas.height);
+ ctx.restore();
};
console.log('✅ Apply Blur.');
return this.processStreamWithSegmentation(videoTrack, maskHandler);
}
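The rewritten blur handler is two compositing passes over a canvas that already holds the current frame: destination-in with the segmentation mask keeps only the person, then destination-over paints a blurred copy of the frame behind them (the old third "redraw the person" pass was effectively redundant, since destination-over only fills pixels that are still transparent). A self-contained sketch of those two passes; the caller supplies a canvas pre-drawn with the frame, the mask, and the frame bitmap, as in processFrame():

    function blurBackground(ctx, canvas, mask, frameBitmap, blurLevel = 10) {
        // Pass 1: keep only the person; the mask's alpha erases the rest.
        ctx.save();
        ctx.globalCompositeOperation = 'destination-in';
        ctx.drawImage(mask, 0, 0, canvas.width, canvas.height);
        ctx.restore();

        // Pass 2: fill the now-transparent background with a blurred frame.
        ctx.save();
        ctx.globalCompositeOperation = 'destination-over';
        ctx.filter = `blur(${blurLevel}px)`;
        ctx.drawImage(frameBitmap, 0, 0, canvas.width, canvas.height);
        ctx.restore();
    }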
async applyVirtualBackgroundToWebRTCStream(videoTrack, imageUrl) {
+ // Determine if the background is a GIF
const isGif = imageUrl.endsWith('.gif') || imageUrl.startsWith('data:image/gif');
- let background;
- isGif ? (background = await this.loadGifImage(imageUrl)) : (background = await this.loadImage(imageUrl));
+ let background = isGif ? await this.loadGifImage(imageUrl) : await this.loadImage(imageUrl);
+ // Handler for applying virtual background
const maskHandler = (ctx, canvas, mask) => {
// Apply person mask
ctx.globalCompositeOperation = 'destination-in';
ctx.drawImage(mask, 0, 0);
- // Draw background
+ // Draw background (GIF or static image)
ctx.globalCompositeOperation = 'destination-over';
isGif && this.currentGifFrame
? ctx.drawImage(this.currentGifFrame, 0, 0, canvas.width, canvas.height)
: ctx.drawImage(background, 0, 0, canvas.width, canvas.height);
};
console.log('✅ Apply Virtual Background.');
return this.processStreamWithSegmentation(videoTrack, maskHandler);
}
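Same compositing idea for image backgrounds: destination-in applies the person mask, then destination-over paints the chosen background behind them, preferring the live gifler frame when the source is an animated GIF. A compact sketch of the source selection; currentGifFrame stands in for the class field updated by animateGifBackground():

    function drawBackgroundBehindPerson(ctx, canvas, imageUrl, staticImage, currentGifFrame) {
        // GIF detection as in the diff: file extension or data-URL MIME type.
        const isGif = imageUrl.endsWith('.gif') || imageUrl.startsWith('data:image/gif');

        // destination-over only paints where the canvas is transparent,
        // i.e. everywhere outside the already-masked person.
        ctx.globalCompositeOperation = 'destination-over';
        const source = isGif && currentGifFrame ? currentGifFrame : staticImage;
        ctx.drawImage(source, 0, 0, canvas.width, canvas.height);
    }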
async loadImage(src) {
+ // Load an image from the provided source URL
return new Promise((resolve, reject) => {
const img = new Image();
img.crossOrigin = 'anonymous';
@@ -224,8 +231,14 @@ class VirtualBackground {
}
async loadGifImage(src) {
+ // Load and animate a GIF using gifler
return new Promise((resolve, reject) => {
try {
+ if (this.gifAnimation) {
+ this.gifAnimation.stop(); // Stop previous animation
+ this.gifAnimation = null;
+ }
this.gifCanvas = document.createElement('canvas');
this.gifContext = this.gifCanvas.getContext('2d');
@@ -243,9 +256,9 @@ class VirtualBackground {
}
animateGifBackground() {
+ // Continuously update the GIF frame for animation
if (!this.gifAnimation) return;
- // Updates the current GIF frame at each animation step
const updateFrame = () => {
if (this.gifAnimation && this.gifCanvas) {
this.currentGifFrame = this.gifCanvas;