[mirotalksfu] - add DeepSeek integration

This commit is contained in:
Miroslav Pejic
2025-05-03 04:25:32 +02:00
parent 0abb7d3f58
commit ddb84d7940
12 changed files with 270 additions and 22 deletions

View File

@@ -48,6 +48,7 @@ module.exports = class Room {
screen_cant_share: false,
chat_cant_privately: false,
chat_cant_chatgpt: false,
chat_cant_deep_seek: false,
media_cant_sharing: false,
};
this.survey = config?.features?.survey;
@@ -489,6 +490,9 @@ module.exports = class Room {
case 'chat_cant_chatgpt':
this._moderator.chat_cant_chatgpt = data.status;
break;
case 'chat_cant_deep_seek':
this._moderator.chat_cant_deep_seek = data.status;
break;
case 'media_cant_sharing':
this._moderator.media_cant_sharing = data.status;
break;

View File

@@ -64,7 +64,7 @@ dev dependencies: {
* @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
* @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
* @author Miroslav Pejic - miroslav.pejic.85@gmail.com
* @version 1.8.36
* @version 1.8.37
*
*/
@@ -1448,6 +1448,7 @@ function startServer() {
mattermost: config.integrations?.mattermost?.enabled ? config.integrations.mattermost : false,
slack: slackEnabled ? config.integrations?.slack : false,
chatGPT: config.integrations?.chatGPT?.enabled ? config.integrations.chatGPT : false,
deepSeek: config.integrations?.deepSeek?.enabled ? config.integrations.deepSeek : false,
email_alerts: config?.integrations?.email?.alert ? config.integrations.email : false,
},
@@ -2397,6 +2398,7 @@ function startServer() {
case 'screen_cant_share':
case 'chat_cant_privately':
case 'chat_cant_chatgpt':
case 'chat_cant_deep_seek':
case 'media_cant_sharing':
room.broadCast(socket.id, 'updateRoomModerator', moderator);
break;
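
A client-side counterpart is not part of this diff; the following is a minimal sketch of how a client might react to the 'updateRoomModerator' broadcast, assuming the payload mirrors the { type, status } pair handled in the switch above (the toggleDeepSeekChat helper is hypothetical):

    // Minimal sketch (assumed client code, not from this commit)
    socket.on('updateRoomModerator', (data) => {
        // Hide or show the DeepSeek chat option when the moderator toggles the flag
        if (data.type === 'chat_cant_deep_seek') {
            toggleDeepSeekChat(!data.status); // hypothetical UI helper
        }
    });
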
@@ -2642,6 +2644,49 @@ function startServer() {
            }
        });
        socket.on('getDeepSeek', async ({ time, room, name, prompt, context }, cb) => {
            if (!roomExists(socket)) return;
            if (!config?.integrations?.deepSeek?.enabled) return cb({ message: 'DeepSeek seems disabled, try later!' });
            try {
                // Add the prompt to the context
                context.push({ role: 'user', content: prompt });
                // Call DeepSeek's API to generate response
                const response = await axios.post(
                    `${config?.integrations?.deepSeek?.basePath}chat/completions`,
                    {
                        model: config?.integrations?.deepSeek?.model,
                        messages: context,
                        max_tokens: config?.integrations?.deepSeek?.max_tokens,
                        temperature: config?.integrations?.deepSeek?.temperature,
                    },
                    {
                        headers: {
                            Authorization: `Bearer ${config?.integrations?.deepSeek?.apiKey}`,
                            'Content-Type': 'application/json',
                        },
                    },
                );
                // Extract message from completion
                const message = response.data.choices[0].message.content.trim();
                // Add response to context
                context.push({ role: 'assistant', content: message });
                // Log conversation details
                log.info('DeepSeek', {
                    time: time,
                    room: room,
                    name: name,
                    context: context,
                });
                // Callback response to client
                cb({ message: message, context: context });
            } catch (error) {
                log.error('DeepSeek', error);
                cb({ message: error.message });
            }
        });
        // https://docs.heygen.com/reference/list-avatars-v2
        socket.on('getAvatarList', async ({}, cb) => {
            if (!config?.integrations?.videoAI?.enabled || !config?.integrations?.videoAI?.apiKey)
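
For reference, a client could call the new handler roughly as sketched below; the 'getDeepSeek' event name, the payload fields, and the { message, context } callback shape come from the server code above, while the concrete values and the logging are illustrative only.

    // Minimal sketch (assumed client code, not from this commit)
    let deepSeekContext = []; // conversation history kept on the client

    socket.emit(
        'getDeepSeek',
        {
            time: new Date().toISOString(), // illustrative timestamp format
            room: 'test-room',
            name: 'guest',
            prompt: 'Summarize the key points of this meeting',
            context: deepSeekContext,
        },
        (response) => {
            // On success the callback carries { message, context }; on error only { message }
            if (response.context) deepSeekContext = response.context;
            console.log('DeepSeek:', response.message);
        },
    );
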

View File

@@ -503,7 +503,7 @@ module.exports = {
*
* Advanced Settings:
* -----------------
* - max_tokens: Maximum response length (default: 1000 tokens)
* - max_tokens: Maximum response length (default: 1024 tokens)
* - temperature: Creativity control (0=strict, 1=creative) (default: 0.7)
*
* Usage Example:
@@ -512,6 +512,7 @@ module.exports = {
* - gpt-3.5-turbo (recommended)
* - gpt-4
* - gpt-4-turbo
* - ...
*
* 2. Temperature Guide:
* - 0.0: Factual responses
@@ -523,8 +524,55 @@ module.exports = {
basePath: process.env.CHATGPT_BASE_PATH || 'https://api.openai.com/v1/',
apiKey: process.env.CHATGPT_API_KEY || '',
model: process.env.CHATGPT_MODEL || 'gpt-3.5-turbo',
max_tokens: parseInt(process.env.CHATGPT_MAX_TOKENS) || 1000,
temperature: parseInt(process.env.CHATGPT_TEMPERATURE) || 0,
max_tokens: parseInt(process.env.CHATGPT_MAX_TOKENS) || 1024,
temperature: parseFloat(process.env.CHATGPT_TEMPERATURE) || 0.7,
},
/**
* DeepSeek Integration Configuration
* ================================
* DeepSeek API integration for AI-powered chat functionality
*
* Setup Instructions:
* ------------------
* 1. Go to https://deepseek.com/
* 2. Create your DeepSeek account
* 3. Generate your API key at https://deepseek.com/account/api-keys
*
* Core Settings:
* -------------
* - enabled : Enable/disable DeepSeek integration [true/false] (default: false)
* - basePath : DeepSeek API endpoint (default: 'https://api.deepseek.com/v1/')
* - apiKey : DeepSeek API secret key (ALWAYS store in .env)
* - model : DeepSeek model version (default: 'deepseek-chat')
*
* Advanced Settings:
* -----------------
* - max_tokens: Maximum response length (default: 1024 tokens)
* - temperature: Creativity control (0=strict, 1=creative) (default: 0.7)
*
* Usage Example:
* -------------
* 1. Supported Models:
* - deepseek-chat (recommended)
* - deepseek-coder
* - deepseek-math
* - deepseek-llm
* - ...
*
* 2. Temperature Guide:
* - 0.0: Factual responses
* - 0.7: Balanced
* - 1.0: Maximum creativity
*
*/
deepSeek: {
enabled: process.env.DEEP_SEEK_ENABLED === 'true',
basePath: process.env.DEEP_SEEK_BASE_PATH || 'https://api.deepseek.com/v1/',
apiKey: process.env.DEEP_SEEK_API_KEY || '',
model: process.env.DEEP_SEEK_MODEL || 'deepseek-chat',
max_tokens: parseInt(process.env.DEEP_SEEK_MAX_TOKENS) || 1024,
temperature: parseFloat(process.env.DEEP_SEEK_TEMPERATURE) || 0.7,
},
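
For reference, the matching .env entries would look like the placeholder sketch below; the variable names come from the config block above, and the values are examples only.

    # Placeholder values only; variable names match the config above
    DEEP_SEEK_ENABLED=true
    DEEP_SEEK_BASE_PATH=https://api.deepseek.com/v1/
    DEEP_SEEK_API_KEY=your_deepseek_api_key_here
    DEEP_SEEK_MODEL=deepseek-chat
    DEEP_SEEK_MAX_TOKENS=1024
    DEEP_SEEK_TEMPERATURE=0.7
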
/**