diff --git a/.env.template b/.env.template
index 2f1f3369..f1d695f4 100644
--- a/.env.template
+++ b/.env.template
@@ -131,8 +131,16 @@ CHATGPT_ENABLED=false # Enable ChatGPT integration (
 CHATGPT_BASE_PATH=https://api.openai.com/v1/ # OpenAI base path
 CHATGPT_API_KEY= # OpenAI API key
 CHATGPT_MODEL=gpt-3.5-turbo # Model to use (gpt-3.5-turbo, gpt-4, etc.)
-CHATGPT_MAX_TOKENS=1000 # Max response tokens
-CHATGPT_TEMPERATURE=0 # Creativity level (0-1)
+CHATGPT_MAX_TOKENS=1024 # Max response tokens
+CHATGPT_TEMPERATURE=0.7 # Creativity level (0-1)
+
+# DeepSeek Integration
+DEEP_SEEK_ENABLED=false # Enable DeepSeek integration (true|false)
+DEEP_SEEK_BASE_PATH=https://api.deepseek.com/v1/ # DeepSeek base path
+DEEP_SEEK_API_KEY= # DeepSeek API key
+DEEP_SEEK_MODEL=deepseek-chat # Model to use (deepseek-chat, deepseek-coder, etc.)
+DEEP_SEEK_MAX_TOKENS=1024 # Max response tokens
+DEEP_SEEK_TEMPERATURE=0.7 # Creativity level (0-1)
 
 # Video AI (HeyGen) Integration
 VIDEOAI_ENABLED=false # Enable video AI avatars (true|false)
diff --git a/app/src/Room.js b/app/src/Room.js
index 5c90b698..e999757e 100644
--- a/app/src/Room.js
+++ b/app/src/Room.js
@@ -48,6 +48,7 @@ module.exports = class Room {
             screen_cant_share: false,
             chat_cant_privately: false,
             chat_cant_chatgpt: false,
+            chat_cant_deep_seek: false,
             media_cant_sharing: false,
         };
         this.survey = config?.features?.survey;
@@ -489,6 +490,9 @@ module.exports = class Room {
             case 'chat_cant_chatgpt':
                 this._moderator.chat_cant_chatgpt = data.status;
                 break;
+            case 'chat_cant_deep_seek':
+                this._moderator.chat_cant_deep_seek = data.status;
+                break;
             case 'media_cant_sharing':
                 this._moderator.media_cant_sharing = data.status;
                 break;
diff --git a/app/src/Server.js b/app/src/Server.js
index 2ec38dd9..f61913c4 100644
--- a/app/src/Server.js
+++ b/app/src/Server.js
@@ -64,7 +64,7 @@ dev dependencies: {
  * @license For commercial or closed source, contact us at license.mirotalk@gmail.com or purchase directly via CodeCanyon
  * @license CodeCanyon: https://codecanyon.net/item/mirotalk-sfu-webrtc-realtime-video-conferences/40769970
  * @author Miroslav Pejic - miroslav.pejic.85@gmail.com
- * @version 1.8.36
+ * @version 1.8.37
  *
  */
 
@@ -1448,6 +1448,7 @@ function startServer() {
                 mattermost: config.integrations?.mattermost?.enabled ? config.integrations.mattermost : false,
                 slack: slackEnabled ? config.integrations?.slack : false,
                 chatGPT: config.integrations?.chatGPT?.enabled ? config.integrations.chatGPT : false,
+                deepSeek: config.integrations?.deepSeek?.enabled ? config.integrations.deepSeek : false,
                 email_alerts: config?.integrations?.email?.alert ? config.integrations.email : false,
             },
 
@@ -2397,6 +2398,7 @@ function startServer() {
             case 'screen_cant_share':
             case 'chat_cant_privately':
             case 'chat_cant_chatgpt':
+            case 'chat_cant_deep_seek':
             case 'media_cant_sharing':
                 room.broadCast(socket.id, 'updateRoomModerator', moderator);
                 break;
@@ -2642,6 +2644,49 @@ function startServer() {
             }
         });
 
+        socket.on('getDeepSeek', async ({ time, room, name, prompt, context }, cb) => {
+            if (!roomExists(socket)) return;
+
+            if (!config?.integrations?.deepSeek?.enabled) return cb({ message: 'DeepSeek seems disabled, try later!' });
+
+            try {
+                // Add the prompt to the context
+                context.push({ role: 'user', content: prompt });
+                // Call DeepSeek's API to generate response
+                const response = await axios.post(
+                    `${config?.integrations?.deepSeek?.basePath}chat/completions`,
+                    {
+                        model: config?.integrations?.deepSeek?.model,
+                        messages: context,
+                        max_tokens: config?.integrations?.deepSeek?.max_tokens,
+                        temperature: config?.integrations?.deepSeek?.temperature,
+                    },
+                    {
+                        headers: {
+                            Authorization: `Bearer ${config?.integrations?.deepSeek?.apiKey}`,
+                            'Content-Type': 'application/json',
+                        },
+                    },
+                );
+                // Extract message from completion
+                const message = response.data.choices[0].message.content.trim();
+                // Add response to context
+                context.push({ role: 'assistant', content: message });
+                // Log conversation details
+                log.info('DeepSeek', {
+                    time: time,
+                    room: room,
+                    name: name,
+                    context: context,
+                });
+                // Callback response to client
+                cb({ message: message, context: context });
+            } catch (error) {
+                log.error('DeepSeek', error);
+                cb({ message: error.message });
+            }
+        });
+
         // https://docs.heygen.com/reference/list-avatars-v2
         socket.on('getAvatarList', async ({}, cb) => {
             if (!config?.integrations?.videoAI?.enabled || !config?.integrations?.videoAI?.apiKey)
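For orientation, here is a minimal client-side sketch of how the new 'getDeepSeek' event could be invoked. The event name, the { time, room, name, prompt, context } payload and the acknowledgement shape follow the server handler above; the helper name, the connected socket.io client instance and the timestamp format are illustrative assumptions, not part of this codebase.

// Hypothetical client helper, assuming an already connected socket.io client `socket`
function askDeepSeek(socket, roomId, peerName, prompt, context) {
    socket.emit(
        'getDeepSeek',
        {
            time: new Date().toISOString(), // assumed timestamp format
            room: roomId,
            name: peerName,
            prompt: prompt,
            context: context, // running [{ role, content }, ...] conversation history
        },
        (response) => {
            // Success returns { message, context }; disabled or error returns { message } only
            console.log('DeepSeek reply:', response.message);
        },
    );
}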
diff --git a/app/src/config.template.js b/app/src/config.template.js
index 28a13d12..44013e98 100644
--- a/app/src/config.template.js
+++ b/app/src/config.template.js
@@ -503,7 +503,7 @@ module.exports = {
      *
      * Advanced Settings:
      * -----------------
-     * - max_tokens: Maximum response length (default: 1000 tokens)
+     * - max_tokens: Maximum response length (default: 1024 tokens)
      * - temperature: Creativity control (0=strict, 1=creative) (default: 0)
      *
      * Usage Example:
@@ -512,6 +512,7 @@ module.exports = {
      *    - gpt-3.5-turbo (recommended)
      *    - gpt-4
      *    - gpt-4-turbo
+     *    - ...
      *
      * 2. Temperature Guide:
      *    - 0.0: Factual responses
@@ -523,8 +524,55 @@ module.exports = {
         basePath: process.env.CHATGPT_BASE_PATH || 'https://api.openai.com/v1/',
         apiKey: process.env.CHATGPT_API_KEY || '',
         model: process.env.CHATGPT_MODEL || 'gpt-3.5-turbo',
-        max_tokens: parseInt(process.env.CHATGPT_MAX_TOKENS) || 1000,
-        temperature: parseInt(process.env.CHATGPT_TEMPERATURE) || 0,
+        max_tokens: parseInt(process.env.CHATGPT_MAX_TOKENS) || 1024,
+        temperature: parseFloat(process.env.CHATGPT_TEMPERATURE) || 0.7,
+    },
+
+    /**
+     * DeepSeek Integration Configuration
+     * ================================
+     * DeepSeek API integration for AI-powered chat functionality
+     *
+     * Setup Instructions:
+     * ------------------
+     * 1. Go to https://deepseek.com/
+     * 2. Create your DeepSeek account
+     * 3. Generate your API key at https://deepseek.com/account/api-keys
+     *
+     * Core Settings:
+     * -------------
+     * - enabled   : Enable/disable DeepSeek integration [true/false] (default: false)
+     * - basePath  : DeepSeek API endpoint (default: 'https://api.deepseek.com/v1/')
+     * - apiKey    : DeepSeek API secret key (ALWAYS store in .env)
+     * - model     : DeepSeek model version (default: 'deepseek-chat')
+     *
+     * Advanced Settings:
+     * -----------------
+     * - max_tokens: Maximum response length (default: 1024 tokens)
+     * - temperature: Creativity control (0=strict, 1=creative) (default: 0.7)
+     *
+     * Usage Example:
+     * -------------
+     * 1. Supported Models:
+     *    - deepseek-chat (recommended)
+     *    - deepseek-coder
+     *    - deepseek-math
+     *    - deepseek-llm
+     *    - ...
+     *
+     * 2. Temperature Guide:
+     *    - 0.0: Factual responses
+     *    - 0.7: Balanced
+     *    - 1.0: Maximum creativity
+     *
+     */
+    deepSeek: {
+        enabled: process.env.DEEP_SEEK_ENABLED === 'true',
+        basePath: process.env.DEEP_SEEK_BASE_PATH || 'https://api.deepseek.com/v1/',
+        apiKey: process.env.DEEP_SEEK_API_KEY || '',
+        model: process.env.DEEP_SEEK_MODEL || 'deepseek-chat',
+        max_tokens: parseInt(process.env.DEEP_SEEK_MAX_TOKENS) || 1024,
+        temperature: parseFloat(process.env.DEEP_SEEK_TEMPERATURE) || 0.7,
     },
 
     /**
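As a usage illustration, enabling the integration on a deployment then comes down to setting the matching variables in .env. The variable names are the ones added to .env.template above; the values shown here are placeholders.

# Hypothetical .env values (placeholders only, never commit real API keys)
DEEP_SEEK_ENABLED=true
DEEP_SEEK_BASE_PATH=https://api.deepseek.com/v1/
DEEP_SEEK_API_KEY=sk-xxxxxxxxxxxxxxxx
DEEP_SEEK_MODEL=deepseek-chat
DEEP_SEEK_MAX_TOKENS=1024
DEEP_SEEK_TEMPERATURE=0.7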
diff --git a/package.json b/package.json
index cbae1414..41f131d6 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
     "name": "mirotalksfu",
-    "version": "1.8.36",
+    "version": "1.8.37",
     "description": "WebRTC SFU browser-based video calls",
     "main": "Server.js",
     "scripts": {
@@ -69,7 +69,7 @@
         "compression": "1.8.0",
         "cors": "2.8.5",
         "crypto-js": "4.2.0",
-        "discord.js": "^14.19.2",
+        "discord.js": "^14.19.3",
         "dompurify": "^3.2.5",
         "dotenv": "^16.5.0",
         "express": "5.1.0",
diff --git a/public/images/deepSeek.png b/public/images/deepSeek.png
new file mode 100644
index 00000000..eb05814b
Binary files /dev/null and b/public/images/deepSeek.png differ
diff --git a/public/js/Brand.js b/public/js/Brand.js
index f67ddc54..c967cc3c 100644
--- a/public/js/Brand.js
+++ b/public/js/Brand.js
@@ -64,7 +64,7 @@ let BRAND = {
     },
     about: {
         imageUrl: '../images/mirotalk-logo.gif',
-        title: 'WebRTC SFU v1.8.36',
+        title: 'WebRTC SFU v1.8.37',
         html: `