// TgRouting/deamon/router/llmRouter.js
// All imports grouped at the top of the module (the trailing import of
// buildRouterPrompt previously sat below executable statements).
import OpenAI from 'openai';
import { buildRouterPrompt } from '../utils/llmUtils.js';

// Connection settings for the OpenAI-compatible local LLM server.
// NOTE(review): local servers (e.g. LM Studio) typically ignore the API key,
// hence the 'test' placeholder default.
const OPENAI_API_KEY = process.env.OPENAI_API_KEY || 'test';
const LM_BASE_URL = process.env.LM_BASE_URL || 'http://localhost:1234/v1';

/** Shared OpenAI-compatible client pointed at the local LLM endpoint. */
export const openai = new OpenAI({
  baseURL: LM_BASE_URL,
  apiKey: OPENAI_API_KEY,
});
/**
 * Route a user message via the LLM: the model is asked (through a system
 * prompt built from the bot list) to pick the most suitable bot.
 *
 * @param {string} userText - Raw user message to classify.
 * @param {Array<{name: string, description: string}>} bots - Candidate bots.
 * @returns {Promise<{bot: string, reason: string}>} The chosen bot name and
 *   the model's reasoning; falls back to `{ bot: 'unknown', ... }` when the
 *   request fails or the answer cannot be parsed into the expected shape.
 */
export async function routeByLLM(userText, bots) {
  const prompt = buildRouterPrompt(bots);
  console.log('prompt', prompt);

  const messages = [
    { role: 'system', content: prompt },
    { role: 'user', content: userText },
  ];
  console.log('messages', messages[1]);

  const data = {
    model: process.env.LM_MODEL || 'vikhrmodels-vikhr-nemo-12b-instruct-r-21-09-24',
    messages,
  };

  let content;
  try {
    const response = await openai.chat.completions.create(data);
    // Fix: guard the whole chain — an undefined/empty `choices` array used
    // to throw here because only `message` was optional-chained.
    content = response?.choices?.[0]?.message?.content ?? '{}';
  } catch (e) {
    // Fix: a network/server failure previously rejected the promise instead
    // of degrading to the same fallback shape the parse path returns.
    console.error('routeByLLM: LLM request failed', e);
    return { bot: 'unknown', reason: 'Ошибка парсинга ответа' };
  }
  console.log('content', content);

  // Models often wrap JSON answers in Markdown fences (``` or ```json) —
  // strip the opening fence (plus trailing whitespace) and any leftover ```.
  const jsonString = content
    .replace(/```(?:json)?\s*/g, '')
    .replace(/```/g, '')
    .trim();

  try {
    const result = JSON.parse(jsonString);
    // Fix: valid JSON of the wrong shape (e.g. `{}` from the default
    // fallback) used to be returned as-is, leaving `bot` undefined and
    // violating the documented {bot, reason} contract. Validate first.
    if (typeof result?.bot !== 'string' || result.bot.length === 0) {
      return { bot: 'unknown', reason: 'Ошибка парсинга ответа' };
    }
    return result;
  } catch (e) {
    return { bot: 'unknown', reason: 'Ошибка парсинга ответа' };
  }
}