/**
 * OpenAI-compatible Chat Completions helper. Shared by the importer and the editor.
 */
|
/**
 * Resolve the LLM provider configuration from environment variables.
 *
 * Providers are probed in priority order: DeepSeek first, then OpenAI.
 * `LLM_BASE_URL` / `LLM_MODEL` override the per-provider defaults; any
 * trailing slashes are stripped from the base URL so it joins cleanly
 * with request paths.
 *
 * @returns {null | { provider: string, apiKey: string, baseUrl: string, model: string }}
 *   The first configured provider, or null when no API key is set.
 */
export function getLlmConfig() {
  // Priority-ordered provider table: first entry with a non-empty key wins.
  const providers = [
    {
      provider: 'deepseek',
      envKey: 'DEEPSEEK_API_KEY',
      defaultBaseUrl: 'https://api.deepseek.com/v1',
      defaultModel: 'deepseek-chat',
    },
    {
      provider: 'openai',
      envKey: 'OPENAI_API_KEY',
      defaultBaseUrl: 'https://api.openai.com/v1',
      defaultModel: 'gpt-4o-mini',
    },
  ];

  for (const { provider, envKey, defaultBaseUrl, defaultModel } of providers) {
    const apiKey = process.env[envKey];
    if (!apiKey) continue;
    return {
      provider,
      apiKey,
      baseUrl: (process.env.LLM_BASE_URL || defaultBaseUrl).replace(/\/+$/, ''),
      model: process.env.LLM_MODEL || defaultModel,
    };
  }
  return null;
}
|
|
|
/**
 * Perform one chat-completion request against an OpenAI-compatible API and
 * return the raw text content of the assistant's first choice.
 *
 * Unless `LLM_NO_JSON=1` is set, the request asks for `json_object` output.
 * The request is aborted after 120 seconds.
 *
 * @param {{ baseUrl: string, apiKey: string, model: string }} cfg
 * @param {string} system - system prompt
 * @param {string} user - user prompt
 * @param {number} [temperature] - sampling temperature, defaults to 0.25
 * @returns {Promise<string>} raw assistant message
 * @throws {Error} with `code` set to 'llm_timeout' | 'llm_network' |
 *   'llm_http' (plus `status`) | 'llm_empty'
 */
export async function chatCompletionTextContent(cfg, system, user, temperature = 0.25) {
  const payload = {
    model: cfg.model,
    messages: [
      { role: 'system', content: system },
      { role: 'user', content: user },
    ],
    temperature,
  };
  // Request structured JSON output unless explicitly disabled.
  if (process.env.LLM_NO_JSON !== '1') {
    payload.response_format = { type: 'json_object' };
  }

  // Hard 120-second cap on the HTTP request itself.
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 120000);
  let response;
  try {
    response = await fetch(`${cfg.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${cfg.apiKey}`,
      },
      body: JSON.stringify(payload),
      signal: controller.signal,
    });
  } catch (cause) {
    // Distinguish our own abort (timeout) from genuine network failures.
    if (cause.name === 'AbortError') {
      const timeoutErr = new Error('Превышен таймаут ожидания ответа LLM (120 с).');
      timeoutErr.code = 'llm_timeout';
      throw timeoutErr;
    }
    const netErr = new Error(
      cause instanceof Error ? cause.message : 'Сбой сети при обращении к LLM'
    );
    netErr.code = 'llm_network';
    throw netErr;
  } finally {
    clearTimeout(timer);
  }

  if (!response.ok) {
    // Collapse whitespace and cap length so the error stays log-friendly.
    const details = (await response.text()).replace(/\s+/g, ' ').slice(0, 280);
    const httpErr = new Error(`LLM ${response.status}: ${details}`);
    httpErr.code = 'llm_http';
    httpErr.status = response.status;
    throw httpErr;
  }

  const data = await response.json();
  const content = data?.choices?.[0]?.message?.content;
  if (typeof content !== 'string' || !content.trim()) {
    const emptyErr = new Error('Пустой content в ответе API.');
    emptyErr.code = 'llm_empty';
    throw emptyErr;
  }
  return content;
}