mirror of
https://github.com/tiennm99/claude-central-gateway.git
synced 2026-04-17 13:20:56 +00:00
- Split monolithic messages.js (223 lines) into 5 focused modules
- Add full tool_use/tool_result round-trip translation
- Add x-api-key header authentication (Anthropic SDK default)
- Fix SSE Content-Type via Hono streamSSE helper
- Fix streaming usage tracking with stream_options.include_usage
- Add stop_reason mapping (end_turn, max_tokens, tool_use, stop_sequence)
- Forward stop_sequences to OpenAI stop parameter
- Handle system message as string or array of content blocks
- Use timing-safe XOR comparison for auth tokens
- Cache OpenAI client and model map across requests
- Sanitize error responses to prevent upstream detail leakage
- Use crypto.randomUUID() for unique message IDs
- Remove non-existent build/dev commands from vercel.json
32 lines
949 B
JavaScript
32 lines
949 B
JavaScript
import OpenAI from 'openai';
|
|
|
|
// Cache OpenAI client and parsed model map to avoid re-creation per request.
// These module-level variables persist across invocations within one worker
// isolate and are read/written by getOpenAIClient and mapModel below.
let cachedClient = null;      // OpenAI SDK client instance, or null before first use
let cachedApiKey = null;      // API key the cached client was constructed with
let cachedModelMap = null;    // parsed lookup table: Claude model name -> OpenAI model name
let cachedModelMapRaw = null; // raw MODEL_MAP string the cached map was parsed from
|
/**
 * Return a memoized OpenAI client for the given environment.
 *
 * The client is rebuilt only when no cached instance exists yet or when
 * env.OPENAI_API_KEY differs from the key the cached client was built with
 * (e.g. after a secret rotation).
 *
 * @param {{ OPENAI_API_KEY: string }} env - Worker environment bindings.
 * @returns {OpenAI} Cached or freshly constructed OpenAI SDK client.
 */
export function getOpenAIClient(env) {
  const key = env.OPENAI_API_KEY;
  const stale = cachedClient === null || cachedApiKey !== key;
  if (stale) {
    cachedApiKey = key;
    cachedClient = new OpenAI({ apiKey: key });
  }
  return cachedClient;
}
|
|
|
|
/**
 * Translate a Claude model name to its configured OpenAI equivalent.
 *
 * env.MODEL_MAP is a comma-separated list of entries, each either
 * "claudeName:openaiName" or a bare "name" (mapped to itself). The parsed
 * map is cached and only re-parsed when the raw MODEL_MAP string changes.
 *
 * Fixes over the previous version:
 *  - Map is built on a null-prototype object so a model name like
 *    "constructor" or "toString" cannot hit Object.prototype and return
 *    a function instead of a model string.
 *  - Entries are trimmed BEFORE the emptiness check, so whitespace-only
 *    segments (e.g. "a:b, ,c") no longer create a bogus '' -> '' mapping.
 *  - Keys and values are trimmed individually, so "a: gpt-4o" maps to
 *    "gpt-4o" rather than " gpt-4o".
 *
 * @param {string} claudeModel - Incoming Claude model identifier.
 * @param {{ MODEL_MAP?: string }} env - Worker environment bindings.
 * @returns {string} Mapped OpenAI model name, or claudeModel unchanged
 *   when no (non-empty) mapping exists for it.
 */
export function mapModel(claudeModel, env) {
  const raw = env.MODEL_MAP || '';
  if (raw !== cachedModelMapRaw) {
    cachedModelMapRaw = raw;
    // Null prototype: lookups only ever see keys we explicitly set.
    cachedModelMap = Object.create(null);
    for (const part of raw.split(',')) {
      const entry = part.trim();
      if (!entry) continue; // skip empty/whitespace-only segments
      const idx = entry.indexOf(':');
      if (idx > 0) {
        cachedModelMap[entry.slice(0, idx).trim()] = entry.slice(idx + 1).trim();
      } else {
        // Bare name (or leading-colon entry, matching prior behavior):
        // identity mapping.
        cachedModelMap[entry] = entry;
      }
    }
  }
  // `||` (not `??`) preserves the original fallback: an empty-string
  // mapping also falls through to the unmapped name.
  return cachedModelMap[claudeModel] || claudeModel;
}
|