refactor: Modularize proxy and add tool_use support

- Split monolithic messages.js (223 lines) into 5 focused modules
- Add full tool_use/tool_result round-trip translation
- Add x-api-key header authentication (Anthropic SDK default)
- Fix SSE Content-Type via Hono streamSSE helper
- Fix streaming usage tracking with stream_options.include_usage
- Add stop_reason mapping (end_turn, max_tokens, tool_use, stop_sequence)
- Forward stop_sequences to OpenAI stop parameter
- Handle system message as string or array of content blocks
- Use timing-safe XOR comparison for auth tokens
- Cache OpenAI client and model map across requests
- Sanitize error responses to prevent upstream detail leakage
- Use crypto.randomUUID() for unique message IDs
- Remove non-existent build/dev commands from vercel.json
This commit is contained in:
2026-04-05 11:47:06 +07:00
parent 415790a9f1
commit a1113e02aa
7 changed files with 432 additions and 202 deletions

39
src/auth-middleware.js Normal file
View File

@@ -0,0 +1,39 @@
// Compare two strings in constant time by OR-ing together the XOR of every
// byte pair, so the comparison cost does not depend on where the first
// mismatch occurs. Portable: Cloudflare Workers, Node 18+, Deno, Bun.
function timingSafeEqual(a, b) {
  const enc = new TextEncoder();
  const left = enc.encode(a);
  const right = enc.encode(b);
  // Length mismatch may exit early: the token length is not the secret here.
  if (left.byteLength !== right.byteLength) return false;
  const mismatch = left.reduce((acc, byte, i) => acc | (byte ^ right[i]), 0);
  return mismatch === 0;
}
// Auth middleware supporting both x-api-key (Anthropic SDK) and Authorization Bearer
export function authMiddleware() {
  return async (c, next) => {
    const expected = c.env.GATEWAY_TOKEN;
    if (!expected) {
      // Misconfigured deployment: fail closed with a server-side error.
      return c.json(
        { type: 'error', error: { type: 'api_error', message: 'GATEWAY_TOKEN not configured' } },
        500,
      );
    }
    // Prefer the Anthropic-style x-api-key header; otherwise fall back to
    // Authorization, stripping an optional "Bearer " prefix.
    const headerKey = c.req.header('x-api-key') || '';
    const rawAuth = c.req.header('Authorization') || '';
    let presented = headerKey;
    if (!presented) {
      presented = rawAuth.startsWith('Bearer ') ? rawAuth.slice(7) : rawAuth;
    }
    if (!presented || !timingSafeEqual(presented, expected)) {
      return c.json(
        { type: 'error', error: { type: 'authentication_error', message: 'Unauthorized' } },
        401,
      );
    }
    await next();
  };
}

View File

@@ -1,6 +1,7 @@
import { Hono } from 'hono';
import { logger } from 'hono/logger';
import { cors } from 'hono/cors';
import { authMiddleware } from './auth-middleware.js';
import messages from './routes/messages.js';
const app = new Hono();
@@ -9,9 +10,12 @@ const app = new Hono();
// Request logging and permissive CORS applied to every route.
app.use('*', logger());
app.use('*', cors());
// Health check (unauthenticated)
app.get('/', (c) => c.json({ status: 'ok', name: 'Claude Central Gateway' }));
// Auth middleware for API routes — everything under /v1 requires the gateway token.
app.use('/v1/*', authMiddleware());
// Routes
app.route('/v1', messages);

31
src/openai-client.js Normal file
View File

@@ -0,0 +1,31 @@
import OpenAI from 'openai';
// Cache OpenAI client and parsed model map to avoid re-creation per request
let cachedClient = null;
let cachedApiKey = null;
let cachedModelMap = null;
let cachedModelMapRaw = null;

// Return a memoized OpenAI client, rebuilding it only when the API key changes.
export function getOpenAIClient(env) {
  const key = env.OPENAI_API_KEY;
  if (!cachedClient || cachedApiKey !== key) {
    cachedApiKey = key;
    cachedClient = new OpenAI({ apiKey: key });
  }
  return cachedClient;
}

// Resolve a Claude model name through the MODEL_MAP env var
// ("claude-x:provider-y,claude-z:provider-w"). Only the FIRST colon splits a
// pair, so provider names containing colons survive. Unmapped names pass through.
export function mapModel(claudeModel, env) {
  const raw = env.MODEL_MAP || '';
  if (raw !== cachedModelMapRaw) {
    cachedModelMapRaw = raw;
    cachedModelMap = {};
    for (const pair of raw.split(',')) {
      if (!pair) continue;
      const entry = pair.trim();
      const sep = entry.indexOf(':');
      if (sep > 0) {
        cachedModelMap[entry.slice(0, sep)] = entry.slice(sep + 1);
      } else {
        // No colon (or leading colon): map the name to itself.
        cachedModelMap[entry] = entry;
      }
    }
  }
  return cachedModelMap[claudeModel] || claudeModel;
}

View File

@@ -1,222 +1,45 @@
import { Hono } from 'hono';
import { stream } from 'hono/streaming';
import OpenAI from 'openai';
import { getOpenAIClient, mapModel } from '../openai-client.js';
import { buildOpenAIRequest } from '../transform-request.js';
import { transformResponse, streamAnthropicResponse } from '../transform-response.js';
const app = new Hono();
// Parse model mapping from env var.
// Format: "claudeModel:providerModel,..." — only the FIRST colon separates a
// pair, so provider names that themselves contain colons (e.g. fine-tune IDs
// like "ft:gpt-4o-mini:org") survive intact. The previous split(':') dropped
// everything after the second colon and mapped colon-less entries to undefined.
function parseModelMap(envVar) {
  if (!envVar) return {};
  return Object.fromEntries(
    envVar.split(',')
      .map((pair) => pair.trim())
      .filter(Boolean)
      .map((pair) => {
        const sep = pair.indexOf(':');
        // No colon: map the name to itself rather than to undefined.
        return sep > 0 ? [pair.slice(0, sep), pair.slice(sep + 1)] : [pair, pair];
      })
  );
}
// Map Claude model to provider model; names absent from MODEL_MAP pass through.
function mapModel(claudeModel, env) {
  const mapping = parseModelMap(env.MODEL_MAP);
  return mapping[claudeModel] || claudeModel;
}
// Transform Anthropic messages to OpenAI format.
// Handles string content directly; array content keeps text blocks and
// converts image blocks (base64 or url source) to image_url parts. Blocks of
// any other type are dropped, as are messages whose content is neither a
// string nor an array.
function transformMessages(request) {
  const out = [];
  if (request.system) {
    out.push({ role: 'system', content: request.system });
  }
  for (const msg of request.messages || []) {
    if (typeof msg.content === 'string') {
      out.push({ role: msg.role, content: msg.content });
    } else if (Array.isArray(msg.content)) {
      const parts = [];
      for (const part of msg.content) {
        if (part.type === 'text') {
          parts.push({ type: 'text', text: part.text });
        } else if (part.type === 'image') {
          const src = part.source;
          if (src?.type === 'base64') {
            // Inline the image as a data URL.
            parts.push({
              type: 'image_url',
              image_url: { url: `data:${src.media_type};base64,${src.data}` },
            });
          } else if (src?.type === 'url') {
            parts.push({ type: 'image_url', image_url: { url: src.url } });
          }
        }
      }
      out.push({ role: msg.role, content: parts });
    }
  }
  return out;
}
// Serialize one Anthropic-style SSE frame: event line, JSON data line, blank line.
function formatSSE(event, data) {
  const payload = JSON.stringify(data);
  return `event: ${event}\ndata: ${payload}\n\n`;
}
// Auth middleware.
// Accepts the gateway token either as the raw Authorization value or with a
// "Bearer " prefix. FIX: the token is now compared in constant time — the
// previous `!==` comparison leaked the secret through timing differences.
app.use('*', async (c, next) => {
  const authHeader = c.req.header('Authorization') || '';
  const token = authHeader.startsWith('Bearer ')
    ? authHeader.slice(7)
    : authHeader;
  // Byte-level XOR fold: comparison cost is independent of where the first
  // mismatch occurs. Portable (no Node-only crypto.timingSafeEqual).
  const constantTimeEqual = (a, b) => {
    const enc = new TextEncoder();
    const x = enc.encode(a);
    const y = enc.encode(b);
    if (x.byteLength !== y.byteLength) return false;
    let diff = 0;
    for (let i = 0; i < x.byteLength; i++) diff |= x[i] ^ y[i];
    return diff === 0;
  };
  const expected = c.env.GATEWAY_TOKEN;
  // typeof guard: an unset GATEWAY_TOKEN must still reject with 401 (as
  // before), not be coerced to the string "undefined" by TextEncoder.
  if (typeof expected !== 'string' || !constantTimeEqual(token, expected)) {
    return c.json({ type: 'error', error: { type: 'authentication_error', message: 'Unauthorized' } }, 401);
  }
  await next();
});
// POST /v1/messages
// POST /v1/messages — proxy Anthropic Messages API to OpenAI Chat Completions
//
// NOTE(review): this span is a unified-diff overlay — lines deleted by the
// refactor and their replacements appear interleaved, so the text below is
// NOT valid JavaScript as shown (duplicate `const openai` declarations,
// unreachable second `return` statements, unbalanced braces near the end).
// Comments tag which version each fragment belongs to.
app.post('/messages', async (c) => {
  const env = c.env;
  // Validate OpenAI API key
  if (!env.OPENAI_API_KEY) {
    // OLD: named the env var in the client-facing error.
    return c.json({ type: 'error', error: { type: 'api_error', message: 'OPENAI_API_KEY not configured' } }, 500);
    // NEW: generic provider wording (no upstream detail leakage).
    return c.json({
      type: 'error',
      error: { type: 'api_error', message: 'Provider API key not configured' },
    }, 500);
  }
  try {
    const anthropicRequest = await c.req.json();
    // OLD: fresh client constructed on every request.
    const openai = new OpenAI({
      apiKey: env.OPENAI_API_KEY
    });
    const messages = transformMessages(anthropicRequest);
    // NEW: cached client plus extracted request builder.
    const openai = getOpenAIClient(env);
    const model = mapModel(anthropicRequest.model, env);
    // OLD default streamed unless stream === false; NEW streams only when
    // stream === true — a behavior change for requests that omit `stream`.
    const streamResponse = anthropicRequest.stream !== false;
    const isStreaming = anthropicRequest.stream === true;
    const openaiPayload = buildOpenAIRequest(anthropicRequest, model);
    // OLD streaming path: hand-rolled SSE frames via hono `stream`, no
    // stop_reason mapping, no stream_options.include_usage.
    if (streamResponse) {
      // Streaming response
      const streamResponse = await openai.chat.completions.create({
        model,
        messages,
        stream: true,
        max_tokens: anthropicRequest.max_tokens,
        temperature: anthropicRequest.temperature,
        top_p: anthropicRequest.top_p
      });
      // Date.now() IDs can collide across concurrent requests (the new code
      // uses crypto.randomUUID()).
      let messageId = `msg_${Date.now()}`;
      let outputTokens = 0;
      return stream(c, async (s) => {
        // Send message_start event
        s.write(formatSSE('message_start', {
          type: 'message_start',
          message: {
            id: messageId,
            type: 'message',
            role: 'assistant',
            content: [],
            model: anthropicRequest.model,
            stop_reason: null,
            usage: { input_tokens: 0, output_tokens: 0 }
          }
        }));
        // Send content_block_start
        s.write(formatSSE('content_block_start', {
          type: 'content_block_start',
          index: 0,
          content_block: { type: 'text', text: '' }
        }));
        for await (const chunk of streamResponse) {
          const delta = chunk.choices[0]?.delta;
          if (delta?.content) {
            s.write(formatSSE('content_block_delta', {
              type: 'content_block_delta',
              index: 0,
              delta: { type: 'text_delta', text: delta.content }
            }));
          }
          // NOTE(review): without stream_options.include_usage the provider
          // does not send usage chunks, so outputTokens stays 0 here.
          if (chunk.usage) {
            outputTokens = chunk.usage.completion_tokens || outputTokens;
          }
        }
        // Send content_block_stop
        s.write(formatSSE('content_block_stop', {
          type: 'content_block_stop',
          index: 0
        }));
        // Send message_delta with final usage
        s.write(formatSSE('message_delta', {
          type: 'message_delta',
          delta: { stop_reason: 'end_turn' },
          usage: { output_tokens: outputTokens }
        }));
        // Send message_stop
        s.write(formatSSE('message_stop', { type: 'message_stop' }));
      });
    } else {
      // Non-streaming response (OLD path)
      const response = await openai.chat.completions.create({
        model,
        messages,
        stream: false,
        max_tokens: anthropicRequest.max_tokens,
        temperature: anthropicRequest.temperature,
        top_p: anthropicRequest.top_p
      });
      const content = response.choices[0]?.message?.content || '';
      // OLD: hard-coded stop_reason 'end_turn', text-only content blocks.
      return c.json({
        id: `msg_${Date.now()}`,
        type: 'message',
        role: 'assistant',
        content: [{ type: 'text', text: content }],
        model: anthropicRequest.model,
        stop_reason: 'end_turn',
        usage: {
          input_tokens: response.usage?.prompt_tokens || 0,
          output_tokens: response.usage?.completion_tokens || 0
        }
      });
    // NEW replacement for both branches above: delegate to the transform modules.
    if (isStreaming) {
      const openaiStream = await openai.chat.completions.create(openaiPayload);
      return streamAnthropicResponse(c, openaiStream, anthropicRequest);
    }
    const response = await openai.chat.completions.create(openaiPayload);
    return c.json(transformResponse(response, anthropicRequest));
  } catch (error) {
    console.error('Proxy error:', error);
    // OLD: echoed upstream error.message to the client (detail leakage).
    if (error.status) {
      return c.json({
        type: 'error',
        error: {
          type: 'api_error',
          message: error.message
        }
      }, error.status);
    }
    // NEW: sanitized message; error type keyed off the status class.
    const status = error.status || 500;
    return c.json({
      type: 'error',
      error: {
        type: 'internal_error',
        message: 'Internal server error'
      }
    }, 500);
    // NEW fragment of the sanitized response body (overlay remnant):
      type: status >= 500 ? 'api_error' : 'invalid_request_error',
      message: 'Request failed',
    },
  }, status);
  }
});

163
src/transform-request.js Normal file
View File

@@ -0,0 +1,163 @@
// Transform Anthropic Messages API request → OpenAI Chat Completions API request
// Build complete OpenAI request payload from Anthropic request
export function buildOpenAIRequest(anthropicRequest, model) {
  const req = anthropicRequest;
  const payload = {
    model,
    messages: transformMessages(req),
    max_tokens: req.max_tokens,
    temperature: req.temperature,
    top_p: req.top_p,
  };
  // The usage chunk is only emitted on streams when explicitly requested.
  if (req.stream === true) {
    payload.stream = true;
    payload.stream_options = { include_usage: true };
  }
  // Anthropic stop_sequences maps directly onto OpenAI's stop parameter.
  if (req.stop_sequences?.length) {
    payload.stop = req.stop_sequences;
  }
  // Anthropic tool definitions become OpenAI function tools.
  if (req.tools?.length) {
    payload.tools = req.tools.map(({ name, description, input_schema }) => ({
      type: 'function',
      function: { name, description, parameters: input_schema },
    }));
  }
  if (req.tool_choice) {
    payload.tool_choice = mapToolChoice(req.tool_choice);
  }
  return payload;
}
// Map Anthropic tool_choice → OpenAI tool_choice.
// Unknown types fall back to 'auto'.
function mapToolChoice(tc) {
  switch (tc.type) {
    case 'any':
      return 'required';
    case 'none':
      return 'none';
    case 'tool':
      // Force a specific named function.
      return { type: 'function', function: { name: tc.name } };
    case 'auto':
    default:
      return 'auto';
  }
}
// Transform Anthropic messages array → OpenAI messages array
function transformMessages(request) {
  const result = [];
  // System prompt may be a plain string or an array of text blocks.
  const sys = request.system;
  if (sys) {
    const text = typeof sys === 'string'
      ? sys
      : sys.filter((block) => block.type === 'text').map((block) => block.text).join('\n\n');
    if (text) result.push({ role: 'system', content: text });
  }
  for (const msg of request.messages || []) {
    if (typeof msg.content === 'string') {
      result.push({ role: msg.role, content: msg.content });
    } else if (Array.isArray(msg.content)) {
      // Block-structured content: assistant turns may carry tool_use blocks,
      // user turns may carry tool_result/image blocks.
      const handler = msg.role === 'assistant' ? transformAssistantMessage : transformUserMessage;
      handler(msg, result);
    }
    // Content that is neither string nor array is dropped.
  }
  return result;
}
// Transform assistant message with potential tool_use blocks and append the
// converted OpenAI message to `messages`.
function transformAssistantMessage(msg, messages) {
  const textParts = [];
  const toolCalls = [];
  for (const block of msg.content) {
    switch (block.type) {
      case 'text':
        textParts.push({ type: 'text', text: block.text });
        break;
      case 'tool_use':
        toolCalls.push({
          id: block.id,
          type: 'function',
          function: { name: block.name, arguments: JSON.stringify(block.input) },
        });
        break;
      default:
        // thinking, cache_control, etc. have no OpenAI equivalent — dropped.
        break;
    }
  }
  const converted = { role: 'assistant' };
  if (textParts.length === 1 && textParts[0].type === 'text') {
    // A single text block collapses to a plain string.
    converted.content = textParts[0].text;
  } else if (textParts.length > 1) {
    converted.content = textParts;
  } else {
    // Tool-call-only turns carry null content per OpenAI convention.
    converted.content = null;
  }
  if (toolCalls.length) converted.tool_calls = toolCalls;
  messages.push(converted);
}
// Transform user message with potential tool_result and mixed content blocks.
// tool_result blocks become standalone role:"tool" messages emitted BEFORE
// the remaining user content; text/image blocks form the user message itself.
function transformUserMessage(msg, messages) {
  const userParts = [];
  for (const block of msg.content) {
    if (block.type === 'text') {
      userParts.push({ type: 'text', text: block.text });
    } else if (block.type === 'image') {
      const source = block.source;
      if (source?.type === 'base64') {
        userParts.push({
          type: 'image_url',
          image_url: { url: `data:${source.media_type};base64,${source.data}` },
        });
      } else if (source?.type === 'url') {
        userParts.push({ type: 'image_url', image_url: { url: source.url } });
      }
    } else if (block.type === 'tool_result') {
      // Each tool_result becomes its own OpenAI tool message.
      messages.push({
        role: 'tool',
        tool_call_id: block.tool_use_id,
        content: extractToolResultContent(block),
      });
    }
  }
  // Remaining non-tool content becomes a user message; a lone text block
  // collapses to a plain string.
  if (userParts.length === 1 && userParts[0].type === 'text') {
    messages.push({ role: 'user', content: userParts[0].text });
  } else if (userParts.length > 0) {
    messages.push({ role: 'user', content: userParts });
  }
}
// Extract text content from tool_result (string, array, or undefined),
// prefixing "[ERROR] " when the result is flagged as an error.
function extractToolResultContent(part) {
  const prefix = part.is_error ? '[ERROR] ' : '';
  const { content } = part;
  if (typeof content === 'string') {
    return prefix + content;
  }
  if (!content) {
    // Missing content still reports the error marker when flagged.
    return prefix;
  }
  const joined = content
    .filter((block) => block.type === 'text')
    .map((block) => block.text)
    .join('\n');
  return prefix + joined;
}

172
src/transform-response.js Normal file
View File

@@ -0,0 +1,172 @@
import { streamSSE } from 'hono/streaming';
// Map OpenAI finish_reason → Anthropic stop_reason.
// content_filter and unknown/null reasons collapse to 'end_turn'.
function mapStopReason(finishReason, hadStopSequences) {
  if (finishReason === 'stop') {
    // A plain "stop" is attributed to a stop sequence whenever the caller
    // supplied any; otherwise it is a normal end of turn.
    return hadStopSequences ? 'stop_sequence' : 'end_turn';
  }
  if (finishReason === 'length') return 'max_tokens';
  if (finishReason === 'tool_calls') return 'tool_use';
  return 'end_turn';
}
// Build Anthropic content array from OpenAI response message.
// Text first, then one tool_use block per tool call; always returns at least
// one block (an empty text block) because Anthropic requires non-empty content.
function buildContentBlocks(message) {
  const blocks = [];
  if (message?.content) {
    blocks.push({ type: 'text', text: message.content });
  }
  for (const call of message?.tool_calls ?? []) {
    let parsedInput;
    try {
      parsedInput = JSON.parse(call.function.arguments || '{}');
    } catch {
      // Malformed argument JSON degrades to an empty input object.
      parsedInput = {};
    }
    blocks.push({ type: 'tool_use', id: call.id, name: call.function.name, input: parsedInput });
  }
  return blocks.length ? blocks : [{ type: 'text', text: '' }];
}
// Transform non-streaming OpenAI response → Anthropic response
export function transformResponse(openaiResponse, anthropicRequest) {
  const choice = openaiResponse.choices[0];
  const usage = openaiResponse.usage;
  const hadStopSequences = Boolean(anthropicRequest.stop_sequences?.length);
  return {
    // Random UUID keeps message IDs unique across concurrent requests.
    id: `msg_${crypto.randomUUID()}`,
    type: 'message',
    role: 'assistant',
    content: buildContentBlocks(choice?.message),
    // Echo back the client's requested model name, not the provider's.
    model: anthropicRequest.model,
    stop_reason: mapStopReason(choice?.finish_reason, hadStopSequences),
    usage: {
      input_tokens: usage?.prompt_tokens || 0,
      output_tokens: usage?.completion_tokens || 0,
    },
  };
}
// Serialize and write a single SSE event to the Hono stream.
async function writeEvent(stream, event, data) {
  const serialized = JSON.stringify(data);
  await stream.writeSSE({ event, data: serialized });
}
// Stream OpenAI response → Anthropic SSE events.
// Emits the Anthropic event sequence: message_start, then content_block_start/
// delta/stop per block, then message_delta (stop_reason + usage), message_stop.
export function streamAnthropicResponse(c, openaiStream, anthropicRequest) {
  const hadStopSequences = Boolean(anthropicRequest.stop_sequences?.length);
  return streamSSE(c, async (stream) => {
    const messageId = `msg_${crypto.randomUUID()}`;
    // NOTE(review): inputTokens is captured from the usage chunk below but is
    // never sent to the client (message_start already reported 0) — confirm
    // whether a final input_tokens update was intended.
    let inputTokens = 0;
    let outputTokens = 0;
    let finishReason = null;
    // Streaming state for content blocks
    let blockIndex = 0;
    let textBlockStarted = false;
    const toolCallBuffers = {}; // index → { id, name, arguments }
    // Send message_start
    await writeEvent(stream, 'message_start', {
      type: 'message_start',
      message: {
        id: messageId, type: 'message', role: 'assistant',
        content: [], model: anthropicRequest.model,
        stop_reason: null, usage: { input_tokens: 0, output_tokens: 0 },
      },
    });
    for await (const chunk of openaiStream) {
      const choice = chunk.choices[0];
      const delta = choice?.delta;
      // Track finish_reason from final chunk
      if (choice?.finish_reason) {
        finishReason = choice.finish_reason;
      }
      // Handle text content delta
      if (delta?.content) {
        // Lazily open the text block on the first text delta.
        // NOTE(review): if text resumes AFTER tool_calls in the same stream,
        // this reopens a text block at an index already used by a tool block —
        // confirm upstream never interleaves text after tool_calls.
        if (!textBlockStarted) {
          await writeEvent(stream, 'content_block_start', {
            type: 'content_block_start', index: blockIndex,
            content_block: { type: 'text', text: '' },
          });
          textBlockStarted = true;
        }
        await writeEvent(stream, 'content_block_delta', {
          type: 'content_block_delta', index: blockIndex,
          delta: { type: 'text_delta', text: delta.content },
        });
      }
      // Handle tool_calls delta
      if (delta?.tool_calls) {
        // Close text block before starting tool blocks
        if (textBlockStarted) {
          await writeEvent(stream, 'content_block_stop', {
            type: 'content_block_stop', index: blockIndex,
          });
          textBlockStarted = false;
          blockIndex++;
        }
        for (const tc of delta.tool_calls) {
          const idx = tc.index;
          if (tc.id) {
            // New tool call starting
            toolCallBuffers[idx] = { id: tc.id, name: tc.function?.name || '', arguments: '' };
            await writeEvent(stream, 'content_block_start', {
              type: 'content_block_start', index: blockIndex + idx,
              content_block: { type: 'tool_use', id: tc.id, name: tc.function?.name || '' },
            });
          }
          // NOTE(review): assumes the first chunk of each tool call carries
          // tc.id — an argument continuation arriving before its id would
          // throw here on the missing buffer. Confirm against the provider.
          if (tc.function?.arguments) {
            toolCallBuffers[idx].arguments += tc.function.arguments;
            await writeEvent(stream, 'content_block_delta', {
              type: 'content_block_delta', index: blockIndex + idx,
              delta: { type: 'input_json_delta', partial_json: tc.function.arguments },
            });
          }
        }
      }
      // Capture usage from final chunk (requires stream_options.include_usage)
      if (chunk.usage) {
        inputTokens = chunk.usage.prompt_tokens || inputTokens;
        outputTokens = chunk.usage.completion_tokens || outputTokens;
      }
    }
    // Close any open text block
    if (textBlockStarted) {
      await writeEvent(stream, 'content_block_stop', {
        type: 'content_block_stop', index: blockIndex,
      });
    }
    // Close any open tool call blocks (Object.keys yields string indexes,
    // hence the Number() conversion for the arithmetic).
    for (const idx of Object.keys(toolCallBuffers)) {
      await writeEvent(stream, 'content_block_stop', {
        type: 'content_block_stop', index: blockIndex + Number(idx),
      });
    }
    // Send message_delta with final stop_reason and usage
    await writeEvent(stream, 'message_delta', {
      type: 'message_delta',
      delta: { stop_reason: mapStopReason(finishReason, hadStopSequences) },
      usage: { output_tokens: outputTokens },
    });
    // Send message_stop
    await writeEvent(stream, 'message_stop', { type: 'message_stop' });
  });
}

View File

@@ -1,6 +1,4 @@
{
"buildCommand": "npm run build",
"devCommand": "npm run dev",
"installCommand": "npm install",
"framework": null,
"rewrites": [