refactor: Migrate to Hono framework for multi-platform support

- Replace Vercel-specific handler with Hono framework
- Add Cloudflare Workers support via wrangler.toml
- Restructure project: src/index.js, src/routes/messages.js
- Update README with Cloudflare deployment instructions
- Save framework decision to memory

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-25 23:02:08 +07:00
parent bfb936ae4a
commit 415790a9f1
11 changed files with 332 additions and 242 deletions

View File

@@ -2,3 +2,4 @@
- [Memory Saving Rules](memory_saving_rules.md) — How to save memories in this project - [Memory Saving Rules](memory_saving_rules.md) — How to save memories in this project
- [Implementation Plan](implementation_plan.md) — Architecture and design decisions for Claude Central Gateway - [Implementation Plan](implementation_plan.md) — Architecture and design decisions for Claude Central Gateway
- [Framework Decision - Hono](framework_decision.md) — Why Hono was chosen over alternatives

View File

@@ -0,0 +1,26 @@
---
name: Framework Decision - Hono
description: Why Hono was chosen over alternatives for the gateway
type: project
---
## Framework Choice: Hono
**Decision:** Use Hono as the web framework for Claude Central Gateway.
**Why Hono over alternatives:**
| Alternative | Why not |
|-------------|---------|
| Nitro | Overkill for simple proxy, 200KB+ bundle vs 14KB |
| itty-router | Cloudflare-focused, Vercel needs adapter |
| Native | Duplicate code per platform, manual streaming |
**Why Hono:**
- Single codebase for Vercel + Cloudflare + Deno + Bun
- Ultra-lightweight (~14KB)
- First-class streaming support (critical for SSE)
- Zero-config multi-platform
- Aligns with project philosophy: "Minimal, simple, deploy anywhere"
**How to apply:** All API routes should use Hono's `app.route()` pattern. Keep handlers simple and stateless.

View File

@@ -21,7 +21,8 @@ Claude Code → Gateway (Vercel) → OpenAI API
### Key Decisions ### Key Decisions
- **Language**: Node.js with JavaScript (no TypeScript) - **Language**: Node.js with JavaScript (no TypeScript)
- **Deployment**: Vercel serverless functions - **Framework**: Hono (multi-platform: Vercel, Cloudflare, Deno, Bun)
- **Deployment**: Vercel serverless functions OR Cloudflare Workers
- **Providers**: OpenAI first (via official SDK), others in TODO - **Providers**: OpenAI first (via official SDK), others in TODO
- **Config**: Environment variables only (no database) - **Config**: Environment variables only (no database)
- **Auth**: Single shared token (user's `ANTHROPIC_AUTH_TOKEN` must match `GATEWAY_TOKEN`) - **Auth**: Single shared token (user's `ANTHROPIC_AUTH_TOKEN` must match `GATEWAY_TOKEN`)
@@ -39,9 +40,15 @@ Claude Code → Gateway (Vercel) → OpenAI API
### File Structure ### File Structure
``` ```
api/v1/messages.js - Main proxy handler src/
package.json - Dependencies (openai SDK) ├── index.js - Hono app entry point
vercel.json - Routing config ├── routes/
│ └── messages.js - /v1/messages proxy handler
api/
└── index.js - Vercel adapter
package.json - Dependencies (hono, openai)
vercel.json - Vercel config
wrangler.toml - Cloudflare Workers config
``` ```
### How to apply: When adding new providers or modifying the gateway, follow the established pattern in `src/routes/messages.js` for request/response transformation. ### How to apply: When adding new providers or modifying the gateway, follow the established pattern in `src/routes/messages.js` for request/response transformation.

View File

@@ -12,7 +12,7 @@ Minimal, simple, deploy anywhere.
## Quick Start ## Quick Start
### 1. Deploy to Vercel ### Deploy to Vercel
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/tiennm99/claude-central-gateway) [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/tiennm99/claude-central-gateway)
@@ -21,12 +21,24 @@ Or manually:
```bash ```bash
git clone https://github.com/tiennm99/claude-central-gateway git clone https://github.com/tiennm99/claude-central-gateway
cd claude-central-gateway cd claude-central-gateway
npm install
vercel vercel
``` ```
### 2. Set Environment Variables ### Deploy to Cloudflare Workers
In Vercel dashboard, set these environment variables: ```bash
git clone https://github.com/tiennm99/claude-central-gateway
cd claude-central-gateway
npm install
npm run deploy:cf
```
### Set Environment Variables
**Vercel**: Dashboard → Settings → Environment Variables
**Cloudflare**: `wrangler.toml` or Dashboard → Workers → Variables
| Variable | Description | Example | | Variable | Description | Example |
|----------|-------------|---------| |----------|-------------|---------|
@@ -34,7 +46,7 @@ In Vercel dashboard, set these environment variables:
| `OPENAI_API_KEY` | Your OpenAI API key | `sk-...` | | `OPENAI_API_KEY` | Your OpenAI API key | `sk-...` |
| `MODEL_MAP` | Model name mapping | `claude-sonnet-4-20250514:gpt-4o` | | `MODEL_MAP` | Model name mapping | `claude-sonnet-4-20250514:gpt-4o` |
### 3. Configure Claude Code ### Configure Claude Code
```bash ```bash
export ANTHROPIC_BASE_URL=https://your-gateway.vercel.app export ANTHROPIC_BASE_URL=https://your-gateway.vercel.app

3
api/index.js Normal file
View File

@@ -0,0 +1,3 @@
// Vercel entry point: re-export the shared Hono app's Web-standard
// fetch handler so every platform runs the same application code.
// NOTE(review): exporting `app.fetch` directly assumes Vercel invokes a
// Web-standard fetch handler (Edge runtime). Hono also ships a dedicated
// Vercel adapter (`handle` from 'hono/vercel') for the Node.js runtime —
// confirm which runtime this project targets.
import app from '../src/index.js';
export default app.fetch;

View File

@@ -1,226 +0,0 @@
import OpenAI from 'openai';
// Parse the MODEL_MAP env var ("claude-a:gpt-4o,claude-b:gpt-4o-mini")
// into a lookup object keyed by the Claude model name.
function parseModelMap(envVar) {
  if (!envVar) return {};
  const mapping = {};
  for (const rawPair of envVar.split(',')) {
    const [claudeName, providerName] = rawPair.trim().split(':');
    mapping[claudeName] = providerName;
  }
  return mapping;
}
// Resolve a Claude model name to the provider model configured in
// process.env.MODEL_MAP; unmapped names pass through unchanged.
function mapModel(claudeModel) {
  const mapping = parseModelMap(process.env.MODEL_MAP);
  return mapping[claudeModel] || claudeModel;
}
// Transform an Anthropic /v1/messages request body into the OpenAI
// chat.completions `messages` array.
//
// Handles:
//   - optional top-level `system` prompt → a leading system message
//   - string message content → passed through unchanged
//   - multi-part content arrays → text and image parts converted in the
//     order they appear (base64 image sources become data: URLs)
// Unknown part types (e.g. tool_use) are silently dropped, as before.
function transformMessages(request) {
  const messages = [];
  // Add system message if present
  if (request.system) {
    messages.push({ role: 'system', content: request.system });
  }
  for (const msg of request.messages || []) {
    if (typeof msg.content === 'string') {
      messages.push({ role: msg.role, content: msg.content });
      continue;
    }
    if (!Array.isArray(msg.content)) continue;
    // Walk parts in their original order. The previous implementation
    // grouped all text parts before all image parts, which silently
    // reordered interleaved text/image content.
    const content = [];
    for (const part of msg.content) {
      if (part.type === 'text') {
        content.push({ type: 'text', text: part.text });
      } else if (part.type === 'image') {
        if (part.source?.type === 'base64') {
          content.push({
            type: 'image_url',
            image_url: {
              url: `data:${part.source.media_type};base64,${part.source.data}`
            }
          });
        } else if (part.source?.type === 'url') {
          content.push({
            type: 'image_url',
            image_url: { url: part.source.url }
          });
        }
      }
    }
    messages.push({ role: msg.role, content });
  }
  return messages;
}
// Serialize one server-sent-event frame in Anthropic's wire format:
// an `event:` line, a JSON `data:` line, and a blank separator line.
function formatSSE(event, data) {
  const payload = JSON.stringify(data);
  return `event: ${event}\ndata: ${payload}\n\n`;
}
/**
 * Vercel serverless handler for POST /v1/messages.
 *
 * Anthropic-compatible proxy: validates the shared gateway token,
 * translates the Anthropic request into an OpenAI chat completion, and
 * translates the response back — either as Anthropic-format SSE (the
 * default) or as a single JSON message when `stream: false` is sent.
 */
export default async function handler(req, res) {
  // Only allow POST
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }
  // Validate auth token — accepts "Bearer <token>" or a bare token value.
  const authHeader = req.headers.authorization || '';
  const token = authHeader.startsWith('Bearer ')
    ? authHeader.slice(7)
    : authHeader;
  if (token !== process.env.GATEWAY_TOKEN) {
    return res.status(401).json({ error: 'Unauthorized' });
  }
  // Fail fast with a clear error rather than a cryptic SDK failure.
  if (!process.env.OPENAI_API_KEY) {
    return res.status(500).json({ error: 'OPENAI_API_KEY not configured' });
  }
  try {
    const anthropicRequest = req.body;
    const openai = new OpenAI({
      apiKey: process.env.OPENAI_API_KEY
    });
    const messages = transformMessages(anthropicRequest);
    const model = mapModel(anthropicRequest.model);
    // Anthropic clients default to streaming; only an explicit
    // `stream: false` selects the JSON response path.
    const stream = anthropicRequest.stream !== false;
    if (stream) {
      // Set headers for SSE
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');
      const streamResponse = await openai.chat.completions.create({
        model,
        messages,
        stream: true,
        max_tokens: anthropicRequest.max_tokens,
        temperature: anthropicRequest.temperature,
        top_p: anthropicRequest.top_p
      });
      const messageId = `msg_${Date.now()}`;
      let outputTokens = 0;
      // Anthropic SSE sequence: message_start → content_block_start →
      // content_block_delta* → content_block_stop → message_delta →
      // message_stop. (Removed unused `textIndex` and the write-only
      // `inputTokens` tracker from the previous version.)
      res.write(formatSSE('message_start', {
        type: 'message_start',
        message: {
          id: messageId,
          type: 'message',
          role: 'assistant',
          content: [],
          model: anthropicRequest.model, // echo the requested model name
          stop_reason: null,
          usage: { input_tokens: 0, output_tokens: 0 }
        }
      }));
      res.write(formatSSE('content_block_start', {
        type: 'content_block_start',
        index: 0,
        content_block: { type: 'text', text: '' }
      }));
      for await (const chunk of streamResponse) {
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          res.write(formatSSE('content_block_delta', {
            type: 'content_block_delta',
            index: 0,
            delta: { type: 'text_delta', text: delta.content }
          }));
        }
        // Usage arrives (if at all) on the final chunk.
        if (chunk.usage) {
          outputTokens = chunk.usage.completion_tokens || outputTokens;
        }
      }
      res.write(formatSSE('content_block_stop', {
        type: 'content_block_stop',
        index: 0
      }));
      // message_delta carries the final stop reason and output usage.
      res.write(formatSSE('message_delta', {
        type: 'message_delta',
        delta: { stop_reason: 'end_turn' },
        usage: { output_tokens: outputTokens }
      }));
      res.write(formatSSE('message_stop', { type: 'message_stop' }));
      res.end();
    } else {
      // Non-streaming: single JSON response in Anthropic's message shape.
      const response = await openai.chat.completions.create({
        model,
        messages,
        stream: false,
        max_tokens: anthropicRequest.max_tokens,
        temperature: anthropicRequest.temperature,
        top_p: anthropicRequest.top_p
      });
      const content = response.choices[0]?.message?.content || '';
      res.json({
        id: `msg_${Date.now()}`,
        type: 'message',
        role: 'assistant',
        content: [{ type: 'text', text: content }],
        model: anthropicRequest.model,
        stop_reason: 'end_turn',
        usage: {
          input_tokens: response.usage?.prompt_tokens || 0,
          output_tokens: response.usage?.completion_tokens || 0
        }
      });
    }
  } catch (error) {
    console.error('Proxy error:', error);
    // Surface upstream API errors with their original status code.
    if (error.status) {
      return res.status(error.status).json({
        type: 'error',
        error: {
          type: 'api_error',
          message: error.message
        }
      });
    }
    return res.status(500).json({
      type: 'error',
      error: {
        type: 'internal_error',
        message: 'Internal server error'
      }
    });
  }
}

View File

@@ -3,13 +3,21 @@
"version": "1.0.0", "version": "1.0.0",
"description": "A lightweight proxy for Claude Code that routes requests to third-party API providers", "description": "A lightweight proxy for Claude Code that routes requests to third-party API providers",
"private": true, "private": true,
"type": "module",
"scripts": { "scripts": {
"start": "vercel dev", "dev": "hono dev",
"deploy": "vercel --prod" "start": "hono start",
"deploy:vercel": "vercel --prod",
"deploy:cf": "wrangler deploy",
"dev:cf": "wrangler dev"
}, },
"dependencies": { "dependencies": {
"hono": "^4.6.0",
"openai": "^4.85.0" "openai": "^4.85.0"
}, },
"devDependencies": {
"wrangler": "^3.0.0"
},
"engines": { "engines": {
"node": ">=18" "node": ">=18"
} }

27
src/index.js Normal file
View File

@@ -0,0 +1,27 @@
import { Hono } from 'hono';
import { logger } from 'hono/logger';
import { cors } from 'hono/cors';
import messages from './routes/messages.js';

// Shared Hono application: the single entry point used by every
// deployment target (Vercel adapter in api/index.js, Cloudflare
// Workers via wrangler.toml, or any other fetch-compatible runtime).
const app = new Hono();

// Request logging and permissive CORS on every route.
app.use('*', logger(), cors());

// Liveness probe.
app.get('/', (c) => c.json({ status: 'ok', name: 'Claude Central Gateway' }));

// All /v1/* endpoints are defined in the messages router.
app.route('/v1', messages);

app.notFound((c) => c.json({ error: 'Not found' }, 404));

app.onError((err, c) => {
  // Last-resort handler: log details server-side, hide them from clients.
  console.error('Error:', err);
  return c.json({ error: 'Internal server error' }, 500);
});

export default app;

223
src/routes/messages.js Normal file
View File

@@ -0,0 +1,223 @@
import { Hono } from 'hono';
import { stream } from 'hono/streaming';
import OpenAI from 'openai';
const app = new Hono();
// Parse the MODEL_MAP env var into a lookup object.
// e.g. "claude-a:gpt-4o,claude-b:gpt-4o-mini" →
//      { 'claude-a': 'gpt-4o', 'claude-b': 'gpt-4o-mini' }
// Malformed fragments (empty entries, trailing commas, pairs without a
// colon) are skipped instead of producing bogus undefined mappings.
function parseModelMap(envVar) {
  if (!envVar) return {};
  return Object.fromEntries(
    envVar
      .split(',')
      .map(pair => pair.trim())
      .filter(pair => pair.includes(':'))
      .map(pair => {
        // Split on the FIRST colon only, so provider model names that
        // themselves contain ':' survive intact.
        const idx = pair.indexOf(':');
        return [pair.slice(0, idx), pair.slice(idx + 1)];
      })
  );
}
// Look up the provider model configured for a Claude model name in
// env.MODEL_MAP; unmapped names fall through unchanged.
function mapModel(claudeModel, env) {
  const mapped = parseModelMap(env.MODEL_MAP)[claudeModel];
  return mapped || claudeModel;
}
// Convert an Anthropic /v1/messages payload into OpenAI chat messages.
// - `system` (if present) becomes a leading system message.
// - String content passes through as-is.
// - Array content is converted part by part, preserving order: text
//   parts map to { type: 'text' }, image parts map to
//   { type: 'image_url' } (base64 sources become data: URLs).
//   Other part types are ignored.
function transformMessages(request) {
  const messages = [];
  if (request.system) {
    messages.push({ role: 'system', content: request.system });
  }
  for (const msg of request.messages || []) {
    if (typeof msg.content === 'string') {
      messages.push({ role: msg.role, content: msg.content });
    } else if (Array.isArray(msg.content)) {
      messages.push({ role: msg.role, content: msg.content.flatMap(convertContentPart) });
    }
  }
  return messages;
}

// Convert a single Anthropic content part into zero-or-one OpenAI parts.
function convertContentPart(part) {
  if (part.type === 'text') {
    return [{ type: 'text', text: part.text }];
  }
  if (part.type === 'image') {
    const source = part.source;
    if (source?.type === 'base64') {
      const url = `data:${source.media_type};base64,${source.data}`;
      return [{ type: 'image_url', image_url: { url } }];
    }
    if (source?.type === 'url') {
      return [{ type: 'image_url', image_url: { url: source.url } }];
    }
  }
  // Unsupported part type or source: drop it, matching prior behavior.
  return [];
}
// Build one Anthropic-format SSE frame: `event:` line, JSON `data:`
// line, then the blank line that terminates the frame.
function formatSSE(event, data) {
  const body = JSON.stringify(data);
  return `event: ${event}\ndata: ${body}\n\n`;
}
// Auth middleware
// Requires every request under this router to present the shared
// gateway token, either as "Authorization: Bearer <token>" or as a
// bare token in the Authorization header.
// NOTE(review): `token !== c.env.GATEWAY_TOKEN` is a non-constant-time
// string comparison; consider a timing-safe compare if this matters.
// NOTE(review): if GATEWAY_TOKEN is unset, c.env.GATEWAY_TOKEN is
// undefined and every request is rejected — confirm that is intended.
app.use('*', async (c, next) => {
  const authHeader = c.req.header('Authorization') || '';
  // Strip an optional "Bearer " prefix; otherwise use the raw value.
  const token = authHeader.startsWith('Bearer ')
    ? authHeader.slice(7)
    : authHeader;
  if (token !== c.env.GATEWAY_TOKEN) {
    return c.json({ type: 'error', error: { type: 'authentication_error', message: 'Unauthorized' } }, 401);
  }
  await next();
});
// POST /v1/messages
//
// Anthropic-compatible proxy endpoint: translates the request into an
// OpenAI chat completion, then translates the response back into
// Anthropic's wire format — SSE by default, JSON for `stream: false`.
app.post('/messages', async (c) => {
  const env = c.env;
  // Fail fast with a clear error rather than a cryptic SDK failure.
  if (!env.OPENAI_API_KEY) {
    return c.json({ type: 'error', error: { type: 'api_error', message: 'OPENAI_API_KEY not configured' } }, 500);
  }
  try {
    const anthropicRequest = await c.req.json();
    const openai = new OpenAI({
      apiKey: env.OPENAI_API_KEY
    });
    const messages = transformMessages(anthropicRequest);
    const model = mapModel(anthropicRequest.model, env);
    // Anthropic clients default to streaming; only an explicit
    // `stream: false` selects the JSON response path.
    const wantsStream = anthropicRequest.stream !== false;
    if (wantsStream) {
      // Named `completion` (not `streamResponse`) to avoid shadowing
      // the outer boolean flag, which the previous version did.
      const completion = await openai.chat.completions.create({
        model,
        messages,
        stream: true,
        max_tokens: anthropicRequest.max_tokens,
        temperature: anthropicRequest.temperature,
        top_p: anthropicRequest.top_p
      });
      const messageId = `msg_${Date.now()}`;
      let outputTokens = 0;
      // Hono's generic stream() helper does not set any headers itself,
      // so set the SSE headers explicitly — the pre-migration Vercel
      // handler did this, and dropping them breaks SSE parsing in
      // clients that check the Content-Type.
      c.header('Content-Type', 'text/event-stream');
      c.header('Cache-Control', 'no-cache');
      c.header('Connection', 'keep-alive');
      return stream(c, async (s) => {
        // Anthropic SSE sequence: message_start → content_block_start →
        // content_block_delta* → content_block_stop → message_delta →
        // message_stop. Writes are awaited for proper backpressure.
        await s.write(formatSSE('message_start', {
          type: 'message_start',
          message: {
            id: messageId,
            type: 'message',
            role: 'assistant',
            content: [],
            model: anthropicRequest.model, // echo the requested model name
            stop_reason: null,
            usage: { input_tokens: 0, output_tokens: 0 }
          }
        }));
        await s.write(formatSSE('content_block_start', {
          type: 'content_block_start',
          index: 0,
          content_block: { type: 'text', text: '' }
        }));
        for await (const chunk of completion) {
          const delta = chunk.choices[0]?.delta;
          if (delta?.content) {
            await s.write(formatSSE('content_block_delta', {
              type: 'content_block_delta',
              index: 0,
              delta: { type: 'text_delta', text: delta.content }
            }));
          }
          // Usage arrives (if at all) on the final chunk.
          if (chunk.usage) {
            outputTokens = chunk.usage.completion_tokens || outputTokens;
          }
        }
        await s.write(formatSSE('content_block_stop', {
          type: 'content_block_stop',
          index: 0
        }));
        // message_delta carries the final stop reason and output usage.
        await s.write(formatSSE('message_delta', {
          type: 'message_delta',
          delta: { stop_reason: 'end_turn' },
          usage: { output_tokens: outputTokens }
        }));
        await s.write(formatSSE('message_stop', { type: 'message_stop' }));
      });
    } else {
      // Non-streaming: single JSON response in Anthropic's message shape.
      const response = await openai.chat.completions.create({
        model,
        messages,
        stream: false,
        max_tokens: anthropicRequest.max_tokens,
        temperature: anthropicRequest.temperature,
        top_p: anthropicRequest.top_p
      });
      const content = response.choices[0]?.message?.content || '';
      return c.json({
        id: `msg_${Date.now()}`,
        type: 'message',
        role: 'assistant',
        content: [{ type: 'text', text: content }],
        model: anthropicRequest.model,
        stop_reason: 'end_turn',
        usage: {
          input_tokens: response.usage?.prompt_tokens || 0,
          output_tokens: response.usage?.completion_tokens || 0
        }
      });
    }
  } catch (error) {
    console.error('Proxy error:', error);
    // Surface upstream API errors with their original status code.
    if (error.status) {
      return c.json({
        type: 'error',
        error: {
          type: 'api_error',
          message: error.message
        }
      }, error.status);
    }
    return c.json({
      type: 'error',
      error: {
        type: 'internal_error',
        message: 'Internal server error'
      }
    }, 500);
  }
});
export default app;

View File

@@ -1,12 +1,12 @@
{ {
"buildCommand": "npm run build",
"devCommand": "npm run dev",
"installCommand": "npm install",
"framework": null,
"rewrites": [ "rewrites": [
{ {
"source": "/v1/messages", "source": "/(.*)",
"destination": "/api/v1/messages" "destination": "/api/index"
},
{
"source": "/v1/:path*",
"destination": "/api/v1/:path*"
} }
] ]
} }

9
wrangler.toml Normal file
View File

@@ -0,0 +1,9 @@
name = "claude-central-gateway"
main = "src/index.js"
compatibility_date = "2024-01-01"
[vars]
# Set non-secret config here; store secrets with `wrangler secret put`
# (e.g. `wrangler secret put OPENAI_API_KEY`) or in the Cloudflare
# dashboard rather than committing them to wrangler.toml.
# GATEWAY_TOKEN = "your-token"
# OPENAI_API_KEY = "sk-..."
# MODEL_MAP = "claude-sonnet-4-20250514:gpt-4o"