feat: OpenAI-compatible /v1/chat/completions endpoint (#328)

This commit is contained in:
Cameron
2026-02-23 16:52:29 -08:00
committed by GitHub
parent 1cad6e6508
commit 1c083201c3
6 changed files with 1644 additions and 6 deletions

View File

@@ -30,8 +30,6 @@ jobs:
e2e:
name: E2E Tests
runs-on: ubuntu-latest
# Only run e2e on main branch (has secrets)
if: github.ref == 'refs/heads/main' || github.event_name == 'push'
steps:
- uses: actions/checkout@v4
@@ -47,6 +45,8 @@ jobs:
run: npm run build
- name: Run e2e tests
# Tests requiring secrets (bot.e2e, models.e2e) skip gracefully via describe.skipIf.
# OpenAI SDK compat tests always run (no secrets needed, uses mock gateway).
run: npm run test:e2e
env:
LETTA_API_KEY: ${{ secrets.LETTA_API_KEY }}

View File

@@ -0,0 +1,282 @@
/**
* E2E Tests for OpenAI-compatible API endpoint
*
* Uses the real `openai` npm SDK as the client to prove full compatibility.
* No Letta API secrets needed -- uses a mock AgentRouter so this runs in CI.
*
* Run with: npm run test:e2e
*/
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import * as http from 'http';
import OpenAI from 'openai';
import { createApiServer } from '../src/api/server.js';
import type { AgentRouter } from '../src/core/interfaces.js';
const TEST_API_KEY = 'e2e-test-key-openai-compat';
/**
 * Build an AgentRouter test double backed by vitest mocks.
 *
 * Every method is a vi.fn() with a canned response; pass `overrides`
 * to swap out individual methods for a specific test.
 */
function createMockRouter(overrides: Partial<AgentRouter> = {}): AgentRouter {
  // Default streaming script: reasoning, content deltas, one tool
  // round-trip, more content, then the terminal result event.
  async function* defaultStream() {
    yield { type: 'reasoning', content: 'Let me think about this...' };
    yield { type: 'assistant', content: 'Hello' };
    yield { type: 'assistant', content: ' from' };
    yield { type: 'assistant', content: ' lettabot!' };
    yield { type: 'tool_call', toolCallId: 'call_abc123', toolName: 'web_search', toolInput: { query: 'lettabot docs' } };
    yield { type: 'tool_result', content: 'Search results...' };
    yield { type: 'assistant', content: ' I found' };
    yield { type: 'assistant', content: ' the answer.' };
    yield { type: 'result', success: true };
  }

  const defaults: AgentRouter = {
    deliverToChannel: vi.fn().mockResolvedValue('msg-1'),
    sendToAgent: vi.fn().mockResolvedValue('Hello from lettabot! I can help you with that.'),
    streamToAgent: vi.fn().mockReturnValue(defaultStream()),
    getAgentNames: vi.fn().mockReturnValue(['lettabot', 'helper-bot']),
  };
  return { ...defaults, ...overrides };
}
/**
 * Resolve the OS-assigned port of a listening server.
 * Throws when the server is not bound (address() is null or a pipe path).
 */
function getPort(server: http.Server): number {
  const addr = server.address();
  if (addr === null || typeof addr === 'string') {
    throw new Error('Server not listening');
  }
  return addr.port;
}
// E2E suite: drives the real `openai` npm SDK against a locally spawned
// lettabot API server backed by a mocked AgentRouter. Proves wire-level
// compatibility without needing any Letta secrets.
describe('e2e: OpenAI SDK compatibility', () => {
  let server: http.Server;
  let port: number;
  let router: AgentRouter;
  let client: OpenAI;
  // Boot the server on an ephemeral port, then point an OpenAI client at it.
  beforeAll(async () => {
    router = createMockRouter();
    server = createApiServer(router, {
      port: 0, // OS-assigned port
      apiKey: TEST_API_KEY,
      host: '127.0.0.1',
    });
    await new Promise<void>((resolve) => {
      if (server.listening) { resolve(); return; }
      server.once('listening', resolve);
    });
    port = getPort(server);
    // Create OpenAI SDK client pointing at our server
    client = new OpenAI({
      apiKey: TEST_API_KEY,
      baseURL: `http://127.0.0.1:${port}/v1`,
    });
  });
  afterAll(async () => {
    // Close the server so the test process can exit cleanly.
    await new Promise<void>((resolve) => server.close(() => resolve()));
  });
  // ---------------------------------------------------------------------------
  // Models
  // ---------------------------------------------------------------------------
  it('lists models via OpenAI SDK', async () => {
    const models = await client.models.list();
    // The SDK returns a page object; iterate to get all
    const modelList: OpenAI.Models.Model[] = [];
    for await (const model of models) {
      modelList.push(model);
    }
    expect(modelList).toHaveLength(2);
    expect(modelList[0].id).toBe('lettabot');
    expect(modelList[1].id).toBe('helper-bot');
    expect(modelList[0].owned_by).toBe('lettabot');
  });
  // ---------------------------------------------------------------------------
  // Non-streaming (sync)
  // ---------------------------------------------------------------------------
  it('sends a sync chat completion via OpenAI SDK', async () => {
    const completion = await client.chat.completions.create({
      model: 'lettabot',
      messages: [{ role: 'user', content: 'Hello!' }],
    });
    // Validate the SDK parsed it correctly
    expect(completion.id).toMatch(/^chatcmpl-/);
    expect(completion.object).toBe('chat.completion');
    expect(completion.model).toBe('lettabot');
    expect(completion.choices).toHaveLength(1);
    expect(completion.choices[0].message.role).toBe('assistant');
    expect(completion.choices[0].message.content).toBe('Hello from lettabot! I can help you with that.');
    expect(completion.choices[0].finish_reason).toBe('stop');
    // Verify the router received the right call
    expect(router.sendToAgent).toHaveBeenCalledWith(
      'lettabot',
      'Hello!',
      expect.objectContaining({ type: 'webhook' }),
    );
  });
  it('defaults to first model when model field is omitted', async () => {
    // The OpenAI SDK requires model, but we can test with the first agent name
    const completion = await client.chat.completions.create({
      model: 'lettabot',
      messages: [
        { role: 'system', content: 'You are helpful.' },
        { role: 'user', content: 'First question' },
        { role: 'assistant', content: 'First answer' },
        { role: 'user', content: 'Follow-up question' },
      ],
    });
    // Should extract last user message
    expect(router.sendToAgent).toHaveBeenCalledWith(
      'lettabot',
      'Follow-up question',
      expect.any(Object),
    );
    expect(completion.choices[0].message.content).toBeTruthy();
  });
  it('throws on unknown model', async () => {
    await expect(
      client.chat.completions.create({
        model: 'nonexistent-model',
        messages: [{ role: 'user', content: 'Hi' }],
      })
    ).rejects.toThrow(); // SDK throws on 404
  });
  // ---------------------------------------------------------------------------
  // Streaming
  // ---------------------------------------------------------------------------
  it('streams a chat completion via OpenAI SDK', async () => {
    // Fresh mock for streaming (generators are consumed once)
    (router as any).streamToAgent = vi.fn().mockReturnValue((async function* () {
      yield { type: 'reasoning', content: 'thinking...' };
      yield { type: 'assistant', content: 'Hello' };
      yield { type: 'assistant', content: ' world' };
      yield { type: 'result', success: true };
    })());
    const stream = await client.chat.completions.create({
      model: 'lettabot',
      messages: [{ role: 'user', content: 'Stream test' }],
      stream: true,
    });
    const chunks: OpenAI.Chat.Completions.ChatCompletionChunk[] = [];
    for await (const chunk of stream) {
      chunks.push(chunk);
    }
    // Should have role announcement + content deltas + stop
    expect(chunks.length).toBeGreaterThanOrEqual(3);
    // First chunk should announce the role
    expect(chunks[0].choices[0].delta.role).toBe('assistant');
    // Collect all content
    const content = chunks
      .map(c => c.choices[0].delta.content)
      .filter(Boolean)
      .join('');
    expect(content).toBe('Hello world');
    // Last chunk should have finish_reason
    const lastChunk = chunks[chunks.length - 1];
    expect(lastChunk.choices[0].finish_reason).toBe('stop');
    // All chunks should share the same ID
    const ids = new Set(chunks.map(c => c.id));
    expect(ids.size).toBe(1);
    expect(chunks[0].id).toMatch(/^chatcmpl-/);
  });
  it('streams tool calls in OpenAI format', async () => {
    (router as any).streamToAgent = vi.fn().mockReturnValue((async function* () {
      yield { type: 'assistant', content: 'Let me search.' };
      yield { type: 'tool_call', toolCallId: 'call_xyz', toolName: 'web_search', toolInput: { query: 'test' } };
      yield { type: 'tool_result', content: 'results' };
      yield { type: 'assistant', content: ' Found it!' };
      yield { type: 'result', success: true };
    })());
    const stream = await client.chat.completions.create({
      model: 'lettabot',
      messages: [{ role: 'user', content: 'Search for something' }],
      stream: true,
    });
    const chunks: OpenAI.Chat.Completions.ChatCompletionChunk[] = [];
    for await (const chunk of stream) {
      chunks.push(chunk);
    }
    // Find tool call chunks
    const toolCallChunks = chunks.filter(c => c.choices[0].delta.tool_calls);
    expect(toolCallChunks).toHaveLength(1);
    const toolCall = toolCallChunks[0].choices[0].delta.tool_calls![0];
    expect(toolCall.function?.name).toBe('web_search');
    expect(toolCall.function?.arguments).toContain('test');
    expect(toolCall.id).toBe('call_xyz');
    // Content should not include reasoning or tool results
    const content = chunks
      .map(c => c.choices[0].delta.content)
      .filter(Boolean)
      .join('');
    expect(content).toBe('Let me search. Found it!');
    expect(content).not.toContain('thinking');
    expect(content).not.toContain('results');
  });
  it('filters reasoning from streamed output', async () => {
    (router as any).streamToAgent = vi.fn().mockReturnValue((async function* () {
      yield { type: 'reasoning', content: 'Deep reasoning about the problem...' };
      yield { type: 'reasoning', content: 'More thinking happening here...' };
      yield { type: 'assistant', content: 'Here is my answer.' };
      yield { type: 'result', success: true };
    })());
    const stream = await client.chat.completions.create({
      model: 'lettabot',
      messages: [{ role: 'user', content: 'Think hard' }],
      stream: true,
    });
    const allContent: string[] = [];
    for await (const chunk of stream) {
      if (chunk.choices[0].delta.content) {
        allContent.push(chunk.choices[0].delta.content);
      }
    }
    const fullText = allContent.join('');
    expect(fullText).toBe('Here is my answer.');
    expect(fullText).not.toContain('Deep reasoning');
    expect(fullText).not.toContain('More thinking');
  });
  // ---------------------------------------------------------------------------
  // Auth
  // ---------------------------------------------------------------------------
  it('authenticates with Bearer token (OpenAI SDK default)', async () => {
    // The OpenAI SDK sends Authorization: Bearer <key> by default
    // If we got this far, auth is working. But let's also verify a wrong key fails.
    const badClient = new OpenAI({
      apiKey: 'wrong-key',
      baseURL: `http://127.0.0.1:${port}/v1`,
    });
    await expect(
      badClient.chat.completions.create({
        model: 'lettabot',
        messages: [{ role: 'user', content: 'Hi' }],
      })
    ).rejects.toThrow();
  });
});

View File

@@ -99,12 +99,45 @@ export function saveApiKey(key: string): void {
}
/**
* Validate API key from request headers
* Extract API key from request headers.
* Checks X-Api-Key first (lettabot convention), then Authorization: Bearer <key> (OpenAI convention).
*
* Note: When using Authorization header, ensure CORS includes 'Authorization' in Access-Control-Allow-Headers.
*
* @param headers - HTTP request headers
* @returns The extracted API key, or null if not found
*/
/**
 * Extract the API key from request headers.
 *
 * Checks `X-Api-Key` first (lettabot convention), then falls back to
 * `Authorization: Bearer <key>` (OpenAI convention). The Bearer scheme is
 * matched case-insensitively, and trailing whitespace in the header value is
 * stripped so keys sent as e.g. "Bearer abc " still validate.
 *
 * Note: when clients authenticate via the Authorization header, CORS
 * responses must include 'Authorization' in Access-Control-Allow-Headers.
 *
 * @param headers - HTTP request headers (Node lowercases header names)
 * @returns The extracted API key, or null if not found
 */
export function extractApiKey(headers: IncomingHttpHeaders): string | null {
  // 1. X-Api-Key header (lettabot convention). IncomingHttpHeaders types
  // values as string | string[] | undefined; a non-string value is ignored.
  const xApiKey = headers['x-api-key'];
  if (xApiKey && typeof xApiKey === 'string') {
    return xApiKey;
  }
  // 2. Authorization: Bearer <key> (OpenAI convention).
  const auth = headers['authorization'];
  if (auth && typeof auth === 'string') {
    // Lazy capture + anchored \s* so trailing whitespace is excluded from
    // the key (the previous greedy `(.+)$` captured it, breaking validation).
    const match = auth.match(/^Bearer\s+(.+?)\s*$/i);
    if (match) {
      return match[1];
    }
  }
  return null;
}
/**
* Validate API key from request headers.
* Supports both X-Api-Key and Authorization: Bearer <key> formats.
*
* @param headers - HTTP request headers
* @param expectedKey - The expected API key to validate against
* @returns true if the provided key matches the expected key, false otherwise
*/
export function validateApiKey(headers: IncomingHttpHeaders, expectedKey: string): boolean {
const providedKey = headers['x-api-key'];
const providedKey = extractApiKey(headers);
if (!providedKey || typeof providedKey !== 'string') {
if (!providedKey) {
return false;
}

View File

@@ -0,0 +1,834 @@
import { describe, it, expect, vi, beforeAll, afterAll } from 'vitest';
import * as http from 'http';
import { createApiServer } from './server.js';
import type { AgentRouter } from '../core/interfaces.js';
import {
generateCompletionId,
extractLastUserMessage,
buildCompletion,
buildChunk,
buildToolCallChunk,
formatSSE,
SSE_DONE,
buildErrorResponse,
buildModelList,
validateChatRequest,
} from './openai-compat.js';
import type { OpenAIChatMessage } from './openai-compat.js';
const TEST_API_KEY = 'test-key-12345';
const TEST_PORT = 0;
/**
 * AgentRouter stub built from vi.fn() mocks.
 * Pass `overrides` to replace individual methods per test.
 */
function createMockRouter(overrides: Partial<AgentRouter> = {}): AgentRouter {
  // Canned stream: one reasoning event, two content deltas, terminal result.
  async function* cannedStream() {
    yield { type: 'reasoning', content: 'thinking...' };
    yield { type: 'assistant', content: 'Hello ' };
    yield { type: 'assistant', content: 'world' };
    yield { type: 'result', success: true };
  }

  const base: AgentRouter = {
    deliverToChannel: vi.fn().mockResolvedValue('msg-1'),
    sendToAgent: vi.fn().mockResolvedValue('Agent says hello'),
    streamToAgent: vi.fn().mockReturnValue(cannedStream()),
    getAgentNames: vi.fn().mockReturnValue(['LettaBot']),
  };
  return { ...base, ...overrides };
}
/**
 * Return the port the server is actually listening on.
 * Throws when address() reports no TCP binding (null or a pipe string).
 */
function getPort(server: http.Server): number {
  const addr = server.address();
  if (addr === null || typeof addr === 'string') {
    throw new Error('Server not listening');
  }
  return addr.port;
}
/**
 * Minimal HTTP client for route tests: issue a request against the local
 * server and resolve with the status code, response headers, and the full
 * response body decoded as a UTF-8 string.
 */
async function request(
  port: number,
  method: string,
  path: string,
  body?: string,
  headers: Record<string, string> = {},
): Promise<{ status: number; headers: http.IncomingHttpHeaders; body: string }> {
  return new Promise((resolve, reject) => {
    const options = { hostname: '127.0.0.1', port, method, path, headers };
    const req = http.request(options, (res) => {
      const parts: Buffer[] = [];
      res.on('data', (chunk) => parts.push(chunk));
      res.on('end', () => {
        resolve({
          status: res.statusCode!,
          headers: res.headers,
          body: Buffer.concat(parts).toString(),
        });
      });
    });
    req.on('error', reject);
    if (body) req.write(body);
    req.end();
  });
}
// ============================================================================
// UNIT TESTS FOR UTILITY FUNCTIONS
// ============================================================================
// Unit coverage for the pure helper functions exported by openai-compat.ts:
// ID generation, message extraction, response/chunk builders, SSE framing,
// error shapes, model lists, and request validation.
describe('openai-compat utilities', () => {
  describe('generateCompletionId', () => {
    it('returns a string starting with chatcmpl-', () => {
      const id = generateCompletionId();
      expect(id).toMatch(/^chatcmpl-/);
    });
    it('returns unique IDs', () => {
      const id1 = generateCompletionId();
      const id2 = generateCompletionId();
      expect(id1).not.toBe(id2);
    });
    it('returns IDs with reasonable length', () => {
      const id = generateCompletionId();
      expect(id.length).toBeGreaterThan(10);
    });
  });
  describe('extractLastUserMessage', () => {
    it('returns the last user message content', () => {
      const messages: OpenAIChatMessage[] = [
        { role: 'user', content: 'First message' },
        { role: 'assistant', content: 'Response' },
        { role: 'user', content: 'Last message' },
      ];
      expect(extractLastUserMessage(messages)).toBe('Last message');
    });
    it('returns null when no user messages', () => {
      const messages: OpenAIChatMessage[] = [
        { role: 'system', content: 'System prompt' },
        { role: 'assistant', content: 'Hello' },
      ];
      expect(extractLastUserMessage(messages)).toBeNull();
    });
    it('skips system and assistant messages', () => {
      const messages: OpenAIChatMessage[] = [
        { role: 'system', content: 'System' },
        { role: 'assistant', content: 'Assistant' },
        { role: 'user', content: 'User message' },
        { role: 'assistant', content: 'Another assistant' },
      ];
      expect(extractLastUserMessage(messages)).toBe('User message');
    });
    it('returns null for empty array', () => {
      expect(extractLastUserMessage([])).toBeNull();
    });
    it('handles only one user message', () => {
      const messages: OpenAIChatMessage[] = [{ role: 'user', content: 'Only message' }];
      expect(extractLastUserMessage(messages)).toBe('Only message');
    });
  });
  describe('buildCompletion', () => {
    it('builds a valid completion response', () => {
      const result = buildCompletion('chatcmpl-1', 'LettaBot', 'Hello!');
      expect(result.object).toBe('chat.completion');
      expect(result.id).toBe('chatcmpl-1');
      expect(result.model).toBe('LettaBot');
      expect(result.choices).toHaveLength(1);
      expect(result.choices[0].index).toBe(0);
      expect(result.choices[0].message.role).toBe('assistant');
      expect(result.choices[0].message.content).toBe('Hello!');
      expect(result.choices[0].finish_reason).toBe('stop');
      expect(result.usage).toBeNull();
      expect(result.created).toBeGreaterThan(0);
    });
    it('respects custom finish_reason', () => {
      const result = buildCompletion('chatcmpl-2', 'bot', 'text', 'tool_calls');
      expect(result.choices[0].finish_reason).toBe('tool_calls');
    });
    it('sets created timestamp', () => {
      // Bracket the call with second-resolution timestamps (OpenAI's
      // `created` field is in seconds).
      const before = Math.floor(Date.now() / 1000);
      const result = buildCompletion('chatcmpl-3', 'bot', 'text');
      const after = Math.floor(Date.now() / 1000);
      expect(result.created).toBeGreaterThanOrEqual(before);
      expect(result.created).toBeLessThanOrEqual(after);
    });
  });
  describe('buildChunk', () => {
    it('builds a valid streaming chunk', () => {
      const chunk = buildChunk('chatcmpl-1', 'LettaBot', { content: 'Hello' });
      expect(chunk.object).toBe('chat.completion.chunk');
      expect(chunk.id).toBe('chatcmpl-1');
      expect(chunk.model).toBe('LettaBot');
      expect(chunk.choices).toHaveLength(1);
      expect(chunk.choices[0].index).toBe(0);
      expect(chunk.choices[0].delta).toEqual({ content: 'Hello' });
      expect(chunk.choices[0].finish_reason).toBeNull();
      expect(chunk.created).toBeGreaterThan(0);
    });
    it('includes finish_reason when provided', () => {
      const chunk = buildChunk('chatcmpl-2', 'bot', { content: 'Done' }, 'stop');
      expect(chunk.choices[0].finish_reason).toBe('stop');
    });
    it('handles role delta', () => {
      const chunk = buildChunk('chatcmpl-3', 'bot', { role: 'assistant' });
      expect(chunk.choices[0].delta).toEqual({ role: 'assistant' });
    });
  });
  describe('buildToolCallChunk', () => {
    it('builds a tool call chunk with correct structure', () => {
      const chunk = buildToolCallChunk(
        'chatcmpl-1',
        'LettaBot',
        0,
        'call_123',
        'web_search',
        '{"query":"test"}',
      );
      expect(chunk.object).toBe('chat.completion.chunk');
      expect(chunk.choices[0].delta.tool_calls).toHaveLength(1);
      expect(chunk.choices[0].delta.tool_calls![0]).toEqual({
        index: 0,
        id: 'call_123',
        type: 'function',
        function: {
          name: 'web_search',
          arguments: '{"query":"test"}',
        },
      });
    });
    it('handles different tool indices', () => {
      const chunk = buildToolCallChunk(
        'chatcmpl-2',
        'bot',
        2,
        'call_456',
        'calculator',
        '{}',
      );
      expect(chunk.choices[0].delta.tool_calls![0].index).toBe(2);
    });
  });
  describe('formatSSE', () => {
    it('formats data as SSE line', () => {
      expect(formatSSE({ test: 1 })).toBe('data: {"test":1}\n\n');
    });
    it('handles complex objects', () => {
      const data = { nested: { value: 'test' }, array: [1, 2, 3] };
      const result = formatSSE(data);
      expect(result).toMatch(/^data: /);
      expect(result).toMatch(/\n\n$/);
      // Round-trip: the payload between "data: " and the terminator must
      // parse back to the original object.
      expect(JSON.parse(result.replace('data: ', '').trim())).toEqual(data);
    });
    it('SSE_DONE constant is correct', () => {
      expect(SSE_DONE).toBe('data: [DONE]\n\n');
    });
  });
  describe('buildErrorResponse', () => {
    it('builds error with default values', () => {
      const result = buildErrorResponse('Something went wrong');
      expect(result.status).toBe(400);
      expect(result.body.error.message).toBe('Something went wrong');
      expect(result.body.error.type).toBe('invalid_request_error');
    });
    it('respects custom status and type', () => {
      const result = buildErrorResponse('Not found', 'model_not_found', 404);
      expect(result.status).toBe(404);
      expect(result.body.error.type).toBe('model_not_found');
      expect(result.body.error.message).toBe('Not found');
    });
    it('includes null code field', () => {
      const result = buildErrorResponse('Test');
      expect(result.body.error.code).toBeNull();
    });
  });
  describe('buildModelList', () => {
    it('builds model list from agent names', () => {
      const list = buildModelList(['bot1', 'bot2']);
      expect(list.object).toBe('list');
      expect(list.data).toHaveLength(2);
      expect(list.data[0].id).toBe('bot1');
      expect(list.data[0].object).toBe('model');
      expect(list.data[0].owned_by).toBe('lettabot');
      expect(list.data[1].id).toBe('bot2');
    });
    it('handles empty agent list', () => {
      const list = buildModelList([]);
      expect(list.object).toBe('list');
      expect(list.data).toHaveLength(0);
    });
    it('sets created timestamps', () => {
      const list = buildModelList(['bot1']);
      expect(list.data[0].created).toBeGreaterThan(0);
    });
  });
  describe('validateChatRequest', () => {
    // validateChatRequest returns null for a valid body, or an
    // { status, body } error envelope for an invalid one.
    it('returns null for valid request', () => {
      expect(
        validateChatRequest({ messages: [{ role: 'user', content: 'hi' }] }),
      ).toBeNull();
    });
    it('returns error for missing messages', () => {
      const err = validateChatRequest({});
      expect(err).not.toBeNull();
      expect(err!.status).toBe(400);
      expect(err!.body.error.message).toContain('messages');
    });
    it('returns error for empty messages', () => {
      const err = validateChatRequest({ messages: [] });
      expect(err).not.toBeNull();
      expect(err!.status).toBe(400);
      expect(err!.body.error.message).toContain('non-empty array');
    });
    it('returns error for non-object body', () => {
      const err1 = validateChatRequest(null);
      expect(err1).not.toBeNull();
      expect(err1!.status).toBe(400);
      const err2 = validateChatRequest('string');
      expect(err2).not.toBeNull();
      expect(err2!.status).toBe(400);
    });
    it('returns error for non-array messages', () => {
      const err = validateChatRequest({ messages: 'not an array' });
      expect(err).not.toBeNull();
      expect(err!.status).toBe(400);
    });
    it('accepts request with multiple messages', () => {
      const result = validateChatRequest({
        messages: [
          { role: 'system', content: 'You are helpful' },
          { role: 'user', content: 'Hello' },
          { role: 'assistant', content: 'Hi!' },
          { role: 'user', content: 'How are you?' },
        ],
      });
      expect(result).toBeNull();
    });
  });
});
// ============================================================================
// SERVER ROUTE TESTS: GET /v1/models
// ============================================================================
// Route tests for the model-listing endpoint: auth (both header styles),
// the OpenAI list envelope, and rejection of bad credentials.
describe('GET /v1/models', () => {
  let server: http.Server;
  let port: number;
  let router: AgentRouter;
  // Start a server with a mock router on an ephemeral port.
  beforeAll(async () => {
    router = createMockRouter();
    server = createApiServer(router, {
      port: TEST_PORT,
      apiKey: TEST_API_KEY,
      host: '127.0.0.1',
    });
    await new Promise<void>((resolve) => {
      if (server.listening) {
        resolve();
        return;
      }
      server.once('listening', resolve);
    });
    port = getPort(server);
  });
  afterAll(async () => {
    await new Promise<void>((resolve) => server.close(() => resolve()));
  });
  it('returns 401 without auth', async () => {
    const res = await request(port, 'GET', '/v1/models');
    expect(res.status).toBe(401);
    const parsed = JSON.parse(res.body);
    expect(parsed.error.type).toBe('invalid_request_error');
  });
  it('accepts Bearer token auth', async () => {
    const res = await request(port, 'GET', '/v1/models', undefined, {
      authorization: `Bearer ${TEST_API_KEY}`,
    });
    expect(res.status).toBe(200);
    const parsed = JSON.parse(res.body);
    expect(parsed.object).toBe('list');
    expect(parsed.data).toHaveLength(1);
    expect(parsed.data[0].id).toBe('LettaBot');
  });
  it('accepts X-Api-Key header', async () => {
    const res = await request(port, 'GET', '/v1/models', undefined, {
      'x-api-key': TEST_API_KEY,
    });
    expect(res.status).toBe(200);
    const parsed = JSON.parse(res.body);
    expect(parsed.object).toBe('list');
  });
  it('returns model list with correct structure', async () => {
    const res = await request(port, 'GET', '/v1/models', undefined, {
      authorization: `Bearer ${TEST_API_KEY}`,
    });
    expect(res.status).toBe(200);
    const parsed = JSON.parse(res.body);
    expect(parsed.object).toBe('list');
    expect(parsed.data).toHaveLength(1);
    expect(parsed.data[0]).toMatchObject({
      id: 'LettaBot',
      object: 'model',
      owned_by: 'lettabot',
    });
    expect(parsed.data[0].created).toBeGreaterThan(0);
  });
  it('returns 401 with wrong API key', async () => {
    const res = await request(port, 'GET', '/v1/models', undefined, {
      authorization: 'Bearer wrong-key',
    });
    expect(res.status).toBe(401);
  });
});
// ============================================================================
// SERVER ROUTE TESTS: POST /v1/chat/completions
// ============================================================================
describe('POST /v1/chat/completions', () => {
let server: http.Server;
let port: number;
let router: AgentRouter;
beforeAll(async () => {
router = createMockRouter();
server = createApiServer(router, {
port: TEST_PORT,
apiKey: TEST_API_KEY,
host: '127.0.0.1',
});
await new Promise<void>((resolve) => {
if (server.listening) {
resolve();
return;
}
server.once('listening', resolve);
});
port = getPort(server);
});
afterAll(async () => {
await new Promise<void>((resolve) => server.close(() => resolve()));
});
it('returns 401 without auth', async () => {
const body = JSON.stringify({
model: 'LettaBot',
messages: [{ role: 'user', content: 'Hello' }],
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
});
expect(res.status).toBe(401);
const parsed = JSON.parse(res.body);
expect(parsed.error.type).toBe('invalid_request_error');
});
it('accepts Bearer auth', async () => {
const body = JSON.stringify({
model: 'LettaBot',
messages: [{ role: 'user', content: 'Hello' }],
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
authorization: `Bearer ${TEST_API_KEY}`,
});
expect(res.status).toBe(200);
});
it('accepts X-Api-Key header', async () => {
const body = JSON.stringify({
model: 'LettaBot',
messages: [{ role: 'user', content: 'Hello' }],
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(200);
});
it('returns sync completion by default', async () => {
const body = JSON.stringify({
model: 'LettaBot',
messages: [{ role: 'user', content: 'Hello' }],
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(200);
const parsed = JSON.parse(res.body);
expect(parsed.object).toBe('chat.completion');
expect(parsed.id).toMatch(/^chatcmpl-/);
expect(parsed.model).toBe('LettaBot');
expect(parsed.choices).toHaveLength(1);
expect(parsed.choices[0].index).toBe(0);
expect(parsed.choices[0].message.role).toBe('assistant');
expect(parsed.choices[0].message.content).toBe('Agent says hello');
expect(parsed.choices[0].finish_reason).toBe('stop');
expect(parsed.usage).toBeNull();
expect(parsed.created).toBeGreaterThan(0);
});
it('returns 404 for unknown model', async () => {
const body = JSON.stringify({
model: 'UnknownBot',
messages: [{ role: 'user', content: 'hi' }],
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(404);
const parsed = JSON.parse(res.body);
expect(parsed.error.type).toBe('model_not_found');
expect(parsed.error.message).toContain('UnknownBot');
});
it('returns 400 for missing messages', async () => {
const body = JSON.stringify({ model: 'LettaBot' });
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(400);
const parsed = JSON.parse(res.body);
expect(parsed.error.type).toBe('invalid_request_error');
expect(parsed.error.message).toContain('messages');
});
it('returns 400 for empty messages array', async () => {
const body = JSON.stringify({ model: 'LettaBot', messages: [] });
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(400);
});
it('returns 400 for invalid JSON', async () => {
const res = await request(
port,
'POST',
'/v1/chat/completions',
'not valid json',
{
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
},
);
expect(res.status).toBe(400);
});
it('extracts last user message from messages array', async () => {
const body = JSON.stringify({
model: 'LettaBot',
messages: [
{ role: 'system', content: 'You are helpful' },
{ role: 'user', content: 'First message' },
{ role: 'assistant', content: 'I see' },
{ role: 'user', content: 'Second message' },
],
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(200);
expect(router.sendToAgent).toHaveBeenCalledWith(
'LettaBot',
'Second message',
expect.any(Object),
);
});
it('returns SSE stream when stream: true', async () => {
// Reset mock for streaming
(router as any).streamToAgent = vi.fn().mockReturnValue(
(async function* () {
yield { type: 'reasoning', content: 'thinking...' };
yield { type: 'assistant', content: 'Hello ' };
yield { type: 'assistant', content: 'world' };
yield {
type: 'tool_call',
toolCallId: 'call_1',
toolName: 'web_search',
toolInput: { query: 'test' },
};
yield { type: 'tool_result', content: 'result data' };
yield { type: 'assistant', content: '!' };
yield { type: 'result', success: true };
})(),
);
const body = JSON.stringify({
model: 'LettaBot',
messages: [{ role: 'user', content: 'Stream' }],
stream: true,
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(200);
expect(res.headers['content-type']).toBe('text/event-stream');
expect(res.headers['cache-control']).toBe('no-cache');
expect(res.headers['connection']).toBe('keep-alive');
// Parse SSE events
const events = res.body
.split('\n\n')
.filter((line) => line.startsWith('data: '))
.map((line) => line.replace('data: ', ''))
.filter((line) => line !== '[DONE]')
.map((line) => JSON.parse(line));
// Should have: role announcement, content chunks, tool_call, final chunk
expect(events.length).toBeGreaterThanOrEqual(5);
// First chunk: role
expect(events[0].object).toBe('chat.completion.chunk');
expect(events[0].choices[0].delta.role).toBe('assistant');
expect(events[0].choices[0].finish_reason).toBeNull();
// Content deltas (reasoning should be skipped, tool_result should be skipped)
const contentChunks = events.filter(
(e: any) => e.choices[0].delta.content !== undefined,
);
const contentParts = contentChunks.map((e: any) => e.choices[0].delta.content);
expect(contentParts).toContain('Hello ');
expect(contentParts).toContain('world');
expect(contentParts).toContain('!');
expect(contentParts).not.toContain('thinking...'); // reasoning filtered
// Tool call chunk
const toolChunks = events.filter((e: any) => e.choices[0].delta.tool_calls);
expect(toolChunks).toHaveLength(1);
expect(toolChunks[0].choices[0].delta.tool_calls[0].function.name).toBe(
'web_search',
);
expect(toolChunks[0].choices[0].delta.tool_calls[0].id).toBe('call_1');
expect(
JSON.parse(toolChunks[0].choices[0].delta.tool_calls[0].function.arguments),
).toEqual({ query: 'test' });
// Final chunk has finish_reason
const lastEvent = events[events.length - 1];
expect(lastEvent.choices[0].finish_reason).toBe('stop');
// data: [DONE] should be present
expect(res.body).toContain('data: [DONE]');
});
it('handles stream with only assistant content', async () => {
(router as any).streamToAgent = vi.fn().mockReturnValue(
(async function* () {
yield { type: 'assistant', content: 'Simple ' };
yield { type: 'assistant', content: 'response' };
yield { type: 'result', success: true };
})(),
);
const body = JSON.stringify({
model: 'LettaBot',
messages: [{ role: 'user', content: 'Hi' }],
stream: true,
});
const res = await request(port, 'POST', '/v1/chat/completions', body, {
'content-type': 'application/json',
'x-api-key': TEST_API_KEY,
});
expect(res.status).toBe(200);
const events = res.body
.split('\n\n')
.filter((line) => line.startsWith('data: '))
.map((line) => line.replace('data: ', ''))
.filter((line) => line !== '[DONE]')
.map((line) => JSON.parse(line));
// Role + 2 content chunks + final chunk
expect(events.length).toBe(4);
expect(events[0].choices[0].delta.role).toBe('assistant');
expect(events[1].choices[0].delta.content).toBe('Simple ');
expect(events[2].choices[0].delta.content).toBe('response');
expect(events[3].choices[0].finish_reason).toBe('stop');
});
it('calls streamToAgent with correct parameters', async () => {
  const streamMock = vi.fn().mockReturnValue(
    (async function* () {
      yield { type: 'assistant', content: 'test' };
      yield { type: 'result', success: true };
    })(),
  );
  (router as any).streamToAgent = streamMock;

  await request(
    port,
    'POST',
    '/v1/chat/completions',
    JSON.stringify({
      model: 'LettaBot',
      messages: [{ role: 'user', content: 'Test message' }],
      stream: true,
    }),
    { 'content-type': 'application/json', 'x-api-key': TEST_API_KEY },
  );

  // Agent name comes from `model`, the prompt from the last user message.
  expect(streamMock).toHaveBeenCalledWith('LettaBot', 'Test message', expect.any(Object));
});
it('filters out reasoning events in stream', async () => {
  (router as any).streamToAgent = vi.fn().mockReturnValue(
    (async function* () {
      yield { type: 'reasoning', content: 'This should not appear' };
      yield { type: 'reasoning', content: 'Neither should this' };
      yield { type: 'assistant', content: 'But this should' };
      yield { type: 'result', success: true };
    })(),
  );

  const res = await request(
    port,
    'POST',
    '/v1/chat/completions',
    JSON.stringify({
      model: 'LettaBot',
      messages: [{ role: 'user', content: 'Test' }],
      stream: true,
    }),
    { 'content-type': 'application/json', 'x-api-key': TEST_API_KEY },
  );

  // Reassemble the streamed text from the content deltas only.
  let streamedText = '';
  for (const frame of res.body.split('\n\n')) {
    if (!frame.startsWith('data: ')) continue;
    const data = frame.slice('data: '.length);
    if (data === '[DONE]') continue;
    const content = JSON.parse(data).choices[0].delta.content;
    if (content) streamedText += content;
  }

  // Reasoning text must never reach the client.
  expect(streamedText).not.toContain('This should not appear');
  expect(streamedText).not.toContain('Neither should this');
  expect(streamedText).toBe('But this should');
});
// tool_result events are agent-internal: they must never surface as OpenAI
// content deltas (only tool_call chunks and assistant text reach clients).
it('filters out tool_result events in stream', async () => {
  // Mock stream: one tool call, its (hidden) result, then the visible answer.
  (router as any).streamToAgent = vi.fn().mockReturnValue(
    (async function* () {
      yield {
        type: 'tool_call',
        toolCallId: 'call_1',
        toolName: 'test',
        toolInput: {},
      };
      yield { type: 'tool_result', content: 'This should be hidden' };
      yield { type: 'assistant', content: 'Final answer' };
      yield { type: 'result', success: true };
    })(),
  );
  const body = JSON.stringify({
    model: 'LettaBot',
    messages: [{ role: 'user', content: 'Test' }],
    stream: true,
  });
  const res = await request(port, 'POST', '/v1/chat/completions', body, {
    'content-type': 'application/json',
    'x-api-key': TEST_API_KEY,
  });
  // Parse the SSE frames back into chunk objects, dropping the [DONE] sentinel.
  const events = res.body
    .split('\n\n')
    .filter((line) => line.startsWith('data: '))
    .map((line) => line.replace('data: ', ''))
    .filter((line) => line !== '[DONE]')
    .map((line) => JSON.parse(line));
  // Reassemble every content delta; the tool result text must be absent.
  const allContent = events
    .map((e: any) => e.choices[0].delta.content)
    .filter(Boolean)
    .join('');
  expect(allContent).not.toContain('This should be hidden');
  expect(allContent).toBe('Final answer');
});
it('handles multiple tool calls in stream', async () => {
  (router as any).streamToAgent = vi.fn().mockReturnValue(
    (async function* () {
      yield { type: 'tool_call', toolCallId: 'call_1', toolName: 'tool1', toolInput: { arg: 1 } };
      yield { type: 'tool_call', toolCallId: 'call_2', toolName: 'tool2', toolInput: { arg: 2 } };
      yield { type: 'assistant', content: 'Done' };
      yield { type: 'result', success: true };
    })(),
  );

  const res = await request(
    port,
    'POST',
    '/v1/chat/completions',
    JSON.stringify({
      model: 'LettaBot',
      messages: [{ role: 'user', content: 'Test' }],
      stream: true,
    }),
    { 'content-type': 'application/json', 'x-api-key': TEST_API_KEY },
  );

  // Keep only the frames that carry a tool_calls delta.
  const toolFrames = res.body
    .split('\n\n')
    .filter((frame) => frame.startsWith('data: '))
    .map((frame) => frame.slice('data: '.length))
    .filter((frame) => frame !== '[DONE]')
    .map((frame) => JSON.parse(frame))
    .filter((e: any) => e.choices[0].delta.tool_calls);

  // Each mock tool call must arrive as its own chunk, in order.
  expect(toolFrames).toHaveLength(2);
  expect(toolFrames[0].choices[0].delta.tool_calls[0].function.name).toBe('tool1');
  expect(toolFrames[1].choices[0].delta.tool_calls[0].function.name).toBe('tool2');
});
});

316
src/api/openai-compat.ts Normal file
View File

@@ -0,0 +1,316 @@
import { randomUUID } from 'crypto';
// ============================================================================
// Request types
// ============================================================================
/**
 * OpenAI Chat Completions request body.
 *
 * Only `model`, `messages`, and `stream` are honored; any other OpenAI
 * parameters in the incoming JSON are accepted but ignored.
 */
export interface OpenAIChatRequest {
  /** Agent name -- resolved against the router's agent list, not an OpenAI model. */
  model: string;
  /** Conversation history in OpenAI message format. */
  messages: OpenAIChatMessage[];
  /** When true, the response is delivered as SSE chunks. */
  stream?: boolean;
  // We ignore other OpenAI params (temperature, max_tokens, tools, etc.)
}
/**
 * A single message in the OpenAI messages array.
 */
export interface OpenAIChatMessage {
  /** Message author; 'tool' carries a tool result back to the model. */
  role: 'system' | 'user' | 'assistant' | 'tool';
  /** Message text; null is valid (e.g. assistant turns that only carry tool_calls). */
  content: string | null;
  /** Tool invocations attached to an assistant message, if any. */
  tool_calls?: OpenAIToolCall[];
}
// ============================================================================
// Response types (non-streaming)
// ============================================================================
/**
 * OpenAI Chat Completion response (non-streaming).
 */
export interface OpenAIChatCompletion {
  /** Completion id, e.g. "chatcmpl-<uuid>" (see generateCompletionId). */
  id: string;
  object: 'chat.completion';
  /** Unix timestamp (seconds) when the response was built. */
  created: number;
  model: string;
  choices: OpenAIChatChoice[];
  /** Token usage is not tracked by this server; always null. */
  usage: null;
}
/**
 * A single choice in a non-streaming completion response.
 */
export interface OpenAIChatChoice {
  index: number;
  /** The complete assistant message for this choice. */
  message: {
    role: 'assistant';
    content: string | null;
    tool_calls?: OpenAIToolCall[];
  };
  finish_reason: 'stop' | 'tool_calls' | 'length' | null;
}
// ============================================================================
// Response types (streaming)
// ============================================================================
/**
 * OpenAI Chat Completion chunk (streaming). One chunk is serialized per
 * SSE "data:" frame.
 */
export interface OpenAIChatChunk {
  /** The same completion id is repeated on every chunk of one stream. */
  id: string;
  object: 'chat.completion.chunk';
  /** Unix timestamp (seconds) when this chunk was built. */
  created: number;
  model: string;
  choices: OpenAIChatChunkChoice[];
}
/**
 * A single choice in a streaming chunk.
 */
export interface OpenAIChatChunkChoice {
  index: number;
  /** Incremental update: role on the first chunk, then content or tool_calls. */
  delta: {
    role?: 'assistant';
    content?: string | null;
    tool_calls?: OpenAIToolCallDelta[];
  };
  /** null on intermediate chunks; set on the final chunk. */
  finish_reason: 'stop' | 'tool_calls' | 'length' | null;
}
// ============================================================================
// Tool call types
// ============================================================================
/**
 * OpenAI tool call (non-streaming).
 */
export interface OpenAIToolCall {
  /** Tool call id, e.g. "call_...". */
  id: string;
  type: 'function';
  function: {
    name: string;
    /** JSON-encoded arguments string (not a parsed object). */
    arguments: string;
  };
}
/**
 * OpenAI tool call delta (streaming). Fields other than `index` are optional
 * because OpenAI clients expect incremental tool-call deltas; this server
 * emits each tool call complete in a single chunk (name + args together).
 */
export interface OpenAIToolCallDelta {
  /** Position of this tool call within the message's tool_calls array. */
  index: number;
  id?: string;
  type?: 'function';
  function?: {
    name?: string;
    arguments?: string;
  };
}
// ============================================================================
// Models endpoint
// ============================================================================
/**
 * OpenAI models list response.
 */
export interface OpenAIModelList {
  object: 'list';
  data: OpenAIModel[];
}
/**
 * A single model in the models list. Each configured agent is exposed as
 * one "model" (see buildModelList).
 */
export interface OpenAIModel {
  /** Agent name, used as the model id. */
  id: string;
  object: 'model';
  /** Unix timestamp (seconds); set at list-build time, not agent creation. */
  created: number;
  /** Always 'lettabot' for models built by buildModelList. */
  owned_by: string;
}
// ============================================================================
// Error response
// ============================================================================
/**
 * OpenAI error response envelope, as produced by buildErrorResponse.
 */
export interface OpenAIErrorResponse {
  error: {
    /** Human-readable description of what went wrong. */
    message: string;
    /** OpenAI error type, e.g. 'invalid_request_error' or 'server_error'. */
    type: string;
    /** Offending parameter name; this server always sets null. */
    param: string | null;
    /** Machine-readable error code; this server always sets null. */
    code: string | null;
  };
}
// ============================================================================
// Helper functions
// ============================================================================
/**
 * Generate a unique chat completion ID in OpenAI's "chatcmpl-" format.
 */
export function generateCompletionId(): string {
  const suffix = randomUUID();
  return 'chatcmpl-' + suffix;
}
/**
 * Extract the last user message from an OpenAI messages array.
 * Returns the content string, or null if none found. Messages whose
 * content is null/empty are skipped, same as missing ones.
 */
export function extractLastUserMessage(messages: OpenAIChatMessage[]): string | null {
  const lastUser = [...messages]
    .reverse()
    .find((m) => m.role === 'user' && m.content);
  return lastUser ? (lastUser.content as string) : null;
}
/**
 * Build a sync (non-streaming) completion response.
 * The single choice carries the full assistant text; usage is always null.
 */
export function buildCompletion(
  id: string,
  model: string,
  content: string,
  finishReason: 'stop' | 'tool_calls' = 'stop',
): OpenAIChatCompletion {
  const createdAt = Math.floor(Date.now() / 1000);
  const choice = {
    index: 0,
    message: { role: 'assistant' as const, content },
    finish_reason: finishReason,
  };
  return {
    id,
    object: 'chat.completion' as const,
    created: createdAt,
    model,
    choices: [choice],
    usage: null,
  };
}
/**
 * Build a streaming chunk wrapping one delta (role, content, or tool_calls).
 * finish_reason stays null except on the stream's final chunk.
 */
export function buildChunk(
  id: string,
  model: string,
  delta: OpenAIChatChunkChoice['delta'],
  finishReason: 'stop' | 'tool_calls' | null = null,
): OpenAIChatChunk {
  const choice: OpenAIChatChunkChoice = {
    index: 0,
    delta,
    finish_reason: finishReason,
  };
  return {
    id,
    object: 'chat.completion.chunk',
    created: Math.floor(Date.now() / 1000),
    model,
    choices: [choice],
  };
}
/**
 * Build a tool call streaming chunk: a chunk whose delta carries exactly one
 * tool_calls entry (complete name + JSON-encoded args) and no finish_reason.
 */
export function buildToolCallChunk(
  id: string,
  model: string,
  toolIndex: number,
  toolCallId: string,
  functionName: string,
  args: string,
): OpenAIChatChunk {
  const toolCallDelta = {
    index: toolIndex,
    id: toolCallId,
    type: 'function' as const,
    function: { name: functionName, arguments: args },
  };
  return {
    id,
    object: 'chat.completion.chunk',
    created: Math.floor(Date.now() / 1000),
    model,
    choices: [{ index: 0, delta: { tool_calls: [toolCallDelta] }, finish_reason: null }],
  };
}
/**
 * Format an SSE data line. Returns "data: <json>\n\n".
 */
export function formatSSE(data: unknown): string {
  const json = JSON.stringify(data);
  return 'data: ' + json + '\n\n';
}
/**
 * The SSE stream terminator sentinel, written after the final chunk.
 */
export const SSE_DONE = 'data: [DONE]\n\n';
/**
 * Build an OpenAI-format error response: an HTTP status plus the
 * { error: { message, type, param, code } } envelope. param and code
 * are always null.
 */
export function buildErrorResponse(
  message: string,
  type: string = 'invalid_request_error',
  status: number = 400,
): { status: number; body: OpenAIErrorResponse } {
  const body: OpenAIErrorResponse = {
    error: { message, type, param: null, code: null },
  };
  return { status, body };
}
/**
* Build the models list from agent names.
*/
export function buildModelList(agentNames: string[]): OpenAIModelList {
const now = Math.floor(Date.now() / 1000);
return {
object: 'list',
data: agentNames.map(name => ({
id: name,
object: 'model' as const,
created: now,
owned_by: 'lettabot',
})),
};
}
/**
 * Validate an OpenAI chat completion request.
 * Returns null if valid, or an error response object.
 *
 * Checks: the body is an object, `messages` is a non-empty array, every
 * message is an object with a string `role`, and -- when present --
 * `content` is a string or null. Multimodal content-part arrays are not
 * supported and are rejected here rather than being passed downstream.
 */
export function validateChatRequest(body: unknown): { status: number; body: OpenAIErrorResponse } | null {
  if (!body || typeof body !== 'object') {
    return buildErrorResponse('Invalid request body', 'invalid_request_error', 400);
  }
  const req = body as Record<string, unknown>;
  if (!Array.isArray(req.messages) || req.messages.length === 0) {
    return buildErrorResponse('messages is required and must be a non-empty array', 'invalid_request_error', 400);
  }
  // Validate each message has a role and well-formed content
  for (const msg of req.messages) {
    if (!msg || typeof msg !== 'object') {
      return buildErrorResponse('Each message must be an object', 'invalid_request_error', 400);
    }
    const m = msg as Record<string, unknown>;
    if (!m.role || typeof m.role !== 'string') {
      return buildErrorResponse('Each message must have a role', 'invalid_request_error', 400);
    }
    // Previously content was never checked (despite the old comment), so
    // non-string content (e.g. OpenAI multimodal content-part arrays) slipped
    // through and was later cast to string. Reject it explicitly instead.
    if (m.content !== undefined && m.content !== null && typeof m.content !== 'string') {
      return buildErrorResponse('Message content must be a string or null', 'invalid_request_error', 400);
    }
  }
  return null;
}

View File

@@ -11,6 +11,12 @@ import { listPairingRequests, approvePairingCode } from '../pairing/store.js';
import { parseMultipart } from './multipart.js';
import type { AgentRouter } from '../core/interfaces.js';
import type { ChannelId } from '../core/types.js';
import {
generateCompletionId, extractLastUserMessage, buildCompletion,
buildChunk, buildToolCallChunk, formatSSE, SSE_DONE,
buildErrorResponse, buildModelList, validateChatRequest,
} from './openai-compat.js';
import type { OpenAIChatRequest } from './openai-compat.js';
import { createLogger } from '../logger.js';
@@ -36,7 +42,7 @@ export function createApiServer(deliverer: AgentRouter, options: ServerOptions):
const corsOrigin = options.corsOrigin || req.headers.origin || 'null';
res.setHeader('Access-Control-Allow-Origin', corsOrigin);
res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
res.setHeader('Access-Control-Allow-Headers', 'Content-Type, X-Api-Key');
res.setHeader('Access-Control-Allow-Headers', 'Content-Type, X-Api-Key, Authorization');
// Handle OPTIONS preflight
if (req.method === 'OPTIONS') {
@@ -306,6 +312,173 @@ export function createApiServer(deliverer: AgentRouter, options: ServerOptions):
return;
}
// Route: GET /v1/models (OpenAI-compatible)
if (req.url === '/v1/models' && req.method === 'GET') {
  // JSON reply helper shared by the success and error paths below.
  const sendJson = (status: number, payload: unknown) => {
    res.writeHead(status, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify(payload));
  };
  try {
    if (!validateApiKey(req.headers, options.apiKey)) {
      const unauthorized = buildErrorResponse('Invalid API key', 'invalid_request_error', 401);
      sendJson(unauthorized.status, unauthorized.body);
      return;
    }
    // One OpenAI "model" entry per configured agent.
    sendJson(200, buildModelList(deliverer.getAgentNames()));
  } catch (error: any) {
    console.error('[API] Models error:', error);
    const failure = buildErrorResponse(error.message || 'Internal server error', 'server_error', 500);
    sendJson(failure.status, failure.body);
  }
  return;
}
// Route: POST /v1/chat/completions (OpenAI-compatible)
if (req.url === '/v1/chat/completions' && req.method === 'POST') {
  try {
    // Auth first; errors use OpenAI's { error: {...} } envelope rather than
    // this server's plain-text sendError helper.
    if (!validateApiKey(req.headers, options.apiKey)) {
      const err = buildErrorResponse('Invalid API key', 'invalid_request_error', 401);
      res.writeHead(err.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(err.body));
      return;
    }
    const contentType = req.headers['content-type'] || '';
    if (!contentType.includes('application/json')) {
      const err = buildErrorResponse('Content-Type must be application/json', 'invalid_request_error', 400);
      res.writeHead(err.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(err.body));
      return;
    }
    const body = await readBody(req, MAX_BODY_SIZE);
    let parsed: unknown;
    try {
      parsed = JSON.parse(body);
    } catch {
      const err = buildErrorResponse('Invalid JSON body', 'invalid_request_error', 400);
      res.writeHead(err.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(err.body));
      return;
    }
    // Validate OpenAI request shape
    const validationError = validateChatRequest(parsed);
    if (validationError) {
      res.writeHead(validationError.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(validationError.body));
      return;
    }
    const chatReq = parsed as OpenAIChatRequest;
    // Extract the last user message -- only the newest user turn is forwarded
    // to the agent; earlier entries in `messages` are not replayed.
    const userMessage = extractLastUserMessage(chatReq.messages);
    if (!userMessage) {
      const err = buildErrorResponse('No user message found in messages array', 'invalid_request_error', 400);
      res.writeHead(err.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(err.body));
      return;
    }
    if (userMessage.length > MAX_TEXT_LENGTH) {
      const err = buildErrorResponse(`Message too long (max ${MAX_TEXT_LENGTH} chars)`, 'invalid_request_error', 400);
      res.writeHead(err.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(err.body));
      return;
    }
    // Resolve agent from model field; an omitted model falls back to the
    // first configured agent. NOTE(review): when no model is given AND no
    // agents are configured, agentName stays undefined and is passed to
    // streamToAgent/sendToAgent as-is -- confirm the router tolerates that.
    const agentNames = deliverer.getAgentNames();
    const modelName = chatReq.model || agentNames[0];
    const agentName = agentNames.includes(modelName) ? modelName : undefined;
    // If an explicit model was requested but doesn't match any agent, error
    if (chatReq.model && !agentNames.includes(chatReq.model)) {
      const err = buildErrorResponse(
        `Model not found: ${chatReq.model}. Available: ${agentNames.join(', ')}`,
        'model_not_found',
        404,
      );
      res.writeHead(err.status, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(err.body));
      return;
    }
    const completionId = generateCompletionId();
    // Webhook/silent context: the agent's reply comes back on this HTTP
    // response only, not to a chat channel.
    const context = { type: 'webhook' as const, outputMode: 'silent' as const };
    console.log(`[API] OpenAI chat: model="${modelName}", stream=${!!chatReq.stream}, msg="${userMessage.slice(0, 100)}..."`);
    if (chatReq.stream) {
      // ---- Streaming response ----
      res.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      });
      // NOTE(review): req 'close' is used as the client-disconnect signal so
      // we stop writing; presumably it fires only on connection teardown here
      // -- verify it does not also fire on normal request-body completion.
      let clientDisconnected = false;
      req.on('close', () => { clientDisconnected = true; });
      // First chunk: role announcement
      res.write(formatSSE(buildChunk(completionId, modelName, { role: 'assistant' })));
      try {
        let toolIndex = 0;
        for await (const msg of deliverer.streamToAgent(agentName, userMessage, context)) {
          if (clientDisconnected) break;
          if (msg.type === 'assistant' && msg.content) {
            // Text content delta
            res.write(formatSSE(buildChunk(completionId, modelName, { content: msg.content })));
          } else if (msg.type === 'tool_call') {
            // Tool call delta (emit name + args in one chunk)
            const toolCallId = msg.toolCallId || `call_${msg.uuid || 'unknown'}`;
            const toolName = msg.toolName || 'unknown';
            const args = msg.toolInput ? JSON.stringify(msg.toolInput) : '{}';
            res.write(formatSSE(buildToolCallChunk(
              completionId, modelName, toolIndex++, toolCallId, toolName, args,
            )));
          } else if (msg.type === 'result') {
            // Final chunk
            break;
          }
          // Skip 'reasoning', 'tool_result', and other internal types
        }
      } catch (streamError: any) {
        if (!clientDisconnected) {
          // Emit error as a content delta so clients see it -- the 200 header
          // is already sent, so a proper error status is no longer possible.
          res.write(formatSSE(buildChunk(completionId, modelName, {
            content: `\n\n[Error: ${streamError.message}]`,
          })));
        }
      }
      // Finish chunk + done sentinel
      if (!clientDisconnected) {
        res.write(formatSSE(buildChunk(completionId, modelName, {}, 'stop')));
        res.write(SSE_DONE);
      }
      res.end();
    } else {
      // ---- Sync response ----
      const response = await deliverer.sendToAgent(agentName, userMessage, context);
      const completion = buildCompletion(completionId, modelName, response);
      res.writeHead(200, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify(completion));
    }
  } catch (error: any) {
    console.error('[API] OpenAI chat error:', error);
    const err = buildErrorResponse(error.message || 'Internal server error', 'server_error', 500);
    res.writeHead(err.status, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify(err.body));
  }
  return;
}
// Route: 404 Not Found
sendError(res, 404, 'Not found');
});