Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,29 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
`@ai-sdk/react/useChat` directly
- AI `/chat` endpoint from `DEFAULT_AI_ROUTES` plugin REST API definition

### Added
- `ai` v6 as a dependency of `@objectstack/spec` for type re-exports
- **Vercel AI Data Stream Protocol support on `/api/v1/ai/chat`** — The chat
endpoint now supports dual-mode responses:
- **Streaming (default)**: When `stream` is not `false`, returns Vercel Data
Stream Protocol frames (`0:` text, `9:` tool-call, `d:` finish, etc.),
directly consumable by `@ai-sdk/react/useChat`
- **JSON (legacy)**: When `stream: false`, returns the original JSON response
- Accepts Vercel useChat flat body format (`system`, `model`, `temperature`,
`maxTokens` as top-level fields) alongside the legacy `{ messages, options }`
- `systemPrompt` / `system` field is prepended as a system message
- Message validation now accepts Vercel multi-part array content
- `RouteResponse.vercelDataStream` flag signals HTTP server layer to encode
events using the Vercel Data Stream frame format
- **`VercelLLMAdapter`** — Production adapter wrapping Vercel AI SDK's
`generateText` / `streamText` for any compatible model provider (OpenAI,
Anthropic, Google, Ollama, etc.)
- **`vercel-stream-encoder.ts`** — Utilities (`encodeStreamPart`,
`encodeVercelDataStream`) to convert `TextStreamPart<ToolSet>` events into
Vercel Data Stream wire-format frames
- 176 service-ai tests passing (18 new tests for stream encoder, route
dual-mode, systemPrompt, flat options, array content)

## [4.0.1] — 2026-03-31

### Fixed
Expand Down
109 changes: 106 additions & 3 deletions packages/services/service-ai/src/__tests__/ai-service.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -382,18 +382,106 @@ describe('AI Routes', () => {
expect(paths).toContain('DELETE /api/v1/ai/conversations/:id');
});

it('POST /api/v1/ai/chat should return chat result', async () => {
it('POST /api/v1/ai/chat should return JSON result when stream=false', async () => {
  // Build the route table and locate the chat endpoint.
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  const payload = { messages: [{ role: 'user', content: 'Hi' }], stream: false };
  const result = await chat.handler({ body: payload });

  // Legacy JSON mode: a plain 200 with the memory adapter's echoed content.
  expect(result.status).toBe(200);
  expect((result.body as any).content).toBe('[memory] Hi');
});

it('POST /api/v1/ai/chat should default to Vercel Data Stream mode', async () => {
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  // No `stream` flag at all — streaming must be the default behavior.
  const result = await chat.handler({
    body: { messages: [{ role: 'user', content: 'Hi' }] },
  });

  expect(result.status).toBe(200);
  expect(result.stream).toBe(true);
  expect(result.vercelDataStream).toBe(true);
  expect(result.events).toBeDefined();

  // Drain the async iterator to prove the stream actually yields frames.
  const collected: unknown[] = [];
  for await (const frame of result.events!) {
    collected.push(frame);
  }
  expect(collected.length).toBeGreaterThan(0);
});

it('POST /api/v1/ai/chat should prepend systemPrompt as system message', async () => {
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  const result = await chat.handler({
    body: {
      messages: [{ role: 'user', content: 'Hello' }],
      system: 'You are a helpful assistant',
      stream: false,
    },
  });

  // MemoryLLMAdapter echoes the last user message, so this can only assert
  // that the `system` field is accepted without breaking the request.
  // NOTE(review): this does not actually observe the prepended system
  // message — a spy adapter recording the message list would strengthen it.
  expect(result.status).toBe(200);
  expect((result.body as any).content).toBe('[memory] Hello');
});

it('POST /api/v1/ai/chat should accept deprecated systemPrompt field', async () => {
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  // Legacy callers still send `systemPrompt`; the route should treat it
  // the same way as the newer `system` field.
  const result = await chat.handler({
    body: {
      messages: [{ role: 'user', content: 'Hi' }],
      systemPrompt: 'Be concise',
      stream: false,
    },
  });

  expect(result.status).toBe(200);
  expect((result.body as any).content).toBe('[memory] Hi');
});

it('POST /api/v1/ai/chat should accept flat Vercel-style fields (model, temperature)', async () => {
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  // Vercel useChat sends model/temperature as top-level body fields rather
  // than nested under `options` — both shapes must be understood.
  const result = await chat.handler({
    body: {
      messages: [{ role: 'user', content: 'Hi' }],
      model: 'gpt-4o',
      temperature: 0.5,
      stream: false,
    },
  });

  expect(result.status).toBe(200);
  // MemoryLLMAdapter reports back the model it was given via options.
  expect((result.body as any).model).toBe('gpt-4o');
});

it('POST /api/v1/ai/chat should accept array content (Vercel multi-part)', async () => {
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  // Multi-part content arrives as an array of typed parts, not a string.
  const multiPart = [{ type: 'text', text: 'Hi' }];
  const result = await chat.handler({
    body: {
      messages: [{ role: 'user', content: multiPart }],
      stream: false,
    },
  });

  // MemoryLLMAdapter falls back to "(complex content)" for non-string content.
  expect(result.status).toBe(200);
  expect((result.body as any).content).toBe('[memory] (complex content)');
});

it('POST /api/v1/ai/chat should return 400 without messages', async () => {
const routes = buildAIRoutes(service, service.conversationService, silentLogger);
const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
Expand Down Expand Up @@ -531,16 +619,30 @@ describe('AI Routes', () => {
expect((response.body as any).error).toContain('message.role');
});

it('POST /api/v1/ai/chat should return 400 for messages with non-string content', async () => {
it('POST /api/v1/ai/chat should return 400 for messages with non-string/non-array content', async () => {
  const allRoutes = buildAIRoutes(service, service.conversationService, silentLogger);
  const chat = allRoutes.find(route => route.path === '/api/v1/ai/chat')!;

  // Only string and array (multi-part) content are valid. Every other shape
  // — numeric, plain object, boolean — must be rejected with a 400 whose
  // error message names the offending `content` field.
  const invalidContents: unknown[] = [123, { nested: true }, true];

  for (const content of invalidContents) {
    const result = await chat.handler({
      body: { messages: [{ role: 'user', content }] },
    });
    expect(result.status).toBe(400);
    expect((result.body as any).error).toContain('content');
  }
});

it('POST /api/v1/ai/conversations/:id/messages should return 400 for invalid role', async () => {
Expand Down Expand Up @@ -620,6 +722,7 @@ describe('AI Routes', () => {
{ role: 'assistant', content: '' },
{ role: 'tool', content: '{"temp": 22}', toolCallId: 'call_1' },
],
stream: false,
},
});

Expand Down
Loading
Loading