diff --git a/.changeset/config.json b/.changeset/config.json index fe2fffd69..7638e775f 100644 --- a/.changeset/config.json +++ b/.changeset/config.json @@ -38,6 +38,7 @@ "@objectstack/service-job", "@objectstack/service-queue", "@objectstack/service-realtime", + "@objectstack/service-ai", "@objectstack/service-storage", "@objectstack/docs", "create-objectstack", diff --git a/CHANGELOG.md b/CHANGELOG.md index 8214fc56b..1de488a81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ### Added +- **`@objectstack/service-ai` — Unified AI capability service plugin** — New kernel plugin + providing standardized AI service integration: + - Registers as kernel `'ai'` service conforming to `IAIService` contract + - LLM adapter layer with provider abstraction (`LLMAdapter` interface) and built-in + `MemoryLLMAdapter` for testing/development + - `ToolRegistry` for metadata/business tool registration and execution + - `InMemoryConversationService` implementing `IAIConversationService` for multi-turn + conversation management with message persistence + - REST/SSE route self-registration (`/api/v1/ai/chat`, `/api/v1/ai/chat/stream`, + `/api/v1/ai/complete`, `/api/v1/ai/models`, `/api/v1/ai/conversations`) + - Plugin lifecycle hooks (`ai:ready`, `ai:routes`) for extensibility +- **Expanded `IAIService` contract** — Added streaming (`streamChat`), tool calling protocol + (`AIToolDefinition`, `AIToolCall`, `AIToolResult`, `AIMessageWithTools`, + `AIRequestOptionsWithTools`, `AIStreamEvent`), and conversation management + (`IAIConversationService`, `AIConversation`) to `packages/spec/src/contracts/ai-service.ts` - **`@objectstack/plugin-setup` — Platform Setup App plugin** — New internal plugin (`packages/plugins/plugin-setup`) that owns and finalizes the platform Setup App. 
Ships four built-in Setup Areas (Administration, Platform, System, AI) as empty diff --git a/ROADMAP.md b/ROADMAP.md index 00cd474f5..d10f804e9 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -535,13 +535,13 @@ Objects now declare `namespace: 'sys'` and a short `name` (e.g., `name: 'user'`) | `IAutomationService` | **P2** | `@objectstack/service-automation` | ✅ Plugin-based DAG flow engine + HTTP API + Client SDK (67 tests) | | `IWorkflowService` | **P2** | `@objectstack/service-workflow` | State machine + approval processes | | `IGraphQLService` | **P2** | `@objectstack/service-graphql` | Auto-generated GraphQL from objects | -| `IAIService` | **P2** | `@objectstack/service-ai` | LLM integration (OpenAI/Anthropic/local) | +| `IAIService` | **P2** | `@objectstack/service-ai` | ✅ LLM adapter layer, ToolRegistry, conversation management, REST/SSE routes, streaming (52 tests) | | `IAnalyticsService` | **P3** | `@objectstack/service-analytics` | ✅ Multi-driver analytics with strategy pattern (NativeSQL/ObjectQL/InMemory), CubeRegistry, generateSql (34 tests) | - [x] `service-automation` — Implement `IAutomationService` with plugin-based DAG flow engine (CRUD/Logic/HTTP nodes, fault edges, parallel branches, cycle detection, safe eval, timeout, versioning), HTTP API CRUD (9 routes), Client SDK (10 methods), execution history with step-level logging - [ ] `service-workflow` — Implement `IWorkflowService` with state machine runtime - [ ] `service-graphql` — Implement `IGraphQLService` with auto-schema generation -- [ ] `service-ai` — Implement `IAIService` with multi-provider LLM routing +- [x] `service-ai` — Implement `IAIService` with LLM adapter layer, ToolRegistry, InMemoryConversationService, REST/SSE routes (/api/v1/ai/*), streaming support (streamChat), kernel plugin (52 tests) - [x] `service-analytics` — Implement full `IAnalyticsService` with multi-driver strategy pattern (NativeSQLStrategy P1, ObjectQLStrategy P2, InMemoryStrategy P3), CubeRegistry with 
auto-inference from object schemas, generateSql dry-run, kernel plugin lifecycle --- @@ -584,7 +584,7 @@ Objects now declare `namespace: 'sys'` and a short `name` (e.g., `name: 'user'`) - [x] **Phase A+: Dual Transport** (v3.2) — Remote-only mode via `@libsql/client` (libsql://, https://), auto-detection of transport mode, pre-configured client injection, full CRUD/schema/bulk/transaction support in remote mode - [ ] **Phase B: Edge & Sync** (v3.2) — Embedded replica sync, WASM build for Cloudflare/Deno, offline write queue - [x] **Phase C: Multi-Tenancy** (v3.3) — Database-per-tenant router with TTL cache, concurrency dedup, lifecycle callbacks - - [ ] **Phase D: Advanced** (v4.0) — Vector search + `IAIService`, FTS5 + `ISearchService`, ~~better-auth adapter~~ (✅ done in plugin-auth) + - [ ] **Phase D: Advanced** (v4.0) — Vector search + `IAIService` (✅ `service-ai` base implemented), FTS5 + `ISearchService`, ~~better-auth adapter~~ (✅ done in plugin-auth) - [ ] Driver benchmark suite comparing performance across all drivers ### 6.2 Multi-Tenancy @@ -644,15 +644,15 @@ Objects now declare `namespace: 'sys'` and a short `name` (e.g., `name: 'user'`) ### 7.1 Core AI Services -- [ ] `service-ai` — Multi-provider LLM service (OpenAI, Anthropic, Gemini, local models) +- [x] `service-ai` — Multi-provider LLM service with adapter pattern, streaming, tool registry, conversation management, REST/SSE routes - [ ] NLQ (Natural Language Query) runtime — translate natural language to ObjectQL - [ ] Embedding service for vector search and RAG ### 7.2 Agent Framework - [ ] Agent runtime — execute AI agents defined in spec schemas -- [ ] Tool registry — connect agents to ObjectQL operations, APIs, and workflows -- [ ] Conversation management — persistent chat with context windows +- [x] Tool registry — connect agents to ObjectQL operations, APIs, and workflows (initial implementation in `service-ai`) +- [x] Conversation management — persistent chat with context windows (initial 
implementation in `service-ai`) ### 7.3 RAG Pipeline @@ -870,7 +870,7 @@ Final polish and advanced features. | 15 | Feed Service | `IFeedService` | ✅ | `@objectstack/service-feed` | In-memory feed/chatter (comments, reactions, subscriptions) | | 16 | Search Service | `ISearchService` | ❌ | `@objectstack/service-search` (planned) | Spec only | | 17 | Notification Service | `INotificationService` | ❌ | `@objectstack/service-notification` (planned) | Spec only | -| 18 | AI Service | `IAIService` | ❌ | `@objectstack/service-ai` (planned) | Spec only | +| 18 | AI Service | `IAIService` | ✅ | `@objectstack/service-ai` | LLM adapter layer, ToolRegistry, conversation management, REST/SSE routes (52 tests) | | 19 | Automation Service | `IAutomationService` | ✅ | `@objectstack/service-automation` | DAG engine + HTTP API CRUD + Client SDK + typed returns (67 tests) | | 20 | Workflow Service | `IWorkflowService` | ❌ | `@objectstack/service-workflow` (planned) | Spec only | | 21 | GraphQL Service | `IGraphQLService` | ❌ | `@objectstack/service-graphql` (planned) | Spec only | diff --git a/packages/services/service-ai/CHANGELOG.md b/packages/services/service-ai/CHANGELOG.md new file mode 100644 index 000000000..195f35c31 --- /dev/null +++ b/packages/services/service-ai/CHANGELOG.md @@ -0,0 +1,12 @@ +# @objectstack/service-ai + +## 3.3.1 + +### Patch Changes + +- Initial release of AI Service plugin + - LLM adapter layer with provider abstraction (memory adapter included) + - Conversation management service with in-memory persistence + - Tool registry for metadata/business tool registration + - REST/SSE route self-registration (`/api/v1/ai/*`) + - Kernel plugin registering as `'ai'` service conforming to `IAIService` contract diff --git a/packages/services/service-ai/package.json b/packages/services/service-ai/package.json new file mode 100644 index 000000000..595450481 --- /dev/null +++ b/packages/services/service-ai/package.json @@ -0,0 +1,29 @@ +{ + "name": 
"@objectstack/service-ai", + "version": "3.3.1", + "license": "Apache-2.0", + "description": "AI Service for ObjectStack — implements IAIService with LLM adapter layer, conversation management, tool registry, and REST/SSE routes", + "type": "module", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js", + "require": "./dist/index.cjs" + } + }, + "scripts": { + "build": "tsup --config ../../../tsup.config.ts", + "test": "vitest run" + }, + "dependencies": { + "@objectstack/core": "workspace:*", + "@objectstack/spec": "workspace:*" + }, + "devDependencies": { + "@types/node": "^25.5.0", + "typescript": "^6.0.2", + "vitest": "^4.1.2" + } +} diff --git a/packages/services/service-ai/src/__tests__/ai-service.test.ts b/packages/services/service-ai/src/__tests__/ai-service.test.ts new file mode 100644 index 000000000..c948a4eaa --- /dev/null +++ b/packages/services/service-ai/src/__tests__/ai-service.test.ts @@ -0,0 +1,731 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. 
+ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import type { AIMessage, IAIService, AIStreamEvent } from '@objectstack/spec/contracts'; +import { AIService } from '../ai-service.js'; +import { MemoryLLMAdapter } from '../adapters/memory-adapter.js'; +import { ToolRegistry } from '../tools/tool-registry.js'; +import { InMemoryConversationService } from '../conversation/in-memory-conversation-service.js'; +import { buildAIRoutes } from '../routes/ai-routes.js'; +import { AIServicePlugin } from '../plugin.js'; +import type { LLMAdapter } from '../adapters/types.js'; + +// Suppress logger output in tests +const silentLogger = { + info: vi.fn(), + debug: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + child: vi.fn().mockReturnThis(), +} as any; + +// ───────────────────────────────────────────────────────────────── +// MemoryLLMAdapter +// ───────────────────────────────────────────────────────────────── + +describe('MemoryLLMAdapter', () => { + let adapter: MemoryLLMAdapter; + + beforeEach(() => { + adapter = new MemoryLLMAdapter(); + }); + + it('should have name "memory"', () => { + expect(adapter.name).toBe('memory'); + }); + + it('should echo the last user message in chat()', async () => { + const messages: AIMessage[] = [ + { role: 'system', content: 'You are helpful.' 
}, + { role: 'user', content: 'Hello AI' }, + ]; + const result = await adapter.chat(messages); + expect(result.content).toBe('[memory] Hello AI'); + expect(result.model).toBe('memory'); + expect(result.usage).toBeDefined(); + }); + + it('should handle no user message in chat()', async () => { + const messages: AIMessage[] = [{ role: 'system', content: 'System only' }]; + const result = await adapter.chat(messages); + expect(result.content).toBe('[memory] (no user message)'); + }); + + it('should echo prompt in complete()', async () => { + const result = await adapter.complete('test prompt'); + expect(result.content).toBe('[memory] test prompt'); + }); + + it('should stream word-by-word in streamChat()', async () => { + const messages: AIMessage[] = [{ role: 'user', content: 'Hi there' }]; + const events: AIStreamEvent[] = []; + for await (const event of adapter.streamChat(messages)) { + events.push(event); + } + // "[memory]" + " Hi" + " there" = 3 text-delta events + 1 finish + expect(events.filter(e => e.type === 'text-delta').length).toBeGreaterThan(0); + expect(events[events.length - 1].type).toBe('finish'); + }); + + it('should return zero vectors for embed()', async () => { + const result = await adapter.embed(['hello', 'world']); + expect(result).toHaveLength(2); + expect(result[0]).toEqual([0, 0, 0]); + }); + + it('should list memory model', async () => { + const models = await adapter.listModels(); + expect(models).toEqual(['memory']); + }); +}); + +// ───────────────────────────────────────────────────────────────── +// ToolRegistry +// ───────────────────────────────────────────────────────────────── + +describe('ToolRegistry', () => { + let registry: ToolRegistry; + + beforeEach(() => { + registry = new ToolRegistry(); + }); + + it('should register and retrieve a tool', () => { + const def = { name: 'test_tool', description: 'A test', parameters: {} }; + registry.register(def, async () => 'result'); + expect(registry.has('test_tool')).toBe(true); + 
expect(registry.getDefinition('test_tool')).toEqual(def); + expect(registry.size).toBe(1); + expect(registry.names()).toEqual(['test_tool']); + }); + + it('should unregister a tool', () => { + registry.register({ name: 'tool_a', description: 'A', parameters: {} }, async () => ''); + registry.unregister('tool_a'); + expect(registry.has('tool_a')).toBe(false); + expect(registry.size).toBe(0); + }); + + it('should execute a tool call', async () => { + registry.register( + { name: 'add', description: 'Add numbers', parameters: {} }, + async (args) => String((args.a as number) + (args.b as number)), + ); + + const result = await registry.execute({ + id: 'call_1', + name: 'add', + arguments: JSON.stringify({ a: 3, b: 4 }), + }); + + expect(result.toolCallId).toBe('call_1'); + expect(result.content).toBe('7'); + expect(result.isError).toBeUndefined(); + }); + + it('should return error for unknown tool', async () => { + const result = await registry.execute({ + id: 'call_x', + name: 'unknown', + arguments: '{}', + }); + expect(result.isError).toBe(true); + expect(result.content).toContain('not registered'); + }); + + it('should return error on handler failure', async () => { + registry.register( + { name: 'fail_tool', description: 'Fails', parameters: {} }, + async () => { throw new Error('boom'); }, + ); + + const result = await registry.execute({ + id: 'call_f', + name: 'fail_tool', + arguments: '{}', + }); + expect(result.isError).toBe(true); + expect(result.content).toBe('boom'); + }); + + it('should execute multiple tool calls in parallel', async () => { + registry.register( + { name: 'echo', description: 'Echo', parameters: {} }, + async (args) => args.msg as string, + ); + + const results = await registry.executeAll([ + { id: 'c1', name: 'echo', arguments: '{"msg":"a"}' }, + { id: 'c2', name: 'echo', arguments: '{"msg":"b"}' }, + ]); + + expect(results).toHaveLength(2); + expect(results[0].content).toBe('a'); + expect(results[1].content).toBe('b'); + }); + + 
it('should return all definitions', () => { + registry.register({ name: 't1', description: 'T1', parameters: {} }, async () => ''); + registry.register({ name: 't2', description: 'T2', parameters: {} }, async () => ''); + expect(registry.getAll()).toHaveLength(2); + }); + + it('should clear all tools', () => { + registry.register({ name: 'x', description: 'X', parameters: {} }, async () => ''); + registry.clear(); + expect(registry.size).toBe(0); + }); +}); + +// ───────────────────────────────────────────────────────────────── +// InMemoryConversationService +// ───────────────────────────────────────────────────────────────── + +describe('InMemoryConversationService', () => { + let svc: InMemoryConversationService; + + beforeEach(() => { + svc = new InMemoryConversationService(); + }); + + it('should create a conversation', async () => { + const conv = await svc.create({ title: 'Test', userId: 'u1' }); + expect(conv.id).toBeDefined(); + expect(conv.title).toBe('Test'); + expect(conv.userId).toBe('u1'); + expect(conv.messages).toHaveLength(0); + expect(conv.createdAt).toBeDefined(); + }); + + it('should get a conversation by ID', async () => { + const created = await svc.create({ title: 'Lookup' }); + const found = await svc.get(created.id); + expect(found).not.toBeNull(); + expect(found!.id).toBe(created.id); + + const missing = await svc.get('nonexistent'); + expect(missing).toBeNull(); + }); + + it('should list conversations with filters', async () => { + await svc.create({ userId: 'a', agentId: 'ag1' }); + await svc.create({ userId: 'b', agentId: 'ag1' }); + await svc.create({ userId: 'a', agentId: 'ag2' }); + + expect((await svc.list()).length).toBe(3); + expect((await svc.list({ userId: 'a' })).length).toBe(2); + expect((await svc.list({ agentId: 'ag1' })).length).toBe(2); + expect((await svc.list({ limit: 1 })).length).toBe(1); + }); + + it('should add messages to a conversation', async () => { + const conv = await svc.create({}); + await 
svc.addMessage(conv.id, { role: 'user', content: 'Hi' }); + const updated = await svc.addMessage(conv.id, { role: 'assistant', content: 'Hello!' }); + expect(updated.messages).toHaveLength(2); + }); + + it('should throw when adding message to non-existent conversation', async () => { + await expect( + svc.addMessage('nope', { role: 'user', content: 'Hi' }), + ).rejects.toThrow('not found'); + }); + + it('should delete a conversation', async () => { + const conv = await svc.create({}); + await svc.delete(conv.id); + expect(await svc.get(conv.id)).toBeNull(); + }); + + it('should track size', async () => { + expect(svc.size).toBe(0); + await svc.create({}); + expect(svc.size).toBe(1); + }); + + it('should clear all conversations', async () => { + await svc.create({}); + await svc.create({}); + svc.clear(); + expect(svc.size).toBe(0); + }); +}); + +// ───────────────────────────────────────────────────────────────── +// AIService (Orchestrator) +// ───────────────────────────────────────────────────────────────── + +describe('AIService', () => { + it('should use MemoryLLMAdapter by default', async () => { + const service = new AIService({ logger: silentLogger }); + expect(service.adapterName).toBe('memory'); + + const result = await service.chat([{ role: 'user', content: 'Hi' }]); + expect(result.content).toBe('[memory] Hi'); + }); + + it('should delegate complete() to adapter', async () => { + const service = new AIService({ logger: silentLogger }); + const result = await service.complete('test'); + expect(result.content).toBe('[memory] test'); + }); + + it('should stream via adapter.streamChat()', async () => { + const service = new AIService({ logger: silentLogger }); + const events: AIStreamEvent[] = []; + for await (const event of service.streamChat([{ role: 'user', content: 'Hi' }])) { + events.push(event); + } + expect(events.length).toBeGreaterThan(1); + expect(events[events.length - 1].type).toBe('finish'); + }); + + it('should fall back to non-streaming when 
adapter has no streamChat', async () => { + const adapter: LLMAdapter = { + name: 'no-stream', + chat: async () => ({ content: 'response', model: 'test' }), + complete: async () => ({ content: '' }), + // no streamChat + }; + const service = new AIService({ adapter, logger: silentLogger }); + + const events: AIStreamEvent[] = []; + for await (const event of service.streamChat([{ role: 'user', content: 'Hi' }])) { + events.push(event); + } + + expect(events).toHaveLength(2); + expect(events[0].type).toBe('text-delta'); + expect(events[0].textDelta).toBe('response'); + expect(events[1].type).toBe('finish'); + }); + + it('should delegate embed() to adapter', async () => { + const service = new AIService({ logger: silentLogger }); + const embeddings = await service.embed('hello'); + expect(embeddings).toHaveLength(1); + }); + + it('should throw when adapter does not support embed()', async () => { + const adapter: LLMAdapter = { + name: 'no-embed', + chat: async () => ({ content: '' }), + complete: async () => ({ content: '' }), + }; + const service = new AIService({ adapter, logger: silentLogger }); + await expect(service.embed('hello')).rejects.toThrow('does not support embeddings'); + }); + + it('should delegate listModels() to adapter', async () => { + const service = new AIService({ logger: silentLogger }); + const models = await service.listModels(); + expect(models).toEqual(['memory']); + }); + + it('should return empty array when adapter has no listModels()', async () => { + const adapter: LLMAdapter = { + name: 'no-models', + chat: async () => ({ content: '' }), + complete: async () => ({ content: '' }), + }; + const service = new AIService({ adapter, logger: silentLogger }); + const models = await service.listModels(); + expect(models).toEqual([]); + }); + + it('should expose toolRegistry and conversationService', () => { + const service = new AIService({ logger: silentLogger }); + expect(service.toolRegistry).toBeInstanceOf(ToolRegistry); + 
expect(service.conversationService).toBeInstanceOf(InMemoryConversationService); + }); + + it('should accept custom adapter', async () => { + const customAdapter: LLMAdapter = { + name: 'custom', + chat: async () => ({ content: 'custom response' }), + complete: async (p) => ({ content: `custom: ${p}` }), + }; + const service = new AIService({ adapter: customAdapter, logger: silentLogger }); + expect(service.adapterName).toBe('custom'); + + const result = await service.chat([{ role: 'user', content: 'test' }]); + expect(result.content).toBe('custom response'); + }); +}); + +// ───────────────────────────────────────────────────────────────── +// Routes +// ───────────────────────────────────────────────────────────────── + +describe('AI Routes', () => { + let service: AIService; + + beforeEach(() => { + service = new AIService({ logger: silentLogger }); + }); + + it('should build all expected routes', () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + expect(routes.length).toBe(8); + + const paths = routes.map(r => `${r.method} ${r.path}`); + expect(paths).toContain('POST /api/v1/ai/chat'); + expect(paths).toContain('POST /api/v1/ai/chat/stream'); + expect(paths).toContain('POST /api/v1/ai/complete'); + expect(paths).toContain('GET /api/v1/ai/models'); + expect(paths).toContain('POST /api/v1/ai/conversations'); + expect(paths).toContain('GET /api/v1/ai/conversations'); + expect(paths).toContain('POST /api/v1/ai/conversations/:id/messages'); + expect(paths).toContain('DELETE /api/v1/ai/conversations/:id'); + }); + + it('POST /api/v1/ai/chat should return chat result', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!; + + const response = await chatRoute.handler({ + body: { messages: [{ role: 'user', content: 'Hi' }] }, + }); + + expect(response.status).toBe(200); + expect((response.body as 
any).content).toBe('[memory] Hi'); + }); + + it('POST /api/v1/ai/chat should return 400 without messages', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!; + + const response = await chatRoute.handler({ body: {} }); + expect(response.status).toBe(400); + }); + + it('POST /api/v1/ai/chat/stream should return streaming response', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const streamRoute = routes.find(r => r.path === '/api/v1/ai/chat/stream')!; + + const response = await streamRoute.handler({ + body: { messages: [{ role: 'user', content: 'Hello' }] }, + }); + + expect(response.status).toBe(200); + expect(response.stream).toBe(true); + expect(response.events).toBeDefined(); + + // Consume the stream + const events: unknown[] = []; + for await (const event of response.events!) { + events.push(event); + } + expect(events.length).toBeGreaterThan(0); + }); + + it('POST /api/v1/ai/complete should return completion result', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const completeRoute = routes.find(r => r.path === '/api/v1/ai/complete')!; + + const response = await completeRoute.handler({ + body: { prompt: 'test prompt' }, + }); + + expect(response.status).toBe(200); + expect((response.body as any).content).toBe('[memory] test prompt'); + }); + + it('POST /api/v1/ai/complete should return 400 without prompt', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const completeRoute = routes.find(r => r.path === '/api/v1/ai/complete')!; + + const response = await completeRoute.handler({ body: {} }); + expect(response.status).toBe(400); + }); + + it('GET /api/v1/ai/models should return model list', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + 
const modelsRoute = routes.find(r => r.path === '/api/v1/ai/models')!; + + const response = await modelsRoute.handler({}); + expect(response.status).toBe(200); + expect((response.body as any).models).toContain('memory'); + }); + + it('POST /api/v1/ai/conversations should create conversation', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const createRoute = routes.find(r => r.method === 'POST' && r.path === '/api/v1/ai/conversations')!; + + const response = await createRoute.handler({ + body: { title: 'Test Conv', userId: 'u1' }, + }); + + expect(response.status).toBe(201); + expect((response.body as any).title).toBe('Test Conv'); + }); + + it('GET /api/v1/ai/conversations should list conversations', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const createRoute = routes.find(r => r.method === 'POST' && r.path === '/api/v1/ai/conversations')!; + const listRoute = routes.find(r => r.method === 'GET' && r.path === '/api/v1/ai/conversations')!; + + await createRoute.handler({ body: { title: 'C1' } }); + await createRoute.handler({ body: { title: 'C2' } }); + + const response = await listRoute.handler({}); + expect(response.status).toBe(200); + expect((response.body as any).conversations).toHaveLength(2); + }); + + it('POST /api/v1/ai/conversations/:id/messages should add message', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const createRoute = routes.find(r => r.method === 'POST' && r.path === '/api/v1/ai/conversations')!; + const addMsgRoute = routes.find(r => r.path === '/api/v1/ai/conversations/:id/messages')!; + + const created = await createRoute.handler({ body: {} }); + const convId = (created.body as any).id; + + const response = await addMsgRoute.handler({ + params: { id: convId }, + body: { role: 'user', content: 'Hi there' }, + }); + + expect(response.status).toBe(200); + expect((response.body as 
any).messages).toHaveLength(1); + }); + + it('POST /api/v1/ai/conversations/:id/messages should return 404 for unknown conversation', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const addMsgRoute = routes.find(r => r.path === '/api/v1/ai/conversations/:id/messages')!; + + const response = await addMsgRoute.handler({ + params: { id: 'unknown' }, + body: { role: 'user', content: 'Hi' }, + }); + + expect(response.status).toBe(404); + }); + + it('DELETE /api/v1/ai/conversations/:id should delete conversation', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const createRoute = routes.find(r => r.method === 'POST' && r.path === '/api/v1/ai/conversations')!; + const deleteRoute = routes.find(r => r.path === '/api/v1/ai/conversations/:id')!; + + const created = await createRoute.handler({ body: {} }); + const convId = (created.body as any).id; + + const response = await deleteRoute.handler({ params: { id: convId } }); + expect(response.status).toBe(204); + }); + + // ── Message validation ─────────────────────────────────────── + + it('POST /api/v1/ai/chat should return 400 for messages with invalid role', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!; + + const response = await chatRoute.handler({ + body: { messages: [{ role: 'invalid', content: 'Hi' }] }, + }); + + expect(response.status).toBe(400); + expect((response.body as any).error).toContain('message.role'); + }); + + it('POST /api/v1/ai/chat should return 400 for messages with non-string content', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!; + + const response = await chatRoute.handler({ + body: { messages: [{ role: 'user', content: 123 }] }, + }); + + 
expect(response.status).toBe(400); + expect((response.body as any).error).toContain('content'); + }); + + it('POST /api/v1/ai/conversations/:id/messages should return 400 for invalid role', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const createRoute = routes.find(r => r.method === 'POST' && r.path === '/api/v1/ai/conversations')!; + const addMsgRoute = routes.find(r => r.path === '/api/v1/ai/conversations/:id/messages')!; + + const created = await createRoute.handler({ body: {} }); + const convId = (created.body as any).id; + + const response = await addMsgRoute.handler({ + params: { id: convId }, + body: { role: 'invalid_role', content: 'Hi' }, + }); + + expect(response.status).toBe(400); + expect((response.body as any).error).toContain('message.role'); + }); + + it('POST /api/v1/ai/conversations/:id/messages should return 400 for missing content', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const addMsgRoute = routes.find(r => r.path === '/api/v1/ai/conversations/:id/messages')!; + + const response = await addMsgRoute.handler({ + params: { id: 'conv_1' }, + body: { role: 'user' }, + }); + + expect(response.status).toBe(400); + expect((response.body as any).error).toContain('content'); + }); + + // ── Limit parsing ─────────────────────────────────────────── + + it('GET /api/v1/ai/conversations should parse limit from query string', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const createRoute = routes.find(r => r.method === 'POST' && r.path === '/api/v1/ai/conversations')!; + const listRoute = routes.find(r => r.method === 'GET' && r.path === '/api/v1/ai/conversations')!; + + await createRoute.handler({ body: { title: 'C1' } }); + await createRoute.handler({ body: { title: 'C2' } }); + await createRoute.handler({ body: { title: 'C3' } }); + + const response = await listRoute.handler({ query: { 
limit: '2' } }); + expect(response.status).toBe(200); + expect((response.body as any).conversations).toHaveLength(2); + }); + + it('GET /api/v1/ai/conversations should return 400 for invalid limit', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const listRoute = routes.find(r => r.method === 'GET' && r.path === '/api/v1/ai/conversations')!; + + const response = await listRoute.handler({ query: { limit: 'abc' } }); + expect(response.status).toBe(400); + expect((response.body as any).error).toContain('limit'); + }); + + it('GET /api/v1/ai/conversations should return 400 for negative limit', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const listRoute = routes.find(r => r.method === 'GET' && r.path === '/api/v1/ai/conversations')!; + + const response = await listRoute.handler({ query: { limit: '-1' } }); + expect(response.status).toBe(400); + expect((response.body as any).error).toContain('limit'); + }); + + // ── Tool message in chat ──────────────────────────────────── + + it('POST /api/v1/ai/chat should accept tool role messages', async () => { + const routes = buildAIRoutes(service, service.conversationService, silentLogger); + const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!; + + const response = await chatRoute.handler({ + body: { + messages: [ + { role: 'user', content: 'What is the weather?' 
}, + { role: 'assistant', content: '' }, + { role: 'tool', content: '{"temp": 22}', toolCallId: 'call_1' }, + ], + }, + }); + + expect(response.status).toBe(200); + }); +}); + +// ───────────────────────────────────────────────────────────────── +// AIServicePlugin (Integration) +// ───────────────────────────────────────────────────────────────── + +describe('AIServicePlugin', () => { + function createMockContext() { + const services = new Map(); + const hooks = new Map(); + + return { + registerService: vi.fn((name: string, service: unknown) => services.set(name, service)), + replaceService: vi.fn((name: string, service: unknown) => services.set(name, service)), + getService: vi.fn((name: string): T => { + if (!services.has(name)) throw new Error(`Service "${name}" not found`); + return services.get(name) as T; + }), + getServices: vi.fn(() => services), + hook: vi.fn((name: string, handler: Function) => { + if (!hooks.has(name)) hooks.set(name, []); + hooks.get(name)!.push(handler); + }), + trigger: vi.fn(async () => {}), + logger: silentLogger, + getKernel: vi.fn(), + } as any; + } + + it('should register as "ai" service on init', async () => { + const plugin = new AIServicePlugin(); + const ctx = createMockContext(); + + await plugin.init(ctx); + + expect(ctx.registerService).toHaveBeenCalledWith('ai', expect.any(Object)); + const service = ctx.getService('ai'); + expect(service).toBeDefined(); + expect(typeof service.chat).toBe('function'); + }); + + it('should have correct plugin metadata', () => { + const plugin = new AIServicePlugin(); + expect(plugin.name).toBe('com.objectstack.service-ai'); + expect(plugin.version).toBe('1.0.0'); + expect(plugin.type).toBe('standard'); + }); + + it('should trigger ai:ready on start', async () => { + const plugin = new AIServicePlugin(); + const ctx = createMockContext(); + + await plugin.init(ctx); + await plugin.start!(ctx); + + expect(ctx.trigger).toHaveBeenCalledWith('ai:ready', expect.any(Object)); + 
expect(ctx.trigger).toHaveBeenCalledWith('ai:routes', expect.any(Array)); + }); + + it('should use custom adapter when provided', async () => { + const customAdapter: LLMAdapter = { + name: 'custom-test', + chat: async () => ({ content: 'custom' }), + complete: async () => ({ content: '' }), + }; + + const plugin = new AIServicePlugin({ adapter: customAdapter }); + const ctx = createMockContext(); + + await plugin.init(ctx); + + const service = ctx.getService('ai'); + expect(service.adapterName).toBe('custom-test'); + }); + + it('should replace existing AI service', async () => { + const plugin = new AIServicePlugin(); + const ctx = createMockContext(); + + // Pre-register a mock AI service + ctx.registerService('ai', { chat: vi.fn(), complete: vi.fn() }); + + await plugin.init(ctx); + + expect(ctx.replaceService).toHaveBeenCalledWith('ai', expect.any(Object)); + }); + + it('should clean up on destroy', async () => { + const plugin = new AIServicePlugin(); + const ctx = createMockContext(); + + await plugin.init(ctx); + await plugin.destroy!(); + + // After destroy, the plugin should not throw + // (internal service reference cleared) + }); + + it('should register debug hook when debug=true', async () => { + const plugin = new AIServicePlugin({ debug: true }); + const ctx = createMockContext(); + + await plugin.init(ctx); + + expect(ctx.hook).toHaveBeenCalledWith('ai:beforeChat', expect.any(Function)); + }); +}); diff --git a/packages/services/service-ai/src/adapters/index.ts b/packages/services/service-ai/src/adapters/index.ts new file mode 100644 index 000000000..d877e3fee --- /dev/null +++ b/packages/services/service-ai/src/adapters/index.ts @@ -0,0 +1,4 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. 
+ +export type { LLMAdapter } from './types.js'; +export { MemoryLLMAdapter } from './memory-adapter.js'; diff --git a/packages/services/service-ai/src/adapters/memory-adapter.ts b/packages/services/service-ai/src/adapters/memory-adapter.ts new file mode 100644 index 000000000..2ef29d0aa --- /dev/null +++ b/packages/services/service-ai/src/adapters/memory-adapter.ts @@ -0,0 +1,64 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +import type { + AIMessage, + AIRequestOptions, + AIResult, + AIStreamEvent, +} from '@objectstack/spec/contracts'; +import type { LLMAdapter } from './types.js'; + +/** + * MemoryLLMAdapter — deterministic in-memory adapter for testing & development. + * + * Always echoes back the last user message prefixed with "[memory] ". + * Useful for unit tests, CI pipelines, and local dev without an LLM key. + */ +export class MemoryLLMAdapter implements LLMAdapter { + readonly name = 'memory'; + + async chat(messages: AIMessage[], options?: AIRequestOptions): Promise { + const lastUserMessage = [...messages].reverse().find(m => m.role === 'user'); + const content = lastUserMessage + ? `[memory] ${lastUserMessage.content}` + : '[memory] (no user message)'; + + return { + content, + model: options?.model ?? 'memory', + usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 }, + }; + } + + async complete(prompt: string, options?: AIRequestOptions): Promise { + return { + content: `[memory] ${prompt}`, + model: options?.model ?? 'memory', + usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 }, + }; + } + + async *streamChat( + messages: AIMessage[], + _options?: AIRequestOptions, + ): AsyncIterable { + const result = await this.chat(messages); + // Emit word-by-word deltas for realistic streaming simulation + const words = result.content.split(' '); + for (let i = 0; i < words.length; i++) { + const textDelta = i === 0 ? 
words[i] : ` ${words[i]}`; + yield { type: 'text-delta', textDelta }; + } + yield { type: 'finish', result }; + } + + async embed(input: string | string[]): Promise { + const texts = Array.isArray(input) ? input : [input]; + // Return deterministic zero vectors of dimension 3 + return texts.map(() => [0, 0, 0]); + } + + async listModels(): Promise { + return ['memory']; + } +} diff --git a/packages/services/service-ai/src/adapters/types.ts b/packages/services/service-ai/src/adapters/types.ts new file mode 100644 index 000000000..04c837304 --- /dev/null +++ b/packages/services/service-ai/src/adapters/types.ts @@ -0,0 +1,52 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +import type { + AIMessage, + AIRequestOptions, + AIResult, + AIStreamEvent, +} from '@objectstack/spec/contracts'; + +/** + * LLM Provider Adapter Interface + * + * Adapters translate between the ObjectStack AI protocol and concrete + * LLM provider SDKs (OpenAI, Anthropic, Ollama, etc.). + * + * Each adapter is a thin wrapper — all orchestration, conversation + * management, and tool execution logic lives in the AI service layer. + */ +export interface LLMAdapter { + /** Unique adapter identifier (e.g. 'openai', 'anthropic', 'memory') */ + readonly name: string; + + /** + * Generate a chat completion. + * @param messages - Conversation messages + * @param options - Request configuration (includes tool definitions) + */ + chat(messages: AIMessage[], options?: AIRequestOptions): Promise; + + /** + * Generate a text completion from a single prompt. + * @param prompt - Input prompt string + * @param options - Request configuration + */ + complete(prompt: string, options?: AIRequestOptions): Promise; + + /** + * Stream a chat completion as an async iterable of events. + * Implementations that do not support streaming may omit this method. + */ + streamChat?(messages: AIMessage[], options?: AIRequestOptions): AsyncIterable; + + /** + * Generate embedding vectors. 
+ */ + embed?(input: string | string[], model?: string): Promise; + + /** + * List models available through this adapter. + */ + listModels?(): Promise; +} diff --git a/packages/services/service-ai/src/ai-service.ts b/packages/services/service-ai/src/ai-service.ts new file mode 100644 index 000000000..4d36f343a --- /dev/null +++ b/packages/services/service-ai/src/ai-service.ts @@ -0,0 +1,112 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +import type { + AIMessage, + AIRequestOptions, + AIResult, + AIStreamEvent, + IAIService, + IAIConversationService, +} from '@objectstack/spec/contracts'; +import type { Logger } from '@objectstack/spec/contracts'; +import { createLogger } from '@objectstack/core'; +import type { LLMAdapter } from './adapters/types.js'; +import { MemoryLLMAdapter } from './adapters/memory-adapter.js'; +import { ToolRegistry } from './tools/tool-registry.js'; +import { InMemoryConversationService } from './conversation/in-memory-conversation-service.js'; + +/** + * Configuration for AIService. + */ +export interface AIServiceConfig { + /** LLM adapter to delegate calls to (defaults to MemoryLLMAdapter). */ + adapter?: LLMAdapter; + /** Logger instance. */ + logger?: Logger; + /** Pre-registered tools. */ + toolRegistry?: ToolRegistry; + /** Conversation service (defaults to InMemoryConversationService). */ + conversationService?: IAIConversationService; +} + +/** + * AIService — Unified AI capability service. 
+ * + * Implements {@link IAIService} by delegating to a pluggable {@link LLMAdapter} + * and managing tools and conversations through dedicated sub-components: + * + * | Component | Responsibility | + * |:---|:---| + * | {@link LLMAdapter} | LLM provider abstraction (chat, complete, stream, embed) | + * | {@link ToolRegistry} | Tool definition storage & execution | + * | {@link IAIConversationService} | Conversation CRUD & message persistence | + * + * The service is registered as `'ai'` in the kernel service registry by + * the {@link AIServicePlugin}. + */ +export class AIService implements IAIService { + private readonly adapter: LLMAdapter; + private readonly logger: Logger; + readonly toolRegistry: ToolRegistry; + readonly conversationService: IAIConversationService; + + constructor(config: AIServiceConfig = {}) { + this.adapter = config.adapter ?? new MemoryLLMAdapter(); + this.logger = config.logger ?? createLogger({ level: 'info', format: 'pretty' }); + this.toolRegistry = config.toolRegistry ?? new ToolRegistry(); + this.conversationService = config.conversationService ?? new InMemoryConversationService(); + + this.logger.info( + `[AI] Service initialized with adapter="${this.adapter.name}", ` + + `tools=${this.toolRegistry.size}`, + ); + } + + /** The name of the active LLM adapter. 
*/ + get adapterName(): string { + return this.adapter.name; + } + + // ── IAIService implementation ────────────────────────────────── + + async chat(messages: AIMessage[], options?: AIRequestOptions): Promise { + this.logger.debug('[AI] chat', { messageCount: messages.length, model: options?.model }); + return this.adapter.chat(messages, options); + } + + async complete(prompt: string, options?: AIRequestOptions): Promise { + this.logger.debug('[AI] complete', { promptLength: prompt.length, model: options?.model }); + return this.adapter.complete(prompt, options); + } + + async *streamChat( + messages: AIMessage[], + options?: AIRequestOptions, + ): AsyncIterable { + this.logger.debug('[AI] streamChat', { messageCount: messages.length, model: options?.model }); + + if (!this.adapter.streamChat) { + // Fallback: emit the entire response as a single text-delta + finish + const result = await this.adapter.chat(messages, options); + yield { type: 'text-delta', textDelta: result.content }; + yield { type: 'finish', result }; + return; + } + + yield* this.adapter.streamChat(messages, options); + } + + async embed(input: string | string[], model?: string): Promise { + if (!this.adapter.embed) { + throw new Error(`[AI] Adapter "${this.adapter.name}" does not support embeddings`); + } + return this.adapter.embed(input, model); + } + + async listModels(): Promise { + if (!this.adapter.listModels) { + return []; + } + return this.adapter.listModels(); + } +} diff --git a/packages/services/service-ai/src/conversation/in-memory-conversation-service.ts b/packages/services/service-ai/src/conversation/in-memory-conversation-service.ts new file mode 100644 index 000000000..2190d848c --- /dev/null +++ b/packages/services/service-ai/src/conversation/in-memory-conversation-service.ts @@ -0,0 +1,103 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. 
+ +import type { + AIConversation, + AIMessage, + IAIConversationService, +} from '@objectstack/spec/contracts'; + +/** + * InMemoryConversationService — Reference implementation of IAIConversationService. + * + * Stores conversations in a simple Map. Suitable for development, testing, + * and single-process deployments. Production environments should replace + * this with a persistent implementation (e.g., backed by ObjectQL/SQL). + */ +export class InMemoryConversationService implements IAIConversationService { + private readonly store = new Map(); + private counter = 0; + + async create(options: { + title?: string; + agentId?: string; + userId?: string; + metadata?: Record; + } = {}): Promise { + const now = new Date().toISOString(); + const id = `conv_${++this.counter}`; + + const conversation: AIConversation = { + id, + title: options.title, + agentId: options.agentId, + userId: options.userId, + messages: [], + createdAt: now, + updatedAt: now, + metadata: options.metadata, + }; + + this.store.set(id, conversation); + return conversation; + } + + async get(conversationId: string): Promise { + return this.store.get(conversationId) ?? 
null; + } + + async list(options: { + userId?: string; + agentId?: string; + limit?: number; + cursor?: string; + } = {}): Promise { + let results = Array.from(this.store.values()); + + if (options.userId) { + results = results.filter(c => c.userId === options.userId); + } + if (options.agentId) { + results = results.filter(c => c.agentId === options.agentId); + } + + // Simple cursor-based pagination: cursor = conversation ID + if (options.cursor) { + const idx = results.findIndex(c => c.id === options.cursor); + if (idx >= 0) { + results = results.slice(idx + 1); + } + } + + if (options.limit && options.limit > 0) { + results = results.slice(0, options.limit); + } + + return results; + } + + async addMessage(conversationId: string, message: AIMessage): Promise { + const conversation = this.store.get(conversationId); + if (!conversation) { + throw new Error(`Conversation "${conversationId}" not found`); + } + + conversation.messages.push(message); + conversation.updatedAt = new Date().toISOString(); + return conversation; + } + + async delete(conversationId: string): Promise { + this.store.delete(conversationId); + } + + /** Total number of stored conversations. */ + get size(): number { + return this.store.size; + } + + /** Clear all conversations. */ + clear(): void { + this.store.clear(); + this.counter = 0; + } +} diff --git a/packages/services/service-ai/src/conversation/index.ts b/packages/services/service-ai/src/conversation/index.ts new file mode 100644 index 000000000..3d0912ea3 --- /dev/null +++ b/packages/services/service-ai/src/conversation/index.ts @@ -0,0 +1,3 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. 
+ +export { InMemoryConversationService } from './in-memory-conversation-service.js'; diff --git a/packages/services/service-ai/src/index.ts b/packages/services/service-ai/src/index.ts new file mode 100644 index 000000000..5a64a246e --- /dev/null +++ b/packages/services/service-ai/src/index.ts @@ -0,0 +1,24 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +// Core service +export { AIService } from './ai-service.js'; +export type { AIServiceConfig } from './ai-service.js'; + +// Kernel plugin +export { AIServicePlugin } from './plugin.js'; +export type { AIServicePluginOptions } from './plugin.js'; + +// Adapters +export { MemoryLLMAdapter } from './adapters/memory-adapter.js'; +export type { LLMAdapter } from './adapters/types.js'; + +// Conversation +export { InMemoryConversationService } from './conversation/in-memory-conversation-service.js'; + +// Tool registry +export { ToolRegistry } from './tools/tool-registry.js'; +export type { ToolHandler } from './tools/tool-registry.js'; + +// Routes +export { buildAIRoutes } from './routes/ai-routes.js'; +export type { RouteDefinition, RouteRequest, RouteResponse } from './routes/ai-routes.js'; diff --git a/packages/services/service-ai/src/plugin.ts b/packages/services/service-ai/src/plugin.ts new file mode 100644 index 000000000..1e1862d3a --- /dev/null +++ b/packages/services/service-ai/src/plugin.ts @@ -0,0 +1,114 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +import type { Plugin, PluginContext } from '@objectstack/core'; +import type { IAIService } from '@objectstack/spec/contracts'; +import { AIService } from './ai-service.js'; +import type { AIServiceConfig } from './ai-service.js'; +import type { LLMAdapter } from './adapters/types.js'; +import { buildAIRoutes } from './routes/ai-routes.js'; + +/** + * Configuration options for the AIServicePlugin. 
+ */ +export interface AIServicePluginOptions { + /** LLM adapter to use (defaults to MemoryLLMAdapter). */ + adapter?: LLMAdapter; + /** Enable debug logging. */ + debug?: boolean; +} + +/** + * AIServicePlugin — Kernel plugin for the unified AI capability service. + * + * Lifecycle: + * 1. **init** — Creates {@link AIService}, registers as `'ai'` service. + * If an existing AI service is already registered, it is replaced. + * 2. **start** — Triggers `'ai:ready'` hook so other plugins can register + * tools or extend the service. Registers REST/SSE routes. + * 3. **destroy** — Cleans up references. + * + * @example + * ```ts + * import { LiteKernel } from '@objectstack/core'; + * import { AIServicePlugin } from '@objectstack/service-ai'; + * + * const kernel = new LiteKernel(); + * kernel.use(new AIServicePlugin()); + * await kernel.bootstrap(); + * + * const ai = kernel.getService('ai'); + * const result = await ai.chat([{ role: 'user', content: 'Hello' }]); + * ``` + */ +export class AIServicePlugin implements Plugin { + name = 'com.objectstack.service-ai'; + version = '1.0.0'; + type = 'standard' as const; + dependencies: string[] = []; + + private service?: AIService; + private readonly options: AIServicePluginOptions; + + constructor(options: AIServicePluginOptions = {}) { + this.options = options; + } + + async init(ctx: PluginContext): Promise { + // Check if there is an existing AI service (e.g. 
from dev-plugin) + let hasExisting = false; + try { + const existing = ctx.getService('ai'); + if (existing && typeof existing.chat === 'function') { + hasExisting = true; + ctx.logger.debug('[AI] Found existing AI service, replacing'); + } + } catch { + // No existing service — that's fine + } + + const config: AIServiceConfig = { + adapter: this.options.adapter, + logger: ctx.logger, + }; + + this.service = new AIService(config); + + // Register or replace the AI service + if (hasExisting) { + ctx.replaceService('ai', this.service); + } else { + ctx.registerService('ai', this.service); + } + + if (this.options.debug) { + ctx.hook('ai:beforeChat', async (messages: unknown) => { + ctx.logger.debug('[AI] Before chat', { messages }); + }); + } + + ctx.logger.info('[AI] Service initialized'); + } + + async start(ctx: PluginContext): Promise { + if (!this.service) return; + + // Trigger hook to notify AI service is ready — other plugins can register tools + await ctx.trigger('ai:ready', this.service); + + // Build and expose route definitions + const routes = buildAIRoutes(this.service, this.service.conversationService, ctx.logger); + + // Trigger hook so HTTP server plugins can mount these routes + await ctx.trigger('ai:routes', routes); + + ctx.logger.info( + `[AI] Service started — adapter="${this.service.adapterName}", ` + + `tools=${this.service.toolRegistry.size}, ` + + `routes=${routes.length}`, + ); + } + + async destroy(): Promise { + this.service = undefined; + } +} diff --git a/packages/services/service-ai/src/routes/ai-routes.ts b/packages/services/service-ai/src/routes/ai-routes.ts new file mode 100644 index 000000000..9343e9811 --- /dev/null +++ b/packages/services/service-ai/src/routes/ai-routes.ts @@ -0,0 +1,286 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. 
+ +import type { IAIService, IAIConversationService, AIMessage } from '@objectstack/spec/contracts'; +import type { Logger } from '@objectstack/spec/contracts'; + +/** + * Minimal HTTP handler abstraction so routes stay framework-agnostic. + * + * Consumers wire these handlers to their HTTP server of choice + * (Hono, Express, Fastify, etc.) via the kernel's HTTP server service. + */ +export interface RouteDefinition { + /** HTTP method */ + method: 'GET' | 'POST' | 'DELETE'; + /** Path pattern (e.g. '/api/v1/ai/chat') */ + path: string; + /** Human-readable description */ + description: string; + /** + * Handler receives a plain request-like object and returns a response-like + * object. SSE responses set `stream: true` and provide an async iterable. + */ + handler: (req: RouteRequest) => Promise; +} + +export interface RouteRequest { + /** Parsed JSON body (for POST requests) */ + body?: unknown; + /** Route/query parameters */ + params?: Record; + /** Query string parameters */ + query?: Record; +} + +export interface RouteResponse { + /** HTTP status code */ + status: number; + /** JSON-serializable body (for non-streaming responses) */ + body?: unknown; + /** If true, `stream` provides SSE events */ + stream?: boolean; + /** Async iterable of SSE events (when stream=true) */ + events?: AsyncIterable; +} + +/** Valid message roles accepted by the AI routes. */ +const VALID_ROLES = new Set(['system', 'user', 'assistant', 'tool']); + +/** + * Validate that `raw` is a well-formed AIMessage. + * Returns null on success, or an error string on failure. 
+ */ +function validateMessage(raw: unknown): string | null { + if (typeof raw !== 'object' || raw === null) { + return 'each message must be an object'; + } + const msg = raw as Record; + if (typeof msg.role !== 'string' || !VALID_ROLES.has(msg.role)) { + return `message.role must be one of ${[...VALID_ROLES].map(r => `"${r}"`).join(', ')}`; + } + if (typeof msg.content !== 'string') { + return 'message.content must be a string'; + } + return null; +} + +/** + * Build the standard AI REST/SSE routes. + * + * Depends on contracts ({@link IAIService} + {@link IAIConversationService}) + * rather than concrete implementations, so any compliant service pair can + * be wired in. + * + * Routes: + * | Method | Path | Description | + * |:---|:---|:---| + * | POST | /api/v1/ai/chat | Synchronous chat completion | + * | POST | /api/v1/ai/chat/stream | SSE streaming chat completion | + * | POST | /api/v1/ai/complete | Text completion | + * | GET | /api/v1/ai/models | List available models | + * | POST | /api/v1/ai/conversations | Create a conversation | + * | GET | /api/v1/ai/conversations | List conversations | + * | POST | /api/v1/ai/conversations/:id/messages | Add message to conversation | + * | DELETE | /api/v1/ai/conversations/:id | Delete conversation | + */ +export function buildAIRoutes( + aiService: IAIService, + conversationService: IAIConversationService, + logger: Logger, +): RouteDefinition[] { + return [ + // ── Chat ──────────────────────────────────────────────────── + { + method: 'POST', + path: '/api/v1/ai/chat', + description: 'Synchronous chat completion', + handler: async (req) => { + const { messages, options } = (req.body ?? 
{}) as { + messages?: unknown[]; + options?: Record; + }; + + if (!Array.isArray(messages) || messages.length === 0) { + return { status: 400, body: { error: 'messages array is required' } }; + } + + for (const msg of messages) { + const err = validateMessage(msg); + if (err) return { status: 400, body: { error: err } }; + } + + try { + const result = await aiService.chat(messages as AIMessage[], options as any); + return { status: 200, body: result }; + } catch (err) { + logger.error('[AI Route] /chat error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + + // ── Stream Chat (SSE) ────────────────────────────────────── + { + method: 'POST', + path: '/api/v1/ai/chat/stream', + description: 'SSE streaming chat completion', + handler: async (req) => { + const { messages, options } = (req.body ?? {}) as { + messages?: unknown[]; + options?: Record; + }; + + if (!Array.isArray(messages) || messages.length === 0) { + return { status: 400, body: { error: 'messages array is required' } }; + } + + for (const msg of messages) { + const err = validateMessage(msg); + if (err) return { status: 400, body: { error: err } }; + } + + try { + if (!aiService.streamChat) { + return { status: 501, body: { error: 'Streaming is not supported by the configured AI service' } }; + } + const events = aiService.streamChat(messages as AIMessage[], options as any); + return { status: 200, stream: true, events }; + } catch (err) { + logger.error('[AI Route] /chat/stream error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + + // ── Complete ──────────────────────────────────────────────── + { + method: 'POST', + path: '/api/v1/ai/complete', + description: 'Text completion', + handler: async (req) => { + const { prompt, options } = (req.body ?? 
{}) as { + prompt?: string; + options?: Record; + }; + + if (!prompt || typeof prompt !== 'string') { + return { status: 400, body: { error: 'prompt string is required' } }; + } + + try { + const result = await aiService.complete(prompt, options as any); + return { status: 200, body: result }; + } catch (err) { + logger.error('[AI Route] /complete error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + + // ── Models ────────────────────────────────────────────────── + { + method: 'GET', + path: '/api/v1/ai/models', + description: 'List available models', + handler: async () => { + try { + const models = aiService.listModels ? await aiService.listModels() : []; + return { status: 200, body: { models } }; + } catch (err) { + logger.error('[AI Route] /models error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + + // ── Conversations ────────────────────────────────────────── + { + method: 'POST', + path: '/api/v1/ai/conversations', + description: 'Create a conversation', + handler: async (req) => { + try { + const options = (req.body ?? {}) as Record; + const conversation = await conversationService.create(options as any); + return { status: 201, body: conversation }; + } catch (err) { + logger.error('[AI Route] POST /conversations error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + { + method: 'GET', + path: '/api/v1/ai/conversations', + description: 'List conversations', + handler: async (req) => { + try { + const rawQuery = req.query ?? 
{}; + const options: Record = { ...rawQuery }; + + if (typeof rawQuery.limit === 'string') { + const parsedLimit = Number(rawQuery.limit); + if (!Number.isFinite(parsedLimit) || parsedLimit <= 0 || !Number.isInteger(parsedLimit)) { + return { status: 400, body: { error: 'Invalid limit parameter' } }; + } + options.limit = parsedLimit; + } + + const conversations = await conversationService.list(options as any); + return { status: 200, body: { conversations } }; + } catch (err) { + logger.error('[AI Route] GET /conversations error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + { + method: 'POST', + path: '/api/v1/ai/conversations/:id/messages', + description: 'Add message to a conversation', + handler: async (req) => { + const id = req.params?.id; + if (!id) { + return { status: 400, body: { error: 'conversation id is required' } }; + } + + const message = req.body; + const validationError = validateMessage(message); + if (validationError) { + return { status: 400, body: { error: validationError } }; + } + + try { + const conversation = await conversationService.addMessage(id, message as AIMessage); + return { status: 200, body: conversation }; + } catch (err) { + const msg = err instanceof Error ? err.message : String(err); + if (msg.includes('not found')) { + return { status: 404, body: { error: msg } }; + } + logger.error('[AI Route] POST /conversations/:id/messages error', err instanceof Error ? 
err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + { + method: 'DELETE', + path: '/api/v1/ai/conversations/:id', + description: 'Delete a conversation', + handler: async (req) => { + const id = req.params?.id; + if (!id) { + return { status: 400, body: { error: 'conversation id is required' } }; + } + + try { + await conversationService.delete(id); + return { status: 204 }; + } catch (err) { + logger.error('[AI Route] DELETE /conversations/:id error', err instanceof Error ? err : undefined); + return { status: 500, body: { error: 'Internal AI service error' } }; + } + }, + }, + ]; +} diff --git a/packages/services/service-ai/src/routes/index.ts b/packages/services/service-ai/src/routes/index.ts new file mode 100644 index 000000000..c10f27651 --- /dev/null +++ b/packages/services/service-ai/src/routes/index.ts @@ -0,0 +1,4 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +export { buildAIRoutes } from './ai-routes.js'; +export type { RouteDefinition, RouteRequest, RouteResponse } from './ai-routes.js'; diff --git a/packages/services/service-ai/src/tools/index.ts b/packages/services/service-ai/src/tools/index.ts new file mode 100644 index 000000000..91154720a --- /dev/null +++ b/packages/services/service-ai/src/tools/index.ts @@ -0,0 +1,4 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. + +export { ToolRegistry } from './tool-registry.js'; +export type { ToolHandler } from './tool-registry.js'; diff --git a/packages/services/service-ai/src/tools/tool-registry.ts b/packages/services/service-ai/src/tools/tool-registry.ts new file mode 100644 index 000000000..db26019a5 --- /dev/null +++ b/packages/services/service-ai/src/tools/tool-registry.ts @@ -0,0 +1,109 @@ +// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license. 
+ +import type { AIToolDefinition, AIToolCall, AIToolResult } from '@objectstack/spec/contracts'; + +/** + * Handler function for a registered tool. + * + * Receives parsed arguments and returns the tool output as a string. + */ +export type ToolHandler = (args: Record) => Promise | string; + +/** + * ToolRegistry — Central registry for AI-callable tools. + * + * Plugins register tools (metadata helpers, data queries, business actions) + * during the `ai:ready` hook. The AI service resolves tool calls against + * this registry and feeds the results back to the LLM. + */ +export class ToolRegistry { + private readonly definitions = new Map(); + private readonly handlers = new Map(); + + /** + * Register a tool with its definition and handler. + * @param definition - Tool definition (name, description, parameters schema) + * @param handler - Async function that executes the tool + */ + register(definition: AIToolDefinition, handler: ToolHandler): void { + this.definitions.set(definition.name, definition); + this.handlers.set(definition.name, handler); + } + + /** + * Unregister a tool by name. + */ + unregister(name: string): void { + this.definitions.delete(name); + this.handlers.delete(name); + } + + /** + * Check whether a tool is registered. + */ + has(name: string): boolean { + return this.definitions.has(name); + } + + /** + * Get the definition for a registered tool. + */ + getDefinition(name: string): AIToolDefinition | undefined { + return this.definitions.get(name); + } + + /** + * Return all registered tool definitions. + */ + getAll(): AIToolDefinition[] { + return Array.from(this.definitions.values()); + } + + /** Number of registered tools. */ + get size(): number { + return this.definitions.size; + } + + /** All registered tool names. */ + names(): string[] { + return Array.from(this.definitions.keys()); + } + + /** + * Execute a tool call and return the result. 
+ */ + async execute(toolCall: AIToolCall): Promise { + const handler = this.handlers.get(toolCall.name); + if (!handler) { + return { + toolCallId: toolCall.id, + content: `Tool "${toolCall.name}" is not registered`, + isError: true, + }; + } + + try { + const args: Record = JSON.parse(toolCall.arguments); + const content = await handler(args); + return { toolCallId: toolCall.id, content }; + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + return { toolCallId: toolCall.id, content: message, isError: true }; + } + } + + /** + * Execute multiple tool calls in parallel. + */ + async executeAll(toolCalls: AIToolCall[]): Promise { + return Promise.all(toolCalls.map(tc => this.execute(tc))); + } + + /** + * Clear all registered tools. + */ + clear(): void { + this.definitions.clear(); + this.handlers.clear(); + } +} diff --git a/packages/services/service-ai/tsconfig.json b/packages/services/service-ai/tsconfig.json new file mode 100644 index 000000000..0b8b99d88 --- /dev/null +++ b/packages/services/service-ai/tsconfig.json @@ -0,0 +1,17 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src", + "types": [ + "node" + ] + }, + "include": [ + "src" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/packages/spec/src/contracts/ai-service.test.ts b/packages/spec/src/contracts/ai-service.test.ts index 6a9c1a3f6..734034ef3 100644 --- a/packages/spec/src/contracts/ai-service.test.ts +++ b/packages/spec/src/contracts/ai-service.test.ts @@ -1,5 +1,16 @@ import { describe, it, expect } from 'vitest'; -import type { IAIService, AIMessage, AIResult } from './ai-service'; +import type { + IAIService, + AIMessage, + AIResult, + AIToolDefinition, + AIToolCall, + AIToolResult, + AIRequestOptions, + AIStreamEvent, + AIConversation, + IAIConversationService, +} from './ai-service'; describe('AI Service Contract', () => { it('should allow a minimal IAIService implementation with 
required methods', () => { @@ -91,4 +102,344 @@ describe('AI Service Contract', () => { expect(models).toHaveLength(3); expect(models).toContain('gpt-4'); }); + + // ----------------------------------------------------------------------- + // Tool Calling Types + // ----------------------------------------------------------------------- + + describe('Tool Calling Types', () => { + it('should construct valid AIToolDefinition values', () => { + const tool: AIToolDefinition = { + name: 'get_weather', + description: 'Get current weather for a location', + parameters: { + type: 'object', + properties: { location: { type: 'string' } }, + required: ['location'], + }, + }; + + expect(tool.name).toBe('get_weather'); + expect(tool.description).toBe('Get current weather for a location'); + expect(tool.parameters).toBeDefined(); + }); + + it('should construct valid AIToolCall values', () => { + const call: AIToolCall = { + id: 'call_abc123', + name: 'get_weather', + arguments: JSON.stringify({ location: 'London' }), + }; + + expect(call.id).toBe('call_abc123'); + expect(JSON.parse(call.arguments)).toEqual({ location: 'London' }); + }); + + it('should construct valid AIToolResult values', () => { + const result: AIToolResult = { + toolCallId: 'call_abc123', + content: '{"temp": 18, "unit": "celsius"}', + }; + + expect(result.toolCallId).toBe('call_abc123'); + expect(result.isError).toBeUndefined(); + + const errorResult: AIToolResult = { + toolCallId: 'call_xyz', + content: 'Tool not found', + isError: true, + }; + + expect(errorResult.isError).toBe(true); + }); + + it('should support AIMessageWithTools for tool conversations', () => { + const assistantMsg: AIMessage = { + role: 'assistant', + content: '', + toolCalls: [ + { id: 'call_1', name: 'get_weather', arguments: '{"location":"Paris"}' }, + ], + }; + + expect(assistantMsg.toolCalls).toHaveLength(1); + expect(assistantMsg.toolCalls![0].name).toBe('get_weather'); + + const toolMsg: AIMessage = { + role: 'tool', + content: 
'{"temp": 22}', + toolCallId: 'call_1', + }; + + expect(toolMsg.role).toBe('tool'); + expect(toolMsg.toolCallId).toBe('call_1'); + }); + + it('should support tool options on AIRequestOptions', () => { + const options: AIRequestOptions = { + model: 'gpt-4', + temperature: 0.7, + tools: [ + { + name: 'search', + description: 'Search the web', + parameters: { type: 'object', properties: {} }, + }, + ], + toolChoice: 'auto', + }; + + expect(options.tools).toHaveLength(1); + expect(options.toolChoice).toBe('auto'); + }); + + it('should support non-streaming tool calling via chat()', async () => { + const service: IAIService = { + chat: async (messages, options?) => { + // Simulate tool call detection + if (options?.tools && options.tools.length > 0) { + return { content: 'Using tools', model: 'gpt-4' }; + } + return { content: 'No tools' }; + }, + complete: async () => ({ content: '' }), + }; + + const result = await service.chat( + [{ role: 'user', content: 'What is the weather?' }], + { + model: 'gpt-4', + tools: [{ name: 'get_weather', description: 'Get weather', parameters: {} }], + toolChoice: 'auto', + }, + ); + + expect(result.content).toBe('Using tools'); + }); + }); + + // ----------------------------------------------------------------------- + // Streaming – streamChat + // ----------------------------------------------------------------------- + + describe('streamChat', () => { + it('should allow IAIService implementation with streamChat', () => { + const service: IAIService = { + chat: async () => ({ content: '' }), + complete: async () => ({ content: '' }), + async *streamChat(_messages, _options?) 
{ + yield { type: 'text-delta', textDelta: 'Hello' } satisfies AIStreamEvent; + yield { type: 'finish', result: { content: 'Hello' } } satisfies AIStreamEvent; + }, + }; + + expect(service.streamChat).toBeDefined(); + }); + + it('should stream text-delta events', async () => { + const service: IAIService = { + chat: async () => ({ content: '' }), + complete: async () => ({ content: '' }), + async *streamChat() { + yield { type: 'text-delta' as const, textDelta: 'Hello' }; + yield { type: 'text-delta' as const, textDelta: ' world' }; + yield { type: 'finish' as const, result: { content: 'Hello world' } }; + }, + }; + + const events: AIStreamEvent[] = []; + for await (const event of service.streamChat!([], {})) { + events.push(event); + } + + expect(events).toHaveLength(3); + expect(events[0].type).toBe('text-delta'); + expect(events[0].textDelta).toBe('Hello'); + expect(events[2].type).toBe('finish'); + expect(events[2].result?.content).toBe('Hello world'); + }); + + it('should stream tool-call events', async () => { + const service: IAIService = { + chat: async () => ({ content: '' }), + complete: async () => ({ content: '' }), + async *streamChat() { + yield { + type: 'tool-call-delta' as const, + toolCall: { id: 'call_1', name: 'get_weather' }, + }; + yield { + type: 'tool-call' as const, + toolCall: { id: 'call_1', name: 'get_weather', arguments: '{"location":"NYC"}' }, + }; + yield { type: 'finish' as const, result: { content: '' } }; + }, + }; + + const events: AIStreamEvent[] = []; + for await (const event of service.streamChat!([], {})) { + events.push(event); + } + + expect(events[0].type).toBe('tool-call-delta'); + expect(events[1].toolCall?.arguments).toBe('{"location":"NYC"}'); + }); + + it('should stream error events', async () => { + const service: IAIService = { + chat: async () => ({ content: '' }), + complete: async () => ({ content: '' }), + async *streamChat() { + yield { type: 'error' as const, error: 'Rate limit exceeded' }; + }, + }; + + const 
events: AIStreamEvent[] = []; + for await (const event of service.streamChat!([], {})) { + events.push(event); + } + + expect(events[0].type).toBe('error'); + expect(events[0].error).toBe('Rate limit exceeded'); + }); + }); + + // ----------------------------------------------------------------------- + // IAIConversationService + // ----------------------------------------------------------------------- + + describe('IAIConversationService', () => { + function createMockConversationService(): IAIConversationService { + const store = new Map<string, AIConversation>(); + + return { + async create(options = {}) { + const now = new Date().toISOString(); + const conv: AIConversation = { + id: `conv_${store.size + 1}`, + title: options.title, + agentId: options.agentId, + userId: options.userId, + messages: [], + createdAt: now, + updatedAt: now, + metadata: options.metadata, + }; + store.set(conv.id, conv); + return conv; + }, + + async get(conversationId) { + return store.get(conversationId) ?? null; + }, + + async list(options = {}) { + let results = Array.from(store.values()); + if (options.userId) { + results = results.filter((c) => c.userId === options.userId); + } + if (options.agentId) { + results = results.filter((c) => c.agentId === options.agentId); + } + if (options.limit) { + results = results.slice(0, options.limit); + } + return results; + }, + + async addMessage(conversationId, message) { + const conv = store.get(conversationId); + if (!conv) throw new Error('Conversation not found'); + conv.messages.push(message); + conv.updatedAt = new Date().toISOString(); + return conv; + }, + + async delete(conversationId) { + store.delete(conversationId); + }, + }; + } + + it('should create a conversation', async () => { + const svc = createMockConversationService(); + const conv = await svc.create({ title: 'Test Chat', userId: 'user_1' }); + + expect(conv.id).toBeDefined(); + expect(conv.title).toBe('Test Chat'); + expect(conv.userId).toBe('user_1'); +
expect(conv.messages).toHaveLength(0); + expect(conv.createdAt).toBeDefined(); + }); + + it('should get a conversation by ID', async () => { + const svc = createMockConversationService(); + const created = await svc.create({ title: 'Lookup Test' }); + + const found = await svc.get(created.id); + expect(found).not.toBeNull(); + expect(found!.id).toBe(created.id); + + const missing = await svc.get('nonexistent'); + expect(missing).toBeNull(); + }); + + it('should list conversations with filters', async () => { + const svc = createMockConversationService(); + await svc.create({ userId: 'user_a', agentId: 'agent_1' }); + await svc.create({ userId: 'user_b', agentId: 'agent_1' }); + await svc.create({ userId: 'user_a', agentId: 'agent_2' }); + + const all = await svc.list(); + expect(all).toHaveLength(3); + + const byUser = await svc.list({ userId: 'user_a' }); + expect(byUser).toHaveLength(2); + + const byAgent = await svc.list({ agentId: 'agent_1' }); + expect(byAgent).toHaveLength(2); + + const limited = await svc.list({ limit: 1 }); + expect(limited).toHaveLength(1); + }); + + it('should add messages to a conversation', async () => { + const svc = createMockConversationService(); + const conv = await svc.create({ title: 'Message Test' }); + + const updated = await svc.addMessage(conv.id, { + role: 'user', + content: 'Hello!', + }); + + expect(updated.messages).toHaveLength(1); + expect(updated.messages[0].content).toBe('Hello!'); + + const updated2 = await svc.addMessage(conv.id, { + role: 'assistant', + content: 'Hi there!', + }); + + expect(updated2.messages).toHaveLength(2); + }); + + it('should delete a conversation', async () => { + const svc = createMockConversationService(); + const conv = await svc.create({ title: 'Delete Me' }); + + await svc.delete(conv.id); + const result = await svc.get(conv.id); + expect(result).toBeNull(); + }); + + it('should support metadata on conversations', async () => { + const svc = createMockConversationService(); + const conv 
= await svc.create({ + title: 'With Meta', + metadata: { source: 'web', tags: ['support'] }, + }); + + expect(conv.metadata).toEqual({ source: 'web', tags: ['support'] }); + }); + }); }); diff --git a/packages/spec/src/contracts/ai-service.ts b/packages/spec/src/contracts/ai-service.ts index 563a28e85..9750530ff 100644 --- a/packages/spec/src/contracts/ai-service.ts +++ b/packages/spec/src/contracts/ai-service.ts @@ -14,17 +14,30 @@ */ /** - * A chat message in a conversation + * A chat message in a conversation. + * + * Supports the standard `system`, `user`, and `assistant` roles as well as + * the `tool` role used to return tool execution results to the model. + * Tool-call metadata (`toolCalls`, `toolCallId`) is optional so that plain + * messages remain simple while tool-using conversations can carry the + * necessary context. */ export interface AIMessage { /** Message role */ - role: 'system' | 'user' | 'assistant'; + role: 'system' | 'user' | 'assistant' | 'tool'; /** Message content */ content: string; + /** Tool calls requested by the assistant (present when role='assistant') */ + toolCalls?: AIToolCall[]; + /** ID of the tool call this message responds to (present when role='tool') */ + toolCallId?: string; } /** - * Options for AI completion/chat requests + * Options for AI completion/chat requests. + * + * Includes tool-related configuration so that tool calling works in both + * streaming (`streamChat`) and non-streaming (`chat`) modes. 
*/ export interface AIRequestOptions { /** Model identifier to use */ @@ -35,6 +48,10 @@ export interface AIRequestOptions { maxTokens?: number; /** Stop sequences */ stop?: string[]; + /** Tool definitions available to the model */ + tools?: AIToolDefinition[]; + /** How the model should use tools: 'auto', 'none', or a specific tool name */ + toolChoice?: 'auto' | 'none' | string; } /** @@ -53,6 +70,84 @@ export interface AIResult { }; } +// --------------------------------------------------------------------------- +// Tool Calling Protocol +// --------------------------------------------------------------------------- + +/** + * Definition of a tool that can be invoked by the AI model + */ +export interface AIToolDefinition { + /** Tool name (snake_case identifier) */ + name: string; + /** Human-readable description */ + description: string; + /** JSON Schema describing the tool parameters */ + parameters: Record<string, unknown>; +} + +/** + * A tool call requested by the AI model + */ +export interface AIToolCall { + /** Unique ID for this tool call */ + id: string; + /** Tool name (must match an AIToolDefinition name, snake_case) */ + name: string; + /** JSON-stringified arguments */ + arguments: string; +} + +/** + * Result returned after executing a tool call + */ +export interface AIToolResult { + /** Tool call ID this result corresponds to */ + toolCallId: string; + /** Tool output content */ + content: string; + /** Whether the tool execution errored */ + isError?: boolean; +} + +// --------------------------------------------------------------------------- +// Extended message & request types (backward-compatible aliases) +// --------------------------------------------------------------------------- + +/** + * @deprecated Use {@link AIMessage} directly — tool fields are now on the base type. + */ +export type AIMessageWithTools = AIMessage; + +/** + * @deprecated Use {@link AIRequestOptions} directly — tool fields are now on the base type.
+ */ +export type AIRequestOptionsWithTools = AIRequestOptions; + +// --------------------------------------------------------------------------- +// Streaming Protocol +// --------------------------------------------------------------------------- + +/** + * A single event emitted during a streaming AI response + */ +export interface AIStreamEvent { + /** Event type */ + type: 'text-delta' | 'tool-call-delta' | 'tool-call' | 'finish' | 'error'; + /** Text content delta (for type='text-delta') */ + textDelta?: string; + /** Tool call info (for type='tool-call-delta' or 'tool-call') */ + toolCall?: Partial<AIToolCall>; + /** Final result (for type='finish') */ + result?: AIResult; + /** Error message (for type='error') */ + error?: string; +} + +// --------------------------------------------------------------------------- +// IAIService +// --------------------------------------------------------------------------- + export interface IAIService { /** * Generate a chat completion from a conversation @@ -83,4 +178,90 @@ export interface IAIService { * @returns Array of model identifiers */ listModels?(): Promise<string[]>; + + /** + * Stream a chat completion as an async iterable of events + * @param messages - Array of conversation messages + * @param options - Optional request configuration (supports tool definitions) + * @returns Async iterable of stream events + */ + streamChat?(messages: AIMessage[], options?: AIRequestOptions): AsyncIterable<AIStreamEvent>; +} + +// --------------------------------------------------------------------------- +// Conversation Management +// --------------------------------------------------------------------------- + +/** + * A persistent AI conversation with message history + */ +export interface AIConversation { + /** Conversation ID */ + id: string; + /** Title / summary */ + title?: string; + /** Associated agent ID */ + agentId?: string; + /** User who owns the conversation */ + userId?: string; + /** Messages in the conversation */ + messages: AIMessage[]; +
/** Creation timestamp (ISO 8601) */ + createdAt: string; + /** Last update timestamp (ISO 8601) */ + updatedAt: string; + /** Conversation metadata */ + metadata?: Record<string, unknown>; +} + +/** + * IAIConversationService - Manages persistent AI conversations + * + * Provides CRUD operations for conversations and their messages. + */ +export interface IAIConversationService { + /** + * Create a new conversation + * @param options - Initial conversation properties + * @returns The created conversation + */ + create(options?: { + title?: string; + agentId?: string; + userId?: string; + metadata?: Record<string, unknown>; + }): Promise<AIConversation>; + + /** + * Get a conversation by ID + * @param conversationId - Conversation identifier + * @returns The conversation, or null if not found + */ + get(conversationId: string): Promise<AIConversation | null>; + + /** + * List conversations with optional filters + * @param options - Filter and pagination options + * @returns Array of matching conversations + */ + list(options?: { + userId?: string; + agentId?: string; + limit?: number; + cursor?: string; + }): Promise<AIConversation[]>; + + /** + * Add a message to a conversation + * @param conversationId - Target conversation ID + * @param message - Message to append + * @returns The updated conversation + */ + addMessage(conversationId: string, message: AIMessage): Promise<AIConversation>; + + /** + * Delete a conversation + * @param conversationId - Conversation to delete + */ + delete(conversationId: string): Promise<void>; } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 2eb4ceca5..c60d4902c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1025,6 +1025,25 @@ importers: specifier: ^4.1.2 version: 4.1.2(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(happy-dom@20.8.9)(msw@2.12.14(@types/node@25.5.0)(typescript@6.0.2))(vite@8.0.3(@types/node@25.5.0)(esbuild@0.27.4)(jiti@2.6.1)(tsx@4.21.0)) + packages/services/service-ai: + dependencies: + '@objectstack/core': + specifier: workspace:* + version: link:../../core + '@objectstack/spec': + specifier: workspace:* + version:
link:../../spec + devDependencies: + '@types/node': + specifier: ^25.5.0 + version: 25.5.0 + typescript: + specifier: ^6.0.2 + version: 6.0.2 + vitest: + specifier: ^4.1.2 + version: 4.1.2(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(happy-dom@20.8.9)(msw@2.12.14(@types/node@25.5.0)(typescript@6.0.2))(vite@8.0.3(@types/node@25.5.0)(esbuild@0.27.4)(jiti@2.6.1)(tsx@4.21.0)) + packages/services/service-analytics: dependencies: '@objectstack/core':