From 6d44703343a201268e0ba69ece73646c6debc375 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Mon, 13 Apr 2026 12:06:09 +0530 Subject: [PATCH 1/2] fix: [AI-678] add stub tool definitions for historical `tool_use` blocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Anthropic API requires every `tool_use` block in message history to have a matching tool definition. When agents switch (Plan→Builder), MCP tools disconnect, or tools are filtered by permissions, the history may reference tools absent from the current set — causing a 400 error: "Requests with 'tool_use' and 'tool_result' blocks must include tool definition." Replace the LiteLLM-only `_noop` workaround with a general fix: - Extract all tool names from `tool-call` blocks in message history - Add stub definitions for any names missing from the active tools set - Stubs return "tool no longer available" if the model attempts to call them Closes #678 Co-Authored-By: Claude Opus 4.6 (1M context) --- packages/opencode/src/session/llm.ts | 55 +++++++++++++++------- packages/opencode/test/session/llm.test.ts | 51 ++++++++++++++++++++ 2 files changed, 88 insertions(+), 18 deletions(-) diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index 4e42fb0d2e..a1052e33b6 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -149,25 +149,27 @@ export namespace LLM { const tools = await resolveTools(input) - // LiteLLM and some Anthropic proxies require the tools parameter to be present - // when message history contains tool calls, even if no tools are being used. - // Add a dummy tool that is never called to satisfy this validation. - // This is enabled for: - // 1. Providers with "litellm" in their ID or API ID (auto-detected) - // 2. 
Providers with explicit "litellmProxy: true" option (opt-in for custom gateways) - const isLiteLLMProxy = - provider.options?.["litellmProxy"] === true || - input.model.providerID.toLowerCase().includes("litellm") || - input.model.api.id.toLowerCase().includes("litellm") - - if (isLiteLLMProxy && Object.keys(tools).length === 0 && hasToolCalls(input.messages)) { - tools["_noop"] = tool({ - description: - "Placeholder for LiteLLM/Anthropic proxy compatibility - required when message history contains tool calls but no active tools are needed", - inputSchema: jsonSchema({ type: "object", properties: {} }), - execute: async () => ({ output: "", title: "", metadata: {} }), - }) + // altimate_change start — ensure tool definitions exist for all tool_use blocks in history + // The Anthropic API (and proxies like LiteLLM) require every tool_use block in + // message history to have a matching tool definition. When agents switch (Plan→Builder), + // MCP tools disconnect, or tools are filtered by permissions, the history may reference + // tools absent from the current set. Add stub definitions for any missing tools. + // Fixes: https://github.com/AltimateAI/altimate-code/issues/678 + const referencedTools = toolNamesFromMessages(input.messages) + for (const name of referencedTools) { + if (!tools[name]) { + tools[name] = tool({ + description: `[Historical] Tool no longer available in this session`, + inputSchema: jsonSchema({ type: "object", properties: {} }), + execute: async () => ({ + output: "This tool is no longer available. Please use an alternative approach.", + title: "", + metadata: {}, + }), + }) + } } + // altimate_change end return streamText({ onError(error) { @@ -276,4 +278,21 @@ export namespace LLM { } return false } + + // altimate_change start — collect tool names from message history to prevent API validation errors + // Anthropic API requires every tool_use block in message history to have a matching tool + // definition. When agents switch (e.g. 
Plan→Builder) or MCP tools disconnect, the history
+  // may reference tools no longer in the active set. This function extracts those names so
+  // stub definitions can be added. Fixes #678.
+  export function toolNamesFromMessages(messages: ModelMessage[]): Set<string> {
+    const names = new Set<string>()
+    for (const msg of messages) {
+      if (!Array.isArray(msg.content)) continue
+      for (const part of msg.content) {
+        if (part.type === "tool-call") names.add(part.toolName)
+      }
+    }
+    return names
+  }
+  // altimate_change end
 }
diff --git a/packages/opencode/test/session/llm.test.ts b/packages/opencode/test/session/llm.test.ts
index b9542088a1..56c7efc7fd 100644
--- a/packages/opencode/test/session/llm.test.ts
+++ b/packages/opencode/test/session/llm.test.ts
@@ -14,6 +14,57 @@ import type { Agent } from "../../src/agent/agent"
 import type { MessageV2 } from "../../src/session/message-v2"
 import { SessionID, MessageID } from "../../src/session/schema"
 
+describe("session.llm.toolNamesFromMessages", () => {
+  test("returns empty set for empty messages", () => {
+    expect(LLM.toolNamesFromMessages([])).toEqual(new Set())
+  })
+
+  test("returns empty set for messages with no tool calls", () => {
+    const messages: ModelMessage[] = [
+      { role: "user", content: [{ type: "text", text: "Hello" }] },
+      { role: "assistant", content: [{ type: "text", text: "Hi" }] },
+    ]
+    expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set())
+  })
+
+  test("extracts tool names from tool-call blocks", () => {
+    const messages = [
+      {
+        role: "assistant",
+        content: [
+          { type: "tool-call", toolCallId: "call-1", toolName: "bash" },
+          { type: "tool-call", toolCallId: "call-2", toolName: "read" },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set(["bash", "read"]))
+  })
+
+  test("deduplicates tool names across messages", () => {
+    const messages = [
+      {
+        role: "assistant",
+        content: [{ type: "tool-call", toolCallId: "call-1", toolName: "bash" }],
+      },
+      {
+        role: 
"assistant", + content: [{ type: "tool-call", toolCallId: "call-2", toolName: "bash" }], + }, + ] as ModelMessage[] + expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set(["bash"])) + }) + + test("ignores tool-result blocks (only extracts from tool-call)", () => { + const messages = [ + { + role: "tool", + content: [{ type: "tool-result", toolCallId: "call-1", toolName: "bash" }], + }, + ] as ModelMessage[] + expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set()) + }) +}) + describe("session.llm.hasToolCalls", () => { test("returns false for empty messages array", () => { expect(LLM.hasToolCalls([])).toBe(false) From 60dcad3a2914d5e1ab601bd1736254afff332f30 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Mon, 13 Apr 2026 12:09:40 +0530 Subject: [PATCH 2/2] =?UTF-8?q?fix:=20address=20code=20review=20findings?= =?UTF-8?q?=20=E2=80=94=20scan=20`tool-result`=20blocks,=20remove=20dead?= =?UTF-8?q?=20code?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Multi-model review (GPT 5.4, Gemini 3.1 Pro) identified three issues: - `toolNamesFromMessages()` now scans both `tool-call` AND `tool-result` blocks, guarding against orphaned tool-results in compacted histories - Use `Object.hasOwn()` instead of direct property check to avoid prototype pollution edge case (`toString`, `constructor`) - Remove dead `hasToolCalls()` function and its tests — sole call site was the LiteLLM workaround deleted in the previous commit Co-Authored-By: Claude Opus 4.6 (1M context) --- packages/opencode/src/session/llm.ts | 16 +--- packages/opencode/test/session/llm.test.ts | 85 ++-------------------- 2 files changed, 10 insertions(+), 91 deletions(-) diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index a1052e33b6..cf6afdc860 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -157,7 +157,7 @@ export namespace LLM { // Fixes: 
https://github.com/AltimateAI/altimate-code/issues/678 const referencedTools = toolNamesFromMessages(input.messages) for (const name of referencedTools) { - if (!tools[name]) { + if (!Object.hasOwn(tools, name)) { tools[name] = tool({ description: `[Historical] Tool no longer available in this session`, inputSchema: jsonSchema({ type: "object", properties: {} }), @@ -267,18 +267,6 @@ export namespace LLM { return input.tools } - // Check if messages contain any tool-call content - // Used to determine if a dummy tool should be added for LiteLLM proxy compatibility - export function hasToolCalls(messages: ModelMessage[]): boolean { - for (const msg of messages) { - if (!Array.isArray(msg.content)) continue - for (const part of msg.content) { - if (part.type === "tool-call" || part.type === "tool-result") return true - } - } - return false - } - // altimate_change start — collect tool names from message history to prevent API validation errors // Anthropic API requires every tool_use block in message history to have a matching tool // definition. When agents switch (e.g. 
Plan→Builder) or MCP tools disconnect, the history
   // may reference tools no longer in the active set. This function extracts those names so
   // stub definitions can be added. Fixes #678.
   export function toolNamesFromMessages(messages: ModelMessage[]): Set<string> {
     const names = new Set<string>()
     for (const msg of messages) {
       if (!Array.isArray(msg.content)) continue
       for (const part of msg.content) {
-        if (part.type === "tool-call") names.add(part.toolName)
+        if (part.type === "tool-call" || part.type === "tool-result") names.add(part.toolName)
       }
     }
     return names
diff --git a/packages/opencode/test/session/llm.test.ts b/packages/opencode/test/session/llm.test.ts
index 56c7efc7fd..93cfce5461 100644
--- a/packages/opencode/test/session/llm.test.ts
+++ b/packages/opencode/test/session/llm.test.ts
@@ -54,101 +54,32 @@
     expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set(["bash"]))
   })
 
-  test("ignores tool-result blocks (only extracts from tool-call)", () => {
+  test("extracts tool names from tool-result blocks", () => {
     const messages = [
       {
         role: "tool",
         content: [{ type: "tool-result", toolCallId: "call-1", toolName: "bash" }],
       },
     ] as ModelMessage[]
-    expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set())
-  })
-})
-
-describe("session.llm.hasToolCalls", () => {
-  test("returns false for empty messages array", () => {
-    expect(LLM.hasToolCalls([])).toBe(false)
-  })
-
-  test("returns false for messages with only text content", () => {
-    const messages: ModelMessage[] = [
-      {
-        role: "user",
-        content: [{ type: "text", text: "Hello" }],
-      },
-      {
-        role: "assistant",
-        content: [{ type: "text", text: "Hi there" }],
-      },
-    ]
-    expect(LLM.hasToolCalls(messages)).toBe(false)
+    expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set(["bash"]))
   })
 
-  test("returns true when messages contain tool-call", () => {
+  test("extracts from both tool-call and tool-result blocks", () => {
     const messages = [
-      {
-        role: "user",
-        content: [{ type: "text", text: "Run a command" }],
-      },
       {
         role: "assistant",
-        content: [
-          {
-            type: "tool-call",
-            toolCallId: "call-123",
-            toolName: "bash",
-          },
-        ],
+        content: [{ type: 
"tool-call", toolCallId: "call-1", toolName: "bash" }], }, - ] as ModelMessage[] - expect(LLM.hasToolCalls(messages)).toBe(true) - }) - - test("returns true when messages contain tool-result", () => { - const messages = [ { role: "tool", - content: [ - { - type: "tool-result", - toolCallId: "call-123", - toolName: "bash", - }, - ], - }, - ] as ModelMessage[] - expect(LLM.hasToolCalls(messages)).toBe(true) - }) - - test("returns false for messages with string content", () => { - const messages: ModelMessage[] = [ - { - role: "user", - content: "Hello world", - }, - { - role: "assistant", - content: "Hi there", + content: [{ type: "tool-result", toolCallId: "call-1", toolName: "bash" }], }, - ] - expect(LLM.hasToolCalls(messages)).toBe(false) - }) - - test("returns true when tool-call is mixed with text content", () => { - const messages = [ { - role: "assistant", - content: [ - { type: "text", text: "Let me run that command" }, - { - type: "tool-call", - toolCallId: "call-456", - toolName: "read", - }, - ], + role: "tool", + content: [{ type: "tool-result", toolCallId: "call-2", toolName: "read" }], }, ] as ModelMessage[] - expect(LLM.hasToolCalls(messages)).toBe(true) + expect(LLM.toolNamesFromMessages(messages)).toEqual(new Set(["bash", "read"])) }) })