33import subprocess
44from typing import Any , Dict , Type
55
6- from langchain .agents import AgentExecutor , create_tool_calling_agent
6+ # from langchain.agents import AgentExecutor, create_tool_calling_agent # Removed
77from langchain_core .language_models import BaseChatModel
8- from langchain_core .prompts import ChatPromptTemplate , MessagesPlaceholder
98from langchain_core .runnables import Runnable
109from langchain_core .tools import BaseTool
10+ from langgraph .prebuilt import create_react_agent
1111from pydantic import BaseModel , Field
1212
1313# --- TOOLS ---
@@ -157,6 +157,9 @@ def process(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
157157# --- AGENT ---
158158
159159
160+ # --- AGENT ---
161+
162+
160163def create_commit_agent (llm : BaseChatModel ) -> Runnable :
161164 # 1. Init Tools
162165 tools = [ReadOnlyShellTool (), FileSearchTool (), FileReadTool ()]
@@ -186,18 +189,22 @@ def create_commit_agent(llm: BaseChatModel) -> Runnable:
1861893. If clarification is needed, explore files.
1871904. Final Answer MUST be ONLY the commit message.
188191"""
189- prompt = ChatPromptTemplate .from_messages (
190- [
191- ("system" , system_prompt ),
192- MessagesPlaceholder ("chat_history" , optional = True ),
193- ("human" , "Generate the commit message." ),
194- MessagesPlaceholder ("agent_scratchpad" ),
195- ]
196- )
192+ # Note: create_react_agent builds its own ReAct prompt, so the explicit
193+ # ChatPromptTemplate above is no longer needed. Our system prompt depends
194+ # on per-run variables (diff, explanation, summary, todo_str), so it cannot
195+ # be bound statically when the graph is constructed. Instead, the wrapper
196+ # below formats the system prompt at invocation time and passes it to the
197+ # graph as the first message, keeping the existing 'invoke' interface
198+ # intact for callers.
199+ 
200+ # (create_react_agent also accepts a prompt/state_modifier argument, but a
201+ # per-run formatted SystemMessage is simpler here.)
202+ 
203+ # 4. Construct Graph
204+ # The graph is built once, without pre-bound prompt variables; run_pipeline
205+ # formats the prompt fresh on every call.
197206
198- # 4. Construct Agent
199- agent = create_tool_calling_agent (llm , tools , prompt )
200- agent_executor = AgentExecutor (agent = agent , tools = tools , verbose = False )
207+ agent_graph = create_react_agent (llm , tools )
201208
202209 # 5. Pipeline with Middleware
203210 def run_pipeline (inputs : Dict [str , Any ]) -> str :
@@ -210,11 +217,34 @@ def run_pipeline(inputs: Dict[str, Any]) -> str:
210217 state .setdefault ("explanation" , "None" )
211218 state .setdefault ("summary" , "None" )
212219 state .setdefault ("todo_str" , "None" )
213- state .setdefault ("chat_history" , [])
220+
221+ # Format System Prompt
222+ formatted_system_prompt = system_prompt .format (
223+ explanation = state ["explanation" ],
224+ todo_str = state ["todo_str" ],
225+ summary = state ["summary" ],
226+ diff = state .get ("diff" , "" ),
227+ )
214228
215229 # Run Agent
216- result = agent_executor .invoke (state )
217- return str (result ["output" ])
230+ # LangGraph prebuilt agents take {"messages": [...]} as input; we inject
231+ # the formatted system prompt as a SystemMessage followed by the user
232+ # request, since create_react_agent reads conversation state from 'messages'.
233+
234+ from langchain_core .messages import HumanMessage , SystemMessage
235+
236+ messages = [
237+ SystemMessage (content = formatted_system_prompt ),
238+ HumanMessage (content = "Generate the commit message." ),
239+ ]
240+
241+ # Invoke graph
242+ # result is a dict with 'messages'
243+ result = agent_graph .invoke ({"messages" : messages })
244+
245+ # Extract last message content
246+ last_message = result ["messages" ][- 1 ]
247+ return str (last_message .content )
218248
219249 # Wrap in RunnableLambda to expose 'invoke'
220250 from langchain_core .runnables import RunnableLambda