Callbacks
Attach hooks at the agent, model, and tool level:
from langchain_adk import Context, LlmAgent
from langchain_adk.models.llm_request import LlmRequest
from langchain_adk.models.llm_response import LlmResponse

async def log_llm_call(ctx: Context, request: LlmRequest) -> None:
    """Log which agent is calling the LLM and how many messages it sends."""
    message_count = len(request.messages)
    print(f"[{ctx.agent_name}] LLM call with {message_count} messages")

async def track_usage(ctx: Context, response: LlmResponse) -> None:
    """Report token consumption for a completed LLM response."""
    tokens_in = response.input_tokens
    tokens_out = response.output_tokens
    print(f"Tokens: {tokens_in} in / {tokens_out} out")

async def handle_llm_error(
    ctx: Context, request: LlmRequest, error: Exception
) -> LlmResponse | None:
    """Recover from a failed LLM call.

    Return an LlmResponse to substitute for the failed call, or None to
    let the error propagate — in which case it surfaces as an Event
    carrying error metadata.
    """
    message = f"LLM error: {error}"
    print(message)
    return None

async def log_tool(ctx: Context, name: str, args: dict) -> None:
    """Trace every tool invocation with its name and arguments."""
    line = f"[TOOL] {name}({args})"
    print(line)

# Assemble an agent with logging hooks attached at every level.
agent = LlmAgent(
    name="TrackedAgent",
    llm=llm,
    # Tool-level hook: fires before each tool invocation.
    before_tool_callback=log_tool,
    # Model-level hooks: fire around (and on failure of) each LLM call.
    before_model_callback=log_llm_call,
    after_model_callback=track_usage,
    on_model_error_callback=handle_llm_error,
)

AgentTool callbacks

AgentTool supports before_agent_callback and after_agent_callback to intercept child agent events:
from langchain_adk.tools.agent_tool import AgentTool

def on_child_event(event, child_ctx):
    """Observe child-agent events; returning a string would short-circuit."""
    if not event.is_final_response():
        return None
    print(f"Child answered: {event.text}")
    return None

# Wrap child_agent as a tool, observing its events through on_child_event.
tool = AgentTool(child_agent, before_agent_callback=on_child_event)