Skip to main content
LangGraph is a framework for building stateful, multi-step agent workflows as graphs. It provides StateGraph for custom control flow, conditional routing, subgraph composition, and checkpointing. HoneyHive integrates with LangGraph via the OpenInference LangChain instrumentor (LangGraph builds on LangChain under the hood), automatically capturing graph execution, node transitions, and LLM calls.
Same instrumentor as LangChain. If you’ve already set up LangChain instrumentation, LangGraph graphs are automatically traced too. For the simpler create_agent pattern without custom graphs, see the LangChain integration.

Quick Start

Add HoneyHive tracing in just 4 lines of code. All graph nodes, edges, and LLM calls are automatically traced.
pip install "honeyhive>=1.0.0rc0" langgraph langchain langchain-openai openinference-instrumentation-langchain
import os
from honeyhive import HoneyHiveTracer
from openinference.instrumentation.langchain import LangChainInstrumentor

# Initialize HoneyHive first: its provider is handed to the instrumentor
# below, so this must run before instrument().
tracer = HoneyHiveTracer.init(
    api_key=os.getenv("HH_API_KEY"),
    project=os.getenv("HH_PROJECT"),
)
# Route all LangChain/LangGraph spans through HoneyHive's tracer provider.
LangChainInstrumentor().instrument(tracer_provider=tracer.provider)

# Your existing LangGraph code works unchanged

Compatibility

Requirement | Version
----------- | -------
Python      | 3.10+
langgraph   | 0.2.0+

What Gets Traced

The instrumentor automatically captures:
  • Graph execution - Each graph.invoke() with inputs and outputs
  • Node transitions - Individual node execution and state changes
  • Subgraph spans - Nested agent invocations within coordinator graphs
  • LLM calls - Model requests, responses, and token usage within nodes
  • Conditional routing - Which branches the graph takes
  • Tool calls - Arguments and results for each tool execution
No manual instrumentation required.

Example: Multi-Agent Delegation via Subgraphs

This example builds a coordinator StateGraph that classifies customer questions and delegates to specialist agents built with create_agent. This is the core LangGraph pattern: combining the graph API with agent subgraphs.
import os
from typing import Literal, TypedDict
from honeyhive import HoneyHiveTracer
from openinference.instrumentation.langchain import LangChainInstrumentor
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from pydantic import BaseModel, Field

tracer = HoneyHiveTracer.init(
    api_key=os.getenv("HH_API_KEY"),
    project=os.getenv("HH_PROJECT"),
)
# Instrument before any graphs or agents are built so they are traced.
LangChainInstrumentor().instrument(tracer_provider=tracer.provider)

# Shared chat model used by the router and every specialist agent below.
model = ChatOpenAI(model="gpt-4o-mini")

@tool
def lookup_order_status(order_id: str) -> dict:
    """Look up the current status of a customer order."""
    # Static demo data keyed by normalized (uppercase) order ID.
    known_orders = {
        "ORD-1001": {"state": "shipped", "eta_days": 2},
        "ORD-1002": {"state": "processing", "eta_days": 5},
        "ORD-1003": {"state": "delayed", "eta_days": 8},
    }
    normalized = order_id.upper()
    if normalized in known_orders:
        return known_orders[normalized]
    return {"state": "not_found"}

@tool
def lookup_policy(topic: str) -> dict:
    """Look up company support policy on a given topic."""
    # Normalize the topic so e.g. "Refund " still matches "refund".
    key = topic.lower().strip()
    known_policies = {
        "refund": {"summary": "Refunds within 30 days for undelivered or damaged items."},
        "cancellation": {"summary": "Cancellation allowed before shipment."},
        "shipping": {"summary": "Standard shipping 3-5 business days."},
    }
    if key in known_policies:
        return known_policies[key]
    return {"summary": "No policy found."}

# Specialist agents: each is a create_agent subgraph with a single tool;
# the coordinator graph invokes them as nested graphs.
order_specialist = create_agent(
    model, tools=[lookup_order_status], name="order_specialist",
    system_prompt="You are an order specialist. Return status and ETA in one sentence.",
)
policy_specialist = create_agent(
    model, tools=[lookup_policy], name="policy_specialist",
    system_prompt="You are a policy specialist. Answer refund, cancellation, and shipping questions concisely.",
)

# Coordinator graph state: the shared dict that flows through every node.
class CoordinatorState(TypedDict):
    # Raw customer question being routed.
    question: str
    # Routing label written by the classify node ("order"/"policy"/"general").
    category: str
    # Final answer written by whichever handler node runs.
    answer: str

class RouteDecision(BaseModel):
    # Structured-output schema for the router LLM: the model must choose
    # exactly one of the three routing labels.
    category: Literal["order", "policy", "general"] = Field(
        description="Route the question to the right specialist"
    )

# Router model constrained to emit a RouteDecision via structured output.
router_llm = model.with_structured_output(RouteDecision)

def classify(state: CoordinatorState) -> dict:
    """Classify the customer question into a routing category."""
    prompt = [
        SystemMessage(content=(
            "Classify the customer question as 'order' (about a specific "
            "order status/delivery), 'policy' (about refund/cancellation/"
            "shipping rules), or 'general'."
        )),
        HumanMessage(content=state["question"]),
    ]
    decision = router_llm.invoke(prompt)
    # Only the routing label is merged back into the coordinator state.
    return {"category": decision.category}

def handle_order(state: CoordinatorState) -> dict:
    """Delegate order questions to the order-specialist subgraph."""
    question = HumanMessage(content=state["question"])
    reply = order_specialist.invoke({"messages": [question]})
    # The specialist's last message is its final answer.
    return {"answer": reply["messages"][-1].content}

def handle_policy(state: CoordinatorState) -> dict:
    """Delegate policy questions to the policy-specialist subgraph."""
    question = HumanMessage(content=state["question"])
    reply = policy_specialist.invoke({"messages": [question]})
    # The specialist's last message is its final answer.
    return {"answer": reply["messages"][-1].content}

def handle_general(state: CoordinatorState) -> dict:
    """Answer general questions directly with the base model (no tools)."""
    prompt = [
        SystemMessage(content="Answer concisely as a support agent."),
        HumanMessage(content=state["question"]),
    ]
    reply = model.invoke(prompt)
    return {"answer": reply.content}

# Wire the coordinator: classify first, then branch to exactly one handler.
builder = StateGraph(CoordinatorState)
builder.add_node("classify", classify)
builder.add_node("order", handle_order)
builder.add_node("policy", handle_policy)
builder.add_node("general", handle_general)
builder.add_edge(START, "classify")
# Route on the category the classify node wrote into state.
builder.add_conditional_edges(
    "classify",
    lambda state: state["category"],
    {"order": "order", "policy": "policy", "general": "general"},
)
# Every handler terminates the graph.
for handler in ("order", "policy", "general"):
    builder.add_edge(handler, END)
coordinator = builder.compile()

result = coordinator.invoke({"question": "Where is my order ORD-1001?", "category": "", "answer": ""})
print(result["answer"])
In HoneyHive, you’ll see the full execution hierarchy: coordinator graph, classify node, routing decision, specialist subgraph agent with tool calls, and LLM spans within each node.

Example: Multi-Turn Conversation with Checkpointing

LangGraph’s MemorySaver persists conversation state across turns with the same thread_id. The agent remembers context from previous turns without you managing history manually.
import os
from honeyhive import HoneyHiveTracer
from openinference.instrumentation.langchain import LangChainInstrumentor
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver

tracer = HoneyHiveTracer.init(
    api_key=os.getenv("HH_API_KEY"),
    project=os.getenv("HH_PROJECT"),
)
# Instrument before the agent is created so every turn is traced.
LangChainInstrumentor().instrument(tracer_provider=tracer.provider)

@tool
def lookup_order_status(order_id: str) -> dict:
    """Look up the current status of a customer order."""
    # Static demo data; uppercase the ID so lookups are case-insensitive.
    demo_orders = {
        "ORD-1001": {"state": "shipped", "eta_days": 2},
        "ORD-1003": {"state": "delayed", "eta_days": 8},
    }
    return demo_orders.get(order_id.upper(), {"state": "not_found"})

@tool
def lookup_policy(topic: str) -> dict:
    """Look up company support policy on a given topic."""
    # Normalize before lookup so casing/whitespace don't matter.
    normalized_topic = topic.lower().strip()
    demo_policies = {
        "cancellation": {"summary": "Cancellation allowed before shipment. Delayed orders can request assisted cancellation."},
    }
    return demo_policies.get(normalized_topic, {"summary": "No policy found."})

model = ChatOpenAI(model="gpt-4o-mini")
# In-memory checkpointer: keeps per-thread conversation state for the life
# of the process (not durable across restarts).
memory = MemorySaver()

agent = create_agent(
    model,
    tools=[lookup_order_status, lookup_policy],
    name="support_agent",
    system_prompt=(
        "You are a customer support agent with memory of the conversation. "
        "Use tools to look up orders and policies. Reference previous "
        "context when the customer follows up."
    ),
    checkpointer=memory,
)

# All invocations sharing this thread_id resume from the same checkpoint.
thread = {"configurable": {"thread_id": "customer-session-001"}}

# Turn 1: check order status
result = agent.invoke(
    {"messages": [HumanMessage(content="Can you check the status of order ORD-1003?")]},
    config=thread,
)
print(result["messages"][-1].content)

# Turn 2: follow-up referencing previous context ("That order") — resolved
# from the checkpointed history, not from anything in this message.
result = agent.invoke(
    {"messages": [HumanMessage(content="That order is delayed. What are my cancellation options?")]},
    config=thread,
)
print(result["messages"][-1].content)
Each turn produces a separate trace in HoneyHive, but the agent maintains conversation history across turns via the shared thread_id.

Troubleshooting

Traces not appearing

  1. Pass the tracer provider - The instrumentor must receive tracer_provider=tracer.provider:
from honeyhive import HoneyHiveTracer
from openinference.instrumentation.langchain import LangChainInstrumentor

tracer = HoneyHiveTracer.init(project="your-project")

# Correct - pass tracer_provider so spans reach HoneyHive
LangChainInstrumentor().instrument(tracer_provider=tracer.provider)

# Wrong - missing tracer_provider; spans will not reach HoneyHive
LangChainInstrumentor().instrument()
  2. Check environment variables - Ensure HH_API_KEY and HH_PROJECT are set
  3. Initialize before building graphs - Call instrument() before creating StateGraph instances or create_agent calls


Resources