LangGraph astream¶
In this quickstart you will see a bare-bones example of recording a LangGraph app's astream invocation with TruLens.
In [ ]:
# !pip install trulens trulens-providers-openai openai langchain langchainhub langchain-openai langchain_community faiss-cpu bs4 tiktoken
In [ ]:
import os

os.environ["OPENAI_API_KEY"] = "sk-proj-..."
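If you prefer not to hard-code the key, one common alternative (a minimal sketch using only the standard library) is to prompt for it when the environment does not already provide it:

In [ ]:
import os
from getpass import getpass

# Prompt for the key only if it is not already set in the environment.
if "OPENAI_API_KEY" not in os.environ:
    os.environ["OPENAI_API_KEY"] = getpass("OpenAI API key: ")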
In [ ]:
from trulens.core import TruSession

session = TruSession()
session.reset_database()
Create agent¶
In [ ]:
from dataclasses import dataclass

from langchain.chat_models import init_chat_model
from langgraph.graph import StateGraph, START


@dataclass
class MyState:
    topic: str
    joke: str = ""


model = init_chat_model(model="gpt-4o-mini")


def call_model(state: MyState):
    """Call the LLM to generate a joke about a topic."""
    # Note that message events are emitted even when the LLM is run
    # using .invoke rather than .stream.
    model_response = model.invoke(
        [{"role": "user", "content": f"Generate a joke about {state.topic}"}]
    )
    return {"joke": model_response.content}


graph = (
    StateGraph(MyState)
    .add_node(call_model)
    .add_edge(START, "call_model")
    .compile()
)
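Before instrumenting the graph, it can be useful to sanity-check it with a plain synchronous call. A minimal sketch, assuming LangGraph's standard invoke, which accepts a dict for the dataclass state and returns the final state values as a dict (the result name is just for illustration):

In [ ]:
# Run the compiled graph once, without any instrumentation.
result = graph.invoke({"topic": "cats"})
print(result["joke"])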
Instrument graph for logging with TruLens¶
In [ ]:
from trulens.apps.langgraph import TruGraph

tru_recorder = TruGraph(
    graph,
    app_name="Streaming Agent",
    app_version="Base",
)
In [ ]:
with tru_recorder as recording:
    # The stream_mode is set to "messages" to stream LLM tokens.
    # The metadata contains information about the LLM invocation, including the tags.
    async for msg, metadata in graph.astream(
        {"topic": "cats"},
        stream_mode="messages",
    ):
        print(msg.content, end="|", flush=True)
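Once the context manager exits, the recording holds the captured trace. A minimal sketch of pulling the record out and inspecting it (attribute names per the TruLens Record schema):

In [ ]:
# Retrieve the Record captured during the streaming run.
record = recording.get()
print(record.app_id)
print(record.cost)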
Check results¶
In [ ]:
from trulens.dashboard import run_dashboard
run_dashboard()
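Alternatively, results can be inspected directly in the notebook; TruSession.get_leaderboard returns a dataframe summarizing recorded apps:

In [ ]:
# Aggregate latency, cost, and any feedback scores per app version.
session.get_leaderboard()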