LangChain Quickstart
In this quickstart you will create a simple RAG agent with LangChain, learn how to log it with TruLens, and get feedback on an LLM response.
For evaluation, we will leverage the RAG triad of groundedness, context relevance, and answer relevance.
# !pip install trulens trulens-apps-langchain trulens-providers-openai openai langchain langchainhub langchain-openai langchain_community faiss-cpu bs4 tiktoken
import os
os.environ["OPENAI_API_KEY"] = "sk-proj-..."
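If you prefer not to hardcode the key, you can load it from your environment instead. A minimal sketch, assuming you keep the key in a local .env file and have the python-dotenv package installed (it is not part of the install cell above):

import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()  # reads OPENAI_API_KEY from a local .env file, if present
assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY is not set"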
Import from LangChain and TruLens
from trulens.apps.langchain import TruChain
from trulens.core import TruSession
session = TruSession()
session.reset_database()
Load documents
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
import bs4

# Load the contents of the blog post, keeping only the main content sections
loader = WebBaseLoader(
    web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
    bs_kwargs=dict(
        parse_only=bs4.SoupStrainer(
            class_=("post-content", "post-title", "post-header")
        )
    ),
)
docs = loader.load()
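As an optional sanity check, you can confirm that the page actually loaded before indexing it:

# Optional: confirm the page loaded and inspect the first few characters
print(f"Loaded {len(docs)} document(s)")
print(docs[0].page_content[:200])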
Create Vector Store
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings
vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
all_splits = text_splitter.split_documents(docs)
# Index chunks
_ = vector_store.add_documents(documents=all_splits)
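Before wiring the index into an agent, you can query it directly to verify that retrieval returns sensible chunks:

# Optional: query the vector store directly to spot-check retrieval
hits = vector_store.similarity_search("What is task decomposition?", k=2)
for doc in hits:
    print(doc.metadata)
    print(doc.page_content[:150], end="\n\n")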
Create RAG
from langchain.agents import create_agent
from langchain.tools import tool
from langchain_openai import ChatOpenAI

# Construct a tool for retrieving context
@tool(response_format="content_and_artifact")
def retrieve_context(query: str):
    """Retrieve information to help answer a query."""
    retrieved_docs = vector_store.similarity_search(query, k=2)
    serialized = "\n\n".join(
        f"Source: {doc.metadata}\nContent: {doc.page_content}"
        for doc in retrieved_docs
    )
    return serialized, retrieved_docs


tools = [retrieve_context]

# If desired, specify custom instructions
prompt = (
    "You have access to a tool that retrieves context from a blog post. "
    "Use the tool to help answer user queries."
)

model = ChatOpenAI(model_name="gpt-5-nano", temperature=0)
agent = create_agent(model, tools, system_prompt=prompt)
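It can help to smoke-test the agent once before adding instrumentation. A minimal sketch; create_agent returns a runnable graph whose state carries a messages list, so the final answer is the last message:

# Optional smoke test: invoke the agent directly and print the final answer
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What is task decomposition?"}]}
)
print(result["messages"][-1].content)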
Initialize Feedback Function(s)
import numpy as np
from trulens.core import Feedback
from trulens.providers.openai import OpenAI

provider = OpenAI(model_engine="gpt-4.1-mini")

# Define a groundedness feedback function
f_groundedness = (
    Feedback(
        provider.groundedness_measure_with_cot_reasons, name="Groundedness"
    )
    .on_context(collect_list=True)
    .on_output()
)

# Question/answer relevance between overall question and answer.
f_answer_relevance = (
    Feedback(provider.relevance_with_cot_reasons, name="Answer Relevance")
    .on_input()
    .on_output()
)

# Context relevance between question and each context chunk.
f_context_relevance = (
    Feedback(
        provider.context_relevance_with_cot_reasons, name="Context Relevance"
    )
    .on_input()
    .on_context(collect_list=False)
    .aggregate(np.mean)  # choose a different aggregation method if you wish
)
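Provider feedback functions can also be called directly, which is useful for spot-checking a metric before attaching it to an app. A minimal sketch; the *_with_cot_reasons variants return a (score, reasons) pair with the score in [0, 1]:

# Optional: call a feedback function directly on a hand-written example
score, reasons = provider.relevance_with_cot_reasons(
    "What is task decomposition?",
    "Task decomposition breaks a complex task into smaller subtasks.",
)
print(score)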
Instrument chain for logging with TruLens
tru_recorder = TruChain(
    agent,
    app_name="ChatApplication",
    app_version="Base",
    feedbacks=[f_answer_relevance, f_context_relevance, f_groundedness],
)
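To verify what TruLens instrumented on the wrapped agent, the recorder exposes an inspection helper (a quick check, assuming a recent TruLens release):

# Optional: list the app components and methods TruLens will record
tru_recorder.print_instrumented()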
Record agent invocation
with tru_recorder as recording:
    query = "What is task decomposition?"
    agent.invoke({"messages": [{"role": "user", "content": query}]})
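The recording context also exposes the captured record, which is handy for debugging a single invocation. A minimal sketch; recording.get() returns the one record produced inside the with block:

# Optional: inspect the record captured by the context manager
record = recording.get()
print(record.record_id)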
Check results
session.get_leaderboard()
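For row-level results rather than per-app aggregates, the session can also return the raw records. A sketch, assuming a recent TruLens version where this returns a (DataFrame, feedback-column-names) pair:

# Optional: fetch individual records and their feedback scores
records_df, feedback_cols = session.get_records_and_feedback()
records_df.head()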
from trulens.dashboard import run_dashboard
run_dashboard(session=session)
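run_dashboard starts a local Streamlit app; when you are finished you can shut it down from the same session (assuming your TruLens version also exports stop_dashboard):

from trulens.dashboard import stop_dashboard

stop_dashboard(session=session)  # stops the dashboard started above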