Iterating on LLM Apps with TruLens¶
Our simple RAG often fails to retrieve enough information from the insurance manual to properly answer the question. The information needed may be just outside the chunk that is identified and retrieved by our app. Reducing the size of the chunk and adding "sentence windows" to our retrieval is an advanced RAG technique that can help with retrieving more targeted, complete context. Here we can try this technique, and test its success with TruLens.
# Install runtime dependencies (IPython shell magic; only runs inside a notebook).
!pip install trulens_eval llama_index llama_hub llmsherpa sentence-transformers sentencepiece
# Set your API keys. If you already have them in your var env., you can skip these steps.
import os
import openai
# NOTE(review): placeholder values — replace with real credentials or set them in the
# environment instead; never commit real API keys to source control.
os.environ["OPENAI_API_KEY"] = "sk-..."
os.environ["HUGGINGFACE_API_KEY"] = "hf_..."
from trulens_eval import Tru
Load data and test set¶
from llama_hub.smart_pdf_loader import SmartPDFLoader

# llmsherpa is a hosted PDF-parsing service; SmartPDFLoader sends the PDF URL to it
# and gets back layout-aware text chunks.
llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
pdf_loader = SmartPDFLoader(llmsherpa_api_url=llmsherpa_api_url)

# Fetch and parse the III insurance handbook (network call; requires internet access).
documents = pdf_loader.load_data("https://www.iii.org/sites/default/files/docs/pdf/Insurance_Handbook_20103.pdf")

# Load some questions for evaluation
# These 10 questions form the "honest" test set: each is answerable from the handbook,
# so failures indicate retrieval/grounding problems rather than missing knowledge.
honest_evals = [
"What are the typical coverage options for homeowners insurance?",
"What are the requirements for long term care insurance to start?",
"Can annuity benefits be passed to beneficiaries?",
"Are credit scores used to set insurance premiums? If so, how?",
"Who provides flood insurance?",
"Can you get flood insurance outside high-risk areas?",
"How much in losses does fraud account for in property & casualty insurance?",
"Do pay-as-you-drive insurance policies have an impact on greenhouse gas emissions? How much?",
"What was the most costly earthquake in US history for insurers?",
"Does it matter who is at fault to be compensated when injured on the job?"
]
Set up Evaluation¶
import numpy as np
from trulens_eval import Tru, Feedback, TruLlama, OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness

tru = Tru()

# Fix: the original bound this to the name `openai`, shadowing the `openai` module
# imported at the top of the file. Use a distinct name for the feedback provider.
provider = fOpenAI()

# Answer relevance: is the final answer relevant to the user's question?
qa_relevance = (
    Feedback(provider.relevance_with_cot_reasons, name="Answer Relevance")
    .on_input_output()
)

# Context relevance: is each retrieved chunk relevant to the question?
# Scored per source node, then averaged across the retrieved set.
qs_relevance = (
    Feedback(provider.relevance_with_cot_reasons, name="Context Relevance")
    .on_input()
    .on(TruLlama.select_source_nodes().node.text)
    .aggregate(np.mean)
)

# Embedding distance: cosine distance between the query embedding and each
# retrieved chunk's embedding (lower means the chunk is closer to the query).
from langchain.embeddings.openai import OpenAIEmbeddings
from trulens_eval.feedback import Embeddings

model_name = 'text-embedding-ada-002'
embed_model = OpenAIEmbeddings(
    model=model_name,
    openai_api_key=os.environ["OPENAI_API_KEY"]
)
embed = Embeddings(embed_model=embed_model)
f_embed_dist = (
    Feedback(embed.cosine_distance)
    .on_input()
    .on(TruLlama.select_source_nodes().node.text)
)

# Groundedness: is every statement in the answer supported by the retrieved
# context? All source-node texts are collected into one context for the check.
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
    .on(TruLlama.select_source_nodes().node.text.collect())
    .on_output()
    .aggregate(grounded.grounded_statements_aggregator)
)

# The full feedback suite attached to the app recorder below.
honest_feedbacks = [qa_relevance, qs_relevance, f_embed_dist, f_groundedness]
Our simple RAG often fails to retrieve enough information from the insurance manual to properly answer the question. The information needed may be just outside the chunk that is identified and retrieved by our app. Let's try sentence window retrieval to retrieve a wider chunk.
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.indices.postprocessor import SentenceTransformerRerank, MetadataReplacementPostProcessor
from llama_index.core import ServiceContext, VectorStoreIndex, StorageContext, Document, load_index_from_storage
# Fix: `from llama_index import Prompt` is inconsistent with the `llama_index.core`
# imports above and raises ImportError on llama_index >= 0.10, where the legacy
# top-level namespace was removed. PromptTemplate is the current equivalent.
from llama_index.core import PromptTemplate
from llama_index.llms.openai import OpenAI
import os

# initialize llm (temperature 0.5 keeps some variety in phrasing)
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5)

# knowledge store: merge all parsed PDF chunks into a single Document so the
# sentence-window parser can re-chunk the full text itself
document = Document(text="\n\n".join([doc.text for doc in documents]))

# set system prompt used as the text-QA template ({context_str} and {query_str}
# are filled in by the query engine at answer time)
system_prompt = PromptTemplate(
    "We have provided context information below that you may use. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Please answer the question: {query_str}\n"
)
def build_sentence_window_index(
    document, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="sentence_index"
):
    """Build a sentence-window vector index over ``document``, or reload a persisted one.

    Each node holds a single sentence plus a 3-sentence window of surrounding
    context in its metadata. The index is persisted to ``save_dir`` on first
    build and loaded from there on subsequent calls.
    """
    # Sentence-window parsing: one sentence per node, neighbors kept in metadata.
    parser = SentenceWindowNodeParser.from_defaults(
        window_size=3,
        window_metadata_key="window",
        original_text_metadata_key="original_text",
    )
    ctx = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=parser,
    )
    if os.path.exists(save_dir):
        # A persisted index exists — reload it instead of re-embedding.
        return load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=ctx,
        )
    # First run: embed the document and persist the result for next time.
    index = VectorStoreIndex.from_documents([document], service_context=ctx)
    index.storage_context.persist(persist_dir=save_dir)
    return index
# Build (or reload from disk) the sentence-window index over the merged handbook text.
sentence_index = build_sentence_window_index(
document, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="sentence_index"
)
def get_sentence_window_query_engine(
    sentence_index,
    system_prompt,
    similarity_top_k=6,
    rerank_top_n=2,
):
    """Create a query engine that retrieves, window-expands, and reranks nodes.

    Retrieves ``similarity_top_k`` sentence nodes, swaps each node's text for
    its surrounding-sentence window, then keeps the ``rerank_top_n`` best
    nodes according to a cross-encoder reranker.
    """
    # Replace each node's single-sentence text with its wider "window" metadata.
    window_replacer = MetadataReplacementPostProcessor(target_metadata_key="window")
    # Cross-encoder reranker trims the candidate set to the most relevant nodes.
    reranker = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )
    return sentence_index.as_query_engine(
        similarity_top_k=similarity_top_k,
        node_postprocessors=[window_replacer, reranker],
        text_qa_template=system_prompt,
    )
# Assemble the sentence-window query engine with the QA prompt defined above.
sentence_window_engine = get_sentence_window_query_engine(sentence_index, system_prompt=system_prompt)

# Wrap the engine in a TruLens recorder so every query is traced and scored
# with the honest feedback functions.
tru_recorder_rag_sentencewindow = TruLlama(
    sentence_window_engine,
    app_id='2) Sentence Window RAG - Honest Eval',
    feedbacks=honest_feedbacks
)

# Run evaluation on 10 sample questions
# Queries issued inside the recorder context are captured automatically;
# the responses themselves are not needed here, only the recorded traces.
with tru_recorder_rag_sentencewindow as recording:
    for question in honest_evals:
        response = sentence_window_engine.query(question)

# Compare against the baseline app (recorded in a previous run under app_id
# "1) Basic RAG - Honest Eval"); that row is empty if the baseline never ran.
tru.get_leaderboard(app_ids=["1) Basic RAG - Honest Eval", "2) Sentence Window RAG - Honest Eval"])
How does the sentence window RAG compare to our prototype? You decide!