Google Vertex¶
In this quickstart you will learn how to run evaluation functions using models from Google Vertex AI, such as PaLM-2.
In [ ]:
Copied!
# !pip install trulens trulens-apps-langchain trulens-providers-litellm google-cloud-aiplatform==1.36.3 litellm==1.11.1 langchain==0.0.347
# !pip install trulens trulens-apps-langchain trulens-providers-litellm google-cloud-aiplatform==1.36.3 litellm==1.11.1 langchain==0.0.347
Authentication¶
In [ ]:
Copied!
from google.cloud import aiplatform
from google.cloud import aiplatform
In [ ]:
Copied!
aiplatform.init(project="...", location="us-central1")
aiplatform.init(project="...", location="us-central1")
Import from LangChain and TruLens¶
In [ ]:
Copied!
# Imports main tools:
# Imports from langchain to build app. You may need to install langchain first
# with the following:
# !pip install langchain>=0.0.170
from langchain.chains import LLMChain
from langchain.llms import VertexAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts.chat import HumanMessagePromptTemplate
from trulens.core import Feedback
from trulens.core import TruSession
from trulens.apps.langchain import TruChain
from trulens.providers.litellm import LiteLLM
session = TruSession()
session.reset_database()
# Imports main tools:
# Imports from langchain to build app. You may need to install langchain first
# with the following:
# !pip install langchain>=0.0.170
from langchain.chains import LLMChain
from langchain.llms import VertexAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts.chat import HumanMessagePromptTemplate
from trulens.core import Feedback
from trulens.core import TruSession
from trulens.apps.langchain import TruChain
from trulens.providers.litellm import LiteLLM
session = TruSession()
session.reset_database()
Create Simple LLM Application¶
This example uses the LangChain framework and a Vertex AI LLM.
In [ ]:
Copied!
full_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="Provide a helpful response with relevant background information for the following: {prompt}",
input_variables=["prompt"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([full_prompt])
llm = VertexAI()
chain = LLMChain(llm=llm, prompt=chat_prompt_template, verbose=True)
full_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="Provide a helpful response with relevant background information for the following: {prompt}",
input_variables=["prompt"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([full_prompt])
llm = VertexAI()
chain = LLMChain(llm=llm, prompt=chat_prompt_template, verbose=True)
Send your first request¶
In [ ]:
Copied!
prompt_input = "What is a good name for a store that sells colorful socks?"
prompt_input = "What is a good name for a store that sells colorful socks?"
In [ ]:
Copied!
llm_response = chain(prompt_input)
display(llm_response)
llm_response = chain(prompt_input)
display(llm_response)
Initialize Feedback Function(s)¶
In [ ]:
Copied!
# Initialize LiteLLM-based feedback function collection class:
litellm = LiteLLM(model_engine="chat-bison")
# Define a relevance function using LiteLLM
relevance = Feedback(litellm.relevance_with_cot_reasons).on_input_output()
# By default this will check relevance on the main app input and main app
# output.
# Initialize LiteLLM-based feedback function collection class:
litellm = LiteLLM(model_engine="chat-bison")
# Define a relevance function using LiteLLM
relevance = Feedback(litellm.relevance_with_cot_reasons).on_input_output()
# By default this will check relevance on the main app input and main app
# output.
Instrument chain for logging with TruLens¶
In [ ]:
Copied!
tru_recorder = TruChain(
chain, app_name="Chain1_ChatApplication", feedbacks=[relevance]
)
tru_recorder = TruChain(
chain, app_name="Chain1_ChatApplication", feedbacks=[relevance]
)
In [ ]:
Copied!
with tru_recorder as recording:
llm_response = chain(prompt_input)
display(llm_response)
with tru_recorder as recording:
llm_response = chain(prompt_input)
display(llm_response)
In [ ]:
Copied!
session.get_records_and_feedback()[0]
session.get_records_and_feedback()[0]
Explore in a Dashboard¶
In [ ]:
Copied!
from trulens.dashboard import run_dashboard
run_dashboard(session) # open a local streamlit app to explore
# stop_dashboard(session) # stop if needed
from trulens.dashboard import run_dashboard
run_dashboard(session) # open a local streamlit app to explore
# stop_dashboard(session) # stop if needed
Or view results directly in your notebook¶
In [ ]:
Copied!
session.get_records_and_feedback()[0]
session.get_records_and_feedback()[0]