Pinecone is a vector database service that is designed to enable developers to work with high-dimensional vector data efficiently.
With HoneyHive, you can trace all your operations using a single line of code. Find a list of all supported integrations here.
HoneyHive Setup
Follow the HoneyHive Installation Guide to get your API key and initialize the tracer.
Pinecone Setup
Log in to the Pinecone Console to create a new project and get your API key.
Note: this example requires the Python package pinecone-client==5.0.0; please pin that version.
Example
Here is an example of how to trace your code in HoneyHive.
from openai import OpenAI
from pinecone import Pinecone
from honeyhive.tracer import HoneyHiveTracer
from honeyhive.tracer.custom import trace
# Initialize HoneyHive tracing once per process; OpenAI and Pinecone calls
# made after this point are grouped under the named session in the UI.
HoneyHiveTracer.init(
    api_key="MY_HONEYHIVE_API_KEY",
    project="MY_HONEYHIVE_PROJECT_NAME",
    session_name="pinecone-docs"
)

# Clients used throughout the example: OpenAI for embeddings/completions,
# Pinecone for vector storage and similarity search.
openai_client = OpenAI()
pc = Pinecone(api_key="MY_PINECONE_API_KEY")
index = pc.Index("MY_PINECONE_INDEX_NAME")
def embed_query(query):
    """Return the text-embedding-ada-002 embedding vector for *query*."""
    embedding_response = openai_client.embeddings.create(
        model="text-embedding-ada-002",
        input=query,
    )
    # The API returns one embedding per input; we sent a single string.
    return embedding_response.data[0].embedding
# Toy corpus: one short fact per person.
documents = [
    "Jack is a software engineer.",
    "Jill is a nurse.",
    "Jane is a teacher.",
    "John is a doctor.",
]

# Seed the index. The raw text is stored under the "_node_content" metadata
# key so it can be recovered verbatim at query time.
# NOTE(review): only documents[0] and documents[1] are upserted — presumably
# intentional for a minimal example; confirm if all four were meant to go in.
index.upsert(vectors=[
    {
        "id": "A", "values": embed_query(documents[0]), "metadata": {"_node_content": documents[0]}
    },
    {
        "id": "B", "values": embed_query(documents[1]), "metadata": {"_node_content": documents[1]}
    }
])
@trace(
    config={
        "embedding_model": "text-embedding-ada-002",
        "top_k": 3
    }
)
def get_relevant_documents(query):
    """Embed *query* and return the stored text of the 3 nearest documents.

    The raw document text is recovered from the "_node_content" metadata
    field written at upsert time.
    """
    query_vector = embed_query(query)
    results = index.query(vector=query_vector, top_k=3, include_metadata=True)
    print(results)  # kept for the walkthrough: shows the raw Pinecone response
    return [match['metadata']['_node_content'] for match in results['matches']]
@trace(
    config={
        "model": "gpt-4o",
        "prompt": "You are a helpful assistant"
    },
    metadata={
        "version": 1
    }
)
def generate_response(context, query):
    """Ask gpt-4o to answer *query* grounded in *context*; return the text."""
    user_prompt = f"Context: {context}\n\nQuestion: {query}\n\nAnswer:"
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_prompt},
    ]
    completion = openai_client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
    )
    return completion.choices[0].message.content
@trace()
def rag_pipeline(query):
    """End-to-end RAG: retrieve context from Pinecone, then generate an answer."""
    context = "\n".join(get_relevant_documents(query))
    return generate_response(context, query)
def main():
    """Run one sample query through the RAG pipeline and print the result."""
    query = "What does Jack do?"
    response = rag_pipeline(query)
    print(f"Query: {query}")
    print(f"Response: {response}")


if __name__ == "__main__":
    main()
View your Traces
Once you run your code, you can view your execution trace in the HoneyHive UI by clicking the Data Store
tab on the left sidebar.