import * as fs from 'fs';
import { OpenAI } from "@langchain/openai";
import { TextLoader } from 'langchain/document_loaders/fs/text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { OpenAIEmbeddings } from "@langchain/openai";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { RetrievalQAChain } from 'langchain/chains';
import { HoneyHiveLangChainTracer } from 'honeyhive';
/**
 * Runs a retrieval-augmented QA pipeline over `state_of_the_union.txt`,
 * tracing the LangChain components that support callbacks with HoneyHive.
 *
 * Requires the HH_PROJECT and HH_API_KEY environment variables
 * (HH_SESSION_NAME is optional), plus the OpenAI API key that
 * `@langchain/openai` reads from the environment.
 *
 * @throws Error if HH_PROJECT or HH_API_KEY is not set.
 */
async function runQA(): Promise<void> {
  // Fail fast with a clear message instead of letting `undefined` flow into
  // the tracer (under strict mode these are `string | undefined`).
  const project = process.env.HH_PROJECT;
  const apiKey = process.env.HH_API_KEY;
  if (!project || !apiKey) {
    throw new Error('HH_PROJECT and HH_API_KEY environment variables must be set');
  }
  const tracer = new HoneyHiveLangChainTracer({
    project,
    sessionName: process.env.HH_SESSION_NAME,
    apiKey,
  });
  const tracerConfig = {
    callbacks: [tracer],
  };
  // Load the document. TextLoader's constructor takes only the file path —
  // it does not accept a callbacks config, so none is passed here.
  const loader = new TextLoader('state_of_the_union.txt');
  const documents = await loader.load();
  // Split the document into chunks. Text splitters are not Runnables with
  // callback support either; chunk sizes are in characters.
  const textSplitter = new RecursiveCharacterTextSplitter({
    chunkSize: 1000,
    chunkOverlap: 200,
  });
  const docs = await textSplitter.splitDocuments(documents);
  // Create embeddings for indexing/querying.
  const embeddings = new OpenAIEmbeddings();
  // Build a FAISS vector store from the chunks. The third argument is the
  // store's dbConfig (e.g. a custom docstore), not a callbacks config.
  const vectorStore = await FaissStore.fromDocuments(docs, embeddings);
  // Retrievers DO accept callbacks, so retrieval steps are traced.
  const retriever = vectorStore.asRetriever(tracerConfig);
  // Initialize the OpenAI LLM with tracing.
  const llm = new OpenAI({
    temperature: 0,
    ...tracerConfig,
  });
  // Create a RetrievalQA chain over the traced LLM and retriever.
  const qaChain = RetrievalQAChain.fromLLM(llm, retriever);
  // Ask a question. The tracer goes in the *config* (second) argument —
  // spreading it into the input values would not attach it to the run.
  const query = "What did the president say about Ketanji Brown Jackson?";
  const res = await qaChain.invoke({ query }, tracerConfig);
  console.log(res.text);
}
// Script entry point: kick off the pipeline and surface any failure on stderr.
void runQA().catch((err: unknown) => console.error(err));