ai
ai copied to clipboard
Unable to receive the stream as expected
Description
I am trying to receive a stream of output on the frontend, but instead, I am receiving the entire output at once. I am unable to receive the stream as expected.
Code example
import { LangChainStream, StreamingTextResponse } from "ai";
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { PineconeStore } from "@langchain/pinecone";
import { ConversationalRetrievalQAChain } from 'langchain/chains';
import { pinecone } from '@/utils/pineconeClient';
import { CONDENSE_PROMPT, QA_PROMPT } from "@/utils/promptTemplates";
// Pinecone index to query; falls back to '' if the env var is unset
// (an empty index name will fail at query time — TODO confirm env is configured).
const PINECONE_INDEX_NAME = process.env.PINECONE_INDEX_NAME ?? '';
export async function callChain(question:any,history:any) {
try { const index: any = pinecone?.Index(PINECONE_INDEX_NAME); const vectorStore = await PineconeStore.fromExistingIndex( new OpenAIEmbeddings({}), { pineconeIndex: index, textKey: 'test', namespace: "test", } );
let streamedResponse: any = "";
const model = new ChatOpenAI({
temperature: 0,
modelName: 'gpt-3.5-turbo-16k',
streaming: true,
verbose: true,
callbacks: [
{
handleLLMNewToken(token) {
streamedResponse += token;
},
},
],
});
const nonStreamingModel = new OpenAI({
});
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
qaTemplate: QA_PROMPT,
questionGeneratorTemplate: CONDENSE_PROMPT,
questionGeneratorChainOptions: {
llm: nonStreamingModel,
},
},
);
let response: any = await chain.stream({
question: question,
chat_history: history || [],
}
);
for await (const chunk of streamedResponse) {
console.log(`${chunk}|`);
}
return new StreamingTextResponse(streamedResponse)
} catch (error: any) { throw new Error("Call chain method failed to execute successfully!!");
} }
Additional context
No response
Can you try the new integration approach? https://sdk.vercel.ai/providers/adapters/langchain