
Error with NewConversationalRetrievalQA implementation related to memory buffer

Open victorguidi opened this issue 1 year ago • 1 comment

Hey everyone,

A few months ago I implemented a RAG pipeline in one of our applications using NewRetrievalQA, and it worked wonders! This week I am in charge of implementing the RAG with chat memory, so I decided to switch to NewConversationalRetrievalQA.

Steps to Reproduce:

  1. Implement NewConversationalRetrievalQA using memory access in the chat.
  2. Use the provided implementation code.
  3. Attempt to run the application.

Expected Behavior: NewConversationalRetrievalQA should run without errors, using the chat memory.

Actual Behavior: Encountered the following error: missing memory key in input values: memory key is of wrong type
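Reading the message, it looks like two sentinel errors joined with fmt.Errorf("%w: %w", ...). Below is a self-contained sketch of the failure mode as I understand it; note that the function and error variable names are my own guesses for illustration, not langchaingo identifiers — only the error text matches the report:

package main

import (
	"errors"
	"fmt"

	"github.com/tmc/langchaingo/schema"
)

// Guessed stand-ins for the chain's sentinel errors; only the message text
// is taken from the error above.
var (
	errMissingMemoryKeyValues = errors.New("missing memory key in input values")
	errMemoryValuesWrongType  = errors.New("memory key is of wrong type")
)

// historyFromValues paraphrases the check that seems to fail: the history
// must arrive as a string or a message slice; anything else, including a
// missing (nil) value, fails both type assertions. The exact slice type the
// chain asserts may differ between versions.
func historyFromValues(values map[string]any) (any, error) {
	raw := values["chat_history"]
	if s, ok := raw.(string); ok {
		return s, nil
	}
	if msgs, ok := raw.([]schema.ChatMessage); ok {
		return msgs, nil
	}
	return nil, fmt.Errorf("%w: %w", errMissingMemoryKeyValues, errMemoryValuesWrongType)
}

func main() {
	// With no "chat_history" present at all, the wrapped error reproduces exactly:
	_, err := historyFromValues(map[string]any{"question": "hi"})
	fmt.Println(err) // missing memory key in input values: memory key is of wrong type
}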

Code used and additional information:

langchaingo version: v0.1.8, Go version: 1.22.0

	mem := memory.NewConversationBuffer(memory.WithChatHistory(memory.NewChatMessageHistory(
		memory.WithPreviousMessages([]schema.ChatMessage{
			schema.HumanChatMessage{Content: "Hello"},
			schema.AIChatMessage{Content: "Hello, how are you?"},
		}),
	)), memory.WithMemoryKey("chat_history"))

	var s Schemas
	llmchain := chains.NewLLMChain(llm, s.FormatPrompt())
	chain := chains.NewStuffDocuments(llmchain)
	// rchain := chains.NewRetrievalQA(chain, qdrant)
	condenseChain := chains.LoadCondenseQuestionGenerator(llm)
	rchain := chains.NewConversationalRetrievalQA(chain, condenseChain, qdrant, mem)
	log.Println(rchain.GetMemory(), rchain.GetInputKeys())
	rchain.ReturnSourceDocuments = true
	rchain.InputKey = "question"
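
For completeness, this is roughly how I invoke the chain. My understanding is that the chains.Call helper is what loads the memory variables before the call and saves the new exchange afterwards (the question string here is just a placeholder):

	ctx := context.Background()
	out, err := chains.Call(ctx, rchain, map[string]any{
		"question": "What did we talk about earlier?",
	})
	if err != nil {
		log.Fatal(err) // this is where the memory-key error surfaces
	}
	log.Println(out)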

s.FormatPrompt() Function:

func (s *Schemas) FormatPrompt() prompts.PromptTemplate {
	prompt := prompts.PromptTemplate{
		Template: "
      PROMPT HERE....

      Document: {{.input_documents}}

      History: {{.chat_history}}

      Question: {{.question}}
    ",
		InputVariables: []string{"input_documents", "chat_history", "question"},
		TemplateFormat: prompts.TemplateFormatGoTemplate,
	}
	return prompt
}
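
Side note: since the template uses TemplateFormatGoTemplate, I assume {{.chat_history}} is rendered by the standard library's text/template, which prints non-string values with fmt-style %v formatting. So even when the chain runs, a message slice would come out as a struct dump rather than a transcript. A toy illustration (imports: text/template, os):

	t := template.Must(template.New("p").Parse("History: {{.chat_history}}"))
	_ = t.Execute(os.Stdout, map[string]any{
		"chat_history": []schema.ChatMessage{schema.HumanChatMessage{Content: "Hello"}},
	})
	// Prints something like: History: [{Hello}]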

I have also tried the default history variable, and also wiring the memory's input key explicitly (the variant sketched below), but nothing seemed to work... Is this a bug, or am I missing something here?
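
For reference, this is what I mean by setting the memory keys explicitly; WithInputKey and WithOutputKey tell SaveContext which input/output values to store (same history as above):

	mem := memory.NewConversationBuffer(
		memory.WithChatHistory(memory.NewChatMessageHistory(
			memory.WithPreviousMessages([]schema.ChatMessage{
				schema.HumanChatMessage{Content: "Hello"},
				schema.AIChatMessage{Content: "Hello, how are you?"},
			}),
		)),
		memory.WithMemoryKey("chat_history"),
		memory.WithInputKey("question"), // which input value SaveContext stores
		memory.WithOutputKey("answer"),  // which output value SaveContext stores
	)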

Thanks a lot

victorguidi commented May 13 '24 19:05


package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchain/chains"
	"github.com/tmc/langchain/llms"
	"github.com/tmc/langchain/memory"
	"github.com/tmc/langchain/prompts"
	"github.com/tmc/langchain/retrievers"
	"github.com/tmc/langchain/schema"
)

// Schemas defines the prompt formatting logic.
type Schemas struct{}

// FormatPrompt returns the prompt template for the chain.
func (s *Schemas) FormatPrompt() prompts.PromptTemplate {
	prompt := prompts.PromptTemplate{
		Template: `
You are a helpful assistant. Use the following information to answer the question.

Documents: {{.input_documents}}

Chat History: {{.chat_history}}

Question: {{.question}}

Answer:
`,
		InputVariables: []string{"input_documents", "chat_history", "question"},
		TemplateFormat: prompts.TemplateFormatGoTemplate,
	}
	return prompt
}

// stubRetriever stands in for a real retriever (e.g. a Qdrant-backed vector
// store turned into a retriever with vectorstores.ToRetriever). It only
// exists so this example compiles and runs on its own.
type stubRetriever struct{}

func (stubRetriever) GetRelevantDocuments(_ context.Context, _ string) ([]schema.Document, error) {
	return []schema.Document{{PageContent: "Paris is the capital of France."}}, nil
}

func main() {
	// Initialize the LLM. openai.New reads OPENAI_API_KEY from the environment;
	// swap in whichever llms.Model you actually use.
	llm, err := openai.New()
	if err != nil {
		log.Fatalf("Failed to initialize LLM: %v", err)
	}

	// Placeholder retriever; replace with your real Qdrant retriever.
	qdrant := stubRetriever{}

	// Initialize conversation memory with chat history.
	mem := memory.NewConversationBuffer(
		memory.WithChatHistory(memory.NewChatMessageHistory(
			memory.WithPreviousMessages([]schema.ChatMessage{
				schema.HumanChatMessage{Content: "Hello"},
				schema.AIChatMessage{Content: "Hello, how are you?"},
			}),
		)),
		memory.WithMemoryKey("chat_history"),
		memory.WithInputKey("question"),
		memory.WithOutputKey("answer"),
	)

	// Create the prompt template.
	var s Schemas
	prompt := s.FormatPrompt()

	// Create the LLM chain.
	llmChain := chains.NewLLMChain(llm, prompt)

	// Create the StuffDocuments chain.
	stuffChain := chains.NewStuffDocuments(llmChain)

	// Build the condense-question chain. An LLMChain satisfies the chains.Chain
	// interface, so it can be passed directly as the condense chain; alternatively,
	// chains.LoadCondenseQuestionGenerator(llm) returns a ready-made one with the
	// library's default prompt.
	condensePrompt := prompts.PromptTemplate{
		Template: `
Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question.

Chat History: {{.chat_history}}

Follow-up Question: {{.question}}

Standalone Question:
`,
		InputVariables: []string{"chat_history", "question"},
		TemplateFormat: prompts.TemplateFormatGoTemplate,
	}
	condenseChain := chains.NewLLMChain(llm, condensePrompt)

	// Create the ConversationalRetrievalQA chain.
	rchain := chains.NewConversationalRetrievalQA(
		stuffChain,
		condenseChain,
		qdrant,
		mem,
	)
	rchain.ReturnSourceDocuments = true
	rchain.InputKey = "question"

	// Log memory and input keys for debugging.
	log.Printf("Memory: %v, InputKeys: %v", rchain.GetMemory(), rchain.GetInputKeys())

	// Run the chain with a sample question.
	ctx := context.Background()
	inputs := map[string]interface{}{
		"question": "What is the capital of France?",
		// Note: chat_history is provided by the memory, so we don't need to include it here.
	}

	// Run through the chains.Call helper so the buffer's memory is loaded
	// before the call and the exchange is saved back afterwards.
	output, err := chains.Call(ctx, rchain, inputs)
	if err != nil {
		log.Fatalf("Chain execution failed: %v", err)
	}

	// Print the result.
	fmt.Printf("Output: %+v\n", output)

	// Simulate a follow-up question to test memory.
	inputs = map[string]interface{}{
		"question": "And what about Spain?",
	}
	output, err = chains.Call(ctx, rchain, inputs)
	if err != nil {
		log.Fatalf("Chain execution failed: %v", err)
	}

	// Print the follow-up result.
	fmt.Printf("Follow-up Output: %+v\n", output)
}
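
The important change from the snippet in the issue is running the chain through chains.Call rather than invoking rchain.Call directly: as far as I can tell, the helper is what calls LoadMemoryVariables before the run and SaveContext after it, so chat_history is actually populated. Calling the chain's own Call method skips that step, which would produce exactly the missing-memory-key error reported above.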

ljluestc commented May 23 '25 13:05