Generative Agents don't work with AzureChatOpenAI
System Info
I tried to run this example: https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html
But when I set the LLM to AzureChatOpenAI, it doesn't work. The error is:
Traceback (most recent call last):
  File "/home/adrian-ubuntu/projects/generative-agents/langchain_generative_agent.py", line 79, in <module>
    print(tommie.get_summary())
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/experimental/generative_agents/generative_agent.py", line 215, in get_summary
    self.summary = self._compute_agent_summary()
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/experimental/generative_agents/generative_agent.py", line 201, in _compute_agent_summary
    self.chain(prompt)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chains/base.py", line 239, in run
    return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chains/base.py", line 140, in __call__
    raise e
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chains/base.py", line 134, in __call__
    self._call(inputs, run_manager=run_manager)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chains/llm.py", line 69, in _call
    response = self.generate([inputs], run_manager=run_manager)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chains/llm.py", line 79, in generate
    return self.llm.generate_prompt(
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/base.py", line 142, in generate_prompt
    return self.generate(prompt_messages, stop=stop, callbacks=callbacks)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/base.py", line 90, in generate
    raise e
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/base.py", line 82, in generate
    results = [
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/base.py", line 83, in <listcomp>
    self._generate(m, stop=stop, run_manager=run_manager)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/openai.py", line 293, in _generate
    response = self.completion_with_retry(messages=message_dicts, **params)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/openai.py", line 254, in completion_with_retry
    return _completion_with_retry(**kwargs)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/tenacity/__init__.py", line 289, in wrapped_f
    return self(f, *args, **kw)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/tenacity/__init__.py", line 379, in __call__
    do = self.iter(retry_state=retry_state)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/tenacity/__init__.py", line 314, in iter
    return fut.result()
  File "/usr/lib/python3.10/concurrent/futures/_base.py", line 451, in result
    return self.__get_result()
  File "/usr/lib/python3.10/concurrent/futures/_base.py", line 403, in __get_result
    raise self._exception
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/tenacity/__init__.py", line 382, in __call__
    result = fn(*args, **kwargs)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/langchain/chat_models/openai.py", line 252, in _completion_with_retry
    return self.client.create(**kwargs)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/openai/api_resources/chat_completion.py", line 25, in create
    return super().create(*args, **kwargs)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create
    response, _, api_key = requestor.request(
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/openai/api_requestor.py", line 226, in request
    resp, got_stream = self._interpret_response(result, stream)
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/openai/api_requestor.py", line 620, in _interpret_response
    self._interpret_response_line(
  File "/home/adrian-ubuntu/projects/.venv_generative-agents/lib/python3.10/site-packages/openai/api_requestor.py", line 683, in _interpret_response_line
    raise self.handle_error_response(
openai.error.InvalidRequestError: Resource not found
Process finished with exit code 1
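For what it's worth, with Azure OpenAI a "Resource not found" error usually points at the endpoint, deployment name, or API version the request was sent to, rather than at the calling code. A diagnostic sketch that passes the Azure settings explicitly instead of relying on environment variables (the values below are placeholders, not my real configuration):

from langchain.chat_models import AzureChatOpenAI

llm = AzureChatOpenAI(
    openai_api_base="https://<your-resource>.openai.azure.com/",  # placeholder
    openai_api_version="2023-03-15-preview",  # placeholder; must match your Azure deployment
    deployment_name="gpt-35-turbo",
    openai_api_key="<your-api-key>",  # placeholder
    max_tokens=1500,
)

If the explicit configuration works where the env-var-based one fails inside the chain, the problem would be in how those settings propagate.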
But a simple example like:

from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage

model = AzureChatOpenAI(deployment_name="gpt-35-turbo", max_tokens=1500)
print(model([HumanMessage(content="Translate this sentence from English to French. I love programming.")]))

works perfectly (and both runs are configured with the same environment variables).
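To double-check that both runs really see the same configuration, a quick sanity check (a sketch, assuming the openai SDK resolves its settings from the environment variables) is to print what the SDK will actually use:

import openai

# These reflect whatever the openai SDK resolved from the environment.
print(openai.api_type, openai.api_base, openai.api_version)

These come out identical in the failing and the working run, consistent with the shared env vars.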
Version of langchain: 0.0.168
Who can help?
No response
Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
Reproduction
import logging
from langchain.chat_models import AzureChatOpenAI
from langchain.llms import AzureOpenAI

logging.basicConfig(level=logging.ERROR)

from datetime import datetime, timedelta
from typing import List
from termcolor import colored
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory
import math
import faiss


def relevance_score_fn(score: float) -> float:
    """Return a similarity score on a scale [0, 1]."""
    # This will differ depending on a few things:
    # - the distance / similarity metric used by the VectorStore
    # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
    # This function converts the euclidean norm of normalized embeddings
    # (0 is most similar, sqrt(2) most dissimilar)
    # to a similarity function (0 to 1)
    return 1.0 - score / math.sqrt(2)


def create_new_memory_retriever():
    """Create a new vector store retriever unique to the agent."""
    # Define your embedding model
    embeddings_model = OpenAIEmbeddings(deployment="text-embedding-ada-002_deploy", chunk_size=1)
    # Initialize the vectorstore as empty
    embedding_size = 1536
    index = faiss.IndexFlatL2(embedding_size)
    vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=relevance_score_fn)
    return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=["importance"], k=15)


USER_NAME = "Person A"  # The name you want to use when interviewing the agent.
LLM = AzureChatOpenAI(deployment_name="gpt-35-turbo", max_tokens=1500)

tommies_memory = GenerativeAgentMemory(
    llm=LLM,
    memory_retriever=create_new_memory_retriever(),
    verbose=True,
    reflection_threshold=8,  # we will give this a relatively low number to show how reflection works
)

tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",  # You can add more persistent traits here
    status="looking for a job",
    # When connected to a virtual world, we can have the characters update their status
    memory_retriever=create_new_memory_retriever(),
    llm=LLM,
    memory=tommies_memory,
)

# The current "Summary" of a character can't be made because the agent hasn't made
# any observations yet.
print(tommie.get_summary())
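To isolate whether the failure is specific to GenerativeAgent, a minimal probe (a sketch, not part of the original script; the prompt text is arbitrary) runs the same LLM instance through a bare LLMChain:

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Same AzureChatOpenAI instance as above, wrapped in the simplest possible chain.
probe = LLMChain(llm=LLM, prompt=PromptTemplate.from_template("Say hello to {name}."))
print(probe.run(name="Tommie"))

If this bare chain succeeds while GenerativeAgent fails, the Azure configuration itself is fine and the issue lies in how the agent builds its internal chains.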
Expected behavior
The generative agents example should just work with AzureChatOpenAI as the LLM.