Wikipedia Tool not working as expected in Agents: Google API
Checked other resources
- [X] I added a very descriptive title to this issue.
- [X] I searched the LangChain documentation with the integrated search.
- [x] I used the GitHub search to find a similar question and didn't find it.
- [X] I am sure that this is a bug in LangChain rather than my code.
- [X] The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).
Example Code
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain_google_genai import ChatGoogleGenerativeAI

# Google's Gemini model is used as the LLM
llm = ChatGoogleGenerativeAI(
    model='gemini-pro',
    temperature=0.0,
    convert_system_message_to_human=True,
)

tools = load_tools(['llm-math', 'wikipedia'], llm=llm)

agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    handle_parsing_errors=True,
    verbose=True,
)

# `question` holds the question as a plain string, e.g.:
# question = "some question"
result = agent.invoke(question)
Error Message and Stack Trace (if applicable)
Final error
TypeError: WikipediaQueryRun._run() got an unexpected keyword argument 'search'
Stack Trace
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[57], line 1
----> 1 result = agent.invoke("What is the latest MacOS version name?")
File ~/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File ~/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1432, in AgentExecutor._call(self, inputs, run_manager)
1430 # We now enter the agent loop (until it returns something).
1431 while self._should_continue(iterations, time_elapsed):
-> 1432 next_step_output = self._take_next_step(
1433 name_to_tool_map,
1434 color_mapping,
1435 inputs,
1436 intermediate_steps,
1437 run_manager=run_manager,
1438 )
1439 if isinstance(next_step_output, AgentFinish):
1440 return self._return(
1441 next_step_output, intermediate_steps, run_manager=run_manager
1442 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in <listcomp>(.0)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1223, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1221 yield agent_action
1222 for agent_action in actions:
-> 1223 yield self._perform_agent_action(
1224 name_to_tool_map, color_mapping, agent_action, run_manager
1225 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1245, in AgentExecutor._perform_agent_action(self, name_to_tool_map, color_mapping, agent_action, run_manager)
1243 tool_run_kwargs["llm_prefix"] = ""
1244 # We then call the tool on the tool input to get an observation
-> 1245 observation = tool.run(
1246 agent_action.tool_input,
1247 verbose=self.verbose,
1248 color=color,
1249 callbacks=run_manager.get_child() if run_manager else None,
1250 **tool_run_kwargs,
1251 )
1252 else:
1253 tool_run_kwargs = self.agent.tool_run_logging_kwargs()
File ~/anaconda3/lib/python3.11/site-packages/langchain_core/tools.py:422, in BaseTool.run(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)
420 except (Exception, KeyboardInterrupt) as e:
421 run_manager.on_tool_error(e)
--> 422 raise e
423 else:
424 run_manager.on_tool_end(observation, color=color, name=self.name, **kwargs)
File ~/anaconda3/lib/python3.11/site-packages/langchain_core/tools.py:381, in BaseTool.run(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)
378 parsed_input = self._parse_input(tool_input)
379 tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
380 observation = (
--> 381 self._run(*tool_args, run_manager=run_manager, **tool_kwargs)
382 if new_arg_supported
383 else self._run(*tool_args, **tool_kwargs)
384 )
385 except ValidationError as e:
386 if not self.handle_validation_error:
TypeError: WikipediaQueryRun._run() got an unexpected keyword argument 'search'
Description
Depending on the question, the agent sometimes raises the error above and sometimes runs without any issues.
Below are two questions, one for each case.
Working example
question = "What is India?"
result = agent.invoke(question)
output
(some summary content follows the screenshot)
Note: the JSON action output has two keys, and action_input is a plain string:
{
    "action": "wikipedia",
    "action_input": "India"
}
Non-working example
question = "What is the latest MacOS?"
result = agent.invoke(question)
output
The output below is produced, followed by the error message mentioned above.
Note: the JSON action output again has two keys, but this time action_input is a nested object with a "search" key rather than a plain string:
{
    "action": "wikipedia",
    "action_input": {
        "search": "macOS"
    }
}
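To illustrate the mismatch, here is a minimal reproduction sketch (an assumption on my part: it only needs langchain_community plus the wikipedia package, no agent at all). A plain string input works, while the nested dict is forwarded as keyword arguments to WikipediaQueryRun._run(), which reproduces the TypeError:
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper

wiki = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())

# String input, as in the working example: accepted and queried normally
print(wiki.run("India")[:200])

# Dict input, as in the failing example: the keys are passed through as keyword
# arguments, so _run() receives search="macOS" and raises the TypeError above
wiki.run({"search": "macOS"})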
System Info
System Information
pip freeze | grep langchain
langchain==0.1.13
langchain-community==0.0.29
langchain-core==0.1.33
langchain-experimental==0.0.55
langchain-google-genai==0.0.11
langchain-text-splitters==0.0.1
python -m langchain_core.sys_info:
> OS: Darwin
> OS Version: Darwin Kernel Version 23.3.0: Wed Dec 20 21:30:27 PST 2023; root:xnu-10002.81.5~7/RELEASE_ARM64_T8103
> Python Version: 3.11.5 (main, Sep 11 2023, 08:31:25) [Clang 14.0.6 ]
Package Information
-------------------
> langchain_core: 0.1.33
> langchain: 0.1.13
> langchain_community: 0.0.29
> langsmith: 0.1.23
> langchain_experimental: 0.0.55
> langchain_google_genai: 0.0.11
> langchain_text_splitters: 0.0.1
Packages not installed (Not Necessarily a Problem)
--------------------------------------------------
The following packages were not found:
> langgraph
> langserve
System: MacBook Air (M1 chip)
Do not call agent.invoke directly but use:
agent_executor = AgentExecutor(agent=agent, tools=tools)
result = agent_executor.invoke({"input": "some question?"})
@liugddx I'm new to LangChain. Please help me with a few more details.
I saw that initialize_agent has been deprecated.
The AgentExecutor you mentioned needs an agent as a parameter. How else can I create this agent?
I tried it with the agent created by initialize_agent and ended up with an error:
Code
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "what is the latest MacOS"})
Error (short)
TypeError: Agent.plan() got multiple values for argument 'intermediate_steps'
Error (Long)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[15], line 1
----> 1 agent_executor.invoke({"input": "what is the latest MacOS"})
File ~/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File ~/anaconda3/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1432, in AgentExecutor._call(self, inputs, run_manager)
1430 # We now enter the agent loop (until it returns something).
1431 while self._should_continue(iterations, time_elapsed):
-> 1432 next_step_output = self._take_next_step(
1433 name_to_tool_map,
1434 color_mapping,
1435 inputs,
1436 intermediate_steps,
1437 run_manager=run_manager,
1438 )
1439 if isinstance(next_step_output, AgentFinish):
1440 return self._return(
1441 next_step_output, intermediate_steps, run_manager=run_manager
1442 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in <listcomp>(.0)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1166, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1163 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
1165 # Call the LLM to see what to do.
-> 1166 output = self.agent.plan(
1167 intermediate_steps,
1168 callbacks=run_manager.get_child() if run_manager else None,
1169 **inputs,
1170 )
1171 except OutputParserException as e:
1172 if isinstance(self.handle_parsing_errors, bool):
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:397, in RunnableAgent.plan(self, intermediate_steps, callbacks, **kwargs)
389 final_output: Any = None
390 if self.stream_runnable:
391 # Use streaming to make sure that the underlying LLM is invoked in a
392 # streaming
(...)
395 # Because the response from the plan is not a generator, we need to
396 # accumulate the output into final output and return that.
--> 397 for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}):
398 if final_output is None:
399 final_output = chunk
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1571, in AgentExecutor.stream(self, input, config, **kwargs)
1560 config = ensure_config(config)
1561 iterator = AgentExecutorIterator(
1562 self,
1563 input,
(...)
1569 **kwargs,
1570 )
-> 1571 for step in iterator:
1572 yield step
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent_iterator.py:174, in AgentExecutorIterator.__iter__(self)
168 while self.agent_executor._should_continue(
169 self.iterations, self.time_elapsed
170 ):
171 # take the next step: this plans next action, executes it,
172 # yielding action and observation as they are generated
173 next_step_seq: NextStepOutput = []
--> 174 for chunk in self.agent_executor._iter_next_step(
175 self.name_to_tool_map,
176 self.color_mapping,
177 self.inputs,
178 self.intermediate_steps,
179 run_manager,
180 ):
181 next_step_seq.append(chunk)
182 # if we're yielding actions, yield them as they come
183 # do not yield AgentFinish, which will be handled below
File ~/anaconda3/lib/python3.11/site-packages/langchain/agents/agent.py:1166, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1163 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
1165 # Call the LLM to see what to do.
-> 1166 output = self.agent.plan(
1167 intermediate_steps,
1168 callbacks=run_manager.get_child() if run_manager else None,
1169 **inputs,
1170 )
1171 except OutputParserException as e:
1172 if isinstance(self.handle_parsing_errors, bool):
TypeError: Agent.plan() got multiple values for argument 'intermediate_steps'
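My guess (not verified) is that this happens because initialize_agent already returns a fully built AgentExecutor, so wrapping that object in a second AgentExecutor makes the outer executor stream the inner one with intermediate_steps supplied twice. A quick check, assuming the setup from the example code above:
from langchain.agents import AgentExecutor

# initialize_agent returns an AgentExecutor, not a bare agent, so it should
# not be passed as the `agent` argument of another AgentExecutor
legacy = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION)
print(isinstance(legacy, AgentExecutor))  # expected: True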
@Ruhil-DS Try creating your agent like so, using a ChatPromptTemplate for the question:
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

llm_with_tools = llm.bind(functions=tools)

prompt = ChatPromptTemplate.from_messages(
    [
        # MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm_with_tools
    | OpenAIFunctionsAgentOutputParser()
)

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
result = agent_executor.invoke({"input": "ASK YOUR QUESTION HERE ???"})
print(result)
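Another option, since initialize_agent is deprecated: langchain 0.1.x ships prompt-based constructors such as create_structured_chat_agent, which build a runnable agent you can then wrap in AgentExecutor. The sketch below is an assumption on my part (not tested against Gemini) and needs the langchainhub package for hub.pull:
from langchain import hub
from langchain.agents import AgentExecutor, create_structured_chat_agent

# Reference structured-chat prompt published on the LangChain Hub
prompt = hub.pull("hwchase17/structured-chat-agent")

agent = create_structured_chat_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
)

result = agent_executor.invoke({"input": "What is the latest MacOS version name?"})
print(result["output"])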