CrewAI-Studio icon indicating copy to clipboard operation
CrewAI-Studio copied to clipboard

Can't connect to any LLM

Open navarisun1982 opened this issue 1 year ago • 10 comments

I have this error which I can't identify. I tried working with Ollama or Groq, and added the API key or URL in the .env file, but I get the same result.

Traceback (most recent call last):\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\llms\\OpenAI\\openai.py\", line 860, in completion\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\llms\\OpenAI\\openai.py\", line 796, in completion\n self.make_sync_openai_chat_completion_request(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\llms\\OpenAI\\openai.py\", line 657, in make_sync_openai_chat_completion_request\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\llms\\OpenAI\\openai.py\", line 639, in make_sync_openai_chat_completion_request\n raw_response = openai_client.chat.completions.with_raw_response.create(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\openai\\_legacy_response.py\", line 356, in wrapped\n return cast(LegacyAPIResponse[R], func(*args, **kwargs))\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\openai\\_utils\\_utils.py\", line 275, in wrapper\n return func(*args, **kwargs)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\openai\\resources\\chat\\completions.py\", line 829, in create\n return self._post(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\openai\\_base_client.py\", line 1280, in post\n return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\openai\\_base_client.py\", line 957, in request\n return self._request(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\openai\\_base_client.py\", line 1061, in _request\n raise self._make_status_error_from_response(err.response) from None\nopenai.AuthenticationError: Error code: 401 - {'error': {'message': 'Incorrect API key provided: NA. 
You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\main.py\", line 1607, in completion\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\main.py\", line 1580, in completion\n response = openai_chat_completions.completion(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\llms\\OpenAI\\openai.py\", line 870, in completion\n raise OpenAIError(\nlitellm.llms.OpenAI.openai.OpenAIError: Error code: 401 - {'error': {'message': 'Incorrect API key provided: NA. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"D:\\CrewAI-Studio\\app\\pg_crew_run.py\", line 53, in run_crew\n result = crewai_crew.kickoff(inputs=inputs)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\crew.py\", line 550, in kickoff\n self._handle_crew_planning()\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\crew.py\", line 626, in _handle_crew_planning\n )._handle_crew_planning()\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\utilities\\planning_handler.py\", line 39, in _handle_crew_planning\n result = planner_task.execute_sync()\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\task.py\", line 192, in execute_sync\n return self._execute_core(agent, context, tools)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\task.py\", line 250, in _execute_core\n result = agent.execute_task(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agent.py\", line 357, in execute_task\n result = self.execute_task(task, 
context, tools)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agent.py\", line 357, in execute_task\n result = self.execute_task(task, context, tools)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agent.py\", line 356, in execute_task\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agent.py\", line 345, in execute_task\n result = self.agent_executor.invoke(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agents\\crew_agent_executor.py\", line 103, in invoke\n formatted_answer = self._invoke_loop()\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agents\\crew_agent_executor.py\", line 203, in _invoke_loop\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\agents\\crew_agent_executor.py\", line 125, in _invoke_loop\n answer = self.llm.call(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\crewai\\llm.py\", line 164, in call\n response = litellm.completion(**params)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\utils.py\", line 960, in wrapper\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\utils.py\", line 849, in wrapper\n result = original_function(*args, **kwargs)\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\main.py\", line 3065, in completion\n raise exception_type(\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\litellm_core_utils\\exception_mapping_utils.py\", line 2137, in exception_type\n raise e\n File \"D:\\CrewAI-Studio\\venv\\lib\\site-packages\\litellm\\litellm_core_utils\\exception_mapping_utils.py\", line 343, in exception_type\n raise AuthenticationError(\nlitellm.exceptions.AuthenticationError: litellm.AuthenticationError: AuthenticationError: OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: NA. 
You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}\n"

navarisun1982 avatar Dec 05 '24 11:12 navarisun1982

Is this project still active? Or are there any better alternatives?

navarisun1982 avatar Dec 09 '24 08:12 navarisun1982

After recent updates to CrewAI, certain components have become dependent on OpenAI. I’ll need to rewrite the LLM integration code to ensure full compatibility with alternative LLM providers.

strnad avatar Dec 10 '24 14:12 strnad

Just a short shot:

Probably it is a Windows issue? I recently cloned the project into my WSL (Debian distro) and was able to connect the first agent to my local Ollama instance of openGPT. The only thing I had to pay attention to was that I had to prefix the LLM with ollama/ in .env:

OLLAMA_MODELS="ollama/teuken,[...]

stritti avatar Dec 10 '24 21:12 stritti

Just a short shot:

Probably it is a Windows issue? I recently cloned the project into my WSL (Debian distro) and was able to connect the first agent to my local Ollama instance of openGPT. The only thing I had to pay attention to was that I had to prefix the LLM with ollama/ in .env:

OLLAMA_MODELS="ollama/teuken,[...]

Yes, I have tried the prefix thing; it is still not working on Windows.

navarisun1982 avatar Dec 11 '24 04:12 navarisun1982

Owner

great to hear that, thx

navarisun1982 avatar Dec 11 '24 04:12 navarisun1982

I am also having similar issues trying to use Ollama on MacOS.

ri-Z avatar Dec 25 '24 18:12 ri-Z

@strnad - I'm running into the same issue trying to use Ollama locally.

iammrbt avatar Jan 02 '25 21:01 iammrbt

For Ollama on Windows I might have a hint: I've set up a custom path for models via System Variables.

It has the same variable name as the Ollama model variable in .env. Therefore only the path was displayed as the model. Renaming the variable to ollama_model in .env and app/llm.py solved that.

Another hint: ollama models with / in the name seem to cause problems.

sebmue78 avatar Jan 03 '25 22:01 sebmue78

I've made some changes to llms.py, and it works for me with Ollama now. Can anyone confirm it is fixed for your case?

strnad avatar Jan 24 '25 15:01 strnad

I've tried both Claude & ChatGPT and got the following errors:

For Agent : Anthropic I got this error.

Provider List: https://docs.litellm.ai/docs/providers Provider List: https://docs.litellm.ai/docs/providers Provider List: https://docs.litellm.ai/docs/providers Error during LLM call: litellm.BadRequestError: LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model=model='claude-3-5-sonnet-20240620' max_tokens=4095 temperature=0.1 anthropic_api_url='https://api.anthropic.com' anthropic_api_key=SecretStr('') model_kwargs={} Pass model as E.g. For 'Huggingface' inference endpoints pass in completion(model='huggingface/starcoder',..) Learn more: https://docs.litellm.ai/docs/providers Error running crew: litellm.BadRequestError: LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model=model='claude-3-5-sonnet-20240620' max_tokens=4095 temperature=0.1 anthropic_api_url='https://api.anthropic.com' anthropic_api_key=SecretStr('') model_kwargs={} Pass model as E.g. For 'Huggingface' inference endpoints pass in completion(model='huggingface/starcoder',..) 
Learn more: https://docs.litellm.ai/docs/providers Traceback (most recent call last): File "/CrewAI-Studio/app/pg_crew_run.py", line 62, in run_crew result = crewai_crew.kickoff(inputs=inputs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/crew.py", line 558, in kickoff result = self._run_sequential_process() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/crew.py", line 665, in _run_sequential_process return self._execute_tasks(self.tasks) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/crew.py", line 767, in _execute_tasks task_output = task.execute_sync( ^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/task.py", line 302, in execute_sync return self._execute_core(agent, context, tools) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/task.py", line 366, in _execute_core result = agent.execute_task( ^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agent.py", line 259, in execute_task raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agent.py", line 248, in execute_task result = self.agent_executor.invoke( ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 112, in invoke raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 102, in invoke formatted_answer = self._invoke_loop() ^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 160, in _invoke_loop raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 140, in _invoke_loop answer = self._get_llm_response() ^^^^^^^^^^^^^^^^^^^^^^^^ File 
"/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 210, in _get_llm_response raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 201, in _get_llm_response answer = self.llm.call( ^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/llm.py", line 252, in call response = litellm.completion(**params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/utils.py", line 1100, in wrapper raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/utils.py", line 978, in wrapper result = original_function(args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/main.py", line 2981, in completion raise exception_type( File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/main.py", line 943, in completion model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider( ^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/get_llm_provider_logic.py", line 356, in get_llm_provider raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/get_llm_provider_logic.py", line 333, in get_llm_provider raise litellm.exceptions.BadRequestError( # type: ignore litellm.exceptions.BadRequestError: litellm.BadRequestError: LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model=model='claude-3-5-sonnet-20240620' max_tokens=4095 temperature=0.1 anthropic_api_url='https://api.anthropic.com' anthropic_api_key=SecretStr('*********') model_kwargs={} Pass model as E.g. For 'Huggingface' inference endpoints pass in completion(model='huggingface/starcoder',..) Learn more: https://docs.litellm.ai/docs/providers

And for Agent : ChatGPT I got this error.

LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'. Error during LLM call: litellm.RateLimitError: RateLimitError: OpenAIException - Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}} Error running crew: litellm.RateLimitError: RateLimitError: OpenAIException - Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}} Traceback (most recent call last): File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 707, in completion raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 634, in completion self.make_sync_openai_chat_completion_request( File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py", line 145, in sync_wrapper result = func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 453, in make_sync_openai_chat_completion_request raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 435, in make_sync_openai_chat_completion_request raw_response = openai_client.chat.completions.with_raw_response.create( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_legacy_response.py", line 364, in wrapped return cast(LegacyAPIResponse[R], func(*args, **kwargs)) ^^^^^^^^^^^^^^^^^^^^^ File 
"/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_utils/_utils.py", line 279, in wrapper return func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/resources/chat/completions.py", line 863, in create return self._post( ^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 1283, in post return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 960, in request return self._request( ^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 1049, in _request return self._retry_request( ^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 1098, in _retry_request return self._request( ^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 1049, in _request return self._retry_request( ^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 1098, in _retry_request return self._request( ^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/openai/_base_client.py", line 1064, in _request raise self._make_status_error_from_response(err.response) from None openai.RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. 
For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/main.py", line 1626, in completion raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/main.py", line 1599, in completion response = openai_chat_completions.completion( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 717, in completion raise OpenAIError( litellm.llms.openai.common_utils.OpenAIError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/CrewAI-Studio/app/pg_crew_run.py", line 62, in run_crew result = crewai_crew.kickoff(inputs=inputs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/crew.py", line 558, in kickoff result = self._run_sequential_process() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/crew.py", line 665, in _run_sequential_process return self._execute_tasks(self.tasks) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/crew.py", line 767, in _execute_tasks task_output = task.execute_sync( ^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/task.py", line 302, in execute_sync return self._execute_core(agent, context, tools) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/task.py", line 366, in _execute_core result = agent.execute_task( ^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agent.py", line 259, in execute_task raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agent.py", line 248, in execute_task result = self.agent_executor.invoke( ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 112, in invoke raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 102, in invoke formatted_answer = self._invoke_loop() ^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 160, in _invoke_loop raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 140, in _invoke_loop answer = self._get_llm_response() ^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 210, in _get_llm_response raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 201, in _get_llm_response answer = self.llm.call( ^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/crewai/llm.py", line 252, in call response = litellm.completion(**params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/utils.py", line 1100, in wrapper raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/utils.py", line 978, in wrapper result = original_function(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/main.py", line 2981, in completion raise exception_type( ^^^^^^^^^^^^^^^ File 
"/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2190, in exception_type raise e File "/CrewAI-Studio/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 424, in exception_type raise RateLimitError( litellm.exceptions.RateLimitError: litellm.RateLimitError: RateLimitError: OpenAIException - Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}

Please Help.

wantanej avatar Feb 06 '25 14:02 wantanej