Traceback (most recent call last):
File "/opt/anaconda3/bin/interpreter", line 8, in
sys.exit(main())
^^^^^^
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/terminal_interface/start_terminal_interface.py", line 612, in main
start_terminal_interface(interpreter)
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/terminal_interface/start_terminal_interface.py", line 471, in start_terminal_interface
interpreter = profile(
^^^^^^^^
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/terminal_interface/profiles/profiles.py", line 64, in profile
return apply_profile(interpreter, profile, profile_path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/terminal_interface/profiles/profiles.py", line 148, in apply_profile
exec(profile["start_script"], scope, scope)
File "", line 1, in
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/core.py", line 145, in local_setup
self = local_setup(self)
^^^^^^^^^^^^^^^^^
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/terminal_interface/local_setup.py", line 314, in local_setup
interpreter.computer.ai.chat("ping")
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/computer/ai/ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 86, in run
self.load()
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 397, in load
self.interpreter.computer.ai.chat("ping")
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/computer/ai/ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 322, in run
yield from run_tool_calling_llm(self, params)
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/llm/run_tool_calling_llm.py", line 178, in run_tool_calling_llm
for chunk in llm.completions(**request_params):
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 466, in fixed_litellm_completions
raise first_error # If all attempts fail, raise the first error
^^^^^^^^^^^^^^^^^
File "/opt/anaconda3/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 443, in fixed_litellm_completions
yield from litellm.completion(**params)
File "/opt/anaconda3/lib/python3.12/site-packages/litellm/llms/ollama.py", line 428, in ollama_completion_stream
raise e
File "/opt/anaconda3/lib/python3.12/site-packages/litellm/llms/ollama.py", line 406, in ollama_completion_stream
function_call = json.loads(response_content)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/anaconda3/lib/python3.12/json/init.py", line 346, in loads
return _default_decoder.decode(s)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/anaconda3/lib/python3.12/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/anaconda3/lib/python3.12/json/decoder.py", line 353, in raw_decode
obj, end = self.scan_once(s, idx)
^^^^^^^^^^^^^^^^^^^^^^
json.decoder.JSONDecodeError: Unterminated string starting at: line 1 column 2 (char 1)
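For anyone reading this error: it comes from litellm's Ollama streaming handler calling json.loads on the accumulated model output to extract a function call. A minimal repro of the exact message, using a hypothetical stand-in for a response that was cut off mid-string:

import json

# Stand-in for a function-call payload truncated by the stream:
# the object opens, and the quoted key at column 2 (char 1) never closes.
truncated_chunk = '{"name'
json.loads(truncated_chunk)
# json.decoder.JSONDecodeError: Unterminated string starting at: line 1 column 2 (char 1)

In other words, the model's streamed reply wasn't valid function-call JSON; the parse, not Ollama itself, is what blows up.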
Traceback (most recent call last):
File "/Users/niehu/miniforge3/envs/open_interpreter/bin/interpreter", line 8, in
sys.exit(main())
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/terminal_interface/start_terminal_interface.py", line 612, in main
start_terminal_interface(interpreter)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/terminal_interface/start_terminal_interface.py", line 471, in start_terminal_interface
interpreter = profile(
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/terminal_interface/profiles/profiles.py", line 64, in profile
return apply_profile(interpreter, profile, profile_path)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/terminal_interface/profiles/profiles.py", line 148, in apply_profile
exec(profile["start_script"], scope, scope)
File "", line 1, in
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/core.py", line 145, in local_setup
self = local_setup(self)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/terminal_interface/local_setup.py", line 314, in local_setup
interpreter.computer.ai.chat("ping")
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/computer/ai/ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/llm/llm.py", line 86, in run
self.load()
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/llm/llm.py", line 397, in load
self.interpreter.computer.ai.chat("ping")
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/computer/ai/ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/llm/llm.py", line 322, in run
yield from run_tool_calling_llm(self, params)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/llm/run_tool_calling_llm.py", line 178, in run_tool_calling_llm
for chunk in llm.completions(**request_params):
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/llm/llm.py", line 466, in fixed_litellm_completions
raise first_error # If all attempts fail, raise the first error
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/interpreter/core/llm/llm.py", line 443, in fixed_litellm_completions
yield from litellm.completion(**params)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/litellm/llms/ollama.py", line 428, in ollama_completion_stream
raise e
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/site-packages/litellm/llms/ollama.py", line 406, in ollama_completion_stream
function_call = json.loads(response_content)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/json/init.py", line 346, in loads
return _default_decoder.decode(s)
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/Users/niehu/miniforge3/envs/open_interpreter/lib/python3.10/json/decoder.py", line 353, in raw_decode
obj, end = self.scan_once(s, idx)
json.decoder.JSONDecodeError: Unterminated string starting at: line 1 column 2 (char 1)
Traceback (most recent call last):
File "", line 198, in run_module_as_main
File "", line 88, in run_code
File "D:\OpenInterpreter\oi_venv\Scripts\interpreter.exe_main.py", line 7, in
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\terminal_interface\start_terminal_interface.py", line 612, in main
start_terminal_interface(interpreter)
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\terminal_interface\start_terminal_interface.py", line 471, in start_terminal_interface
interpreter = profile(
^^^^^^^^
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\terminal_interface\profiles\profiles.py", line 64, in profile
return apply_profile(interpreter, profile, profile_path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\terminal_interface\profiles\profiles.py", line 148, in apply_profile
exec(profile["start_script"], scope, scope)
File "", line 1, in
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\core.py", line 145, in local_setup
self = local_setup(self)
^^^^^^^^^^^^^^^^^
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\terminal_interface\local_setup.py", line 314, in local_setup
interpreter.computer.ai.chat("ping")
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\computer\ai\ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\llm\llm.py", line 86, in run
self.load()
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\llm\llm.py", line 397, in load
self.interpreter.computer.ai.chat("ping")
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\computer\ai\ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\llm\llm.py", line 322, in run
yield from run_tool_calling_llm(self, params)
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\llm\run_tool_calling_llm.py", line 178, in run_tool_calling_llm
for chunk in llm.completions(**request_params):
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\llm\llm.py", line 466, in fixed_litellm_completions
raise first_error # If all attempts fail, raise the first error
^^^^^^^^^^^^^^^^^
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\interpreter\core\llm\llm.py", line 443, in fixed_litellm_completions
yield from litellm.completion(**params)
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\litellm\llms\ollama.py", line 428, in ollama_completion_stream
raise e
File "D:\OpenInterpreter\oi_venv\Lib\site-packages\litellm\llms\ollama.py", line 406, in ollama_completion_stream
function_call = json.loads(response_content)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Grunkah\AppData\Local\Programs\Python\Python311\Lib\json_init.py", line 346, in loads
return _default_decoder.decode(s)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Grunkah\AppData\Local\Programs\Python\Python311\Lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Grunkah\AppData\Local\Programs\Python\Python311\Lib\json\decoder.py", line 353, in raw_decode
obj, end = self.scan_once(s, idx)
^^^^^^^^^^^^^^^^^^^^^^
json.decoder.JSONDecodeError: Unterminated string starting at: line 1 column 2 (char 1)
(oi_venv) PS D:\OpenInterpreter>
If the issue persists, please share the output of interpreter --version and ollama --version
Sadly it won't work :(
I previously had Python 3.12 installed on the local machine, but I changed it to 3.11 and deleted all 3.12 dependencies.
-> Sandbox:
I installed it in a sandbox as well. Sadly it won't work there either, but I get a different error message.
-> Local:
(oi_venv) PS D:\OpenInterpreter> interpreter --version
Open Interpreter 0.4.3 Developer Preview
(oi_venv) PS D:\OpenInterpreter> ollama --version
ollama version is 0.3.14
(oi_venv) PS D:\OpenInterpreter> python --version
Python 3.11.0
During installation:
"DEPRECATION: wget is being installed using the legacy 'setup.py install' method, because it does not have a 'pyproject.toml' and the 'wheel' package is not installed. pip 23.1 will enforce this behaviour change. A possible replacement is to enable the '--use-pep517' option. Discussion can be found at https://github.com/pypa/pip/issues/8559
Running setup.py install for wget ... done
DEPRECATION: pyperclip is being installed using the legacy 'setup.py install' method, because it does not have a 'pyproject.toml' and the 'wheel' package is not installed. pip 23.1 will enforce this behaviour change. A possible replacement is to enable the '--use-pep517' option. Discussion can be found at https://github.com/pypa/pip/issues/8559"
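For what it's worth, that warning points at its own fix: it fires because the wheel package isn't present in the venv, so pip falls back to the legacy setup.py install path. Installing wheel first, or passing the option pip suggests, should silence it - these are standard pip invocations, offered as a side note rather than a fix for the main error:

(oi_venv) PS D:\OpenInterpreter> python -m pip install wheel
(oi_venv) PS D:\OpenInterpreter> python -m pip install --use-pep517 wget pyperclip

This only addresses the deprecation warning; it's unrelated to the JSONDecodeError above.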
Edit:
After a fresh install following python -m pip install --upgrade pip, the messages above no longer appear, but I still get the same error when starting with --local.
I've tried the approach suggested above and have also reinstalled Open Interpreter a few times recently. I've also reinstalled Python in an attempt to resolve the issue.
After multiple reinstalls of Open Interpreter, starting interpreter --local still results in the same error as above.
I'm starting to suspect that there might be a configuration problem with my Windows 11 installation. Unfortunately, I'm not sure what that would entail or how to fix it. I don't want to reinstall Windows if fixing the issue is otherwise an option.
To be honest, I'm getting frustrated with the issues caused by Windows 11 again - it's not the first time I've encountered problems like this due to its quirks. Last time it was related to PyTorch 😂. Guess how I fixed it.
Traceback (most recent call last):
File "/home/tyson/open-interpreter/.env/bin/interpreter", line 8, in
sys.exit(main())
^^^^^^
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/terminal_interface/start_terminal_interface.py", line 612, in main
start_terminal_interface(interpreter)
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/terminal_interface/start_terminal_interface.py", line 471, in start_terminal_interface
interpreter = profile(
^^^^^^^^
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/terminal_interface/profiles/profiles.py", line 64, in profile
return apply_profile(interpreter, profile, profile_path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/terminal_interface/profiles/profiles.py", line 148, in apply_profile
exec(profile["start_script"], scope, scope)
File "", line 1, in
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/core.py", line 145, in local_setup
self = local_setup(self)
^^^^^^^^^^^^^^^^^
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/terminal_interface/local_setup.py", line 314, in local_setup
interpreter.computer.ai.chat("ping")
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/computer/ai/ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 86, in run
self.load()
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 397, in load
self.interpreter.computer.ai.chat("ping")
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/computer/ai/ai.py", line 134, in chat
for chunk in self.computer.interpreter.llm.run(messages):
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 322, in run
yield from run_tool_calling_llm(self, params)
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/llm/run_tool_calling_llm.py", line 178, in run_tool_calling_llm
for chunk in llm.completions(**request_params):
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 466, in fixed_litellm_completions
raise first_error # If all attempts fail, raise the first error
^^^^^^^^^^^^^^^^^
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/interpreter/core/llm/llm.py", line 443, in fixed_litellm_completions
yield from litellm.completion(**params)
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/litellm/llms/ollama.py", line 428, in ollama_completion_stream
raise e
File "/home/tyson/open-interpreter/.env/lib/python3.12/site-packages/litellm/llms/ollama.py", line 406, in ollama_completion_stream
function_call = json.loads(response_content)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.12/json/init.py", line 346, in loads
return _default_decoder.decode(s)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.12/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.12/json/decoder.py", line 353, in raw_decode
obj, end = self.scan_once(s, idx)
^^^^^^^^^^^^^^^^^^^^^^
json.decoder.JSONDecodeError: Unterminated string starting at: line 1 column 2 (char 1)
(.env) tyson@tyson-b760mds3h:~/open-interpreter$ interpreter --version
Open Interpreter 0.4.3 Developer Preview
(.env) tyson@tyson-b760mds3h:~/open-interpreter$ ollama --version
ollama version is 0.4.0
Add --no-llm_supports_functions when launching interpreter. Judging by the tracebacks above, this avoids the run_tool_calling_llm path where litellm tries to json.loads the streamed Ollama output as a function call, which is what raises the JSONDecodeError.
Thanks! It works! Adding this parameter is useful.
Command:
interpreter --local --no-llm_supports_functions
Then select "ollama" and the model that you want.
How can I use "interpreter --local --no-llm_supports_functions" inside python code
Currently I have:
from interpreter import interpreter  # Open Interpreter's Python API

interpreter.llm.model = "ollama/llama3.2"  # specific configuration may vary
interpreter.llm.api_base = "http://localhost:11434"  # typical Ollama local endpoint
# interpreter.llm.api_key = "your_api_key_if_required"
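Open Interpreter's CLI flags generally mirror attributes on the interpreter object, so --no-llm_supports_functions should correspond to llm.supports_functions = False. A minimal sketch under that assumption - verify the attribute names against your installed version:

from interpreter import interpreter

# Rough equivalent of --local: run offline against a local server.
interpreter.offline = True

# Rough equivalent of --no-llm_supports_functions: declare that the model
# has no native function calling, so the tool-calling/JSON-parsing path
# from the tracebacks above is skipped.
interpreter.llm.supports_functions = False

interpreter.llm.model = "ollama/llama3.2"
interpreter.llm.api_base = "http://localhost:11434"

interpreter.chat("ping")  # start a programmatic chat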