localGPT
No module named 'triton'
Getting the following error under Windows 11:
OS Name: Microsoft Windows 11 Pro
OS Version: 10.0.22631 N/A Build 22631
OS Manufacturer: Microsoft Corporation
OS Configuration: Standalone Workstation
OS Build Type: Multiprocessor Free
File "C:\Projects\localGPT\localGPT_UI.py", line 4, in
I'm having the same issue. I installed this yesterday and it was working perfectly fine. Now I can't load it.
+1, getting this on Windows 10 as well. I installed this yesterday on a Windows server, then installed another copy today on my local PC to test with different GPUs. Yesterday's install still works fine, while today's hits an identical error.
Windows version for server with working instance: 10.0.17763.4737
Windows version for desktop with broken instance: 10.0.19045.4046
Python versions tried: 3.10.11, 3.11.2
File "C:\workspace\chatbotGPT\run_localGPT.py", line 24, in <module>
from load_models import (
File "C:\workspace\chatbotGPT\load_models.py", line 6, in <module>
from auto_gptq import AutoGPTQForCausalLM
File "C:\workspace\chatbotGPT\env\lib\site-packages\auto_gptq\__init__.py", line 4, in <module>
from .utils.peft_utils import get_gptq_peft_model
File "C:\workspace\chatbotGPT\env\lib\site-packages\auto_gptq\utils\peft_utils.py", line 9, in <module>
from peft import get_peft_model, PeftConfig, PeftModel, PeftType
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\__init__.py", line 22, in <module>
from .auto import (
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\auto.py", line 32, in <module>
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\mapping.py", line 22, in <module>
from .mixed_model import PeftMixedModel
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\mixed_model.py", line 26, in <module>
from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\__init__.py", line 21, in <module>
from .lora import LoraConfig, LoraModel, LoftQConfig
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\lora\__init__.py", line 20, in <module>
from .model import LoraModel
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\lora\model.py", line 42, in <module>
from .awq import dispatch_awq
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\lora\awq.py", line 26, in <module>
from awq.modules.linear import WQLinear_GEMM
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\__init__.py", line 2, in <module>
from awq.models.auto import AutoAWQForCausalLM
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\models\__init__.py", line 15, in <module>
from .mixtral import MixtralAWQForCausalLM
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\models\mixtral.py", line 7, in <module>
from awq.modules.fused.moe import FusedSparseMoeBlock
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\modules\fused\moe.py", line 2, in <module>
import triton
ModuleNotFoundError: No module named 'triton'
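For anyone debugging this: the traceback shows the import chain run_localGPT → load_models → auto_gptq → peft → awq → triton, and triton did not ship official Windows wheels on PyPI at the time of these reports, which is why a fresh Windows install fails at the last link. Here is a quick way to see which package in the chain is actually missing (a minimal sketch, nothing in it is specific to localGPT):

import importlib.util

# Check each link of the import chain from the traceback. find_spec()
# returns None when a module cannot be located, without executing
# any of the package's code.
for name in ("auto_gptq", "peft", "awq", "triton"):
    spec = importlib.util.find_spec(name)
    print(f"{name}: {'found' if spec else 'MISSING'}")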
I was giving it a try on Windows 11; same error.
I'm now running into this problem as well.
This worked for me: pip install https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl
It really helped! After that there was another error, which I fixed by running pip install llama-cpp-python.
Same issue here... ModuleNotFoundError: No module named 'triton'
pip install https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl
Tried this, but now I get the following:
Script failed:
Traceback (most recent call last):
File "C:\workspace\chatbotGPT\run_localGPT.py", line 24, in <module>
from load_models import (
File "C:\workspace\chatbotGPT\load_models.py", line 6, in <module>
from auto_gptq import AutoGPTQForCausalLM
File "C:\workspace\chatbotGPT\env\lib\site-packages\auto_gptq\__init__.py", line 4, in <module>
from .utils.peft_utils import get_gptq_peft_model
File "C:\workspace\chatbotGPT\env\lib\site-packages\auto_gptq\utils\peft_utils.py", line 9, in <module>
from peft import get_peft_model, PeftConfig, PeftModel, PeftType
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\__init__.py", line 22, in <module>
from .auto import (
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\auto.py", line 32, in <module>
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\mapping.py", line 22, in <module>
from .mixed_model import PeftMixedModel
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\mixed_model.py", line 26, in <module>
from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\__init__.py", line 21, in <module>
from .lora import LoraConfig, LoraModel, LoftQConfig
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\lora\__init__.py", line 20, in <module>
from .model import LoraModel
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\lora\model.py", line 42, in <module>
from .awq import dispatch_awq
File "C:\workspace\chatbotGPT\env\lib\site-packages\peft\tuners\lora\awq.py", line 26, in <module>
from awq.modules.linear import WQLinear_GEMM
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\__init__.py", line 2, in <module>
from awq.models.auto import AutoAWQForCausalLM
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\models\__init__.py", line 15, in <module>
from .mixtral import MixtralAWQForCausalLM
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\models\mixtral.py", line 7, in <module>
from awq.modules.fused.moe import FusedSparseMoeBlock
File "C:\workspace\chatbotGPT\env\lib\site-packages\awq\modules\fused\moe.py", line 2, in <module>
import triton
File "C:\workspace\chatbotGPT\env\lib\site-packages\triton\__init__.py", line 10, in <module>
from .runtime import Config, autotune, heuristics, JITFunction, KernelInterface
File "C:\workspace\chatbotGPT\env\lib\site-packages\triton\runtime\__init__.py", line 1, in <module>
from .autotuner import Config, Heuristics, autotune, heuristics # noqa: F401
File "C:\workspace\chatbotGPT\env\lib\site-packages\triton\runtime\autotuner.py", line 7, in <module>
from ..testing import do_bench
File "C:\workspace\chatbotGPT\env\lib\site-packages\triton\testing.py", line 9, in <module>
import triton._C.libtriton.triton as _triton
ImportError: DLL load failed while importing libtriton: The specified module could not be found.
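Two observations on this. First, note the cp310 tag on that wheel: it only matches Python 3.10, so it cannot have installed into a 3.11.2 environment like the one mentioned above. Second, libtriton is a compiled extension, and "DLL load failed" usually means one of its dependency DLLs (the MSVC runtime or a CUDA runtime) is not findable on PATH; that is an assumption on my side, since the error does not say which DLL. A rough probe, with the DLL names as examples that may differ on your setup:

import ctypes

# Probe runtime DLLs that compiled CUDA extensions commonly depend on.
# An OSError points at the missing dependency. The names below are
# examples; adjust cudart64_*.dll to your CUDA toolkit version
# (cudart64_110.dll is the CUDA 11.x runtime).
for dll in ("vcruntime140.dll", "msvcp140.dll", "cudart64_110.dll"):
    try:
        ctypes.WinDLL(dll)
        print(f"{dll}: loads fine")
    except OSError:
        print(f"{dll}: not found; install or repair the matching runtime")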
@Shelushun Same issue here, brother.
@Shelushun Is there any solution?
Tried the suggested pip install https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl, but I still get the same error.
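One more workaround to try if the wheel route keeps failing: in these tracebacks peft only reaches awq (and therefore triton) because the autoawq package is installed, so if you are not using AWQ-quantized models, pip uninstall autoawq should cut the failing import chain entirely. That is my reading of the traceback, not something the localGPT docs prescribe. To verify afterwards:

import importlib.util

# Assumption: peft only imports awq when the autoawq package is present,
# so after "pip uninstall autoawq" this should print False and the
# import that failed in the tracebacks above should succeed.
print("awq still installed:", importlib.util.find_spec("awq") is not None)

from auto_gptq import AutoGPTQForCausalLM  # the import that failed above
print("auto_gptq imports cleanly now")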