[Bug]: AttributeError: 'NoneType' object has no attribute 'lowvram'
Checklist
- [ ] The issue exists after disabling all extensions
- [X] The issue exists on a clean installation of webui
- [ ] The issue is caused by an extension, but I believe it is caused by a bug in the webui
- [ ] The issue exists in the current version of the webui
- [ ] The issue has not been reported before recently
- [ ] The issue has been reported before but has not been fixed yet
What happened?
I've done a fresh install using this automated setup file/instruction: https://github.com/viking1304/a1111-setup/discussions/2
I still get AttributeError: 'NoneType' object has no attribute 'lowvram' error message in the browser when trying to generate an image or load a model
Steps to reproduce the problem
1. Install the model from the repo. 2. Run the terminal commands as shown below.
What should have happened?
Images should be generated as expected.
What browsers do you use to access the UI ?
Google Chrome
Sysinfo
{
"Platform": "macOS-13.6.3-x86_64-i386-64bit",
"Python": "3.10.14",
"Version": "v1.9.3",
"Commit": "1c0a0c4c26f78c32095ebc7f8af82f5c04fca8c0",
"Script path": "/Users/thomasmclaughlin/stable-diffusion-webui",
"Data path": "/Users/thomasmclaughlin/stable-diffusion-webui",
"Extensions dir": "/Users/thomasmclaughlin/stable-diffusion-webui/extensions",
"Checksum": "a71d5230d4b09eda5a5cb0ad0af4a92603e3c20d85b23a32d1aeabf66ffb2d2b",
"Commandline": [
"launch.py",
"--skip-torch-cuda-test",
"--upcast-sampling",
"--no-half-vae",
"--use-cpu",
"interrogate"
],
"Torch env info": {
"torch_version": "2.2.0",
"is_debug_build": "False",
"cuda_compiled_version": null,
"gcc_version": null,
"clang_version": "12.0.0 (clang-1200.0.32.29)",
"cmake_version": "version 3.29.3",
"os": "macOS 13.6.3 (x86_64)",
"libc_version": "N/A",
"python_version": "3.10.14 (main, Mar 19 2024, 21:46:16) [Clang 15.0.0 (clang-1500.1.0.2.5)] (64-bit runtime)",
"python_platform": "macOS-13.6.3-x86_64-i386-64bit",
"is_cuda_available": "False",
"cuda_runtime_version": null,
"cuda_module_loading": "N/A",
"nvidia_driver_version": null,
"nvidia_gpu_models": null,
"cudnn_version": null,
"pip_version": "pip3",
"pip_packages": [
"numpy==1.26.2",
"open-clip-torch==2.20.0",
"pytorch-lightning==1.9.4",
"torch==2.2.0.dev20231010",
"torchdiffeq==0.2.3",
"torchmetrics==1.4.0",
"torchsde==0.2.6",
"torchvision==0.17.0.dev20231010"
],
"conda_packages": null,
"hip_compiled_version": "N/A",
"hip_runtime_version": "N/A",
"miopen_runtime_version": "N/A",
"caching_allocator_config": "",
"is_xnnpack_available": "True",
"cpu_info": "Intel(R) Core(TM) i7-1068NG7 CPU @ 2.30GHz"
},
"Exceptions": [
{
"exception": "'NoneType' object has no attribute 'lowvram'",
"traceback": [
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 57, f",
"res = list(func(*args, **kwargs))"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 36, f",
"res = func(*args, **kwargs)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py, line 109, txt2img",
"processed = processing.process_images(p)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py, line 832, process_images",
"sd_models.reload_model_weights()"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 860, reload_model_weights",
"sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 793, reuse_model_from_already_loaded",
"send_model_to_cpu(sd_model)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 662, send_model_to_cpu",
"if m.lowvram:"
]
]
},
{
"exception": "'NoneType' object has no attribute 'lowvram'",
"traceback": [
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 57, f",
"res = list(func(*args, **kwargs))"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 36, f",
"res = func(*args, **kwargs)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py, line 109, txt2img",
"processed = processing.process_images(p)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py, line 832, process_images",
"sd_models.reload_model_weights()"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 860, reload_model_weights",
"sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 793, reuse_model_from_already_loaded",
"send_model_to_cpu(sd_model)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 662, send_model_to_cpu",
"if m.lowvram:"
]
]
},
{
"exception": "'NoneType' object has no attribute 'lowvram'",
"traceback": [
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 57, f",
"res = list(func(*args, **kwargs))"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 36, f",
"res = func(*args, **kwargs)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py, line 109, txt2img",
"processed = processing.process_images(p)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py, line 832, process_images",
"sd_models.reload_model_weights()"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 860, reload_model_weights",
"sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 793, reuse_model_from_already_loaded",
"send_model_to_cpu(sd_model)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 662, send_model_to_cpu",
"if m.lowvram:"
]
]
},
{
"exception": "'NoneType' object has no attribute 'lowvram'",
"traceback": [
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 57, f",
"res = list(func(*args, **kwargs))"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 36, f",
"res = func(*args, **kwargs)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py, line 109, txt2img",
"processed = processing.process_images(p)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py, line 832, process_images",
"sd_models.reload_model_weights()"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 860, reload_model_weights",
"sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 793, reuse_model_from_already_loaded",
"send_model_to_cpu(sd_model)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py, line 662, send_model_to_cpu",
"if m.lowvram:"
]
]
},
{
"exception": "'NoneType' object has no attribute 'lowvram'",
"traceback": [
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/options.py, line 165, set",
"option.onchange()"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py, line 13, f",
"res = func(*args, **kwargs)"
],
[
"/Users/thomasmclaughlin/stable-diffusion-webui/modules/initialize_util.py, line 181,
Console logs
Launching webui...
################################################################
Install script for stable-diffusion + Web UI
Tested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.
################################################################
################################################################
Running on thomasmclaughlin user
################################################################
################################################################
Repo already cloned, using it as install directory
################################################################
################################################################
Create and activate python venv
################################################################
################################################################
Launching launch.py...
################################################################
Python 3.10.14 (main, Mar 19 2024, 21:46:16) [Clang 15.0.0 (clang-1500.1.0.2.5)]
Version: v1.9.3
Commit hash: 1c0a0c4c26f78c32095ebc7f8af82f5c04fca8c0
Installing torch and torchvision
Looking in indexes: https://download.pytorch.org/whl/nightly/cpu
Collecting torch
Downloading https://download.pytorch.org/whl/nightly/cpu/torch-2.2.0.dev20231010-cp310-none-macosx_10_9_x86_64.whl (147.6 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 147.6/147.6 MB 4.1 MB/s eta 0:00:00
Collecting torchvision
Downloading https://download.pytorch.org/whl/nightly/cpu/torchvision-0.17.0.dev20231010-cp310-cp310-macosx_10_13_x86_64.whl (1.7 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.7/1.7 MB 4.7 MB/s eta 0:00:00
Collecting filelock (from torch)
Downloading https://download.pytorch.org/whl/nightly/filelock-3.13.1-py3-none-any.whl (11 kB)
Collecting typing-extensions (from torch)
Downloading https://download.pytorch.org/whl/nightly/typing_extensions-4.8.0-py3-none-any.whl (31 kB)
Collecting sympy (from torch)
Downloading https://download.pytorch.org/whl/nightly/sympy-1.12-py3-none-any.whl (5.7 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.7/5.7 MB 4.9 MB/s eta 0:00:00
Collecting networkx (from torch)
Downloading https://download.pytorch.org/whl/nightly/networkx-3.2.1-py3-none-any.whl (1.6 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.6/1.6 MB 4.7 MB/s eta 0:00:00
Collecting jinja2 (from torch)
Downloading https://download.pytorch.org/whl/nightly/Jinja2-3.1.3-py3-none-any.whl (133 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 133.2/133.2 kB 2.7 MB/s eta 0:00:00
Collecting fsspec (from torch)
Downloading https://download.pytorch.org/whl/nightly/fsspec-2024.2.0-py3-none-any.whl (170 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 170.9/170.9 kB 3.0 MB/s eta 0:00:00
Collecting numpy (from torchvision)
Downloading https://download.pytorch.org/whl/nightly/numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl (20.6 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 20.6/20.6 MB 4.9 MB/s eta 0:00:00
Collecting requests (from torchvision)
Downloading https://download.pytorch.org/whl/nightly/requests-2.31.0-py3-none-any.whl (62 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 62.6/62.6 kB 1.8 MB/s eta 0:00:00
Collecting pillow!=8.3.*,>=5.3.0 (from torchvision)
Downloading https://download.pytorch.org/whl/nightly/Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl (3.3 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.3/3.3 MB 4.9 MB/s eta 0:00:00
Collecting MarkupSafe>=2.0 (from jinja2->torch)
Downloading https://download.pytorch.org/whl/nightly/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl (14 kB)
Collecting charset-normalizer<4,>=2 (from requests->torchvision)
Downloading https://download.pytorch.org/whl/nightly/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl (122 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 122.5/122.5 kB 2.4 MB/s eta 0:00:00
Collecting idna<4,>=2.5 (from requests->torchvision)
Downloading https://download.pytorch.org/whl/nightly/idna-3.7-py3-none-any.whl (66 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 66.8/66.8 kB 1.8 MB/s eta 0:00:00
Collecting urllib3<3,>=1.21.1 (from requests->torchvision)
Downloading https://download.pytorch.org/whl/nightly/urllib3-2.2.1-py3-none-any.whl (121 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 121.1/121.1 kB 984.8 kB/s eta 0:00:00
Collecting certifi>=2017.4.17 (from requests->torchvision)
Downloading https://download.pytorch.org/whl/nightly/certifi-2024.2.2-py3-none-any.whl (163 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 163.8/163.8 kB 3.0 MB/s eta 0:00:00
Collecting mpmath>=0.19 (from sympy->torch)
Downloading https://download.pytorch.org/whl/nightly/mpmath-1.2.1-py3-none-any.whl (532 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 532.6/532.6 kB 4.2 MB/s eta 0:00:00
Installing collected packages: mpmath, urllib3, typing-extensions, sympy, pillow, numpy, networkx, MarkupSafe, idna, fsspec, filelock, charset-normalizer, certifi, requests, jinja2, torch, torchvision
Successfully installed MarkupSafe-2.1.5 certifi-2024.2.2 charset-normalizer-3.3.2 filelock-3.13.1 fsspec-2024.2.0 idna-3.7 jinja2-3.1.3 mpmath-1.2.1 networkx-3.2.1 numpy-1.26.4 pillow-9.3.0 requests-2.31.0 sympy-1.12 torch-2.2.0.dev20231010 torchvision-0.17.0.dev20231010 typing-extensions-4.8.0 urllib3-2.2.1
Installing clip
Installing open_clip
Cloning assets into /Users/thomasmclaughlin/stable-diffusion-webui/repositories/stable-diffusion-webui-assets...
Cloning into '/Users/thomasmclaughlin/stable-diffusion-webui/repositories/stable-diffusion-webui-assets'...
remote: Enumerating objects: 20, done.
remote: Counting objects: 100% (20/20), done.
remote: Compressing objects: 100% (18/18), done.
remote: Total 20 (delta 0), reused 20 (delta 0), pack-reused 0
Receiving objects: 100% (20/20), 132.70 KiB | 8.29 MiB/s, done.
Cloning Stable Diffusion into /Users/thomasmclaughlin/stable-diffusion-webui/repositories/stable-diffusion-stability-ai...
Cloning into '/Users/thomasmclaughlin/stable-diffusion-webui/repositories/stable-diffusion-stability-ai'...
remote: Enumerating objects: 580, done.
remote: Counting objects: 100% (571/571), done.
remote: Compressing objects: 100% (306/306), done.
remote: Total 580 (delta 278), reused 446 (delta 247), pack-reused 9
Receiving objects: 100% (580/580), 73.44 MiB | 4.87 MiB/s, done.
Resolving deltas: 100% (278/278), done.
Cloning Stable Diffusion XL into /Users/thomasmclaughlin/stable-diffusion-webui/repositories/generative-models...
Cloning into '/Users/thomasmclaughlin/stable-diffusion-webui/repositories/generative-models'...
remote: Enumerating objects: 941, done.
remote: Total 941 (delta 0), reused 0 (delta 0), pack-reused 941
Receiving objects: 100% (941/941), 43.85 MiB | 4.87 MiB/s, done.
Resolving deltas: 100% (490/490), done.
Cloning K-diffusion into /Users/thomasmclaughlin/stable-diffusion-webui/repositories/k-diffusion...
Cloning into '/Users/thomasmclaughlin/stable-diffusion-webui/repositories/k-diffusion'...
remote: Enumerating objects: 1345, done.
remote: Counting objects: 100% (1345/1345), done.
remote: Compressing objects: 100% (434/434), done.
remote: Total 1345 (delta 944), reused 1264 (delta 904), pack-reused 0
Receiving objects: 100% (1345/1345), 239.04 KiB | 1.41 MiB/s, done.
Resolving deltas: 100% (944/944), done.
Cloning BLIP into /Users/thomasmclaughlin/stable-diffusion-webui/repositories/BLIP...
Cloning into '/Users/thomasmclaughlin/stable-diffusion-webui/repositories/BLIP'...
remote: Enumerating objects: 277, done.
remote: Counting objects: 100% (165/165), done.
remote: Compressing objects: 100% (30/30), done.
remote: Total 277 (delta 137), reused 136 (delta 135), pack-reused 112
Receiving objects: 100% (277/277), 7.03 MiB | 4.88 MiB/s, done.
Resolving deltas: 100% (152/152), done.
Installing requirements
Launching Web UI with arguments: --skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate
no module 'xformers'. Processing without...
no module 'xformers'. Processing without...
No module 'xformers'. Proceeding without it.
Warning: caught exception 'Torch not compiled with CUDA enabled', memory monitor disabled
Downloading: "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" to /Users/thomasmclaughlin/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors
100%|██████████████████████████████████████████████████████████████████████████████| 3.97G/3.97G [20:11<00:00, 3.52MB/s]
Calculating sha256 for /Users/thomasmclaughlin/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors: Running on local URL: http://127.0.0.1:7860
To create a public link, set `share=True` in `launch()`.
Startup time: 1394.5s (prepare environment: 170.9s, import torch: 4.9s, import gradio: 0.8s, setup paths: 1.3s, initialize shared: 0.2s, other imports: 1.1s, list SD models: 1211.8s, load scripts: 1.5s, create ui: 1.0s, gradio launch: 0.9s).
6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa
Loading weights [6ce0161689] from /Users/thomasmclaughlin/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors
Creating model from config: /Users/thomasmclaughlin/stable-diffusion-webui/configs/v1-inference.yaml
/Users/thomasmclaughlin/stable-diffusion-webui/venv/lib/python3.10/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Applying attention optimization: InvokeAI... done.
loading stable diffusion model: AssertionError
Traceback (most recent call last):
File "/usr/local/Cellar/[email protected]/3.10.14/Frameworks/Python.framework/Versions/3.10/lib/python3.10/threading.py", line 973, in _bootstrap
self._bootstrap_inner()
File "/usr/local/Cellar/[email protected]/3.10.14/Frameworks/Python.framework/Versions/3.10/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/[email protected]/3.10.14/Frameworks/Python.framework/Versions/3.10/lib/python3.10/threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/initialize.py", line 149, in load_model
shared.sd_model # noqa: B018
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/shared_items.py", line 175, in sd_model
return modules.sd_models.model_data.get_sd_model()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 620, in get_sd_model
load_model()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 770, in load_model
with devices.autocast(), torch.no_grad():
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/devices.py", line 218, in autocast
if has_xpu() or has_mps() or cuda_no_autocast():
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/devices.py", line 28, in cuda_no_autocast
device_id = get_cuda_device_id()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/devices.py", line 40, in get_cuda_device_id
) or torch.cuda.current_device()
File "/Users/thomasmclaughlin/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/cuda/__init__.py", line 769, in current_device
_lazy_init()
File "/Users/thomasmclaughlin/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/cuda/__init__.py", line 289, in _lazy_init
raise AssertionError("Torch not compiled with CUDA enabled")
AssertionError: Torch not compiled with CUDA enabled
Stable diffusion model failed to load
changing setting sd_model_checkpoint to v1-5-pruned-emaonly.safetensors: AttributeError
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/options.py", line 165, in set
option.onchange()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 13, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/initialize_util.py", line 181, in <lambda>
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
*** Error completing request
*** Arguments: ('task(81zrcszrkhvbhsb)', <gradio.routes.Request object at 0x167751db0>, '', '', [], 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 0, 20, 'DPM++ 2M', 'Automatic', False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py", line 109, in txt2img
processed = processing.process_images(p)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py", line 832, in process_images
sd_models.reload_model_weights()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
---
*** Error completing request
*** Arguments: ('task(830it2y4e4bv7cc)', <gradio.routes.Request object at 0x16787f820>, 'tewst', '', [], 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 0, 20, 'DPM++ 2M', 'Automatic', False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py", line 109, in txt2img
processed = processing.process_images(p)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py", line 832, in process_images
sd_models.reload_model_weights()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
---
*** Error completing request
*** Arguments: ('task(01eqylyyp5fjkmj)', <gradio.routes.Request object at 0x166d9cb20>, 'test', '', [], 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 0, 20, 'DPM++ 2M', 'Automatic', False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py", line 109, in txt2img
processed = processing.process_images(p)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py", line 832, in process_images
sd_models.reload_model_weights()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
---
*** Error completing request
*** Arguments: ('task(cezri1uh4ecc63n)', <gradio.routes.Request object at 0x167762020>, 'test', '', [], 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 0, 20, 'DPM++ 2M', 'Automatic', False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py", line 109, in txt2img
processed = processing.process_images(p)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py", line 832, in process_images
sd_models.reload_model_weights()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
---
Additional information
No response
To work around the issue, you need to:
- Stop the webui;
- Delete the v1-5-pruned-emaonly.safetensors file (in the dir /path/to/stable-diffusion-webui/models/Stable-diffusion) which you downloaded;
- Start the webui again to let the webui download the v1-5-pruned-emaonly.safetensors file itself.
I don't know why the file downloaded by webui itself is OK but mine is not. I even checked the SHA256 checksum. It's so weird and wasted a lot of my time.
followed these steps: exactly same error
100%|██████████████████████████████████████| 3.97G/3.97G [15:08<00:00, 4.69MB/s]
Calculating sha256 for /Users/thomasmclaughlin/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors: Running on local URL: http://127.0.0.1:7860
To create a public link, set `share=True` in `launch()`.
Startup time: 920.8s (prepare environment: 0.2s, import torch: 5.7s, import gradio: 1.2s, setup paths: 1.6s, initialize shared: 0.1s, other imports: 1.2s, list SD models: 909.1s, load scripts: 0.7s, create ui: 0.4s, gradio launch: 0.5s).
6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa
Loading weights [6ce0161689] from /Users/thomasmclaughlin/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors
Creating model from config: /Users/thomasmclaughlin/stable-diffusion-webui/configs/v1-inference.yaml
/Users/thomasmclaughlin/stable-diffusion-webui/venv/lib/python3.10/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Applying attention optimization: InvokeAI... done.
loading stable diffusion model: AssertionError
Traceback (most recent call last):
File "/usr/local/Cellar/[email protected]/3.10.14/Frameworks/Python.framework/Versions/3.10/lib/python3.10/threading.py", line 973, in _bootstrap
self._bootstrap_inner()
File "/usr/local/Cellar/[email protected]/3.10.14/Frameworks/Python.framework/Versions/3.10/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/[email protected]/3.10.14/Frameworks/Python.framework/Versions/3.10/lib/python3.10/threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/initialize.py", line 149, in load_model
shared.sd_model # noqa: B018
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/shared_items.py", line 175, in sd_model
return modules.sd_models.model_data.get_sd_model()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 620, in get_sd_model
load_model()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 770, in load_model
with devices.autocast(), torch.no_grad():
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/devices.py", line 218, in autocast
if has_xpu() or has_mps() or cuda_no_autocast():
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/devices.py", line 28, in cuda_no_autocast
device_id = get_cuda_device_id()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/devices.py", line 40, in get_cuda_device_id
) or torch.cuda.current_device()
File "/Users/thomasmclaughlin/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/cuda/__init__.py", line 769, in current_device
_lazy_init()
File "/Users/thomasmclaughlin/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/cuda/__init__.py", line 289, in _lazy_init
raise AssertionError("Torch not compiled with CUDA enabled")
AssertionError: Torch not compiled with CUDA enabled
Stable diffusion model failed to load
changing setting sd_model_checkpoint to v1-5-pruned-emaonly.safetensors: AttributeError
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/options.py", line 165, in set
option.onchange()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 13, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/initialize_util.py", line 181, in <lambda>
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
*** Error completing request
*** Arguments: ('task(q0r8omuzr5jxt25)', <gradio.routes.Request object at 0x15fa8f8b0>, 'test', '', [], 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 0, 20, 'DPM++ 2M', 'Automatic', False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/txt2img.py", line 109, in txt2img
processed = processing.process_images(p)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/processing.py", line 832, in process_images
sd_models.reload_model_weights()
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 860, in reload_model_weights
sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 793, in reuse_model_from_already_loaded
send_model_to_cpu(sd_model)
File "/Users/thomasmclaughlin/stable-diffusion-webui/modules/sd_models.py", line 662, in send_model_to_cpu
if m.lowvram:
AttributeError: 'NoneType' object has no attribute 'lowvram'
---
It seems that it may not be due to file integrity, then — I have no other idea. Good luck!
When I completely reinstalled the project, it worked the first time, but once I added other models, the project would throw this exception. I found that after I deleted the models I had added manually, the project could run normally again.
I removed all models, did a fresh install, and tried switching to the default model (after it was pulled again). Here is what I see:
changing setting sd_model_checkpoint to v1-5-pruned-emaonly.safetensors: AttributeError
Traceback (most recent call last):
File "/Users/gusenits/development/stable_diff/stable-diffusion-webui/modules/options.py", line 165, in set
option.onchange()
File "/Users/gusenits/development/stable_diff/stable-diffusion-webui/modules/call_queue.py", line 13, in f
res = func(*args, **kwargs)
File "/Users/gusenits/development/stable_diff/stable-diffusion-webui/modules/initialize_util.py", line 181, in
Is it resolved?
Looks like there are only two possible solutions for this:
- update MacOS to 13+
- use CPU and lowvram params