Error
Traceback (most recent call last):
  File "/workspace/generative-models/.pt2/lib/python3.10/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 534, in _run_script
    exec(code, module.__dict__)
  File "/workspace/generative-models/scripts/demo/sampling.py", line 304, in <module>
    state2 = init_st(version_dict2, load_filter=False)
  File "/workspace/generative-models/.pt2/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 212, in wrapper
    return cached_func(*args, **kwargs)
  File "/workspace/generative-models/.pt2/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 241, in __call__
    return self._get_or_create_cached_value(args, kwargs)
  File "/workspace/generative-models/.pt2/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 267, in _get_or_create_cached_value
    return self._handle_cache_miss(cache, value_key, func_args, func_kwargs)
  File "/workspace/generative-models/.pt2/lib/python3.10/site-packages/streamlit/runtime/caching/cache_utils.py", line 321, in _handle_cache_miss
    computed_value = self._info.func(*func_args, **func_kwargs)
  File "/workspace/generative-models/scripts/demo/streamlit_helpers.py", line 46, in init_st
    model, msg = load_model_from_config(config, ckpt if load_ckpt else None)
  File "/workspace/generative-models/scripts/demo/streamlit_helpers.py", line 86, in load_model_from_config
    model = instantiate_from_config(config.model)
  File "/workspace/generative-models/sgm/util.py", line 175, in instantiate_from_config
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
  File "/workspace/generative-models/sgm/models/diffusion.py", line 48, in __init__
    model = instantiate_from_config(network_config)
  File "/workspace/generative-models/sgm/util.py", line 175, in instantiate_from_config
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
  File "/workspace/generative-models/sgm/modules/diffusionmodules/openaimodel.py", line 665, in __init__
    SpatialTransformer(
  File "/workspace/generative-models/sgm/modules/attention.py", line 678, in __init__
    [
  File "/workspace/generative-models/sgm/modules/attention.py", line 679, in <listcomp>
    BasicTransformerBlock(
  File "/workspace/generative-models/sgm/modules/attention.py", line 512, in __init__
    self.attn2 = attn_cls(
  File "/workspace/generative-models/sgm/modules/attention.py", line 365, in __init__
    self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
  File "/workspace/generative-models/.pt2/lib/python3.10/site-packages/torch/nn/modules/linear.py", line 96, in __init__
    self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
TypeError: empty(): argument 'size' must be tuple of ints, but found element of type ListConfig at pos 2
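The last line is the key part: context_dim is still an OmegaConf ListConfig when it reaches nn.Linear, and torch.empty() only accepts plain ints as sizes. A minimal standalone sketch that reproduces the same failure (the dimensions here are made up for illustration):

import torch.nn as nn
from omegaconf import OmegaConf

# context_dim as it comes out of the YAML config: a ListConfig, not an int or list
params = OmegaConf.create({"context_dim": [2048]})
context_dim = params.context_dim          # ListConfig, prints as [2048]

try:
    # same shape of call as sgm/modules/attention.py line 365
    nn.Linear(context_dim, 640, bias=False)
except TypeError as err:
    print(err)  # empty(): argument 'size' must be tuple of ints, but found element of type ListConfig ...

# converting to a plain Python list and taking the int element works
context_dim = list(context_dim)
layer = nn.Linear(context_dim[0], 640, bias=False)
print(layer.weight.shape)                 # torch.Size([640, 2048])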
I also get this error. No idea why; it was difficult enough to even get it to attempt anything, because the torch versions installed via requirements don't even install with CUDA.
I get the same error too. I've been stuck here for a while :(
same
same
The following helped me solve this problem:
[1] In sd_xl_refiner.yaml, replace the following code:
[2] In streamlit_helpers.py, add the following code:
You only need to fix the sgd/modules/attention.py file:

1. Add the following line to the top of the sgd/modules/attention.py file:

from omegaconf import ListConfig

2. Insert these lines in the middle of the sgd/modules/attention.py file, around line 655 (a standalone sketch of what they do follows below):

if exists(context_dim) and isinstance(context_dim, ListConfig):
    context_dim = list(context_dim)
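For reference, a standalone sketch of what those two lines do; exists is the small helper used in that file (redefined here so the snippet runs on its own), and the context_dim value is illustrative:

from omegaconf import ListConfig, OmegaConf

def exists(val):
    # same convention as the helper in the attention module
    return val is not None

# context_dim arrives from the model YAML as a ListConfig
context_dim = OmegaConf.create({"context_dim": [2048]}).context_dim
print(type(context_dim).__name__)        # ListConfig

# the two inserted lines: normalise the ListConfig into a plain Python list
if exists(context_dim) and isinstance(context_dim, ListConfig):
    context_dim = list(context_dim)

# the existing isinstance(context_dim, list) branches now match, and each
# element is a plain int that nn.Linear / torch.empty accept
print(type(context_dim).__name__, context_dim)   # list [2048]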
sgd/modules/attention.py
Shouldn't this be in sgm/modules/attention.py?