InvokeAI
[bug]: index 77 is out of bounds for dimension 0 with size 77
Is there an existing issue for this?
- [X] I have searched the existing issues
OS
macOS
GPU
mps
VRAM
32 GB
What happened?
_**This error occurred while generating an img2img image. I have rebooted the Mac (same error) and re-run install.sh to clean things up (same error). What should I do now?**_
Traceback (most recent call last):
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/backend/invoke_ai_web_server.py", line 1125, in generate_images
self.generate.prompt2image(
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/generate.py", line 492, in prompt2image
results = generator.generate(
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/invoke/generator/base.py", line 98, in generate
image = make_image(x_T)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/invoke/generator/txt2img.py", line 42, in make_image
samples, _ = sampler.sample(
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/ksampler.py", line 226, in sample
K.sampling.__dict__[f'sample_{self.schedule}'](
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/k_diffusion/sampling.py", line 518, in sample_dpmpp_2s_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/ksampler.py", line 52, in forward
next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/shared_invokeai_diffusion.py", line 107, in do_diffusion_step
unconditioned_next_x, conditioned_next_x = self.apply_standard_conditioning(x, sigma, unconditioning, conditioning)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/shared_invokeai_diffusion.py", line 123, in apply_standard_conditioning
unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice,
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/ksampler.py", line 38, in <lambda>
model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond))
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/k_diffusion/external.py", line 114, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/k_diffusion/external.py", line 140, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/ddpm.py", line 1441, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/ddpm.py", line 2167, in forward
out = self.diffusion_model(x, t, context=cc)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/diffusionmodules/openaimodel.py", line 806, in forward
h = module(h, emb, context)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/diffusionmodules/openaimodel.py", line 88, in forward
x = layer(x, context)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/attention.py", line 271, in forward
x = block(x, context=context)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/attention.py", line 221, in forward
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/diffusionmodules/util.py", line 159, in checkpoint
return func(*inputs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/attention.py", line 226, in _forward
x += self.attn2(self.norm2(x.clone()), context=context)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/modules/attention.py", line 199, in forward
r = self.get_invokeai_attention_mem_efficient(q, k, v)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/cross_attention_control.py", line 295, in get_invokeai_attention_mem_efficient
return self.einsum_op_mps_v1(q, k, v)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/cross_attention_control.py", line 250, in einsum_op_mps_v1
return self.einsum_lowest_level(q, k, v, None, None, None)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/cross_attention_control.py", line 229, in einsum_lowest_level
self.attention_slice_calculated_callback(attention_slice, dim, offset, slice_size)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/shared_invokeai_diffusion.py", line 69, in <lambda>
lambda slice, dim, offset, slice_size, key=key: callback(slice, dim, offset, slice_size, key))
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/shared_invokeai_diffusion.py", line 61, in callback
saver.add_attention_maps(slice, key)
File "/Users/stephane/invokeai/.venv/lib/python3.10/site-packages/ldm/models/diffusion/cross_attention_map_saving.py", line 31, in add_attention_maps
maps = maps[:, :, self.token_ids]
IndexError: index 77 is out of bounds for dimension 0 with size 77
Screenshots
No response
Additional context
No response
Contact Details
This is a known issue with long prompts in InvokeAI 2.2.4. You can try the 2.2.5 prerelease, which fixes it (the tokens are truncated as expected), or just use a shorter prompt for now.
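For context, here is a minimal PyTorch sketch of why an over-long prompt hits this `IndexError` (an illustration only, not the InvokeAI code itself; the tensor shapes and `token_ids` values below are made up). Stable Diffusion's CLIP text encoder has a 77-token context, so the saved attention maps have only 77 token positions, and indexing them with token ids at or beyond position 77, as in `maps[:, :, self.token_ids]` in `cross_attention_map_saving.py`, goes out of bounds.

```python
import torch

MAX_TOKENS = 77  # CLIP context length used by Stable Diffusion

# Hypothetical attention map with one slot per prompt-token position,
# standing in for `maps` in cross_attention_map_saving.py.
maps = torch.rand(8, 4096, MAX_TOKENS)

# A prompt longer than 77 tokens yields token positions at or past index 77.
token_ids = torch.arange(80)  # made-up ids for an over-long prompt

try:
    _ = maps[:, :, token_ids]  # out-of-range index -> IndexError, as in the report
except IndexError as err:
    print(err)

# Truncating the ids to the model's context length (what the 2.2.5 fix does,
# per the reply above, and what a shorter prompt achieves implicitly)
# avoids the error:
token_ids = token_ids[:MAX_TOKENS]
_ = maps[:, :, token_ids]  # indexes safely
```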
Thanks a lot for your reply.