ComfyUI icon indicating copy to clipboard operation
ComfyUI copied to clipboard

Absurd memory allocation issue: the workflow runs fine on an RTX 3060 but crashes with out-of-memory errors on an RTX 4090, even at lower or similar settings (FLUX)

Open BrechtCorbeel opened this issue 6 months ago • 6 comments

Expected Behavior

run a prompt

Actual Behavior

Errors out with an out-of-memory exception, and the PC becomes indefinitely stuck/slow and laggy.

Steps to Reproduce

/

Debug Logs

Error occurred when executing ImpactWildcardEncode:

Allocation on device 0 would exceed allowed memory. (out of memory)
Currently allocated : 17.31 GiB
Requested : 7.72 GiB
Device limit : 23.99 GiB
Free (according to CUDA): 0 bytes
PyTorch limit (set by user-supplied memory fraction)
: 17179869184.00 GiB

File "W:\smatrix\Data\Packages\ComfyUI\execution.py", line 317, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "W:\smatrix\Data\Packages\ComfyUI\execution.py", line 192, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "W:\smatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "W:\smatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "\\Newestgtx1650\18 tb\smatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 2163, in doit
model, clip, conditioning = impact.wildcards.process_with_loras(wildcard_opt=populated, model=kwargs['model'], clip=kwargs['clip'], seed=kwargs['seed'], processed=processed)
File "\\Newestgtx1650\18 tb\smatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\wildcards.py", line 409, in process_with_loras
cur = nodes.CLIPTextEncode().encode(clip, prompt)[0]
File "W:\smatrix\Data\Packages\ComfyUI\nodes.py", line 65, in encode
output = clip.encode_from_tokens(tokens, return_pooled=True, return_dict=True)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\sd.py", line 126, in encode_from_tokens
o = self.cond_stage_model.encode_token_weights(tokens)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\flux.py", line 57, in encode_token_weights
t5_out, t5_pooled = self.t5xxl.encode_token_weights(token_weight_pairs_t5)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\sd1_clip.py", line 41, in encode_token_weights
o = self.encode(to_encode)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\sd1_clip.py", line 229, in encode
return self(tokens)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\sd1_clip.py", line 201, in forward
outputs = self.transformer(tokens, attention_mask_model, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state, dtype=torch.float32)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\t5.py", line 241, in forward
return self.encoder(x, *args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\t5.py", line 213, in forward
x, past_bias = l(x, mask, past_bias, optimized_attention)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\t5.py", line 189, in forward
x, past_bias = self.layer[0](x, mask, past_bias, optimized_attention)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\t5.py", line 176, in forward
output, past_bias = self.SelfAttention(self.layer_norm(x), mask=mask, past_bias=past_bias, optimized_attention=optimized_attention)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\t5.py", line 156, in forward
past_bias = self.compute_bias(x.shape[1], x.shape[1], x.device, x.dtype)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\text_encoders\t5.py", line 147, in compute_bias
values = self.relative_attention_bias(relative_position_bucket, out_dtype=dtype) # shape (query_length, key_length, num_heads)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\ops.py", line 211, in forward
return self.forward_comfy_cast_weights(*args, **kwargs)
File "W:\smatrix\Data\Packages\ComfyUI\comfy\ops.py", line 207, in forward_comfy_cast_weights
return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype)
File "W:\smatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\functional.py", line 2233, in embedding
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)

Other

It might be caused by really long prompts (memory requirements seem to increase exponentially at some point). However, I can run the same workflow — plus upscaling and twice as many steps — on a 3060 without problems, yet I get memory errors and lag on a 4090.

BrechtCorbeel avatar Aug 27 '24 12:08 BrechtCorbeel