deforum-stable-diffusion
generation issue
Hello, I have a technical problem. Here are the details:

My GPU: RTX 3050 (laptop)

My prompts (the default ones):
{
  "0": "tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera",
  "30": "anthropomorphic clean cat, surrounded by fractals, epic angle and pose, symmetrical, 3d, depth of field, ruan jia and fenghua zhong",
  "60": "a beautiful coconut --neg photo, realistic",
  "90": "a beautiful durian, trending on Artstation"
}

My error:
Error completing request
Arguments: ('task(9mzdxu2vwo7fyqm)', None, False, None, '2D', 120, 'replicate', '0:(0)', '0:(1.0025+0.002*sin(1.25*3.14*t/30))', '0:(0)', '0:(0)', '0:(1.75)', '0:(0.5)', '0:(0.5)', '0:(0)', '0:(0)', '0:(0)', False, '0:(0)', '0:(0)', '0:(0)', '0:(53)', '0: (0.065)', '0: (0.65)', '0: (1.0)', '0: (7)', '0:(1.5)', False, '0:(1)', '0:(0)', False, '0: (25)', '0: (70)', '0: (1)', False, '0: (200)', '0: (10000)', '0:(s), 1:(-1), "max_f-2":(-1), "max_f-1":(s)', False, '0: ("Euler a")', '0: ("{video_mask}")', False, '0: ("{video_mask}")', False, '0: ("model1.ckpt"), 100: ("model2.safetensors")', False, '0: (2)', True, '0: (1.05)', '0: (5)', '0: (1.0)', '0: (0.1)', '0: (0.0)', 'LAB', '', 1.0, False, '2', 'None', '0: (1)', 'None', '0: (1)', '0', 'perlin', 8, 8, 4, 0.5, True, False, 0.2, 'border', 'bicubic', False, 'https://deforum.github.io/a1/V1.mp4', 1, 0, -1, False, False, 'https://deforum.github.io/a1/VM1.mp4', False, '20230129210106', '{\n "0": "tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera",\n "30": "anthropomorphic clean cat, surrounded by fractals, epic angle and pose, symmetrical, 3d, depth of field, ruan jia and fenghua zhong",\n "60": "a beautiful coconut --neg photo, realistic",\n "90": "a beautiful durian, trending on Artstation"\n}\n ', '', '', 512, 512, False, False, -1, 'Euler a', False, 0, 0, 25, 0.0, 1, True, True, False, False, False, 'Deforum_{timestring}', '{timestring}_{index}_{prompt}.png', 'iter', 1, False, False, True, 0.8, 'https://deforum.github.io/a1/I1.png', False, False, False, True, 'https://deforum.github.io/a1/M1.jpg', 1.0, 1.0, 4, 1, True, 4, 'reroll', 10.0, False, 15, False, False, 'FFMPEG mp4', 'None', 'https://deforum.github.io/a1/A1.mp3', False, 'realesr-animevideov3', 'x2', True, False, 'x0_pred', 'C:/SD/20230124234916_%09d.png', 'testvidmanualsettings.mp4', False, 'None', 2, False, 2, False, None, True, False, 'None', True, 'None', False, 'RAFT', 'None', False, 'None', False, 'None', False, False, '0:(0.5)', '0:(1)', '0:(0.5)', '0:(1)', '0:(100)', '0:(0)', False, '{\n "0": "https://deforum.github.io/a1/Gi1.png",\n "max_f/4-5": "https://deforum.github.io/a1/Gi2.png",\n "max_f/2-10": "https://deforum.github.io/a1/Gi3.png",\n "3*max_f/4-15": "https://deforum.github.io/a1/Gi4.jpg",\n "max_f-20": "https://deforum.github.io/a1/Gi1.png"\n}', '0:(0.75)', '0:(0.35)', '0:(0.25)', '0:(20)', '0:(0.075)') {}
Traceback (most recent call last):
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\call_queue.py", line 56, in f
res = list(func(*args, **kwargs))
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\extensions\deforum\scripts\deforum.py", line 101, in run_deforum
render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui/extensions/deforum/scripts\deforum_helpers\render.py", line 549, in render_animation
image = generate(args, keys, anim_args, loop_args, controlnet_args, root, frame_idx, sampler_name=scheduled_sampler_name)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui/extensions/deforum/scripts\deforum_helpers\generate.py", line 64, in generate
image, caught_vae_exception = generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, frame, return_sample, sampler_name)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui/extensions/deforum/scripts\deforum_helpers\generate.py", line 98, in generate_with_nans_check
raise e
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui/extensions/deforum/scripts\deforum_helpers\generate.py", line 92, in generate_with_nans_check
image = generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, frame, return_sample, sampler_name)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui/extensions/deforum/scripts\deforum_helpers\generate.py", line 235, in generate_inner
processed = processing.process_images(p_txt)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\processing.py", line 503, in process_images
res = process_images_inner(p)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\processing.py", line 653, in process_images_inner
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\processing.py", line 869, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 358, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 234, in launch_sampling
return func()
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 358, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 152, in forward
devices.test_for_nans(x_out, "unet")
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\modules\devices.py", line 152, in test_for_nans
raise NansException(message)
modules.devices.NansException: A tensor with all NaNs was produced in Unet. This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the "Upcast cross attention layer to float32" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this. Use --disable-nan-check commandline argument to disable this check.
Traceback (most recent call last):
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 987, in postprocess_data
if predictions[i] is components._Keywords.FINISHED_ITERATING:
IndexError: tuple index out of range
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 394, in run_predict
output = await app.get_blocks().process_api(
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1078, in process_api
data = self.postprocess_data(fn_index, result["prediction"], state)
File "C:\Users\Axel\Desktop\prompt\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 991, in postprocess_data
raise ValueError(
ValueError: Number of output components does not match number of values returned from from function f
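For what it's worth, the NansException message above suggests two workarounds: enabling "Upcast cross attention layer to float32" under Settings > Stable Diffusion, or passing --no-half on the command line. A minimal sketch of what the command-line variant would look like in webui-user.bat (assuming the standard Windows launcher script is used to start the webui; I haven't verified that this actually fixes the NaNs on an RTX 3050):

rem webui-user.bat -- pass the flag suggested by the NansException message
set COMMANDLINE_ARGS=--no-half

rem Alternative mentioned in the same message: skip the NaN check entirely
rem (this only hides the symptom rather than fixing the precision issue)
rem set COMMANDLINE_ARGS=--disable-nan-check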
My model: https://civitai.com/models/7371/rev-animated

Does anyone have an idea?