stable-diffusion-webui-forge
Additional Network Extension support
Checklist
- [ ] The issue exists after disabling all extensions
- [ ] The issue exists on a clean installation of webui
- [x] The issue is caused by an extension, but I believe it is caused by a bug in the webui
- [x] The issue exists in the current version of the webui
- [ ] The issue has not been reported before recently
- [ ] The issue has been reported before but has not been fixed yet
What happened?
If you try to start a generation with a LoRA loaded through the Additional Networks extension, it fails with an error.
Steps to reproduce the problem
Load a LoRA via the Additional Networks extension and start a generation.
What should have happened?
The LoRA should load and the image should generate without an error.
What browsers do you use to access the UI?
No response
Sysinfo
Console logs
```
Traceback (most recent call last):
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules_forge\main_thread.py", line 37, in loop
task.work()
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules_forge\main_thread.py", line 26, in work
self.result = self.func(*self.args, **self.kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\txt2img.py", line 111, in txt2img_function
processed = processing.process_images(p)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\processing.py", line 750, in process_images
res = process_images_inner(p)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\processing.py", line 921, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\processing.py", line 1276, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\sd_samplers_kdiffusion.py", line 251, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\sd_samplers_common.py", line 263, in launch_sampling
return func()
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\sd_samplers_kdiffusion.py", line 251, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\sd_samplers_cfg_denoiser.py", line 182, in forward
denoised = forge_sampler.forge_sample(self, denoiser_params=denoiser_params,
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules_forge\forge_sampler.py", line 82, in forge_sample
denoised = sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options, seed)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\modules\samplers.py", line 289, in sampling_function
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\modules\samplers.py", line 258, in calc_cond_uncond_batch
output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\modules\model_base.py", line 89, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 867, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 49, in forward_timestep_embed
x = layer(x, emb)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 244, in forward
return checkpoint(
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\ldm\modules\diffusionmodules\util.py", line 194, in checkpoint
return func(*inputs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 257, in _forward
h = self.in_layers(x)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\container.py", line 215, in forward
input = module(input)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\extensions\sd-webui-additional-networks\scripts\lora_compvis.py", line 91, in forward
return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 460, in forward
return self._conv_forward(input, self.weight, self.bias)
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 456, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Input type (torch.cuda.HalfTensor) and weight type (torch.FloatTensor) should be the same
Input type (torch.cuda.HalfTensor) and weight type (torch.FloatTensor) should be the same
*** Error completing request
*** Arguments: ('task(a2xh97sakanxenm)', <gradio.routes.Request object at 0x000001CCD28F3FD0>, '', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, ControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_mask_dir='', batch_input_gallery=[], batch_mask_gallery=[], generated_image=None, mask_image=None, hr_option='Both', enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), ControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_mask_dir='', batch_input_gallery=[], batch_mask_gallery=[], generated_image=None, mask_image=None, hr_option='Both', enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), ControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_mask_dir='', batch_input_gallery=[], batch_mask_gallery=[], generated_image=None, mask_image=None, hr_option='Both', enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), True, False, 'LoRA', 'LCM_LoRA_Weights_SD15(aaebf6360f7d)', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, None, 'Refresh models', False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "C:\Users\dervl\webui_forge_cu121_torch21\webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
TypeError: 'NoneType' object is not iterable
```
Additional information
With the normal LoRA tab it works, but that way you can't really use it with the X/Y/Z plot.
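Looking at the traceback, the crash site is the extension's injected forward in lora_compvis.py: the lora_up/lora_down layers are still float32 (and apparently never moved when Forge cast the UNet to fp16 on CUDA), so F.conv2d receives an fp16 CUDA input against fp32 weights. Below is a minimal sketch of the mismatch and the usual fix, casting the injected weights to match the input; the class and parameter names are illustrative, not the extension's exact code:

```python
import torch
import torch.nn as nn

# Hypothetical stand-in for the module lora_compvis.py injects (simplified).
class LoRAConv(nn.Module):
    def __init__(self, org_module: nn.Conv2d, rank: int = 4, multiplier: float = 1.0):
        super().__init__()
        self.org_module = org_module
        self.lora_down = nn.Conv2d(org_module.in_channels, rank, 1, bias=False)
        self.lora_up = nn.Conv2d(rank, org_module.out_channels, 1, bias=False)
        self.multiplier = multiplier

    def forward(self, x):
        # The fix: move the LoRA weights to the input's device/dtype instead of
        # assuming they were moved when the UNet was cast to fp16 on CUDA.
        if self.lora_down.weight.dtype != x.dtype or self.lora_down.weight.device != x.device:
            self.lora_down.to(device=x.device, dtype=x.dtype)
            self.lora_up.to(device=x.device, dtype=x.dtype)
        return self.org_module(x) + self.lora_up(self.lora_down(x)) * self.multiplier

# Mirrors the report's setup when CUDA is available; falls back to CPU/fp32.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

org = nn.Conv2d(4, 4, 3, padding=1).to(device=device, dtype=dtype)
layer = LoRAConv(org)  # LoRA weights are created fp32 on CPU, as in the report
x = torch.randn(1, 4, 8, 8, device=device, dtype=dtype)
y = layer(x)  # without the cast above, this raises the reported RuntimeError
print(y.shape)
```

That would also explain why the built-in LoRA tab works: Forge applies native LoRAs through its own model patcher, which keeps everything in the same dtype and on the same device as the UNet.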
Do you prefer this extension since you can add these training parameters to the X/Y/Z plot?
https://github.com/kohya-ss/sd-webui-additional-networks/blob/e9f3d622b5a98650008a685ea23b27eb810da35a/scripts/addnet_xyz_grid_support.py#L9-L52
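For reference, the linked lines work by locating the webui's built-in xyz_grid script at startup and appending extra AxisOption entries to it, roughly like the sketch below (simplified; apply_module and the stored attribute name are illustrative, not the extension's actual code):

```python
from modules import scripts

def add_axis_options():
    # Find the built-in X/Y/Z plot script among the loaded scripts.
    xyz_grid = None
    for script_data in scripts.scripts_data:
        if script_data.script_class.__module__ == "xyz_grid.py":
            xyz_grid = script_data.module
            break
    if xyz_grid is None:
        return  # X/Y/Z plot script not loaded

    def apply_module(p, x, xs):
        # Store the chosen value on the processing object; the extension
        # reads it back later when it activates its networks.
        setattr(p, "addnet_module_1", x)

    xyz_grid.axis_options.append(
        xyz_grid.AxisOption("[AddNet] Module 1", str, apply_module)
    )
```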
IMO, this functionality should be implemented upstream in the webui repo. Kohya themselves deprecated this extension nearly a year ago, as LoRAs are now supported natively and run faster in the webui.
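For anyone comparing: native LoRA support needs no extra UI at all, the tag goes straight into the prompt (filename and weight below are placeholders):

```text
masterpiece, 1girl, <lora:myLoraFile:0.8>
```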
I needed this extension to be able to prompt with multiple LoRAs in the same image; that is what I use it for. Are there any alternatives? In ComfyUI the equivalent would be regional prompting.
Prompt S/R can be used.
You can use Prompt S/R as stated above, or this instead: https://github.com/Yinzo/sd-webui-Lora-queue-helper
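For completeness: Prompt S/R (search/replace) in the X/Y/Z plot script takes the first value as the string to search for and the remaining values as its replacements, so you can swap LoRA tags across the grid. A sketch with placeholder names:

```text
Prompt:   masterpiece, 1girl, <lora:styleA:0.8>
X type:   Prompt S/R
X values: styleA, styleB, styleC
```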