stable-diffusion-webui-forge
stable-diffusion-webui-forge copied to clipboard
ControlNet IP-Adapter
*** Error running process_before_every_sampling: D:\webui_forge\webui\extensions-builtin\sd_forge_controlnet\scripts\controlnet.py Traceback (most recent call last): File "D:\webui_forge\webui\modules\scripts.py", line 892, in process_before_every_sampling script.process_before_every_sampling(p, *script_args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\webui_forge\webui\extensions-builtin\sd_forge_controlnet\scripts\controlnet.py", line 561, in process_before_every_sampling self.process_unit_before_every_sampling(p, unit, self.current_params[i], *args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\webui_forge\webui\extensions-builtin\sd_forge_controlnet\scripts\controlnet.py", line 507, in process_unit_before_every_sampling params.model.process_before_every_sampling(p, cond, mask, *args, **kwargs) File "D:\webui_forge\webui\extensions-builtin\sd_forge_ipadapter\scripts\forge_ipadapter.py", line 147, in process_before_every_sampling unet = opIPAdapterApply( File "D:\webui_forge\webui\extensions-builtin\sd_forge_ipadapter\lib_ipadapter\IPAdapterPlus.py", line 688, in apply_ipadapter clip_embed = clip_vision.encode_image(image) File "D:\webui_forge\webui\backend\patcher\clipvision.py", line 123, in encode_image outputs = self.model(pixel_values=pixel_values, output_hidden_states=True) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 1310, in forward vision_outputs = self.vision_model( File 
"D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 865, in forward hidden_states = self.embeddings(pixel_values) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 195, in forward patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "D:\webui_forge\webui\backend\operations.py", line 90, in forward return super().forward(x) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\conv.py", line 460, in forward return self._conv_forward(input, self.weight, self.bias) File "D:\webui_forge\system\python\lib\site-packages\torch\nn\modules\conv.py", line 456, in _conv_forward return F.conv2d(input, weight, bias, self.stride, RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.cuda.HalfTensor) should be the same