Loaded model config from [ControlNet/models/cldm_v15.yaml]
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /mnt/testing/skl/visual-chatgpt-main/visual_chatgpt.py:940 in │
│ │
│ 937 │ │ return state, state, txt + ' ' + image_filename + ' ' │
│ 938 │
│ 939 if __name__ == '__main__': │
│ ❱ 940 │ bot = ConversationBot() │
│ 941 │ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo: │
│ 942 │ │ chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT") │
│ 943 │ │ state = gr.State([]) │
│ │
│ /mnt/testing/skl/visual-chatgpt-main/visual_chatgpt.py:821 in __init__ │
│ │
│ 818 │ │ self.image2seg = image2seg() │
│ 819 │ │ self.seg2image = seg2image(device="cuda:3") │
│ 820 │ │ self.image2depth = image2depth() │
│ ❱ 821 │ │ self.depth2image = depth2image(device="cuda:3") │
│ 822 │ │ self.image2normal = image2normal() │
│ 823 │ │ self.normal2image = normal2image(device="cuda:5") │
│ 824 │ │ self.pix2pix = Pix2Pix(device="cuda:3") │
│ │
│ /mnt/testing/skl/visual-chatgpt-main/visual_chatgpt.py:667 in __init__ │
│ │
│ 664 │ def __init__(self, device): │
│ 665 │ │ print("Initialize depth2image model...") │
│ 666 │ │ model = create_model('ControlNet/models/cldm_v15.yaml', device=device).to(device │
│ ❱ 667 │ │ model.load_state_dict(load_state_dict('ControlNet/models/control_sd15_depth.pth' │
│ 668 │ │ self.model = model.to(device) │
│ 669 │ │ self.device = device │
│ 670 │ │ self.ddim_sampler = DDIMSampler(self.model) │
│ │
│ /mnt/testing/skl/visual-chatgpt-main/ControlNet/cldm/model.py:18 in load_state_dict │
│ │
│ 15 │ │ import safetensors.torch │
│ 16 │ │ state_dict = safetensors.torch.load_file(ckpt_path, device=location) │
│ 17 │ else: │
│ ❱ 18 │ │ state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(loca │
│ 19 │ state_dict = get_state_dict(state_dict) │
│ 20 │ print(f'Loaded state_dict from [{ckpt_path}]') │
│ 21 │ return state_dict │
│ │
│ /home/skl/miniconda3/envs/visgpt/lib/python3.8/site-packages/torch/serialization.py:705 in load │
│ │
│ 702 │ │ │ # If we want to actually tail call to torch.jit.load, we need to │
│ 703 │ │ │ # reset back to the original position. │
│ 704 │ │ │ orig_position = opened_file.tell() │
│ ❱ 705 │ │ │ with _open_zipfile_reader(opened_file) as opened_zipfile: │
│ 706 │ │ │ │ if _is_torchscript_zip(opened_zipfile): │
│ 707 │ │ │ │ │ warnings.warn("'torch.load' received a zip file that looks like a To │
│ 708 │ │ │ │ │ │ │ │ " dispatching to 'torch.jit.load' (call 'torch.jit.loa │
│ │
│ /home/skl/miniconda3/envs/visgpt/lib/python3.8/site-packages/torch/serialization.py:242 in │
│ __init__ │
│ │
│ 239 │
│ 240 class _open_zipfile_reader(_opener): │
│ 241 │ def __init__(self, name_or_buffer) -> None: │
│ ❱ 242 │ │ super(_open_zipfile_reader, self).__init__(torch._C.PyTorchFileReader(name_or_bu │
│ 243 │
│ 244 │
│ 245 class _open_zipfile_writer_file(_opener): │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory
I am encountering the same problem too...
Hi @kailaisun , @FlyingPTT , please try the new version to see whether the issue still occurs. Thanks~
Thank you. Now another problem occurs: the window keeps getting stuck here:
Entering new AgentExecutor chain...
