加载visualglm模型的时候报错:
For torch.distributed users or loading model parallel models, set environment variables RANK, WORLD_SIZE and LOCAL_RANK.
Traceback (most recent call last):
File "/root/TransGPT/multi_modal/hf_infer.py", line 3, in <module>
model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
File "/root/.conda/envs/demo/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 558, in from_pretrained
return model_class.from_pretrained(
File "/root/.conda/envs/demo/lib/python3.10/site-packages/transformers/modeling_utils.py", line 2966, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/THUDM/visualglm-6b/f4f759acde0926fefcd35e2c626e08adb452eff8/modeling_chatglm.py", line 1345, in __init__
self.image_encoder = BLIP2(config.eva_config, config.qformer_config)
File "/root/.cache/huggingface/modules/transformers_modules/THUDM/visualglm-6b/f4f759acde0926fefcd35e2c626e08adb452eff8/visual.py", line 59, in __init__
self.vit = EVAViT(EVAViT.get_args(**eva_args))
File "/root/.cache/huggingface/modules/transformers_modules/THUDM/visualglm-6b/f4f759acde0926fefcd35e2c626e08adb452eff8/visual.py", line 20, in __init__
super().__init__(args, transformer=transformer, parallel_output=parallel_output, **kwargs)
File "/root/.conda/envs/demo/lib/python3.10/site-packages/sat/model/official/vit_model.py", line 111, in __init__
super().__init__(args, transformer=transformer, **kwargs)
File "/root/.conda/envs/demo/lib/python3.10/site-packages/sat/model/base_model.py", line 93, in __init__
self.transformer = BaseTransformer(
TypeError: sat.model.transformer.BaseTransformer() got multiple values for keyword argument 'parallel_output'