swift icon indicating copy to clipboard operation
swift copied to clipboard

lora微调cogvlm打开量化报错 (Error when enabling quantization during LoRA fine-tuning of CogVLM)

Open zhuliyi0 opened this issue 3 months ago • 0 comments

CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
  --model_type cogvlm-17b-instruct \
  --sft_type lora \
  --tuner_backend swift \
  --dtype bf16 \
  --output_dir output \
  --dataset coco-mini-en-2 \
  --train_dataset_sample -1 \
  --num_train_epochs 6 \
  --max_length 2048 \
  --check_dataset_strategy warning \
  --lora_rank 8 \
  --lora_alpha 32 \
  --lora_dropout_p 0.05 \
  --lora_target_modules DEFAULT \
  --lora_dtype AUTO \
  --quantization_bit 4 \
  --bnb_4bit_comp_dtype AUTO \
  --gradient_checkpointing false \
  --batch_size 1 \
  --weight_decay 0.1 \
  --learning_rate 1e-04 \
  --gradient_accumulation_steps 1 \
  --max_grad_norm 0.5 \
  --warmup_ratio 0.03 \
  --eval_steps 200 \
  --save_steps 1000 \
  --save_total_limit 10 \
  --logging_steps 10 \
  --report_to wandb \
  --output_dir /root/autodl-tmp/output/uniCate2k-prune-cogvlm-lora-rk8-b1-1e4-dpspd2 \
  --add_output_dir_suffix false \
  --deepspeed default-zero2

run sh: python /root/swift/swift/cli/sft.py --model_type cogvlm-17b-instruct --sft_type lora --tuner_backend swift --dtype bf16 --output_dir output --dataset coco-mini-en-2 --train_dataset_sample -1 --num_train_epochs 6 --max_length 2048 --check_dataset_strategy warning --lora_rank 8 --lora_alpha 32 --lora_dropout_p 0.05 --lora_target_modules DEFAULT --lora_dtype AUTO --quantization_bit 4 --bnb_4bit_comp_dtype AUTO --gradient_checkpointing false --batch_size 1 --weight_decay 0.1 --learning_rate 1e-04 --gradient_accumulation_steps 1 --max_grad_norm 0.5 --warmup_ratio 0.03 --eval_steps 200 --save_steps 1000 --save_total_limit 10 --logging_steps 10 --report_to wandb --output_dir /root/autodl-tmp/output/uniCate2k-prune-cogvlm-lora-rk8-b1-1e4-dpspd2 --add_output_dir_suffix false --deepspeed default-zero2 2024-03-31 11:24:39,840 - modelscope - INFO - PyTorch version 2.2.1 Found. 2024-03-31 11:24:39,841 - modelscope - INFO - Loading ast index from /root/.cache/modelscope/ast_indexer 2024-03-31 11:24:39,876 - modelscope - INFO - Loading done! 
Current index file version is 1.13.1, with md5 7e5e05a3b3fecb96067212086f2824c0 and a total number of 972 components indexed [INFO:swift] Start time of running main: 2024-03-31 11:24:41.350395 [INFO:swift] Setting template_type: cogvlm-instruct [INFO:swift] Using deepspeed: {'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'optimizer': {'type': 'AdamW', 'params': {'lr': 'auto', 'betas': 'auto', 'eps': 'auto', 'weight_decay': 'auto'}}, 'scheduler': {'type': 'WarmupDecayLR', 'params': {'total_num_steps': 'auto', 'warmup_min_lr': 'auto', 'warmup_max_lr': 'auto', 'warmup_num_steps': 'auto'}}, 'zero_optimization': {'stage': 2, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 200000000.0, 'contiguous_gradients': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False} [INFO:swift] Setting args.lazy_tokenize: True [INFO:swift] args: SftArguments(model_type='cogvlm-17b-instruct', model_id_or_path='ZhipuAI/cogvlm-chat', model_revision='master', sft_type='lora', freeze_parameters=0.0, additional_trainable_parameters=[], tuner_backend='swift', template_type='cogvlm-instruct', output_dir='/root/autodl-tmp/output/uniCate2k-prune-cogvlm-lora-rk8-b1-1e4-dpspd2', add_output_dir_suffix=False, ddp_backend='nccl', ddp_find_unused_parameters=None, ddp_broadcast_buffers=None, seed=42, resume_from_checkpoint=None, dtype='bf16', dataset=['coco-mini-en-2'], dataset_seed=42, dataset_test_ratio=0.01, train_dataset_sample=-1, train_dataset_mix_ratio=None, train_dataset_mix_ds=['ms-bench'], val_dataset_sample=None, use_loss_scale=False, system=None, max_length=2048, 
truncation_strategy='delete', check_dataset_strategy='warning', custom_train_dataset_path=[], custom_val_dataset_path=[], self_cognition_sample=0, model_name=[None, None], model_author=[None, None], quantization_bit=4, bnb_4bit_comp_dtype='bf16', bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, lora_target_modules=['vision_expert_query_key_value', 'vision_expert_dense', 'language_expert_query_key_value', 'language_expert_dense'], lora_rank=8, lora_alpha=32, lora_dropout_p=0.05, lora_bias_trainable='none', lora_modules_to_save=[], lora_dtype='AUTO', lora_lr_ratio=None, use_rslora=False, lora_layers_to_transform=None, lora_layers_pattern=None, lora_rank_pattern={}, lora_alpha_pattern={}, lora_loftq_config={}, use_dora=False, use_galore=False, galore_rank=128, galore_target_modules=None, galore_update_proj_gap=50, galore_scale=1.0, galore_proj_type='std', galore_optim_per_parameter=False, galore_with_embedding=False, adalora_target_r=8, adalora_init_r=12, adalora_tinit=0, adalora_tfinal=0, adalora_deltaT=1, adalora_beta1=0.85, adalora_beta2=0.85, adalora_orth_reg_weight=0.5, ia3_target_modules=['DEFAULT'], ia3_feedforward_modules=[], ia3_modules_to_save=[], llamapro_num_new_blocks=4, llamapro_num_groups=None, neftune_noise_alpha=None, neftune_backend='transformers', gradient_checkpointing=False, deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'optimizer': {'type': 'AdamW', 'params': {'lr': 'auto', 'betas': 'auto', 'eps': 'auto', 'weight_decay': 'auto'}}, 'scheduler': {'type': 'WarmupDecayLR', 'params': {'total_num_steps': 'auto', 'warmup_min_lr': 'auto', 'warmup_max_lr': 'auto', 'warmup_num_steps': 'auto'}}, 'zero_optimization': {'stage': 2, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 
'reduce_bucket_size': 200000000.0, 'contiguous_gradients': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, batch_size=1, eval_batch_size=1, num_train_epochs=6, max_steps=-1, optim='adamw_torch', adam_beta1=0.9, adam_beta2=0.999, learning_rate=0.0001, weight_decay=0.1, gradient_accumulation_steps=1, max_grad_norm=0.5, predict_with_generate=False, lr_scheduler_type='linear', warmup_ratio=0.03, eval_steps=200, save_steps=1000, save_only_model=True, save_total_limit=10, logging_steps=10, dataloader_num_workers=1, dataloader_pin_memory=True, push_to_hub=False, hub_model_id=None, hub_token=None, hub_private_repo=False, push_hub_strategy='push_best', test_oom_error=False, disable_tqdm=False, lazy_tokenize=True, preprocess_num_proc=1, use_flash_attn=None, ignore_args_error=False, check_model_is_latest=True, logging_dir='/root/autodl-tmp/output/uniCate2k-prune-cogvlm-lora-rk8-b1-1e4-dpspd2/runs', report_to=['wandb'], acc_strategy='token', save_on_each_node=True, evaluation_strategy='steps', save_strategy='steps', save_safetensors=True, gpu_memory_fraction=None, max_new_tokens=2048, do_sample=True, temperature=0.3, top_k=20, top_p=0.7, repetition_penalty=1.0, num_beams=1, per_device_train_batch_size=None, per_device_eval_batch_size=None, only_save_model=None, neftune_alpha=None, deepspeed_config_path=None, model_cache_dir=None) device_count: 1 rank: -1, local_rank: -1, world_size: 1, local_world_size: 1 [INFO:swift] Global seed set to 42 [INFO:swift] quantization_config: {'quant_method': <QuantizationMethod.BITS_AND_BYTES: 'bitsandbytes'>, '_load_in_8bit': False, '_load_in_4bit': True, 'llm_int8_threshold': 6.0, 'llm_int8_skip_modules': None, 'llm_int8_enable_fp32_cpu_offload': False, 'llm_int8_has_fp16_weight': False, 'bnb_4bit_quant_type': 'nf4', 'bnb_4bit_use_double_quant': True, 'bnb_4bit_compute_dtype': torch.bfloat16} 
[INFO:swift] Downloading the model from ModelScope Hub, model_id: ZhipuAI/cogvlm-chat [WARNING:modelscope] Using the master branch is fragile, please use it with caution! [INFO:modelscope] Use user-specified model revision: master [WARNING:swift] CogAgent with FusedLayerNorm will cause an training loss of NAN, to avoid this, please uninstall apex. Loading checkpoint shards: 0%| | 0/8 [00:00<?, ?it/s] Traceback (most recent call last): File "/root/swift/swift/cli/sft.py", line 5, in sft_main() File "/root/swift/swift/utils/run_utils.py", line 31, in x_main result = llm_x(args, **kwargs) File "/root/swift/swift/llm/sft.py", line 73, in llm_sft model, tokenizer = get_model_tokenizer( File "/root/swift/swift/llm/utils/model.py", line 2740, in get_model_tokenizer model, tokenizer = get_function(model_dir, torch_dtype, model_kwargs, File "/root/swift/swift/llm/utils/model.py", line 544, in get_model_tokenizer_cogagent model, tokenizer = get_model_tokenizer_from_repo( File "/root/swift/swift/llm/utils/model.py", line 447, in get_model_tokenizer_from_repo model = automodel_class.from_pretrained( File "/root/miniconda3/lib/python3.10/site-packages/modelscope/utils/hf_util.py", line 113, in from_pretrained module_obj = module_class.from_pretrained(model_dir, *model_args, File "/root/miniconda3/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 556, in from_pretrained return model_class.from_pretrained( File "/root/miniconda3/lib/python3.10/site-packages/modelscope/utils/hf_util.py", line 76, in from_pretrained return ori_from_pretrained(cls, model_dir, *model_args, **kwargs) File "/root/miniconda3/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3502, in from_pretrained ) = cls._load_pretrained_model( File "/root/miniconda3/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3926, in _load_pretrained_model new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model( File 
"/root/miniconda3/lib/python3.10/site-packages/transformers/modeling_utils.py", line 802, in _load_state_dict_into_meta_model or (not hf_quantizer.check_quantized_param(model, param, param_name, state_dict)) File "/root/miniconda3/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py", line 124, in check_quantized_param if isinstance(module._parameters[tensor_name], bnb.nn.Params4bit): KeyError: 'inv_freq'

zhuliyi0 avatar Mar 31 '24 03:03 zhuliyi0