Assertion error regarding CUDA_VISIBLE_DEVICES encountered during training.
While submitting a training task on our Slurm cluster, I ran into the following error: AssertionError: Please use the same HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES, inconsistant values found: 0,1,2,3 and 0. Below is my submission script:
#!/bin/bash
#SBATCH --job-name=DAPO-Qwen2.5-7B
#SBATCH --partition=gpuA800
#SBATCH -n 1
#SBATCH --ntasks-per-node=1
#SBATCH --gpus-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --output=DAPO.out
#SBATCH --error=DAPO.err
#SBATCH --nodelist=gpu4
# load necessary modules
# replace this information with your own
cd ~/project/verl
source activate verl
export CUDA_VISIBLE_DEVICES=0,1,2,3
set -xeuo pipefail
project_name='DAPO'
exp_name='DAPO-Qwen2.5-7B'
adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=512
gen_prompt_bsz=$((train_prompt_bsz * 3))
n_resp_per_prompt=16
train_prompt_mini_bsz=32
# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=1
# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/project/verl"}
MODEL_PATH=${MODEL_PATH:-"~/models/Qwen/Qwen2.5-7B-Instruct"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/output/Qwen2.5-7B/dapo"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}
# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7
# Performance-related parameters
sp_size=8
use_dynamic_bsz=True
actor_ppo_max_token_len=$((max_prompt_length + max_response_length))
infer_ppo_max_token_len=$((max_prompt_length + max_response_length))
offload=True
gen_tp=4
ray stop
CUDA_VISIBLE_DEVICES=0,1,2,3 ray start --head --num-gpus=4
# ray job submit --runtime-env="${RUNTIME_ENV}" \
# --working-dir "${WORKING_DIR}" \
# --
# Check whether the CUDA_VISIBLE_DEVICES environment variable is set
echo "CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES"
CUDA_VISIBLE_DEVICES=0,1,2,3 python3 -m recipe.dapo.main_dapo \
data.train_files="${TRAIN_FILE}" \
data.val_files="${TEST_FILE}" \
data.prompt_key=prompt \
data.truncation='left' \
data.max_prompt_length=${max_prompt_length} \
data.max_response_length=${max_response_length} \
data.gen_batch_size=${gen_prompt_bsz} \
data.train_batch_size=${train_prompt_bsz} \
actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
algorithm.adv_estimator=${adv_estimator} \
algorithm.use_kl_in_reward=${use_kl_in_reward} \
algorithm.kl_ctrl.kl_coef=${kl_coef} \
actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
actor_rollout_ref.actor.clip_ratio_c=10.0 \
algorithm.filter_groups.enable=${enable_filter_groups} \
algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
algorithm.filter_groups.metric=${filter_groups_metric} \
actor_rollout_ref.model.use_remove_padding=True \
actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
actor_rollout_ref.model.path="${MODEL_PATH}" \
actor_rollout_ref.model.enable_gradient_checkpointing=True \
actor_rollout_ref.actor.optim.lr=1e-6 \
actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
actor_rollout_ref.actor.optim.weight_decay=0.1 \
actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
actor_rollout_ref.actor.entropy_coeff=0 \
actor_rollout_ref.actor.grad_clip=1.0 \
actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \
actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
actor_rollout_ref.rollout.enable_chunked_prefill=True \
actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
actor_rollout_ref.rollout.temperature=${temperature} \
actor_rollout_ref.rollout.top_p=${top_p} \
actor_rollout_ref.rollout.top_k="${top_k}" \
actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
actor_rollout_ref.rollout.val_kwargs.do_sample=True \
actor_rollout_ref.rollout.val_kwargs.n=1 \
actor_rollout_ref.rollout.name=vllm \
actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
reward_model.reward_manager=dapo \
reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
reward_model.overlong_buffer.len=${overlong_buffer_len} \
reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
trainer.logger='["console","swanlab"]' \
trainer.project_name="${project_name}" \
trainer.experiment_name="${exp_name}" \
trainer.n_gpus_per_node=2 \
trainer.nnodes="${NNODES}" \
trainer.val_before_train=True \
trainer.test_freq=5 \
trainer.save_freq=5 \
trainer.total_epochs=1 \
trainer.default_local_dir="${CKPTS_DIR}" \
trainer.resume_mode=auto
wait
I submitted the script using the sbatch DAPO.sh command, and the error log is as follows:
+ project_name=DAPO
+ exp_name=DAPO-Qwen2.5-7B
+ adv_estimator=grpo
+ use_kl_in_reward=False
+ kl_coef=0.0
+ use_kl_loss=False
+ kl_loss_coef=0.0
+ clip_ratio_low=0.2
+ clip_ratio_high=0.28
+ max_prompt_length=2048
+ max_response_length=20480
+ enable_overlong_buffer=True
+ overlong_buffer_len=4096
+ overlong_penalty_factor=1.0
+ loss_agg_mode=token-mean
+ enable_filter_groups=True
+ filter_groups_metric=acc
+ max_num_gen_batches=10
+ train_prompt_bsz=512
+ gen_prompt_bsz=1536
+ n_resp_per_prompt=16
+ train_prompt_mini_bsz=32
+ RAY_ADDRESS=http://localhost:8265
+ WORKING_DIR=/public/home/Link/project/verl
+ RUNTIME_ENV=/public/home/Link/project/verl/verl/trainer/runtime_env.yaml
+ NNODES=1
+ RAY_DATA_HOME=/public/home/Link/project/verl
+ MODEL_PATH=/public/home/Link/models/Qwen/Qwen2.5-7B-Instruct
+ CKPTS_DIR=/public/home/Link/project/verl/output/Qwen2.5-7B/dapo
+ TRAIN_FILE=/public/home/Link/project/verl/data/dapo-math-17k.parquet
+ TEST_FILE=/public/home/Link/project/verl/data/aime-2024.parquet
+ temperature=1.0
+ top_p=1.0
+ top_k=-1
+ val_top_p=0.7
+ sp_size=8
+ use_dynamic_bsz=True
+ actor_ppo_max_token_len=22528
+ infer_ppo_max_token_len=22528
+ offload=True
+ gen_tp=4
+ ray stop
+ CUDA_VISIBLE_DEVICES=0,1,2,3
+ ray start --head --num-gpus=4
+ echo 'CUDA_VISIBLE_DEVICES: 0,1,2,3'
+ CUDA_VISIBLE_DEVICES=0,1,2,3
+ python3 -m recipe.dapo.main_dapo data.train_files=/public/home/Link/project/verl/data/dapo-math-17k.parquet data.val_files=/public/home/Link/project/verl/data/aime-2024.parquet data.prompt_key=prompt data.truncation=left data.max_prompt_length=2048 data.max_response_length=20480 data.gen_batch_size=1536 data.train_batch_size=512 actor_rollout_ref.rollout.n=16 algorithm.adv_estimator=grpo algorithm.use_kl_in_reward=False algorithm.kl_ctrl.kl_coef=0.0 actor_rollout_ref.actor.use_kl_loss=False actor_rollout_ref.actor.kl_loss_coef=0.0 actor_rollout_ref.actor.clip_ratio_low=0.2 actor_rollout_ref.actor.clip_ratio_high=0.28 actor_rollout_ref.actor.clip_ratio_c=10.0 algorithm.filter_groups.enable=True algorithm.filter_groups.max_num_gen_batches=10 algorithm.filter_groups.metric=acc actor_rollout_ref.model.use_remove_padding=True actor_rollout_ref.actor.use_dynamic_bsz=True actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True actor_rollout_ref.actor.ppo_max_token_len_per_gpu=22528 actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=22528 actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=22528 actor_rollout_ref.model.path=/public/home/Link/models/Qwen/Qwen2.5-7B-Instruct actor_rollout_ref.model.enable_gradient_checkpointing=True actor_rollout_ref.actor.optim.lr=1e-6 actor_rollout_ref.actor.optim.lr_warmup_steps=10 actor_rollout_ref.actor.optim.weight_decay=0.1 actor_rollout_ref.actor.ppo_mini_batch_size=32 actor_rollout_ref.actor.fsdp_config.param_offload=True actor_rollout_ref.actor.fsdp_config.optimizer_offload=True actor_rollout_ref.actor.entropy_coeff=0 actor_rollout_ref.actor.grad_clip=1.0 actor_rollout_ref.actor.loss_agg_mode=token-mean actor_rollout_ref.actor.ulysses_sequence_parallel_size=8 actor_rollout_ref.rollout.gpu_memory_utilization=0.80 actor_rollout_ref.rollout.tensor_model_parallel_size=4 actor_rollout_ref.rollout.enable_chunked_prefill=True actor_rollout_ref.rollout.max_num_batched_tokens=22528 actor_rollout_ref.rollout.temperature=1.0 actor_rollout_ref.rollout.top_p=1.0 actor_rollout_ref.rollout.top_k=-1 actor_rollout_ref.rollout.val_kwargs.temperature=1.0 actor_rollout_ref.rollout.val_kwargs.top_p=0.7 actor_rollout_ref.rollout.val_kwargs.top_k=-1 actor_rollout_ref.rollout.val_kwargs.do_sample=True actor_rollout_ref.rollout.val_kwargs.n=1 actor_rollout_ref.rollout.name=vllm actor_rollout_ref.ref.fsdp_config.param_offload=True actor_rollout_ref.ref.ulysses_sequence_parallel_size=8 actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 reward_model.reward_manager=dapo reward_model.overlong_buffer.enable=True reward_model.overlong_buffer.len=4096 reward_model.overlong_buffer.penalty_factor=1.0 'trainer.logger=["console","swanlab"]' trainer.project_name=DAPO trainer.experiment_name=DAPO-Qwen2.5-7B trainer.n_gpus_per_node=2 trainer.nnodes=1 trainer.val_before_train=True trainer.test_freq=5 trainer.save_freq=5 trainer.total_epochs=1 trainer.default_local_dir=/public/home/Link/project/verl/output/Qwen2.5-7B/dapo trainer.resume_mode=auto
2025-08-02 19:44:15,940 INFO worker.py:1747 -- Connecting to existing Ray cluster at address: 192.168.4.215:6379...
2025-08-02 19:44:15,955 INFO worker.py:1918 -- Connected to Ray cluster. View the dashboard at 127.0.0.1:8265
(TaskRunner pid=3251445) /public/home/Link/project/verl/recipe/dapo/main_dapo.py:155: UserWarning: Disabled critic as algorithm.adv_estimator != gae. If it is not intended, please set critic.enable=True
(TaskRunner pid=3251445) trainer = RayDAPOTrainer(
(TaskRunner pid=3251445) DeprecationWarning: `ray.state.available_resources_per_node` is a private attribute and access will be removed in a future Ray version.
(TaskRunner pid=3251445) WARNING:2025-08-02 19:44:35,708:Waiting for register center actor 2xDqeP_register_center to be ready. Elapsed time: 0 seconds out of 300 seconds.
Error executing job with overrides: ['data.train_files=/public/home/Link/project/verl/data/dapo-math-17k.parquet', 'data.val_files=/public/home/Link/project/verl/data/aime-2024.parquet', 'data.prompt_key=prompt', 'data.truncation=left', 'data.max_prompt_length=2048', 'data.max_response_length=20480', 'data.gen_batch_size=1536', 'data.train_batch_size=512', 'actor_rollout_ref.rollout.n=16', 'algorithm.adv_estimator=grpo', 'algorithm.use_kl_in_reward=False', 'algorithm.kl_ctrl.kl_coef=0.0', 'actor_rollout_ref.actor.use_kl_loss=False', 'actor_rollout_ref.actor.kl_loss_coef=0.0', 'actor_rollout_ref.actor.clip_ratio_low=0.2', 'actor_rollout_ref.actor.clip_ratio_high=0.28', 'actor_rollout_ref.actor.clip_ratio_c=10.0', 'algorithm.filter_groups.enable=True', 'algorithm.filter_groups.max_num_gen_batches=10', 'algorithm.filter_groups.metric=acc', 'actor_rollout_ref.model.use_remove_padding=True', 'actor_rollout_ref.actor.use_dynamic_bsz=True', 'actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True', 'actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True', 'actor_rollout_ref.actor.ppo_max_token_len_per_gpu=22528', 'actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=22528', 'actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=22528', 'actor_rollout_ref.model.path=/public/home/Link/models/Qwen/Qwen2.5-7B-Instruct', 'actor_rollout_ref.model.enable_gradient_checkpointing=True', 'actor_rollout_ref.actor.optim.lr=1e-6', 'actor_rollout_ref.actor.optim.lr_warmup_steps=10', 'actor_rollout_ref.actor.optim.weight_decay=0.1', 'actor_rollout_ref.actor.ppo_mini_batch_size=32', 'actor_rollout_ref.actor.fsdp_config.param_offload=True', 'actor_rollout_ref.actor.fsdp_config.optimizer_offload=True', 'actor_rollout_ref.actor.entropy_coeff=0', 'actor_rollout_ref.actor.grad_clip=1.0', 'actor_rollout_ref.actor.loss_agg_mode=token-mean', 'actor_rollout_ref.actor.ulysses_sequence_parallel_size=8', 'actor_rollout_ref.rollout.gpu_memory_utilization=0.80', 'actor_rollout_ref.rollout.tensor_model_parallel_size=4', 'actor_rollout_ref.rollout.enable_chunked_prefill=True', 'actor_rollout_ref.rollout.max_num_batched_tokens=22528', 'actor_rollout_ref.rollout.temperature=1.0', 'actor_rollout_ref.rollout.top_p=1.0', 'actor_rollout_ref.rollout.top_k=-1', 'actor_rollout_ref.rollout.val_kwargs.temperature=1.0', 'actor_rollout_ref.rollout.val_kwargs.top_p=0.7', 'actor_rollout_ref.rollout.val_kwargs.top_k=-1', 'actor_rollout_ref.rollout.val_kwargs.do_sample=True', 'actor_rollout_ref.rollout.val_kwargs.n=1', 'actor_rollout_ref.rollout.name=vllm', 'actor_rollout_ref.ref.fsdp_config.param_offload=True', 'actor_rollout_ref.ref.ulysses_sequence_parallel_size=8', 'actor_rollout_ref.actor.fsdp_config.fsdp_size=-1', 'reward_model.reward_manager=dapo', 'reward_model.overlong_buffer.enable=True', 'reward_model.overlong_buffer.len=4096', 'reward_model.overlong_buffer.penalty_factor=1.0', 'trainer.logger=["console","swanlab"]', 'trainer.project_name=DAPO', 'trainer.experiment_name=DAPO-Qwen2.5-7B', 'trainer.n_gpus_per_node=2', 'trainer.nnodes=1', 'trainer.val_before_train=True', 'trainer.test_freq=5', 'trainer.save_freq=5', 'trainer.total_epochs=1', 'trainer.default_local_dir=/public/home/Link/project/verl/output/Qwen2.5-7B/dapo', 'trainer.resume_mode=auto']
Traceback (most recent call last):
File "/public/home/Link/project/verl/recipe/dapo/main_dapo.py", line 33, in main
run_ppo(config)
File "/public/home/Link/project/verl/recipe/dapo/main_dapo.py", line 55, in run_ppo
ray.get(runner.run.remote(config))
File "/public/home/Link/anaconda3/envs/verl/lib/python3.10/site-packages/ray/_private/auto_init_hook.py", line 22, in auto_init_wrapper
return fn(*args, **kwargs)
File "/public/home/Link/anaconda3/envs/verl/lib/python3.10/site-packages/ray/_private/client_mode_hook.py", line 104, in wrapper
return func(*args, **kwargs)
File "/public/home/Link/anaconda3/envs/verl/lib/python3.10/site-packages/ray/_private/worker.py", line 2858, in get
values, debugger_breakpoint = worker.get_objects(object_refs, timeout=timeout)
File "/public/home/Link/anaconda3/envs/verl/lib/python3.10/site-packages/ray/_private/worker.py", line 958, in get_objects
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ActorDiedError): ray::TaskRunner.run() (pid=3251445, ip=192.168.4.215, actor_id=145647aa798a3d84d247c38301000000, repr=<main_dapo.TaskRunner object at 0x7efac81867a0>)
File "/public/home/Link/project/verl/recipe/dapo/main_dapo.py", line 165, in run
trainer.init_workers()
File "/public/home/Link/project/verl/verl/trainer/ppo/ray_trainer.py", line 864, in init_workers
self.actor_rollout_wg.init_model()
File "/public/home/Link/project/verl/verl/single_controller/ray/base.py", line 50, in __call__
output = ray.get(output)
ray.exceptions.ActorDiedError: The actor died because of an error raised in its creation task, ray::2xDqePWorkerDict_0:0:WorkerDict.__init__() (pid=3251903, ip=192.168.4.215, actor_id=08d9fc3aa64262e55e6e538501000000, repr=<verl.single_controller.ray.base.WorkerDict object at 0x7f8e63a7e8f0>)
File "/public/home/Link/project/verl/verl/single_controller/ray/base.py", line 791, in __init__
super().__init__()
File "/public/home/Link/project/verl/verl/single_controller/base/worker.py", line 148, in __init__
self._setup_env_cuda_visible_devices()
File "/public/home/Link/project/verl/verl/single_controller/base/worker.py", line 201, in _setup_env_cuda_visible_devices
assert val == cuda_val, (
AssertionError: Please use the same HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES, inconsistant values found: 0,1,2,3 and 0.
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
(WorkerDict pid=3251903) Exception raised in creation task: The actor died because of an error raised in its creation task, ray::2xDqePWorkerDict_0:0:WorkerDict.__init__() (pid=3251903, ip=192.168.4.215, actor_id=08d9fc3aa64262e55e6e538501000000, repr=<verl.single_controller.ray.base.WorkerDict object at 0x7f8e63a7e8f0>)
(WorkerDict pid=3251903) File "/public/home/Link/project/verl/verl/single_controller/ray/base.py", line 791, in __init__
(WorkerDict pid=3251903) super().__init__()
(WorkerDict pid=3251903) File "/public/home/Link/project/verl/verl/single_controller/base/worker.py", line 148, in __init__
(WorkerDict pid=3251903) self._setup_env_cuda_visible_devices()
(WorkerDict pid=3251903) File "/public/home/Link/project/verl/verl/single_controller/base/worker.py", line 201, in _setup_env_cuda_visible_devices
(WorkerDict pid=3251903) assert val == cuda_val, (
(WorkerDict pid=3251903) AssertionError: Please use the same HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES, inconsistant values found: 0,1,2,3 and 0.
I set the CUDA_VISIBLE_DEVICES environment variable at the beginning of the script, but it doesn't seem to take effect. Maybe some line of code overrides it later, but I can't find where.
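One way to narrow it down is to dump every *_VISIBLE_DEVICES variable right before ray start, to see whether Slurm (or something else in the batch environment) exports HIP_VISIBLE_DEVICES or ROCR_VISIBLE_DEVICES with a different value; if one of them shows up as 0, that is likely the second value in the assertion. A diagnostic sketch, not part of the script above:
# Diagnostic only: list every device-list variable the batch environment exports
env | grep -E '(CUDA|HIP|ROCR)_VISIBLE_DEVICES' || echo "no *_VISIBLE_DEVICES variables set"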
New issue: I get an error when I try to run with a single GPU:
ValueError: Please don't set ROCR_VISIBLE_DEVICES when HIP/CUDA_VISIBLE_DEVICES is set.
Same issue when using sbatch, but fine with salloc🤔 Have you solved this problem?
Not yet. Could you provide the script for running the training using salloc?
salloc --partition=gpu_llm --gres=gpu:8 --time=3-00:00:00 --mem=1000G --cpus-per-task=8 /bin/bash
singularity exec --nv /public/home/.../verl_llamafactory_20250722.sif \
    bash examples/grpo_trainer/run_qwen2_5_vl-7b.sh
✅
#!/bin/bash
#SBATCH --job-name=test_grpo
#SBATCH --partition=gpu_llm
#SBATCH --gres=gpu:8
#SBATCH --cpus-per-task=8
#SBATCH --time=3-00:00:00
#SBATCH --ntasks-per-node=1
#SBATCH --mem=1000G
#SBATCH --array=0
#SBATCH --output=./slurm_logs/%A_%a.log
#SBATCH --error=./slurm_logs/%A_%a.log
singularity exec --nv /public/home/.../verl_llamafactory_20250722.sif \
    bash examples/grpo_trainer/run_qwen2_5_vl-7b.sh
❌
It worked! Thank you for sharing. 👍
Same issue here; I simply commented out the related code:
# if cuda_val:
# raise ValueError("Please don't set ROCR_VISIBLE_DEVICES when HIP/CUDA_VISIBLE_DEVICES is set.")
Setting this should work.
export ROCR_VISIBLE_DEVICES=None
If you are using Ray, you can do something like this on your head node and the other nodes:
# Start Ray head node
srun --nodes=1 --ntasks=1 -w "$head_node" --export=ALL \
env -u ROCR_VISIBLE_DEVICES -u HIP_VISIBLE_DEVICES \
${CONDA_BIN_PATH}ray start --head --node-ip-address="$head_node_ip" --port=$port \
--num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus 8 --include-dashboard=True --block &
# Start Ray worker nodes
for ((i = 1; i < worker_num; i++)); do
node_i=${nodes[$i]}
echo "Starting WORKER $i at $node_i"
srun --nodes=1 --ntasks=1 -w "$node_i" --export=ALL \
env -u ROCR_VISIBLE_DEVICES -u HIP_VISIBLE_DEVICES \
${CONDA_BIN_PATH}ray start --address "$address_head" \
--num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus 8 --block &
done
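(env -u simply starts the Ray daemons with those two variables removed from their environment, so the worker processes Ray spawns never inherit the scheduler-injected values and only see the CUDA_VISIBLE_DEVICES that Ray itself assigns.)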
You could also unset the ROCm-related variables; this worked on my university cluster.
# ! For some reason, Great Lakes sets these two env vars
unset ROCR_VISIBLE_DEVICES
unset HIP_VISIBLE_DEVICES
# Sanity check
echo "Using $NNODES nodes for training..."
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"