Mac mini M2
macOS 15.3.1 (24D70)
Python 3.13.2
pip 25.0.1
Requirement already satisfied: torch in /opt/homebrew/lib/python3.13/site-packages (2.6.0)
Requirement already satisfied: torchvision in /opt/homebrew/lib/python3.13/site-packages (0.21.0)
Requirement already satisfied: torchaudio in /opt/homebrew/lib/python3.13/site-packages (2.6.0)
File "/Users/haloai01/Downloads/qwenvl.py", line 55, in <module>
generated_ids = model.generate(**inputs, max_new_tokens=128)
File "/opt/homebrew/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/opt/homebrew/lib/python3.13/site-packages/transformers/generation/utils.py", line 2228, in generate
result = self._sample(
input_ids,
...<5 lines>...
**model_kwargs,
)
File "/opt/homebrew/lib/python3.13/site-packages/transformers/generation/utils.py", line 3211, in _sample
outputs = self(**model_inputs, return_dict=True)
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/homebrew/lib/python3.13/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 1740, in forward
image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/homebrew/lib/python3.13/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 501, in forward
hidden_states = blk(
hidden_states,
cu_seqlens=cu_seqlens_now,
rotary_pos_emb=rotary_pos_emb,
)
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/homebrew/lib/python3.13/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 297, in forward
hidden_states = hidden_states + self.attn(
~~~~~~~~~^
self.norm1(hidden_states),
^^^^^^^^^^^^^^^^^^^^^^^^^^
cu_seqlens=cu_seqlens,
^^^^^^^^^^^^^^^^^^^^^^
rotary_pos_emb=rotary_pos_emb,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/homebrew/lib/python3.13/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 272, in forward
attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0)
IndexError: Dimension out of range (expected to be in range of [-3, 2], but got 3)