UI-TARS
Unable to load the 7B model with vLLM
Configuration file:
services:
  vllm-ui-tars:
    container_name: vllm-ui-tars
    hostname: vllm-ui-tars
    restart: no
    image: vllm/vllm-openai
    ipc: host
    environment:
      - use_fast=False
      - NVIDIA_VISIBLE_DEVICES=0
      - CUDA_DEVICE_ORDER=PCI_BUS_ID
      - HF_ENDPOINT=https://hf-mirror.com
      - TZ=Asia/Shanghai
    volumes:
      - ./:/models
      - ./sources.list:/etc/apt/sources.list
      - ./root:/root
    command: ["--model=/models/UI-TARS-7B-DPO", "--served-model-name=ui-tars", "--api-key=1qaz@WSX3edc", "--trust_remote_code"]
    ports:
      - 8000:8000
    networks:
      - vllm
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
networks:
  vllm:
    driver: bridge
    name: vllm
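For context, once the container starts successfully, the OpenAI-compatible endpoint defined above (port 8000, served model name ui-tars, and the api-key from the command line) could be smoke-tested with a short client call. This is only an illustrative sketch using the openai Python package, not part of the original report:

from openai import OpenAI

# Values taken directly from the compose file above:
# port 8000, --served-model-name=ui-tars, --api-key=1qaz@WSX3edc.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="1qaz@WSX3edc")
resp = client.chat.completions.create(
    model="ui-tars",
    messages=[{"role": "user", "content": "ping"}],
    max_tokens=8,
)
print(resp.choices[0].message.content)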
Versions and directories:
root@vllm-ui-tars:/vllm-workspace# vllm --version
INFO 04-09 09:41:45 [__init__.py:239] Automatically detected platform cuda.
0.8.3
root@vllm-ui-tars:/vllm-workspace#
root@vllm-ui-tars:/vllm-workspace# nvidia-smi
Wed Apr 9 09:41:52 2025
+-----------------------------------------------------------------------------------------+
| NVIDIA-SMI 570.133.07 Driver Version: 572.83 CUDA Version: 12.8 |
|-----------------------------------------+------------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+========================+======================|
| 0 NVIDIA GeForce RTX 4080 ... On | 00000000:01:00.0 Off | N/A |
| 0% 32C P8 12W / 320W | 14032MiB / 16376MiB | 0% Default |
| | | N/A |
+-----------------------------------------+------------------------+----------------------+
| 1 NVIDIA GeForce RTX 3060 On | 00000000:06:00.0 On | N/A |
| 0% 34C P0 30W / 170W | 3704MiB / 12288MiB | 4% Default |
| | | N/A |
+-----------------------------------------+------------------------+----------------------+
+-----------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=========================================================================================|
| No running processes found |
+-----------------------------------------------------------------------------------------+
root@vllm-ui-tars:/vllm-workspace# nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2024 NVIDIA Corporation
Built on Thu_Mar_28_02:18:24_PDT_2024
Cuda compilation tools, release 12.4, V12.4.131
Build cuda_12.4.r12.4/compiler.34097967_0
root@vllm-ui-tars:/vllm-workspace#
root@vllm-ui-tars:/vllm-workspace# tree /models
/models
|-- UI-TARS-7B-DPO
| |-- README.md
| |-- added_tokens.json
| |-- chat_template.json
| |-- config.json
| |-- generation_config.json
| |-- merges.txt
| |-- model-00001-of-00004.safetensors
| |-- model-00002-of-00004.safetensors
| |-- model-00003-of-00004.safetensors
| |-- model-00004-of-00004.safetensors
| |-- model.safetensors.index.json
| |-- preprocessor_config.json
| |-- special_tokens_map.json
| |-- tokenizer.json
| |-- tokenizer_config.json
| `-- vocab.json
|-- conda.bat
|-- docker-compose.yaml
|-- root
|-- sources.list
`-- test
3 directories, 19 files
root@vllm-ui-tars:/vllm-workspace#
Error message:
vllm-ui-tars | INFO 04-10 00:44:08 [config.py:600] This model supports multiple tasks: {'reward', 'score', 'classify', 'generate', 'embed'}. Defaulting to 'generate'.
vllm-ui-tars | INFO 04-10 00:44:08 [config.py:1780] Chunked prefill is enabled with max_num_batched_tokens=2048.
vllm-ui-tars | INFO 04-10 00:44:11 [__init__.py:239] Automatically detected platform cuda.
vllm-ui-tars | INFO 04-10 00:44:13 [core.py:61] Initializing a V1 LLM engine (v0.8.3) with config: model='/models/UI-TARS-7B-DPO', speculative_config=None, tokenizer='/models/UI-TARS-7B-DPO', skip_tokenizer_init=False, tokenizer_mode=auto, revi
sion=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=32768, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_al
l_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_me
trics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=ui-tars, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill
_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_w
ith_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,
280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
vllm-ui-tars | WARNING 04-10 00:44:14 [utils.py:2413] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7f6fce94e330>
vllm-ui-tars | INFO 04-10 00:44:14 [parallel_state.py:957] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0
vllm-ui-tars | WARNING 04-10 00:44:14 [interface.py:304] Using 'pin_memory=False' as WSL is detected. This may slow down the performance.
vllm-ui-tars | INFO 04-10 00:44:14 [cuda.py:221] Using Flash Attention backend on V1 engine.
vllm-ui-tars | Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in min
or differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] EngineCore hit an exception: Traceback (most recent call last):
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core.py", line 378, in run_engine_core
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] engine_core = EngineCoreProc(*args, **kwargs)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core.py", line 319, in __init__
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] super().__init__(vllm_config, executor_class, log_stats)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core.py", line 67, in __init__
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] self.model_executor = executor_class(vllm_config)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/executor/executor_base.py", line 52, in __init__
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] self._init_executor()
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/executor/uniproc_executor.py", line 46, in _init_executor
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] self.collective_rpc("init_device")
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/executor/uniproc_executor.py", line 56, in collective_rpc
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] answer = run_method(self.driver_worker, method, args, kwargs)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/utils.py", line 2347, in run_method
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] return func(*args, **kwargs)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/worker/worker_base.py", line 604, in init_device
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] self.worker.init_device() # type: ignore
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/worker/gpu_worker.py", line 120, in init_device
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] self.model_runner: GPUModelRunner = GPUModelRunner(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/worker/gpu_model_runner.py", line 144, in __init__
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/core/encoder_cache_manager.py", line 94, in compute_encoder_budget
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ) = _compute_encoder_budget_multimodal(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/v1/core/encoder_cache_manager.py", line 124, in _compute_encoder_budget_multimodal
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] .get_max_tokens_per_item_by_nonzero_modality(model_config)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/multimodal/registry.py", line 289, in get_max_tokens_per_item_by_nonzero_modality
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] self.get_max_tokens_per_item_by_modality(model_config).items()
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/multimodal/registry.py", line 263, in get_max_tokens_per_item_by_modality
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] return processor.info.get_mm_max_tokens_per_item(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/model_executor/models/qwen2_vl.py", line 827, in get_mm_max_tokens_per_item
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] "image": self.get_max_image_tokens(),
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/model_executor/models/qwen2_vl.py", line 915, in get_max_image_tokens
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] target_width, target_height = self.get_image_size_with_most_features()
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/model_executor/models/qwen2_vl.py", line 907, in get_image_size_with_most_features
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] max_image_size, _ = self._get_vision_info(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/model_executor/models/qwen2_vl.py", line 841, in _get_vision_info
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] image_processor = self.get_image_processor()
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/model_executor/models/qwen2_vl.py", line 810, in get_image_processor
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] return cached_image_processor_from_config(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/transformers_utils/processor.py", line 157, in cached_image_processor_from_config
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] return cached_get_image_processor(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/transformers_utils/processor.py", line 145, in get_image_processor
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] raise e
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/vllm/transformers_utils/processor.py", line 127, in get_image_processor
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] processor = AutoImageProcessor.from_pretrained(
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/transformers/models/auto/image_processing_auto.py", line 559, in from_pretrained
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] return image_processor_class.from_dict(config_dict, **kwargs)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/transformers/image_processing_base.py", line 422, in from_dict
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] image_processor = cls(**image_processor_dict)
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] File "/usr/local/lib/python3.12/dist-packages/transformers/models/qwen2_vl/image_processing_qwen2_vl.py", line 144, in __init__
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390] ValueError: size must contain 'shortest_edge' and 'longest_edge' keys.
vllm-ui-tars | ERROR 04-10 00:44:15 [core.py:390]
vllm-ui-tars | CRITICAL 04-10 00:44:15 [core_client.py:361] Got fatal signal from worker processes, shutting down. See stack trace above for root cause issue.
vllm-ui-tars | Traceback (most recent call last):
vllm-ui-tars | File "<frozen runpy>", line 198, in _run_module_as_main
vllm-ui-tars | File "<frozen runpy>", line 88, in _run_code
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 1121, in <module>
vllm-ui-tars | uvloop.run(run_server(args))
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/uvloop/__init__.py", line 109, in run
vllm-ui-tars | return __asyncio.run(
vllm-ui-tars | ^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/asyncio/runners.py", line 195, in run
vllm-ui-tars | return runner.run(main)
vllm-ui-tars | ^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/asyncio/runners.py", line 118, in run
vllm-ui-tars | return self._loop.run_until_complete(task)
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "uvloop/loop.pyx", line 1518, in uvloop.loop.Loop.run_until_complete
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/uvloop/__init__.py", line 61, in wrapper
vllm-ui-tars | return await main
vllm-ui-tars | ^^^^^^^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 1069, in run_server
vllm-ui-tars | async with build_async_engine_client(args) as engine_client:
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/contextlib.py", line 210, in __aenter__
vllm-ui-tars | return await anext(self.gen)
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 146, in build_async_engine_client
vllm-ui-tars | async with build_async_engine_client_from_engine_args(
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/contextlib.py", line 210, in __aenter__
vllm-ui-tars | return await anext(self.gen)
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 178, in build_async_engine_client_from_engine_args
vllm-ui-tars | async_llm = AsyncLLM.from_vllm_config(
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/async_llm.py", line 136, in from_vllm_config
vllm-ui-tars | return cls(
vllm-ui-tars | ^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/async_llm.py", line 102, in __init__
vllm-ui-tars | self.engine_core = EngineCoreClient.make_client(
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core_client.py", line 69, in make_client
vllm-ui-tars | return AsyncMPClient(vllm_config, executor_class, log_stats)
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core_client.py", line 570, in __init__
vllm-ui-tars | super().__init__(
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core_client.py", line 401, in __init__
vllm-ui-tars | engine.proc_handle.wait_for_startup()
vllm-ui-tars | File "/usr/local/lib/python3.12/dist-packages/vllm/v1/utils.py", line 127, in wait_for_startup
vllm-ui-tars | if self.reader.recv()["status"] != "READY":
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/multiprocessing/connection.py", line 250, in recv
vllm-ui-tars | buf = self._recv_bytes()
vllm-ui-tars | ^^^^^^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/multiprocessing/connection.py", line 430, in _recv_bytes
vllm-ui-tars | buf = self._recv(4)
vllm-ui-tars | ^^^^^^^^^^^^^
vllm-ui-tars | File "/usr/lib/python3.12/multiprocessing/connection.py", line 399, in _recv
vllm-ui-tars | raise EOFError
vllm-ui-tars | EOFError
vllm-ui-tars exited with code 1
It seems that the error was raised from the Qwen config. Could you please show more information?
You can follow this issue to solve it.
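For readers hitting the same ValueError, one commonly suggested workaround (an assumption here, not verified against the linked issue) is that the checkpoint's preprocessor_config.json still describes `size` in the older min_pixels/max_pixels layout, while the installed transformers Qwen2VLImageProcessor now requires 'shortest_edge' and 'longest_edge'. A minimal sketch of patching the file in place follows; the fallback values are placeholders and the path matches the volume mount in the compose file above:

import json
from pathlib import Path

# Hypothetical patch: rewrite the `size` field of preprocessor_config.json so it
# carries the 'shortest_edge'/'longest_edge' keys the newer Qwen2VLImageProcessor
# expects. Back up the file first; the fallback numbers are placeholders, adjust
# them to whatever the checkpoint actually specifies.
cfg_path = Path("/models/UI-TARS-7B-DPO/preprocessor_config.json")
cfg = json.loads(cfg_path.read_text())

size = cfg.get("size") or {}
if "shortest_edge" not in size or "longest_edge" not in size:
    size["shortest_edge"] = cfg.get("min_pixels", size.get("min_pixels", 3136))
    size["longest_edge"] = cfg.get("max_pixels", size.get("max_pixels", 12845056))
    cfg["size"] = size
    cfg_path.write_text(json.dumps(cfg, indent=2))
    print(f"patched size in {cfg_path}")
else:
    print("size already contains the expected keys")

Pinning transformers to an older release is another route some users report; both are hedged suggestions rather than a confirmed fix.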