UI-TARS-desktop icon indicating copy to clipboard operation
UI-TARS-desktop copied to clipboard

Build failed in HuggingFace Inference with 7B

Open tanys123 opened this issue 9 months ago • 1 comments

Exit code: 1. Reason: e076413b8db03f/build/torch26-cxx98-cu124-x86_64-linux/rotary/__init__.py\", line 16, in apply_rotary\n    ops.apply_rotary(x1, x2, cos, sin, out1, out2, conj)\n  File \"/usr/src/.venv/lib/python3.11/site-packages/torch/_ops.py\", line 1123, in __call__\n    return self._op(*args, **(kwargs or {}))\nRuntimeError: The size of tensor a (32768) must match the size of tensor b (32767) at non-singleton dimension 0"},"target":"text_generation_launcher"}
{"timestamp":"2025-03-17T15:22:13.292006Z","level":"ERROR","message":"Server error: The size of tensor a (32768) must match the size of tensor b (32767) at non-singleton dimension 0","target":"text_generation_router_v3::client","filename":"backends/v3/src/client/mod.rs","line_number":45,"span":{"name":"warmup"},"spans":[{"max_batch_size":"None","max_input_length":"Some(32767)","max_prefill_tokens":32768,"max_total_tokens":"Some(32768)","name":"warmup"},{"name":"warmup"}]}
Error: Backend(Warmup(Generation("The size of tensor a (32768) must match the size of tensor b (32767) at non-singleton dimension 0")))
{"timestamp":"2025-03-17T15:22:13.315573Z","level":"ERROR","fields":{"message":"Webserver Crashed"},"target":"text_generation_launcher"}
{"timestamp":"2025-03-17T15:22:13.315598Z","level":"INFO","fields":{"message":"Shutting down shards"},"target":"text_generation_launcher"}
{"timestamp":"2025-03-17T15:22:13.380843Z","level":"INFO","fields":{"message":"Terminating shard"},"target":"text_generation_launcher","span":{"rank":0,"name":"shard-manager"},"spans":[{"rank":0,"name":"shard-manager"}]}
{"timestamp":"2025-03-17T15:22:13.381044Z","level":"INFO","fields":{"message":"Waiting for shard to gracefully shutdown"},"target":"text_generation_launcher","span":{"rank":0,"name":"shard-manager"},"spans":[{"rank":0,"name":"shard-manager"}]}
{"timestamp":"2025-03-17T15:22:13.781570Z","level":"INFO","fields":{"message":"shard terminated"},"target":"text_generation_launcher","span":{"rank":0,"name":"shard-manager"},"spans":[{"rank":0,"name":"shard-manager"}]}
Error: WebserverFailed

This error — "The size of tensor a (32768) must match the size of tensor b (32767) at non-singleton dimension 0" — should be resolved by setting `CUDA_GRAPHS=0`.

Image

Setting `CUDA_GRAPHS=0` does not resolve the error in my case. What could be missing?

tanys123 avatar Mar 17 '25 15:03 tanys123

I am hitting essentially the same error here:

fields: {"message":"Method Warmup encountered an error.\nTraceback (most recent call last):\n File "/usr/src/.venv/bin/text-generation-server", line 10, in \n sys.exit(app())\n File "/usr/src/.venv/lib/python3.11/site-packages/typer/main.py", line 323, in call\n return get_command(self)(*args, **kwargs)\n File "/usr/src/.venv/lib/python3.11/site-packages/click/core.py", line 1161, in call\n return self.main(*args, **kwargs)\n File "/usr/src/.venv/lib/python3.11/site-packages/typer/core.py", line 743, in main\n return _main(\n File "/usr/src/.venv/lib/python3.11/site-packages/typer/core.py", line 198, in _main\n rv = self.invoke(ctx)\n File "/usr/src/.venv/lib/python3.11/site-packages/click/core.py", line 1697, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/usr/src/.venv/lib/python3.11/site-packages/click/core.py", line 1443, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/usr/src/.venv/lib/python3.11/site-packages/click/core.py", line 788, in invoke\n return __callback(*args, **kwargs)\n File "/usr/src/.venv/lib/python3.11/site-packages/typer/main.py", line 698, in wrapper\n return callback(**use_params)\n File "/usr/src/server/text_generation_server/cli.py", line 119, in serve\n server.serve(\n File "/usr/src/server/text_generation_server/server.py", line 315, in serve\n asyncio.run(\n File "/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/asyncio/runners.py", line 190, in run\n return runner.run(main)\n File "/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/asyncio/runners.py", line 118, in run\n return self._loop.run_until_complete(task)\n File "/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/asyncio/base_events.py", line 641, in run_until_complete\n self.run_forever()\n File "/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/asyncio/base_events.py", line 608, in run_forever\n self._run_once()\n File 
"/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/asyncio/base_events.py", line 1936, in _run_once\n handle._run()\n File "/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/asyncio/events.py", line 84, in _run\n self._context.run(self._callback, *self._args)\n File "/usr/src/.venv/lib/python3.11/site-packages/grpc_interceptor/server.py", line 165, in invoke_intercept_method\n return await self.intercept(\n> File "/usr/src/server/text_generation_server/interceptor.py", line 24, in intercept\n return await response\n File "/usr/src/.venv/lib/python3.11/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 120, in _unary_interceptor\n raise error\n File "/usr/src/.venv/lib/python3.11/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 111, in _unary_interceptor\n return await behavior(request_or_iterator, context)\n File "/usr/src/server/text_generation_server/server.py", line 144, in Warmup\n self.model.warmup(batch, max_input_tokens, max_total_tokens)\n File "/usr/src/server/text_generation_server/models/flash_causal_lm.py", line 1585, in warmup\n _, _batch, _ = self.generate_token(batch)\n File "/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/python3.11/contextlib.py", line 81, in inner\n return func(*args, **kwds)\n File "/usr/src/server/text_generation_server/models/flash_causal_lm.py", line 1971, in generate_token\n out, speculative_logits = self.forward(batch, adapter_data)\n File "/usr/src/server/text_generation_server/models/vlm_causal_lm.py", line 474, in forward\n logits, speculative_logits = self.model.forward(\n File "/usr/src/server/text_generation_server/models/custom_modeling/qwen2_vl.py", line 534, in forward\n hidden_states = self.text_model(\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n File 
"/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl\n return forward_call(*args, **kwargs)\n File "/usr/src/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py", line 315, in forward\n hidden_states = layer(\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl\n return forward_call(*args, **kwargs)\n File "/usr/src/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py", line 241, in forward\n attn_output = self.self_attn(\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl\n return forward_call(*args, **kwargs)\n File "/usr/src/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py", line 125, in forward\n self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl\n return forward_call(*args, **kwargs)\n File "/usr/src/server/text_generation_server/layers/rotary.py", line 59, in forward\n rotary.apply_rotary(q1, q2, cos, sin, q1, q2, False)\n File "/kernels/models--kernels-community--rotary/snapshots/4db658e027ec752840bb3f557ee076413b8db03f/build/torch26-cxx98-cu124-x86_64-linux/rotary/init.py", line 16, in apply_rotary\n ops.apply_rotary(x1, x2, cos, sin, out1, out2, conj)\n File "/usr/src/.venv/lib/python3.11/site-packages/torch/_ops.py", line 1123, in call\n return 
self._op(*args, **(kwargs or {}))\nRuntimeError: The size of tensor a (32768) must match the size of tensor b (32767) at non-singleton dimension 0"} target: "text_generation_launcher"

charlieqf avatar Mar 23 '25 03:03 charlieqf