Panic in wgpuDeviceRelease
Version: v25.0.2.2 OS: macOS 15.6.1
I get a panic when I run this code in a class destructor:
{
wgpuQueueRelease(_properties->queue);
wgpuDeviceRelease(_properties->device);
wgpuAdapterRelease(_properties->adapter);
wgpuSurfaceUnconfigure(_properties->surface);
wgpuSurfaceRelease(_properties->surface);
#ifdef __APPLE__
SDL_Metal_DestroyView(_properties->metal_view);
#endif
wgpuInstanceRelease(_properties->instance);
}
Trace:
thread '<unnamed>' panicked at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/thread/local.rs:281:25:
cannot access a Thread Local Storage value during or after destruction: AccessError
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
thread '<unnamed>' panicked at library/core/src/panicking.rs:225:5:
panic in a function that cannot unwind
stack backtrace:
0: 0x103a38c28 - std::backtrace_rs::backtrace::libunwind::trace::h674dcd02776dcc9c
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/../../backtrace/src/backtrace/libunwind.rs:117:9
1: 0x103a38c28 - std::backtrace_rs::backtrace::trace_unsynchronized::haccaae8fb80e4531
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/../../backtrace/src/backtrace/mod.rs:66:14
2: 0x103a38c28 - std::sys::backtrace::_print_fmt::h3191fc6495b0a516
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/sys/backtrace.rs:66:9
3: 0x103a38c28 - <std::sys::backtrace::BacktraceLock::print::DisplayBacktrace as core::fmt::Display>::fmt::h373e57e2286956dc
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/sys/backtrace.rs:39:26
4: 0x103a7ab00 - core::fmt::rt::Argument::fmt::hcee930b009d69e38
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/fmt/rt.rs:173:76
5: 0x103a7ab00 - core::fmt::write::h2c4a0b98b09e3b30
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/fmt/mod.rs:1465:25
6: 0x103a2e570 - std::io::default_write_fmt::h1b8f25d7cf9c86a4
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/io/mod.rs:639:11
7: 0x103a2e570 - std::io::Write::write_fmt::h00b4007fff731b84
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/io/mod.rs:1954:13
8: 0x103a38adc - std::sys::backtrace::BacktraceLock::print::h3eb1535b8d3666ca
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/sys/backtrace.rs:42:9
9: 0x103a3c24c - std::panicking::default_hook::{{closure}}::hf623c44b740b115f
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/panicking.rs:300:27
10: 0x103a3c09c - std::panicking::default_hook::h8875fb31ec87dfad
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/panicking.rs:327:9
11: 0x103a3cd5c - std::panicking::rust_panic_with_hook::hdd8ceeeb04975c2b
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/panicking.rs:833:13
12: 0x103a3c968 - std::panicking::begin_panic_handler::{{closure}}::hdf417b72ab8ffff8
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/panicking.rs:699:13
13: 0x103a390d4 - std::sys::backtrace::__rust_end_short_backtrace::h507d79c50996742e
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/sys/backtrace.rs:168:18
14: 0x103a3c66c - __rustc[5224e6b81cd82a8f]::rust_begin_unwind
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/panicking.rs:697:5
15: 0x103aebbe8 - core::panicking::panic_nounwind_fmt::runtime::h6c329565596434a4
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/panicking.rs:117:22
16: 0x103aebbe8 - core::panicking::panic_nounwind_fmt::h9dceef42c0a45ea6
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/intrinsics/mod.rs:2340:9
17: 0x103aebc60 - core::panicking::panic_nounwind::h9f7565b148aca0ea
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/panicking.rs:225:5
18: 0x103aebddc - core::panicking::panic_cannot_unwind::hf7c38fa209f60374
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/panicking.rs:330:5
19: 0x1030003b4 - wgpuDeviceRelease
at /Users/runner/work/wgpu-native/wgpu-native/src/lib.rs:2632:1
How can I find out what the reason is? I use the same release order as the Texture Arrays example - https://github.com/gfx-rs/wgpu-native/blob/trunk/examples/texture_arrays/main.c
Any ideas?
Here is the stack trace with "RUST_BACKTRACE=1":
thread '<unnamed>' panicked at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/thread/local.rs:281:25:
cannot access a Thread Local Storage value during or after destruction: AccessError
stack backtrace:
0: __rustc::rust_begin_unwind
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/panicking.rs:697:5
1: core::panicking::panic_fmt
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/panicking.rs:75:14
2: std::thread::local::panic_access_error
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/thread/local.rs:237:5
3: std::thread::local::LocalKey<T>::with
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/thread/local.rs:281:25
4: std::thread::local::LocalKey<core::cell::Cell<T>>::take
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/std/src/thread/local.rs:442:14
5: wgpu_core::snatch::trace::LockTrace::enter
at /Users/runner/.cargo/git/checkouts/wgpu-045f9a3b3e40a5c0/f35cf94/wgpu-core/src/snatch.rs:94:51
6: wgpu_core::snatch::SnatchLock::read
at /Users/runner/.cargo/git/checkouts/wgpu-045f9a3b3e40a5c0/f35cf94/wgpu-core/src/snatch.rs:147:9
7: wgpu_core::device::global::<impl wgpu_core::global::Global>::poll_single_device
at /Users/runner/.cargo/git/checkouts/wgpu-045f9a3b3e40a5c0/f35cf94/wgpu-core/src/device/global.rs:1976:51
8: wgpu_core::device::global::<impl wgpu_core::global::Global>::device_poll
at /Users/runner/.cargo/git/checkouts/wgpu-045f9a3b3e40a5c0/f35cf94/wgpu-core/src/device/global.rs:1965:34
9: <wgpu_native::WGPUDeviceImpl as core::ops::drop::Drop>::drop
at /Users/runner/work/wgpu-native/wgpu-native/src/lib.rs:185:27
10: core::ptr::drop_in_place<wgpu_native::WGPUDeviceImpl>
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/ptr/mod.rs:799:1
11: alloc::sync::Arc<T,A>::drop_slow
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/alloc/src/sync.rs:1943:18
12: <alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/alloc/src/sync.rs:2686:18
13: core::ptr::drop_in_place<alloc::sync::Arc<wgpu_native::WGPUDeviceImpl>>
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/ptr/mod.rs:799:1
14: core::mem::drop
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/core/src/mem/mod.rs:957:24
15: alloc::sync::Arc<T,A>::decrement_strong_count_in
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/alloc/src/sync.rs:1917:18
16: alloc::sync::Arc<T>::decrement_strong_count
at /rustc/29483883eed69d5fb4db01964cdf2af4d86e9cb2/library/alloc/src/sync.rs:1546:18
17: wgpuDeviceRelease
at /Users/runner/work/wgpu-native/wgpu-native/src/lib.rs:2634:5
I'm unsure, but my guess would be that there is another object, owned by that device, that has not yet been released.
@almarklein it looks like an issue with the order of runtime destructions. I have a C++ class like this (details omitted):
class GPUManager
{
public:
static GPUManager &get()
{
static GPUManager manager;
return manager;
}
~GPUManager()
{
wgpuDeviceRelease(_device);
}
};
And the trace points into snatch.rs, which has a thread-local static variable too:
mod trace {
std::thread_local! {
static SNATCH_LOCK_TRACE: Cell<Option<LockTrace>> = const { Cell::new(None) };
}
}
So when the process terminates, it looks like the Rust runtime calls rt::thread_cleanup() before the C++ runtime calls the destructor of the GPUManager class, and this leads to a panic because thread-local storage is no longer available to the Rust code. If I change my code and call wgpuDeviceRelease explicitly before exit, I don't get that panic.
If I had to guess, this is caused by both the C++ and Rust runtimes using some form of atexit() to run destructors for statics.
Rust's atexit callback is being invoked before C++'s, and because C++'s atexit callback then calls back into Rust — something Rust isn't really designed to handle — this causes the crash.
The best way I could see us fixing this would be going full no_std, which wgpu now supports