diff --git a/crates/c-api/src/exnref.rs b/crates/c-api/src/exnref.rs index 659dd288e95e..26995e813d36 100644 --- a/crates/c-api/src/exnref.rs +++ b/crates/c-api/src/exnref.rs @@ -91,7 +91,7 @@ pub unsafe extern "C" fn wasmtime_context_set_exception( let Err(thrown) = scope .as_context_mut() .throw::(rooted); - Some(Box::new(wasm_trap_t::new(wasmtime::Error::new(thrown)))) + Some(Box::new(wasm_trap_t::new(thrown))) } #[unsafe(no_mangle)] diff --git a/crates/wasmtime/src/runtime/externals/global.rs b/crates/wasmtime/src/runtime/externals/global.rs index 31c8dfba9180..c9fae3127c14 100644 --- a/crates/wasmtime/src/runtime/externals/global.rs +++ b/crates/wasmtime/src/runtime/externals/global.rs @@ -2,7 +2,7 @@ use crate::prelude::*; use crate::runtime::vm::{self, VMGlobalDefinition, VMGlobalKind, VMOpaqueContext}; use crate::{ AnyRef, AsContext, AsContextMut, ExnRef, ExternRef, Func, GlobalType, HeapType, Mutability, - Ref, RootedGcRefImpl, Val, ValType, + Ref, Val, ValType, store::{AutoAssertNoGc, InstanceId, StoreId, StoreInstanceId, StoreOpaque}, trampoline::generate_global_export, }; diff --git a/crates/wasmtime/src/runtime/externals/table.rs b/crates/wasmtime/src/runtime/externals/table.rs index 44ac15e5b6a1..90fd55251995 100644 --- a/crates/wasmtime/src/runtime/externals/table.rs +++ b/crates/wasmtime/src/runtime/externals/table.rs @@ -1,5 +1,4 @@ use crate::prelude::*; -use crate::runtime::RootedGcRefImpl; use crate::runtime::vm::{ self, GcStore, SendSyncPtr, TableElementType, VMFuncRef, VMGcRef, VMStore, }; diff --git a/crates/wasmtime/src/runtime/gc/disabled/rooting.rs b/crates/wasmtime/src/runtime/gc/disabled/rooting.rs index ccc03dcc75f2..e33730394357 100644 --- a/crates/wasmtime/src/runtime/gc/disabled/rooting.rs +++ b/crates/wasmtime/src/runtime/gc/disabled/rooting.rs @@ -114,6 +114,10 @@ impl Rooted { ) -> Result { a.assert_unreachable() } + + pub(crate) fn try_gc_ref<'a>(&self, _store: &'a StoreOpaque) -> Result<&'a VMGcRef> { + match self.inner {} + } } 
/// This type has been disabled because the `gc` cargo feature was not enabled diff --git a/crates/wasmtime/src/runtime/gc/enabled/rooting.rs b/crates/wasmtime/src/runtime/gc/enabled/rooting.rs index 7c589b716309..87364fdea364 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/rooting.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/rooting.rs @@ -1277,6 +1277,10 @@ impl Rooted { let gc_ref = store.clone_gc_ref(&gc_ref); Some(from_cloned_gc_ref(store, gc_ref)) } + + pub(crate) fn try_gc_ref<'a>(&self, store: &'a StoreOpaque) -> Result<&'a VMGcRef> { + >::try_gc_ref(self, store) + } } /// Nested rooting scopes. diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index a478446a51a7..fe93748a94be 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -97,8 +97,6 @@ use crate::trampoline::VMHostGlobalContext; #[cfg(feature = "debug")] use crate::{BreakpointState, DebugHandler, FrameDataCache}; use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry}; -#[cfg(feature = "gc")] -use crate::{ExnRef, Rooted, ThrownException}; use crate::{Global, Instance, Table}; use core::convert::Infallible; use core::fmt; @@ -129,8 +127,6 @@ mod async_; #[cfg(all(feature = "async", feature = "call-hook"))] pub use self::async_::CallHookHandler; -#[cfg(feature = "gc")] -use super::vm::VMExnRef; #[cfg(feature = "gc")] mod gc; @@ -496,8 +492,12 @@ pub struct StoreOpaque { /// `throw()`, `take_pending_exception()`, /// `peek_pending_exception()`, `has_pending_exception()`, and /// `catch()`. + /// + /// Also note that the underlying reference here is a `VMExnRef`, a + /// refinement of `VMGcRef`, but rooting APIs right now make it difficult to + /// work with that directly so this is stored as `VMGcRef` instead. 
#[cfg(feature = "gc")] - pending_exception: Option, + pending_exception: Option, // Numbers of resources instantiated in this store, and their limits instance_count: usize, @@ -1000,35 +1000,6 @@ impl Store { self.inner.engine() } - /// Perform garbage collection. - /// - /// Note that it is not required to actively call this function. GC will - /// automatically happen according to various internal heuristics. This is - /// provided if fine-grained control over the GC is desired. - /// - /// If you are calling this method after an attempted allocation failed, you - /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error. - /// When you do so, this method will attempt to create enough space in the - /// GC heap for that allocation, so that it will succeed on the next - /// attempt. - /// - /// # Errors - /// - /// This method will fail if an [async limiter is - /// configured](Store::limiter_async) in which case [`Store::gc_async`] must - /// be used instead. - #[cfg(feature = "gc")] - pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> { - StoreContextMut(&mut self.inner).gc(why) - } - - /// Returns the current capacity of the GC heap in bytes, or 0 if the GC - /// heap has not been initialized yet. - #[cfg(feature = "gc")] - pub fn gc_heap_capacity(&self) -> usize { - self.inner.gc_heap_capacity() - } - /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must /// be configured via [`Store::set_fuel`]. /// @@ -1201,62 +1172,6 @@ impl Store { self.inner.epoch_deadline_callback(Box::new(callback)); } - /// Set an exception as the currently pending exception, and - /// return an error that propagates the throw. - /// - /// This method takes an exception object and stores it in the - /// `Store` as the currently pending exception. This is a special - /// rooted slot that holds the exception as long as it is - /// propagating. 
This method then returns a `ThrownException` - /// error, which is a special type that indicates a pending - /// exception exists. When this type propagates as an error - /// returned from a Wasm-to-host call, the pending exception is - /// thrown within the Wasm context, and either caught or - /// propagated further to the host-to-Wasm call boundary. If an - /// exception is thrown out of Wasm (or across Wasm from a - /// hostcall) back to the host-to-Wasm call boundary, *that* - /// invocation returns a `ThrownException`, and the pending - /// exception slot is again set. In other words, the - /// `ThrownException` error type should propagate upward exactly - /// and only when a pending exception is set. - /// - /// To take the pending exception, use [`Self::take_pending_exception`]. - /// - /// This method is parameterized over `R` for convenience, but - /// will always return an `Err`. - /// - /// # Panics - /// - /// - Will panic if `exception` has been unrooted. - /// - Will panic if `exception` is a null reference. - /// - Will panic if a pending exception has already been set. - #[cfg(feature = "gc")] - pub fn throw(&mut self, exception: Rooted) -> Result { - self.inner.throw_impl(exception); - Err(ThrownException) - } - - /// Take the currently pending exception, if any, and return it, - /// removing it from the "pending exception" slot. - /// - /// If there is no pending exception, returns `None`. - /// - /// Note: the returned exception is a LIFO root (see - /// [`crate::Rooted`]), rooted in the current handle scope. Take - /// care to ensure that it is re-rooted or otherwise does not - /// escape this scope! It is usually best to allow an exception - /// object to be rooted in the store's "pending exception" slot - /// until the final consumer has taken it, rather than root it and - /// pass it up the callstack in some other way. 
- /// - /// This method is useful to implement ad-hoc exception plumbing - /// in various ways, but for the most idiomatic handling, see - /// [`StoreContextMut::throw`]. - #[cfg(feature = "gc")] - pub fn take_pending_exception(&mut self) -> Option> { - self.inner.take_pending_exception_rooted() - } - /// Tests whether there is a pending exception. /// /// Ordinarily, a pending exception will be set on a store if and @@ -1398,21 +1313,6 @@ impl<'a, T> StoreContextMut<'a, T> { self.0.engine() } - /// Perform garbage collection of `ExternRef`s. - /// - /// Same as [`Store::gc`]. - #[cfg(feature = "gc")] - pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> { - let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?; - vm::assert_ready(store.gc( - limiter.as_mut(), - None, - why.map(|e| e.bytes_needed()), - Asyncness::No, - )); - Ok(()) - } - /// Returns remaining fuel in this store. /// /// For more information see [`Store::get_fuel`] @@ -1451,25 +1351,6 @@ impl<'a, T> StoreContextMut<'a, T> { self.0.epoch_deadline_trap(); } - /// Set an exception as the currently pending exception, and - /// return an error that propagates the throw. - /// - /// See [`Store::throw`] for more details. - #[cfg(feature = "gc")] - pub fn throw(&mut self, exception: Rooted) -> Result { - self.0.inner.throw_impl(exception); - Err(ThrownException) - } - - /// Take the currently pending exception, if any, and return it, - /// removing it from the "pending exception" slot. - /// - /// See [`Store::take_pending_exception`] for more details. - #[cfg(feature = "gc")] - pub fn take_pending_exception(&mut self) -> Option> { - self.0.inner.take_pending_exception_rooted() - } - /// Tests whether there is a pending exception. /// /// See [`Store::has_pending_exception`] for more details. 
@@ -2016,38 +1897,6 @@ impl StoreOpaque { } } - /// Helper method to require that a `GcStore` was previously allocated for - /// this store, failing if it has not yet been allocated. - /// - /// Note that this should only be used in a context where allocation of a - /// `GcStore` is sure to have already happened prior, otherwise this may - /// return a confusing error to embedders which is a bug in Wasmtime. - /// - /// Some situations where it's safe to call this method: - /// - /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing - /// this shows proof that the `GcStore` was previously allocated. - /// * During instantiation and instance's `needs_gc_heap` flag will be - /// handled and instantiation will automatically create a GC store. - #[inline] - #[cfg(feature = "gc")] - pub(crate) fn require_gc_store(&self) -> Result<&GcStore> { - match &self.gc_store { - Some(gc_store) => Ok(gc_store), - None => bail!("GC heap not initialized yet"), - } - } - - /// Same as [`Self::require_gc_store`], but mutable. - #[inline] - #[cfg(feature = "gc")] - pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> { - match &mut self.gc_store { - Some(gc_store) => Ok(gc_store), - None => bail!("GC heap not initialized yet"), - } - } - /// Attempts to access the GC store that has been previously allocated. /// /// This method will return `Some` if the GC store was previously allocated. @@ -2065,16 +1914,6 @@ impl StoreOpaque { } } - /// Returns the current capacity of the GC heap in bytes, or 0 if the GC - /// heap has not been initialized yet. - #[cfg(feature = "gc")] - pub(crate) fn gc_heap_capacity(&self) -> usize { - match self.gc_store.as_ref() { - Some(gc_store) => gc_store.gc_heap_capacity(), - None => 0, - } - } - /// Helper to assert that a GC store was previously allocated and is /// present. 
/// @@ -2123,255 +1962,6 @@ impl StoreOpaque { self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope); } - #[cfg(feature = "gc")] - async fn do_gc(&mut self, asyncness: Asyncness) { - // If the GC heap hasn't been initialized, there is nothing to collect. - if self.gc_store.is_none() { - return; - } - - log::trace!("============ Begin GC ==========="); - - // Take the GC roots out of `self` so we can borrow it mutably but still - // call mutable methods on `self`. - let mut roots = core::mem::take(&mut self.gc_roots_list); - - self.trace_roots(&mut roots, asyncness).await; - self.unwrap_gc_store_mut() - .gc( - asyncness, - unsafe { roots.iter() }, - // TODO: Once `Config` has an optional `AsyncFn` field for - // yielding to the current async runtime - // (e.g. `tokio::task::yield_now`), use that if set; otherwise - // fall back to the runtime-agnostic code. - yield_now, - ) - .await; - - // Restore the GC roots for the next GC. - roots.clear(); - self.gc_roots_list = roots; - - log::trace!("============ End GC ==========="); - } - - #[cfg(feature = "gc")] - async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) { - log::trace!("Begin trace GC roots"); - - // We shouldn't have any leftover, stale GC roots. 
- assert!(gc_roots_list.is_empty()); - - self.trace_wasm_stack_roots(gc_roots_list); - if asyncness != Asyncness::No { - self.yield_now().await; - } - - #[cfg(feature = "stack-switching")] - { - self.trace_wasm_continuation_roots(gc_roots_list); - if asyncness != Asyncness::No { - self.yield_now().await; - } - } - - self.trace_vmctx_roots(gc_roots_list); - if asyncness != Asyncness::No { - self.yield_now().await; - } - - self.trace_instance_roots(gc_roots_list); - if asyncness != Asyncness::No { - self.yield_now().await; - } - - self.trace_user_roots(gc_roots_list); - if asyncness != Asyncness::No { - self.yield_now().await; - } - - self.trace_pending_exception_roots(gc_roots_list); - - log::trace!("End trace GC roots") - } - - #[cfg(feature = "gc")] - fn trace_wasm_stack_frame( - &self, - gc_roots_list: &mut GcRootsList, - frame: crate::runtime::vm::Frame, - ) { - let pc = frame.pc(); - debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames"); - - let fp = frame.fp() as *mut usize; - debug_assert!( - !fp.is_null(), - "we should always get a valid frame pointer for Wasm frames" - ); - - let (module_with_code, _offset) = self - .modules() - .module_and_code_by_pc(pc) - .expect("should have module info for Wasm frame"); - - if let Some(stack_map) = module_with_code.lookup_stack_map(pc) { - log::trace!( - "We have a stack map that maps {} bytes in this Wasm frame", - stack_map.frame_size() - ); - - let sp = unsafe { stack_map.sp(fp) }; - for stack_slot in unsafe { stack_map.live_gc_refs(sp) } { - unsafe { - self.trace_wasm_stack_slot(gc_roots_list, stack_slot); - } - } - } - - #[cfg(feature = "debug")] - if let Some(frame_table) = module_with_code.module().frame_table() { - let relpc = module_with_code - .text_offset(pc) - .expect("PC should be within module"); - for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) { - unsafe { - self.trace_wasm_stack_slot(gc_roots_list, stack_slot); - } - } - } - } - - #[cfg(feature = "gc")] - 
unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) { - use crate::runtime::vm::SendSyncPtr; - use core::ptr::NonNull; - - let raw: u32 = unsafe { core::ptr::read(stack_slot) }; - log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}"); - - let gc_ref = vm::VMGcRef::from_raw_u32(raw); - if gc_ref.is_some() { - unsafe { - gc_roots_list - .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap())); - } - } - } - - #[cfg(feature = "gc")] - fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) { - use crate::runtime::vm::Backtrace; - log::trace!("Begin trace GC roots :: Wasm stack"); - - Backtrace::trace(self, |frame| { - self.trace_wasm_stack_frame(gc_roots_list, frame); - core::ops::ControlFlow::Continue(()) - }); - - log::trace!("End trace GC roots :: Wasm stack"); - } - - #[cfg(all(feature = "gc", feature = "stack-switching"))] - fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) { - use crate::{runtime::vm::Backtrace, vm::VMStackState}; - log::trace!("Begin trace GC roots :: continuations"); - - for continuation in &self.continuations { - let state = continuation.common_stack_information.state; - - // FIXME(frank-emrich) In general, it is not enough to just trace - // through the stacks of continuations; we also need to look through - // their `cont.bind` arguments. However, we don't currently have - // enough RTTI information to check if any of the values in the - // buffers used by `cont.bind` are GC values. As a workaround, note - // that we currently disallow cont.bind-ing GC values altogether. - // This way, it is okay not to check them here. - match state { - VMStackState::Suspended => { - Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| { - self.trace_wasm_stack_frame(gc_roots_list, frame); - core::ops::ControlFlow::Continue(()) - }); - } - VMStackState::Running => { - // Handled by `trace_wasm_stack_roots`. 
- } - VMStackState::Parent => { - // We don't know whether our child is suspended or running, but in - // either case things should be handled correctly when traversing - // further along in the chain, nothing required at this point. - } - VMStackState::Fresh | VMStackState::Returned => { - // Fresh/Returned continuations have no gc values on their stack. - } - } - } - - log::trace!("End trace GC roots :: continuations"); - } - - #[cfg(feature = "gc")] - fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) { - log::trace!("Begin trace GC roots :: vmctx"); - self.for_each_global(|store, global| global.trace_root(store, gc_roots_list)); - self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list)); - log::trace!("End trace GC roots :: vmctx"); - } - - #[cfg(feature = "gc")] - fn trace_instance_roots(&mut self, gc_roots_list: &mut GcRootsList) { - log::trace!("Begin trace GC roots :: instance"); - for (_id, instance) in &mut self.instances { - // SAFETY: the instance's GC roots will remain valid for the - // duration of this GC cycle. - unsafe { - instance - .handle - .get_mut() - .trace_element_segment_roots(gc_roots_list); - } - } - log::trace!("End trace GC roots :: instance"); - } - - #[cfg(feature = "gc")] - fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) { - log::trace!("Begin trace GC roots :: user"); - self.gc_roots.trace_roots(gc_roots_list); - log::trace!("End trace GC roots :: user"); - } - - #[cfg(feature = "gc")] - fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) { - log::trace!("Begin trace GC roots :: pending exception"); - if let Some(pending_exception) = self.pending_exception.as_mut() { - unsafe { - let root = pending_exception.as_gc_ref_mut(); - gc_roots_list.add_vmgcref_root(root.into(), "Pending exception"); - } - } - log::trace!("End trace GC roots :: pending exception"); - } - - /// Insert a host-allocated GC type into this store. 
- /// - /// This makes it suitable for the embedder to allocate instances of this - /// type in this store, and we don't have to worry about the type being - /// reclaimed (since it is possible that none of the Wasm modules in this - /// store are holding it alive). - #[cfg(feature = "gc")] - pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) { - // If a GC heap is already allocated, eagerly register trace info - // now. Otherwise, trace info will be registered when the GC heap - // is allocated in `StoreOpaque::allocate_gc_store`. - if let Some(gc_store) = self.optional_gc_store_mut() { - gc_store.ensure_trace_info(ty.index()); - } - self.gc_host_alloc_types.insert(ty); - } - /// Helper function execute a `init_gc_ref` when placing `gc_ref` in `dest`. /// /// This avoids allocating `GcStore` where possible. @@ -2749,21 +2339,6 @@ at https://bytecodealliance.org/security. Ok(id) } - /// Set a pending exception. The `exnref` is taken and held on - /// this store to be fetched later by an unwind. This method does - /// *not* set up an unwind request on the TLS call state; that - /// must be done separately. - #[cfg(feature = "gc")] - pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) { - self.pending_exception = Some(exnref); - } - - /// Take a pending exception, if any. - #[cfg(feature = "gc")] - pub(crate) fn take_pending_exception(&mut self) -> Option { - self.pending_exception.take() - } - /// Tests whether there is a pending exception. pub fn has_pending_exception(&self) -> bool { #[cfg(feature = "gc")] @@ -2776,40 +2351,6 @@ at https://bytecodealliance.org/security. } } - #[cfg(feature = "gc")] - fn take_pending_exception_rooted(&mut self) -> Option> { - let vmexnref = self.take_pending_exception()?; - let mut nogc = AutoAssertNoGc::new(self); - Some(Rooted::new(&mut nogc, vmexnref.into())) - } - - /// Get an owned rooted reference to the pending exception, - /// without taking it off the store. 
- #[cfg(all(feature = "debug", feature = "gc"))] - pub(crate) fn pending_exception_owned_rooted( - &mut self, - ) -> Result>, crate::error::OutOfMemory> { - let mut nogc = AutoAssertNoGc::new(self); - nogc.pending_exception - .take() - .map(|vmexnref| { - let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref()); - nogc.pending_exception = Some(cloned.into_exnref_unchecked()); - crate::OwnedRooted::new(&mut nogc, vmexnref.into()) - }) - .transpose() - } - - #[cfg(feature = "gc")] - fn throw_impl(&mut self, exception: Rooted) { - let mut nogc = AutoAssertNoGc::new(self); - let exnref = exception._to_raw(&mut nogc).unwrap(); - let exnref = VMGcRef::from_raw_u32(exnref) - .expect("exception cannot be null") - .into_exnref_unchecked(); - nogc.set_pending_exception(exnref); - } - #[cfg(target_has_atomic = "64")] pub(crate) fn set_epoch_deadline(&mut self, delta: u64) { // Set a new deadline based on the "epoch deadline delta". diff --git a/crates/wasmtime/src/runtime/store/gc.rs b/crates/wasmtime/src/runtime/store/gc.rs index 351d9ba61cb8..066169971ac0 100644 --- a/crates/wasmtime/src/runtime/store/gc.rs +++ b/crates/wasmtime/src/runtime/store/gc.rs @@ -1,8 +1,137 @@ //! GC-related methods for stores. -use super::*; -use crate::runtime::vm::VMGcRef; +use crate::store::{ + Asyncness, AutoAssertNoGc, InstanceId, StoreOpaque, StoreResourceLimiter, yield_now, +}; +use crate::type_registry::RegisteredType; +use crate::vm::{self, Backtrace, Frame, GcRootsList, GcStore, SendSyncPtr, VMGcRef}; +use crate::{ + ExnRef, GcHeapOutOfMemory, Result, Rooted, Store, StoreContextMut, ThrownException, bail, + format_err, +}; +use core::mem::ManuallyDrop; use core::num::NonZeroU32; +use core::ops::{Deref, DerefMut}; +use core::ptr::NonNull; +use wasmtime_environ::DefinedTagIndex; + +impl Store { + /// Perform garbage collection. + /// + /// Note that it is not required to actively call this function. GC will + /// automatically happen according to various internal heuristics. 
This is + /// provided if fine-grained control over the GC is desired. + /// + /// If you are calling this method after an attempted allocation failed, you + /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error. + /// When you do so, this method will attempt to create enough space in the + /// GC heap for that allocation, so that it will succeed on the next + /// attempt. + /// + /// # Errors + /// + /// This method will fail if an [async limiter is + /// configured](Store::limiter_async) in which case [`Store::gc_async`] must + /// be used instead. + pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> { + StoreContextMut(&mut self.inner).gc(why) + } + + /// Returns the current capacity of the GC heap in bytes, or 0 if the GC + /// heap has not been initialized yet. + pub fn gc_heap_capacity(&self) -> usize { + self.inner.gc_heap_capacity() + } + + /// Set an exception as the currently pending exception, and + /// return an error that propagates the throw. + /// + /// This method takes an exception object and stores it in the + /// `Store` as the currently pending exception. This is a special + /// rooted slot that holds the exception as long as it is + /// propagating. This method then returns a `ThrownException` + /// error, which is a special type that indicates a pending + /// exception exists. When this type propagates as an error + /// returned from a Wasm-to-host call, the pending exception is + /// thrown within the Wasm context, and either caught or + /// propagated further to the host-to-Wasm call boundary. If an + /// exception is thrown out of Wasm (or across Wasm from a + /// hostcall) back to the host-to-Wasm call boundary, *that* + /// invocation returns a `ThrownException`, and the pending + /// exception slot is again set. In other words, the + /// `ThrownException` error type should propagate upward exactly + /// and only when a pending exception is set. 
+ /// + /// To take the pending exception, use [`Self::take_pending_exception`]. + /// + /// This method is parameterized over `R` for convenience, but + /// will always return an `Err`. + /// + /// If there is already a pending exception in the store then the previous + /// one will be overwritten. + /// + /// # Errors + /// + /// This method will return an error if `exception` is unrooted. Otherwise + /// this method will always return `ThrownException`. + pub fn throw(&mut self, exception: Rooted) -> Result { + self.inner.throw_impl(exception) + } + + /// Take the currently pending exception, if any, and return it, + /// removing it from the "pending exception" slot. + /// + /// If there is no pending exception, returns `None`. + /// + /// Note: the returned exception is a LIFO root (see + /// [`crate::Rooted`]), rooted in the current handle scope. Take + /// care to ensure that it is re-rooted or otherwise does not + /// escape this scope! It is usually best to allow an exception + /// object to be rooted in the store's "pending exception" slot + /// until the final consumer has taken it, rather than root it and + /// pass it up the callstack in some other way. + /// + /// This method is useful to implement ad-hoc exception plumbing + /// in various ways, but for the most idiomatic handling, see + /// [`StoreContextMut::throw`]. + pub fn take_pending_exception(&mut self) -> Option> { + self.inner.take_pending_exception_rooted() + } +} + +impl<'a, T> StoreContextMut<'a, T> { + /// Perform garbage collection. + /// + /// Same as [`Store::gc`]. + pub fn gc(&mut self, why: Option<&GcHeapOutOfMemory<()>>) -> Result<()> { + let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?; + vm::assert_ready(store.gc( + limiter.as_mut(), + None, + why.map(|e| e.bytes_needed()), + Asyncness::No, + )); + Ok(()) + } + + /// Set an exception as the currently pending exception, and + /// return an error that propagates the throw. 
+ /// + /// See [`Store::throw`] for more details. + #[cfg(feature = "gc")] + pub fn throw(&mut self, exception: Rooted) -> Result { + self.0.inner.throw_impl(exception) + } + + /// Take the currently pending exception, if any, and return it, + /// removing it from the "pending exception" slot. + /// + /// See [`Store::take_pending_exception`] for more details. + #[cfg(feature = "gc")] + pub fn take_pending_exception(&mut self) -> Option> { + self.0.inner.take_pending_exception_rooted() + } +} impl StoreOpaque { /// Perform any growth or GC needed to allocate `bytes_needed` bytes. @@ -294,6 +423,348 @@ impl StoreOpaque { }, } } + + /// Set a pending exception. + /// + /// The `exnref` is cloned internally and held on this store to be fetched + /// later by an unwind. This method does *not* set up an unwind request on + /// the TLS call state; that must be done separately. + /// + /// GC barriers are not required by the caller of this function. + pub(crate) fn set_pending_exception(&mut self, exnref: &VMGcRef) -> ThrownException { + debug_assert!(exnref.is_exnref(&*self.unwrap_gc_store_mut().gc_heap)); + let gc_store = self.gc_store.as_mut().unwrap(); + gc_store.write_gc_ref(&mut self.pending_exception, Some(exnref)); + ThrownException + } + + /// Takes the pending exception from this store, if any, and exposes it to + /// WebAssembly, returning the raw representation. + pub(crate) fn expose_pending_exception_to_wasm(&mut self) -> Option { + let exnref = self.pending_exception.take()?; + let gc_store = self.unwrap_gc_store_mut(); + debug_assert!(exnref.is_exnref(&*gc_store.gc_heap)); + Some(gc_store.expose_gc_ref_to_wasm(exnref)) + } + + /// Takes the pending exception of the store, yielding ownership of its + /// reference to the `Rooted` that's returned. 
+ fn take_pending_exception_rooted(&mut self) -> Option> { + let vmexnref = self.pending_exception.take()?; + debug_assert!(vmexnref.is_exnref(&*self.unwrap_gc_store().gc_heap)); + let mut nogc = AutoAssertNoGc::new(self); + Some(Rooted::new(&mut nogc, vmexnref)) + } + + /// Returns the (instance,tag) pair that the pending exception in this + /// store, if any, references. + pub(crate) fn pending_exception_tag_and_instance( + &mut self, + ) -> Option<(InstanceId, DefinedTagIndex)> { + let pending_exnref = self.pending_exception.as_ref()?.unchecked_copy(); + debug_assert!(pending_exnref.is_exnref(&*self.unwrap_gc_store_mut().gc_heap)); + let mut store = AutoAssertNoGc::new(self); + Some( + pending_exnref + .into_exnref_unchecked() + .tag(&mut store) + .expect("cannot read tag"), + ) + } + + /// Get an owned rooted reference to the pending exception, + /// without taking it off the store. + #[cfg(feature = "debug")] + pub(crate) fn pending_exception_owned_rooted( + &mut self, + ) -> Result>, crate::OutOfMemory> { + let pending = match &self.pending_exception { + Some(r) => r, + None => return Ok(None), + }; + let cloned = self.gc_store.as_mut().unwrap().clone_gc_ref(pending); + let mut nogc = AutoAssertNoGc::new(self); + Ok(Some(crate::OwnedRooted::new(&mut nogc, cloned)?)) + } + + /// Stores `exception` within the store to later get thrown. + /// + /// Delegates to `self.set_pending_exception` after accessing the internal + /// exception pointer. + fn throw_impl(&mut self, exception: Rooted) -> Result { + let exception = exception.try_gc_ref(self)?.unchecked_copy(); + Err(self.set_pending_exception(&exception).into()) + } + + /// Helper method to require that a `GcStore` was previously allocated for + /// this store, failing if it has not yet been allocated. 
+ /// + /// Note that this should only be used in a context where allocation of a + /// `GcStore` is sure to have already happened prior, otherwise this may + /// return a confusing error to embedders which is a bug in Wasmtime. + /// + /// Some situations where it's safe to call this method: + /// + /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing + /// this shows proof that the `GcStore` was previously allocated. + /// * During instantiation and instance's `needs_gc_heap` flag will be + /// handled and instantiation will automatically create a GC store. + #[inline] + pub(crate) fn require_gc_store(&self) -> Result<&GcStore> { + match &self.gc_store { + Some(gc_store) => Ok(gc_store), + None => bail!("GC heap not initialized yet"), + } + } + + /// Same as [`Self::require_gc_store`], but mutable. + #[inline] + pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> { + match &mut self.gc_store { + Some(gc_store) => Ok(gc_store), + None => bail!("GC heap not initialized yet"), + } + } + + /// Returns the current capacity of the GC heap in bytes, or 0 if the GC + /// heap has not been initialized yet. + pub(crate) fn gc_heap_capacity(&self) -> usize { + match self.gc_store.as_ref() { + Some(gc_store) => gc_store.gc_heap_capacity(), + None => 0, + } + } + + async fn do_gc(&mut self, asyncness: Asyncness) { + // If the GC heap hasn't been initialized, there is nothing to collect. + if self.gc_store.is_none() { + return; + } + + log::trace!("============ Begin GC ==========="); + + // Take the GC roots out of `self` so we can borrow it mutably but still + // call mutable methods on `self`. + let mut roots = core::mem::take(&mut self.gc_roots_list); + + self.trace_roots(&mut roots, asyncness).await; + self.unwrap_gc_store_mut() + .gc( + asyncness, + unsafe { roots.iter() }, + // TODO: Once `Config` has an optional `AsyncFn` field for + // yielding to the current async runtime + // (e.g. 
`tokio::task::yield_now`), use that if set; otherwise + // fall back to the runtime-agnostic code. + yield_now, + ) + .await; + + // Restore the GC roots for the next GC. + roots.clear(); + self.gc_roots_list = roots; + + log::trace!("============ End GC ==========="); + } + + async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) { + log::trace!("Begin trace GC roots"); + + // We shouldn't have any leftover, stale GC roots. + assert!(gc_roots_list.is_empty()); + + self.trace_wasm_stack_roots(gc_roots_list); + if asyncness != Asyncness::No { + self.yield_now().await; + } + + #[cfg(feature = "stack-switching")] + { + self.trace_wasm_continuation_roots(gc_roots_list); + if asyncness != Asyncness::No { + self.yield_now().await; + } + } + + self.trace_vmctx_roots(gc_roots_list); + if asyncness != Asyncness::No { + self.yield_now().await; + } + + self.trace_instance_roots(gc_roots_list); + if asyncness != Asyncness::No { + self.yield_now().await; + } + + self.trace_user_roots(gc_roots_list); + if asyncness != Asyncness::No { + self.yield_now().await; + } + + self.trace_pending_exception_roots(gc_roots_list); + + log::trace!("End trace GC roots") + } + + fn trace_wasm_stack_frame(&self, gc_roots_list: &mut GcRootsList, frame: Frame) { + let pc = frame.pc(); + debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames"); + + let fp = frame.fp() as *mut usize; + debug_assert!( + !fp.is_null(), + "we should always get a valid frame pointer for Wasm frames" + ); + + let (module_with_code, _offset) = self + .modules() + .module_and_code_by_pc(pc) + .expect("should have module info for Wasm frame"); + + if let Some(stack_map) = module_with_code.lookup_stack_map(pc) { + log::trace!( + "We have a stack map that maps {} bytes in this Wasm frame", + stack_map.frame_size() + ); + + let sp = unsafe { stack_map.sp(fp) }; + for stack_slot in unsafe { stack_map.live_gc_refs(sp) } { + unsafe { + self.trace_wasm_stack_slot(gc_roots_list, 
stack_slot); + } + } + } + + #[cfg(feature = "debug")] + if let Some(frame_table) = module_with_code.module().frame_table() { + let relpc = module_with_code + .text_offset(pc) + .expect("PC should be within module"); + for stack_slot in crate::debug::gc_refs_in_frame(frame_table, relpc, fp) { + unsafe { + self.trace_wasm_stack_slot(gc_roots_list, stack_slot); + } + } + } + } + + unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) { + let raw: u32 = unsafe { core::ptr::read(stack_slot) }; + log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}"); + + let gc_ref = vm::VMGcRef::from_raw_u32(raw); + if gc_ref.is_some() { + unsafe { + gc_roots_list + .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap())); + } + } + } + + fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) { + log::trace!("Begin trace GC roots :: Wasm stack"); + + Backtrace::trace(self, |frame| { + self.trace_wasm_stack_frame(gc_roots_list, frame); + core::ops::ControlFlow::Continue(()) + }); + + log::trace!("End trace GC roots :: Wasm stack"); + } + + #[cfg(feature = "stack-switching")] + fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) { + use crate::vm::VMStackState; + + log::trace!("Begin trace GC roots :: continuations"); + + for continuation in &self.continuations { + let state = continuation.common_stack_information.state; + + // FIXME(frank-emrich) In general, it is not enough to just trace + // through the stacks of continuations; we also need to look through + // their `cont.bind` arguments. However, we don't currently have + // enough RTTI information to check if any of the values in the + // buffers used by `cont.bind` are GC values. As a workaround, note + // that we currently disallow cont.bind-ing GC values altogether. + // This way, it is okay not to check them here. 
+ match state { + VMStackState::Suspended => { + Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| { + self.trace_wasm_stack_frame(gc_roots_list, frame); + core::ops::ControlFlow::Continue(()) + }); + } + VMStackState::Running => { + // Handled by `trace_wasm_stack_roots`. + } + VMStackState::Parent => { + // We don't know whether our child is suspended or running, but in + // either case things should be handled correctly when traversing + // further along in the chain, nothing required at this point. + } + VMStackState::Fresh | VMStackState::Returned => { + // Fresh/Returned continuations have no gc values on their stack. + } + } + } + + log::trace!("End trace GC roots :: continuations"); + } + + fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) { + log::trace!("Begin trace GC roots :: vmctx"); + self.for_each_global(|store, global| global.trace_root(store, gc_roots_list)); + self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list)); + log::trace!("End trace GC roots :: vmctx"); + } + + fn trace_instance_roots(&mut self, gc_roots_list: &mut GcRootsList) { + log::trace!("Begin trace GC roots :: instance"); + for (_id, instance) in &mut self.instances { + // SAFETY: the instance's GC roots will remain valid for the + // duration of this GC cycle. 
+ unsafe { + instance + .handle + .get_mut() + .trace_element_segment_roots(gc_roots_list); + } + } + log::trace!("End trace GC roots :: instance"); + } + + fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) { + log::trace!("Begin trace GC roots :: user"); + self.gc_roots.trace_roots(gc_roots_list); + log::trace!("End trace GC roots :: user"); + } + + fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) { + log::trace!("Begin trace GC roots :: pending exception"); + if let Some(pending_exception) = self.pending_exception.as_mut() { + unsafe { + gc_roots_list.add_vmgcref_root(pending_exception.into(), "Pending exception"); + } + } + log::trace!("End trace GC roots :: pending exception"); + } + + /// Insert a host-allocated GC type into this store. + /// + /// This makes it suitable for the embedder to allocate instances of this + /// type in this store, and we don't have to worry about the type being + /// reclaimed (since it is possible that none of the Wasm modules in this + /// store are holding it alive). + pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: RegisteredType) { + // If a GC heap is already allocated, eagerly register trace info + // now. Otherwise, trace info will be registered when the GC heap + // is allocated in `StoreOpaque::allocate_gc_store`. + if let Some(gc_store) = self.optional_gc_store_mut() { + gc_store.ensure_trace_info(ty.index()); + } + self.gc_host_alloc_types.insert(ty); + } } /// RAII type to temporarily disable the GC zeal allocation counter. 
diff --git a/crates/wasmtime/src/runtime/trampoline/global.rs b/crates/wasmtime/src/runtime/trampoline/global.rs index 169c284c535c..0670de188a2d 100644 --- a/crates/wasmtime/src/runtime/trampoline/global.rs +++ b/crates/wasmtime/src/runtime/trampoline/global.rs @@ -1,7 +1,7 @@ use crate::runtime::vm::{StoreBox, VMGlobalDefinition}; use crate::store::{AutoAssertNoGc, StoreOpaque}; use crate::type_registry::RegisteredType; -use crate::{GlobalType, Mutability, Result, RootedGcRefImpl, Val}; +use crate::{GlobalType, Mutability, Result, Val}; use core::ptr; use wasmtime_environ::Global; diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index 2ef761ed3527..92f7e019872d 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -1777,12 +1777,7 @@ fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 { #[cfg(feature = "gc")] fn throw_ref(store: &mut dyn VMStore, _instance: InstanceId, exnref: u32) -> Result<()> { let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?; - let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref); - let exnref = exnref - .into_exnref(&*store.unwrap_gc_store().gc_heap) - .expect("gc ref should be an exception object"); - store.set_pending_exception(exnref); - Err(crate::ThrownException.into()) + Err(store.set_pending_exception(&exnref).into()) } fn breakpoint(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> { diff --git a/crates/wasmtime/src/runtime/vm/throw.rs b/crates/wasmtime/src/runtime/vm/throw.rs index 5463e930b4b9..643613f8bd96 100644 --- a/crates/wasmtime/src/runtime/vm/throw.rs +++ b/crates/wasmtime/src/runtime/vm/throw.rs @@ -17,13 +17,9 @@ pub unsafe fn compute_handler(store: &mut dyn VMStore) -> Option { // Get the tag identity relative to the store. - // Temporarily take, to avoid borrowing issues. 
- let exnref = nogc - .take_pending_exception() + let (throwing_tag_instance_id, throwing_tag_defined_tag_index) = nogc + .pending_exception_tag_and_instance() .expect("Only invoked when an exception is pending"); - let (throwing_tag_instance_id, throwing_tag_defined_tag_index) = - exnref.tag(&mut nogc).expect("cannot read tag"); - nogc.set_pending_exception(exnref); log::trace!( "throwing: tag defined in instance {throwing_tag_instance_id:?} defined-tag {throwing_tag_defined_tag_index:?}" ); diff --git a/crates/wasmtime/src/runtime/vm/traphandlers.rs b/crates/wasmtime/src/runtime/vm/traphandlers.rs index 3df7153d0136..227c2fbe8945 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers.rs @@ -827,11 +827,7 @@ impl CallThreadState { // Take the pending exception at this time and use it as // payload. payload1 = usize::try_from( - store - .take_pending_exception() - .unwrap() - .as_gc_ref() - .as_raw_u32(), + store.expose_pending_exception_to_wasm().unwrap().get(), ) .expect("GC ref does not fit in usize"); payload2 = 0; diff --git a/tests/all/exceptions.rs b/tests/all/exceptions.rs index 18520627ab5a..e4f326877a8c 100644 --- a/tests/all/exceptions.rs +++ b/tests/all/exceptions.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; use wasmtime::*; use wasmtime_test_macros::wasmtime_test; @@ -294,3 +296,178 @@ fn wasm_exceptions_have_backtraces(config: &mut Config) -> Result<()> { Ok(()) } + +#[wasmtime_test(wasm_features(exceptions))] +#[cfg_attr(miri, ignore)] +fn store_pending_exnref_is_cloned(config: &mut Config) -> wasmtime::Result<()> { + config.collector(Collector::DeferredReferenceCounting); + let engine = Engine::new(&config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (import "h" "t1" (tag $t1 (param i32))) + (import "h" "throw_t1" (func $throw_t1)) + (func (export "run") (result i32) + (block $h 
(result i32) + (try_table (result i32) (catch $t1 $h) + call $throw_t1 + unreachable + ) + ) + ) + ) + "#, + )?; + + let functy = FuncType::new(&engine, [ValType::I32], []); + let tagty = TagType::new(functy); + let t1 = Tag::new(&mut store, &tagty)?; + let exnty = ExnType::from_tag_type(&tagty)?; + let exnpre_for_t1 = ExnRefPre::new(&mut store, exnty); + + let throw_t1 = Func::wrap( + &mut store, + move |mut caller: Caller<'_, ()>| -> Result<()> { + let err = { + let mut scope = RootScope::new(&mut caller); + let exn = ExnRef::new(&mut scope, &exnpre_for_t1, &t1, &[Val::I32(0x1111_1111)])?; + scope.as_context_mut().throw::<()>(exn) + }; + caller.as_context_mut().gc(None)?; + err + }, + ); + + let instance = Instance::new( + &mut store, + &module, + &[Extern::Tag(t1), Extern::Func(throw_t1)], + )?; + let run = instance.get_typed_func::<(), i32>(&mut store, "run")?; + let result = run.call(&mut store, ())?; + assert_eq!(result, 0x1111_1111); + Ok(()) +} + +#[wasmtime_test(wasm_features(exceptions, reference_types))] +#[cfg_attr(miri, ignore)] +fn store_pending_exnref_is_exposed(config: &mut Config) -> wasmtime::Result<()> { + config.collector(Collector::DeferredReferenceCounting); + let engine = Engine::new(&config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (import "h" "t1" (tag $t1 (param i32))) + (import "h" "throw_t1" (func $throw_t1)) + (import "" "gc" (func $gc)) + + (func (export "run") (result i32 (ref exn)) + (block $h (result i32 (ref exn)) + (try_table (result i32) (catch_ref $t1 $h) + call $throw_t1 + unreachable + ) + unreachable + ) + call $gc + ) + ) + "#, + )?; + + let functy = FuncType::new(&engine, [ValType::I32], []); + let tagty = TagType::new(functy); + let t1 = Tag::new(&mut store, &tagty)?; + let exnty = ExnType::from_tag_type(&tagty)?; + let exnpre_for_t1 = ExnRefPre::new(&mut store, exnty); + + let throw_t1 = Func::wrap( + &mut store, + move |mut caller: Caller<'_, ()>| -> 
Result<()> {
+ let err = {
+ let mut scope = RootScope::new(&mut caller);
+ let exn = ExnRef::new(&mut scope, &exnpre_for_t1, &t1, &[Val::I32(0x1111_1111)])?;
+ scope.as_context_mut().throw::<()>(exn)
+ };
+ caller.as_context_mut().gc(None)?;
+ err
+ },
+ );
+ let gc = Func::wrap(
+ &mut store,
+ move |mut caller: Caller<'_, ()>| -> Result<()> {
+ caller.gc(None)?;
+ Ok(())
+ },
+ );
+
+ let instance = Instance::new(
+ &mut store,
+ &module,
+ &[t1.into(), throw_t1.into(), gc.into()],
+ )?;
+ let run = instance.get_typed_func::<(), (i32, Rooted<ExnRef>)>(&mut store, "run")?;
+ let (result, exnref) = run.call(&mut store, ())?;
+ assert_eq!(result, 0x1111_1111);
+
+ store.gc(None)?;
+
+ assert_eq!(exnref.field(&mut store, 0)?.unwrap_i32(), 0x1111_1111);
+ Ok(())
+}
+
+struct SetFlagOnDrop(Arc<AtomicBool>);
+
+impl Drop for SetFlagOnDrop {
+ fn drop(&mut self) {
+ self.0.store(true, Relaxed);
+ }
+}
+
+#[wasmtime_test(wasm_features(exceptions))]
+fn store_pending_exnref_has_write_barrier(config: &mut Config) -> wasmtime::Result<()> {
+ config.collector(Collector::DeferredReferenceCounting);
+ let engine = Engine::new(&config)?;
+ let mut store = Store::new(&engine, ());
+
+ let functy = FuncType::new(&engine, [ValType::EXTERNREF], []);
+ let tagty = TagType::new(functy);
+ let tag = Tag::new(&mut store, &tagty)?;
+ let exnty = ExnType::from_tag_type(&tagty)?;
+ let exnpre = ExnRefPre::new(&mut store, exnty);
+
+ let dropped = Arc::new(AtomicBool::new(false));
+
+ eprintln!("a1");
+
+ {
+ let mut scope = RootScope::new(&mut store);
+ let r = ExternRef::new(&mut scope, SetFlagOnDrop(dropped.clone()))?;
+ let exn1 = ExnRef::new(&mut scope, &exnpre, &tag, &[Val::ExternRef(Some(r))])?;
+ let _ = scope.as_context_mut().throw::<()>(exn1);
+ }
+ eprintln!("a2");
+
+ store.gc(None)?;
+ eprintln!("a5");
+ assert!(!dropped.load(Relaxed));
+
+ {
+ let mut scope = RootScope::new(&mut store);
+ let exn2 = ExnRef::new(&mut scope, &exnpre, &tag, &[Val::ExternRef(None)])?;
+ let _ = 
scope.as_context_mut().throw::<()>(exn2); + } + eprintln!("a3"); + + store.gc(None)?; + eprintln!("a4"); + assert!(dropped.load(Relaxed)); + + Ok(()) +}