diff --git a/crates/environ/src/builtin.rs b/crates/environ/src/builtin.rs index 015797ffd695..07d8594354c3 100644 --- a/crates/environ/src/builtin.rs +++ b/crates/environ/src/builtin.rs @@ -467,10 +467,12 @@ impl BuiltinFunctionIndex { // The final epoch represents a trap (@get new_epoch u64) => (TrapSentinel::NegativeOne); + // Failure here indicates GC heap corruption. + (@get get_interned_func_ref pointer) => (TrapSentinel::NegativeOne); + // These libcalls can't trap (@get ref_func pointer) => (return None); (@get table_get_lazy_init_func_ref pointer) => (return None); - (@get get_interned_func_ref pointer) => (return None); (@get intern_func_ref_for_gc_heap u64) => (return None); (@get is_subtype u32) => (return None); (@get ceil_f32 f32) => (return None); diff --git a/crates/wasmtime/src/runtime/externals/global.rs b/crates/wasmtime/src/runtime/externals/global.rs index 31c8dfba9180..6b4a48651060 100644 --- a/crates/wasmtime/src/runtime/externals/global.rs +++ b/crates/wasmtime/src/runtime/externals/global.rs @@ -2,7 +2,7 @@ use crate::prelude::*; use crate::runtime::vm::{self, VMGlobalDefinition, VMGlobalKind, VMOpaqueContext}; use crate::{ AnyRef, AsContext, AsContextMut, ExnRef, ExternRef, Func, GlobalType, HeapType, Mutability, - Ref, RootedGcRefImpl, Val, ValType, + Ref, Val, ValType, store::{AutoAssertNoGc, InstanceId, StoreId, StoreInstanceId, StoreOpaque}, trampoline::generate_global_export, }; @@ -270,7 +270,7 @@ impl Global { Some(e) => Some(e.try_gc_ref(&store)?.unchecked_copy()), }; let new = new.as_ref(); - definition.write_gc_ref(&mut store, new); + definition.write_gc_ref(&mut store, new)?; } Val::AnyRef(a) => { let new = match a { @@ -278,7 +278,7 @@ impl Global { Some(a) => Some(a.try_gc_ref(&store)?.unchecked_copy()), }; let new = new.as_ref(); - definition.write_gc_ref(&mut store, new); + definition.write_gc_ref(&mut store, new)?; } Val::ExnRef(e) => { let new = match e { @@ -286,11 +286,11 @@ impl Global { Some(e) => Some(e.try_gc_ref(&store)?.unchecked_copy()), }; let new = new.as_ref(); - definition.write_gc_ref(&mut store, new); + definition.write_gc_ref(&mut store, new)?; } Val::ContRef(None) => { // Allow null continuation references for globals - these are just placeholders - definition.write_gc_ref(&mut store, None); + definition.write_gc_ref(&mut store, None)?; } Val::ContRef(Some(_)) => { // TODO(#10248): Implement non-null global continuation reference handling diff --git a/crates/wasmtime/src/runtime/externals/table.rs b/crates/wasmtime/src/runtime/externals/table.rs index 44ac15e5b6a1..2bf6d1936f18 100644 --- a/crates/wasmtime/src/runtime/externals/table.rs +++ b/crates/wasmtime/src/runtime/externals/table.rs @@ -1,5 +1,4 @@ use crate::prelude::*; -use crate::runtime::RootedGcRefImpl; use crate::runtime::vm::{ self, GcStore, SendSyncPtr, TableElementType, VMFuncRef, VMGcRef, VMStore, }; @@ -7,7 +6,7 @@ use crate::store::{AutoAssertNoGc, StoreInstanceId, StoreOpaque, StoreResourceLi use crate::trampoline::generate_table_export; use crate::{ AnyRef, AsContext, AsContextMut, ExnRef, ExternRef, Func, HeapType, Ref, RefType, - StoreContextMut, TableType, Trap, + StoreContextMut, TableType, }; use core::iter; use core::ptr::NonNull; @@ -441,7 +440,7 @@ impl Table { src_table: &Table, src_index: u64, len: u64, - ) -> Result<(), Trap> { + ) -> Result<()> { // Handle lazy initialization of the source table first before doing // anything else. 
let src_range = src_index..(src_index.checked_add(len).unwrap_or(u64::MAX)); @@ -479,7 +478,7 @@ impl Table { dst_index, src_index, len, - ) + )?; } // 2. Intra-instance, distinct-tables copy: split the mutable @@ -491,7 +490,7 @@ impl Table { .tables_mut() .get_disjoint_mut([src_table.index, dst_table.index]) .unwrap(); - src_table.copy_to(dst_table, gc_store, dst_index, src_index, len) + src_table.copy_to(dst_table, gc_store, dst_index, src_index, len)?; } // 3. Intra-table copy: get the table and copy within it! @@ -499,9 +498,10 @@ impl Table { let (gc_store, instance) = store.optional_gc_store_and_instance_mut(src_instance); instance .get_defined_table(src_table.index) - .copy_within(gc_store, dst_index, src_index, len) + .copy_within(gc_store, dst_index, src_index, len)?; } } + Ok(()) } /// Fill `table[dst..(dst + len)]` with the given value. diff --git a/crates/wasmtime/src/runtime/func.rs b/crates/wasmtime/src/runtime/func.rs index a0ec000c9891..4c6dc006df5e 100644 --- a/crates/wasmtime/src/runtime/func.rs +++ b/crates/wasmtime/src/runtime/func.rs @@ -2157,11 +2157,11 @@ impl Caller<'_, T> { /// /// Same as [`Store::gc_async`](crate::Store::gc_async). #[cfg(all(feature = "async", feature = "gc"))] - pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) + pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> where T: Send + 'static, { - self.store.gc_async(why).await; + self.store.gc_async(why).await } /// Returns the remaining fuel in the store. diff --git a/crates/wasmtime/src/runtime/gc/disabled/rooting.rs b/crates/wasmtime/src/runtime/gc/disabled/rooting.rs index ccc03dcc75f2..ba2514b4d5d4 100644 --- a/crates/wasmtime/src/runtime/gc/disabled/rooting.rs +++ b/crates/wasmtime/src/runtime/gc/disabled/rooting.rs @@ -114,6 +114,14 @@ impl Rooted { ) -> Result { a.assert_unreachable() } + + pub(crate) fn try_gc_ref<'a>(&self, _: &'a StoreOpaque) -> Result<&'a VMGcRef> { + match self.inner {} + } + + pub(crate) fn try_clone_gc_ref(&self, _: &mut AutoAssertNoGc<'_>) -> Result<VMGcRef> { + match self.inner {} + } } /// This type has been disabled because the `gc` cargo feature was not enabled diff --git a/crates/wasmtime/src/runtime/gc/enabled/anyref.rs b/crates/wasmtime/src/runtime/gc/enabled/anyref.rs index 497e5953974d..8a811d0aaea4 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/anyref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/anyref.rs @@ -1,11 +1,11 @@ //! Implementation of `anyref` in Wasmtime. -use super::{ExternRef, RootedGcRefImpl}; +use super::ExternRef; use crate::prelude::*; use crate::runtime::vm::VMGcRef; use crate::{ ArrayRef, ArrayType, AsContext, AsContextMut, EqRef, GcRefImpl, GcRootIndex, HeapType, I31, - OwnedRooted, RefType, Result, Rooted, StructRef, StructType, ValRaw, ValType, WasmTy, + OwnedRooted, RefType, Result, Rooted, StructRef, StructType, ValRaw, ValType, WasmTy, bail_bug, store::{AutoAssertNoGc, StoreOpaque}, }; use core::mem; @@ -298,11 +298,13 @@ impl AnyRef { || store .unwrap_gc_store() .header(&gc_ref) + .unwrap() .kind() .matches(VMGcKind::AnyRef) || store .unwrap_gc_store() .header(&gc_ref) + .unwrap() .kind() .matches(VMGcKind::ExternRef) ); @@ -336,7 +338,9 @@ impl AnyRef { let raw = if gc_ref.is_i31() { gc_ref.as_raw_non_zero_u32() } else { - store.require_gc_store_mut()?.expose_gc_ref_to_wasm(gc_ref) + store + .require_gc_store_mut()? + .expose_gc_ref_to_wasm(gc_ref)?
}; Ok(raw.get()) } @@ -360,7 +364,7 @@ impl AnyRef { return Ok(HeapType::I31); } - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; if header.kind().matches(VMGcKind::ExternRef) { return Ok(HeapType::Any); @@ -368,21 +372,25 @@ impl AnyRef { debug_assert!(header.kind().matches(VMGcKind::AnyRef)); debug_assert!(header.kind().matches(VMGcKind::EqRef)); + let ty = match header.ty() { + Some(ty) => ty, + None => bail_bug!("ty should be present"), + }; if header.kind().matches(VMGcKind::StructRef) { return Ok(HeapType::ConcreteStruct( - StructType::from_shared_type_index(store.engine(), header.ty().unwrap()), + StructType::from_shared_type_index(store.engine(), ty), )); } if header.kind().matches(VMGcKind::ArrayRef) { return Ok(HeapType::ConcreteArray(ArrayType::from_shared_type_index( store.engine(), - header.ty().unwrap(), + ty, ))); } - unreachable!("no other kinds of `anyref`s") + bail_bug!("no other kinds of `anyref`s") } /// Does this `anyref` match the given type? @@ -436,7 +444,7 @@ impl AnyRef { Ok(gc_ref.is_i31() || store .require_gc_store()? - .kind(gc_ref) + .kind(gc_ref)? .matches(VMGcKind::EqRef)) } @@ -564,7 +572,7 @@ impl AnyRef { Ok(!gc_ref.is_i31() && store .require_gc_store()? - .kind(gc_ref) + .kind(gc_ref)? .matches(VMGcKind::StructRef)) } @@ -632,7 +640,7 @@ impl AnyRef { Ok(!gc_ref.is_i31() && store .require_gc_store()? - .kind(gc_ref) + .kind(gc_ref)? .matches(VMGcKind::ArrayRef)) } diff --git a/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs b/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs index fb8b2d8126f9..f7565aa9c3b8 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs @@ -393,7 +393,7 @@ impl ArrayRef { "attempted to use a `ArrayRefPre` with the wrong store" ); - let len = u32::try_from(elems.len()).unwrap(); + let len = u32::try_from(elems.len())?; // Allocate the array. let arrayref = store @@ -416,7 +416,7 @@ impl ArrayRef { match (|| { let elem_ty = allocator.ty.element_type(); for (i, elem) in elems.enumerate() { - let i = u32::try_from(i).unwrap(); + let i = u32::try_from(i)?; debug_assert!(i < len); arrayref.initialize_elem(&mut store, allocator.layout(), &elem_ty, i, *elem)?; } @@ -424,7 +424,9 @@ impl ArrayRef { })() { Ok(()) => Ok(Rooted::new(&mut store, arrayref.into())), Err(e) => { - store.require_gc_store_mut()?.dealloc_uninit_array(arrayref); + store + .require_gc_store_mut()? + .dealloc_uninit_array(arrayref)?; Err(e) } } @@ -613,11 +615,11 @@ impl ArrayRef { assert!(self.comes_from_same_store(store)); let gc_ref = self.inner.try_gc_ref(store)?; debug_assert!({ - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; header.kind().matches(VMGcKind::ArrayRef) }); let arrayref = gc_ref.as_arrayref_unchecked(); - Ok(arrayref.len(store)) + arrayref.len(store) } /// Get the values of this array's elements. 
@@ -647,7 +649,7 @@ impl ArrayRef { let store = AutoAssertNoGc::new(store); let gc_ref = self.inner.try_gc_ref(&store)?; - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; debug_assert!(header.kind().matches(VMGcKind::ArrayRef)); let len = self._len(&store)?; @@ -677,7 +679,7 @@ impl ArrayRef { return None; } self.index += 1; - Some(self.arrayref._get(&mut self.store, i).unwrap()) + self.arrayref._get(&mut self.store, i).ok() } #[inline] @@ -700,7 +702,7 @@ impl ArrayRef { fn header<'a>(&self, store: &'a AutoAssertNoGc<'_>) -> Result<&'a VMGcHeader> { assert!(self.comes_from_same_store(&store)); let gc_ref = self.inner.try_gc_ref(store)?; - Ok(store.require_gc_store()?.header(gc_ref)) + Ok(store.require_gc_store()?.header(gc_ref)?) } fn arrayref<'a>(&self, store: &'a AutoAssertNoGc<'_>) -> Result<&'a VMArrayRef> { @@ -755,12 +757,12 @@ impl ArrayRef { let arrayref = self.arrayref(store)?.unchecked_copy(); let field_ty = self.field_ty(store)?; let layout = self.layout(store)?; - let len = arrayref.len(store); + let len = arrayref.len(store)?; ensure!( index < len, "index out of bounds: the length is {len} but the index is {index}" ); - Ok(arrayref.read_elem(store, &layout, field_ty.element_type(), index)) + arrayref.read_elem(store, &layout, field_ty.element_type(), index) } /// Set this array's `index`th element. @@ -811,7 +813,7 @@ impl ArrayRef { let layout = self.layout(&store)?; let arrayref = self.arrayref(&store)?.unchecked_copy(); - let len = arrayref.len(&store); + let len = arrayref.len(&store)?; ensure!( index < len, "index out of bounds: the length is {len} but the index is {index}" @@ -822,7 +824,7 @@ pub(crate) fn type_index(&self, store: &StoreOpaque) -> Result<VMSharedTypeIndex> { let gc_ref = self.inner.try_gc_ref(store)?; - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; debug_assert!(header.kind().matches(VMGcKind::ArrayRef)); Ok(header.ty().expect("arrayrefs should have concrete types")) } diff --git a/crates/wasmtime/src/runtime/gc/enabled/eqref.rs b/crates/wasmtime/src/runtime/gc/enabled/eqref.rs index eba879ecc400..0d95bc763d5f 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/eqref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/eqref.rs @@ -2,7 +2,7 @@ #![cfg(feature = "gc")] use crate::{ AnyRef, ArrayRef, ArrayType, AsContext, AsContextMut, GcRefImpl, GcRootIndex, HeapType, I31, - OwnedRooted, RefType, Rooted, StructRef, StructType, ValRaw, ValType, WasmTy, + OwnedRooted, RefType, Rooted, StructRef, StructType, ValRaw, ValType, WasmTy, bail_bug, prelude::*, runtime::vm::VMGcRef, store::{AutoAssertNoGc, StoreOpaque}, @@ -168,6 +168,7 @@ impl EqRef { || store .unwrap_gc_store() .header(&gc_ref) + .unwrap() .kind() .matches(VMGcKind::EqRef) ); @@ -198,22 +199,26 @@ impl EqRef { return Ok(HeapType::I31); } - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; + let ty = match header.ty() { + Some(ty) => ty, + None => bail_bug!("ty should be present"), + }; if header.kind().matches(VMGcKind::StructRef) { return Ok(HeapType::ConcreteStruct( - StructType::from_shared_type_index(store.engine(), header.ty().unwrap()), + StructType::from_shared_type_index(store.engine(), ty), )); } if header.kind().matches(VMGcKind::ArrayRef) { return Ok(HeapType::ConcreteArray(ArrayType::from_shared_type_index( store.engine(), - header.ty().unwrap(), + ty, ))); } - unreachable!("no other kinds of
`eqref`s") + bail_bug!("no other kinds of `eqref`s") } /// Does this `eqref` match the given type? @@ -350,7 +355,7 @@ impl EqRef { Ok(!gc_ref.is_i31() && store .require_gc_store()? - .kind(gc_ref) + .kind(gc_ref)? .matches(VMGcKind::StructRef)) } @@ -418,7 +423,7 @@ impl EqRef { Ok(!gc_ref.is_i31() && store .require_gc_store()? - .kind(gc_ref) + .kind(gc_ref)? .matches(VMGcKind::ArrayRef)) } diff --git a/crates/wasmtime/src/runtime/gc/enabled/exnref.rs b/crates/wasmtime/src/runtime/gc/enabled/exnref.rs index 4374492cf712..a66bec616f80 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/exnref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/exnref.rs @@ -6,8 +6,8 @@ use crate::store::{Asyncness, StoreId, StoreResourceLimiter}; use crate::vm::VMStore; use crate::vm::{self, VMExnRef, VMGcHeader}; use crate::{ - AsContext, AsContextMut, GcRefImpl, GcRootIndex, HeapType, OwnedRooted, RefType, Result, - Rooted, Val, ValRaw, ValType, WasmTy, + AsContext, AsContextMut, GcRefImpl, GcRootIndex, HeapType, OwnedRooted, RefType, Rooted, Val, + ValRaw, ValType, WasmTy, store::{AutoAssertNoGc, StoreOpaque}, }; use crate::{ExnType, FieldType, GcHeapOutOfMemory, StoreContextMut, Tag, prelude::*}; @@ -360,7 +360,7 @@ impl ExnRef { })() { Ok(()) => Ok(Rooted::new(&mut store, exnref.into())), Err(e) => { - store.require_gc_store_mut()?.dealloc_uninit_exn(exnref); + store.require_gc_store_mut()?.dealloc_uninit_exn(exnref)?; Err(e) } } @@ -368,7 +368,7 @@ impl ExnRef { pub(crate) fn type_index(&self, store: &StoreOpaque) -> Result { let gc_ref = self.inner.try_gc_ref(store)?; - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; debug_assert!(header.kind().matches(VMGcKind::ExnRef)); Ok(header.ty().expect("exnrefs should have concrete types")) } @@ -386,8 +386,8 @@ impl ExnRef { debug_assert!( store .unwrap_gc_store() - .header(&gc_ref) - .kind() + .kind(&gc_ref) + .unwrap() .matches(VMGcKind::ExnRef) ); Rooted::new(store, gc_ref) @@ -420,7 +420,9 @@ impl ExnRef { let raw = if gc_ref.is_i31() { gc_ref.as_raw_non_zero_u32() } else { - store.require_gc_store_mut()?.expose_gc_ref_to_wasm(gc_ref) + store + .require_gc_store_mut()? + .expose_gc_ref_to_wasm(gc_ref)? }; Ok(raw.get()) } @@ -500,7 +502,7 @@ impl ExnRef { let store = AutoAssertNoGc::new(store); let gc_ref = self.inner.try_gc_ref(&store)?; - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; debug_assert!(header.kind().matches(VMGcKind::ExnRef)); let index = header.ty().expect("exnrefs should have concrete types"); @@ -532,7 +534,7 @@ impl ExnRef { return None; } self.index += 1; - Some(self.exnref._field(&mut self.store, i).unwrap()) + self.exnref._field(&mut self.store, i).ok() } #[inline] @@ -553,7 +555,7 @@ impl ExnRef { fn header<'a>(&self, store: &'a AutoAssertNoGc<'_>) -> Result<&'a VMGcHeader> { assert!(self.comes_from_same_store(&store)); let gc_ref = self.inner.try_gc_ref(store)?; - Ok(store.require_gc_store()?.header(gc_ref)) + Ok(store.require_gc_store()?.header(gc_ref)?) } fn exnref<'a>(&self, store: &'a AutoAssertNoGc<'_>) -> Result<&'a VMExnRef> { @@ -608,7 +610,7 @@ impl ExnRef { let exnref = self.exnref(store)?.unchecked_copy(); let field_ty = self.field_ty(store, index)?; let layout = self.layout(store)?; - Ok(exnref.read_field(store, &layout, field_ty.element_type(), index)) + exnref.read_field(store, &layout, field_ty.element_type(), index) } /// Get this exception object's associated tag. 
diff --git a/crates/wasmtime/src/runtime/gc/enabled/externref.rs b/crates/wasmtime/src/runtime/gc/enabled/externref.rs index 17788ca4e20a..c56378ede113 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/externref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/externref.rs @@ -1,6 +1,6 @@ //! Implementation of `externref` in Wasmtime. -use super::{AnyRef, RootedGcRefImpl}; +use super::AnyRef; use crate::prelude::*; use crate::runtime::vm::{self, VMGcRef}; use crate::store::Asyncness; @@ -471,7 +471,7 @@ impl ExternRef { } let gc_store = store.require_gc_store()?; if let Some(externref) = gc_ref.as_externref(&*gc_store.gc_heap) { - Ok(Some(gc_store.externref_host_data(externref))) + Ok(Some(gc_store.externref_host_data(externref)?)) } else { Ok(None) } @@ -525,7 +525,7 @@ impl ExternRef { } let gc_store = store.require_gc_store_mut()?; if let Some(externref) = gc_ref.as_externref(&*gc_store.gc_heap) { - Ok(Some(gc_store.externref_host_data_mut(externref))) + Ok(Some(gc_store.externref_host_data_mut(externref)?)) } else { Ok(None) } @@ -593,7 +593,7 @@ impl ExternRef { pub(crate) fn _to_raw(&self, store: &mut AutoAssertNoGc) -> Result<u32> { let gc_ref = self.inner.try_clone_gc_ref(store)?; - let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref); + let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref)?; Ok(raw.get()) } } diff --git a/crates/wasmtime/src/runtime/gc/enabled/rooting.rs b/crates/wasmtime/src/runtime/gc/enabled/rooting.rs index 7c589b716309..afaebbf6813e 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/rooting.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/rooting.rs @@ -1213,7 +1213,7 @@ impl Rooted { let gc_ref = self.inner.try_clone_gc_ref(store)?; let raw = match store.optional_gc_store_mut() { - Some(s) => s.expose_gc_ref_to_wasm(gc_ref), + Some(s) => s.expose_gc_ref_to_wasm(gc_ref)?, None => { // NB: do not force the allocation of a GC heap just because the // program is using `i31ref`s. @@ -1277,6 +1277,14 @@ impl Rooted { let gc_ref = store.clone_gc_ref(&gc_ref); Some(from_cloned_gc_ref(store, gc_ref)) } + + pub(crate) fn try_gc_ref<'a>(&self, store: &'a StoreOpaque) -> Result<&'a VMGcRef> { + <Self as RootedGcRefImpl<T>>::try_gc_ref(self, store) + } + + pub(crate) fn try_clone_gc_ref(&self, store: &mut AutoAssertNoGc<'_>) -> Result<VMGcRef> { + <Self as RootedGcRefImpl<T>>::try_clone_gc_ref(self, store) + } } /// Nested rooting scopes. @@ -1829,7 +1837,7 @@ where let gc_ref = self.try_clone_gc_ref(store)?; let raw = match store.optional_gc_store_mut() { - Some(s) => s.expose_gc_ref_to_wasm(gc_ref), + Some(s) => s.expose_gc_ref_to_wasm(gc_ref)?, None => { debug_assert!(gc_ref.is_i31()); gc_ref.as_raw_non_zero_u32() diff --git a/crates/wasmtime/src/runtime/gc/enabled/structref.rs b/crates/wasmtime/src/runtime/gc/enabled/structref.rs index 74294788cc18..379c57165abb 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/structref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/structref.rs @@ -363,7 +363,7 @@ impl StructRef { Err(e) => { store .require_gc_store_mut()?
- .dealloc_uninit_struct(structref); + .dealloc_uninit_struct(structref)?; Err(e) } } @@ -453,7 +453,7 @@ impl StructRef { let store = AutoAssertNoGc::new(store); let gc_ref = self.inner.try_gc_ref(&store)?; - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; debug_assert!(header.kind().matches(VMGcKind::StructRef)); let index = header.ty().expect("structrefs should have concrete types"); @@ -485,7 +485,7 @@ impl StructRef { return None; } self.index += 1; - Some(self.structref._field(&mut self.store, i).unwrap()) + self.structref._field(&mut self.store, i).ok() } #[inline] @@ -506,7 +506,7 @@ impl StructRef { fn header<'a>(&self, store: &'a AutoAssertNoGc<'_>) -> Result<&'a VMGcHeader> { assert!(self.comes_from_same_store(&store)); let gc_ref = self.inner.try_gc_ref(store)?; - Ok(store.require_gc_store()?.header(gc_ref)) + Ok(store.require_gc_store()?.header(gc_ref)?) } fn structref<'a>(&self, store: &'a AutoAssertNoGc<'_>) -> Result<&'a VMStructRef> { @@ -564,7 +564,7 @@ impl StructRef { let structref = self.structref(store)?.unchecked_copy(); let field_ty = self.field_ty(store, index)?; let layout = self.layout(store)?; - Ok(structref.read_field(store, &layout, field_ty.element_type(), index)) + structref.read_field(store, &layout, field_ty.element_type(), index) } /// Set this struct's `index`th field. @@ -617,7 +617,7 @@ impl StructRef { pub(crate) fn type_index(&self, store: &StoreOpaque) -> Result<VMSharedTypeIndex> { let gc_ref = self.inner.try_gc_ref(store)?; - let header = store.require_gc_store()?.header(gc_ref); + let header = store.require_gc_store()?.header(gc_ref)?; debug_assert!(header.kind().matches(VMGcKind::StructRef)); Ok(header.ty().expect("structrefs should have concrete types")) } diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index a478446a51a7..9447bb31c5b9 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -1409,7 +1409,7 @@ impl<'a, T> StoreContextMut<'a, T> { None, why.map(|e| e.bytes_needed()), Asyncness::No, - )); + ))?; Ok(()) } @@ -2040,7 +2040,6 @@ impl StoreOpaque { /// Same as [`Self::require_gc_store`], but mutable. #[inline] - #[cfg(feature = "gc")] pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> { match &mut self.gc_store { Some(gc_store) => Ok(gc_store), @@ -2124,10 +2123,10 @@ impl StoreOpaque { } #[cfg(feature = "gc")] - async fn do_gc(&mut self, asyncness: Asyncness) { + async fn do_gc(&mut self, asyncness: Asyncness) -> Result<()> { // If the GC heap hasn't been initialized, there is nothing to collect. if self.gc_store.is_none() { - return; + return Ok(()); } log::trace!("============ Begin GC ==========="); @@ -2147,13 +2146,15 @@ impl StoreOpaque { // fall back to the runtime-agnostic code. yield_now, ) - .await; + .await?; // Restore the GC roots for the next GC. roots.clear(); self.gc_roots_list = roots; log::trace!("============ End GC ==========="); + + Ok(()) } #[cfg(feature = "gc")] @@ -2379,18 +2380,23 @@ impl StoreOpaque { &mut self, dest: &mut MaybeUninit<Option<VMGcRef>>, gc_ref: Option<&VMGcRef>, - ) { + ) -> Result<()> { if GcStore::needs_init_barrier(gc_ref) { self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref) } else { dest.write(gc_ref.map(|r| r.copy_i31())); + Ok(()) } } /// Helper function to execute a write barrier when placing `gc_ref` in `dest`. /// /// This avoids allocating `GcStore` where possible.
- pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) { + pub(crate) fn write_gc_ref( + &mut self, + dest: &mut Option<VMGcRef>, + gc_ref: Option<&VMGcRef>, + ) -> Result<()> { GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref) } diff --git a/crates/wasmtime/src/runtime/store/async_.rs b/crates/wasmtime/src/runtime/store/async_.rs index d28a3fd19f39..09a98b24c553 100644 --- a/crates/wasmtime/src/runtime/store/async_.rs +++ b/crates/wasmtime/src/runtime/store/async_.rs @@ -97,7 +97,7 @@ impl Store { /// /// This method is only available when the `gc` Cargo feature is enabled. #[cfg(feature = "gc")] - pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) + pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> where T: Send, { @@ -140,7 +140,7 @@ impl<'a, T> StoreContextMut<'a, T> { /// /// This method is only available when the `gc` Cargo feature is enabled. #[cfg(feature = "gc")] - pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) + pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> where T: Send + 'static, { @@ -152,7 +152,8 @@ impl<'a, T> StoreContextMut<'a, T> { why.map(|e| e.bytes_needed()), crate::store::Asyncness::Yes, ) - .await; + .await?; + Ok(()) } /// Configures epoch-deadline expiration to yield to the async diff --git a/crates/wasmtime/src/runtime/store/gc.rs b/crates/wasmtime/src/runtime/store/gc.rs index 351d9ba61cb8..c72ee38738d1 100644 --- a/crates/wasmtime/src/runtime/store/gc.rs +++ b/crates/wasmtime/src/runtime/store/gc.rs @@ -1,9 +1,22 @@ //! GC-related methods for stores. use super::*; +use crate::Result; use crate::runtime::vm::VMGcRef; +use core::fmt; use core::num::NonZeroU32; +#[derive(Debug)] +struct GcHeapGrowthFailed; + +impl fmt::Display for GcHeapGrowthFailed { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("GC heap growth failed") + } +} + +impl core::error::Error for GcHeapGrowthFailed {} + impl StoreOpaque { /// Perform any growth or GC needed to allocate `bytes_needed` bytes.
/// @@ -21,7 +34,7 @@ impl StoreOpaque { root: Option<VMGcRef>, bytes_needed: Option<u64>, asyncness: Asyncness, - ) -> Option<VMGcRef> { + ) -> Result<Option<VMGcRef>> { let mut scope = crate::OpaqueRootScope::new(self); scope.trim_gc_liveness_flags(true); let store_id = scope.id(); @@ -29,15 +42,15 @@ impl StoreOpaque { scope .collect_and_maybe_grow_gc_heap(limiter, bytes_needed, asyncness) - .await; + .await?; - root.map(|r| { + Ok(root.map(|r| { let r = r .get_gc_ref(&scope) .expect("still in scope") .unchecked_copy(); scope.clone_gc_ref(&r) - }) + })) } // This lives on the Store because it must simultaneously borrow @@ -57,18 +70,25 @@ impl StoreOpaque { limiter: Option<&mut StoreResourceLimiter<'_>>, bytes_needed: Option<u64>, asyncness: Asyncness, - ) { + ) -> Result<()> { log::trace!("collect_and_maybe_grow_gc_heap(bytes_needed = {bytes_needed:#x?})"); - self.do_gc(asyncness).await; + self.do_gc(asyncness).await?; if let Some(n) = bytes_needed - && n > u64::try_from(self.gc_heap_capacity()) - .unwrap() - .saturating_sub(self.gc_store.as_ref().map_or(0, |gc| { + && n > u64::try_from(self.gc_heap_capacity())?.saturating_sub( + self.gc_store.as_ref().map_or(0, |gc| { u64::try_from(gc.last_post_gc_allocated_bytes.unwrap_or(0)).unwrap() - })) + }), + ) { - let _ = self.grow_gc_heap(limiter, n, asyncness).await; + if let Err(e) = self.grow_gc_heap(limiter, n, asyncness).await { + if e.is::<GcHeapGrowthFailed>() { + log::trace!("ignoring GC heap growth failure: {e}"); + } else { + return Err(e); + } + } } + Ok(()) } /// Attempt to grow the GC heap by `bytes_needed` bytes. @@ -93,7 +113,7 @@ impl StoreOpaque { .as_ref() .map_or(false, |gc| gc.gc_heap.needs_gc_before_next_growth()) { - self.do_gc(asyncness).await; + self.do_gc(asyncness).await?; debug_assert!( !self .gc_store @@ -109,7 +129,7 @@ impl StoreOpaque { // grow it, then replace it. let mut heap = TakenGcHeap::new(self); - let current_size_in_bytes = u64::try_from(heap.memory.byte_size()).unwrap(); + let current_size_in_bytes = u64::try_from(heap.memory.byte_size())?; let current_size_in_pages = current_size_in_bytes / page_size; // Aim to double the heap size, amortizing the cost of growth. @@ -144,12 +164,13 @@ impl StoreOpaque { unsafe { heap.memory .grow(delta_pages_for_alloc, limiter) - .await? - .ok_or_else(|| format_err!("failed to grow GC heap"))?; + .await + .context(GcHeapGrowthFailed)?
+ .ok_or(GcHeapGrowthFailed)?; } heap.store.vm_store_context.gc_heap = heap.memory.vmmemory(); - let new_size_in_bytes = u64::try_from(heap.memory.byte_size()).unwrap(); + let new_size_in_bytes = u64::try_from(heap.memory.byte_size())?; assert!(new_size_in_bytes > current_size_in_bytes); heap.delta_bytes_grown = new_size_in_bytes - current_size_in_bytes; let delta_bytes_for_alloc = delta_pages_for_alloc.checked_mul(page_size).unwrap(); @@ -254,7 +275,7 @@ impl StoreOpaque { ); store .gc(limiter.as_deref_mut(), None, None, asyncness) - .await; + .await?; match alloc_func(&mut store, value) { Ok(x) => Ok(x), @@ -284,7 +305,7 @@ impl StoreOpaque { .await { log::trace!("growing GC heap failed: {e}"); - store.gc(limiter, None, None, asyncness).await; + store.gc(limiter, None, None, asyncness).await?; } alloc_func(&mut store, value) diff --git a/crates/wasmtime/src/runtime/trampoline/global.rs b/crates/wasmtime/src/runtime/trampoline/global.rs index 169c284c535c..b1ac7376c6fd 100644 --- a/crates/wasmtime/src/runtime/trampoline/global.rs +++ b/crates/wasmtime/src/runtime/trampoline/global.rs @@ -1,7 +1,7 @@ use crate::runtime::vm::{StoreBox, VMGlobalDefinition}; use crate::store::{AutoAssertNoGc, StoreOpaque}; use crate::type_registry::RegisteredType; -use crate::{GlobalType, Mutability, Result, RootedGcRefImpl, Val}; +use crate::{GlobalType, Mutability, Result, Val}; use core::ptr; use wasmtime_environ::Global; @@ -52,7 +52,7 @@ pub fn generate_global_export( Some(x) => Some(x.try_gc_ref(&store)?.unchecked_copy()), }; let new = new.as_ref(); - global.write_gc_ref(&mut store, new); + global.write_gc_ref(&mut store, new)?; } Val::AnyRef(a) => { let new = match a { @@ -60,7 +60,7 @@ pub fn generate_global_export( Some(a) => Some(a.try_gc_ref(&store)?.unchecked_copy()), }; let new = new.as_ref(); - global.write_gc_ref(&mut store, new); + global.write_gc_ref(&mut store, new)?; } Val::ExnRef(e) => { let new = match e { @@ -68,11 +68,11 @@ pub fn generate_global_export( Some(e) => Some(e.try_gc_ref(&store)?.unchecked_copy()), }; let new = new.as_ref(); - global.write_gc_ref(&mut store, new); + global.write_gc_ref(&mut store, new)?; } Val::ContRef(None) => { // Allow null continuation references for trampoline globals - these are just placeholders - global.write_gc_ref(&mut store, None); + global.write_gc_ref(&mut store, None)?; } Val::ContRef(Some(_)) => { // TODO(#10248): Implement non-null trampoline continuation reference handling diff --git a/crates/wasmtime/src/runtime/vm/gc.rs b/crates/wasmtime/src/runtime/vm/gc.rs index ea4d197ecab1..05b57d38f676 100644 --- a/crates/wasmtime/src/runtime/vm/gc.rs +++ b/crates/wasmtime/src/runtime/vm/gc.rs @@ -8,12 +8,14 @@ mod disabled; #[cfg(not(feature = "gc"))] pub use disabled::*; +mod data; mod func_ref; mod gc_ref; mod gc_runtime; mod host_data; mod i31; +pub use data::*; pub use func_ref::*; pub use gc_ref::*; pub use gc_runtime::*; @@ -28,6 +30,23 @@ use core::mem::MaybeUninit; use core::{alloc::Layout, num::NonZeroU32}; use wasmtime_environ::{GcArrayLayout, GcStructLayout, VMGcKind, VMSharedTypeIndex}; +// Explicit methods to clearly indicate that truncation is desired when used. +#[expect( + clippy::cast_possible_truncation, + reason = "that's the purpose of this method" +)] +fn truncate_i32_to_i16(a: i32) -> i16 { + a as i16 +} + +#[expect( + clippy::cast_possible_truncation, + reason = "that's the purpose of this method" +)] +fn truncate_i32_to_i8(a: i32) -> i8 { + a as i8 +} + /// GC-related data that is one-to-one with a `wasmtime::Store`. 
/// /// Contains everything we need to do collections, invoke barriers, etc... @@ -107,24 +126,25 @@ impl GcStore { asyncness: Asyncness, roots: GcRootsIter<'_>, yield_fn: impl AsyncFn(), - ) { + ) -> Result<()> { let collection = self.gc_heap.gc(roots, &mut self.host_data_table); - collect_async(collection, asyncness, yield_fn).await; + collect_async(collection, asyncness, yield_fn).await?; self.last_post_gc_allocated_bytes = Some({ let size = self.gc_heap.allocated_bytes(); log::trace!("After collection, GC heap's allocated bytes = {size:#x} bytes"); size }); + Ok(()) } /// Get the kind of the given GC reference. - pub fn kind(&self, gc_ref: &VMGcRef) -> VMGcKind { + pub fn kind(&self, gc_ref: &VMGcRef) -> Result<VMGcKind> { debug_assert!(!gc_ref.is_i31()); - self.header(gc_ref).kind() + Ok(self.header(gc_ref)?.kind()) } /// Get the header of the given GC reference. - pub fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader { + pub fn header(&self, gc_ref: &VMGcRef) -> Result<&VMGcHeader> { debug_assert!(!gc_ref.is_i31()); self.gc_heap.header(gc_ref) } @@ -144,11 +164,11 @@ impl GcStore { &mut self, destination: &mut MaybeUninit<Option<VMGcRef>>, source: Option<&VMGcRef>, - ) { + ) -> Result<()> { // Initialize the destination to `None`, at which point the regular GC // write barrier is safe to reuse. let destination = destination.write(None); - self.write_gc_ref(destination, source); + self.write_gc_ref(destination, source) } /// Dynamically tests whether an `init_gc_ref` is needed to write `gc_ref` @@ -180,26 +200,32 @@ impl GcStore { store: Option<&mut Self>, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>, - ) { + ) -> Result<()> { if Self::needs_write_barrier(dest, gc_ref) { store.unwrap().write_gc_ref(dest, gc_ref) } else { *dest = gc_ref.map(|r| r.copy_i31()); + Ok(()) } } /// Write the `source` GC reference into the `destination` slot, performing /// write barriers as necessary. - pub fn write_gc_ref(&mut self, destination: &mut Option<VMGcRef>, source: Option<&VMGcRef>) { + pub fn write_gc_ref( + &mut self, + destination: &mut Option<VMGcRef>, + source: Option<&VMGcRef>, + ) -> Result<()> { // If neither the source nor destination actually point to a GC object // (that is, they are both either null or `i31ref`s) then we can skip // the GC barrier. if Self::needs_write_barrier(destination, source) { self.gc_heap - .write_gc_ref(&mut self.host_data_table, destination, source); + .write_gc_ref(&mut self.host_data_table, destination, source)?; } else { *destination = source.map(|s| s.copy_i31()); } + Ok(()) } /// Drop the given GC reference, performing drop barriers as necessary. @@ -214,13 +240,13 @@ impl GcStore { /// Returns the raw representation of this GC ref, ready to be passed to /// Wasm. #[must_use] - pub fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) -> NonZeroU32 { + pub fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) -> Result<NonZeroU32> { let raw = gc_ref.as_raw_non_zero_u32(); if !gc_ref.is_i31() { log::trace!("exposing GC ref to Wasm: {gc_ref:p}"); - self.gc_heap.expose_gc_ref_to_wasm(gc_ref); + self.gc_heap.expose_gc_ref_to_wasm(gc_ref)?; } - raw + Ok(raw) } /// Allocate a new `externref`. @@ -241,7 +267,7 @@ impl GcStore { let host_data_id = self.host_data_table.alloc(value); match self.gc_heap.alloc_externref(host_data_id)?
{ Ok(x) => Ok(Ok(x)), - Err(n) => Ok(Err((self.host_data_table.dealloc(host_data_id), n))), + Err(n) => Ok(Err((self.host_data_table.dealloc(host_data_id)?, n))), } } /// /// Passing invalid `VMExternRef`s (e.g. garbage values or `externref`s /// associated with a different heap) is memory safe but will lead to general /// incorrectness such as panics and wrong results. - pub fn externref_host_data(&self, externref: &VMExternRef) -> &(dyn Any + Send + Sync) { - let host_data_id = self.gc_heap.externref_host_data(externref); + pub fn externref_host_data(&self, externref: &VMExternRef) -> Result<&(dyn Any + Send + Sync)> { + let host_data_id = self.gc_heap.externref_host_data(externref)?; self.host_data_table.get(host_data_id) } @@ -263,8 +289,8 @@ pub fn externref_host_data_mut( &mut self, externref: &VMExternRef, - ) -> &mut (dyn Any + Send + Sync) { - let host_data_id = self.gc_heap.externref_host_data(externref); + ) -> Result<&mut (dyn Any + Send + Sync)> { + let host_data_id = self.gc_heap.externref_host_data(externref)?; self.host_data_table.get_mut(host_data_id) } @@ -313,14 +339,14 @@ impl GcStore { } /// Deallocate an uninitialized struct. - pub fn dealloc_uninit_struct(&mut self, structref: VMStructRef) { + pub fn dealloc_uninit_struct(&mut self, structref: VMStructRef) -> Result<()> { self.gc_heap.dealloc_uninit_struct_or_exn(structref.into()) } /// Get the data for the given object reference. /// /// Panics when the structref and its size is out of the GC heap bounds. - pub fn gc_object_data(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData { + pub fn gc_object_data(&mut self, gc_ref: &VMGcRef) -> Result<&mut VMGcObjectData> { self.gc_heap.gc_object_data_mut(gc_ref) } @@ -331,7 +357,7 @@ impl GcStore { &mut self, a: &VMGcRef, b: &VMGcRef, - ) -> (&mut VMGcObjectData, &mut VMGcObjectData) { + ) -> Result<(&mut VMGcObjectData, &mut VMGcObjectData)> { assert_ne!(a, b); self.gc_heap.gc_object_data_pair(a, b) } @@ -352,12 +378,12 @@ impl GcStore { } /// Deallocate an uninitialized array. - pub fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) { - self.gc_heap.dealloc_uninit_array(arrayref); + pub fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) -> Result<()> { + self.gc_heap.dealloc_uninit_array(arrayref) } /// Get the length of the given array. - pub fn array_len(&self, arrayref: &VMArrayRef) -> u32 { + pub fn array_len(&self, arrayref: &VMArrayRef) -> Result<u32> { self.gc_heap.array_len(arrayref) } @@ -379,8 +405,8 @@ impl GcStore { } /// Deallocate an uninitialized exception object. - pub fn dealloc_uninit_exn(&mut self, exnref: VMExnRef) { - self.gc_heap.dealloc_uninit_struct_or_exn(exnref.into()); + pub fn dealloc_uninit_exn(&mut self, exnref: VMExnRef) -> Result<()> { + self.gc_heap.dealloc_uninit_struct_or_exn(exnref.into()) } #[cfg(feature = "gc")] diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/data.rs b/crates/wasmtime/src/runtime/vm/gc/data.rs similarity index 70% rename from crates/wasmtime/src/runtime/vm/gc/enabled/data.rs rename to crates/wasmtime/src/runtime/vm/gc/data.rs index 21af99235151..5227d9172891 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/data.rs +++ b/crates/wasmtime/src/runtime/vm/gc/data.rs @@ -1,7 +1,8 @@ //! Unchecked methods for working with the data inside GC objects. -use crate::V128; +use crate::{Result, V128, bail_bug}; use core::mem; +use core::ops::Range; /// A plain-old-data type that can be stored in a `ValType` or a `StorageType`.
pub trait PodValType<const N: usize>: Copy { @@ -74,7 +75,7 @@ macro_rules! impl_pod_methods { /// /// Panics on out-of-bounds accesses. #[inline] - pub fn $read(&self, offset: u32) -> $T + pub fn $read(&self, offset: u32) -> Result<$T> { self.read_pod::<{ mem::size_of::<$T>() }, $T>(offset) } @@ -85,9 +86,9 @@ macro_rules! impl_pod_methods { /// /// Panics on out-of-bounds accesses. #[inline] - pub fn $write(&mut self, offset: u32, val: $T) + pub fn $write(&mut self, offset: u32, val: $T) -> Result<()> { - self.write_pod::<{ mem::size_of::<$T>() }, $T>(offset, val); + self.write_pod::<{ mem::size_of::<$T>() }, $T>(offset, val) } )* }; }; @@ -127,15 +128,17 @@ impl VMGcObjectData { /// Don't generally use this method, use `read_u8`, `read_i64`, /// etc... instead. #[inline] - fn read_pod<const N: usize, T>(&self, offset: u32) -> T + fn read_pod<const N: usize, T>(&self, offset: u32) -> Result<T> where T: PodValType<N>, { assert_eq!(N, mem::size_of::<T>()); - let offset = usize::try_from(offset).unwrap(); - let end = offset.checked_add(N).unwrap(); - let bytes = self.data.get(offset..end).expect("out of bounds field"); - T::read_le(bytes.as_array().unwrap()) + let offset = usize::try_from(offset)?; + let bytes = match self.data.get(offset..).and_then(|s| s.first_chunk()) { + Some(data) => data, + None => bail_bug!("out of bounds field"), + }; + Ok(T::read_le(bytes)) } /// Write a POD field into this object. /// /// Don't generally use this method, use `write_u8`, `write_i64`, /// etc... instead. #[inline] - fn write_pod<const N: usize, T>(&mut self, offset: u32, val: T) + fn write_pod<const N: usize, T>(&mut self, offset: u32, val: T) -> Result<()> where T: PodValType<N>, { assert_eq!(N, mem::size_of::<T>()); - let offset = usize::try_from(offset).unwrap(); - let end = offset.checked_add(N).unwrap(); - let into = match self.data.get_mut(offset..end) { + let offset = usize::try_from(offset)?; + let into = match self + .data + .get_mut(offset..) + .and_then(|s| s.first_chunk_mut()) + { Some(into) => into, - None => panic!( - "out of bounds field! field range = {offset:#x}..{end:#x}; object len = {:#x}", - self.data.as_mut().len(), - ), + None => bail_bug!("out of bounds field"), }; - val.write_le(into.as_mut_array().unwrap()); + Ok(val.write_le(into)) } /// Get a slice of this object's data. /// /// /// Panics on out-of-bounds accesses. #[inline] - pub fn slice(&self, offset: u32, len: u32) -> &[u8] { - let start = usize::try_from(offset).unwrap(); - let len = usize::try_from(len).unwrap(); - let end = start.checked_add(len).unwrap(); - self.data.get(start..end).expect("out of bounds slice") + pub fn slice(&self, offset: u32, len: u32) -> Result<&[u8]> { + let start = usize::try_from(offset)?; + let len = usize::try_from(len)?; + match self.data.get(start..).and_then(|s| s.get(..len)) { + Some(slice) => Ok(slice), + None => bail_bug!("out of bounds slice"), + } } /// Get a mutable slice of this object's data. /// /// /// Panics on out-of-bounds accesses.
#[inline] - pub fn slice_mut(&mut self, offset: u32, len: u32) -> &mut [u8] { - let start = usize::try_from(offset).unwrap(); - let len = usize::try_from(len).unwrap(); - let end = start.checked_add(len).unwrap(); - self.data.get_mut(start..end).expect("out of bounds slice") + pub fn slice_mut(&mut self, offset: u32, len: u32) -> Result<&mut [u8]> { + let start = usize::try_from(offset)?; + let len = usize::try_from(len)?; + match self.data.get_mut(start..).and_then(|s| s.get_mut(..len)) { + Some(slice) => Ok(slice), + None => bail_bug!("out of bounds slice"), + } } /// Copy the given slice into this object's data at the given offset. /// /// # Panics /// /// Panics on out-of-bounds accesses. #[inline] - pub fn copy_from_slice(&mut self, offset: u32, src: &[u8]) { - let offset = usize::try_from(offset).unwrap(); - let end = offset.checked_add(src.len()).unwrap(); - let into = self.data.get_mut(offset..end).expect("out of bounds copy"); + pub fn copy_from_slice(&mut self, offset: u32, src: &[u8]) -> Result<()> { + let offset = usize::try_from(offset)?; + let into = match self + .data + .get_mut(offset..) + .and_then(|s| s.get_mut(..src.len())) + { + Some(into) => into, + None => bail_bug!("out of bounds copy"), + }; into.copy_from_slice(src); + Ok(()) } /// Copy within this object's data. /// /// # Panics /// /// Panics on out-of-bounds accesses. #[inline] - pub fn copy_within<R>(&mut self, src: R, dest: u32) - where - R: core::ops::RangeBounds<u32>, - { - let start = src.start_bound().map(|s| usize::try_from(*s).unwrap()); - let end = src.end_bound().map(|e| usize::try_from(*e).unwrap()); - let dest = usize::try_from(dest).unwrap(); - self.data.copy_within((start, end), dest); + pub fn copy_within(&mut self, src: Range<u32>, dest: u32) -> Result<()> { + let start = usize::try_from(src.start)?; + let end = usize::try_from(src.end)?; + let dest = usize::try_from(dest)?; + if self.data.get(start..end).is_none() + || self + .data + .get(dest..) + .and_then(|s| s.get(..src.len())) + .is_none() + { + bail_bug!("invalid copy range"); + } + self.data.copy_within(start..end, dest); + Ok(()) } impl_pod_methods! { diff --git a/crates/wasmtime/src/runtime/vm/gc/disabled.rs b/crates/wasmtime/src/runtime/vm/gc/disabled.rs index 53717b31b73e..4435d76796cd 100644 --- a/crates/wasmtime/src/runtime/vm/gc/disabled.rs +++ b/crates/wasmtime/src/runtime/vm/gc/disabled.rs @@ -10,11 +10,6 @@ pub enum VMArrayRef {} pub enum VMExnRef {} -pub struct VMGcObjectData { - _inner: VMStructRef, - _phantom: core::marker::PhantomData<[u8]>, -} - impl VMGcRef { pub fn into_structref_unchecked(self) -> VMStructRef { unreachable!() } @@ -36,15 +31,3 @@ impl From for VMGcRef { match e {} } } - -impl<'a> From<&'a [u8]> for &'a VMGcObjectData { - fn from(_: &'a [u8]) -> Self { - unreachable!() - } -} - -impl<'a> From<&'a mut [u8]> for &'a mut VMGcObjectData { - fn from(_: &'a mut [u8]) -> Self { - unreachable!() - } -} diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled.rs b/crates/wasmtime/src/runtime/vm/gc/enabled.rs index 2eb3d299690f..478cd85fec08 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled.rs @@ -1,7 +1,6 @@ //! Implementation of garbage collection and GC types in Wasmtime.
mod arrayref; -mod data; mod exnref; mod externref; #[cfg(feature = "gc-drc")] @@ -11,7 +10,6 @@ mod structref; mod trace_info; pub use arrayref::*; -pub use data::*; pub use exnref::*; pub use externref::*; pub use structref::*; @@ -30,20 +28,3 @@ pub use null::*; mod copying; #[cfg(feature = "gc-copying")] pub use copying::*; - -// Explicit methods to clearly indicate that truncation is desired when used. -#[expect( - clippy::cast_possible_truncation, - reason = "that's the purpose of this method" -)] -fn truncate_i32_to_i16(a: i32) -> i16 { - a as i16 -} - -#[expect( - clippy::cast_possible_truncation, - reason = "that's the purpose of this method" -)] -fn truncate_i32_to_i8(a: i32) -> i8 { - a as i8 -} diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs index 2cd708b3c082..f7354bd1c311 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs @@ -1,10 +1,8 @@ -use super::{truncate_i32_to_i8, truncate_i32_to_i16}; use crate::{ - AnyRef, ExnRef, ExternRef, Func, HeapType, RootedGcRefImpl, StorageType, Val, ValType, + StorageType, Val, prelude::*, runtime::vm::{GcHeap, GcStore, VMGcRef}, store::{AutoAssertNoGc, StoreOpaque}, - vm::{FuncRefTableId, SendSyncPtr}, }; use core::fmt; use wasmtime_environ::{GcArrayLayout, VMGcKind}; @@ -42,8 +40,10 @@ impl VMGcRef { return false; } - let header = gc_heap.header(&self); - header.kind().matches(VMGcKind::ArrayRef) + match gc_heap.header(&self) { + Ok(header) => header.kind().matches(VMGcKind::ArrayRef), + Err(_) => false, + } } /// Create a new `VMArrayRef` from the given `gc_ref`. @@ -124,7 +124,7 @@ impl VMArrayRef { } /// Get the length of this array. - pub fn len(&self, store: &StoreOpaque) -> u32 { + pub fn len(&self, store: &StoreOpaque) -> Result<u32> { store.unwrap_gc_store().array_len(self) } @@ -144,44 +144,9 @@ impl VMArrayRef { layout: &GcArrayLayout, ty: &StorageType, index: u32, - ) -> Val { + ) -> Result<Val> { let offset = layout.elem_offset(index).unwrap(); - let data = store.unwrap_gc_store_mut().gc_object_data(self.as_gc_ref()); - match ty { - StorageType::I8 => Val::I32(data.read_u8(offset).into()), - StorageType::I16 => Val::I32(data.read_u16(offset).into()), - StorageType::ValType(ValType::I32) => Val::I32(data.read_i32(offset)), - StorageType::ValType(ValType::I64) => Val::I64(data.read_i64(offset)), - StorageType::ValType(ValType::F32) => Val::F32(data.read_u32(offset)), - StorageType::ValType(ValType::F64) => Val::F64(data.read_u64(offset)), - StorageType::ValType(ValType::V128) => Val::V128(data.read_v128(offset)), - StorageType::ValType(ValType::Ref(r)) => match r.heap_type().top() { - HeapType::Extern => { - let raw = data.read_u32(offset); - Val::ExternRef(ExternRef::_from_raw(store, raw)) - } - HeapType::Any => { - let raw = data.read_u32(offset); - Val::AnyRef(AnyRef::_from_raw(store, raw)) - } - HeapType::Exn => { - let raw = data.read_u32(offset); - Val::ExnRef(ExnRef::_from_raw(store, raw)) - } - HeapType::Func => { - let func_ref_id = data.read_u32(offset); - let func_ref_id = FuncRefTableId::from_raw(func_ref_id); - let func_ref = store - .unwrap_gc_store() - .func_ref_table - .get_untyped(func_ref_id); - Val::FuncRef(unsafe { - func_ref.map(|p| Func::from_vm_func_ref(store.id(), p.as_non_null())) - }) - } - otherwise => unreachable!("not a top type: {otherwise:?}"), - }, - } + self.as_gc_ref().read_val(store, ty, offset) } /// Write the given value into this array at the given
offset. @@ -206,84 +171,7 @@ impl VMArrayRef { debug_assert!(val._matches_ty(&store, &ty.unpack())?); let offset = layout.elem_offset(index).unwrap(); - let data = store.unwrap_gc_store_mut().gc_object_data(self.as_gc_ref()); - match val { - Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)), - Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)), - Val::I32(i) => data.write_i32(offset, i), - Val::I64(i) => data.write_i64(offset, i), - Val::F32(f) => data.write_u32(offset, f), - Val::F64(f) => data.write_u64(offset, f), - Val::V128(v) => data.write_v128(offset, v), - - // For GC-managed references, we need to take care to run the - // appropriate barriers, even when we are writing null references - // into the array. - // - // POD-read the old value into a local copy, run the GC write - // barrier on that local copy, and then POD-write the updated - // value back into the array. This avoids transmuting the inner - // data, which would probably be fine, but this approach is - // Obviously Correct and should get us by for now. If LLVM isn't - // able to elide some of these unnecessary copies, and this - // method is ever hot enough, we can always come back and clean - // it up in the future. - Val::ExternRef(e) => { - let raw = data.read_u32(offset); - let mut gc_ref = VMGcRef::from_raw_u32(raw); - let e = match e { - Some(e) => Some(e.try_gc_ref(store)?.unchecked_copy()), - None => None, - }; - let store = store.require_gc_store_mut()?; - store.write_gc_ref(&mut gc_ref, e.as_ref()); - let data = store.gc_object_data(self.as_gc_ref()); - data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32())); - } - Val::AnyRef(a) => { - let raw = data.read_u32(offset); - let mut gc_ref = VMGcRef::from_raw_u32(raw); - let a = match a { - Some(a) => Some(a.try_gc_ref(store)?.unchecked_copy()), - None => None, - }; - let store = store.require_gc_store_mut()?; - store.write_gc_ref(&mut gc_ref, a.as_ref()); - let data = store.gc_object_data(self.as_gc_ref()); - data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32())); - } - Val::ExnRef(e) => { - let raw = data.read_u32(offset); - let mut gc_ref = VMGcRef::from_raw_u32(raw); - let e = match e { - Some(e) => Some(e.try_gc_ref(store)?.unchecked_copy()), - None => None, - }; - let store = store.require_gc_store_mut()?; - store.write_gc_ref(&mut gc_ref, e.as_ref()); - let data = store.gc_object_data(self.as_gc_ref()); - data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32())); - } - - Val::FuncRef(f) => { - let func_ref = match f { - Some(f) => Some(SendSyncPtr::new(f.vm_func_ref(store))), - None => None, - }; - let store = store.require_gc_store_mut()?; - let id = unsafe { store.func_ref_table.intern(func_ref) }; - store - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, id.into_raw()); - } - Val::ContRef(_) => { - // TODO(#10248): Implement array continuation reference element handling - return Err(crate::format_err!( - "setting continuation references in array elements not yet supported" - )); - } - } - Ok(()) + self.as_gc_ref().write_val(store, ty, offset, val) } /// Initialize an element in this arrayref that is currently uninitialized. 
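
Reviewer note: the large deletions in the hunks above and below are deduplication rather than lost functionality. The per-`Val`-kind `match` blocks that `read_elem`, `write_elem`, and `initialize_elem` each carried are centralized into shared `read_val`/`write_val`/`initialize_val` helpers on the raw GC ref, so arrays, structs, and exceptions share one offset-based implementation. A rough sketch of the shape of that refactor, using hypothetical `Obj`/`Value` stand-ins (the real helpers handle many more cases: refs, `v128`, func refs, ...):

```rust
// Hypothetical stand-ins for illustration; not Wasmtime's API.
enum Value { I32(i32), I64(i64) }

struct Obj { bytes: Vec<u8> }

impl Obj {
    // One shared, offset-based writer replaces the per-container copies
    // that arrays, structs, and exceptions used to carry.
    fn write_val(&mut self, offset: usize, v: Value) -> Result<(), String> {
        let le: Vec<u8> = match v {
            Value::I32(i) => i.to_le_bytes().to_vec(),
            Value::I64(i) => i.to_le_bytes().to_vec(),
        };
        let dst = self
            .bytes
            .get_mut(offset..offset + le.len())
            .ok_or_else(|| "out of bounds field".to_string())?;
        dst.copy_from_slice(&le);
        Ok(())
    }
}

fn main() {
    let mut obj = Obj { bytes: vec![0; 16] };
    // An array element write and a struct field write both reduce to
    // computing an offset and delegating to the same helper.
    obj.write_val(0, Value::I32(7)).unwrap();
    obj.write_val(8, Value::I64(-1)).unwrap();
    assert!(obj.write_val(12, Value::I64(0)).is_err()); // bounds-checked
}
```
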
@@ -320,82 +208,6 @@ impl VMArrayRef { ) -> Result<()> { debug_assert!(val._matches_ty(&store, &ty.unpack())?); let offset = layout.elem_offset(index).unwrap(); - let gcstore = store.require_gc_store_mut()?; - match val { - Val::I32(i) if ty.is_i8() => gcstore - .gc_object_data(self.as_gc_ref()) - .write_i8(offset, truncate_i32_to_i8(i)), - Val::I32(i) if ty.is_i16() => gcstore - .gc_object_data(self.as_gc_ref()) - .write_i16(offset, truncate_i32_to_i16(i)), - Val::I32(i) => gcstore - .gc_object_data(self.as_gc_ref()) - .write_i32(offset, i), - Val::I64(i) => gcstore - .gc_object_data(self.as_gc_ref()) - .write_i64(offset, i), - Val::F32(f) => gcstore - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, f), - Val::F64(f) => gcstore - .gc_object_data(self.as_gc_ref()) - .write_u64(offset, f), - Val::V128(v) => gcstore - .gc_object_data(self.as_gc_ref()) - .write_v128(offset, v), - - // NB: We don't need to do a write barrier when initializing a - // field, because there is nothing being overwritten. Therefore, we - // just the clone barrier. - Val::ExternRef(x) => { - let x = match x { - None => 0, - Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(), - }; - store - .require_gc_store_mut()? - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, x); - } - Val::AnyRef(x) => { - let x = match x { - None => 0, - Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(), - }; - store - .require_gc_store_mut()? - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, x); - } - Val::ExnRef(x) => { - let x = match x { - None => 0, - Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(), - }; - store - .require_gc_store_mut()? - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, x); - } - - Val::FuncRef(f) => { - let func_ref = match f { - Some(f) => Some(SendSyncPtr::new(f.vm_func_ref(store))), - None => None, - }; - let gcstore = store.require_gc_store_mut()?; - let id = unsafe { gcstore.func_ref_table.intern(func_ref) }; - gcstore - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, id.into_raw()); - } - Val::ContRef(_) => { - // TODO(#10248): Implement array continuation reference init handling - return Err(crate::format_err!( - "initializing continuation references in array elements not yet supported" - )); - } - } - Ok(()) + self.as_gc_ref().initialize_val(store, ty, offset, val) } } diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/copying.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/copying.rs index 48f559160512..36010698ab30 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/copying.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/copying.rs @@ -17,7 +17,7 @@ use crate::runtime::vm::{ GcProgress, GcRootsIter, GcRuntime, TypedGcRef, VMExternRef, VMGcHeader, VMGcRef, VMMemoryDefinition, }; -use crate::{Engine, prelude::*}; +use crate::{Engine, bail_bug, prelude::*}; use core::sync::atomic::AtomicUsize; use core::{alloc::Layout, any::Any, mem, num::NonZeroU32, ptr::NonNull}; use wasmtime_environ::copying::{ @@ -107,21 +107,19 @@ unsafe impl GcHeapObject for VMCopyingHeaderAndForwardingRef { impl VMCopyingHeaderAndForwardingRef { /// Get the forwarding reference for this object, if it has been copied /// during the current collection. 
- fn forwarding_ref(&self) -> Option<VMGcRef> { + fn forwarding_ref(&self) -> Result<Option<VMGcRef>> { debug_assert!( self.header.object_size() >= u32::try_from(mem::size_of::<Self>()).unwrap() ); - if self.header.copied() { - Some( - self.forwarding_ref - .as_ref() - .expect("should always have a forwarding ref if the copied bit is set") - .unchecked_copy(), - ) + Ok(if self.header.copied() { + match self.forwarding_ref.as_ref() { + Some(r) => Some(r.unchecked_copy()), + None => bail_bug!("should always have a forwarding ref if the copied bit is set"), + } } else { None - } + }) } /// Set the forwarding reference for this object and mark it as copied. @@ -424,7 +422,7 @@ impl CopyingHeap { /// Pop the next item off the worklist, or return `None` if the worklist is /// empty. - fn worklist_pop(&mut self) -> Option<VMGcRef> { + fn worklist_pop(&mut self) -> Result<Option<VMGcRef>> { debug_assert!( self.is_in_active_space(self.worklist_ptr) || self.worklist_ptr == self.active_space_end() ); debug_assert!(self.worklist_ptr <= self.bump_ptr()); if self.worklist_ptr == self.bump_ptr() { - return None; + return Ok(None); } let result = self.worklist_ptr; - let result = NonZeroU32::new(result).unwrap(); - let result = VMGcRef::from_heap_index(result).unwrap(); + let result = match NonZeroU32::new(result) { + Some(result) => result, + None => bail_bug!("worklist ptr is zero"), + }; + let result = match VMGcRef::from_heap_index(result) { + Some(result) => result, + None => bail_bug!("corrupt worklist_ptr"), + }; - let obj_size = self.index(copying_ref(&result)).object_size(); + let obj_size = self.index(copying_ref(&result))?.object_size(); self.worklist_ptr += obj_size; debug_assert!(self.worklist_ptr <= self.bump_ptr()); - Some(result) + Ok(Some(result)) } /// Insert `gc_ref`, which points to a grey object, into the worklist. - fn worklist_insert(&self, gc_ref: &VMGcRef) { + fn worklist_insert(&self, gc_ref: &VMGcRef) -> Result<()> { // This is a no-op: insertion happens implicitly in `copy` when // allocating space for the copied object and advancing the // `bump_ptr`. But we still define and call this method just for the // debug assertions. if !cfg!(debug_assertions) { - return; + return Ok(()); } - let index = gc_ref.as_heap_index().unwrap().get(); + let index = gc_ref.heap_index()?.get(); debug_assert!(self.is_in_active_space(index)); - let obj_size = self.index(copying_ref(gc_ref)).object_size(); + let obj_size = self.index(copying_ref(gc_ref))?.object_size(); debug_assert_eq!(index + obj_size, self.bump_ptr()); debug_assert!(self.worklist_ptr <= index); + Ok(()) } /// Get-or-create the location of this idle-space ref in the new active /// semi-space. - fn forward(&mut self, from_ref: &VMGcRef) -> VMGcRef { + fn forward(&mut self, from_ref: &VMGcRef) -> Result<VMGcRef> { debug_assert!(!from_ref.is_i31()); - debug_assert!(self.is_in_idle_space(from_ref.as_heap_index().unwrap().get())); + debug_assert!(self.is_in_idle_space(from_ref.heap_index()?.get())); if let Some(to_ref) = self - .index(header_and_forwarding_ref(from_ref)) - .forwarding_ref() + .index(header_and_forwarding_ref(from_ref))? + .forwarding_ref()? { - return to_ref; + return Ok(to_ref); } self.copy(from_ref) } /// Copy this idle-space ref into the new active semi-space and return its /// new location.
- fn copy(&mut self, from_ref: &VMGcRef) -> VMGcRef { + fn copy(&mut self, from_ref: &VMGcRef) -> Result { debug_assert!(!from_ref.is_i31()); - let from_index = from_ref.as_heap_index().unwrap().get(); + let from_index = from_ref.heap_index()?.get(); debug_assert!(self.is_in_idle_space(from_index)); - debug_assert!(!self.index(copying_ref(from_ref)).copied()); - - let size = self.index(copying_ref(from_ref)).object_size(); - let to_index = self.allocate(size).expect( - "there should always be enough room in the active semi-space for objects that \ - survived collection, since the active space is the same size as the idle space", - ); + debug_assert!(!self.index(copying_ref(from_ref))?.copied()); + + let size = self.index(copying_ref(from_ref))?.object_size(); + let to_index = match self.allocate(size) { + Some(i) => i, + None => { + bail_bug!( + "\ +there should always be enough room in the active semi-space for objects that \ +survived collection, since the active space is the same size as the idle space", + ) + } + }; debug_assert!(self.is_in_active_space(to_index)); - let to_ref = - VMGcRef::from_heap_index(NonZeroU32::new(to_index).unwrap()).expect("valid heap index"); + let to_ref = match NonZeroU32::new(to_index).and_then(VMGcRef::from_heap_index) { + Some(r) => r, + None => bail_bug!("invalid to_index"), + }; // Copy the object bytes. - let from_start = usize::try_from(from_index).unwrap(); - let to_start = usize::try_from(to_index).unwrap(); - let size_usize = usize::try_from(size).unwrap(); - self.heap_slice_mut() - .copy_within(from_start..from_start + size_usize, to_start); + let from_start = usize::try_from(from_index)?; + let to_start = usize::try_from(to_index)?; + let size_usize = usize::try_from(size)?; + let heap = self.heap_slice_mut(); + if heap + .get(from_start..) + .and_then(|s| s.get(..size_usize)) + .is_none() + || heap + .get(to_start..) + .and_then(|s| s.get(..size_usize)) + .is_none() + { + bail_bug!("corrupt gc heap"); + } + heap.copy_within(from_start..from_start + size_usize, to_start); // Set the forwarding ref in the old (idle-space) object. - self.index_mut(header_and_forwarding_ref(from_ref)) + self.index_mut(header_and_forwarding_ref(from_ref))? .set_forwarding_ref(to_ref.unchecked_copy()); // If this is an externref, insert it into the active externref list. if self - .index(copying_ref(&to_ref)) + .index(copying_ref(&to_ref))? .header .kind() .matches(VMGcKind::ExternRef) { let old_head = self.active_extern_ref_set_head.take(); - self.index_mut::(to_ref.as_typed_unchecked()) + self.index_mut::(to_ref.as_typed_unchecked())? .next_extern_ref = old_head; self.active_extern_ref_set_head = Some(to_ref.unchecked_copy().into_externref_unchecked()); } - self.worklist_insert(&to_ref); - to_ref + self.worklist_insert(&to_ref)?; + Ok(to_ref) } /// Trace a grey object's outgoing edges, copying their referents into the /// new semi-space if necessary, and updating the object's references with /// their forwarded locations in the new semi-space. - fn scan(&mut self, gc_ref: &VMGcRef, trace_infos: &TraceInfos) { + fn scan(&mut self, gc_ref: &VMGcRef, trace_infos: &TraceInfos) -> Result<()> { debug_assert!(!gc_ref.is_i31()); - let index = gc_ref.as_heap_index().unwrap().get(); + let index = gc_ref.heap_index()?.get(); debug_assert!(self.is_in_active_space(index)); - let ty = self.index(copying_ref(gc_ref)).header.ty(); + let ty = self.index(copying_ref(gc_ref))?.header.ty(); // `externref`s have no GC edges. 
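+        // Their host data is deallocated separately in `sweep_extern_refs`
+        // after the worklist is drained, so there is nothing to trace here.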
let Some(ty) = ty else { - return; + return Ok(()); }; - let object_start = usize::try_from(index).unwrap(); + let object_start = usize::try_from(index)?; match trace_infos.trace_info(&ty) { TraceInfo::Struct { gc_ref_offsets } => { for &offset in gc_ref_offsets { - self.scan_field(object_start, offset); + self.scan_field(object_start, offset)?; } } TraceInfo::Array { gc_ref_elems } => { if *gc_ref_elems { let array_ref = gc_ref.as_arrayref_unchecked(); - let len = self.array_len(array_ref); + let len = self.array_len(array_ref)?; for i in 0..len { - let elem_offset = GC_REF_ARRAY_ELEMS_OFFSET - + i * u32::try_from(mem::size_of::()).unwrap(); - self.scan_field(object_start, elem_offset); + let elem_offset = match i + .checked_mul(u32::try_from(mem::size_of::())?) + .and_then(|i| i.checked_add(GC_REF_ARRAY_ELEMS_OFFSET)) + { + Some(i) => i, + None => bail_bug!("array element offset overflow"), + }; + self.scan_field(object_start, elem_offset)?; } } } } + Ok(()) } #[inline] - fn scan_field(&mut self, object_start: usize, offset: u32) { - let offset = usize::try_from(offset).unwrap(); - let field_start = object_start + offset; + fn scan_field(&mut self, object_start: usize, offset: u32) -> Result<()> { + let offset = usize::try_from(offset)?; + let field_start = match object_start.checked_add(offset) { + Some(start) => start, + None => bail_bug!("field offset overflow"), + }; let field_end = field_start + mem::size_of::(); - let raw: [u8; 4] = self.heap_slice()[field_start..field_end] - .try_into() - .unwrap(); - let raw = u32::from_le_bytes(raw); + let raw = match self + .heap_slice() + .get(field_start..) + .and_then(|s| s.first_chunk()) + { + Some(raw) => u32::from_le_bytes(*raw), + None => bail_bug!("corrupt gc heap"), + }; if let Some(child) = VMGcRef::from_raw_u32(raw) && !child.is_i31() { - debug_assert!(self.is_in_idle_space(child.as_heap_index().unwrap().get())); - let new_ref = self.forward(&child); - debug_assert!(self.is_in_active_space(new_ref.as_heap_index().unwrap().get())); + debug_assert!(self.is_in_idle_space(child.heap_index()?.get())); + let new_ref = self.forward(&child)?; + debug_assert!(self.is_in_active_space(new_ref.heap_index()?.get())); // Write the new reference back. let new_raw = new_ref.as_raw_u32().to_le_bytes(); self.heap_slice_mut()[field_start..field_end].copy_from_slice(&new_raw); } + + Ok(()) } } @@ -674,15 +713,17 @@ unsafe impl GcHeap for CopyingHeap { _host_data_table: &mut ExternRefHostDataTable, destination: &mut Option, source: Option<&VMGcRef>, - ) { + ) -> Result<()> { // The copying collector doesn't use reference counting; writes are // simple overwrites. *destination = source.map(|s| s.unchecked_copy()); + Ok(()) } - fn expose_gc_ref_to_wasm(&mut self, _gc_ref: VMGcRef) { + fn expose_gc_ref_to_wasm(&mut self, _gc_ref: VMGcRef) -> Result<()> { // The copying collector doesn't need any special handling when exposing // a GC ref to Wasm. There is no over-approximated-stack-roots list. + Ok(()) } fn alloc_externref( @@ -694,14 +735,14 @@ unsafe impl GcHeap for CopyingHeap { let size = (size + align - 1) & !(align - 1); let gc_ref = match self.alloc_raw( VMGcHeader::externref(), - Layout::from_size_align(size, align).unwrap(), + Layout::from_size_align(size, align)?, )? { Err(n) => return Ok(Err(n)), Ok(gc_ref) => gc_ref, }; // Take the old list head before borrowing self mutably through index_mut. 
let old_head = self.active_extern_ref_set_head.take(); - let externref_obj = self.index_mut::(gc_ref.as_typed_unchecked()); + let externref_obj = self.index_mut::(gc_ref.as_typed_unchecked())?; externref_obj.host_data = host_data; externref_obj.next_extern_ref = old_head; let externref = gc_ref.into_externref_unchecked(); @@ -709,13 +750,13 @@ unsafe impl GcHeap for CopyingHeap { Ok(Ok(externref)) } - fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId { + fn externref_host_data(&self, externref: &VMExternRef) -> Result { let typed_ref = externref_to_copying(externref); - self.index(typed_ref).host_data + Ok(self.index(typed_ref)?.host_data) } - fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader { - let header: &VMGcHeader = self.index(gc_ref.as_typed_unchecked()); + fn header(&self, gc_ref: &VMGcRef) -> Result<&VMGcHeader> { + let header: &VMGcHeader = self.index(gc_ref.as_typed_unchecked())?; debug_assert!( VMGcKind::try_from_u32(header.kind().as_u32()).is_some(), @@ -723,11 +764,11 @@ unsafe impl GcHeap for CopyingHeap { header.kind().as_u32(), ); - header + Ok(header) } - fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader { - let header: &mut VMGcHeader = self.index_mut(gc_ref.as_typed_unchecked()); + fn header_mut(&mut self, gc_ref: &VMGcRef) -> Result<&mut VMGcHeader> { + let header: &mut VMGcHeader = self.index_mut(gc_ref.as_typed_unchecked())?; debug_assert!( VMGcKind::try_from_u32(header.kind().as_u32()).is_some(), @@ -735,11 +776,13 @@ unsafe impl GcHeap for CopyingHeap { header.kind().as_u32(), ); - header + Ok(header) } - fn object_size(&self, gc_ref: &VMGcRef) -> usize { - usize::try_from(self.index(copying_ref(gc_ref)).object_size()).unwrap() + fn object_size(&self, gc_ref: &VMGcRef) -> Result { + Ok(usize::try_from( + self.index(copying_ref(gc_ref))?.object_size(), + )?) } fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result> { @@ -788,7 +831,7 @@ unsafe impl GcHeap for CopyingHeap { // Assert that the newly-allocated memory is still filled with the // poison pattern. if cfg!(gc_zeal) { - let start = usize::try_from(gc_ref.as_heap_index().unwrap().get()).unwrap(); + let start = usize::try_from(gc_ref.heap_index()?.get()).unwrap(); let slice = &self.heap_slice()[start..][..layout.size()]; gc_assert!( slice.iter().all(|&b| b == POISON), @@ -798,7 +841,7 @@ unsafe impl GcHeap for CopyingHeap { } let object_size = size; - *self.index_mut(copying_ref(&gc_ref)) = VMCopyingHeader { + *self.index_mut(copying_ref(&gc_ref))? = VMCopyingHeader { header, object_size, }; @@ -824,9 +867,10 @@ unsafe impl GcHeap for CopyingHeap { Ok(Ok(gc_ref)) } - fn dealloc_uninit_struct_or_exn(&mut self, _gcref: VMGcRef) { + fn dealloc_uninit_struct_or_exn(&mut self, _gcref: VMGcRef) -> Result<()> { // The copying collector doesn't support individual deallocation; memory // is reclaimed during collection. + Ok(()) } fn alloc_uninit_array( @@ -847,21 +891,23 @@ unsafe impl GcHeap for CopyingHeap { Ok(gc_ref) => gc_ref, }; - self.index_mut(gc_ref.as_typed_unchecked::()) + self.index_mut(gc_ref.as_typed_unchecked::())? .length = length; Ok(Ok(gc_ref.into_arrayref_unchecked())) } - fn dealloc_uninit_array(&mut self, _arrayref: VMArrayRef) { + fn dealloc_uninit_array(&mut self, _arrayref: VMArrayRef) -> Result<()> { // The copying collector doesn't support individual deallocation; memory // is reclaimed during collection. 
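+        // The `Result` return type only mirrors the fallible DRC
+        // implementation of this trait method; this implementation cannot
+        // fail.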
+ Ok(()) } - fn array_len(&self, arrayref: &VMArrayRef) -> u32 { + fn array_len(&self, arrayref: &VMArrayRef) -> Result { debug_assert!(arrayref.as_gc_ref().is_typed::(self)); - self.index::(arrayref.as_gc_ref().as_typed_unchecked()) - .length + Ok(self + .index::(arrayref.as_gc_ref().as_typed_unchecked())? + .length) } fn allocated_bytes(&self) -> usize { @@ -951,67 +997,64 @@ enum CopyingCollectionPhase { impl CopyingCollection<'_> { /// Forward all GC roots from the idle space to the active space. - fn process_roots(&mut self) { + fn process_roots(&mut self) -> Result<()> { log::trace!("Begin processing GC roots"); let roots = self.roots.take().unwrap(); for mut root in roots { - let gc_ref = root.get(); + let gc_ref = root.get()?; if gc_ref.is_i31() { continue; } - let old_index = gc_ref.as_heap_index().unwrap().get(); + let old_index = gc_ref.heap_index()?.get(); debug_assert!(self.heap.is_in_idle_space(old_index)); - let new_ref = self.heap.forward(&gc_ref); + let new_ref = self.heap.forward(&gc_ref)?; root.set(new_ref); } log::trace!("End processing GC roots"); + Ok(()) } /// Scan all grey objects until the worklist is empty. - fn process_worklist(&mut self) { + fn process_worklist(&mut self) -> Result<()> { log::trace!("Begin processing worklist"); let trace_infos = mem::take(&mut self.heap.trace_infos); - while let Some(gc_ref) = self.heap.worklist_pop() { - debug_assert!( - self.heap - .is_in_active_space(gc_ref.as_heap_index().unwrap().get()) - ); - self.heap.scan(&gc_ref, &trace_infos); + while let Some(gc_ref) = self.heap.worklist_pop()? { + debug_assert!(self.heap.is_in_active_space(gc_ref.heap_index()?.get())); + self.heap.scan(&gc_ref, &trace_infos)?; } self.heap.trace_infos = trace_infos; log::trace!("End processing worklist"); + Ok(()) } /// Clean up dead externrefs by iterating the idle semi-space's externref /// linked list and deallocating host data for any that were not forwarded. - fn sweep_extern_refs(&mut self) { + fn sweep_extern_refs(&mut self) -> Result<()> { log::trace!("Begin sweeping `externref`s"); let mut link = self.heap.idle_extern_ref_set_head.take(); while let Some(externref) = link { let gc_ref = externref.as_gc_ref(); - debug_assert!( - self.heap - .is_in_idle_space(gc_ref.as_heap_index().unwrap().get()) - ); - let header = self.heap.index(copying_ref(gc_ref)); + debug_assert!(self.heap.is_in_idle_space(gc_ref.heap_index()?.get())); + let header = self.heap.index(copying_ref(gc_ref))?; if !header.copied() { let typed: &TypedGcRef = gc_ref.as_typed_unchecked(); - let host_data_id = self.heap.index(typed).host_data; - self.host_data_table.dealloc(host_data_id); + let host_data_id = self.heap.index(typed)?.host_data; + self.host_data_table.dealloc(host_data_id)?; } link = self .heap - .index::(gc_ref.as_typed_unchecked()) + .index::(gc_ref.as_typed_unchecked())? 
.next_extern_ref .as_ref() .map(|e| e.unchecked_copy()); } log::trace!("End sweeping `externref`s"); + Ok(()) } } impl GarbageCollection<'_> for CopyingCollection<'_> { - fn collect_increment(&mut self) -> GcProgress { + fn collect_increment(&mut self) -> Result { match self.phase { CopyingCollectionPhase::Collect => { log::trace!("Begin copying collection"); @@ -1028,8 +1071,8 @@ impl GarbageCollection<'_> for CopyingCollection<'_> { self.heap.flip(); self.heap.initialize_worklist(); - self.process_roots(); - self.process_worklist(); + self.process_roots()?; + self.process_worklist()?; assert!(self.heap.active_space_start <= self.heap.bump_ptr()); assert!(self.heap.bump_ptr() <= self.heap.active_space_end()); @@ -1039,7 +1082,7 @@ impl GarbageCollection<'_> for CopyingCollection<'_> { || self.heap.idle_space_end <= self.heap.active_space_start ); - self.sweep_extern_refs(); + self.sweep_extern_refs()?; self.heap.resize_semi_spaces(); debug_assert_eq!( @@ -1057,9 +1100,9 @@ impl GarbageCollection<'_> for CopyingCollection<'_> { log::trace!("End copying collection"); self.phase = CopyingCollectionPhase::Done; - GcProgress::Complete + Ok(GcProgress::Complete) } - CopyingCollectionPhase::Done => GcProgress::Complete, + CopyingCollectionPhase::Done => Ok(GcProgress::Complete), } } } diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs index ac76c470ddc2..2f3f0d1e7cae 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs @@ -44,16 +44,17 @@ //! Examination of Deferred Reference Counting and Cycle Detection* by Quinane: //! +use super::VMArrayRef; use super::free_list::FreeList; use super::trace_info::{TraceInfo, TraceInfos}; -use super::{VMArrayRef, VMGcObjectData}; use crate::hash_set::HashSet; use crate::runtime::vm::{ ExternRefHostDataId, ExternRefHostDataTable, GarbageCollection, GcHeap, GcHeapObject, - GcProgress, GcRootsIter, GcRuntime, TypedGcRef, VMExternRef, VMGcHeader, VMGcRef, + GcProgress, GcRootsIter, GcRuntime, TypedGcRef, VMExternRef, VMGcHeader, VMGcObjectData, + VMGcRef, }; use crate::vm::VMMemoryDefinition; -use crate::{Engine, prelude::*}; +use crate::{Engine, bail_bug, prelude::*}; use core::sync::atomic::AtomicUsize; use core::{ alloc::Layout, @@ -62,6 +63,7 @@ use core::{ ops::{Deref, DerefMut, Range}, ptr::NonNull, }; +use wasmtime_core::undo::Undo; use wasmtime_environ::drc::{ARRAY_LENGTH_OFFSET, DrcTypeLayouts}; use wasmtime_environ::{ GcArrayLayout, GcStructLayout, GcTypeLayouts, POISON, VMGcKind, VMSharedTypeIndex, gc_assert, @@ -122,29 +124,30 @@ struct DrcHeap { /// A free list describing which ranges of the heap are available for use. free_list: Option, + /// Allocations used during tracing, temporarily removed from `self` for + /// easier borrow-checker management. + tracing_allocs: Option, + + /// Running total of bytes currently allocated (live objects) in this heap. + allocated_bytes: usize, +} + +struct TracingAllocs { /// An explicit stack to avoid recursion when deallocating one object needs /// to dec-ref another object, which can then be deallocated and dec-refs /// yet another object, etc... /// /// We store this stack here to reuse the storage and avoid repeated /// allocations. 
-    ///
-    /// Note that the `Option` is perhaps technically unnecessary (we could
-    /// remove the `Option` and, when we take the stack out of `self`, leave
-    /// behind an empty vec instead of `None`) but we keep it because it will
-    /// help us catch unexpected re-entry, similar to how a `RefCell` would.
-    dec_ref_stack: Option<Vec<VMGcRef>>,
+    dec_ref_stack: Vec<VMGcRef>,
 
     /// An explicit stack for arrays that are too large to push all their
     /// elements onto `dec_ref_stack` at once. Each entry is an array GC
     /// reference and the range of element indices remaining to process.
-    large_array_dec_ref_stack: Option<Vec<(VMGcRef, Range<u32>)>>,
+    large_array_dec_ref_stack: Vec<(VMGcRef, Range<u32>)>,
 
     /// A batched set of GC refs to deallocate all at once.
-    to_dealloc: Option<Vec<VMGcRef>>,
-
-    /// Running total of bytes currently allocated (live objects) in this heap.
-    allocated_bytes: usize,
+    to_dealloc: Vec<VMGcRef>,
 }
 
 impl DrcHeap {
@@ -158,43 +161,50 @@ impl DrcHeap {
             memory: None,
             vmmemory: None,
             free_list: None,
-            dec_ref_stack: Some(Vec::with_capacity(1)),
-            large_array_dec_ref_stack: Some(Vec::with_capacity(1)),
-            to_dealloc: Some(Vec::with_capacity(1)),
+            tracing_allocs: Some(TracingAllocs {
+                dec_ref_stack: Vec::with_capacity(1),
+                large_array_dec_ref_stack: Vec::with_capacity(1),
+                to_dealloc: Vec::with_capacity(1),
+            }),
             allocated_bytes: 0,
         })
     }
 
-    fn dealloc(&mut self, gc_ref: VMGcRef) {
+    fn dealloc(&mut self, gc_ref: VMGcRef) -> Result<()> {
         let drc_ref = drc_ref(&gc_ref);
-        let size = self.index(drc_ref).object_size;
-        let alloc_size = FreeList::aligned_size(size).unwrap();
-        let index = gc_ref.as_heap_index().unwrap();
+        let size = self.index(drc_ref)?.object_size;
+        let alloc_size = match FreeList::aligned_size(size) {
+            Some(size) => size,
+            None => bail_bug!("aligned size overflow"),
+        };
+        let index = gc_ref.heap_index()?;
 
         // Poison the freed memory so that any stale access is detectable.
         if cfg!(gc_zeal) {
-            let index = usize::try_from(index.get()).unwrap();
-            let alloc_size = usize::try_from(alloc_size).unwrap();
+            let index = usize::try_from(index.get())?;
+            let alloc_size = usize::try_from(alloc_size)?;
             self.heap_slice_mut()[index..][..alloc_size].fill(POISON);
         }
 
-        self.allocated_bytes -= usize::try_from(alloc_size).unwrap();
+        self.allocated_bytes -= usize::try_from(alloc_size)?;
         self.free_list
             .as_mut()
             .unwrap()
             .dealloc_fast(index, alloc_size);
+        Ok(())
     }
 
     /// Increment the ref count for the associated object.
-    fn inc_ref(&mut self, gc_ref: &VMGcRef) {
+    fn inc_ref(&mut self, gc_ref: &VMGcRef) -> Result<()> {
         if gc_ref.is_i31() {
-            return;
+            return Ok(());
         }
 
         let drc_ref = drc_ref(gc_ref);
-        let header = self.index_mut(&drc_ref);
+        let header = self.index_mut(&drc_ref)?;
         header.inc_ref();
         log::trace!("increment {:#p} ref count -> {}", *gc_ref, header.ref_count);
+        Ok(())
     }
 
     /// Decrement the ref count for the associated object.
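The hunk that follows (`dec_ref_and_maybe_dealloc`) temporarily moves `tracing_allocs` out of `self` and wraps the borrow in an `Undo` guard so the allocations are put back on every exit path, including the early `?` returns this PR introduces. `wasmtime_core::undo::Undo` itself is not part of this diff; the sketch below reconstructs the semantics its call site implies (wrap a value, hand it back out through `Deref`/`DerefMut`, run a restoration closure on the value when dropped). Treat the details as assumptions for illustration.

```rust
use std::mem::ManuallyDrop;
use std::ops::{Deref, DerefMut};

/// RAII guard: owns a value and runs `restore` on it when dropped.
struct Undo<T, F: FnOnce(T)> {
    value: ManuallyDrop<T>,
    restore: ManuallyDrop<F>,
}

impl<T, F: FnOnce(T)> Undo<T, F> {
    fn new(value: T, restore: F) -> Self {
        Undo {
            value: ManuallyDrop::new(value),
            restore: ManuallyDrop::new(restore),
        }
    }
}

impl<T, F: FnOnce(T)> Deref for Undo<T, F> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T, F: FnOnce(T)> DerefMut for Undo<T, F> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T, F: FnOnce(T)> Drop for Undo<T, F> {
    fn drop(&mut self) {
        // SAFETY: each field is taken exactly once, here in `drop`.
        let value = unsafe { ManuallyDrop::take(&mut self.value) };
        let restore = unsafe { ManuallyDrop::take(&mut self.restore) };
        restore(value);
    }
}

fn main() {
    // Mimic the call site: take state out of a slot, work on it, and rely on
    // the guard to put it back even if a `?` had returned early.
    let mut slot = Some(vec![1, 2, 3]);
    let taken = slot.take().unwrap();
    let mut guard = Undo::new((&mut slot, taken), |(slot, v)| *slot = Some(v));
    let (_slot, v) = &mut *guard;
    v.push(4);
    drop(guard);
    assert_eq!(slot.as_deref(), Some(&[1, 2, 3, 4][..]));
}
```

The guard matters here because the rewritten function can now return early via `?`; the old code simply reassigned each `Option` field at the end of the function and had no early exits to worry about.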
@@ -209,18 +219,26 @@ impl DrcHeap { &mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: &VMGcRef, - ) { + ) -> Result<()> { if gc_ref.is_i31() { - return; + return Ok(()); } - let mut stack = self.dec_ref_stack.take().unwrap(); - debug_assert!(stack.is_empty()); + let allocs = match self.tracing_allocs.take() { + Some(allocs) => allocs, + None => bail_bug!("allocs missing during tracing"), + }; + let mut undo = Undo::new((self, allocs), |(this, allocs)| { + debug_assert!(this.tracing_allocs.is_none()); + this.tracing_allocs = Some(allocs); + }); + let (this, allocs) = &mut *undo; + let stack = &mut allocs.dec_ref_stack; + let large_array_stack = &mut allocs.large_array_dec_ref_stack; + let to_dealloc = &mut allocs.to_dealloc; - let mut large_array_stack = self.large_array_dec_ref_stack.take().unwrap(); + debug_assert!(stack.is_empty()); debug_assert!(large_array_stack.is_empty()); - - let mut to_dealloc = self.to_dealloc.take().unwrap(); debug_assert!(to_dealloc.is_empty()); stack.push(gc_ref.unchecked_copy()); @@ -230,7 +248,7 @@ impl DrcHeap { debug_assert!(!gc_ref.is_i31()); // Read the DRC header once to get ref_count, type, and object_size. - let drc_header = self.index_mut(drc_ref(&gc_ref)); + let drc_header = this.index_mut(drc_ref(&gc_ref))?; log::trace!( "decrement {:#p} ref count -> {}", gc_ref, @@ -246,33 +264,33 @@ impl DrcHeap { // Trace: enqueue child GC refs for dec-ref'ing. if let Some(ty) = ty { - match self.trace_infos.trace_info(&ty) { + match this.trace_infos.trace_info(&ty) { TraceInfo::Struct { gc_ref_offsets } => { stack.reserve(gc_ref_offsets.len()); - let data = self.gc_object_data(&gc_ref); + let data = this.gc_object_data(&gc_ref)?; for offset in gc_ref_offsets { - Self::trace_offset(&mut stack, data, *offset); + Self::trace_offset(stack, data, *offset)?; } } TraceInfo::Array { gc_ref_elems: true } => { - let len = self.array_len(gc_ref.as_arrayref_unchecked()); - let len_usize = usize::try_from(len).unwrap(); + let len = this.array_len(gc_ref.as_arrayref_unchecked())?; + let len_usize = usize::try_from(len)?; if stack.len() + len_usize <= MAX_ARRAY_STACK_DEPTH { - let data = self.gc_object_data(&gc_ref); + let data = this.gc_object_data(&gc_ref)?; stack.reserve(len_usize); for i in 0..len { - Self::trace_array_elem(&mut stack, data, i); + Self::trace_array_elem(stack, data, i)?; } } else { // Only push the first `n` elements onto the // stack; process the rest via the // `large_array_stack`. let n = MAX_ARRAY_STACK_DEPTH.saturating_sub(stack.len()); - let n = u32::try_from(n).unwrap(); - let data = self.gc_object_data(&gc_ref); + let n = u32::try_from(n)?; + let data = this.gc_object_data(&gc_ref)?; for i in 0..n { - Self::trace_array_elem(&mut stack, data, i); + Self::trace_array_elem(stack, data, i)?; } large_array_stack.push((gc_ref.unchecked_copy(), n..len)); @@ -292,9 +310,12 @@ impl DrcHeap { // data, and `ty` is `None` only for `externref`s, so we skip // this for `struct` and `array` objects entirely. 
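+                // A header whose kind is not `externref` at this point means
+                // the heap is corrupt; the `bail_bug!` below reports exactly
+                // that.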
debug_assert!(drc_header.header.kind().matches(VMGcKind::ExternRef)); - let externref = gc_ref.as_typed::(self).unwrap(); - let host_data_id = self.index(externref).host_data; - host_data_table.dealloc(host_data_id); + let externref = match gc_ref.as_typed::(*this) { + Some(r) => r, + None => bail_bug!("expected externref"), + }; + let host_data_id = this.index(externref)?.host_data; + host_data_table.dealloc(host_data_id)?; } to_dealloc.push(gc_ref); @@ -302,9 +323,9 @@ impl DrcHeap { if let Some((gc_ref, mut elems)) = large_array_stack.pop() { // Add the next chunk of array elements onto the stack. - let data = self.gc_object_data(&gc_ref); + let data = this.gc_object_data(&gc_ref)?; for i in elems.by_ref().take(MAX_ARRAY_STACK_DEPTH) { - Self::trace_array_elem(&mut stack, data, i); + Self::trace_array_elem(stack, data, i)?; } // If we are done processing this array, then enqueue it for @@ -322,37 +343,31 @@ impl DrcHeap { // Deallocate the dead objects and return their memory blocks to the // free list. for gc_ref in to_dealloc.drain(..) { - self.dealloc(gc_ref); + this.dealloc(gc_ref)?; } debug_assert!(stack.is_empty()); - debug_assert!(self.dec_ref_stack.is_none()); - self.dec_ref_stack = Some(stack); - debug_assert!(large_array_stack.is_empty()); - debug_assert!(self.large_array_dec_ref_stack.is_none()); - self.large_array_dec_ref_stack = Some(large_array_stack); - debug_assert!(to_dealloc.is_empty()); - debug_assert!(self.to_dealloc.is_none()); - self.to_dealloc = Some(to_dealloc); + + Ok(()) } #[inline] - fn trace_array_elem(stack: &mut Vec, data: &VMGcObjectData, i: u32) { - let elem_offset = - GC_REF_ARRAY_ELEMS_OFFSET + i * u32::try_from(mem::size_of::()).unwrap(); + fn trace_array_elem(stack: &mut Vec, data: &VMGcObjectData, i: u32) -> Result<()> { + let elem_offset = GC_REF_ARRAY_ELEMS_OFFSET + i * u32::try_from(mem::size_of::())?; Self::trace_offset(stack, data, elem_offset) } #[inline] - fn trace_offset(stack: &mut Vec, data: &VMGcObjectData, offset: u32) { - let raw = data.read_u32(offset); + fn trace_offset(stack: &mut Vec, data: &VMGcObjectData, offset: u32) -> Result<()> { + let raw = data.read_u32(offset)?; if let Some(gc_ref) = VMGcRef::from_raw_u32(raw) && !gc_ref.is_i31() { stack.push(gc_ref); } + Ok(()) } /// Iterate over the over-approximated-stack-roots list. @@ -363,23 +378,26 @@ impl DrcHeap { core::iter::from_fn(move || { let r = link.as_ref()?.unchecked_copy(); - link = self.index(drc_ref(&r)).next_over_approximated_stack_root(); + link = self + .index(drc_ref(&r)) + .ok()? + .next_over_approximated_stack_root(); Some(r) }) } /// Assert the integrity of the over-approximated stack roots list. - fn assert_over_approximated_stack_roots_integrity(&self) { + fn assert_over_approximated_stack_roots_integrity(&self) -> Result<()> { if !cfg!(gc_zeal) { - return; + return Ok(()); } let mut visited = HashSet::new(); for gc_ref in self.iter_over_approximated_stack_roots() { - let idx = gc_ref.as_heap_index().unwrap().get(); + let idx = gc_ref.heap_index()?.get(); // Each entry must have a valid `VMGcKind`. - let header = self.header(&gc_ref); + let header = self.header(&gc_ref)?; let kind = header.kind().as_u32(); assert!( VMGcKind::try_from_u32(kind).is_some(), @@ -387,7 +405,7 @@ impl DrcHeap { ); // Each entry must have its in-list bit set. 
- let drc_header = self.index(drc_ref(&gc_ref)); + let drc_header = self.index(drc_ref(&gc_ref))?; assert!( drc_header.is_in_over_approximated_stack_roots(), "over-approx list: entry at heap index {idx} does not have in-list bit set", @@ -405,6 +423,8 @@ impl DrcHeap { "over-approx list: cycle or duplicate detected at heap index {idx}", ); } + + Ok(()) } /// Assert that every free block in the free list is filled with the poison @@ -426,7 +446,7 @@ impl DrcHeap { } } - fn trace(&mut self, roots: &mut GcRootsIter<'_>) { + fn trace(&mut self, roots: &mut GcRootsIter<'_>) -> Result<()> { // The `over_approx_set` is used for `debug_assert!`s checking that // every reference we read out from the stack via stack maps is actually // in the table. If that weren't true, than either we forgot to insert a @@ -447,7 +467,7 @@ impl DrcHeap { continue; } - let gc_ref = root.get(); + let gc_ref = root.get()?; if gc_ref.is_i31() { continue; @@ -462,21 +482,22 @@ impl DrcHeap { but {gc_ref:#p} is not in the set", ); debug_assert!( - self.index(drc_ref(&gc_ref)) + self.index(drc_ref(&gc_ref))? .is_in_over_approximated_stack_roots(), "every on-stack gc ref inside a Wasm frame should have \ its in-the-over-approximated-stack-roots-list bit set", ); debug_assert_ne!( - self.index_mut(drc_ref(&gc_ref)).ref_count, + self.index_mut(drc_ref(&gc_ref))?.ref_count, 0, "{gc_ref:#p} is on the Wasm stack and therefore should be held \ alive by the over-approximated-stack-roots set; should have \ nonzero ref count", ); - self.index_mut(drc_ref(&gc_ref)).set_marked(); + self.index_mut(drc_ref(&gc_ref))?.set_marked(); } + Ok(()) } #[inline(never)] @@ -498,7 +519,7 @@ impl DrcHeap { /// Sweep the bump allocation table after we've discovered our precise stack /// roots. - fn sweep(&mut self, host_data_table: &mut ExternRefHostDataTable) { + fn sweep(&mut self, host_data_table: &mut ExternRefHostDataTable) -> Result<()> { if log::log_enabled!(log::Level::Trace) { Self::log_gc_ref_set( "over-approximated-stack-roots set before sweeping", @@ -549,7 +570,7 @@ impl DrcHeap { while let Some(gc_ref) = next { log::trace!("sweeping gc ref: {gc_ref:#p}"); - let header = self.index_mut(drc_ref(&gc_ref)); + let header = self.index_mut(drc_ref(&gc_ref))?; debug_assert!(header.is_in_over_approximated_stack_roots()); if header.clear_marked() { @@ -578,10 +599,10 @@ impl DrcHeap { match &prev { None => *self.over_approximated_stack_roots = prev_next, Some(prev) => self - .index_mut(drc_ref(prev)) + .index_mut(drc_ref(prev))? 
.set_next_over_approximated_stack_root(prev_next), } - self.dec_ref_and_maybe_dealloc(host_data_table, &gc_ref); + self.dec_ref_and_maybe_dealloc(host_data_table, &gc_ref)?; } log::trace!("Done sweeping"); @@ -592,6 +613,8 @@ impl DrcHeap { self.iter_over_approximated_stack_roots(), ); } + + Ok(()) } } @@ -783,9 +806,7 @@ unsafe impl GcHeap for DrcHeap { no_gc_count, over_approximated_stack_roots, free_list, - dec_ref_stack, - large_array_dec_ref_stack, - to_dealloc, + tracing_allocs, memory, vmmemory, allocated_bytes, @@ -801,13 +822,11 @@ unsafe impl GcHeap for DrcHeap { *free_list = None; *vmmemory = None; *allocated_bytes = 0; - debug_assert!(dec_ref_stack.as_ref().is_some_and(|s| s.is_empty())); - debug_assert!( - large_array_dec_ref_stack - .as_ref() - .is_some_and(|s| s.is_empty()) - ); - debug_assert!(to_dealloc.as_ref().is_some_and(|d| d.is_empty())); + debug_assert!(tracing_allocs.as_ref().is_some_and(|allocs| { + allocs.dec_ref_stack.is_empty() + && allocs.large_array_dec_ref_stack.is_empty() + && allocs.to_dealloc.is_empty() + })); memory.take().unwrap() } @@ -833,7 +852,18 @@ unsafe impl GcHeap for DrcHeap { } fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef { - self.inc_ref(gc_ref); + // If incrementing the reference count fails then that means that the GC + // heap is corrupted. Plumbing this result all throughout Wasmtime has + // quite large implications which aren't necessarily worth the tradeoff. + // This is the only collector where this is a fallible operation, for + // example. For now catch this in debug mode but otherwise just leave + // the corruption to get detected later. This corrupted reference will + // trigger an error later on instead. + if let Err(e) = self.inc_ref(gc_ref) { + if cfg!(debug_assertions) { + panic!("gc heap corrupted: {e}"); + } + } gc_ref.unchecked_copy() } @@ -842,29 +872,30 @@ unsafe impl GcHeap for DrcHeap { host_data_table: &mut ExternRefHostDataTable, destination: &mut Option, source: Option<&VMGcRef>, - ) { + ) -> Result<()> { // Increment the ref count of the object being written into the slot. if let Some(src) = source { - self.inc_ref(src); + self.inc_ref(src)?; } // Decrement the ref count of the value being overwritten and, if // necessary, deallocate the GC object. if let Some(dest) = destination { - self.dec_ref_and_maybe_dealloc(host_data_table, dest); + self.dec_ref_and_maybe_dealloc(host_data_table, dest)?; } // Do the actual write. *destination = source.map(|s| s.unchecked_copy()); + Ok(()) } - fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) { + fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) -> Result<()> { // Read the current list head before borrowing through index_mut. let next = (*self.over_approximated_stack_roots) .as_ref() .map(|r| r.unchecked_copy()); - let header = self.index_mut(drc_ref(&gc_ref)); + let header = self.index_mut(drc_ref(&gc_ref))?; if header.is_in_over_approximated_stack_roots() { // Already in the over-approximated-stack-roots list. 
Decrement the // object's ref count because the OASR list can't hold multiple @@ -875,7 +906,7 @@ unsafe impl GcHeap for DrcHeap { "should not have reached refcount == 0 because the OASR list \ is holding a reference" ); - return; + return Ok(()); } // Push this object onto the head of the over-approximated-stack-roots @@ -883,6 +914,7 @@ unsafe impl GcHeap for DrcHeap { header.set_in_over_approximated_stack_roots_bit(true); header.set_next_over_approximated_stack_root(next); *self.over_approximated_stack_roots = Some(gc_ref); + Ok(()) } fn alloc_externref( @@ -894,18 +926,18 @@ unsafe impl GcHeap for DrcHeap { Err(n) => return Ok(Err(n)), Ok(gc_ref) => gc_ref, }; - self.index_mut::(gc_ref.as_typed_unchecked()) + self.index_mut::(gc_ref.as_typed_unchecked())? .host_data = host_data; Ok(Ok(gc_ref.into_externref_unchecked())) } - fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId { + fn externref_host_data(&self, externref: &VMExternRef) -> Result { let typed_ref = externref_to_drc(externref); - self.index(typed_ref).host_data + Ok(self.index(typed_ref)?.host_data) } - fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader { - let header: &VMGcHeader = self.index(gc_ref.as_typed_unchecked()); + fn header(&self, gc_ref: &VMGcRef) -> Result<&VMGcHeader> { + let header: &VMGcHeader = self.index(gc_ref.as_typed_unchecked())?; debug_assert!( VMGcKind::try_from_u32(header.kind().as_u32()).is_some(), @@ -913,11 +945,11 @@ unsafe impl GcHeap for DrcHeap { header.kind().as_u32(), ); - header + Ok(header) } - fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader { - let header: &mut VMGcHeader = self.index_mut(gc_ref.as_typed_unchecked()); + fn header_mut(&mut self, gc_ref: &VMGcRef) -> Result<&mut VMGcHeader> { + let header: &mut VMGcHeader = self.index_mut(gc_ref.as_typed_unchecked())?; debug_assert!( VMGcKind::try_from_u32(header.kind().as_u32()).is_some(), @@ -925,11 +957,11 @@ unsafe impl GcHeap for DrcHeap { header.kind().as_u32(), ); - header + Ok(header) } - fn object_size(&self, gc_ref: &VMGcRef) -> usize { - self.index(drc_ref(gc_ref)).object_size() + fn object_size(&self, gc_ref: &VMGcRef) -> Result { + Ok(self.index(drc_ref(gc_ref))?.object_size()) } fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result> { @@ -958,17 +990,20 @@ unsafe impl GcHeap for DrcHeap { .ok_or_else(|| format_err!("allocation size too large"))?; let gc_ref = match self.free_list.as_mut().unwrap().alloc_fast(alloc_size) { - None => return Ok(Err(u64::try_from(layout.size()).unwrap())), - Some(index) => VMGcRef::from_heap_index(index).unwrap_or_else(|| { - panic!("invalid GC heap index returned from free list alloc: {index:#x}") - }), + None => return Ok(Err(u64::try_from(layout.size())?)), + Some(index) => match VMGcRef::from_heap_index(index) { + Some(r) => r, + None => { + bail_bug!("invalid GC heap index returned from free list alloc: {index:#x}") + } + }, }; // Assert that the newly-allocated memory is still filled with the // poison pattern, and hasn't been corrupted since deallocation (or // initial heap creation). if cfg!(gc_zeal) { - let start = usize::try_from(gc_ref.as_heap_index().unwrap().get()).unwrap(); + let start = usize::try_from(gc_ref.heap_index()?.get())?; let slice = &self.heap_slice()[start..][..layout.size()]; gc_assert!( slice.iter().all(|&b| b == POISON), @@ -977,13 +1012,13 @@ unsafe impl GcHeap for DrcHeap { ); } - *self.index_mut(drc_ref(&gc_ref)) = VMDrcHeader { + *self.index_mut(drc_ref(&gc_ref))? 
= VMDrcHeader { header, ref_count: 1, next_over_approximated_stack_root: None, object_size, }; - self.allocated_bytes += usize::try_from(alloc_size).unwrap(); + self.allocated_bytes += usize::try_from(alloc_size)?; log::trace!("new object: increment {gc_ref:#p} ref count -> 1"); Ok(Ok(gc_ref)) } @@ -1007,8 +1042,8 @@ unsafe impl GcHeap for DrcHeap { Ok(Ok(gc_ref)) } - fn dealloc_uninit_struct_or_exn(&mut self, gcref: VMGcRef) { - self.dealloc(gcref); + fn dealloc_uninit_struct_or_exn(&mut self, gcref: VMGcRef) -> Result<()> { + self.dealloc(gcref) } fn alloc_uninit_array( @@ -1028,20 +1063,21 @@ unsafe impl GcHeap for DrcHeap { Ok(gc_ref) => gc_ref, }; - self.index_mut(gc_ref.as_typed_unchecked::()) + self.index_mut(gc_ref.as_typed_unchecked::())? .length = length; Ok(Ok(gc_ref.into_arrayref_unchecked())) } - fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) { + fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) -> Result<()> { self.dealloc(arrayref.into()) } - fn array_len(&self, arrayref: &VMArrayRef) -> u32 { + fn array_len(&self, arrayref: &VMArrayRef) -> Result { debug_assert!(arrayref.as_gc_ref().is_typed::(self)); - self.index::(arrayref.as_gc_ref().as_typed_unchecked()) - .length + Ok(self + .index::(arrayref.as_gc_ref().as_typed_unchecked())? + .length) } fn allocated_bytes(&self) -> usize { @@ -1121,39 +1157,39 @@ enum DrcCollectionPhase { } impl<'a> GarbageCollection<'a> for DrcCollection<'a> { - fn collect_increment(&mut self) -> GcProgress { + fn collect_increment(&mut self) -> Result { match self.phase { DrcCollectionPhase::Trace => { log::trace!("Begin DRC trace"); - self.heap.assert_over_approximated_stack_roots_integrity(); + self.heap.assert_over_approximated_stack_roots_integrity()?; self.heap.assert_free_blocks_are_poisoned(); - self.heap.trace(&mut self.roots); + self.heap.trace(&mut self.roots)?; - self.heap.assert_over_approximated_stack_roots_integrity(); + self.heap.assert_over_approximated_stack_roots_integrity()?; self.heap.assert_free_blocks_are_poisoned(); log::trace!("End DRC trace"); self.phase = DrcCollectionPhase::Sweep; - GcProgress::Continue + Ok(GcProgress::Continue) } DrcCollectionPhase::Sweep => { log::trace!("Begin DRC sweep"); - self.heap.assert_over_approximated_stack_roots_integrity(); + self.heap.assert_over_approximated_stack_roots_integrity()?; self.heap.assert_free_blocks_are_poisoned(); - self.heap.sweep(self.host_data_table); + self.heap.sweep(self.host_data_table)?; - self.heap.assert_over_approximated_stack_roots_integrity(); + self.heap.assert_over_approximated_stack_roots_integrity()?; self.heap.assert_free_blocks_are_poisoned(); log::trace!("End DRC sweep"); self.phase = DrcCollectionPhase::Done; - GcProgress::Complete + Ok(GcProgress::Complete) } - DrcCollectionPhase::Done => GcProgress::Complete, + DrcCollectionPhase::Done => Ok(GcProgress::Complete), } } } diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs index 122663fc0a65..d84f0187fe3d 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/exnref.rs @@ -1,4 +1,3 @@ -use super::structref::{initialize_field_impl, read_field_impl}; use crate::{ StorageType, Val, prelude::*, @@ -41,8 +40,10 @@ impl VMGcRef { return false; } - let header = gc_heap.header(&self); - header.kind().matches(VMGcKind::ExnRef) + match gc_heap.header(&self) { + Ok(header) => header.kind().matches(VMGcKind::ExnRef), + Err(_) => false, + } } /// Create a new `VMExnRef` from 
the given `gc_ref`. @@ -147,9 +148,9 @@ impl VMExnRef { layout: &GcStructLayout, ty: &StorageType, field: usize, - ) -> Val { + ) -> Result { let offset = layout.fields[field].offset; - read_field_impl(self.as_gc_ref(), store, ty, offset) + self.as_gc_ref().read_val(store, ty, offset) } /// Initialize a field in this exnref that is currently uninitialized. @@ -178,7 +179,7 @@ impl VMExnRef { ) -> Result<()> { debug_assert!(val._matches_ty(&store, &ty.unpack())?); let offset = layout.fields[field].offset; - initialize_field_impl(self.as_gc_ref(), store, ty, offset, val) + self.as_gc_ref().initialize_val(store, ty, offset, val) } /// Initialize the tag referenced by this exception object. @@ -193,11 +194,11 @@ impl VMExnRef { let tag_offset = layouts.exception_tag_defined_offset(); let store = store.require_gc_store_mut()?; store - .gc_object_data(&self.0) - .write_u32(instance_offset, instance.as_u32()); + .gc_object_data(&self.0)? + .write_u32(instance_offset, instance.as_u32())?; store - .gc_object_data(&self.0) - .write_u32(tag_offset, tag.as_u32()); + .gc_object_data(&self.0)? + .write_u32(tag_offset, tag.as_u32())?; Ok(()) } @@ -208,11 +209,11 @@ impl VMExnRef { let tag_offset = layouts.exception_tag_defined_offset(); let instance = store .require_gc_store_mut()? - .gc_object_data(&self.0) - .read_u32(instance_offset); + .gc_object_data(&self.0)? + .read_u32(instance_offset)?; let instance = InstanceId::from_u32(instance); let store = store.require_gc_store_mut()?; - let tag = store.gc_object_data(&self.0).read_u32(tag_offset); + let tag = store.gc_object_data(&self.0)?.read_u32(tag_offset)?; let tag = DefinedTagIndex::from_u32(tag); Ok((instance, tag)) } diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/externref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/externref.rs index e0b684e36666..f34992817769 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/externref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/externref.rs @@ -1,3 +1,4 @@ +use crate::Result; use crate::runtime::vm::{GcHeap, GcStore, VMGcRef}; use core::fmt; use wasmtime_environ::VMGcKind; @@ -29,14 +30,23 @@ impl From for VMGcRef { } impl VMGcRef { + /// Is this `VMGcRef` pointing to an `extern`? + pub fn is_externref(&self, gc_heap: &(impl GcHeap + ?Sized)) -> bool { + if self.is_i31() { + return false; + } + + match gc_heap.header(&self) { + Ok(header) => header.kind() == VMGcKind::ExternRef, + Err(_) => false, + } + } + /// Create a new `VMExternRef` from the given `gc_ref`. /// /// If this is not GC reference to an `externref`, `Err(self)` is returned. pub fn into_externref(self, gc_heap: &impl GcHeap) -> Result { - if self.is_i31() { - return Err(self); - } - if gc_heap.header(&self).kind() == VMGcKind::ExternRef { + if self.is_externref(gc_heap) { Ok(VMExternRef(self)) } else { Err(self) @@ -59,10 +69,7 @@ impl VMGcRef { /// Get this GC reference as an `externref` reference, if it actually is an /// `externref` reference. pub fn as_externref(&self, gc_heap: &(impl GcHeap + ?Sized)) -> Option<&VMExternRef> { - if self.is_i31() { - return None; - } - if gc_heap.header(&self).kind() == VMGcKind::ExternRef { + if self.is_externref(gc_heap) { let ptr = self as *const VMGcRef; let ret = unsafe { &*ptr.cast() }; assert!(matches!(ret, VMExternRef(VMGcRef { .. 
}))); diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs index f8da85eeffa3..8d310307511d 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs @@ -159,7 +159,7 @@ impl NullHeap { let len = self.memory.as_ref().unwrap().byte_size(); let len = u32::try_from(len).unwrap_or(u32::MAX); if end_of_object > len { - return Ok(Err(u64::try_from(layout.size()).unwrap())); + return Ok(Err(u64::try_from(layout.size())?)); } // Update the bump pointer, write the header, and return the GC ref. @@ -170,7 +170,7 @@ impl NullHeap { debug_assert_eq!(header.reserved_u26(), 0); header.set_reserved_u26(size); - *self.header_mut(&gc_ref) = header; + *self.header_mut(&gc_ref)? = header; Ok(Ok(gc_ref)) } @@ -243,12 +243,14 @@ unsafe impl GcHeap for NullHeap { _host_data_table: &mut ExternRefHostDataTable, destination: &mut Option, source: Option<&VMGcRef>, - ) { + ) -> Result<()> { *destination = source.map(|s| s.unchecked_copy()); + Ok(()) } - fn expose_gc_ref_to_wasm(&mut self, _gc_ref: VMGcRef) { + fn expose_gc_ref_to_wasm(&mut self, _gc_ref: VMGcRef) -> Result<()> { // Don't need to do anything special here. + Ok(()) } fn alloc_externref( @@ -259,26 +261,26 @@ unsafe impl GcHeap for NullHeap { Ok(r) => r, Err(bytes_needed) => return Ok(Err(bytes_needed)), }; - self.index_mut::(gc_ref.as_typed_unchecked()) + self.index_mut::(gc_ref.as_typed_unchecked())? .host_data = host_data; Ok(Ok(gc_ref.into_externref_unchecked())) } - fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId { + fn externref_host_data(&self, externref: &VMExternRef) -> Result { let typed_ref = VMNullExternRef::typed_ref(self, externref); - self.index(typed_ref).host_data + Ok(self.index(typed_ref)?.host_data) } - fn object_size(&self, gc_ref: &VMGcRef) -> usize { - let size = self.header(gc_ref).reserved_u26(); - usize::try_from(size).unwrap() + fn object_size(&self, gc_ref: &VMGcRef) -> Result { + let size = self.header(gc_ref)?.reserved_u26(); + Ok(usize::try_from(size)?) } - fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader { + fn header(&self, gc_ref: &VMGcRef) -> Result<&VMGcHeader> { self.index(gc_ref.as_typed_unchecked()) } - fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader { + fn header_mut(&mut self, gc_ref: &VMGcRef) -> Result<&mut VMGcHeader> { self.index_mut(gc_ref.as_typed_unchecked()) } @@ -299,7 +301,9 @@ unsafe impl GcHeap for NullHeap { self.alloc(VMGcHeader::from_kind_and_index(kind, ty), layout.layout()) } - fn dealloc_uninit_struct_or_exn(&mut self, _struct_ref: VMGcRef) {} + fn dealloc_uninit_struct_or_exn(&mut self, _struct_ref: VMGcRef) -> Result<()> { + Ok(()) + } fn alloc_uninit_array( &mut self, @@ -310,24 +314,25 @@ unsafe impl GcHeap for NullHeap { let layout = layout .layout(length) .ok_or_else(|| format_err!("allocation size too large"))?; - self.alloc( + let gc_ref = match self.alloc( VMGcHeader::from_kind_and_index(VMGcKind::ArrayRef, ty), layout, - ) - .map(|r| { - r.map(|r| { - self.index_mut::(r.as_typed_unchecked()) - .length = length; - r.into_arrayref_unchecked() - }) - }) + )? { + Ok(r) => r, + Err(bytes_needed) => return Ok(Err(bytes_needed)), + }; + self.index_mut::(gc_ref.as_typed_unchecked())? 
+ .length = length; + Ok(Ok(gc_ref.into_arrayref_unchecked())) } - fn dealloc_uninit_array(&mut self, _array_ref: VMArrayRef) {} + fn dealloc_uninit_array(&mut self, _array_ref: VMArrayRef) -> Result<()> { + Ok(()) + } - fn array_len(&self, arrayref: &VMArrayRef) -> u32 { + fn array_len(&self, arrayref: &VMArrayRef) -> Result { let arrayref = VMNullArrayHeader::typed_ref(self, arrayref); - self.index(arrayref).length + Ok(self.index(arrayref)?.length) } fn allocated_bytes(&self) -> usize { @@ -357,8 +362,8 @@ unsafe impl GcHeap for NullHeap { struct NullCollection {} impl<'a> GarbageCollection<'a> for NullCollection { - fn collect_increment(&mut self) -> GcProgress { - GcProgress::Complete + fn collect_increment(&mut self) -> Result { + Ok(GcProgress::Complete) } } diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs index 74333e568325..299b13858013 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs @@ -1,10 +1,8 @@ -use super::{truncate_i32_to_i8, truncate_i32_to_i16}; use crate::{ - AnyRef, ExnRef, ExternRef, Func, HeapType, RootedGcRefImpl, StorageType, Val, ValType, + StorageType, Val, prelude::*, runtime::vm::{GcHeap, GcStore, VMGcRef}, store::AutoAssertNoGc, - vm::{FuncRefTableId, SendSyncPtr}, }; use core::fmt; use wasmtime_environ::{GcStructLayout, VMGcKind}; @@ -42,8 +40,10 @@ impl VMGcRef { return false; } - let header = gc_heap.header(&self); - header.kind().matches(VMGcKind::StructRef) + match gc_heap.header(&self) { + Ok(header) => header.kind().matches(VMGcKind::StructRef), + Err(_) => false, + } } /// Create a new `VMStructRef` from the given `gc_ref`. @@ -138,9 +138,9 @@ impl VMStructRef { layout: &GcStructLayout, ty: &StorageType, field: usize, - ) -> Val { + ) -> Result { let offset = layout.fields[field].offset; - read_field_impl(self.as_gc_ref(), store, ty, offset) + self.as_gc_ref().read_val(store, ty, offset) } /// Write the given value into this struct at the given offset. @@ -165,82 +165,7 @@ impl VMStructRef { debug_assert!(val._matches_ty(&store, &ty.unpack())?); let offset = layout.fields[field].offset; - let gcstore = store.require_gc_store_mut()?; - let data = gcstore.gc_object_data(self.as_gc_ref()); - match val { - Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)), - Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)), - Val::I32(i) => data.write_i32(offset, i), - Val::I64(i) => data.write_i64(offset, i), - Val::F32(f) => data.write_u32(offset, f), - Val::F64(f) => data.write_u64(offset, f), - Val::V128(v) => data.write_v128(offset, v), - - // For GC-managed references, we need to take care to run the - // appropriate barriers, even when we are writing null references - // into the struct. - // - // POD-read the old value into a local copy, run the GC write - // barrier on that local copy, and then POD-write the updated - // value back into the struct. This avoids transmuting the inner - // data, which would probably be fine, but this approach is - // Obviously Correct and should get us by for now. If LLVM isn't - // able to elide some of these unnecessary copies, and this - // method is ever hot enough, we can always come back and clean - // it up in the future. 
- Val::ExternRef(e) => { - let raw = data.read_u32(offset); - let mut gc_ref = VMGcRef::from_raw_u32(raw); - let e = match e { - Some(e) => Some(e.try_gc_ref(store)?.unchecked_copy()), - None => None, - }; - let store = store.require_gc_store_mut()?; - store.write_gc_ref(&mut gc_ref, e.as_ref()); - let data = store.gc_object_data(self.as_gc_ref()); - data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32())); - } - Val::AnyRef(a) => { - let raw = data.read_u32(offset); - let mut gc_ref = VMGcRef::from_raw_u32(raw); - let a = match a { - Some(a) => Some(a.try_gc_ref(store)?.unchecked_copy()), - None => None, - }; - let store = store.require_gc_store_mut()?; - store.write_gc_ref(&mut gc_ref, a.as_ref()); - let data = store.gc_object_data(self.as_gc_ref()); - data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32())); - } - Val::ExnRef(e) => { - let raw = data.read_u32(offset); - let mut gc_ref = VMGcRef::from_raw_u32(raw); - let e = match e { - Some(e) => Some(e.try_gc_ref(store)?.unchecked_copy()), - None => None, - }; - let store = store.require_gc_store_mut()?; - store.write_gc_ref(&mut gc_ref, e.as_ref()); - let data = store.gc_object_data(self.as_gc_ref()); - data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32())); - } - - Val::FuncRef(f) => { - let f = f.map(|f| SendSyncPtr::new(f.vm_func_ref(store))); - let gcstore = store.require_gc_store_mut()?; - let id = unsafe { gcstore.func_ref_table.intern(f) }; - gcstore - .gc_object_data(self.as_gc_ref()) - .write_u32(offset, id.into_raw()); - } - Val::ContRef(_) => { - // TODO(#10248): Implement struct continuation reference field handling - return Err(crate::format_err!( - "setting continuation references in struct fields not yet supported" - )); - } - } - Ok(()) + self.as_gc_ref().write_val(store, ty, offset, val) } /// Initialize a field in this structref that is currently uninitialized. @@ -277,127 +202,6 @@ impl VMStructRef { ) -> Result<()> { debug_assert!(val._matches_ty(&store, &ty.unpack())?); let offset = layout.fields[field].offset; - initialize_field_impl(self.as_gc_ref(), store, ty, offset, val) - } -} - -/// Read a field from a GC object at a given offset. -/// -/// This factored-out function allows a shared implementation for both -/// structs (this module) and exception objects. 
-pub(crate) fn read_field_impl( - gc_ref: &VMGcRef, - store: &mut AutoAssertNoGc, - ty: &StorageType, - offset: u32, -) -> Val { - let data = store.unwrap_gc_store_mut().gc_object_data(gc_ref); - match ty { - StorageType::I8 => Val::I32(data.read_u8(offset).into()), - StorageType::I16 => Val::I32(data.read_u16(offset).into()), - StorageType::ValType(ValType::I32) => Val::I32(data.read_i32(offset)), - StorageType::ValType(ValType::I64) => Val::I64(data.read_i64(offset)), - StorageType::ValType(ValType::F32) => Val::F32(data.read_u32(offset)), - StorageType::ValType(ValType::F64) => Val::F64(data.read_u64(offset)), - StorageType::ValType(ValType::V128) => Val::V128(data.read_v128(offset)), - StorageType::ValType(ValType::Ref(r)) => match r.heap_type().top() { - HeapType::Extern => { - let raw = data.read_u32(offset); - Val::ExternRef(ExternRef::_from_raw(store, raw)) - } - HeapType::Any => { - let raw = data.read_u32(offset); - Val::AnyRef(AnyRef::_from_raw(store, raw)) - } - HeapType::Exn => { - let raw = data.read_u32(offset); - Val::ExnRef(ExnRef::_from_raw(store, raw)) - } - HeapType::Func => { - let func_ref_id = data.read_u32(offset); - let func_ref_id = FuncRefTableId::from_raw(func_ref_id); - let func_ref = store - .unwrap_gc_store() - .func_ref_table - .get_untyped(func_ref_id); - Val::FuncRef(unsafe { - func_ref.map(|p| Func::from_vm_func_ref(store.id(), p.as_non_null())) - }) - } - otherwise => unreachable!("not a top type: {otherwise:?}"), - }, - } -} - -pub(crate) fn initialize_field_impl( - gc_ref: &VMGcRef, - store: &mut AutoAssertNoGc, - ty: &StorageType, - offset: u32, - val: Val, -) -> Result<()> { - let gcstore = store.require_gc_store_mut()?; - match val { - Val::I32(i) if ty.is_i8() => gcstore - .gc_object_data(gc_ref) - .write_i8(offset, truncate_i32_to_i8(i)), - Val::I32(i) if ty.is_i16() => gcstore - .gc_object_data(gc_ref) - .write_i16(offset, truncate_i32_to_i16(i)), - Val::I32(i) => gcstore.gc_object_data(gc_ref).write_i32(offset, i), - Val::I64(i) => gcstore.gc_object_data(gc_ref).write_i64(offset, i), - Val::F32(f) => gcstore.gc_object_data(gc_ref).write_u32(offset, f), - Val::F64(f) => gcstore.gc_object_data(gc_ref).write_u64(offset, f), - Val::V128(v) => gcstore.gc_object_data(gc_ref).write_v128(offset, v), - - // NB: We don't need to do a write barrier when initializing a - // field, because there is nothing being overwritten. Therefore, we - // just the clone barrier. - Val::ExternRef(x) => { - let x = match x { - None => 0, - Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(), - }; - store - .require_gc_store_mut()? - .gc_object_data(gc_ref) - .write_u32(offset, x); - } - Val::AnyRef(x) => { - let x = match x { - None => 0, - Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(), - }; - store - .require_gc_store_mut()? - .gc_object_data(gc_ref) - .write_u32(offset, x); - } - Val::ExnRef(x) => { - let x = match x { - None => 0, - Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(), - }; - store - .require_gc_store_mut()? 
-                .gc_object_data(gc_ref)
-                .write_u32(offset, x);
-        }
-
-        Val::FuncRef(f) => {
-            let f = f.map(|f| SendSyncPtr::new(f.vm_func_ref(store)));
-            let gcstore = store.require_gc_store_mut()?;
-            let id = unsafe { gcstore.func_ref_table.intern(f) };
-            gcstore
-                .gc_object_data(gc_ref)
-                .write_u32(offset, id.into_raw());
-        }
-        Val::ContRef(_) => {
-            // TODO(#10248): Implement struct continuation reference field init handling
-            return Err(crate::format_err!(
-                "initializing continuation references in struct fields not yet supported"
-            ));
+        self.as_gc_ref().initialize_val(store, ty, offset, val)
     }
-    Ok(())
 }
diff --git a/crates/wasmtime/src/runtime/vm/gc/func_ref.rs b/crates/wasmtime/src/runtime/vm/gc/func_ref.rs
index 4635a5d01580..dab931d546a1 100644
--- a/crates/wasmtime/src/runtime/vm/gc/func_ref.rs
+++ b/crates/wasmtime/src/runtime/vm/gc/func_ref.rs
@@ -9,6 +9,7 @@
 //! `funcref` we got from inside the GC heap.
 
 use crate::{
+    Result, bail_bug,
     hash_map::HashMap,
     type_registry::TypeRegistry,
     vm::{SendSyncPtr, VMFuncRef},
@@ -67,8 +68,10 @@ impl FuncRefTable {
         types: &TypeRegistry,
         id: FuncRefTableId,
         expected_ty: VMSharedTypeIndex,
-    ) -> Option<SendSyncPtr<VMFuncRef>> {
-        let f = self.slab.get(id.0).copied().expect("bad FuncRefTableId");
+    ) -> Result<Option<SendSyncPtr<VMFuncRef>>> {
+        let Some(f) = self.slab.get(id.0).copied() else {
+            bail_bug!("bad FuncRefTableId")
+        };
 
         if let Some(f) = f {
             // The safety contract for `intern` ensures that deref'ing `f` is safe.
@@ -84,7 +87,7 @@ impl FuncRefTable {
             assert!(types.is_subtype(actual_ty, expected_ty));
         }
 
-        f
+        Ok(f)
     }
 
     /// Get the `VMFuncRef` associated with the given ID, without checking the
@@ -93,7 +96,10 @@ impl FuncRefTable {
     /// Prefer `get_typed`. This method is only suitable for getting a
     /// `VMFuncRef` as an untyped `funcref` function reference, and never as a
     /// typed `(ref $some_func_type)` function reference.
-    pub fn get_untyped(&self, id: FuncRefTableId) -> Option<SendSyncPtr<VMFuncRef>> {
-        self.slab.get(id.0).copied().expect("bad FuncRefTableId")
+    pub fn get_untyped(&self, id: FuncRefTableId) -> Result<Option<SendSyncPtr<VMFuncRef>>> {
+        match self.slab.get(id.0).copied() {
+            Some(f) => Ok(f),
+            None => bail_bug!("bad FuncRefTableId"),
+        }
     }
 }
diff --git a/crates/wasmtime/src/runtime/vm/gc/gc_ref.rs b/crates/wasmtime/src/runtime/vm/gc/gc_ref.rs
index 54f45dc588f9..e56e3e154a9b 100644
--- a/crates/wasmtime/src/runtime/vm/gc/gc_ref.rs
+++ b/crates/wasmtime/src/runtime/vm/gc/gc_ref.rs
@@ -1,5 +1,9 @@
-use crate::prelude::*;
-use crate::runtime::vm::{GcHeap, GcStore, I31};
+use super::{truncate_i32_to_i8, truncate_i32_to_i16};
+use crate::runtime::vm::{FuncRefTableId, GcHeap, GcStore, I31, SendSyncPtr};
+use crate::store::AutoAssertNoGc;
+use crate::{
+    AnyRef, ExnRef, ExternRef, Func, HeapType, Result, StorageType, Val, ValType, bail_bug,
+};
 use core::fmt;
 use core::marker;
 use core::num::NonZeroU32;
@@ -245,9 +249,13 @@ impl VMGcRef {
     /// Get this GC reference as a u32 index into its GC heap.
     ///
-    /// Returns `None` for `i31ref`s.
-    pub fn as_heap_index(&self) -> Option<NonZeroU32> {
-        if self.is_i31() { None } else { Some(self.0) }
+    /// Returns an error when called for `i31ref`s.
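+    ///
+    /// An `i31ref` is a tagged immediate rather than a heap allocation, so it
+    /// has no heap index; asking for one is a caller bug.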
+    pub fn heap_index(&self) -> Result<NonZeroU32> {
+        if self.is_i31() {
+            bail_bug!("attempted to get heap index of i31")
+        }
+
+        Ok(self.0)
     }
 
     /// Get this GC reference as a raw, non-zero u32 value, regardless whether
@@ -273,7 +281,7 @@ impl VMGcRef {
         if self.is_i31() {
             return Err(self);
         }
-        if T::is(gc_heap.header(&self)) {
+        if self.is_typed::<T>(gc_heap) {
             Ok(TypedGcRef {
                 gc_ref: self,
                 _phantom: marker::PhantomData,
@@ -308,7 +316,10 @@ impl VMGcRef {
         if self.is_i31() {
             return false;
         }
-        T::is(gc_heap.header(&self))
+        match gc_heap.header(&self) {
+            Ok(header) => T::is(header),
+            Err(_) => false,
+        }
     }
 
     /// Borrow `self` as a typed GC reference, checking that `self` actually is
@@ -321,7 +332,7 @@ impl VMGcRef {
         if self.is_i31() {
             return None;
         }
-        if T::is(gc_heap.header(&self)) {
+        if self.is_typed::<T>(gc_heap) {
             let ptr = self as *const VMGcRef;
             let ret = unsafe { &*ptr.cast() };
             assert!(matches!(
@@ -368,7 +379,7 @@ impl VMGcRef {
         if self.is_i31() {
             None
         } else {
-            Some(gc_heap.header(self))
+            gc_heap.header(self).ok()
         }
     }
 
@@ -412,6 +423,204 @@ impl VMGcRef {
             .gc_header(gc_heap)
             .map_or(false, |h| h.kind().matches(VMGcKind::AnyRef))
     }
+
+    pub fn read_val(
+        &self,
+        store: &mut AutoAssertNoGc,
+        ty: &StorageType,
+        offset: u32,
+    ) -> Result<Val> {
+        let data = store.require_gc_store_mut()?.gc_object_data(self)?;
+        Ok(match ty {
+            StorageType::I8 => Val::I32(data.read_u8(offset)?.into()),
+            StorageType::I16 => Val::I32(data.read_u16(offset)?.into()),
+            StorageType::ValType(ValType::I32) => Val::I32(data.read_i32(offset)?),
+            StorageType::ValType(ValType::I64) => Val::I64(data.read_i64(offset)?),
+            StorageType::ValType(ValType::F32) => Val::F32(data.read_u32(offset)?),
+            StorageType::ValType(ValType::F64) => Val::F64(data.read_u64(offset)?),
+            StorageType::ValType(ValType::V128) => Val::V128(data.read_v128(offset)?),
+            StorageType::ValType(ValType::Ref(r)) => match r.heap_type().top() {
+                HeapType::Extern => {
+                    let raw = data.read_u32(offset)?;
+                    Val::ExternRef(ExternRef::_from_raw(store, raw))
+                }
+                HeapType::Any => {
+                    let raw = data.read_u32(offset)?;
+                    Val::AnyRef(AnyRef::_from_raw(store, raw))
+                }
+                HeapType::Exn => {
+                    let raw = data.read_u32(offset)?;
+                    Val::ExnRef(ExnRef::_from_raw(store, raw))
+                }
+                HeapType::Func => {
+                    let func_ref_id = data.read_u32(offset)?;
+                    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
+                    let func_ref = store
+                        .unwrap_gc_store()
+                        .func_ref_table
+                        .get_untyped(func_ref_id)?;
+                    Val::FuncRef(unsafe {
+                        func_ref.map(|p| Func::from_vm_func_ref(store.id(), p.as_non_null()))
+                    })
+                }
+                otherwise => bail_bug!("not a top type: {otherwise:?}"),
+            },
+        })
+    }
+
+    pub fn initialize_val(
+        &self,
+        store: &mut AutoAssertNoGc,
+        ty: &StorageType,
+        offset: u32,
+        val: Val,
+    ) -> Result<()> {
+        let gcstore = store.require_gc_store_mut()?;
+        match val {
+            Val::I32(i) if ty.is_i8() => gcstore
+                .gc_object_data(self)?
+                .write_i8(offset, truncate_i32_to_i8(i)),
+            Val::I32(i) if ty.is_i16() => gcstore
+                .gc_object_data(self)?
+                .write_i16(offset, truncate_i32_to_i16(i)),
+            Val::I32(i) => gcstore.gc_object_data(self)?.write_i32(offset, i),
+            Val::I64(i) => gcstore.gc_object_data(self)?.write_i64(offset, i),
+            Val::F32(f) => gcstore.gc_object_data(self)?.write_u32(offset, f),
+            Val::F64(f) => gcstore.gc_object_data(self)?.write_u64(offset, f),
+            Val::V128(v) => gcstore.gc_object_data(self)?.write_v128(offset, v),
+
+            // NB: We don't need to do a write barrier when initializing a
+            // field, because there is nothing being overwritten. Therefore, we
+            // just need the clone barrier.
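+            // (The clone barrier is the `try_clone_gc_ref` call in each arm
+            // below, which performs the collector-specific bookkeeping for the
+            // new copy of the reference, e.g. a ref-count increment under the
+            // DRC collector.)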
+            Val::ExternRef(x) => {
+                let x = match x {
+                    None => 0,
+                    Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(),
+                };
+                store
+                    .require_gc_store_mut()?
+                    .gc_object_data(self)?
+                    .write_u32(offset, x)
+            }
+            Val::AnyRef(x) => {
+                let x = match x {
+                    None => 0,
+                    Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(),
+                };
+                store
+                    .require_gc_store_mut()?
+                    .gc_object_data(self)?
+                    .write_u32(offset, x)
+            }
+            Val::ExnRef(x) => {
+                let x = match x {
+                    None => 0,
+                    Some(x) => x.try_clone_gc_ref(store)?.as_raw_u32(),
+                };
+                store
+                    .require_gc_store_mut()?
+                    .gc_object_data(self)?
+                    .write_u32(offset, x)
+            }
+
+            Val::FuncRef(f) => {
+                let f = f.map(|f| SendSyncPtr::new(f.vm_func_ref(store)));
+                let gcstore = store.require_gc_store_mut()?;
+                let id = unsafe { gcstore.func_ref_table.intern(f) };
+                gcstore
+                    .gc_object_data(self)?
+                    .write_u32(offset, id.into_raw())
+            }
+            Val::ContRef(_) => {
+                // TODO(#10248): Implement struct continuation reference field init handling
+                bail_bug!(
+                    "initializing continuation references in struct fields not yet supported"
+                );
+            }
+        }
+    }
+
+    pub fn write_val(
+        &self,
+        store: &mut AutoAssertNoGc,
+        ty: &StorageType,
+        offset: u32,
+        val: Val,
+    ) -> Result<()> {
+        let gcstore = store.require_gc_store_mut()?;
+        let data = gcstore.gc_object_data(self)?;
+        match val {
+            Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)),
+            Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)),
+            Val::I32(i) => data.write_i32(offset, i),
+            Val::I64(i) => data.write_i64(offset, i),
+            Val::F32(f) => data.write_u32(offset, f),
+            Val::F64(f) => data.write_u64(offset, f),
+            Val::V128(v) => data.write_v128(offset, v),
+
+            // For GC-managed references, we need to take care to run the
+            // appropriate barriers, even when we are writing null references
+            // into the struct.
+            //
+            // POD-read the old value into a local copy, run the GC write
+            // barrier on that local copy, and then POD-write the updated
+            // value back into the struct. This avoids transmuting the inner
+            // data, which would probably be fine, but this approach is
+            // Obviously Correct and should get us by for now. If LLVM isn't
+            // able to elide some of these unnecessary copies, and this
+            // method is ever hot enough, we can always come back and clean
+            // it up in the future.
+            Val::ExternRef(e) => {
+                let raw = data.read_u32(offset)?;
+                let mut gc_ref = VMGcRef::from_raw_u32(raw);
+                let e = match e {
+                    Some(e) => Some(e.try_gc_ref(store)?.unchecked_copy()),
+                    None => None,
+                };
+                let store = store.require_gc_store_mut()?;
+                store.write_gc_ref(&mut gc_ref, e.as_ref())?;
+                let data = store.gc_object_data(self)?;
+                data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32()))
+            }
+            Val::AnyRef(a) => {
+                let raw = data.read_u32(offset)?;
+                let mut gc_ref = VMGcRef::from_raw_u32(raw);
+                let a = match a {
+                    Some(a) => Some(a.try_gc_ref(store)?.unchecked_copy()),
+                    None => None,
+                };
+                let store = store.require_gc_store_mut()?;
+                store.write_gc_ref(&mut gc_ref, a.as_ref())?;
+                let data = store.gc_object_data(self)?;
+                data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32()))
+            }
+            Val::ExnRef(e) => {
+                let raw = data.read_u32(offset)?;
+                let mut gc_ref = VMGcRef::from_raw_u32(raw);
+                let e = match e {
+                    Some(e) => Some(e.try_gc_ref(store)?.unchecked_copy()),
+                    None => None,
+                };
+                let store = store.require_gc_store_mut()?;
+                store.write_gc_ref(&mut gc_ref, e.as_ref())?;
+                let data = store.gc_object_data(self)?;
+                data.write_u32(offset, gc_ref.map_or(0, |r| r.as_raw_u32()))
+            }
+
+            Val::FuncRef(f) => {
+                let f = f.map(|f| SendSyncPtr::new(f.vm_func_ref(store)));
+                let gcstore = store.require_gc_store_mut()?;
+                let id = unsafe { gcstore.func_ref_table.intern(f) };
+                gcstore
+                    .gc_object_data(self)?
+                    .write_u32(offset, id.into_raw())
+            }
+            Val::ContRef(_) => {
+                // TODO(#10248): Implement struct continuation reference field handling
+                bail_bug!("setting continuation references in struct fields not yet supported");
+            }
+        }
+    }
 }
 
 /// A trait implemented by all objects allocated inside a GC heap.
diff --git a/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs b/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs
index 8eab8d3fbf04..6bbca9979dd0 100644
--- a/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs
+++ b/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs
@@ -1,5 +1,6 @@
 //! Traits for abstracting over our different garbage collectors.
 
+use crate::bail_bug;
 use crate::prelude::*;
 use crate::runtime::vm::{
     ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef,
@@ -170,7 +171,18 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// The given `gc_ref` should not be used again.
     fn drop_gc_ref(&mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: VMGcRef) {
         let mut dest = Some(gc_ref);
-        self.write_gc_ref(host_data_table, &mut dest, None);
+
+        // Similar to `clone_gc_ref`, this method, `drop_gc_ref`, is not
+        // fallible, as it's seen as too painful to propagate this result. The
+        // consequence is that if the GC heap is corrupted at this point it'll
+        // get a little more corrupted from this operation, but that's the
+        // tradeoff we're making by ignoring the error here.
+        if let Err(e) = self.write_gc_ref(host_data_table, &mut dest, None) {
+            if cfg!(debug_assertions) {
+                panic!("heap corruption detected: {e}");
+            }
+        }
     }
 
     /// Write barrier called every time the runtime overwrites a GC reference.
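Since `drop_gc_ref` keeps its infallible signature while the barrier it calls is now fallible, the error has to be swallowed somewhere, and the hunk above chooses "panic in debug builds, keep going in release builds". A minimal, self-contained sketch of that policy follows; the types and the `write_gc_ref` stand-in here are illustrative, not Wasmtime's real definitions:

```rust
// Stand-in for a fallible GC write barrier; the real one is
// `GcHeap::write_gc_ref` and can fail if the heap is corrupted.
fn write_gc_ref(dest: &mut Option<u32>, src: Option<&u32>) -> Result<(), String> {
    *dest = src.copied();
    Ok(())
}

// Infallible drop entry point: surface barrier failures loudly while
// developing, and deliberately ignore them in release builds rather than
// threading a `Result` through every drop site.
fn drop_gc_ref(gc_ref: u32) {
    let mut dest = Some(gc_ref);
    if let Err(e) = write_gc_ref(&mut dest, None) {
        if cfg!(debug_assertions) {
            panic!("heap corruption detected: {e}");
        }
    }
}

fn main() {
    drop_gc_ref(0x1234);
}
```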
@@ -190,7 +202,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
         host_data_table: &mut ExternRefHostDataTable,
         destination: &mut Option<VMGcRef>,
         source: Option<&VMGcRef>,
-    );
+    ) -> Result<()>;
 
     /// Read barrier called whenever a GC reference is passed from the runtime
     /// to Wasm: an argument to a host-to-Wasm call, or a return from a
@@ -199,7 +211,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// Callers should pass a valid `VMGcRef` that belongs to the given
     /// heap. Failure to do so is memory safe, but may result in general
     /// failures such as panics or incorrect results.
-    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef);
+    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) -> Result<()>;
 
     ////////////////////////////////////////////////////////////////////////////
     // `externref` Methods
@@ -230,23 +242,23 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// Callers should pass a valid `externref` that belongs to the given
     /// heap. Failure to do so is memory safe, but may result in general
     /// failures such as panics or incorrect results.
-    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId;
+    fn externref_host_data(&self, externref: &VMExternRef) -> Result<ExternRefHostDataId>;
 
     ////////////////////////////////////////////////////////////////////////////
     // Struct, array, and general GC object methods
 
     /// Get the header of the object that `gc_ref` points to.
-    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader;
+    fn header(&self, gc_ref: &VMGcRef) -> Result<&VMGcHeader>;
 
     /// Get the header of the object that `gc_ref` points to.
-    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader;
+    fn header_mut(&mut self, gc_ref: &VMGcRef) -> Result<&mut VMGcHeader>;
 
     /// Get the size (in bytes) of the object referenced by `gc_ref`.
     ///
     /// # Panics
     ///
     /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
-    fn object_size(&self, gc_ref: &VMGcRef) -> usize;
+    fn object_size(&self, gc_ref: &VMGcRef) -> Result<usize>;
 
     /// Allocate a raw, uninitialized GC-managed object with the given header
     /// and layout.
@@ -314,7 +326,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// that the struct's allocation can be eagerly reclaimed, and so that the
     /// collector doesn't attempt to treat any of the uninitialized fields as
     /// valid GC references, or something like that.
-    fn dealloc_uninit_struct_or_exn(&mut self, structref: VMGcRef);
+    fn dealloc_uninit_struct_or_exn(&mut self, structref: VMGcRef) -> Result<()>;
 
     /// * `Ok(Ok(_))`: The allocation was successful.
     ///
@@ -340,7 +352,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// that the array's allocation can be eagerly reclaimed, and so that the
     /// collector doesn't attempt to treat any of the uninitialized fields as
     /// valid GC references, or something like that.
-    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef);
+    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) -> Result<()>;
 
     /// Get the length of the given array.
     ///
@@ -349,7 +361,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// The given `arrayref` should be valid and of the given size. Failure to
     /// do so is memory safe, but may result in general failures such as panics
     /// or incorrect results.
-    fn array_len(&self, arrayref: &VMArrayRef) -> u32;
+    fn array_len(&self, arrayref: &VMArrayRef) -> Result<u32>;
 
     ////////////////////////////////////////////////////////////////////////////
     // Garbage Collection Methods
@@ -462,18 +474,24 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     ///
     /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
     #[inline]
-    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> &T
+    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> Result<&T>
     where
         Self: Sized,
         T: GcHeapObject,
     {
         assert!(!mem::needs_drop::<T>());
         let gc_ref = gc_ref.as_untyped();
-        let start = gc_ref.as_heap_index().unwrap().get();
-        let start = usize::try_from(start).unwrap();
+        let start = gc_ref.heap_index()?.get();
+        let start = usize::try_from(start)?;
         let len = mem::size_of::<T>();
-        let slice = &self.heap_slice()[start..][..len];
-        unsafe { &*(slice.as_ptr().cast::<T>()) }
+        let slice = match self.heap_slice().get(start..).and_then(|s| s.get(..len)) {
+            Some(slice) => slice,
+            None => bail_bug!("gc object out-of-bounds"),
+        };
+        if slice.as_ptr().addr() % mem::align_of::<T>() != 0 {
+            bail_bug!("gc object and/or heap misaligned");
+        }
+        Ok(unsafe { &*(slice.as_ptr().cast::<T>()) })
     }
 
     /// Index into this heap and get an exclusive reference to the `T` that
@@ -483,18 +501,26 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     ///
     /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
     #[inline]
-    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> &mut T
+    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> Result<&mut T>
     where
         Self: Sized,
         T: GcHeapObject,
     {
         assert!(!mem::needs_drop::<T>());
         let gc_ref = gc_ref.as_untyped();
-        let start = gc_ref.as_heap_index().unwrap().get();
-        let start = usize::try_from(start).unwrap();
+        let start = gc_ref.heap_index()?.get();
+        let start = usize::try_from(start)?;
         let len = mem::size_of::<T>();
-        let slice = &mut self.heap_slice_mut()[start..][..len];
-        unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) }
+        let slice = match self
+            .heap_slice_mut()
+            .get_mut(start..)
+            .and_then(|s| s.get_mut(..len))
+        {
+            Some(slice) => slice,
+            None => bail_bug!("gc object out-of-bounds"),
+        };
+        assert!(slice.as_ptr().addr() % mem::align_of::<T>() == 0);
+        Ok(unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) })
     }
 
     /// Get the range of bytes that the given object occupies in the heap.
@@ -502,12 +528,15 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// # Panics
     ///
     /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
-    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
-        let start = gc_ref.as_heap_index().unwrap().get();
-        let start = usize::try_from(start).unwrap();
-        let size = self.object_size(gc_ref);
-        let end = start.checked_add(size).unwrap();
-        start..end
+    fn object_range(&self, gc_ref: &VMGcRef) -> Result<Range<usize>> {
+        let start = gc_ref.heap_index()?.get();
+        let start = usize::try_from(start)?;
+        let size = self.object_size(gc_ref)?;
+        let end = match start.checked_add(size) {
+            Some(end) => end,
+            None => bail_bug!("object size overflow"),
+        };
+        Ok(start..end)
     }
 
     /// Get a mutable borrow of the given object's data.
@@ -515,10 +544,13 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// # Panics
     ///
     /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
-    fn gc_object_data(&self, gc_ref: &VMGcRef) -> &VMGcObjectData {
-        let range = self.object_range(gc_ref);
-        let data = &self.heap_slice()[range];
-        data.into()
+    fn gc_object_data(&self, gc_ref: &VMGcRef) -> Result<&VMGcObjectData> {
+        let range = self.object_range(gc_ref)?;
+        let data = match self.heap_slice().get(range) {
+            Some(data) => data,
+            None => bail_bug!("gc object out of bounds"),
+        };
+        Ok(data.into())
     }
 
     /// Get a mutable borrow of the given object's data.
@@ -526,10 +558,13 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
     /// # Panics
     ///
     /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
-    fn gc_object_data_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
-        let range = self.object_range(gc_ref);
-        let data = &mut self.heap_slice_mut()[range];
-        data.into()
+    fn gc_object_data_mut(&mut self, gc_ref: &VMGcRef) -> Result<&mut VMGcObjectData> {
+        let range = self.object_range(gc_ref)?;
+        let data = match self.heap_slice_mut().get_mut(range) {
+            Some(data) => data,
+            None => bail_bug!("gc object out of bounds"),
+        };
+        Ok(data.into())
     }
 
     /// Get a pair of mutable borrows of the given objects' data.
@@ -542,16 +577,22 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
         &mut self,
         a: &VMGcRef,
         b: &VMGcRef,
-    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
+    ) -> Result<(&mut VMGcObjectData, &mut VMGcObjectData)> {
         assert_ne!(a, b);
 
-        let a_range = self.object_range(a);
-        let b_range = self.object_range(b);
+        let a_range = self.object_range(a)?;
+        let b_range = self.object_range(b)?;
 
         // Assert that the two objects do not overlap.
         assert!(a_range.start <= a_range.end);
         assert!(b_range.start <= b_range.end);
-        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);
+        if !(a_range.end <= b_range.start || b_range.end <= a_range.start) {
+            bail_bug!("gc object overlap");
+        }
+
+        let heap = self.heap_slice_mut();
+        if a_range.end > heap.len() || b_range.end > heap.len() {
+            bail_bug!("gc object out of bounds");
+        }
 
         let (a_data, b_data) = if a_range.start < b_range.start {
             let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
@@ -563,7 +604,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
             (&mut a_half[..a_len], &mut b_half[b_range])
         };
 
-        (a_data.into(), b_data.into())
+        Ok((a_data.into(), b_data.into()))
     }
 }
 
@@ -726,16 +767,22 @@ impl GcRoot<'_> {
     ///
     /// Does NOT run GC barriers.
     #[inline]
-    pub fn get(&self) -> VMGcRef {
+    pub fn get(&self) -> Result<VMGcRef> {
         match self.raw {
-            RawGcRoot::VMGcRef(ptr) => unsafe { ptr::read(ptr.as_ptr()) },
+            RawGcRoot::VMGcRef(ptr) => Ok(unsafe { ptr::read(ptr.as_ptr()) }),
             RawGcRoot::Stack(ptr) => unsafe {
                 let raw: u32 = ptr::read(ptr.as_ptr());
-                VMGcRef::from_raw_u32(raw).expect("non-null")
+                match VMGcRef::from_raw_u32(raw) {
+                    Some(r) => Ok(r),
+                    None => bail_bug!("stack contained null gcref"),
+                }
             },
             RawGcRoot::ValRaw(ptr) => unsafe {
                 let val: ValRaw = ptr::read(ptr.as_ptr());
-                val.get_vmgcref().expect("non-null")
+                match val.get_vmgcref() {
+                    Some(r) => Ok(r),
+                    None => bail_bug!("val contained null gcref"),
+                }
             },
         }
    }
@@ -785,17 +832,17 @@ pub trait GarbageCollection<'a>: Send + Sync {
     ///
     /// The mutator does *not* run in between increments. This method exists
     /// solely to allow cooperative yielding
-    fn collect_increment(&mut self) -> GcProgress;
+    fn collect_increment(&mut self) -> Result<GcProgress>;
 
     /// Run this GC process to completion.
     ///
     /// Keeps calling `collect_increment` in a loop until the GC process is
     /// complete.
-    fn collect(&mut self) {
+    fn collect(&mut self) -> Result<()> {
         loop {
-            match self.collect_increment() {
+            match self.collect_increment()? {
                 GcProgress::Continue => continue,
-                GcProgress::Complete => return,
+                GcProgress::Complete => return Ok(()),
             }
         }
     }
@@ -815,21 +862,21 @@ pub async fn collect_async<'a>(
     mut collection: Box<dyn GarbageCollection<'a> + 'a>,
     asyncness: Asyncness,
     yield_fn: impl AsyncFn(),
-) {
+) -> Result<()> {
     #[cfg(not(feature = "async"))]
     {
         _ = yield_fn;
     }
 
     loop {
-        match collection.collect_increment() {
+        match collection.collect_increment()? {
             GcProgress::Continue => {
                 if asyncness != Asyncness::No {
                     #[cfg(feature = "async")]
                     yield_fn().await
                 }
             }
-            GcProgress::Complete => return,
+            GcProgress::Complete => return Ok(()),
         }
     }
 }
diff --git a/crates/wasmtime/src/runtime/vm/gc/host_data.rs b/crates/wasmtime/src/runtime/vm/gc/host_data.rs
index 570cf6b15a53..ae097ed69165 100644
--- a/crates/wasmtime/src/runtime/vm/gc/host_data.rs
+++ b/crates/wasmtime/src/runtime/vm/gc/host_data.rs
@@ -13,6 +13,7 @@
 //! less catastrophic than doing an indirect call to an attacker-controlled
 //! function pointer.
 
+use crate::bail_bug;
 use crate::prelude::*;
 use core::any::Any;
 use wasmtime_core::{
@@ -50,21 +51,29 @@ impl ExternRefHostDataTable {
     }
 
     /// Deallocate an `externref` host data value.
-    pub fn dealloc(&mut self, id: ExternRefHostDataId) -> Box<dyn Any + Send + Sync> {
+    pub fn dealloc(&mut self, id: ExternRefHostDataId) -> Result<Box<dyn Any + Send + Sync>> {
+        // Verify this exists before deleting it.
+        self.get(id)?;
         log::trace!("deallocated externref host data: {id:?}");
-        self.slab.dealloc(id.0)
+        Ok(self.slab.dealloc(id.0))
     }
 
     /// Get a shared borrow of the host data associated with the given ID.
-    pub fn get(&self, id: ExternRefHostDataId) -> &(dyn Any + Send + Sync) {
-        let data: &Box<dyn Any + Send + Sync> = self.slab.get(id.0).unwrap();
-        deref_box(data)
+    pub fn get(&self, id: ExternRefHostDataId) -> Result<&(dyn Any + Send + Sync)> {
+        let data: &Box<dyn Any + Send + Sync> = match self.slab.get(id.0) {
+            Some(data) => data,
+            None => bail_bug!("invalid `ExternRefHostDataId`"),
+        };
+        Ok(deref_box(data))
     }
 
     /// Get a mutable borrow of the host data associated with the given ID.
-    pub fn get_mut(&mut self, id: ExternRefHostDataId) -> &mut (dyn Any + Send + Sync) {
-        let data: &mut Box<dyn Any + Send + Sync> = self.slab.get_mut(id.0).unwrap();
-        deref_box_mut(data)
+    pub fn get_mut(&mut self, id: ExternRefHostDataId) -> Result<&mut (dyn Any + Send + Sync)> {
+        let data: &mut Box<dyn Any + Send + Sync> = match self.slab.get_mut(id.0) {
+            Some(data) => data,
+            None => bail_bug!("invalid `ExternRefHostDataId`"),
+        };
+        Ok(deref_box_mut(data))
     }
 }
 
@@ -78,9 +87,12 @@ mod tests {
         let x = 42_u32;
         let id = table.alloc(Box::new(x));
 
-        assert!(table.get(id).is::<u32>());
-        assert_eq!(*table.get(id).downcast_ref::<u32>().unwrap(), 42);
-        assert!(table.get_mut(id).is::<u32>());
-        assert_eq!(*table.get_mut(id).downcast_ref::<u32>().unwrap(), 42);
+        assert!(table.get(id).unwrap().is::<u32>());
+        assert_eq!(*table.get(id).unwrap().downcast_ref::<u32>().unwrap(), 42);
+        assert!(table.get_mut(id).unwrap().is::<u32>());
+        assert_eq!(
+            *table.get_mut(id).unwrap().downcast_ref::<u32>().unwrap(),
+            42
+        );
     }
 }
diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs
index b65d5a9113cb..0cbc1912dcd7 100644
--- a/crates/wasmtime/src/runtime/vm/libcalls.rs
+++ b/crates/wasmtime/src/runtime/vm/libcalls.rs
@@ -479,7 +479,7 @@ fn table_copy(
     dst: u64,
     src: u64,
     len: u64,
-) -> Result<(), Trap> {
+) -> Result<()> {
     let dst_table_index = TableIndex::from_u32(dst_table_index);
     let src_table_index = TableIndex::from_u32(src_table_index);
     let store = store.store_opaque_mut();
@@ -708,7 +708,7 @@ fn gc_alloc_raw(
     let opaque = store.store_opaque_mut();
     if let Some(gc_store) = opaque.try_gc_store_mut() {
         if let Ok(gc_ref) = gc_store.alloc_raw(header, layout)? {
-            let raw = gc_store.expose_gc_ref_to_wasm(gc_ref);
+            let raw = gc_store.expose_gc_ref_to_wasm(gc_ref)?;
             return Ok(raw);
         }
    }
@@ -724,8 +724,7 @@ fn gc_alloc_raw(
     })
     .await?;
 
-    let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
-    Ok(raw)
+    store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref)
 })?
 }
 
@@ -765,7 +764,7 @@ fn get_interned_func_ref(
     instance: InstanceId,
     func_ref_id: u32,
     module_interned_type_index: u32,
-) -> *mut u8 {
+) -> Result<*mut u8> {
     use super::FuncRefTableId;
     use crate::store::AutoAssertNoGc;
     use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};
@@ -779,7 +778,7 @@ fn get_interned_func_ref(
         store
             .unwrap_gc_store()
             .func_ref_table
-            .get_untyped(func_ref_id)
+            .get_untyped(func_ref_id)?
     } else {
         let types = store.engine().signatures();
         let engine_ty = store
@@ -788,10 +787,10 @@ fn get_interned_func_ref(
         store
             .unwrap_gc_store()
             .func_ref_table
-            .get_typed(types, func_ref_id, engine_ty)
+            .get_typed(types, func_ref_id, engine_ty)?
     };
 
-    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
+    Ok(func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast()))
 }
 
 /// Implementation of the `array.new_data` instruction.
@@ -857,12 +856,11 @@ fn array_new_data(
     // Copy the data into the array, initializing it.
     gc_store
-        .gc_object_data(array_ref.as_gc_ref())
-        .copy_from_slice(array_layout.base_size, data);
+        .gc_object_data(array_ref.as_gc_ref())?
+        .copy_from_slice(array_layout.base_size, data)?;
 
     // Return the array to Wasm!
-    let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
-    Ok(raw)
+    gc_store.expose_gc_ref_to_wasm(array_ref.into())
 })?
 }
 
@@ -891,16 +889,17 @@ fn array_init_data(
     // Null check the array.
     let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
-    let array = gc_ref
-        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
-        .expect("gc ref should be an array");
+    let array = match gc_ref.into_arrayref(&*store.unwrap_gc_store().gc_heap) {
+        Ok(r) => r,
+        Err(_) => bail_bug!("expected arrayref"),
+    };
 
     let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
     let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
     let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;
 
     // Bounds check the array.
-    let array_len = array.len(store.store_opaque());
+    let array_len = array.len(store.store_opaque())?;
     let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
     if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
         return Err(Trap::ArrayOutOfBounds.into());
@@ -945,8 +944,8 @@ fn array_init_data(
     let gc_store = gc_store.unwrap();
     let data = &instance.wasm_data(data_range)[src..][..data_len];
     gc_store
-        .gc_object_data(array.as_gc_ref())
-        .copy_from_slice(obj_offset, data);
+        .gc_object_data(array.as_gc_ref())?
+        .copy_from_slice(obj_offset, data)?;
 
     Ok(())
 }
@@ -960,10 +959,7 @@ fn array_new_elem(
     src: u32,
     len: u32,
 ) -> Result<u32> {
-    use crate::{
-        ArrayRef, ArrayRefPre, ArrayType, OpaqueRootScope, RootedGcRefImpl, Val,
-        store::AutoAssertNoGc,
-    };
+    use crate::{ArrayRef, ArrayRefPre, ArrayType, OpaqueRootScope, Val, store::AutoAssertNoGc};
     use wasmtime_environ::ModuleInternedTypeIndex;
 
@@ -1015,8 +1011,7 @@ fn array_new_elem(
         let mut store = AutoAssertNoGc::new(&mut store);
         let gc_ref = array.try_clone_gc_ref(&mut store)?;
-        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
-        Ok(raw)
+        store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref)
     })?
 }
 
@@ -1190,23 +1185,25 @@ fn array_copy_non_gc_ref_elems(
     // Null checks and conversion to `VMArrayRef`.
     let dst_gc_ref = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
-    let dst_arr = dst_gc_ref
-        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
-        .expect("gc ref should be an array");
+    let dst_arr = match dst_gc_ref.into_arrayref(&*store.unwrap_gc_store().gc_heap) {
+        Ok(r) => r,
+        Err(_) => bail_bug!("expected arrayref"),
+    };
     let src_gc_ref = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
-    let src_arr = src_gc_ref
-        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
-        .expect("gc ref should be an array");
+    let src_arr = match src_gc_ref.into_arrayref(&*store.unwrap_gc_store().gc_heap) {
+        Ok(r) => r,
+        Err(_) => bail_bug!("expected arrayref"),
+    };
 
     // Bounds check the destination array's elements.
-    let dst_len = dst_arr.len(store.store_opaque());
+    let dst_len = dst_arr.len(store.store_opaque())?;
     let dst_end = dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)?;
     if dst_end > dst_len {
         return Err(Trap::ArrayOutOfBounds.into());
     }
 
     // Bounds check the source array's elements.
-    let src_len = src_arr.len(store.store_opaque());
+    let src_len = src_arr.len(store.store_opaque())?;
     let src_end = src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)?;
     if src_end > src_len {
         return Err(Trap::ArrayOutOfBounds.into());
@@ -1238,14 +1235,14 @@ fn array_copy_non_gc_ref_elems(
             .checked_add(byte_len)
             .expect("already checked bounds");
         gc_store
-            .gc_object_data(dst_arr.as_gc_ref())
-            .copy_within(src_byte_start..src_byte_end, dst_byte_start);
+            .gc_object_data(dst_arr.as_gc_ref())?
+            .copy_within(src_byte_start..src_byte_end, dst_byte_start)?;
     } else {
         // Different arrays: non-overlapping.
         let (src_data, dst_data) =
-            gc_store.gc_object_data_pair(src_arr.as_gc_ref(), dst_arr.as_gc_ref());
-        let src_slice = src_data.slice(src_byte_start, byte_len);
-        let dst_slice = dst_data.slice_mut(dst_byte_start, byte_len);
+            gc_store.gc_object_data_pair(src_arr.as_gc_ref(), dst_arr.as_gc_ref())?;
+        let src_slice = src_data.slice(src_byte_start, byte_len)?;
+        let dst_slice = dst_data.slice_mut(dst_byte_start, byte_len)?;
         dst_slice.copy_from_slice(src_slice);
     }
 
@@ -1783,9 +1780,10 @@ fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
 fn throw_ref(store: &mut dyn VMStore, _instance: InstanceId, exnref: u32) -> Result<()> {
     let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
     let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
-    let exnref = exnref
-        .into_exnref(&*store.unwrap_gc_store().gc_heap)
-        .expect("gc ref should be an exception object");
+    let exnref = match exnref.into_exnref(&*store.unwrap_gc_store().gc_heap) {
+        Ok(r) => r,
+        Err(_) => bail_bug!("expected exnref"),
+    };
     store.set_pending_exception(exnref);
     Err(crate::ThrownException.into())
 }
diff --git a/crates/wasmtime/src/runtime/vm/table.rs b/crates/wasmtime/src/runtime/vm/table.rs
index c4c15c7598f8..8a49a84054ba 100644
--- a/crates/wasmtime/src/runtime/vm/table.rs
+++ b/crates/wasmtime/src/runtime/vm/table.rs
@@ -568,12 +568,12 @@ impl Table {
         dst: u64,
         val: Option<&VMGcRef>,
         len: u64,
-    ) -> Result<(), Trap> {
+    ) -> Result<()> {
         let range = self.validate_fill(dst, len)?;
 
         // Clone the init GC reference into each table slot.
         for slot in &mut self.gc_refs_mut()[range] {
-            GcStore::write_gc_ref_optional_store(gc_store.as_deref_mut(), slot, val);
+            GcStore::write_gc_ref_optional_store(gc_store.as_deref_mut(), slot, val)?;
         }
 
         Ok(())
@@ -625,7 +625,8 @@ impl Table {
         init_value: Option<SendSyncPtr<VMFuncRef>>,
     ) -> Result<Option<usize>, Error> {
         self._grow(delta, limiter, |me, base, len| {
-            me.fill_func(base, init_value.map(|p| p.as_non_null()), len)
+            me.fill_func(base, init_value.map(|p| p.as_non_null()), len)?;
+            Ok(())
         })
         .await
     }
@@ -652,7 +653,8 @@ impl Table {
         init_value: Option<VMContObj>,
     ) -> Result<Option<usize>, Error> {
         self._grow(delta, limiter, |me, base, len| {
-            me.fill_cont(base, init_value, len)
+            me.fill_cont(base, init_value, len)?;
+            Ok(())
         })
         .await
     }
 
@@ -661,7 +663,7 @@ impl Table {
         &mut self,
         delta: u64,
         mut limiter: Option<&mut StoreResourceLimiter<'_>>,
-        fill: impl FnOnce(&mut Self, u64, u64) -> Result<(), Trap>,
+        fill: impl FnOnce(&mut Self, u64, u64) -> Result<()>,
     ) -> Result<Option<usize>, Error> {
         let old_size = self.size();
@@ -747,8 +749,7 @@ impl Table {
             self,
             u64::try_from(old_size).unwrap(),
             u64::try_from(delta).unwrap(),
-        )
-        .expect("table should not be out of bounds");
+        )?;
 
         Ok(Some(old_size))
     }
@@ -819,14 +820,14 @@ impl Table {
         store: Option<&mut GcStore>,
         index: u64,
         elem: Option<&VMGcRef>,
-    ) -> Result<(), Trap> {
+    ) -> Result<()> {
         let trap = Trap::TableOutOfBounds;
         let index: usize = index.try_into().map_err(|_| trap)?;
         GcStore::write_gc_ref_optional_store(
             store,
             self.gc_refs_mut().get_mut(index).ok_or(trap)?,
             elem,
-        );
+        )?;
         Ok(())
     }
@@ -844,9 +845,9 @@ impl Table {
         dst_index: u64,
         src_index: u64,
         len: u64,
-    ) -> Result<(), Trap> {
+    ) -> Result<()> {
         let (src_range, dst_range) = Table::validate_copy(self, dst, dst_index, src_index, len)?;
-        Self::copy_elements(gc_store, dst, self, dst_range, src_range);
+        Self::copy_elements(gc_store, dst, self, dst_range, src_range)?;
         Ok(())
     }
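Every slot-by-slot table mutation above now threads the barrier's `Result` outward with `?`, so a corrupted heap aborts the fill or copy at the first bad slot instead of panicking. Below is a simplified model of that control flow, with illustrative types rather than the real `Table` and `GcStore`:

```rust
type GcRef = u32;

// Stand-in for `GcStore::write_gc_ref_optional_store`: a write barrier that
// can report heap corruption instead of panicking.
fn write_slot(dst: &mut Option<GcRef>, src: Option<&GcRef>) -> Result<(), String> {
    *dst = src.copied();
    Ok(())
}

// Mirror of the `fill` shape: clone one init value into a range of slots,
// bailing out on the first barrier failure.
fn fill(slots: &mut [Option<GcRef>], val: Option<&GcRef>) -> Result<(), String> {
    for slot in slots.iter_mut() {
        write_slot(slot, val)?;
    }
    Ok(())
}

fn main() {
    let mut table = vec![None; 4];
    fill(&mut table, Some(&7)).unwrap();
    assert!(table.iter().all(|s| *s == Some(7)));
}
```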
@@ -863,9 +864,9 @@ impl Table {
         dst_index: u64,
         src_index: u64,
         len: u64,
-    ) -> Result<(), Trap> {
+    ) -> Result<()> {
         let (src_range, dst_range) = Table::validate_copy(self, self, dst_index, src_index, len)?;
-        self.copy_elements_within(gc_store, dst_range, src_range);
+        self.copy_elements_within(gc_store, dst_range, src_range)?;
         Ok(())
     }
@@ -1048,7 +1049,7 @@ impl Table {
         src_table: &Self,
         dst_range: Range<usize>,
         src_range: Range<usize>,
-    ) {
+    ) -> Result<()> {
         // This can only be used when copying between different tables
         debug_assert!(!ptr::eq(dst_table, src_table));
@@ -1074,7 +1075,7 @@ impl Table {
                     gc_store.as_deref_mut(),
                     &mut dst_table.gc_refs_mut()[dst],
                     src_table.gc_refs()[src].as_ref(),
-                );
+                )?;
             }
         }
         TableElementType::Cont => {
             dst_table
                 .contrefs_mut()[dst_range]
                 .copy_from_slice(&src_table.contrefs()[src_range]);
         }
     }
+    Ok(())
 }
 
 fn copy_elements_within(
     &mut self,
     mut gc_store: Option<&mut GcStore>,
     dst_range: Range<usize>,
     src_range: Range<usize>,
-) {
+) -> Result<()> {
     assert_eq!(
         dst_range.end - dst_range.start,
         src_range.end - src_range.start
     );
 
     // This is a no-op.
     if src_range.start == dst_range.start {
-        return;
+        return Ok(());
     }
 
     let ty = self.element_type();
@@ -1117,14 +1119,14 @@ impl Table {
                     let (ds, ss) = elements.split_at_mut(s);
                     let dst = &mut ds[d];
                     let src = ss[0].as_ref();
-                    GcStore::write_gc_ref_optional_store(gc_store.as_deref_mut(), dst, src);
+                    GcStore::write_gc_ref_optional_store(gc_store.as_deref_mut(), dst, src)?;
                 }
             } else {
                 for (s, d) in src_range.rev().zip(dst_range.rev()) {
                     let (ss, ds) = elements.split_at_mut(d);
                     let dst = &mut ds[0];
                     let src = ss[s].as_ref();
-                    GcStore::write_gc_ref_optional_store(gc_store.as_deref_mut(), dst, src);
+                    GcStore::write_gc_ref_optional_store(gc_store.as_deref_mut(), dst, src)?;
                 }
             }
         }
         TableElementType::Cont => {
             self.contrefs_mut().copy_within(src_range, dst_range.start);
         }
     }
+    Ok(())
 }
 
 /// Manually resets all table elements to a null bit pattern.
diff --git a/crates/wasmtime/src/runtime/vm/traphandlers.rs b/crates/wasmtime/src/runtime/vm/traphandlers.rs
index afd9168ba633..73411608d487 100644
--- a/crates/wasmtime/src/runtime/vm/traphandlers.rs
+++ b/crates/wasmtime/src/runtime/vm/traphandlers.rs
@@ -244,7 +244,11 @@
     // generate the return value of this function. This is the
     // conditionally, below, passed to `catch_unwind`.
     let f = move || match f(store) {
-        Ok(ret) => (ret.into_abi(), None),
+        Ok(ret) => {
+            let abi = ret.into_abi();
+            debug_assert!(abi != T::SENTINEL);
+            (abi, None)
+        }
         Err(reason) => (T::SENTINEL, Some(UnwindReason::from(reason))),
     };
 
@@ -280,7 +284,7 @@
 /// `into_abi` function.
 pub unsafe trait HostResultHasUnwindSentinel {
     /// The Cranelift-understood ABI of this value (should not be `Self`).
-    type Abi: Copy;
+    type Abi: Copy + PartialEq;
 
     /// A value that indicates that an unwind should happen and is tested for in
     /// Cranelift-generated code.
@@ -340,6 +344,14 @@ unsafe impl HostResultHasUnwindSentinel for bool {
     }
 }
 
+unsafe impl HostResultHasUnwindSentinel for *mut u8 {
+    type Abi = *mut u8;
+    const SENTINEL: Self::Abi = ptr::without_provenance_mut(usize::MAX);
+    fn into_abi(self) -> Self::Abi {
+        self
+    }
+}
+
 /// A helper structure to schlep from this module to the
 /// `crate::trap::from_runtime_box` function.
 ///
diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs
index d2791a1498ea..9e7870083da5 100644
--- a/crates/wasmtime/src/runtime/vm/vmcontext.rs
+++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs
@@ -4,6 +4,7 @@
 mod vm_host_func_context;
 
 pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
+use crate::bail_bug;
 use crate::prelude::*;
 use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
 use crate::store::StoreOpaque;
@@ -620,17 +621,17 @@ impl VMGlobalDefinition {
             WasmValType::Ref(r) => match r.heap_type.top() {
                 WasmHeapTopType::Extern => {
                     let r = VMGcRef::from_raw_u32(raw.get_externref());
-                    global.init_gc_ref(store, r.as_ref())
+                    global.init_gc_ref(store, r.as_ref())?
                 }
                 WasmHeapTopType::Any => {
                     let r = VMGcRef::from_raw_u32(raw.get_anyref());
-                    global.init_gc_ref(store, r.as_ref())
+                    global.init_gc_ref(store, r.as_ref())?
                 }
                 WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                 WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                 WasmHeapTopType::Exn => {
                     let r = VMGcRef::from_raw_u32(raw.get_exnref());
-                    global.init_gc_ref(store, r.as_ref())
+                    global.init_gc_ref(store, r.as_ref())?
                 }
             },
         }
@@ -673,7 +674,7 @@ impl VMGlobalDefinition {
                     }
                 }),
                 WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
-                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
+                WasmHeapTopType::Cont => bail_bug!("unimplemented"), // FIXME: #10248 stack switching support.
             },
         })
     }
@@ -804,7 +805,11 @@ impl VMGlobalDefinition {
     }
 
     /// Initialize a global to the given GC reference.
-    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
+    pub unsafe fn init_gc_ref(
+        &mut self,
+        store: &mut StoreOpaque,
+        gc_ref: Option<&VMGcRef>,
+    ) -> Result<()> {
         let dest = unsafe {
             &mut *(self
                 .storage
@@ -817,7 +822,11 @@ impl VMGlobalDefinition {
     }
 
     /// Write a GC reference into this global value.
-    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
+    pub unsafe fn write_gc_ref(
+        &mut self,
+        store: &mut StoreOpaque,
+        gc_ref: Option<&VMGcRef>,
+    ) -> Result<()> {
         let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
         store.write_gc_ref(dest, gc_ref)
     }
diff --git a/tests/all/missing_async.rs b/tests/all/missing_async.rs
index 722ed13fac28..878458f5fc88 100644
--- a/tests/all/missing_async.rs
+++ b/tests/all/missing_async.rs
@@ -207,7 +207,7 @@ async fn async_disallows_typed_func_call() -> Result<()> {
 async fn async_disallows_gc() -> Result<()> {
     let mut store = async_limiter_store();
     assert!(store.gc(None).is_err());
-    store.gc_async(None).await;
+    store.gc_async(None).await?;
     Ok(())
 }
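The new `*mut u8` sentinel impl in the traphandlers hunk relies on the host never returning a pointer equal to `usize::MAX`, which is exactly the invariant the added `debug_assert!(abi != T::SENTINEL)` checks. A minimal, self-contained sketch of the same unwind-sentinel pattern follows; the trait and function names here are illustrative stand-ins, not Wasmtime's actual definitions:

```rust
use std::ptr;

// Sketch of an unwind-sentinel scheme: a host call either produces a valid
// ABI value or the sentinel, which the caller interprets as "unwind".
trait HasSentinel {
    type Abi: Copy + PartialEq;
    const SENTINEL: Self::Abi;
    fn into_abi(self) -> Self::Abi;
}

impl HasSentinel for *mut u8 {
    type Abi = *mut u8;
    // All-ones pointer: never a valid object address in practice.
    const SENTINEL: Self::Abi = ptr::without_provenance_mut(usize::MAX);
    fn into_abi(self) -> Self::Abi {
        self
    }
}

fn call_host<T: HasSentinel>(f: impl FnOnce() -> Result<T, String>) -> (T::Abi, Option<String>) {
    match f() {
        Ok(ret) => {
            let abi = ret.into_abi();
            // A successful result must be distinguishable from the sentinel.
            debug_assert!(abi != T::SENTINEL);
            (abi, None)
        }
        Err(e) => (T::SENTINEL, Some(e)),
    }
}

fn main() {
    let mut x = 0u8;
    let (abi, err) = call_host(|| Ok((&mut x) as *mut u8));
    assert!(err.is_none() && abi != <*mut u8 as HasSentinel>::SENTINEL);
}
```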