Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions oscars/benches/oscars_vs_boa_gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -244,6 +244,7 @@ fn bench_deep(c: &mut Criterion) {
}
};
OscarsGc::new_in(OscarsGcRefCell::new(node), collector)
.expect("benchmark allocation")
}

let root = build_tree(5, &collector);
Expand Down
22 changes: 19 additions & 3 deletions oscars/src/alloc/mempool3/alloc.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,23 @@
use core::{cell::Cell, marker::PhantomData, ptr::NonNull};

use rust_alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use rust_alloc::alloc::{Layout, alloc, dealloc};

use crate::alloc::mempool3::PoolAllocError;

/// Diverges when the operating system refuses to hand out memory.
///
/// With the `std` feature enabled this aborts the process outright.
/// In `no_std` builds the best we can do is panic (which still aborts
/// the process under `panic = "abort"` profiles).
#[inline(never)]
#[cold]
fn abort_on_alloc_failure() -> ! {
    #[cfg(feature = "std")]
    std::process::abort();

    #[cfg(not(feature = "std"))]
    panic!("OS memory allocation failed");
}

/// free slot pointing to the next free slot
/// `repr(C)` puts `next` exactly at the start of the slot
#[repr(C)]
Expand Down Expand Up @@ -143,7 +157,8 @@ impl SlotPool {
let buffer = unsafe {
let ptr = alloc(layout);
let Some(nn) = NonNull::new(ptr) else {
handle_alloc_error(layout)
// Abort on OS malloc failure per spec
abort_on_alloc_failure()
};
nn
};
Expand Down Expand Up @@ -331,7 +346,8 @@ impl BumpPage {
let buffer = unsafe {
let ptr = alloc(layout);
let Some(nn) = NonNull::new(ptr) else {
handle_alloc_error(layout)
// Abort on OS malloc failure per spec
abort_on_alloc_failure()
};
nn
};
Expand Down
32 changes: 32 additions & 0 deletions oscars/src/alloc/mempool3/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@ pub struct PoolAllocator<'alloc> {
pub(crate) heap_threshold: usize,
pub(crate) page_size: usize,
pub(crate) current_heap_size: usize,
// hard memory limit before GC allocation fails with OutOfMemory.
// defaults to usize::MAX (effectively unlimited).
pub(crate) max_heap_size: usize,
// per size-class slot pools
pub(crate) slot_pools: Vec<SlotPool>,
// bump pages for raw byte allocs
Expand All @@ -74,6 +77,7 @@ impl<'alloc> Default for PoolAllocator<'alloc> {
heap_threshold: DEFAULT_HEAP_THRESHOLD,
page_size: DEFAULT_PAGE_SIZE,
current_heap_size: 0,
max_heap_size: usize::MAX,
slot_pools: Vec::new(),
bump_pages: Vec::new(),
free_cache: Cell::new(usize::MAX),
Expand Down Expand Up @@ -111,6 +115,12 @@ impl<'alloc> PoolAllocator<'alloc> {
self
}

/// Builder-style setter for the hard heap cap, in bytes.
///
/// A value that does not fit in `usize` (possible on 32-bit targets)
/// is clamped to `usize::MAX`, i.e. effectively unlimited.
pub fn with_max_heap_size(mut self, max_heap_size: u64) -> Self {
    // try_from fails exactly when the value exceeds usize::MAX,
    // so this saturates the same way `min(usize::MAX as u64)` did.
    self.max_heap_size = usize::try_from(max_heap_size).unwrap_or(usize::MAX);
    self
}

/// total live slot pool + bump page count
pub fn pools_len(&self) -> usize {
self.slot_pools.len() + self.bump_pages.len()
Expand Down Expand Up @@ -229,6 +239,17 @@ impl<'alloc> PoolAllocator<'alloc> {

// Recycle list had no match, allocate a fresh page from the OS.
let total = self.page_size.max(slot_size * 4);

// Check if allocation would exceed max_heap_size
if let Some(new_size) = self.current_heap_size.checked_add(total) {
if new_size > self.max_heap_size {
return Err(PoolAllocError::OutOfMemory);
}
} else {
// Overflow in size calculation
return Err(PoolAllocError::OutOfMemory);
}

let new_pool = SlotPool::try_init(slot_size, total, 16)?;
self.current_heap_size += new_pool.layout.size();
let slot_ptr = new_pool.alloc_slot().ok_or(PoolAllocError::OutOfMemory)?;
Expand Down Expand Up @@ -294,6 +315,17 @@ impl<'alloc> PoolAllocator<'alloc> {
let margin = 64;
let total = self.page_size.max(layout.size() + layout.align() + margin);
let max_align = layout.align().max(16);

// Check if allocation would exceed max_heap_size
if let Some(new_size) = self.current_heap_size.checked_add(total) {
if new_size > self.max_heap_size {
return Err(PoolAllocError::OutOfMemory);
}
} else {
// Overflow in size calculation
return Err(PoolAllocError::OutOfMemory);
}

let page = BumpPage::try_init(total, max_align)?;
self.current_heap_size += page.layout.size();
let ptr = page
Expand Down
104 changes: 104 additions & 0 deletions oscars/src/alloc/mempool3/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -234,3 +234,107 @@ fn max_recycled_cap_respected() {
assert_eq!(allocator.recycled_pools.len(), 1);
assert!(allocator.current_heap_size < heap_before);
}

#[test]
fn max_heap_enforced() {
    use crate::alloc::mempool3::PoolAllocError;

    // Tiny pages plus a tiny cap so the limit is reached quickly.
    let mut allocator = PoolAllocator::default()
        .with_page_size(512)
        .with_max_heap_size(1024);

    // Drain the budget: keep allocating until the cap pushes back.
    let mut live = Vec::new();
    loop {
        match allocator.try_alloc(0u64) {
            Ok(ptr) => live.push(ptr),
            Err(PoolAllocError::OutOfMemory) => break,
            Err(e) => panic!("unexpected error: {:?}", e),
        }
    }

    assert!(
        !live.is_empty(),
        "should allocate at least some items before hitting limit"
    );

    // The cap itself must never be exceeded.
    assert!(allocator.current_heap_size <= 1024);

    // Once at the limit, subsequent requests keep failing.
    assert!(matches!(
        allocator.try_alloc(0u64),
        Err(PoolAllocError::OutOfMemory)
    ));
}

#[test]
fn max_heap_default() {
    // A freshly-built allocator must not impose any heap cap.
    let mut allocator = PoolAllocator::default();
    assert_eq!(allocator.max_heap_size, usize::MAX);

    // With no cap, a long run of small allocations all succeed.
    for _ in 0..1000 {
        let result = allocator.try_alloc(0u64);
        result.expect("allocation should succeed with unlimited heap");
    }
}

#[test]
fn max_heap_u64_max_saturates() {
    // u64::MAX must clamp to usize::MAX rather than wrap or truncate.
    let capped = PoolAllocator::default().with_max_heap_size(u64::MAX);
    assert_eq!(capped.max_heap_size, usize::MAX);
}

#[test]
fn max_heap_accepts_large_values() {
    // A realistic large cap (1 GiB) passes through unchanged.
    const ONE_GIB: u64 = 1024 * 1024 * 1024;
    let allocator = PoolAllocator::default().with_max_heap_size(ONE_GIB);
    assert_eq!(allocator.max_heap_size, ONE_GIB as usize);
}

#[test]
fn max_heap_gc_reclaim() {
    let mut allocator = PoolAllocator::default()
        .with_page_size(512)
        .with_max_heap_size(2048);

    // Fill up to (or near) the cap.
    let mut first_batch = Vec::new();
    for _ in 0..50 {
        let Ok(ptr) = allocator.try_alloc(0u64) else { break };
        first_batch.push(ptr);
    }

    let pools_before = allocator.pools_len();
    assert!(pools_before > 0);

    // Release everything from the first batch.
    for ptr in &first_batch {
        // SAFETY: each pointer came from `try_alloc` above and is freed
        // exactly once.
        unsafe { allocator.free_slot_typed(ptr.as_ptr()) };
    }

    // Disable recycling so empty pools are genuinely deallocated.
    allocator.max_recycled = 0;
    allocator.drop_empty_pools();

    // Dropping empty pools must shrink the pool count.
    let pools_after = allocator.pools_len();
    assert!(pools_after < pools_before || pools_after == 0);

    // With memory reclaimed, fresh allocations succeed again.
    let mut second_batch = Vec::new();
    for _ in 0..20 {
        let Ok(ptr) = allocator.try_alloc(0u64) else { break };
        second_batch.push(ptr);
    }
    assert!(
        !second_batch.is_empty(),
        "should be able to allocate after GC reclaims memory"
    );
}
7 changes: 7 additions & 0 deletions oscars/src/collectors/mark_sweep/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,13 @@ impl MarkSweepGarbageCollector {
self
}

/// Builder-style setter that forwards the heap cap to the inner allocator.
///
/// Routing through `PoolAllocator::with_max_heap_size` keeps the
/// u64 -> usize saturation behavior defined in exactly one place.
pub fn with_max_heap_size(mut self, max_heap_size: u64) -> Self {
    let alloc = self.allocator.get_mut();
    // take() swaps in a Default allocator momentarily so the
    // by-value builder method can be applied to the real one.
    *alloc = core::mem::take(alloc).with_max_heap_size(max_heap_size);
    self
}

pub fn with_page_size(mut self, page_size: usize) -> Self {
self.allocator.get_mut().page_size = page_size;
self
Expand Down
16 changes: 9 additions & 7 deletions oscars/src/collectors/mark_sweep/pointers/gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,14 @@ pub struct Gc<T: Trace + ?Sized + 'static> {
}

impl<T: Trace> Gc<T> {
#[must_use]
pub fn new_in<C: Collector>(value: T, collector: &C) -> Self {
let inner_ptr = collector
.alloc_gc_node(value)
.expect("Failed to allocate Gc node")
.to_erased();
/// Allocates a new `Gc` in the given collector.
/// Returns Err if allocation fails due to heap limit exceeded.
/// Note: OS malloc() failure will abort the process.
pub fn new_in<C: Collector>(
value: T,
collector: &C,
) -> Result<Self, crate::alloc::mempool3::PoolAllocError> {
let inner_ptr = collector.alloc_gc_node(value)?.to_erased();

// SAFETY: safe because the gc tracks this
let inner_ptr = unsafe { inner_ptr.extend_lifetime() };
Expand All @@ -32,7 +34,7 @@ impl<T: Trace> Gc<T> {
};
// GcBox is allocated with 0 roots, increment to 1 for the new handle
gc.inner_ptr().as_inner_ref().inc_roots();
gc
Ok(gc)
}

/// Converts a `Gc` into a raw [`PoolPointer`].
Expand Down
Loading