Add tracking functionality for memory management in allocators #1273

Open · wants to merge 3 commits into base: master
5 changes: 5 additions & 0 deletions Cargo.toml
@@ -51,6 +51,8 @@
strum = "0.26.2"
strum = "0.26.2"
strum_macros = "0.26.2"
sysinfo = "0.30.9"
crabgrind = { version = "0.1.12", optional = true }


[dev-dependencies]
paste = "1.0.8"
@@ -218,4 +220,7 @@
eager_sweeping = []
# normal heap range, we will have to use chunk-based SFT table. Turning on this feature will use a different SFT map implementation on 64bits,
# and will affect all the plans in the build. Please be aware of the consequence, and this is only meant to be experimental use.
malloc_mark_sweep = []
# Enable Valgrind support in MMTk. This will invoke Valgrind interfaces on allocation and deallocation.
# At the moment, only the MarkSweep space is supported.
crabgrind = ["dep:crabgrind", "vo_bit"]
# Group:end
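
Note: the tracking shim itself, src/util/track.rs, is referenced throughout this diff, but its file did not load in this view. Below is a minimal sketch of what it plausibly provides, inferred from the call sites in the rest of the PR. The memcheck::mempool and memcheck::alloc::malloc paths are assumptions modeled on Valgrind's client requests; only memcheck::alloc::free and memcheck::is_defined appear verbatim in this diff.

//! Hypothetical sketch of `src/util/track.rs` (not part of the loaded diff).
//! With the `crabgrind` feature disabled, every hook compiles to an empty
//! body, so non-Valgrind builds pay nothing.
use crate::util::Address;
#[cfg(feature = "crabgrind")]
use std::ffi::c_void;

/// True if a memory tracking tool is compiled in.
pub fn tracking_enabled() -> bool {
    cfg!(feature = "crabgrind")
}

/// Report a malloc-like allocation (VALGRIND_MALLOCLIKE_BLOCK, redzone 0).
pub fn track_malloc(addr: Address, size: usize, is_zeroed: bool) {
    #[cfg(feature = "crabgrind")]
    crabgrind::memcheck::alloc::malloc(addr.to_mut_ptr(), size, 0, is_zeroed);
    #[cfg(not(feature = "crabgrind"))]
    let _ = (addr, size, is_zeroed);
}

/// Report a free-like deallocation (VALGRIND_FREELIKE_BLOCK). The size is
/// unused: Valgrind identifies the block by its start address.
pub fn track_free(addr: Address, _size: usize) {
    #[cfg(feature = "crabgrind")]
    crabgrind::memcheck::alloc::free(addr.to_mut_ptr(), 0);
    #[cfg(not(feature = "crabgrind"))]
    let _ = addr;
}

/// Register a page resource as a memory pool (VALGRIND_CREATE_MEMPOOL),
/// using the resource's own address as the pool handle.
pub fn track_mempool<T>(pool: &T, rzb: usize, is_zeroed: bool) {
    #[cfg(feature = "crabgrind")]
    crabgrind::memcheck::mempool::create(pool as *const T as *mut c_void, rzb, is_zeroed);
    #[cfg(not(feature = "crabgrind"))]
    let _ = (pool, rzb, is_zeroed);
}

/// Report pages handed out by a tracked pool (VALGRIND_MEMPOOL_ALLOC).
pub fn track_mempool_alloc<T>(pool: &T, addr: Address, size: usize) {
    #[cfg(feature = "crabgrind")]
    crabgrind::memcheck::mempool::alloc(pool as *const T as *mut c_void, addr.to_mut_ptr(), size);
    #[cfg(not(feature = "crabgrind"))]
    let _ = (pool, addr, size);
}

/// Report pages returned to a tracked pool (VALGRIND_MEMPOOL_FREE).
pub fn track_mempool_free<T>(pool: &T, addr: Address) {
    #[cfg(feature = "crabgrind")]
    crabgrind::memcheck::mempool::free(pool as *const T as *mut c_void, addr.to_mut_ptr());
    #[cfg(not(feature = "crabgrind"))]
    let _ = (pool, addr);
}

/// Destroy the pool when its page resource is dropped (VALGRIND_DESTROY_MEMPOOL).
pub fn untrack_mempool<T>(pool: &T) {
    #[cfg(feature = "crabgrind")]
    crabgrind::memcheck::mempool::destroy(pool as *const T as *mut c_void);
    #[cfg(not(feature = "crabgrind"))]
    let _ = pool;
}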
2 changes: 2 additions & 0 deletions src/plan/global.rs
@@ -100,6 +100,8 @@
pub fn create_plan<VM: VMBinding>(
plan.for_each_space(&mut |s| {
sft_map.notify_space_creation(s.as_sft());
s.initialize_sft(sft_map);
// After the SFT is initialized, we can also initialize mempool tracking.
s.get_page_resource().track();
});

plan
2 changes: 2 additions & 0 deletions src/policy/largeobjectspace.rs
@@ -10,6 +10,7 @@
use crate::util::heap::{FreeListPageResource, PageResource};
use crate::util::metadata;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::track::track_free;
use crate::util::treadmill::TreadMill;
use crate::util::{Address, ObjectReference};
use crate::vm::ObjectModel;
@@ -288,6 +289,7 @@
impl<VM: VMBinding> LargeObjectSpace<VM> {
let sweep = |object: ObjectReference| {
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::unset_vo_bit(object);
track_free(object.to_object_start::<VM>(), 0 /* TODO: Size */);
self.pr
.release_pages(get_super_page(object.to_object_start::<VM>()));
};
18 changes: 17 additions & 1 deletion src/policy/marksweepspace/native_ms/block.rs
@@ -297,13 +297,21 @@
impl Block {
if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
.is_marked::<VM>(potential_object, Ordering::SeqCst)
{
#[cfg(feature = "crabgrind")]
{
let vo_bit = crate::util::metadata::vo_bit::is_vo_bit_set(potential_object);
if vo_bit {
crabgrind::memcheck::alloc::free(cell.to_mut_ptr(), 0);
}
}
// Clear the VO bit if it was ever set. The VO bit may never have been set for this cell
// (i.e. no object occupied the cell before this GC); we unset it anyway.
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::unset_vo_bit_nocheck(potential_object);
unsafe {
cell.store::<Address>(last);
}

last = cell;
}
cell += cell_size;
@@ -365,7 +373,14 @@
"{:?} Free cell: {}, last cell in freelist is {}",
self, cell, last
);

#[cfg(feature = "crabgrind")]
{
let vo_bit =
crate::util::metadata::vo_bit::is_vo_bit_set(potential_object_ref);
if vo_bit {
crabgrind::memcheck::alloc::free(cell.to_mut_ptr(), 0);
}
}
// Clear VO bit: we don't know where the object reference actually is, so we bulk zero the cell.
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::bzero_vo_bit(cell, cell_size);
@@ -376,6 +391,7 @@
cell.store::<Address>(last);
}
last = cell;

cell += cell_size;
debug_assert_eq!(cursor, cell);
}
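Note that the two crabgrind blocks above call crabgrind::memcheck::alloc::free directly rather than going through the track_free shim used elsewhere in this PR (the crabgrind feature enables vo_bit in Cargo.toml, so the VO bit query is always available here). Under the hypothetical shim sketched after the Cargo.toml changes, an equivalent form of the first block would be:

// Equivalent call through the assumed shim; `cell` is the block's cell start.
if crate::util::metadata::vo_bit::is_vo_bit_set(potential_object) {
    crate::util::track::track_free(cell, 0 /* size unused by Valgrind */);
}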
2 changes: 1 addition & 1 deletion src/policy/space.rs
@@ -6,6 +6,7 @@
use crate::util::metadata::side_metadata::{
SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::track::track_mempool_alloc;
use crate::util::Address;
use crate::util::ObjectReference;

@@ -214,7 +215,6 @@
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
self.common().descriptor
);
}

debug!("Space.acquire(), returned = {}", res.start);
res.start
}
6 changes: 5 additions & 1 deletion src/util/alloc/bumpallocator.rs
@@ -1,5 +1,6 @@
use std::sync::Arc;

use crate::util::track::track_malloc;
use crate::util::Address;

use crate::util::alloc::Allocator;
@@ -116,13 +117,16 @@
impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
self.bump_pointer.cursor,
self.bump_pointer.limit
);
track_malloc(result, size, false);
result
}
}

fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
trace!("alloc_slow");
self.acquire_block(size, align, offset, false)
let block = self.acquire_block(size, align, offset, false);
track_malloc(block, size, false);
block
}

/// Slow path for allocation if precise stress testing has been enabled.
4 changes: 4 additions & 0 deletions src/util/alloc/free_list_allocator.rs
@@ -6,6 +6,7 @@
use crate::policy::marksweepspace::native_ms::*;
use crate::util::alloc::allocator;
use crate::util::alloc::Allocator;
use crate::util::linear_scan::Region;
use crate::util::track::track_malloc;
use crate::util::Address;
use crate::util::VMThread;
use crate::vm::VMBinding;
@@ -76,6 +77,7 @@
impl<VM: VMBinding> Allocator<VM> for FreeListAllocator<VM> {
size, align, offset, cell, cell_size, res + size, cell + cell_size
);
}

return res;
}
}
@@ -179,6 +181,8 @@
}
}

track_malloc(cell, cell_size, true);

cell
}

6 changes: 5 additions & 1 deletion src/util/alloc/large_object_allocator.rs
@@ -4,6 +4,7 @@
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::Space;
use crate::util::alloc::{allocator, Allocator};
use crate::util::opaque_pointer::*;
use crate::util::track::track_malloc;
use crate::util::Address;
use crate::vm::VMBinding;

@@ -42,7 +43,10 @@
impl<VM: VMBinding> Allocator<VM> for LargeObjectAllocator<VM> {
let cell: Address = self.alloc_slow(size, align, offset);
// We may get a null ptr from alloc due to the VM being OOM
if !cell.is_zero() {
allocator::align_allocation::<VM>(cell, align, offset)
let result = allocator::align_allocation::<VM>(cell, align, offset);

track_malloc(result, size, true);
result
} else {
cell
}
14 changes: 14 additions & 0 deletions src/util/heap/blockpageresource.rs
@@ -9,6 +9,7 @@
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::linear_scan::Region;
use crate::util::opaque_pointer::*;
use crate::util::rust_util::zeroed_alloc::new_zeroed_vec;
use crate::util::track::{track_mempool, track_mempool_alloc, track_mempool_free, untrack_mempool};
use crate::vm::*;
use atomic::Ordering;
use spin::RwLock;
@@ -30,6 +31,10 @@
pub struct BlockPageResource<VM: VMBinding, B: Region + 'static> {
}

impl<VM: VMBinding, B: Region> PageResource<VM> for BlockPageResource<VM, B> {
fn track(&self) {
track_mempool(self, 0, false);
}

fn common(&self) -> &CommonPageResource {
self.flpr.common()
}
@@ -58,6 +63,12 @@
}
}

impl<VM: VMBinding, B: Region> Drop for BlockPageResource<VM, B> {
fn drop(&mut self) {
untrack_mempool(self);
}
}

impl<VM: VMBinding, B: Region> BlockPageResource<VM, B> {
/// Block granularity in pages
const LOG_PAGES: usize = B::LOG_BYTES - LOG_BYTES_IN_PAGE as usize;
@@ -136,6 +147,7 @@
self.block_queue.add_global_array(array);
// Finish slow-allocation
self.commit_pages(reserved_pages, required_pages, tls);
track_mempool_alloc(self, first_block, required_pages * BYTES_IN_PAGE);
Result::Ok(PRAllocResult {
start: first_block,
pages: required_pages,
@@ -156,6 +168,7 @@
// Fast allocate from the blocks list
if let Some(block) = self.block_queue.pop() {
self.commit_pages(reserved_pages, required_pages, tls);
track_mempool_alloc(self, block.start(), required_pages * BYTES_IN_PAGE);
return Result::Ok(PRAllocResult {
start: block.start(),
pages: required_pages,
@@ -170,6 +183,7 @@
let pages = 1 << Self::LOG_PAGES;
debug_assert!(pages as usize <= self.common().accounting.get_committed_pages());
self.common().accounting.release(pages as _);
track_mempool_free(self, block.start());
self.block_queue.push(block)
}

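Taken together, the hooks added to this file follow Valgrind's mempool protocol, with the page resource's own address serving as the pool handle. A sketch of the lifecycle, using the hypothetical shim names from the earlier sketch:

// pr: a BlockPageResource registered as a pool (handle = &pr).
pr.track();                                // VALGRIND_CREATE_MEMPOOL(&pr, 0, false)
// on each successful page allocation:
track_mempool_alloc(&pr, start, bytes);    // VALGRIND_MEMPOOL_ALLOC(&pr, start, bytes)
// on each block released back to the free list:
track_mempool_free(&pr, block.start());    // VALGRIND_MEMPOOL_FREE(&pr, block.start())
// and when the resource is dropped:
untrack_mempool(&pr);                      // VALGRIND_DESTROY_MEMPOOL(&pr)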
4 changes: 4 additions & 0 deletions src/util/heap/externalpageresource.rs
@@ -28,6 +28,10 @@
pub struct ExternalPages {
}

impl<VM: VMBinding> PageResource<VM> for ExternalPageResource<VM> {
fn track(&self) {
/* External pages are managed by the VM, not by MMTk, so we cannot track them reliably. */
}

fn common(&self) -> &CommonPageResource {
&self.common
}
15 changes: 15 additions & 0 deletions src/util/heap/freelistpageresource.rs
@@ -17,6 +17,7 @@
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::memory;
use crate::util::opaque_pointer::*;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::util::track::{track_mempool, track_mempool_alloc, track_mempool_free, untrack_mempool};
use crate::vm::*;
use std::marker::PhantomData;

@@ -41,6 +42,10 @@
struct FreeListPageResourceSync {
}

impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
fn track(&self) {
track_mempool(self, 0, false);
}

fn common(&self) -> &CommonPageResource {
&self.common
}
@@ -134,6 +139,9 @@
impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
}
}
};

track_mempool_alloc(self, rtn, conversions::pages_to_bytes(required_pages));

Result::Ok(PRAllocResult {
start: rtn,
pages: required_pages,
@@ -346,6 +354,7 @@
impl<VM: VMBinding> FreeListPageResource<VM> {
}

self.common.accounting.release(pages as _);
track_mempool_free(self, first);
let freed = sync.free_list.free(page_offset as _, true);
sync.pages_currently_on_freelist += pages as usize;
if !self.common.contiguous {
@@ -391,3 +400,9 @@
}
}
}

impl<VM: VMBinding> Drop for FreeListPageResource<VM> {
fn drop(&mut self) {
untrack_mempool(self);
}
}
9 changes: 7 additions & 2 deletions src/util/heap/monotonepageresource.rs
@@ -2,7 +2,8 @@
use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK};
use crate::policy::space::required_chunks;
use crate::util::address::Address;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions::*;
use crate::util::conversions::{self, *};
use crate::util::track::{track_mempool, track_mempool_alloc};
use std::ops::Range;
use std::sync::{Mutex, MutexGuard};

@@ -45,6 +46,10 @@
pub enum MonotonePageResourceConditional {
Discontiguous,
}
impl<VM: VMBinding> PageResource<VM> for MonotonePageResource<VM> {
fn track(&self) {
track_mempool(self, 0, true);
}

fn common(&self) -> &CommonPageResource {
&self.common
}
@@ -149,7 +154,7 @@
impl<VM: VMBinding> PageResource<VM> for MonotonePageResource<VM> {
sync.current_chunk = chunk_align_down(sync.cursor);
}
self.commit_pages(reserved_pages, required_pages, tls);

track_mempool_alloc(self, rtn, conversions::pages_to_bytes(required_pages));
Result::Ok(PRAllocResult {
start: rtn,
pages: required_pages,
3 changes: 3 additions & 0 deletions src/util/heap/pageresource.rs
@@ -11,6 +11,9 @@
use crate::util::heap::PageAccounting;
use crate::vm::VMBinding;

pub trait PageResource<VM: VMBinding>: 'static {
/// Track this page resource for memory tools like Valgrind.
fn track(&self);

/// Allocate pages from this resource.
/// Simply bump the cursor, and fail if we hit the sentinel.
/// Return The start of the first page if successful, zero on failure.
15 changes: 15 additions & 0 deletions src/util/memory.rs
@@ -353,6 +353,21 @@
pub fn handle_mmap_error<VM: VMBinding>(
/// This function is currently left empty for non-linux, and should be implemented in the future.
/// As the function is only used for assertions, MMTk will still run even if we never panic.
pub(crate) fn panic_if_unmapped(_start: Address, _size: usize, _anno: &MmapAnnotation) {
#[cfg(feature = "crabgrind")]
{
use crabgrind::memcheck::Error;
let result = crabgrind::memcheck::is_defined(_start.to_mut_ptr(), _size);
match result {
// `Ok` means the whole range is addressable (and defined), i.e. mapped: no panic.
Ok(_) => (),
Err(err) => match err {
Error::NotAddressable(addr) => {
panic!("Address {addr:x} is not addressable, start={_start}");
}
// Addressable-but-undefined memory is still mapped, so the assertion holds.
_ => (),
},
}
}
#[cfg(target_os = "linux")]
{
let flags = MMAP_FLAGS;
13 changes: 13 additions & 0 deletions src/util/metadata/vo_bit/helper.rs
@@ -29,6 +29,7 @@
use crate::{
util::{
linear_scan::Region,
metadata::{vo_bit, MetadataSpec},
track::{track_free, tracking_enabled},
ObjectReference,
},
vm::{ObjectModel, VMBinding},
@@ -184,6 +185,18 @@
pub(crate) fn on_object_forwarded<VM: VMBinding>(new_object: ObjectReference) {
}

pub(crate) fn on_region_swept<VM: VMBinding, R: Region>(region: &R, is_occupied: bool) {
if tracking_enabled() {
let mut cursor = region.start();
while cursor < region.end() {
if let Some(object) = vo_bit::is_vo_bit_set_for_addr(cursor) {
if object.is_live() {
track_free(object.to_object_start::<VM>(), 0);
}
}
cursor += VM::MIN_ALIGNMENT;
}
}

match strategy::<VM>() {
VOBitUpdateStrategy::ClearAndReconstruct => {
// Do nothing. The VO bit metadata is already reconstructed.
1 change: 1 addition & 0 deletions src/util/mod.rs
@@ -68,6 +68,7 @@
pub(crate) mod sanity;
pub(crate) mod slot_logger;
/// Utils for collecting statistics.
pub(crate) mod statistics;
/// Utils for tracking memory with tools such as Valgrind.
pub(crate) mod track;
/// A treadmill implementation.
pub(crate) mod treadmill;
