Skip to content
This repository has been archived by the owner on Sep 1, 2024. It is now read-only.

Commit

Permalink
Merge pull request #34 from memN0ps/dev
Browse files Browse the repository at this point in the history
Optimize Stack and Memory Management
  • Loading branch information
memN0ps authored Jul 11, 2024
2 parents 438bb9e + 0d2e4a0 commit 51be218
Show file tree
Hide file tree
Showing 7 changed files with 66 additions and 170 deletions.
67 changes: 15 additions & 52 deletions hypervisor/src/global_const.rs
Original file line number Diff line number Diff line change
@@ -1,52 +1,15 @@
/// The default number of logical processors for a high-end desktop system.
///
/// Set to 16 here; it can be adjusted up to 64 or more based on the system.
/// (NOTE(review): the previous comment said "set to 1", contradicting the value below.)
/// Adjusting this value increases the total heap size accordingly (see `TOTAL_HEAP_SIZE`).
const DEFAULT_LOGICAL_PROCESSORS: usize = 16;

/// The number of pages for the stack per processor/core.
///
/// Each processor/core gets its own stack. The default stack size per processor is calculated as:
/// STACK_PAGES_PER_PROCESSOR * BASE_PAGE_SIZE (4096 bytes per page)
/// 0x2000 * 4096 = 33,554,432 bytes (32 MB)
/// (NOTE(review): the previous comment used 0x4000 / 64 MB, which did not match the value below.)
///
/// This stack size is allocated individually for each processor.
pub const STACK_PAGES_PER_PROCESSOR: usize = 0x2000;

/// The size of one page-table region in bytes (2 MB).
const PAGE_TABLE_SIZE: usize = 0x200000; // 2 MB

/// The total number of page tables needed per processor to split the stack.
///
/// This is calculated as:
/// STACK_SIZE / PAGE_TABLE_SIZE
/// NOTE(review): with STACK_PAGES_PER_PROCESSOR = 0x2000 the per-processor stack
/// is 32 MB, which would give 32 MB / 2 MB = 16 page tables; the value 32 below
/// corresponds to a 64 MB (0x4000-page) stack. Confirm which is intended.
const PAGE_TABLES_PER_PROCESSOR: usize = 32;

/// Extra heap headroom reserved for miscellaneous dynamic allocations
/// (e.g., vectors and boxes) beyond the per-processor page tables.
const HEAP_PADDING: usize = 0x800000; // 8 MB

/// The total size of the heap in bytes, shared among all processors.
///
/// Formula: (PAGE_TABLES_PER_PROCESSOR * PAGE_TABLE_SIZE * DEFAULT_LOGICAL_PROCESSORS) + HEAP_PADDING.
///
/// This base heap size is for 1 processor, calculated as:
/// (32 * 2 * 1024 * 1024 * 1) + 8 * 1024 * 1024 = 75,497,472 bytes (72 MB)
///
/// For 4 processors, the heap size would be:
/// (32 * 2 * 1024 * 1024 * 4) + 8 * 1024 * 1024 = 276,824,064 bytes (264 MB)
///
/// For 8 processors, the heap size would be:
/// (32 * 2 * 1024 * 1024 * 8) + 8 * 1024 * 1024 = 545,259,520 bytes (520 MB)
///
/// For 16 processors, the heap size would be:
/// (32 * 2 * 1024 * 1024 * 16) + 8 * 1024 * 1024 = 1,082,130,432 bytes (~1.01 GB)
///
/// For 32 processors, the heap size would be:
/// (32 * 2 * 1024 * 1024 * 32) + 8 * 1024 * 1024 = 2,155,872,256 bytes (~2.01 GB)
///
/// For 64 processors, the heap size would be:
/// (32 * 2 * 1024 * 1024 * 64) + 8 * 1024 * 1024 = 4,303,355,904 bytes (~4.01 GB)
///
/// NOTE(review): the previous worked figures (e.g. 72,237,568 bytes for 1
/// processor) did not match the formula; the numbers above are recomputed.
///
/// By adjusting the number of logical processors, the heap size will scale accordingly.
pub const TOTAL_HEAP_SIZE: usize = (PAGE_TABLES_PER_PROCESSOR * PAGE_TABLE_SIZE * DEFAULT_LOGICAL_PROCESSORS) + HEAP_PADDING;
use {crate::intel::vm::Vm, core::mem::size_of};

/// Number of stack pages per logical processor.
///
/// Includes the size of `Vm` in pages — rounded UP, so a `Vm` whose size is not
/// an exact multiple of 0x1000 bytes still gets a full final page (plain
/// truncating division would silently under-allocate by up to one page) —
/// plus 0x1000 (4096) pages of padding.
///
/// Example with a 1027-page (0x403) `Vm`:
/// - Size of `Vm`: 1027 pages.
/// - Padding: 4096 pages (0x1000 pages).
/// - Total: 1027 + 4096 = 5123 pages (0x1403 pages).
/// - Total size in bytes: 5123 * 4096 = 20,983,808 bytes (~20 MB).
///   (NOTE(review): the previous figure 20,971,520 corresponds to 5120 pages.)
pub const STACK_PAGES_PER_PROCESSOR: usize = ((size_of::<Vm>() + 0xFFF) / 0x1000) + 0x1000;

/// Total heap size shared across all logical processors.
///
/// 64 MB: 64 * 1024 * 1024 = 67,108,864 bytes (0x4000000).
/// Increase this value if additional heap memory is needed or if more hooks are required.
pub const TOTAL_HEAP_SIZE: usize = 64 * 1024 * 1024;
57 changes: 31 additions & 26 deletions hypervisor/src/intel/hooks/hook_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@ use {
invvpid::invvpid_all_contexts,
vm::Vm,
},
tracker::{print_allocated_memory, ALLOCATED_MEMORY_HEAD},
windows::{
nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image},
ssdt::ssdt_hook::SsdtHook,
},
},
core::{intrinsics::copy_nonoverlapping, sync::atomic::Ordering},
alloc::vec::Vec,
core::intrinsics::copy_nonoverlapping,
lazy_static::lazy_static,
log::*,
spin::Mutex,
Expand Down Expand Up @@ -66,6 +66,8 @@ pub struct HookManager {
/// A flag indicating whether the CPUID cache information has been called. This will be used to perform hooks at boot time when SSDT has been initialized.
/// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0)
pub has_cpuid_cache_info_been_called: bool,

pub stack_memory: Vec<(usize, usize)>,
}

lazy_static! {
Expand All @@ -87,6 +89,7 @@ lazy_static! {
ntoskrnl_base_pa: 0,
ntoskrnl_size: 0,
has_cpuid_cache_info_been_called: false,
stack_memory: Vec::with_capacity(128),
});
}

Expand All @@ -109,6 +112,23 @@ impl HookManager {
.modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook);
}

/// Records a memory allocation for tracking purposes.
///
/// Appends the `(start, size)` pair to `stack_memory` in call order; ranges are
/// later consumed by `hide_hypervisor_memory` / `print_allocated_memory`.
///
/// # Arguments
///
/// * `start` - The start address of the memory allocation.
/// * `size` - The size of the memory allocation, in bytes.
pub fn record_allocation(&mut self, start: usize, size: usize) {
    self.stack_memory.push((start, size));
}

/// Prints the allocated memory ranges for debugging purposes.
///
/// Emits one debug-level log line per tracked `(start, size)` entry.
pub fn print_allocated_memory(&self) {
    for &(start, size) in self.stack_memory.iter() {
        debug!("Memory Range: Start = {:#x}, Size = {:#x}", start, size);
    }
}

/// Sets the base address and size of the Windows kernel.
///
/// # Arguments
Expand Down Expand Up @@ -197,30 +217,15 @@ impl HookManager {
///
/// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise.
pub fn hide_hypervisor_memory(&mut self, vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> {
    // Expand every tracked allocation `(start, size)` into the address of each
    // 4 KB page it covers. The byte offsets WITHIN each range are stepped by
    // BASE_PAGE_SIZE. BUG FIX: applying `step_by(BASE_PAGE_SIZE)` to the
    // iterator of allocation entries (as before) steps by 4096 *entries*, not
    // bytes — it visited only every 4096th allocation and, because `size` was
    // ignored, hid only that allocation's first page.
    let pages: Vec<u64> = self
        .stack_memory
        .iter()
        .flat_map(|&(start, size)| {
            (0..size)
                .step_by(BASE_PAGE_SIZE)
                .map(move |offset| (start + offset) as u64)
        })
        .collect();

    // Hide each page from the guest via the EPT, aligned down to a base page.
    for guest_page_pa in pages {
        self.ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?;
    }

    Ok(())
Expand Down
1 change: 0 additions & 1 deletion hypervisor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,5 @@ pub mod error;
pub mod global_const;
pub mod intel;
pub mod logger;
pub mod tracker;
pub mod vmm;
pub mod windows;
81 changes: 0 additions & 81 deletions hypervisor/src/tracker.rs

This file was deleted.

12 changes: 8 additions & 4 deletions hypervisor/src/vmm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,10 +80,14 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! {
trace!("VMCS Dump: {:#x?}", vm.vmcs_region);

/*
match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) {
Ok(_) => debug!("Hypervisor memory hidden"),
Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e),
};
{
let mut hook_manager = crate::intel::hooks::hook_manager::SHARED_HOOK_MANAGER.lock();
hook_manager.print_allocated_memory();
match hook_manager.hide_hypervisor_memory(&mut vm, crate::intel::ept::AccessType::READ_WRITE_EXECUTE) {
Ok(_) => debug!("Hypervisor memory hidden"),
Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e),
};
}
*/

info!("Launching the VM until a vmexit occurs...");
Expand Down
11 changes: 8 additions & 3 deletions uefi/src/setup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@ use {
alloc::boxed::Box,
hypervisor::{
allocator::box_zeroed,
intel::{hooks::hook_manager::HookManager, page::Page},
tracker::record_allocation,
intel::{
hooks::hook_manager::{HookManager, SHARED_HOOK_MANAGER},
page::Page,
},
},
log::debug,
uefi::{prelude::BootServices, proto::loaded_image::LoadedImage},
Expand Down Expand Up @@ -44,7 +46,10 @@ pub fn record_image_base(loaded_image: &LoadedImage) {
let (image_base, image_size) = loaded_image.info();
let image_range = image_base as usize..(image_base as usize + image_size as usize);
debug!("Loaded image base: {:#x?}", image_range);
record_allocation(image_base as usize, image_size as usize);

// Lock the shared hook manager
let mut hook_manager = SHARED_HOOK_MANAGER.lock();
hook_manager.record_allocation(image_base as usize, image_size as usize);
}

/// Creates a dummy page filled with a specific byte value.
Expand Down
7 changes: 4 additions & 3 deletions uefi/src/stack.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use {
ptr,
sync::atomic::{AtomicPtr, AtomicU32, Ordering},
},
hypervisor::tracker::record_allocation,
hypervisor::intel::hooks::hook_manager::SHARED_HOOK_MANAGER,
uefi::{
prelude::{Boot, BootServices, SystemTable},
proto::loaded_image::LoadedImage,
Expand Down Expand Up @@ -89,8 +89,9 @@ pub unsafe fn allocate_host_stack(layout: Layout) -> *mut u8 {
.unwrap_or(ptr::null_mut())
};

// Record the allocation without causing a deadlock.
record_allocation(stack as usize, layout.size());
// Lock the shared hook manager
let mut hook_manager = SHARED_HOOK_MANAGER.lock();
hook_manager.record_allocation(stack as usize, layout.size());

stack
}
Expand Down

0 comments on commit 51be218

Please sign in to comment.