diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs
index cf2fb62..3cddc48 100644
--- a/hypervisor/src/global_const.rs
+++ b/hypervisor/src/global_const.rs
@@ -1,52 +1,15 @@
-/// The default number of logical processors for a high-end desktop system.
-///
-/// This value is set to 1 for testing purposes but can be adjusted up to 64 or more based on the system.
-/// Adjusting this value will increase the total heap size accordingly.
-const DEFAULT_LOGICAL_PROCESSORS: usize = 16;
-
-/// The number of pages for the stack per processor/core.
-///
-/// Each processor/core gets its own stack. The default stack size per processor is calculated as:
-/// STACK_PAGES_PER_PROCESSOR * BASE_PAGE_SIZE (4096 bytes per page)
-/// 0x4000 * 4096 = 67,108,864 bytes (64 MB)
-///
-/// This stack size is allocated individually for each processor.
-pub const STACK_PAGES_PER_PROCESSOR: usize = 0x2000;
-
-/// The size of a page table in bytes.
-const PAGE_TABLE_SIZE: usize = 2 * 1024 * 1024; // 2 MB
-
-/// The total number of page tables needed per processor to split the stack.
-///
-/// This is calculated as:
-/// STACK_SIZE / PAGE_TABLE_SIZE
-/// 64 MB / 2 MB = 32 page tables
-const PAGE_TABLES_PER_PROCESSOR: usize = 32;
-
-/// The padding added to the heap size for other allocations (e.g., vectors, boxes).
-///
-/// This is an additional memory buffer to ensure there's enough space for other dynamic allocations.
-const HEAP_PADDING: usize = 8 * 1024 * 1024; // 8 MB
-
-/// The total size of the heap in bytes, shared among all processors.
-///
-/// This base heap size is for 1 processor, calculated as:
-/// 32 * 2 * 1024 * 1024 + 8 * 1024 * 1024 = 72,237,568 bytes (68 MB)
-///
-/// For 4 processors, the heap size would be:
-/// (32 * 2 * 1024 * 1024 * 4) + 8 * 1024 * 1024 = 288,957,440 bytes (276 MB)
-///
-/// For 8 processors, the heap size would be:
-/// (32 * 2 * 1024 * 1024 * 8) + 8 * 1024 * 1024 = 577,874,944 bytes (552 MB)
-///
-/// For 16 processors, the heap size would be:
-/// (32 * 2 * 1024 * 1024 * 16) + 8 * 1024 * 1024 = 1,155,685,888 bytes (1.08 GB)
-///
-/// For 32 processors, the heap size would be:
-/// (32 * 2 * 1024 * 1024 * 32) + 8 * 1024 * 1024 = 2,311,371,776 bytes (2.16 GB)
-///
-/// For 64 processors, the heap size would be:
-/// (32 * 2 * 1024 * 1024 * 64) + 8 * 1024 * 1024 = 4,622,743,552 bytes (4.32 GB)
-///
-/// By adjusting the number of logical processors, the heap size will scale accordingly.
-pub const TOTAL_HEAP_SIZE: usize = (PAGE_TABLES_PER_PROCESSOR * PAGE_TABLE_SIZE * DEFAULT_LOGICAL_PROCESSORS) + HEAP_PADDING;
+use {crate::intel::vm::Vm, core::mem::size_of};
+
+/// Number of stack pages per logical processor.
+/// Includes the size of `Vm` in pages plus 0x1000 (4096) pages of padding.
+/// - Size of `Vm`: 1027 pages (0x403 pages).
+/// - Padding: 4096 pages (0x1000 pages).
+/// - Total: 1027 + 4096 pages = 5123 pages (0x1403 pages).
+/// - Total size in bytes: 5123 * 4096 = 20,983,808 bytes (~20 MB).
+pub const STACK_PAGES_PER_PROCESSOR: usize = (size_of::<Vm>() / 0x1000) + 0x1000;
+
+/// Total heap size (64 MB) shared across all logical processors.
+/// - Total size in bytes: 64 * 1024 * 1024 = 67,108,864 bytes (64 MB).
+/// - Total size in hexadecimal: 0x4000000 bytes.
+/// Increase this value if additional heap memory is needed or if more hooks are required.
+pub const TOTAL_HEAP_SIZE: usize = 0x4000000;
diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs
index 0177e5e..b15c839 100644
--- a/hypervisor/src/intel/hooks/hook_manager.rs
+++ b/hypervisor/src/intel/hooks/hook_manager.rs
@@ -13,13 +13,13 @@ use {
             invvpid::invvpid_all_contexts,
             vm::Vm,
         },
-        tracker::{print_allocated_memory, ALLOCATED_MEMORY_HEAD},
         windows::{
             nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image},
             ssdt::ssdt_hook::SsdtHook,
         },
     },
-    core::{intrinsics::copy_nonoverlapping, sync::atomic::Ordering},
+    alloc::vec::Vec,
+    core::intrinsics::copy_nonoverlapping,
     lazy_static::lazy_static,
     log::*,
     spin::Mutex,
@@ -66,6 +66,8 @@ pub struct HookManager {
     /// A flag indicating whether the CPUID cache information has been called. This will be used to perform hooks at boot time when SSDT has been initialized.
     /// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0)
     pub has_cpuid_cache_info_been_called: bool,
+
+    pub stack_memory: Vec<(usize, usize)>,
 }
 
 lazy_static! {
@@ -87,6 +89,7 @@ lazy_static! {
        ntoskrnl_base_pa: 0,
        ntoskrnl_size: 0,
        has_cpuid_cache_info_been_called: false,
+        stack_memory: Vec::with_capacity(128),
     });
 }
 
@@ -109,6 +112,23 @@ impl HookManager {
             .modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook);
     }
 
+    /// Records a memory allocation for tracking purposes.
+    ///
+    /// # Arguments
+    ///
+    /// * `start` - The start address of the memory allocation.
+    /// * `size` - The size of the memory allocation.
+    pub fn record_allocation(&mut self, start: usize, size: usize) {
+        self.stack_memory.push((start, size));
+    }
+
+    /// Prints the allocated memory ranges for debugging purposes.
+    pub fn print_allocated_memory(&self) {
+        self.stack_memory.iter().for_each(|(start, size)| {
+            debug!("Memory Range: Start = {:#x}, Size = {:#x}", start, size);
+        });
+    }
+
     /// Sets the base address and size of the Windows kernel.
     ///
     /// # Arguments
@@ -197,30 +217,15 @@
     ///
     /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise.
     pub fn hide_hypervisor_memory(&mut self, vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> {
-        // Print the tracked memory allocations for debugging purposes.
-        print_allocated_memory();
-
-        // Load the head of the allocated memory list.
-        let mut current_node = ALLOCATED_MEMORY_HEAD.load(Ordering::Acquire);
-
-        // Iterate through the linked list and hide each memory range.
-        while !current_node.is_null() {
-            // Get a reference to the current node.
-            let node = unsafe { &*current_node };
-
-            // Print the memory range.
-            trace!("Memory Range: Start = {:#X}, Size = {}", node.start, node.size);
-
-            // Iterate through the memory range in 4KB steps.
-            for offset in (0..node.size).step_by(BASE_PAGE_SIZE) {
-                let guest_page_pa = node.start + offset;
-                // Print the page address before hiding it.
-                trace!("Hiding memory page at: {:#X}", guest_page_pa);
-                self.ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?;
-            }
-
-            // Move to the next node.
-            current_node = node.next.load(Ordering::Acquire);
+        // Expand each recorded (start, size) range into its 4KB pages.
+        let pages: Vec<u64> = self
+            .stack_memory
+            .iter()
+            .flat_map(|&(start, size)| (0..size).step_by(BASE_PAGE_SIZE).map(move |offset| (start + offset) as u64))
+            .collect();
+
+        for guest_page_pa in pages {
+            self.ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?;
         }
 
         Ok(())
diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs
index ce6c9d8..5c5cbc9 100644
--- a/hypervisor/src/lib.rs
+++ b/hypervisor/src/lib.rs
@@ -18,6 +18,5 @@ pub mod error;
 pub mod global_const;
 pub mod intel;
 pub mod logger;
-pub mod tracker;
 pub mod vmm;
 pub mod windows;
diff --git a/hypervisor/src/tracker.rs b/hypervisor/src/tracker.rs
deleted file mode 100644
index 262ffaa..0000000
--- a/hypervisor/src/tracker.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use {
-    alloc::boxed::Box,
-    core::{
-        ptr::null_mut,
-        sync::atomic::{AtomicPtr, Ordering},
-    },
-    log::trace,
-};
-
-/// Structure to represent a memory range.
-///
-/// This struct holds the start address and size of an allocated memory range.
-/// It also includes an atomic pointer to the next memory range in a linked list.
-#[derive(Debug)]
-pub struct MemoryRangeTracker {
-    pub start: usize,
-    pub size: usize,
-    pub next: AtomicPtr<MemoryRangeTracker>,
-}
-
-/// Global atomic pointer to the head of the allocated memory list.
-///
-/// This static variable holds the head of the linked list that keeps track of all allocated memory ranges.
-/// It is initialized to a null pointer.
-pub static ALLOCATED_MEMORY_HEAD: AtomicPtr<MemoryRangeTracker> = AtomicPtr::new(null_mut());
-
-/// Records an allocation by adding the memory range to the global list.
-///
-/// This function is called whenever a new memory block is allocated. It stores the start address
-/// and size of the allocated memory in the global list.
-///
-/// # Arguments
-///
-/// * `start` - The start address of the allocated memory range.
-/// * `size` - The size of the allocated memory range.
-pub fn record_allocation(start: usize, size: usize) {
-    // Create a new memory range node.
-    let new_node = Box::into_raw(Box::new(MemoryRangeTracker {
-        start,
-        size,
-        next: AtomicPtr::new(null_mut()),
-    }));
-
-    // Update the head of the list in a lock-free manner.
-    let mut current_head = ALLOCATED_MEMORY_HEAD.load(Ordering::Acquire);
-    loop {
-        // Set the new node's next pointer to the current head.
-        unsafe { (*new_node).next.store(current_head, Ordering::Release) };
-
-        // Attempt to update the head to the new node.
-        match ALLOCATED_MEMORY_HEAD.compare_exchange(current_head, new_node, Ordering::AcqRel, Ordering::Acquire) {
-            // If the head was successfully updated, break out of the loop.
-            Ok(_) => break,
-            // If the head was changed by another thread, update current_head and retry.
-            Err(head) => current_head = head,
-        }
-    }
-}
-
-/// Prints the entire allocated memory range one by one.
-///
-/// This function iterates through the linked list of allocated memory ranges
-/// and prints the start address and size of each range.
-pub fn print_allocated_memory() {
-    // Load the head of the allocated memory list.
-    let mut current_node = ALLOCATED_MEMORY_HEAD.load(Ordering::Acquire);
-
-    // Iterate through the linked list and print each memory range.
-    while !current_node.is_null() {
-        unsafe {
-            // Get a reference to the current node.
-            let node = &*current_node;
-
-            // Print the memory range.
-            trace!("Memory Range: Start = {:#X}, Size = {}", node.start, node.size);
-
-            // Move to the next node.
-            current_node = node.next.load(Ordering::Acquire);
-        }
-    }
-}
diff --git a/hypervisor/src/vmm.rs b/hypervisor/src/vmm.rs
index d4c6e46..896e4d2 100644
--- a/hypervisor/src/vmm.rs
+++ b/hypervisor/src/vmm.rs
@@ -80,10 +80,14 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! {
     trace!("VMCS Dump: {:#x?}", vm.vmcs_region);
 
     /*
-    match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) {
-        Ok(_) => debug!("Hypervisor memory hidden"),
-        Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e),
-    };
+    {
+        let mut hook_manager = crate::intel::hooks::hook_manager::SHARED_HOOK_MANAGER.lock();
+        hook_manager.print_allocated_memory();
+        match hook_manager.hide_hypervisor_memory(&mut vm, crate::intel::ept::AccessType::READ_WRITE_EXECUTE) {
+            Ok(_) => debug!("Hypervisor memory hidden"),
+            Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e),
+        };
+    }
     */
 
     info!("Launching the VM until a vmexit occurs...");
diff --git a/uefi/src/setup.rs b/uefi/src/setup.rs
index f6620bf..7736c62 100644
--- a/uefi/src/setup.rs
+++ b/uefi/src/setup.rs
@@ -6,8 +6,10 @@ use {
     alloc::boxed::Box,
     hypervisor::{
         allocator::box_zeroed,
-        intel::{hooks::hook_manager::HookManager, page::Page},
-        tracker::record_allocation,
+        intel::{
+            hooks::hook_manager::{HookManager, SHARED_HOOK_MANAGER},
+            page::Page,
+        },
     },
     log::debug,
     uefi::{prelude::BootServices, proto::loaded_image::LoadedImage},
@@ -44,7 +46,10 @@ pub fn record_image_base(loaded_image: &LoadedImage) {
     let (image_base, image_size) = loaded_image.info();
     let image_range = image_base as usize..(image_base as usize + image_size as usize);
     debug!("Loaded image base: {:#x?}", image_range);
-    record_allocation(image_base as usize, image_size as usize);
+
+    // Lock the shared hook manager
+    let mut hook_manager = SHARED_HOOK_MANAGER.lock();
+    hook_manager.record_allocation(image_base as usize, image_size as usize);
 }
 
 /// Creates a dummy page filled with a specific byte value.
diff --git a/uefi/src/stack.rs b/uefi/src/stack.rs
index 9af3fd0..5ef5b3f 100644
--- a/uefi/src/stack.rs
+++ b/uefi/src/stack.rs
@@ -5,7 +5,7 @@ use {
         ptr,
         sync::atomic::{AtomicPtr, AtomicU32, Ordering},
     },
-    hypervisor::tracker::record_allocation,
+    hypervisor::intel::hooks::hook_manager::SHARED_HOOK_MANAGER,
     uefi::{
         prelude::{Boot, BootServices, SystemTable},
         proto::loaded_image::LoadedImage,
@@ -89,8 +89,9 @@ pub unsafe fn allocate_host_stack(layout: Layout) -> *mut u8 {
             .unwrap_or(ptr::null_mut())
     };
 
-    // Record the allocation without causing a deadlock.
-    record_allocation(stack as usize, layout.size());
+    // Lock the shared hook manager
+    let mut hook_manager = SHARED_HOOK_MANAGER.lock();
+    hook_manager.record_allocation(stack as usize, layout.size());
 
     stack
 }
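
Note: the reworked `hide_hypervisor_memory` walks every `(start, size)` record in `stack_memory` and hides each 4 KiB page that the range covers. Below is a minimal, standalone sketch of that page-enumeration step; `BASE_PAGE_SIZE`, `pages_to_hide`, and the sample addresses are illustrative stand-ins rather than items taken from the crate, and the per-page alignment the crate performs with `PAddr::align_down_to_base_page` is folded into the range walk here.

/// 4 KiB page size, matching the granularity used when hiding guest pages.
const BASE_PAGE_SIZE: usize = 0x1000;

/// Expands `(start, size)` allocation records into the base address of every
/// 4 KiB page the range touches.
fn pages_to_hide(ranges: &[(usize, usize)]) -> Vec<u64> {
    ranges
        .iter()
        .flat_map(|&(start, size)| {
            // Align the range start down to a page boundary, then walk it in 4 KiB steps.
            let first_page = start & !(BASE_PAGE_SIZE - 1);
            (first_page..start + size)
                .step_by(BASE_PAGE_SIZE)
                .map(|page| page as u64)
        })
        .collect()
}

fn main() {
    // Hypothetical records: a loaded image and one per-core host stack.
    let stack_memory = [(0x1000_0000usize, 0x3000usize), (0x2000_0800, 0x2000)];
    for page in pages_to_hide(&stack_memory) {
        println!("would hide page at {page:#x}");
    }
}

Collecting the page addresses up front keeps the EPT-modifying loop free of any bookkeeping over the allocation records themselves.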