Refine log for VM module

Author: Hui, Chunyang
Date: 2023-11-14 07:00:26 +00:00
Committer: volcano
parent 32c7cc44f1
commit c2db186854
9 changed files with 57 additions and 42 deletions

@@ -97,18 +97,27 @@ impl Log for SimpleLogger {
         let tid = current!().tid();
         let rounds = round_count();
         let desc = round_desc();
+        let target = record.target().split("::").skip(1).next().unwrap();
+
         // Message (null-terminated)
         let message = if let Some(desc) = desc {
             format!(
-                "[{:>5}][T{}][#{}][{:·>8}] {}\0",
+                "[{:>5}][T{}][#{}][{:·>8}][{}] {}\0",
                 level,
                 tid,
                 rounds,
                 desc,
+                target,
                 record.args()
             )
         } else {
-            format!("[{:>5}][T{}][#{}] {}\0", level, tid, rounds, record.args())
+            format!(
+                "[{:>5}][T{}][#{}][{}] {}\0",
+                level,
+                tid,
+                rounds,
+                target,
+                record.args()
+            )
         };
         // Print the message
         unsafe {
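
The new `target` expression above is worth a closer look: `record.target()` returns the full module path of the log call site, and `split("::").skip(1).next()` keeps only the second path segment, i.e. the sub-module directly under the crate root (e.g. `vm`), which is what ends up in the new bracketed log prefix. A minimal sketch of that behavior on plain strings; the module paths below are hypothetical, not taken from the commit:

```rust
// Sketch only: reproduces the diff's target-extraction logic on plain strings.
fn extract_target(target: &str) -> Option<&str> {
    // Keep the second `::`-separated segment, e.g. "vm" out of "crate::vm::...".
    target.split("::").skip(1).next()
}

fn main() {
    // Hypothetical log targets; the real ones come from `record.target()`.
    assert_eq!(extract_target("occlum_libos::vm::vm_manager"), Some("vm"));
    // A bare crate-root target has no second segment: the committed code's
    // `.unwrap()` would panic on it, while this sketch returns None.
    assert_eq!(extract_target("occlum_libos"), None);
}
```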

@@ -58,7 +58,7 @@ impl Debug for Chunk {
         write!(f, "range = {:?}, ", self.range);
         match self.internal() {
             ChunkType::SingleVMA(vma) => write!(f, "Single VMA chunk: {:?}", vma),
-            ChunkType::MultiVMA(internal_manager) => write!(f, "default chunk: {:?}", self.range()),
+            ChunkType::MultiVMA(internal_manager) => write!(f, "default chunk"),
         }
     }
 }
@@ -146,7 +146,7 @@ impl Chunk {
     pub fn mmap(&self, options: &VMMapOptions) -> Result<usize> {
         debug_assert!(!self.is_single_vma());
-        trace!("try allocate in chunk: {:?}", self);
+        debug!("try allocate in chunk: {:?}", self);
         let mut internal_manager = if let ChunkType::MultiVMA(internal_manager) = &self.internal {
             internal_manager.lock().unwrap()
         } else {

@@ -42,7 +42,10 @@ impl VMFreeSpaceManager {
         let mut result_idx: Option<usize> = None;
         let mut free_list = &mut self.free_manager;
-        trace!("find free range, free list = {:?}", free_list);
+        debug!(
+            "try to find free range, current free list = {:?}",
+            free_list
+        );
 
         match addr {
             VMMapAddr::Any => {}
@@ -125,10 +128,9 @@ impl VMFreeSpaceManager {
         let result_free_range = result_free_range.unwrap();
         self.free_list_update_range(index, result_free_range);
-        trace!(
-            "result free range = {:?}. After find free range, free list = {:?}",
-            result_free_range,
-            self.free_manager
+        debug!(
+            "result free range = {:?}. After finding free range, free list = {:?}",
+            result_free_range, self.free_manager
         );
 
         return Ok(result_free_range);
     }
@@ -148,6 +150,7 @@ impl VMFreeSpaceManager {
     }
 
     pub fn add_range_back_to_free_manager(&mut self, dirty_range: &VMRange) -> Result<()> {
+        debug!("add back dirty range: {:?}", dirty_range);
         let mut free_list = &mut self.free_manager;
 
         // If the free list is empty, insert the dirty range and it's done.
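
For context on `add_range_back_to_free_manager`: the free-space manager keeps a list of non-overlapping free ranges, and handing a dirty range back should merge it with adjacent neighbors so the list stays compact. The diff only shows the new debug log, so the following is a hedged sketch of the coalescing idea over a sorted `Vec`; `Range`, `add_back`, and the list layout are assumptions, not Occlum's real `free_manager` API:

```rust
// Hedged sketch: coalescing insertion into a sorted free list.
#[derive(Debug, Clone, Copy)]
struct Range {
    start: usize,
    end: usize,
}

fn add_back(free_list: &mut Vec<Range>, dirty: Range) {
    // Find the insertion point that keeps the list sorted by start address.
    let idx = free_list.partition_point(|r| r.start < dirty.start);
    free_list.insert(idx, dirty);
    // Merge with the next range when they touch.
    if idx + 1 < free_list.len() && free_list[idx].end == free_list[idx + 1].start {
        free_list[idx].end = free_list[idx + 1].end;
        free_list.remove(idx + 1);
    }
    // Merge with the previous range when they touch.
    if idx > 0 && free_list[idx - 1].end == free_list[idx].start {
        free_list[idx - 1].end = free_list[idx].end;
        free_list.remove(idx);
    }
}

fn main() {
    let mut free = vec![
        Range { start: 0x0, end: 0x1000 },
        Range { start: 0x3000, end: 0x4000 },
    ];
    add_back(&mut free, Range { start: 0x1000, end: 0x2000 });
    // The first two ranges coalesce into 0x0..0x2000.
    println!("{:?}", free);
}
```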

@@ -69,6 +69,8 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
     pub fn build(self) -> Result<ProcessVM> {
         self.validate()?;
 
+        info!("allocate for new process");
+
         let heap_size = self
             .heap_size
             .unwrap_or(config::LIBOS_CONFIG.process.default_heap_size);
@@ -142,7 +144,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
                     &self.handle_error_when_init(&chunks);
                     e
                 })?;
-            trace!("elf range = {:?}", elf_range);
+            debug!("elf range = {:?}", elf_range);
             elf_ranges.push(elf_range);
             Ok(())
         })
@@ -167,7 +169,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
                 e
             })?;
         debug_assert!(heap_range.start() % heap_layout.align() == 0);
-        trace!("heap range = {:?}", heap_range);
+        debug!("heap range = {:?}", heap_range);
         let brk = RwLock::new(heap_range.start());
 
         chunks.insert(chunk_ref);
@@ -190,7 +192,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
             })?;
         debug_assert!(stack_range.start() % stack_layout.align() == 0);
         chunks.insert(chunk_ref);
-        trace!("stack range = {:?}", stack_range);
+        debug!("stack range = {:?}", stack_range);
 
         let mem_chunks = Arc::new(RwLock::new(chunks));
 
         Ok(ProcessVM {
@@ -567,6 +569,7 @@ impl ProcessVM {
             .initializer(initializer)
             .page_policy(page_policy)
             .build()?;
+        debug!("mmap options = {:?}", mmap_options);
         let mmap_addr = USER_SPACE_VM_MANAGER.mmap(&mmap_options)?;
         Ok(mmap_addr)
     }

@@ -23,14 +23,13 @@ impl UserSpaceVMManager {
         let sgx_platform = SGXPlatform::new();
 
         let init_size = LIBOS_CONFIG.resource_limits.user_space_init_size;
         let max_size = LIBOS_CONFIG.resource_limits.user_space_max_size;
+        info!(
+            "alloc user space init size = {:?}, max size = {:?}",
+            init_size, max_size
+        );
         let (userspace_vm_range, gap_range) = sgx_platform.alloc_user_space(init_size, max_size)?;
-        info!(
-            "user space allocated, range = {:?}, gap_range = {:?}",
-            userspace_vm_range, gap_range
-        );
 
         // Use pkey_mprotect to set the whole userspace to R/W permissions. If user specifies a new
         // permission, the mprotect ocall will update the permission.
         pku_util::pkey_mprotect_userspace_mem(
@@ -41,6 +40,11 @@ impl UserSpaceVMManager {
         let vm_manager = VMManager::init(userspace_vm_range, gap_range)?;
 
+        info!(
+            "user space allocated, total size = {:?}",
+            userspace_vm_range.size()
+        );
+
         Ok(Self {
             inner: vm_manager,
             sgx_platform,
@@ -63,7 +67,7 @@ fn free_user_space() {
     assert!(USER_SPACE_VM_MANAGER.verified_clean_when_exit());
     let addr = total_user_space_range.start();
     let size = total_user_space_range.size();
-    info!("free user space VM: {:?}", total_user_space_range);
+    debug!("free user space VM: {:?}", total_user_space_range);
 
     pku_util::clear_pku_when_libos_exit(
         total_user_space_range,

@@ -306,7 +306,7 @@ impl VMArea {
         errcd: u32,
         kernel_triggers: bool,
     ) -> Result<()> {
-        trace!("PF vma = {:?}", self);
+        debug!("PF vma = {:?}", self);
         if (self.perms() == VMPerms::NONE)
             || (crate::exception::check_rw_bit(errcd) == false
                 && !self.perms().contains(VMPerms::READ))
@@ -340,6 +340,7 @@ impl VMArea {
         }
 
         if kernel_triggers || self.pf_count >= PF_NUM_THRESHOLD {
+            info!("commit whole vma");
             return self.commit_current_vma_whole();
         }
@@ -347,14 +348,12 @@ impl VMArea {
         // The return commit_size can be 0 when other threads already commit the PF-containing range but the vma is not fully committed yet.
         let commit_size = self.commit_once_for_page_fault(pf_addr).unwrap();
-        trace!("page fault commit memory size = {:?}", commit_size);
+        debug!("page fault commit memory size = {:?}", commit_size);
 
         if commit_size == 0 {
             warn!("This PF has been handled by other threads already.");
         }
 
-        info!("page fault handle success");
-
         Ok(())
     }
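
The two hunks above outline the page-fault commit policy: each fault normally commits one range around the faulting address, but once a VMA has faulted `PF_NUM_THRESHOLD` times, or the fault was kernel-triggered, the whole VMA is committed at once. A toy model of that control flow; only `pf_count`, `kernel_triggers`, and the `PF_NUM_THRESHOLD` name appear in the diff, so the threshold value and method bodies here are assumptions:

```rust
// Toy model of the commit policy in the page-fault handler above.
const PF_NUM_THRESHOLD: u64 = 4; // illustrative; the real value is not shown

struct Vma {
    pf_count: u64,
}

impl Vma {
    fn handle_page_fault(&mut self, kernel_triggers: bool) {
        self.pf_count += 1;
        if kernel_triggers || self.pf_count >= PF_NUM_THRESHOLD {
            // Hot VMA (or kernel-triggered fault): commit the whole VMA.
            println!("commit whole vma");
        } else {
            // Cold path: commit one range around the faulting address.
            println!("commit one range for this fault");
        }
    }
}

fn main() {
    let mut vma = Vma { pf_count: 0 };
    for _ in 0..PF_NUM_THRESHOLD {
        vma.handle_page_fault(false); // the last iteration crosses the threshold
    }
}
```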

@@ -101,7 +101,6 @@ impl ChunkManager {
         if let VMMapAddr::Force(addr) = addr {
             self.munmap(addr, size)?;
         }
-        trace!("mmap options = {:?}", options);
 
         // Find and allocate a new range for this mmap request
         let new_range = self
@@ -130,7 +129,6 @@ impl ChunkManager {
             new_vma.unwrap()
         };
-        trace!("new vma is ready");
 
         self.free_size -= new_vma.size();
 
         // After initializing, we can safely insert the new VMA
@@ -501,7 +499,7 @@ impl VMRemapParser for ChunkManager {
 impl Drop for ChunkManager {
     fn drop(&mut self) {
-        info!("drop chunk manager = {:?}", self);
+        debug!("drop chunk manager = {:?}", self);
         assert!(self.is_empty());
         assert!(self.free_size == self.range.size());
         assert!(self.free_manager.free_size() == self.range.size());

@@ -227,10 +227,6 @@ impl SGXPlatform {
         init_size: usize,
         max_size: usize,
     ) -> Result<(VMRange, Option<VMRange>)> {
-        debug!(
-            "alloc user space init size = {:?}, max size = {:?}",
-            init_size, max_size
-        );
         if matches!(self, SGXPlatform::WithEDMM) && max_size > init_size {
             let user_region_size = max_size - init_size;
@@ -245,7 +241,7 @@ impl SGXPlatform {
             let gap_range =
                 VMRange::new(reserved_mem_start_addr + init_size, user_region_start_addr)?;
 
-            info!(
+            debug!(
                 "allocated user space range is {:?}, gap range is {:?}. reserved_mem range is {:?}, user region range is {:?}",
                 total_user_space_range, gap_range, VMRange::new_with_size(reserved_mem_start_addr, init_size),
                 VMRange::new_with_size(user_region_start_addr, user_region_size)
@@ -258,7 +254,7 @@ impl SGXPlatform {
             let total_user_space_range =
                 VMRange::new(reserved_mem_start_addr, reserved_mem_start_addr + max_size)?;
 
-            info!(
+            debug!(
                 "allocated user space range is {:?}, gap range is None",
                 total_user_space_range
             );
@@ -384,10 +380,9 @@ pub fn enclave_page_fault_handler(
 ) -> Result<()> {
     let pf_addr = exception_info.faulting_address as usize;
     let pf_errcd = exception_info.error_code;
-    trace!(
-        "enclave page fault caught, pf_addr = 0x{:x}, error code = {:?}",
-        pf_addr,
-        pf_errcd
+    debug!(
+        "enclave page fault caught, pf_addr = 0x{:x}, error code = {:?}",
+        pf_addr, pf_errcd
     );
 
     USER_SPACE_VM_MANAGER.handle_page_fault(rip, pf_addr, pf_errcd, kernel_triggers)?;
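
The SGXPlatform hunks above log the EDMM address-space layout: a reserved, up-front committed region of init_size, then a gap, then an on-demand user region of max_size - init_size. The arithmetic below mirrors the gap derivation visible in the diff; every concrete address and size is made up for illustration:

```rust
// Illustrative numbers only; real values come from the SGX platform allocation.
fn main() {
    let init_size: usize = 0x1000_0000; // assumed 256 MiB committed up front
    let max_size: usize = 0x4000_0000; // assumed 1 GiB ceiling
    let reserved_mem_start_addr: usize = 0x7f00_0000_0000; // hypothetical base
    // The user region is allocated separately and need not be contiguous with
    // the reserved memory; assume a 2 MiB hole for this example.
    let user_region_size = max_size - init_size;
    let user_region_start_addr = reserved_mem_start_addr + init_size + 0x20_0000;

    // Same derivation as the diff: the gap spans from the end of the
    // committed part to the start of the user region.
    let gap_start = reserved_mem_start_addr + init_size;
    let gap_end = user_region_start_addr;
    println!(
        "gap = {:#x}..{:#x}, user region size = {:#x}",
        gap_start, gap_end, user_region_size
    );
}
```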

@@ -93,7 +93,8 @@ impl VMManager {
         let res = self.internal().mmap_shared_chunk(options);
         match res {
             Ok(addr) => {
-                trace!(
+                // Important info if we reach here
+                debug!(
                     "mmap_shared_chunk success: addr = 0x{:X}, pid = {}",
                     res.as_ref().unwrap(),
                     current!().process().pid()
@@ -127,6 +128,7 @@ impl VMManager {
         }
 
         if size > CHUNK_DEFAULT_SIZE {
+            info!("allocate Single-VMA chunk");
             if let Ok(new_chunk) = self.internal().mmap_chunk(options) {
                 let start = new_chunk.range().start();
                 current!().vm().add_mem_chunk(new_chunk);
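
The `info!("allocate Single-VMA chunk")` above marks a fork in the mmap path: requests larger than `CHUNK_DEFAULT_SIZE` get a dedicated single-VMA chunk, while smaller ones are sub-allocated from shared default chunks. A hedged sketch of that dispatch; the constant's value and all names other than `CHUNK_DEFAULT_SIZE` are assumptions:

```rust
// Sketch of the size-based dispatch in the mmap path; the value is assumed.
const CHUNK_DEFAULT_SIZE: usize = 32 * 1024 * 1024; // illustrative 32 MiB

#[derive(Debug)]
enum ChunkChoice {
    // One chunk backing exactly one VMA; unmapped as a whole.
    SingleVma,
    // A slice of a shared default chunk, managed by a free-space manager.
    FromDefaultChunk,
}

fn choose_chunk(request_size: usize) -> ChunkChoice {
    if request_size > CHUNK_DEFAULT_SIZE {
        ChunkChoice::SingleVma
    } else {
        ChunkChoice::FromDefaultChunk
    }
}

fn main() {
    println!("{:?}", choose_chunk(64 * 1024 * 1024)); // SingleVma
    println!("{:?}", choose_chunk(4 * 1024)); // FromDefaultChunk
}
```
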
@@ -168,6 +170,7 @@ impl VMManager {
         // Slow path: Sadly, there is no free chunk, iterate every chunk to find a range
         {
+            info!("iterate every chunk to find a range");
             // Release lock after this block
             let mut result_start = Ok(0);
             let chunks = &self.internal().chunks;
@@ -186,6 +189,7 @@ impl VMManager {
         }
 
         // Can't find a range in default chunks. Maybe there is still free range in the global free list.
+        info!("try find free range from the global free list");
         if let Ok(new_chunk) = self.internal().mmap_chunk(options) {
             let start = new_chunk.range().start();
             current!().vm().add_mem_chunk(new_chunk);
@@ -216,7 +220,7 @@ impl VMManager {
                 // The man page of munmap states that "it is not an error if the indicated
                 // range does not contain any mapped pages". This is not considered as
                 // an error!
-                trace!("the munmap range is not mapped");
+                debug!("the munmap range is not mapped");
                 return Ok(());
             }
             chunk.unwrap().clone()
@@ -498,7 +502,7 @@ impl VMManager {
                 self.parse_mremap_options_for_single_vma_chunk(options, vma)
             }
         }?;
-        trace!("mremap options after parsing = {:?}", remap_result_option);
+        debug!("mremap options after parsing = {:?}", remap_result_option);
 
         let ret_addr = if let Some(mmap_options) = remap_result_option.mmap_options() {
             let mmap_addr = self.mmap(mmap_options);
@@ -629,7 +633,7 @@ impl InternalVMManager {
         // Add this range to chunks
         let chunk = Arc::new(Chunk::new_default_chunk(free_range)?);
-        trace!("allocate a default chunk = {:?}", chunk);
+        debug!("allocate a default chunk: {:?}", chunk);
         self.chunks.insert(chunk.clone());
         Ok(chunk)
     }
@@ -668,7 +672,7 @@ impl InternalVMManager {
         munmap_range: Option<&VMRange>,
         flag: MunmapChunkFlag,
     ) -> Result<()> {
-        trace!(
+        debug!(
             "munmap_chunk range = {:?}, munmap_range = {:?}",
             chunk.range(),
             munmap_range
@@ -696,7 +700,7 @@ impl InternalVMManager {
         };
 
         if LIBOS_CONFIG.feature.enable_posix_shm && chunk.is_shared() {
-            trace!(
+            debug!(
                 "munmap_shared_chunk, chunk_range = {:?}, munmap_range = {:?}",
                 chunk.range(),
                 munmap_range,
@@ -1049,7 +1053,7 @@ impl InternalVMManager {
             return self.force_mmap_across_multiple_chunks(target_range, options);
         }
 
-        trace!(
+        debug!(
            "mmap with addr in existing default chunk: {:?}",
            chunk.range()
        );
@@ -1183,7 +1187,7 @@ impl InternalVMManager {
                 .collect::<Vec<VMMapOptions>>()
         };
 
-        trace!(
+        debug!(
             "force mmap across multiple chunks mmap ranges = {:?}",
             target_contained_ranges
         );