From 6107a32675bbf5c5efdd707bd2ec0c5b2413a7ed Mon Sep 17 00:00:00 2001
From: "Hui, Chunyang"
Date: Mon, 30 Jan 2023 16:39:15 +0800
Subject: [PATCH] Fix mremap lock range when merging connecting chunks

---
 src/libos/src/vm/process_vm.rs |  6 ++++--
 src/libos/src/vm/vm_manager.rs | 13 ++++++++++---
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/src/libos/src/vm/process_vm.rs b/src/libos/src/vm/process_vm.rs
index 912d4b41..8f1d86a5 100644
--- a/src/libos/src/vm/process_vm.rs
+++ b/src/libos/src/vm/process_vm.rs
@@ -11,6 +11,7 @@ use super::vm_util::{
     FileBacked, VMInitializer, VMMapAddr, VMMapOptions, VMMapOptionsBuilder, VMRemapOptions,
 };
 use std::collections::HashSet;
+use util::sync::rw_lock::RwLockWriteGuard;
 
 // Used for heap and stack start address randomization.
 const RANGE_FOR_RANDOMIZATION: usize = 256 * 4096; // 1M
@@ -346,9 +347,10 @@ impl ProcessVM {
 
     // Try merging all connecting single VMAs of the process.
     // This is a very expensive operation.
-    pub fn merge_all_single_vma_chunks(&self) -> Result<Vec<VMArea>> {
+    pub fn merge_all_single_vma_chunks(
+        mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
+    ) -> Result<Vec<VMArea>> {
         // Get all single VMA chunks
-        let mut mem_chunks = self.mem_chunks.write().unwrap();
         let mut single_vma_chunks = mem_chunks
             .drain_filter(|chunk| chunk.is_single_vma())
             .collect::<Vec<ChunkRef>>();
diff --git a/src/libos/src/vm/vm_manager.rs b/src/libos/src/vm/vm_manager.rs
index 97c3da88..aebabe71 100644
--- a/src/libos/src/vm/vm_manager.rs
+++ b/src/libos/src/vm/vm_manager.rs
@@ -392,11 +392,19 @@ impl VMManager {
         {
             // Must lock the internal manager first here in case the chunk's range and vma are conflict when other threads are operating the VM
             let mut internal_manager = self.internal.lock().unwrap();
-            let mut merged_vmas = current.vm().merge_all_single_vma_chunks()?;
+            // Lock process mem_chunks during the whole merging process to avoid conflict
+            let mut process_mem_chunks = current.vm().mem_chunks().write().unwrap();
+
+            let mut merged_vmas = ProcessVM::merge_all_single_vma_chunks(&mut process_mem_chunks)?;
             internal_manager.clean_single_vma_chunks();
+
+            // Add merged chunks to internal manager and process mem_chunks
             while merged_vmas.len() != 0 {
                 let merged_vma = merged_vmas.pop().unwrap();
-                internal_manager.add_new_chunk(&current, merged_vma);
+                let new_vma_chunk = Arc::new(Chunk::new_chunk_with_vma(merged_vma));
+                let success = internal_manager.chunks.insert(new_vma_chunk.clone());
+                process_mem_chunks.insert(new_vma_chunk);
+                debug_assert!(success);
             }
         }
 
@@ -845,7 +853,6 @@ impl InternalVMManager {
                 }
 
                 // Munmap the corresponding single vma chunk
-                // let mut internal_manager = self.internal();
                 self.munmap_chunk(&chunk, Some(&target_range))?;
             }
             VMMapAddr::Any => unreachable!(),
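
Note on the locking change (commentary, not part of the patch): before this fix, merge_all_single_vma_chunks() took the process's mem_chunks write lock internally and dropped it on return, so the merged VMAs were re-inserted under a second, separately acquired lock; the chunk list could be observed or mutated in the window between merging and re-insertion. The patch hoists the write guard up to the caller in VMManager and passes it into the helper, so the internal-manager lock and the mem_chunks write lock are both held across the whole merge-and-reinsert sequence. The sketch below is a minimal illustration of that lock-hoisting pattern using std::sync::RwLock; the names and types (Chunk as a (start, end) pair, merge_connecting) are illustrative stand-ins, not Occlum's actual ChunkRef/VMArea API.

use std::collections::HashSet;
use std::sync::{RwLock, RwLockWriteGuard};

// Illustrative stand-in for a chunk: a (start, end) address range.
type Chunk = (usize, usize);

// The helper borrows the caller's write guard instead of locking internally,
// so the caller decides how long the critical section lasts.
fn merge_connecting(chunks: &mut RwLockWriteGuard<HashSet<Chunk>>) -> Vec<Chunk> {
    // Drain every chunk, then coalesce ranges whose end meets the next start.
    let mut sorted: Vec<Chunk> = chunks.drain().collect();
    sorted.sort_unstable();
    let mut merged: Vec<Chunk> = Vec::new();
    for (start, end) in sorted {
        match merged.last_mut() {
            Some((_, prev_end)) if *prev_end == start => *prev_end = end,
            _ => merged.push((start, end)),
        }
    }
    merged
}

fn main() {
    let mem_chunks = RwLock::new(HashSet::from([(0, 4), (4, 8), (16, 20)]));

    // Hold the write lock across merging *and* re-insertion, mirroring the
    // patch: no other thread can see the set while it is half-updated.
    let mut guard = mem_chunks.write().unwrap();
    let merged = merge_connecting(&mut guard);
    for chunk in merged {
        guard.insert(chunk);
    }
    drop(guard);

    assert_eq!(mem_chunks.read().unwrap().len(), 2); // (0, 8) and (16, 20)
}

Passing the guard as a parameter (rather than re-locking inside the helper) is what lets the caller extend the critical section over several steps without changing the lock's type or adding a re-entrant lock.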