Support munmap multiple single VMA chunks with remaining ranges
parent 5c10af738e
commit f87ee7c7a4
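Previously, when a munmap range spanned several chunks, the kernel drained every overlapping single-VMA chunk and bailed out with EINVAL as soon as one of those chunks was only partially covered by the range (the removed TODO path below). With this change, VMManager simply collects all overlapping chunks and forwards the munmap range to each one: multi-VMA chunks unmap the range through their chunk manager as before, and single-VMA chunks are handed to munmap_chunk together with the range, so only the intersection is unmapped and each chunk's remaining ranges stay mapped. A regression test whose munmap range cuts across three single-VMA chunks is added.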
@@ -257,55 +257,37 @@ impl VMManager {
             chunk.unwrap().clone()
         };
 
         // Case 1: the overlapping chunk IS NOT a super set of munmap range
         if !chunk.range().is_superset_of(&munmap_range) {
             // munmap range spans multiple chunks
-            let munmap_single_vma_chunks = {
+            let overlapping_chunks = {
                 let current = current!();
-                let mut process_mem_chunks = current.vm().mem_chunks().write().unwrap();
-                let munmap_single_vma_chunks = process_mem_chunks
-                    .drain_filter(|p_chunk| {
-                        p_chunk.is_single_vma() && p_chunk.range().overlap_with(&munmap_range)
-                    })
-                    .collect::<Vec<ChunkRef>>();
-                if munmap_single_vma_chunks
-                    .iter()
-                    .find(|chunk| !munmap_range.is_superset_of(chunk.range()))
-                    .is_some()
-                {
-                    // TODO: Support munmap multiple single VMA chunk with remaining ranges.
-                    return_errno!(
-                        EINVAL,
-                        "munmap multiple chunks with remaining ranges is not supported"
-                    );
-                }
-
-                // Munmap ranges in default chunks
-                for chunk in process_mem_chunks
+                let process_mem_chunks = current.vm().mem_chunks().read().unwrap();
+                process_mem_chunks
                     .iter()
                     .filter(|p_chunk| p_chunk.range().overlap_with(&munmap_range))
-                {
-                    match chunk.internal() {
-                        ChunkType::SingleVMA(_) => {
-                            unreachable!() // single-vma chunks should be drained already
-                        }
-                        ChunkType::MultiVMA(manager) => manager
-                            .lock()
-                            .unwrap()
-                            .chunk_manager_mut()
-                            .munmap_range(munmap_range)?,
-                    }
-                }
-
-                munmap_single_vma_chunks
+                    .cloned()
+                    .collect::<Vec<ChunkRef>>()
             };
 
-            let mut internl_manager = self.internal();
-            munmap_single_vma_chunks.iter().for_each(|p_chunk| {
-                internl_manager.munmap_chunk(p_chunk, None);
-            });
+            for chunk in overlapping_chunks.iter() {
+                match chunk.internal() {
+                    ChunkType::SingleVMA(_) => {
+                        let mut internl_manager = self.internal();
+                        internl_manager.munmap_chunk(chunk, Some(&munmap_range))?
+                    }
+                    ChunkType::MultiVMA(manager) => manager
+                        .lock()
+                        .unwrap()
+                        .chunk_manager_mut()
+                        .munmap_range(munmap_range)?,
+                }
+            }
             return Ok(());
         }
 
         // Case 2: the overlapping chunk IS a super set of munmap range
         debug_assert!(chunk.range().is_superset_of(&munmap_range));
         match chunk.internal() {
             ChunkType::MultiVMA(manager) => {
                 return manager
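To make "remaining ranges" concrete, here is a minimal, self-contained Rust sketch of the underlying range arithmetic. Only is_superset_of and overlap_with appear in the diff above; the VMRange field layout and the subtract helper are illustrative assumptions, not Occlum's actual API:

    // Minimal sketch of the range arithmetic behind "remaining ranges".
    // `VMRange` is a simplified stand-in for Occlum's type: only
    // `is_superset_of` and `overlap_with` appear in the diff above;
    // `subtract` and the field layout are illustrative assumptions.
    #[derive(Debug, Clone, Copy, PartialEq)]
    struct VMRange {
        start: usize,
        end: usize,
    }

    impl VMRange {
        fn is_superset_of(&self, other: &VMRange) -> bool {
            self.start <= other.start && other.end <= self.end
        }

        fn overlap_with(&self, other: &VMRange) -> bool {
            self.start < other.end && other.start < self.end
        }

        // What stays mapped in `self` after unmapping `munmap`:
        // zero, one, or two remaining ranges.
        fn subtract(&self, munmap: &VMRange) -> Vec<VMRange> {
            let mut remaining = Vec::new();
            if self.start < munmap.start {
                remaining.push(VMRange { start: self.start, end: self.end.min(munmap.start) });
            }
            if munmap.end < self.end {
                remaining.push(VMRange { start: self.start.max(munmap.end), end: self.end });
            }
            remaining
        }
    }

    fn main() {
        // A single-VMA chunk [0x1000, 0x5000) hit by munmap([0x3000, 0x9000)):
        // the munmap range is NOT a superset of the chunk, so the old code
        // returned EINVAL; the remaining range [0x1000, 0x3000) must stay mapped.
        let chunk = VMRange { start: 0x1000, end: 0x5000 };
        let munmap = VMRange { start: 0x3000, end: 0x9000 };
        assert!(chunk.overlap_with(&munmap));
        assert!(!munmap.is_superset_of(&chunk));
        println!("remaining = {:?}", chunk.subtract(&munmap));
    }

In this scenario the old code hit the EINVAL branch precisely because subtract would be non-empty for a drained chunk; the new code instead lets munmap_chunk unmap the intersection and keep such leftovers mapped.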
@@ -696,7 +678,9 @@ impl InternalVMManager {
                 munmap_range.unwrap()
             }
         };
-        debug_assert!(chunk.range().is_superset_of(munmap_range));
+
+        // Either the munmap range is a subset of the chunk range or the munmap range is
+        // a superset of the chunk range. We can handle both cases.
 
         let mut vma = vma.lock().unwrap();
         debug_assert!(chunk.range() == vma.range());
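The replaced assertion captures the new contract: the munmap range handed to InternalVMManager no longer has to be contained in the chunk. One way to picture "we can handle both cases" is clipping the request to the chunk before unmapping; a tiny sketch reusing the illustrative VMRange from above (intersect is a hypothetical helper, not taken from the patch):

    // Clip the munmap request to one chunk (illustrative helper, not from
    // the patch): the chunk unmaps only the intersection and keeps the rest.
    fn intersect(a: &VMRange, b: &VMRange) -> Option<VMRange> {
        let start = a.start.max(b.start);
        let end = a.end.min(b.end);
        if start < end {
            Some(VMRange { start, end }) // the part of `b` inside `a`
        } else {
            None // no overlap: nothing to unmap in this chunk
        }
    }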
@@ -24,6 +24,7 @@
 #define ALIGN_UP(x, a) ALIGN_DOWN((x+(a-1)), (a))
 
 #define MAX_MMAP_USED_MEMORY (4 * MB)
+#define DEFAULT_CHUNK_SIZE (32 * MB) // This is the default chunk size used in Occlum kernel.
 
 // ============================================================================
 // Helper functions
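The new DEFAULT_CHUNK_SIZE constant mirrors the 32 MB default chunk size in the Occlum kernel, as its trailing comment says. The test added below relies on it: fixed mappings placed at HINT_BEGIN + DEFAULT_CHUNK_SIZE and beyond fall outside the process's default chunk, so each 1 MB mmap is expected to get its own single-VMA chunk — the configuration the new munmap path must handle.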
@@ -676,6 +677,49 @@ int test_munmap_whose_range_intersects_with_multiple_mmap_regions() {
     return 0;
 }
 
+int test_munmap_whose_range_intersects_with_several_chunks() {
+    int prot = PROT_READ | PROT_WRITE;
+    int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
+    size_t len = 1 * MB;
+
+    // Allocate three contiguous single-VMA chunks right after the end of the default chunk
+    size_t hint_1 = HINT_BEGIN + DEFAULT_CHUNK_SIZE;
+    void *addr = mmap((void *)hint_1, len, prot, flags, -1, 0);
+    if ((size_t)addr != hint_1) {
+        THROW_ERROR("fixed mmap with good hint failed");
+    }
+
+    size_t hint_2 = hint_1 + len;
+    addr = mmap((void *)hint_2, len, prot, flags, -1, 0);
+    if ((size_t)addr != hint_2) {
+        THROW_ERROR("fixed mmap for the second chunk failed");
+    }
+
+    size_t hint_3 = hint_2 + len;
+    addr = mmap((void *)hint_3, len, prot, flags, -1, 0);
+    if ((size_t)addr != hint_3) {
+        THROW_ERROR("fixed mmap for the third chunk failed");
+    }
+
+    // Munmap a range that spans all three chunks above
+    size_t munmap_start = hint_1 + len / 2;
+    size_t munmap_end = hint_3 + len / 2;
+
+    if (munmap((void *)munmap_start, munmap_end - munmap_start) < 0) {
+        THROW_ERROR("munmap failed");
+    }
+
+    if (check_buf_is_munmapped((void *)munmap_start, munmap_end - munmap_start) < 0) {
+        THROW_ERROR("munmap does not really free the memory");
+    }
+
+    // Unmap the remaining ranges of the three chunks
+    if (munmap((void *)hint_1, 3 * len) < 0) {
+        THROW_ERROR("munmap remaining ranges failed");
+    }
+
+    return 0;
+}
+
 int test_munmap_with_null_addr() {
     // Set the address for munmap to NULL!
     //
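For reference, the layout the new test builds (addresses from the code above): chunk 2 is fully covered by the munmap range, while chunks 1 and 3 are each only half covered, so the call must leave one remaining range on each side — exactly the case that previously failed with EINVAL:

    hint_1           hint_2           hint_3
    |---chunk 1----|---chunk 2----|---chunk 3----|
           ^ munmap_start                ^ munmap_end
           [======== munmap range =======)

    kept after munmap: [hint_1, munmap_start) and [munmap_end, hint_3 + len)

The final munmap((void *)hint_1, 3 * len) then cleans up these two leftovers and must also succeed, which checks that the remaining ranges were tracked correctly.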
@@ -1328,6 +1372,7 @@ static test_case_t test_cases[] = {
     TEST_CASE(test_munmap_whose_range_intersects_with_a_mmap_region),
     TEST_CASE(test_munmap_whose_range_intersects_with_no_mmap_regions),
     TEST_CASE(test_munmap_whose_range_intersects_with_multiple_mmap_regions),
+    TEST_CASE(test_munmap_whose_range_intersects_with_several_chunks),
     TEST_CASE(test_munmap_with_null_addr),
     TEST_CASE(test_munmap_with_zero_len),
     TEST_CASE(test_munmap_with_non_page_aligned_len),