[libos] Fix munmap conflict between chunk range and VMA range

Hui, Chunyang 2022-09-05 03:49:59 +00:00 committed by volcano
parent 61b9639081
commit 171faccea7
4 changed files with 96 additions and 6 deletions

@@ -54,6 +54,7 @@ impl Eq for Chunk {}
impl Debug for Chunk {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "range = {:?}, ", self.range);
match self.internal() {
ChunkType::SingleVMA(vma) => write!(f, "Single VMA chunk: {:?}", vma),
ChunkType::MultiVMA(internal_manager) => write!(f, "default chunk: {:?}", self.range()),

@@ -240,8 +240,8 @@ impl VMManager {
align_up(size, PAGE_SIZE)
};
let munmap_range = { VMRange::new_with_size(addr, size) }?;
let current = current!();
let chunk = {
let current = current!();
let process_mem_chunks = current.vm().mem_chunks().read().unwrap();
let chunk = process_mem_chunks
.iter()
@@ -260,8 +260,10 @@ impl VMManager {
// Case 1: the overlapping chunk IS NOT a super set of munmap range
if !chunk.range().is_superset_of(&munmap_range) {
// munmap range spans multiple chunks
// Must lock the internal manager first here in case the chunk's range and vma conflict while other threads are operating on the VM
let mut internal_manager = self.internal();
let overlapping_chunks = {
let current = current!();
let process_mem_chunks = current.vm().mem_chunks().read().unwrap();
process_mem_chunks
.iter()
@@ -273,8 +275,7 @@ impl VMManager {
for chunk in overlapping_chunks.iter() {
match chunk.internal() {
ChunkType::SingleVMA(_) => {
let mut internl_manager = self.internal();
internl_manager.munmap_chunk(chunk, Some(&munmap_range))?
internal_manager.munmap_chunk(chunk, Some(&munmap_range))?
}
ChunkType::MultiVMA(manager) => manager
.lock()
@@ -297,8 +298,22 @@ impl VMManager {
.munmap_range(munmap_range);
}
ChunkType::SingleVMA(_) => {
// The single-VMA chunk could have been updated while the internal manager lock was released. Get the overlapping chunk again.
// This is done here because we don't want to acquire the big internal manager lock as soon as we enter this function.
let mut internal_manager = self.internal();
return internal_manager.munmap_chunk(&chunk, Some(&munmap_range));
let overlapping_chunk = {
let process_mem_chunks = current.vm().mem_chunks().read().unwrap();
process_mem_chunks
.iter()
.find(|&chunk| chunk.range().intersect(&munmap_range).is_some())
.map(|chunk| chunk.clone())
};
if let Some(overlapping_chunk) = overlapping_chunk {
return internal_manager.munmap_chunk(&overlapping_chunk, Some(&munmap_range));
} else {
warn!("no overlapping chunks anymore");
return Ok(());
}
}
}
}
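The core of the change above is a lock-then-recheck pattern: acquire the manager-wide internal lock before snapshotting the per-process chunk list, then look the overlapping chunk up again under that lock, since a chunk found before locking may already have been replaced by a concurrent mremap or munmap. Below is a minimal, self-contained sketch of that pattern; the names (VmManager, Chunk, munmap) are simplified stand-ins for illustration, not Occlum's real API.

use std::ops::Range;
use std::sync::{Arc, Mutex, RwLock};

#[derive(Clone, Debug)]
struct Chunk {
    range: Range<usize>,
}

#[derive(Default)]
struct VmManager {
    // The "big" internal lock that serializes structural VM changes.
    internal: Mutex<()>,
    // Per-process list of chunks, readable concurrently.
    chunks: RwLock<Vec<Arc<Chunk>>>,
}

impl VmManager {
    fn munmap(&self, unmap: Range<usize>) {
        // Lock the internal manager first; only then is the chunk list stable.
        let _guard = self.internal.lock().unwrap();

        // Re-read the overlapping chunk *after* taking the lock. A chunk found
        // before locking could have been replaced by a concurrent mremap.
        let overlapping = {
            let chunks = self.chunks.read().unwrap();
            chunks
                .iter()
                .find(|c| c.range.start < unmap.end && unmap.start < c.range.end)
                .cloned()
        };

        match overlapping {
            // Real code would split or shrink the chunk here.
            Some(chunk) => println!("unmapping {:?} inside chunk {:?}", unmap, chunk.range),
            // Another thread may already have unmapped the range: nothing to do.
            None => println!("no overlapping chunk anymore"),
        }
    }
}

fn main() {
    let mgr = VmManager::default();
    mgr.chunks
        .write()
        .unwrap()
        .push(Arc::new(Chunk { range: 0x1000..0x5000 }));
    mgr.munmap(0x2000..0x3000);
}

In the diff itself, the same re-read happens in the SingleVMA branch, and the warn!("no overlapping chunks anymore") path corresponds to the None case in this sketch.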

@@ -1,5 +1,5 @@
include ../test_common.mk
EXTRA_C_FLAGS := -Wno-return-stack-address
EXTRA_LINK_FLAGS :=
EXTRA_LINK_FLAGS := -lpthread
BIN_ARGS :=

@@ -10,6 +10,7 @@
#include <string.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <pthread.h>
#include "test_fs.h"
// ============================================================================
@@ -677,6 +678,78 @@ int test_munmap_whose_range_intersects_with_multiple_mmap_regions() {
return 0;
}
static void *child_routine(void *_arg) {
size_t len = 1 * MB;
void *old_addr = _arg;
void *new_addr = old_addr + 3 * len;
void *addr = mremap(old_addr, len, 2 * len, MREMAP_MAYMOVE | MREMAP_FIXED,
new_addr);
if (addr != new_addr) {
printf("mremap fail in client routine\n");
return (void *) -1;
}
int ret = munmap(addr, 2 * len);
if (ret < 0) {
printf("munmap failed");
return (void *) -1;
}
return NULL;
}
int test_mremap_concurrent() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
size_t len = 1 * MB;
void *child_ret;
// Allocate contiguous single-VMA chunks at the end of the default chunk
size_t hint_1 = HINT_BEGIN + DEFAULT_CHUNK_SIZE;
void *addr = mmap((void *)hint_1, len, prot, flags, -1, 0);
if ((size_t)addr != hint_1) {
THROW_ERROR("fixed mmap with good hint failed");
}
size_t hint_2 = hint_1 + len;
addr = mmap((void *)hint_2, len, prot, flags, -1, 0);
if ((size_t)addr != hint_2) {
THROW_ERROR("fixed mmap spans over two chunks failed");
}
// Create a child thread that mremaps the chunk at hint_2 to an address after the chunk starting at hint_3
pthread_t child_thread = 0;
int ret = pthread_create(&child_thread, NULL, child_routine, (void *)hint_2);
if (ret != 0) {
THROW_ERROR("pthread create failed");
}
// mremap chunk from hint_1 to hint_3
size_t hint_3 = hint_2 + len;
void *ret_addr = mremap((void *)hint_1, len, len * 2, MREMAP_MAYMOVE | MREMAP_FIXED,
(void *)hint_3);
if (ret_addr != (void *)hint_3) {
THROW_ERROR("mremap failed");
}
ret = pthread_join(child_thread, &child_ret);
if (ret != 0 || child_ret != NULL) {
THROW_ERROR("pthread_join failed");
}
if (munmap((void *)hint_3, len * 2) < 0) {
THROW_ERROR("munmap failed");
}
if (check_buf_is_munmapped((void *)hint_1, len * 5) < 0) {
THROW_ERROR("munmap does not really free the memory");
}
return 0;
}
int test_munmap_whose_range_intersects_with_several_chunks() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
@@ -1390,6 +1463,7 @@ static test_case_t test_cases[] = {
TEST_CASE(test_mprotect_with_non_page_aligned_size),
TEST_CASE(test_mprotect_multiple_vmas),
TEST_CASE(test_mprotect_grow_down),
TEST_CASE(test_mremap_concurrent),
};
int main() {