Add support for mmap spans over two chunks with MAP_FIXED

parent 8872acaeda
commit 3e15eb059c
@@ -119,6 +119,25 @@ impl Chunk {
         }
     }
 
+    pub fn is_owned_by_current_process(&self) -> bool {
+        let current = current!();
+        let process_mem_chunks = current.vm().mem_chunks().read().unwrap();
+        if !process_mem_chunks
+            .iter()
+            .any(|chunk| chunk.range() == self.range())
+        {
+            return false;
+        }
+
+        match self.internal() {
+            ChunkType::SingleVMA(vma) => true,
+            ChunkType::MultiVMA(internal_manager) => {
+                let internal_manager = internal_manager.lock().unwrap();
+                internal_manager.is_owned_by_current_process()
+            }
+        }
+    }
+
     pub fn add_process(&self, current: &ThreadRef) {
         match self.internal() {
             ChunkType::SingleVMA(vma) => unreachable!(),
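The new ownership check is two-staged: the chunk must be registered in the calling process's mem_chunks list, and a multi-VMA chunk must additionally have its internal manager confirm ownership (presumably that every inner VMA belongs to the process; the manager's implementation is not part of this diff). A minimal self-contained sketch of that shape, with illustrative types rather than Occlum's:

    // Simplified model of the two-step ownership test (illustrative types only).
    #[derive(Clone, Copy, PartialEq)]
    struct Range { start: usize, end: usize }

    enum ChunkKind {
        SingleVma,                       // one VMA; owned outright once registered
        MultiVma { vma_pids: Vec<u32> }, // shared container; assume the internal
                                         // manager checks every inner VMA's pid
    }

    fn is_owned_by(kind: &ChunkKind, chunk_range: Range, process_chunks: &[Range], pid: u32) -> bool {
        // Step 1: the chunk must be registered in this process's chunk list.
        if !process_chunks.iter().any(|r| *r == chunk_range) {
            return false;
        }
        // Step 2: for multi-VMA chunks, every inner VMA must belong to this process.
        match kind {
            ChunkKind::SingleVma => true,
            ChunkKind::MultiVma { vma_pids } => vma_pids.iter().all(|&p| p == pid),
        }
    }

    fn main() {
        let r = Range { start: 0x1000, end: 0x2000 };
        let shared = ChunkKind::MultiVma { vma_pids: vec![1, 2] };
        assert!(!is_owned_by(&shared, r, &[r], 1)); // pid 2's VMA blocks ownership
        let exclusive = ChunkKind::MultiVma { vma_pids: vec![1, 1] };
        assert!(is_owned_by(&exclusive, r, &[r], 1));
        let single = ChunkKind::SingleVma;
        assert!(is_owned_by(&single, r, &[r], 7)); // registration alone suffices
    }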
@@ -217,6 +236,17 @@ impl Chunk {
             }
         }
     }
+
+    pub fn is_free_range(&self, request_range: &VMRange) -> bool {
+        match self.internal() {
+            ChunkType::SingleVMA(_) => false, // single-vma chunk can't be free
+            ChunkType::MultiVMA(internal_manager) => internal_manager
+                .lock()
+                .unwrap()
+                .chunk_manager
+                .is_free_range(request_range),
+        }
+    }
 }
 
 #[derive(Debug)]
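is_free_range is meaningful only for multi-VMA chunks, whose internal chunk_manager tracks which sub-ranges are still unallocated; a single-VMA chunk is occupied by definition. A rough sketch of the query, with a sorted free list standing in for the real chunk manager (assumed shape, not Occlum's implementation; the containment test mirrors VMRange's is_superset_of):

    // Sketch: a free-range query against a list of free intervals.
    #[derive(Clone, Copy)]
    struct Range { start: usize, end: usize }

    impl Range {
        fn contains_range(&self, other: &Range) -> bool {
            self.start <= other.start && other.end <= self.end
        }
    }

    fn is_free_range(free_list: &[Range], request: &Range) -> bool {
        // The request is free iff some free interval fully contains it.
        free_list.iter().any(|f| f.contains_range(request))
    }

    fn main() {
        let free = [Range { start: 0x3000, end: 0x6000 }];
        assert!(is_free_range(&free, &Range { start: 0x4000, end: 0x5000 }));
        assert!(!is_free_range(&free, &Range { start: 0x5000, end: 0x7000 }));
    }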
@@ -149,7 +149,7 @@ impl ChunkManager {
         let mut vmas_cursor = self.vmas.upper_bound_mut(Bound::Included(&bound));
         while !vmas_cursor.is_null() && vmas_cursor.get().unwrap().vma().start() <= range.end() {
             let vma = &vmas_cursor.get().unwrap().vma();
-            warn!("munmap related vma = {:?}", vma);
+            trace!("munmap related vma = {:?}", vma);
             if vma.size() == 0 || current_pid != vma.pid() {
                 vmas_cursor.move_next();
                 continue;
@@ -190,7 +190,7 @@ impl ChunkManager {
 
             // Reset zero
             unsafe {
-                warn!("intersection vma = {:?}", intersection_vma);
+                trace!("intersection vma = {:?}", intersection_vma);
                 let buf = intersection_vma.as_slice_mut();
                 buf.iter_mut().for_each(|b| *b = 0)
             }
@@ -301,7 +301,7 @@ impl ChunkManager {
                 // The whole containing_vma is mprotected
                 containing_vma.set_perms(new_perms);
                 VMPerms::apply_perms(&containing_vma, containing_vma.perms());
-                warn!("containing_vma = {:?}", containing_vma);
+                trace!("containing_vma = {:?}", containing_vma);
                 containing_vmas.replace_with(VMAObj::new_vma_obj(containing_vma));
                 containing_vmas.move_next();
                 continue;
@@ -157,7 +157,7 @@ impl VMManager {
     }
 
     // If addr is specified, use single VMA chunk to record this
-    fn mmap_with_addr(&self, range: VMRange, options: &VMMapOptions) -> Result<usize> {
+    fn mmap_with_addr(&self, target_range: VMRange, options: &VMMapOptions) -> Result<usize> {
         let addr = *options.addr();
         let size = *options.size();
 
@@ -167,7 +167,7 @@ impl VMManager {
             let process_mem_chunks = current.vm().mem_chunks().read().unwrap();
             process_mem_chunks
                 .iter()
-                .find(|&chunk| chunk.range().intersect(&range).is_some())
+                .find(|&chunk| chunk.range().intersect(&target_range).is_some())
                 .cloned()
         };
 
@@ -175,10 +175,11 @@ impl VMManager {
             // This range is currently in a allocated chunk
             match chunk.internal() {
                 ChunkType::MultiVMA(chunk_internal) => {
-                    // If the chunk only intersect, but not a superset, we can't handle this.
-                    if !chunk.range().is_superset_of(&range) {
-                        return_errno!(EINVAL, "mmap with specified addr spans over two chunks");
+                    if !chunk.range().is_superset_of(&target_range) && addr.is_force() {
+                        // The target range spans multiple chunks and has a strong need for the address
+                        return self.force_mmap_across_multiple_chunks(target_range, options);
                     }
 
                     trace!(
                         "mmap with addr in existing default chunk: {:?}",
                         chunk.range()
@@ -194,18 +195,15 @@ impl VMManager {
                     return_errno!(ENOMEM, "Single VMA is currently in use. Need failure");
                 }
                 VMMapAddr::Force(addr) => {
-                    // Munmap the corresponding single vma chunk
-                    // If the chunk only intersect, but not a superset, we can't handle this.
-                    if !chunk.range().is_superset_of(&range) {
-                        trace!(
-                            "chunk range = {:?}, target range = {:?}",
-                            chunk.range(),
-                            range
-                        );
-                        return_errno!(EINVAL, "mmap with specified addr spans two chunks");
+                    if !chunk.range().is_superset_of(&target_range) {
+                        // The target range spans multiple chunks and has a strong need for the address
+                        return self
+                            .force_mmap_across_multiple_chunks(target_range, options);
                     }
 
+                    // Munmap the corresponding single vma chunk
                     let mut internal_manager = self.internal();
-                    internal_manager.munmap_chunk(&chunk, Some(&range))?;
+                    internal_manager.munmap_chunk(&chunk, Some(&target_range))?;
                 }
                 VMMapAddr::Any => unreachable!(),
             }
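Both arms above now share one escape hatch: a MAP_FIXED request whose target range is not fully contained in the chunk it hits is no longer rejected with EINVAL but rerouted to force_mmap_across_multiple_chunks, while a plain Need hint still falls through to the in-chunk path. The decision, condensed into a standalone sketch (simplified away from the real types):

    // Condensed routing logic for an address-carrying mmap (sketch only).
    fn route(addr_is_force: bool, chunk_contains_target: bool) -> &'static str {
        if !chunk_contains_target && addr_is_force {
            // MAP_FIXED leaking past the chunk boundary: split across chunks.
            "force_mmap_across_multiple_chunks"
        } else {
            // Fits inside this chunk (or is only a hint): handle it here.
            "mmap within the existing chunk"
        }
    }

    fn main() {
        assert_eq!(route(true, false), "force_mmap_across_multiple_chunks");
        assert_eq!(route(false, false), "mmap within the existing chunk");
        assert_eq!(route(true, true), "mmap within the existing chunk");
    }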
@@ -218,7 +216,7 @@ impl VMManager {
         let start = new_chunk.range().start();
         debug_assert!({
             match addr {
-                VMMapAddr::Force(addr) | VMMapAddr::Need(addr) => start == range.start(),
+                VMMapAddr::Force(addr) | VMMapAddr::Need(addr) => start == target_range.start(),
                 _ => true,
             }
         });
@@ -277,18 +275,23 @@ impl VMManager {
             );
         }
 
-        // TODO: Support munmap a part of default chunks
-        // Check munmap default chunks
-        if process_mem_chunks
+        // Munmap ranges in default chunks
+        for chunk in process_mem_chunks
             .iter()
-            .find(|p_chunk| p_chunk.range().overlap_with(&munmap_range))
-            .is_some()
+            .filter(|p_chunk| p_chunk.range().overlap_with(&munmap_range))
         {
-            return_errno!(
-                EINVAL,
-                "munmap range overlap with default chunks is not supported"
-            );
+            match chunk.internal() {
+                ChunkType::SingleVMA(_) => {
+                    unreachable!() // single-vma chunks should be drained already
+                }
+                ChunkType::MultiVMA(manager) => manager
+                    .lock()
+                    .unwrap()
+                    .chunk_manager()
+                    .munmap_range(munmap_range)?,
+            }
         }
 
         munmap_single_vma_chunks
     };
@@ -507,6 +510,104 @@ impl VMManager {
 
         assert!(mem_chunks.len() == 0);
     }
 
+    fn force_mmap_across_multiple_chunks(
+        &self,
+        target_range: VMRange,
+        options: &VMMapOptions,
+    ) -> Result<usize> {
+        match options.initializer() {
+            VMInitializer::DoNothing() | VMInitializer::FillZeros() => {}
+            _ => return_errno!(
+                ENOSYS,
+                "unsupported memory initializer in mmap across multiple chunks"
+            ),
+        }
+
+        // Get all intersecting chunks
+        let intersect_chunks = {
+            let chunks = self
+                .internal()
+                .chunks
+                .iter()
+                .filter(|&chunk| chunk.range().intersect(&target_range).is_some())
+                .map(|chunk| chunk.clone())
+                .collect::<Vec<_>>();
+
+            // If any intersecting chunk belongs to another process, this mmap can't succeed
+            if chunks
+                .iter()
+                .any(|chunk| !chunk.is_owned_by_current_process())
+            {
+                return_errno!(
+                    ENOMEM,
+                    "part of the target range is allocated by other process"
+                );
+            }
+            chunks
+        };
+
+        let mut intersect_ranges = intersect_chunks
+            .iter()
+            .map(|chunk| chunk.range().intersect(&target_range).unwrap())
+            .collect::<Vec<_>>();
+
+        // Based on the chunk ranges, split the whole target range into connected ranges, including free gaps
+        let target_contained_ranges = {
+            let mut contained_ranges = Vec::with_capacity(intersect_ranges.len());
+            for ranges in intersect_ranges.windows(2) {
+                let range_a = ranges[0];
+                let range_b = ranges[1];
+                debug_assert!(range_a.end() <= range_b.start());
+                contained_ranges.push(range_a);
+                if range_a.end() < range_b.start() {
+                    contained_ranges.push(VMRange::new(range_a.end(), range_b.start()).unwrap());
+                }
+            }
+            contained_ranges.push(intersect_ranges.pop().unwrap());
+            contained_ranges
+        };
+
+        // Based on the target contained ranges, rebuild the VMMapOptions
+        let new_options = {
+            let perms = options.perms().clone();
+            let align = options.align().clone();
+            let initializer = options.initializer();
+            target_contained_ranges
+                .iter()
+                .map(|range| {
+                    let size = range.size();
+                    let addr = match options.addr() {
+                        VMMapAddr::Force(_) => VMMapAddr::Force(range.start()),
+                        _ => unreachable!(),
+                    };
+
+                    VMMapOptionsBuilder::default()
+                        .perms(perms)
+                        .align(align)
+                        .initializer(initializer.clone())
+                        .addr(addr)
+                        .size(size)
+                        .build()
+                        .unwrap()
+                })
+                .collect::<Vec<VMMapOptions>>()
+        };
+
+        trace!(
+            "force mmap across multiple chunks mmap ranges = {:?}",
+            target_contained_ranges
+        );
+        for (range, options) in target_contained_ranges.into_iter().zip(new_options.iter()) {
+            if self.mmap_with_addr(range, options).is_err() {
+                // Although the error here is fatal and rare, returning an error is not enough here.
+                // FIXME: All previous mmap ranges should be munmapped.
+                return_errno!(ENOMEM, "mmap across multiple chunks failed");
+            }
+        }
+
+        Ok(target_range.start())
+    }
 }
 
 // Modification on this structure must aquire the global lock.
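The interesting step in force_mmap_across_multiple_chunks is turning the per-chunk intersections into one gap-free cover of the target range: windows(2) walks adjacent intersections in address order, emits each one, and synthesizes a fresh range for any hole between neighbors; the final pop() appends the last intersection, which windows(2) never emits on its own. A standalone sketch of the same split on plain (start, end) pairs (illustrative, not the real VMRange):

    // Standalone sketch of the gap-filling split used above.
    fn split_with_gaps(mut intersections: Vec<(usize, usize)>) -> Vec<(usize, usize)> {
        let mut out = Vec::with_capacity(intersections.len());
        for pair in intersections.windows(2) {
            let (a, b) = (pair[0], pair[1]);
            debug_assert!(a.1 <= b.0); // inputs are sorted and disjoint
            out.push(a);
            if a.1 < b.0 {
                out.push((a.1, b.0)); // synthesize the free gap between chunks
            }
        }
        // windows(2) never emits the last element, so append it explicitly.
        out.push(intersections.pop().unwrap());
        out
    }

    fn main() {
        // Two chunks intersect the target at [0x1000,0x3000) and [0x5000,0x8000);
        // the hole [0x3000,0x5000) becomes its own mmap request.
        let cover = split_with_gaps(vec![(0x1000, 0x3000), (0x5000, 0x8000)]);
        assert_eq!(cover, vec![(0x1000, 0x3000), (0x3000, 0x5000), (0x5000, 0x8000)]);
    }

Each piece of the cover then gets its own VMMapAddr::Force(range.start()) in the rebuilt VMMapOptions, so every sub-mmap lands exactly where MAP_FIXED demanded.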
@@ -101,6 +101,15 @@ impl Default for VMMapAddr {
     }
 }
 
+impl VMMapAddr {
+    pub(super) fn is_force(&self) -> bool {
+        match self {
+            VMMapAddr::Force(_) => true,
+            _ => false,
+        }
+    }
+}
+
 #[derive(Builder, Debug)]
 #[builder(pattern = "owned", build_fn(skip), no_std)]
 pub struct VMMapOptions {
@@ -522,6 +522,43 @@ int test_fixed_mmap_with_non_page_aligned_addr() {
     return 0;
 }
 
+int test_fixed_mmap_spans_over_two_chunks() {
+    size_t hint = HINT_BEGIN + (HINT_END - HINT_BEGIN) / 3;
+    hint = ALIGN_DOWN(hint, PAGE_SIZE);
+    size_t len = (HINT_END - HINT_BEGIN) / 3 + 1;
+    len = ALIGN_UP(len, PAGE_SIZE);
+    int prot = PROT_READ | PROT_WRITE;
+    int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
+    // First, allocate memory in the default chunk
+    void *addr = mmap((void *)hint, len, prot, flags, -1, 0);
+    if ((size_t)addr != hint) {
+        THROW_ERROR("fixed mmap with good hint failed");
+    }
+
+    // Second, allocate a single-vma chunk after the default chunk
+    hint = HINT_BEGIN + 36 * MB;
+    len = 2 * MB;
+    addr = mmap((void *)hint, len, prot, flags, -1, 0);
+    if ((size_t)addr != hint) {
+        THROW_ERROR("fixed mmap with good hint failed");
+    }
+
+    // Last, force-allocate memory spanning these two chunks
+    hint = HINT_BEGIN + 30 * MB;
+    len = 16 * MB;
+    addr = mmap((void *)hint, len, prot, flags, -1, 0);
+    if ((size_t)addr != hint) {
+        THROW_ERROR("fixed mmap spans over two chunks failed");
+    }
+
+    // Free all potentially allocated memory
+    size_t overall_len = (HINT_END - HINT_BEGIN) + (30 + 16) * MB;
+    if (munmap((void *)HINT_BEGIN, overall_len) < 0) {
+        THROW_ERROR("munmap failed");
+    }
+    return 0;
+}
+
 // ============================================================================
 // Test cases for munmap
 // ============================================================================
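To make the new test's geometry concrete: assuming the default chunk spans 32 MB starting at HINT_BEGIN (an assumption for illustration; the diff never states HINT_END - HINT_BEGIN), the three mmaps land roughly one third into the default chunk, then at +36 MB as a 2 MB single-VMA chunk, and finally as a forced 16 MB mapping at +30 MB that straddles the default chunk's tail, the 4 MB hole, the whole single-VMA chunk, and fresh space beyond it. Replayed with hypothetical offsets relative to HINT_BEGIN:

    fn main() {
        const MB: usize = 1 << 20;
        // All offsets relative to HINT_BEGIN; the 32 MB default-chunk size
        // is an assumed value for illustration, not taken from the diff.
        let default_chunk = 0..32 * MB;       // default chunk region
        let single_vma = 36 * MB..38 * MB;    // 2nd mmap: single-VMA chunk
        let forced = 30 * MB..46 * MB;        // 3rd mmap: MAP_FIXED request
        // The forced range overlaps the default chunk and fully covers
        // the gap, the single-VMA chunk, and 8 MB of fresh space after it.
        assert!(forced.start < default_chunk.end);
        assert!(forced.start < single_vma.start && single_vma.end < forced.end);
        println!("forced range: {:#x}..{:#x}", forced.start, forced.end);
    }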
@@ -1285,6 +1322,7 @@ static test_case_t test_cases[] = {
     TEST_CASE(test_fixed_mmap_that_does_not_override_any_mmaping),
     TEST_CASE(test_fixed_mmap_that_overrides_existing_mmaping),
     TEST_CASE(test_fixed_mmap_with_non_page_aligned_addr),
+    TEST_CASE(test_fixed_mmap_spans_over_two_chunks),
     TEST_CASE(test_munmap_whose_range_is_a_subset_of_a_mmap_region),
     TEST_CASE(test_munmap_whose_range_is_a_superset_of_a_mmap_region),
     TEST_CASE(test_munmap_whose_range_intersects_with_a_mmap_region),