Refactor VMRange allocation for readability

Tate, Hongliang Tian 2019-04-10 18:50:34 +08:00 committed by Tate Tian
parent 356884679b
commit 895f70a2dc
3 changed files with 168 additions and 128 deletions

@@ -101,11 +101,11 @@ impl VMAreaFlags {
     }
 }
 
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, PartialEq, Default)]
 pub struct VMAllocOptions {
     size: usize,
     addr: VMAddrOption,
-    growth: Option<VMGrowthType>,
+    growth: VMGrowthType,
 }
 
 impl VMAllocOptions {
@@ -128,7 +128,7 @@ impl VMAllocOptions {
     }
 
     pub fn growth(&mut self, growth: VMGrowthType) -> Result<&mut Self, Error> {
-        self.growth = Some(growth);
+        self.growth = growth;
         Ok(self)
     }
 }
@@ -143,15 +143,6 @@ impl fmt::Debug for VMAllocOptions {
     }
 }
 
-impl Default for VMAllocOptions {
-    fn default() -> VMAllocOptions {
-        VMAllocOptions {
-            size: 0,
-            addr: VMAddrOption::Any,
-            growth: None,
-        }
-    }
-}
-
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub enum VMAddrOption {
@@ -161,6 +152,12 @@ pub enum VMAddrOption {
     Beyond(usize), // Must be greater or equal to the given address
 }
 
+impl Default for VMAddrOption {
+    fn default() -> VMAddrOption {
+        VMAddrOption::Any
+    }
+}
+
 impl VMAddrOption {
     pub fn is_addr_given(&self) -> bool {
         match self {
@@ -179,18 +176,26 @@ impl VMAddrOption {
     }
 }
 
 /// How VMRange may grow:
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub enum VMGrowthType {
+    Fixed,
     Upward,   // e.g., mmaped regions grow upward
     Downward, // e.g., stacks grows downward
-    Fixed,
 }
 
+impl Default for VMGrowthType {
+    fn default() -> VMGrowthType {
+        VMGrowthType::Fixed
+    }
+}
+
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
 pub struct VMResizeOptions {
     new_size: usize,
-    new_addr: Option<VMAddrOption>,
+    new_addr: VMAddrOption,
 }
 
 impl VMResizeOptions {
@@ -205,16 +210,7 @@ impl VMResizeOptions {
     }
 
     pub fn addr(&mut self, new_addr: VMAddrOption) -> &mut Self {
-        self.new_addr = Some(new_addr);
+        self.new_addr = new_addr;
         self
     }
 }
-
-impl Default for VMResizeOptions {
-    fn default() -> VMResizeOptions {
-        VMResizeOptions {
-            new_size: 0,
-            new_addr: None,
-        }
-    }
-}
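Taken together, the changes above let callers build options from Default instead of a hand-rolled zero value: VMAllocOptions::default() now means size 0, VMAddrOption::Any and VMGrowthType::Fixed, and VMResizeOptions::default() likewise. A minimal usage sketch, not part of the commit; it assumes size() and addr() setters exist in the same builder style as the growth() setter shown above:

    // Hypothetical caller, inside a function returning Result<_, Error> so `?` works.
    let mut options = VMAllocOptions::default();    // size: 0, addr: Any, growth: Fixed
    options
        .size(8 * 4096)?                            // assumed setter, mirroring growth()
        .addr(VMAddrOption::Beyond(base_addr))?     // assumed setter; base_addr is made up
        .growth(VMGrowthType::Upward)?;
    let new_range = parent_range.alloc_subrange(&options)?;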

@@ -156,7 +156,8 @@ impl ProcessVM {
                 if addr < mmap_start_addr {
                     return Err(Error::new(Errno::EINVAL, "Beyond valid memory range"));
                 }
-                VMAddrOption::Fixed(addr)
+                // TODO: Fixed or Hint? Should handle mmap flags
+                VMAddrOption::Hint(addr)
             })?
             .growth(VMGrowthType::Upward)?;
         alloc_options
@@ -168,11 +169,10 @@ impl ProcessVM {
         Ok(addr)
     }
 
+    pub fn munmap(&mut self, addr: usize, size: usize) -> Result<(), Error> {
     // TODO: handle the case when the given range [addr, addr + size)
     // does not match exactly with any vma. For example, when this range
     // covers multiple ranges or covers some range partially.
-    pub fn munmap(&mut self, addr: usize, size: usize) -> Result<(), Error> {
         let mmap_vma_i = {
             let mmap_vma_i = self
                 .get_mmap_vmas()
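The switch from VMAddrOption::Fixed(addr) to VMAddrOption::Hint(addr) in mmap changes how a user-supplied address is treated. Judging from the look_for_free_space() match arms added below, Fixed demands that the sub-range start exactly at the given address (free gaps not containing it are skipped, and a too-small gap is an ENOMEM error), whereas Hint merely prefers that address and otherwise falls back to the smallest big-enough free gap. A hedged sketch of the difference (the setter name and variables are assumptions, not from the diff):

    // Prefer hint_addr, but still succeed elsewhere if that spot is taken.
    options.addr(VMAddrOption::Hint(hint_addr))?;
    // Require the range to start exactly at fixed_addr, or fail.
    options.addr(VMAddrOption::Fixed(fixed_addr))?;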

@@ -60,107 +60,20 @@ impl VMRange {
     }
 
     pub fn alloc_subrange(&mut self, options: &VMAllocOptions) -> Result<VMRange, Error> {
-        // Get valid parameters from options
-        let size = options.size;
-        let addr = options.addr;
-        let growth = options.growth.unwrap_or(VMGrowthType::Fixed);
-        // Lazy initialize the subrange array upon the first allocation
-        if !self.has_subranges() {
-            self.init_subranges()?;
-        }
-        // Find a free space for allocating a VMRange
-        let free_space = {
-            // Look for the minimal big-enough free space
-            let mut min_big_enough_free_space: Option<FreeSpace> = None;
-            let sub_ranges = self.get_subranges();
-            for (idx, range_pair) in sub_ranges.windows(2).enumerate() {
-                let pre_range = &range_pair[0];
-                let next_range = &range_pair[1];
-                let mut free_range = {
-                    let free_range_start = pre_range.get_end();
-                    let free_range_end = next_range.get_start();
-                    let free_range_size = free_range_end - free_range_start;
-                    if free_range_size < size {
-                        continue;
-                    }
-                    free_range_start..free_range_end
-                };
-                match addr {
-                    VMAddrOption::Hint(addr) | VMAddrOption::Fixed(addr) => {
-                        if !free_range.contains(&addr) {
-                            continue;
-                        }
-                        free_range.start = addr;
-                    }
-                    VMAddrOption::Beyond(addr) => {
-                        if free_range.start < addr {
-                            continue;
-                        }
-                    }
-                    _ => {}
-                }
-                let free_space = Some(FreeSpace {
-                    index_in_subranges: idx + 1,
-                    start: free_range.start,
-                    end: free_range.end,
-                    may_neighbor_grow: (
-                        pre_range.growth == VMGrowthType::Upward,
-                        next_range.growth == VMGrowthType::Downward,
-                    ),
-                });
-                if min_big_enough_free_space == None || free_space < min_big_enough_free_space {
-                    min_big_enough_free_space = free_space;
-                    match addr {
-                        VMAddrOption::Hint(addr) | VMAddrOption::Fixed(addr) => break,
-                        _ => {}
-                    }
-                }
-            }
-            if min_big_enough_free_space.is_none() {
-                return errno!(ENOMEM, "No enough space");
-            }
-            min_big_enough_free_space.unwrap()
-        };
-        // Given the free space, determine the start and end of the sub-range
-        let (new_subrange_start, new_subrange_end) = match addr {
-            VMAddrOption::Any | VMAddrOption::Beyond(_) => {
-                let should_no_gap_to_pre_domain =
-                    free_space.may_neighbor_grow.0 == false && growth != VMGrowthType::Downward;
-                let should_no_gap_to_next_domain =
-                    free_space.may_neighbor_grow.1 == false && growth != VMGrowthType::Upward;
-                let domain_start = if should_no_gap_to_pre_domain {
-                    free_space.start
-                } else if should_no_gap_to_next_domain {
-                    free_space.end - size
-                } else {
-                    // We want to leave some space at both ends in case
-                    // this sub-range or neighbor sub-range needs to grow later.
-                    // As a simple heuristic, we put this sub-range near the
-                    // center between the previous and next sub-ranges.
-                    free_space.start + (free_space.get_size() - size) / 2
-                };
-                (domain_start, domain_start + size)
-            }
-            VMAddrOption::Fixed(addr) => (addr, addr + size),
-            VMAddrOption::Hint(addr) => {
-                return errno!(EINVAL, "Not implemented");
-            }
-        };
-        let new_subrange_inner = VMRangeInner::new(new_subrange_start, new_subrange_end, growth);
+        // Find a free space that satisfies the options
+        let free_space = self.look_for_free_space(options)?;
+        let (new_subrange_start, new_subrange_end) =
+            self.alloc_from_free_space(&free_space, options);
+        debug_assert!(free_space.contains(new_subrange_start));
+        debug_assert!(free_space.contains(new_subrange_end));
+        let new_subrange_inner = VMRangeInner::new(new_subrange_start,
+                                                   new_subrange_end,
+                                                   options.growth);
         self.get_subranges_mut()
             .insert(free_space.index_in_subranges, new_subrange_inner);
         // Although there are two copies of the newly created VMRangeInner obj,
         // we can keep them in sync as all mutation on VMRange object must
         // be carried out through dealloc_subrange() and resize_subrange() that
@@ -223,7 +136,7 @@ impl VMRange {
         }
     }
 
-    fn init_subranges(&mut self) -> Result<(), Error> {
+    fn init_subrange_array(&mut self) -> Result<(), Error> {
         // Use dummy VMRange as sentinel object at both ends to make the allocation
         // and deallocation algorithm simpler
         let start = self.get_start();
@@ -234,6 +147,132 @@ impl VMRange {
         Ok(())
     }
 
+    // Find a free space for allocating a sub VMRange
+    fn look_for_free_space(&mut self, options: &VMAllocOptions) -> Result<FreeSpace, Error> {
+        // TODO: reduce the complexity from O(N) to O(log(N)), where N is
+        // the number of existing subranges.
+
+        // Lazy initialize the subrange array upon the first allocation
+        if !self.has_subranges() {
+            self.init_subrange_array()?;
+        }
+
+        // Get valid parameters from options
+        let size = options.size;
+        let addr = options.addr;
+        let growth = options.growth;
+
+        // Record the minimal free space that satisfies the options
+        let mut min_big_enough_free_space: Option<FreeSpace> = None;
+
+        let sub_ranges = self.get_subranges();
+        for (idx, range_pair) in sub_ranges.windows(2).enumerate() {
+            let pre_range = &range_pair[0];
+            let next_range = &range_pair[1];
+
+            let (free_range_start, free_range_end) = {
+                let free_range_start = pre_range.get_end();
+                let free_range_end = next_range.get_start();
+
+                let free_range_size = free_range_end - free_range_start;
+                if free_range_size < size {
+                    continue;
+                }
+
+                (free_range_start, free_range_end)
+            };
+
+            let mut free_space = FreeSpace {
+                index_in_subranges: idx + 1,
+                start: free_range_start,
+                end: free_range_end,
+                may_neighbor_grow: (
+                    pre_range.growth == VMGrowthType::Upward,
+                    next_range.growth == VMGrowthType::Downward,
+                ),
+            };
+
+            match addr {
+                // Want a minimal free_space
+                VMAddrOption::Any => {}
+                // Prefer to have free_space.start == addr
+                VMAddrOption::Hint(addr) => {
+                    if free_space.contains(addr) {
+                        if free_space.end - addr >= size {
+                            free_space.start = addr;
+                            return Ok(free_space);
+                        }
+                    }
+                }
+                // Must have free_space.start == addr
+                VMAddrOption::Fixed(addr) => {
+                    if !free_space.contains(addr) {
+                        continue;
+                    }
+                    if free_space.end - addr < size {
+                        return errno!(ENOMEM, "not enough memory");
+                    }
+                    free_space.start = addr;
+                    return Ok(free_space);
+                }
+                // Must have free_space.start >= addr
+                VMAddrOption::Beyond(addr) => {
+                    if free_space.end < addr {
+                        continue;
+                    }
+                    if free_space.contains(addr) {
+                        free_space.start = addr;
+                        if free_space.get_size() < size {
+                            continue;
+                        }
+                    }
+                }
+            }
+
+            if min_big_enough_free_space == None ||
+                free_space < *min_big_enough_free_space.as_ref().unwrap() {
+                min_big_enough_free_space = Some(free_space);
+            }
+        }
+
+        min_big_enough_free_space
+            .ok_or_else(|| Error::new(Errno::ENOMEM, "not enough space"))
+    }
+
+    fn alloc_from_free_space(&self, free_space: &FreeSpace, options: &VMAllocOptions) -> (usize, usize) {
+        // Get valid parameters from options
+        let size = options.size;
+        let addr_option = options.addr;
+        let growth = options.growth;
+
+        if let VMAddrOption::Fixed(addr) = addr_option {
+            return (addr, addr + size);
+        } else if let VMAddrOption::Hint(addr) = addr_option {
+            if free_space.start == addr {
+                return (addr, addr + size);
+            }
+        }
+
+        let should_no_gap_to_pre_domain =
+            free_space.may_neighbor_grow.0 == false && growth != VMGrowthType::Downward;
+        let should_no_gap_to_next_domain =
+            free_space.may_neighbor_grow.1 == false && growth != VMGrowthType::Upward;
+
+        let addr = if should_no_gap_to_pre_domain {
+            free_space.start
+        } else if should_no_gap_to_next_domain {
+            free_space.end - size
+        } else {
+            // We want to leave some space at both ends in case
+            // this sub-range or neighbor sub-range needs to grow later.
+            // As a simple heuristic, we put this sub-range near the
+            // center between the previous and next sub-ranges.
+            free_space.start + (free_space.get_size() - size) / 2
+        };
+
+        (addr, addr + size)
+    }
+
     fn ensure_subrange_is_a_child(&self, subrange: &VMRange) {
         // FIXME:
         /*if subrange.parent_range != self as *const VMRange {
@@ -478,9 +517,14 @@ impl FreeSpace {
         pressure += if self.may_neighbor_grow.1 { 1 } else { 0 };
         pressure
     }
 
     fn get_size(&self) -> usize {
         self.end - self.start
     }
+
+    fn contains(&self, addr: usize) -> bool {
+        self.start <= addr && addr < self.end
+    }
 }
 
 impl PartialEq for FreeSpace {
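As a worked example of the centering heuristic in alloc_from_free_space() (the numbers are made up): if both neighbors may grow and the free gap is [0x1000, 0x9000) while the request is 0x2000 bytes, the start becomes 0x1000 + (0x8000 - 0x2000) / 2 = 0x4000, so the new sub-range [0x4000, 0x6000) keeps 0x3000 bytes of slack on each side for future growth.

    // Sketch of the arithmetic only; the values are hypothetical.
    let (gap_start, gap_end, size) = (0x1000usize, 0x9000usize, 0x2000usize);
    let start = gap_start + ((gap_end - gap_start) - size) / 2;
    assert_eq!(start, 0x4000);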