Refactor VMRange code and fix two bugs

The first bug is that a VMRange could end up allocated at an address that is not 4KB-aligned; the fix lands in the placement heuristic of VMRange::alloc_from_free_space. The second bug is that a VMRange could escape being deallocated by its parent VMRange; the fix replaces the raw parent_range pointer bookkeeping with an explicit is_dealloced flag and dealloc APIs that consume their argument by value.
Authored by Tate, Hongliang Tian on 2019-04-11 18:26:40 +08:00; committed by Tate Tian
parent 895f70a2dc
commit 862601604c
7 changed files with 266 additions and 249 deletions
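
To make the first fix concrete before diving into the hunks: centering a sub-range inside a free space can produce a start address that is not page-aligned unless the centering offset itself is rounded down to a page boundary. The sketch below is illustrative only; align_down is copied from the helper in this diff, and the two placement expressions mirror VMRange::alloc_from_free_space before and after the fix.

```rust
// align_down mirrors the helper in this commit: round addr down to a
// multiple of align, where align must be a power of two.
fn align_down(addr: usize, align: usize) -> usize {
    addr & !(align - 1)
}

const PAGE_SIZE: usize = 4096;

fn main() {
    // A 0x7000-byte free space at 0x1000, into which 0x2000 bytes are placed.
    let (free_start, free_size, size) = (0x1000usize, 0x7000usize, 0x2000usize);

    // Buggy placement: the centering offset (0x5000 / 2 = 0x2800) is not a
    // multiple of PAGE_SIZE, so the sub-range would start at 0x3800.
    let buggy = free_start + (free_size - size) / 2;
    assert_eq!(buggy % PAGE_SIZE, 0x800);

    // Fixed placement: align the offset down first, keeping the start 4KB-aligned.
    let fixed = free_start + align_down((free_size - size) / 2, PAGE_SIZE);
    assert_eq!(fixed % PAGE_SIZE, 0);
}
```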

@@ -55,3 +55,7 @@ pub fn align_up(addr: usize, align: usize) -> usize {
 pub fn align_down(addr: usize, align: usize) -> usize {
     addr & !(align - 1)
 }
+
+pub fn unbox<T>(value: Box<T>) -> T {
+    *value
+}

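The new unbox helper simply moves a value out of its Box. It exists so that the dealloc_* callers later in this diff can hand owned VMArea/VMDomain values back to their parents. A tiny usage sketch (the String payload is illustrative):

```rust
pub fn unbox<T>(value: Box<T>) -> T {
    *value
}

fn main() {
    let boxed = Box::new(String::from("heap_vma"));
    let owned: String = unbox(boxed); // moves the value out; the Box itself is freed
    assert_eq!(owned, "heap_vma");
}
```
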
@@ -3,17 +3,15 @@ use prelude::*;
 use process::{get_current, Process, ProcessRef};
 use std::fmt;
 
+// TODO: Rename VMSpace to VMUniverse
+
 #[macro_use]
 mod vm_range;
-mod process_vm;
 mod vm_area;
-mod vm_domain;
-mod vm_space;
+mod process_vm;
 
-pub use self::process_vm::ProcessVM;
 pub use self::vm_range::{VMRange, VMRangeTrait};
+pub use self::vm_area::{VMSpace, VMDomain, VMArea, VMAreaFlags, VM_AREA_FLAG_R, VM_AREA_FLAG_W, VM_AREA_FLAG_X};
+pub use self::process_vm::ProcessVM;
 
 // TODO: separate proc and flags
 // TODO: accept fd and offset
@@ -56,23 +54,6 @@ pub fn do_brk(addr: usize) -> Result<usize, Error> {
 pub const PAGE_SIZE: usize = 4096;
 
-#[derive(Debug)]
-pub struct VMSpace {
-    range: VMRange,
-    guard_type: VMGuardAreaType,
-}
-
-#[derive(Debug, Default)]
-pub struct VMDomain {
-    range: VMRange,
-}
-
-#[derive(Debug, Default)]
-pub struct VMArea {
-    range: VMRange,
-    flags: VMAreaFlags,
-}
-
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub enum VMGuardAreaType {
     None,
@@ -80,32 +61,13 @@ pub enum VMGuardAreaType {
     Dynamic { size: usize },
 }
 
-#[derive(Copy, Clone, Debug, Default, PartialEq)]
-pub struct VMAreaFlags(pub u32);
-
-pub const VM_AREA_FLAG_R: u32 = 0x1;
-pub const VM_AREA_FLAG_W: u32 = 0x2;
-pub const VM_AREA_FLAG_X: u32 = 0x4;
-
-impl VMAreaFlags {
-    pub fn can_execute(&self) -> bool {
-        self.0 & VM_AREA_FLAG_X == VM_AREA_FLAG_X
-    }
-
-    pub fn can_write(&self) -> bool {
-        self.0 & VM_AREA_FLAG_W == VM_AREA_FLAG_W
-    }
-
-    pub fn can_read(&self) -> bool {
-        self.0 & VM_AREA_FLAG_R == VM_AREA_FLAG_R
-    }
-}
-
-#[derive(Clone, Copy, PartialEq, Default)]
+#[derive(Clone, PartialEq, Default)]
 pub struct VMAllocOptions {
     size: usize,
     addr: VMAddrOption,
     growth: VMGrowthType,
+    description: String,
 }
 
 impl VMAllocOptions {
@@ -131,6 +93,11 @@ impl VMAllocOptions {
         self.growth = growth;
         Ok(self)
     }
+
+    pub fn description(&mut self, description: &str) -> Result<&mut Self, Error> {
+        self.description = description.to_owned();
+        Ok(self)
+    }
 }
 
 impl fmt::Debug for VMAllocOptions {

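The new description setter follows the same fallible-builder shape as the existing addr and growth setters, which is what enables the options.addr(..)?.growth(..)?.description(..)? chains used below. A stripped-down model of the pattern (the AllocOptions and Error types here are simplified stand-ins, not the real ones):

```rust
#[derive(Debug)]
struct Error(&'static str);

#[derive(Clone, PartialEq, Default)]
struct AllocOptions {
    size: usize,
    description: String,
}

impl AllocOptions {
    fn new(size: usize) -> Result<AllocOptions, Error> {
        if size == 0 {
            return Err(Error("size must be non-zero"));
        }
        Ok(AllocOptions { size, ..Default::default() })
    }

    // Returning Result<&mut Self, Error> is what lets setters chain with `?`.
    fn description(&mut self, desc: &str) -> Result<&mut Self, Error> {
        self.description = desc.to_owned();
        Ok(self)
    }
}

fn demo() -> Result<AllocOptions, Error> {
    let mut options = AllocOptions::new(4096)?;
    options.description("heap_vma")?;
    Ok(options)
}

fn main() {
    assert_eq!(demo().unwrap().description, "heap_vma");
}
```
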
@@ -11,7 +11,7 @@ lazy_static! {
         (addr, size)
     };
     let vm_space = unsafe {
-        match VMSpace::new(addr, size, VMGuardAreaType::None) {
+        match VMSpace::new(addr, size, VMGuardAreaType::None, "DATA_SPACE") {
             Ok(vm_space) => vm_space,
             Err(_) => panic!("Failed to create a VMSpace"),
         }
@@ -27,11 +27,11 @@ extern "C" {
 #[derive(Debug, Default)]
 pub struct ProcessVM {
     //code_domain: VMDomain,
-    data_domain: VMDomain,
-    code_vma: VMArea,
-    data_vma: VMArea,
-    heap_vma: VMArea,
-    stack_vma: VMArea,
+    data_domain: Option<Box<VMDomain>>,
+    code_vma: Option<Box<VMArea>>,
+    data_vma: Option<Box<VMArea>>,
+    heap_vma: Option<Box<VMArea>>,
+    stack_vma: Option<Box<VMArea>>,
     mmap_vmas: Vec<Box<VMArea>>,
     brk: usize,
 }
@@ -44,29 +44,39 @@ impl ProcessVM {
         stack_size: usize,
         mmap_size: usize,
     ) -> Result<ProcessVM, Error> {
-        let data_domain_size = code_size + data_size + heap_size + stack_size + mmap_size;
-        let mut data_domain = DATA_SPACE.lock().unwrap().alloc_domain(data_domain_size)?;
-
-        let (code_vma, data_vma, heap_vma, stack_vma) = ProcessVM::alloc_vmas(
-            &mut data_domain,
-            code_size,
-            data_size,
-            heap_size,
-            stack_size,
-        )?;
+        // Allocate the data domain from the global data space
+        let mut data_domain = {
+            let data_domain_size = code_size + data_size + heap_size
+                + stack_size + mmap_size;
+            let data_domain = DATA_SPACE.lock().unwrap().alloc_domain(
+                data_domain_size, "data_domain")?;
+            data_domain
+        };
+        // Allocate vmas from the data domain
+        let (code_vma, data_vma, heap_vma, stack_vma) =
+            match ProcessVM::alloc_vmas(&mut data_domain, code_size,
+                                        data_size, heap_size, stack_size) {
+                Err(e) => {
+                    // Note: we need to handle error here so that we can
+                    // deallocate the data domain explicitly.
+                    DATA_SPACE.lock().unwrap().dealloc_domain(data_domain);
+                    return Err(e);
+                },
+                Ok(vmas) => vmas,
+            };
         // Initial value of the program break
         let brk = heap_vma.get_start();
         // No mmapped vmas initially
        let mmap_vmas = Vec::new();
 
         let vm = ProcessVM {
-            data_domain,
-            code_vma,
-            data_vma,
-            heap_vma,
-            stack_vma,
-            mmap_vmas,
-            brk,
+            data_domain: Some(Box::new(data_domain)),
+            code_vma: Some(Box::new(code_vma)),
+            data_vma: Some(Box::new(data_vma)),
+            heap_vma: Some(Box::new(heap_vma)),
+            stack_vma: Some(Box::new(stack_vma)),
+            mmap_vmas: mmap_vmas,
+            brk: brk,
         };
         Ok(vm)
     }
@@ -79,11 +89,12 @@ impl ProcessVM {
         stack_size: usize,
     ) -> Result<(VMArea, VMArea, VMArea, VMArea), Error> {
         let mut addr = data_domain.get_start();
         let mut alloc_vma_continuously =
-            |addr: &mut usize, size, flags, growth| -> Result<_, Error> {
+            |addr: &mut usize, size, flags, growth, desc| -> Result<_, Error> {
                 let mut options = VMAllocOptions::new(size)?;
-                options.addr(VMAddrOption::Fixed(*addr))?.growth(growth)?;
+                options.addr(VMAddrOption::Fixed(*addr))?
+                    .growth(growth)?
+                    .description(desc)?;
                 let new_vma = data_domain.alloc_area(&options, flags)?;
                 *addr += size;
                 Ok(new_vma)
@@ -92,39 +103,39 @@ impl ProcessVM {
         let rx_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_X);
         let rw_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_W);
 
-        let code_vma = alloc_vma_continuously(&mut addr, code_size, rx_flags, VMGrowthType::Fixed)?;
-        let data_vma = alloc_vma_continuously(&mut addr, data_size, rw_flags, VMGrowthType::Fixed)?;
-        let heap_vma = alloc_vma_continuously(&mut addr, 0, rw_flags, VMGrowthType::Upward)?;
+        let code_vma = alloc_vma_continuously(&mut addr, code_size, rx_flags, VMGrowthType::Fixed, "code_vma")?;
+        let data_vma = alloc_vma_continuously(&mut addr, data_size, rw_flags, VMGrowthType::Fixed, "data_vma")?;
+        let heap_vma = alloc_vma_continuously(&mut addr, 0, rw_flags, VMGrowthType::Upward, "heap_vma")?;
         // Preserve the space for heap
         addr += heap_size;
         // After the heap is the stack
         let stack_vma =
-            alloc_vma_continuously(&mut addr, stack_size, rw_flags, VMGrowthType::Downward)?;
+            alloc_vma_continuously(&mut addr, stack_size, rw_flags, VMGrowthType::Downward, "stack_vma")?;
         Ok((code_vma, data_vma, heap_vma, stack_vma))
     }
 
     pub fn get_base_addr(&self) -> usize {
-        self.code_vma.get_start()
+        self.get_code_vma().get_start()
     }
 
     pub fn get_code_vma(&self) -> &VMArea {
-        &self.code_vma
+        &self.code_vma.as_ref().unwrap()
     }
 
     pub fn get_data_vma(&self) -> &VMArea {
-        &self.data_vma
+        &self.data_vma.as_ref().unwrap()
     }
 
     pub fn get_heap_vma(&self) -> &VMArea {
-        &self.heap_vma
+        &self.heap_vma.as_ref().unwrap()
     }
 
     pub fn get_stack_vma(&self) -> &VMArea {
-        &self.stack_vma
+        &self.stack_vma.as_ref().unwrap()
     }
 
     pub fn get_stack_top(&self) -> usize {
-        self.stack_vma.get_end()
+        self.get_stack_vma().get_end()
     }
 
     pub fn get_mmap_vmas(&self) -> &[Box<VMArea>] {
@@ -163,7 +174,8 @@ impl ProcessVM {
             alloc_options
         };
         // TODO: when failed, try to resize data_domain
-        let new_mmap_vma = self.data_domain.alloc_area(&alloc_options, flags)?;
+        let new_mmap_vma = self.get_data_domain_mut()
+            .alloc_area(&alloc_options, flags)?;
         let addr = new_mmap_vma.get_start();
         self.mmap_vmas.push(Box::new(new_mmap_vma));
         Ok(addr)
@@ -184,8 +196,8 @@ impl ProcessVM {
             mmap_vma_i.unwrap()
         };
 
-        let mut removed_mmap_vma = self.mmap_vmas.swap_remove(mmap_vma_i);
-        self.data_domain.dealloc_area(&mut removed_mmap_vma);
+        let removed_mmap_vma = self.mmap_vmas.swap_remove(mmap_vma_i);
+        self.get_data_domain_mut().dealloc_area(unbox(removed_mmap_vma));
         Ok(())
     }
@@ -200,43 +212,52 @@ impl ProcessVM {
     }
 
     pub fn brk(&mut self, new_brk: usize) -> Result<usize, Error> {
+        let (heap_start, heap_end) = {
+            let heap_vma = self.heap_vma.as_ref().unwrap();
+            (heap_vma.get_start(), heap_vma.get_end())
+        };
         if new_brk == 0 {
             return Ok(self.get_brk());
-        } else if new_brk < self.heap_vma.get_start() {
+        } else if new_brk < heap_start {
             return errno!(EINVAL, "New brk address is too low");
-        } else if new_brk <= self.heap_vma.get_end() {
-            self.brk = new_brk;
-            return Ok(new_brk);
-        }
-
-        // TODO: init the memory with zeros for the expanded area
-        let resize_options = {
-            let brk_start = self.get_brk_start();
-            let new_heap_size = align_up(new_brk, 4096) - brk_start;
-            let mut options = VMResizeOptions::new(new_heap_size)?;
-            options.addr(VMAddrOption::Fixed(brk_start));
-            options
-        };
-        self.data_domain
-            .resize_area(&mut self.heap_vma, &resize_options)?;
-        Ok(new_brk)
+        } else if new_brk > heap_end {
+            // TODO: init the memory with zeros for the expanded area
+            let resize_options = {
+                let new_heap_end = align_up(new_brk, 4096);
+                let new_heap_size = new_heap_end - heap_start;
+                let mut options = VMResizeOptions::new(new_heap_size)?;
+                options.addr(VMAddrOption::Fixed(heap_start));
+                options
+            };
+            let heap_vma = self.heap_vma.as_mut().unwrap();
+            let data_domain = self.data_domain.as_mut().unwrap();
+            data_domain.resize_area(heap_vma, &resize_options)?;
+        }
+        self.brk = new_brk;
+        return Ok(new_brk);
+    }
+
+    fn get_data_domain_mut(&mut self) -> &mut Box<VMDomain> {
+        self.data_domain.as_mut().unwrap()
     }
 }
 
 impl Drop for ProcessVM {
     fn drop(&mut self) {
-        let data_domain = &mut self.data_domain;
-
         // Remove all vma from the domain
-        data_domain.dealloc_area(&mut self.code_vma);
-        data_domain.dealloc_area(&mut self.data_vma);
-        data_domain.dealloc_area(&mut self.heap_vma);
-        data_domain.dealloc_area(&mut self.stack_vma);
-        for mmap_vma in &mut self.mmap_vmas {
-            data_domain.dealloc_area(mmap_vma);
+        {
+            let data_domain = self.data_domain.as_mut().unwrap();
+            data_domain.dealloc_area(unbox(self.code_vma.take().unwrap()));
+            data_domain.dealloc_area(unbox(self.data_vma.take().unwrap()));
+            data_domain.dealloc_area(unbox(self.heap_vma.take().unwrap()));
+            data_domain.dealloc_area(unbox(self.stack_vma.take().unwrap()));
+            for mmap_vma in self.mmap_vmas.drain(..) {
+                data_domain.dealloc_area(unbox(mmap_vma));
            }
         }
 
         // Remove the domain from its parent space
-        DATA_SPACE.lock().unwrap().dealloc_domain(data_domain);
+        DATA_SPACE.lock().unwrap().dealloc_domain(
+            unbox(self.data_domain.take().unwrap()));
     }
 }

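The Option<Box<...>> fields introduced above are the caller-side half of the second bug fix: Drop can now take() each VMA and pass an owned value to dealloc_area, so no area can be dropped without first being consumed by its parent domain. A minimal model of the pattern (all types here are simplified stand-ins):

```rust
fn unbox<T>(value: Box<T>) -> T {
    *value
}

struct Area {
    name: &'static str,
}

struct Domain;

impl Domain {
    // Taking Area by value forces every area to be consumed by its parent;
    // nothing is left behind for an accidental second drop.
    fn dealloc_area(&mut self, area: Area) {
        println!("dealloc'ed {}", area.name);
    }
}

struct ProcessVm {
    domain: Option<Box<Domain>>,
    heap_vma: Option<Box<Area>>,
}

impl Drop for ProcessVm {
    fn drop(&mut self) {
        let domain = self.domain.as_mut().unwrap();
        // take() moves the Box out and leaves None behind, so the field
        // cannot be dealloc'ed or dropped a second time after this point.
        domain.dealloc_area(unbox(self.heap_vma.take().unwrap()));
    }
}

fn main() {
    let vm = ProcessVm {
        domain: Some(Box::new(Domain)),
        heap_vma: Some(Box::new(Area { name: "heap_vma" })),
    };
    drop(vm); // prints "dealloc'ed heap_vma" exactly once
}
```
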
@@ -1,6 +1,107 @@
 use super::*;
 
-impl super::VMArea {
+#[derive(Debug)]
+pub struct VMSpace {
+    range: VMRange,
+    guard_type: VMGuardAreaType,
+}
+
+impl_vmrange_trait_for!(VMSpace, range);
+
+impl VMSpace {
+    pub unsafe fn new(
+        addr: usize,
+        size: usize,
+        guard_type: VMGuardAreaType,
+        desc: &str,
+    ) -> Result<VMSpace, Error> {
+        let addr = align_up(addr, PAGE_SIZE);
+        let size = align_down(size, PAGE_SIZE);
+        let range = unsafe { VMRange::new(addr, addr + size, VMGrowthType::Fixed, desc)? };
+        Ok(VMSpace { range, guard_type })
+    }
+
+    pub fn get_guard_type(&self) -> VMGuardAreaType {
+        self.guard_type
+    }
+
+    pub fn alloc_domain(&mut self, size: usize, desc: &str) -> Result<VMDomain, Error> {
+        let mut options = VMAllocOptions::new(size)?;
+        options.growth(VMGrowthType::Upward)?
+            .description(desc)?;
+        let new_range = self.range.alloc_subrange(&options)?;
+        Ok(VMDomain { range: new_range })
+    }
+
+    pub fn dealloc_domain(&mut self, mut domain: VMDomain) {
+        self.range.dealloc_subrange(&mut domain.range)
+    }
+
+    pub fn resize_domain(&mut self, domain: &mut VMDomain, new_size: usize) -> Result<(), Error> {
+        let options = VMResizeOptions::new(new_size)?;
+        self.range.resize_subrange(&mut domain.range, &options)
+    }
+}
+
+#[derive(Debug)]
+pub struct VMDomain {
+    range: VMRange,
+}
+
+impl_vmrange_trait_for!(VMDomain, range);
+
+impl VMDomain {
+    pub fn alloc_area(
+        &mut self,
+        options: &VMAllocOptions,
+        flags: VMAreaFlags,
+    ) -> Result<VMArea, Error> {
+        let new_range = self.range.alloc_subrange(options)?;
+
+        // Init the memory area with all zeros
+        unsafe {
+            let mem_ptr = new_range.get_start() as *mut c_void;
+            let mem_size = new_range.get_size() as size_t;
+            memset(mem_ptr, 0 as c_int, mem_size);
+        }
+
+        Ok(VMArea {
+            range: new_range,
+            flags: flags,
+        })
+    }
+
+    pub fn dealloc_area(&mut self, mut area: VMArea) {
+        self.range.dealloc_subrange(&mut area.range)
+    }
+
+    pub fn resize_area(
+        &mut self,
+        area: &mut VMArea,
+        options: &VMResizeOptions,
+    ) -> Result<(), Error> {
+        // TODO: init memory with zeros when expanding!
+        self.range.resize_subrange(&mut area.range, options)
+    }
+}
+
+#[link(name = "sgx_tstdc")]
+extern "C" {
+    pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
+}
+
+#[derive(Debug)]
+pub struct VMArea {
+    range: VMRange,
+    flags: VMAreaFlags,
+}
+
+impl_vmrange_trait_for!(VMArea, range);
+
+impl VMArea {
     pub fn get_flags(&self) -> &VMAreaFlags {
         &self.flags
     }
@@ -9,3 +110,25 @@ impl super::VMArea {
         &mut self.flags
     }
 }
+
+#[derive(Copy, Clone, Debug, Default, PartialEq)]
+pub struct VMAreaFlags(pub u32);
+
+pub const VM_AREA_FLAG_R: u32 = 0x1;
+pub const VM_AREA_FLAG_W: u32 = 0x2;
+pub const VM_AREA_FLAG_X: u32 = 0x4;
+
+impl VMAreaFlags {
+    pub fn can_execute(&self) -> bool {
+        self.0 & VM_AREA_FLAG_X == VM_AREA_FLAG_X
+    }
+
+    pub fn can_write(&self) -> bool {
+        self.0 & VM_AREA_FLAG_W == VM_AREA_FLAG_W
+    }
+
+    pub fn can_read(&self) -> bool {
+        self.0 & VM_AREA_FLAG_R == VM_AREA_FLAG_R
+    }
+}

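VMAreaFlags moves into this file unchanged: a plain bitmask over u32 whose predicates each test a single bit. A short usage sketch matching how ProcessVM::alloc_vmas combines the flags:

```rust
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct VMAreaFlags(pub u32);

pub const VM_AREA_FLAG_R: u32 = 0x1;
pub const VM_AREA_FLAG_W: u32 = 0x2;
pub const VM_AREA_FLAG_X: u32 = 0x4;

impl VMAreaFlags {
    pub fn can_read(&self) -> bool {
        self.0 & VM_AREA_FLAG_R == VM_AREA_FLAG_R
    }

    pub fn can_execute(&self) -> bool {
        self.0 & VM_AREA_FLAG_X == VM_AREA_FLAG_X
    }
}

fn main() {
    // rx_flags as built in ProcessVM::alloc_vmas: readable and executable.
    let rx_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_X);
    assert!(rx_flags.can_read() && rx_flags.can_execute());
    assert_eq!(rx_flags.0 & VM_AREA_FLAG_W, 0); // but not writable
}
```
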
@@ -1,41 +0,0 @@
-use super::*;
-
-impl VMDomain {
-    pub fn alloc_area(
-        &mut self,
-        options: &VMAllocOptions,
-        flags: VMAreaFlags,
-    ) -> Result<VMArea, Error> {
-        let new_range = self.range.alloc_subrange(options)?;
-
-        // Init the memory area with all zeros
-        unsafe {
-            let mem_ptr = new_range.get_start() as *mut c_void;
-            let mem_size = new_range.get_size() as size_t;
-            memset(mem_ptr, 0 as c_int, mem_size);
-        }
-
-        Ok(VMArea {
-            range: new_range,
-            flags: flags,
-        })
-    }
-
-    pub fn dealloc_area(&mut self, area: &mut VMArea) {
-        self.range.dealloc_subrange(&mut area.range)
-    }
-
-    pub fn resize_area(
-        &mut self,
-        area: &mut VMArea,
-        options: &VMResizeOptions,
-    ) -> Result<(), Error> {
-        // TODO: init memory with zeros when expanding!
-        self.range.resize_subrange(&mut area.range, options)
-    }
-}
-
-#[link(name = "sgx_tstdc")]
-extern "C" {
-    pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
-}

@@ -35,44 +35,51 @@ macro_rules! impl_vmrange_trait_for {
     };
 }
 
-impl_vmrange_trait_for!(VMRange, inner);
-impl_vmrange_trait_for!(VMSpace, range);
-impl_vmrange_trait_for!(VMDomain, range);
-impl_vmrange_trait_for!(VMArea, range);
-
 #[derive(Debug)]
 pub struct VMRange {
     inner: VMRangeInner,
-    parent_range: *const VMRange,
     sub_ranges: Option<Vec<VMRangeInner>>,
+    is_dealloced: bool,
+    description: String,
 }
 
+impl_vmrange_trait_for!(VMRange, inner);
+
 impl VMRange {
-    pub unsafe fn new(start: usize, end: usize, growth: VMGrowthType) -> Result<VMRange, Error> {
+    pub unsafe fn new(start: usize, end: usize, growth: VMGrowthType, description: &str) -> Result<VMRange, Error> {
         if start % PAGE_SIZE != 0 || end % PAGE_SIZE != 0 {
             return errno!(EINVAL, "Invalid start and/or end");
         }
         Ok(VMRange {
             inner: VMRangeInner::new(start, end, growth),
-            parent_range: 0 as *const VMRange,
             sub_ranges: None,
+            is_dealloced: false,
+            description: description.to_owned(),
         })
     }
 
     pub fn alloc_subrange(&mut self, options: &VMAllocOptions) -> Result<VMRange, Error> {
+        debug_assert!(!self.is_dealloced);
+
+        // Lazy initialize the subrange array upon the first allocation
+        if self.sub_ranges.is_none() {
+            self.init_subrange_array()?;
+        }
+
         // Find a free space that satisfies the options
         let free_space = self.look_for_free_space(options)?;
-        let (new_subrange_start, new_subrange_end) =
-            self.alloc_from_free_space(&free_space, options);
-        debug_assert!(free_space.contains(new_subrange_start));
-        debug_assert!(free_space.contains(new_subrange_end));
-        let new_subrange_inner = VMRangeInner::new(new_subrange_start,
-                                                   new_subrange_end,
-                                                   options.growth);
+        // Allocate a new subrange from the free space
+        let (new_subrange_idx, new_subrange_inner) = {
+            let (new_subrange_start, new_subrange_end) =
+                self.alloc_from_free_space(&free_space, options);
+            debug_assert!(free_space.contains(new_subrange_start));
+            debug_assert!(free_space.contains(new_subrange_end));
+            (free_space.index_in_subranges, VMRangeInner::new(
+                new_subrange_start, new_subrange_end, options.growth))
+        };
         self.get_subranges_mut()
-            .insert(free_space.index_in_subranges, new_subrange_inner);
+            .insert(new_subrange_idx, new_subrange_inner);
 
         // Although there are two copies of the newly created VMRangeInner obj,
         // we can keep them in sync as all mutation on VMRange object must
@@ -82,29 +89,28 @@ impl VMRange {
         // other in child, in dealloc_subrange and resize_subrange functions.
         Ok(VMRange {
             inner: new_subrange_inner,
-            parent_range: self as *const VMRange,
             sub_ranges: None,
+            is_dealloced: false,
+            description: options.description.clone(),
         })
     }
 
     pub fn dealloc_subrange(&mut self, subrange: &mut VMRange) {
-        self.ensure_subrange_is_a_child(subrange);
-        if subrange.has_subranges() {
-            panic!("A range can only be dealloc'ed when it has no sub-ranges");
-        }
+        debug_assert!(!self.is_dealloced);
+        debug_assert!(!subrange.is_dealloced);
+        debug_assert!(self.sub_ranges.is_some());
 
         // Remove the sub-range
         let domain_i = self.position_subrange(subrange);
         self.get_subranges_mut().remove(domain_i);
 
         // When all sub-ranges are removed, remove the sub-range array
         if self.get_subranges().len() == 2 {
             // two sentinel sub-ranges excluded
             self.sub_ranges = None;
         }
 
-        // Mark a range as dealloc'ed
-        subrange.mark_as_dealloced();
+        subrange.inner.end = subrange.inner.start;
+        subrange.is_dealloced = true;
     }
 
     pub fn resize_subrange(
@@ -112,7 +118,9 @@ impl VMRange {
         subrange: &mut VMRange,
         options: &VMResizeOptions,
     ) -> Result<(), Error> {
-        self.ensure_subrange_is_a_child(subrange);
+        debug_assert!(!self.is_dealloced);
+        debug_assert!(!subrange.is_dealloced);
+        debug_assert!(self.sub_ranges.is_some());
 
         // Get valid parameters from options
         let new_size = options.new_size;
@@ -136,6 +144,10 @@
         }
     }
 
+    pub fn get_description(&self) -> &str {
+        &self.description
+    }
+
     fn init_subrange_array(&mut self) -> Result<(), Error> {
         // Use dummy VMRange as sentinel object at both ends to make the allocation
         // and deallocation algorithm simpler
@@ -152,11 +164,6 @@
         // TODO: reduce the complexity from O(N) to O(log(N)), where N is
         // the number of existing subranges.
 
-        // Lazy initialize the subrange array upon the first allocation
-        if !self.has_subranges() {
-            self.init_subrange_array()?;
-        }
-
         // Get valid parameters from options
         let size = options.size;
         let addr = options.addr;
@@ -267,19 +274,13 @@
             // this sub-range or neighbor sub-range needs to grow later.
             // As a simple heuristic, we put this sub-range near the
             // center between the previous and next sub-ranges.
-            free_space.start + (free_space.get_size() - size) / 2
+            let offset = align_down((free_space.get_size() - size) / 2, PAGE_SIZE);
+            free_space.start + offset
         };
         (addr, addr + size)
     }
 
-    fn ensure_subrange_is_a_child(&self, subrange: &VMRange) {
-        // FIXME:
-        /*if subrange.parent_range != self as *const VMRange {
-            panic!("This range does not contain the given sub-range");
-        }*/
-    }
-
     fn position_subrange(&self, subrange: &VMRange) -> usize {
         let sub_ranges = self.get_subranges();
         sub_ranges
@@ -296,10 +297,6 @@
         self.sub_ranges.as_mut().unwrap()
     }
 
-    fn has_subranges(&self) -> bool {
-        self.sub_ranges.is_some()
-    }
-
     fn shrink_subrange_to(&mut self, subrange: &mut VMRange, new_size: usize) -> Result<(), Error> {
         let subrange_i = self.position_subrange(subrange);
         let subranges = self.get_subranges_mut();
@@ -379,15 +376,6 @@
         }
         Ok(())
     }
-
-    fn mark_as_dealloced(&mut self) {
-        self.parent_range = 0 as *const VMRange;
-        self.inner.start = self.inner.end;
-    }
-
-    fn is_dealloced(&self) -> bool {
-        self.parent_range == 0 as *const VMRange
-    }
 }
 
 impl PartialOrd for VMRange {
@@ -404,29 +392,15 @@ impl PartialEq for VMRange {
 impl Drop for VMRange {
     fn drop(&mut self) {
-        if !self.is_dealloced() {
-            println!("VMRange::drop::panic1");
+        if !self.is_dealloced {
             panic!("A range must be dealloc'ed before drop");
         }
-        if self.has_subranges() {
-            println!("VMRange::drop::panic2");
-            panic!("All sub-ranges must be removed explicitly before drop");
-        }
     }
 }
 
 unsafe impl Send for VMRange {}
 unsafe impl Sync for VMRange {}
 
-impl Default for VMRange {
-    fn default() -> VMRange {
-        VMRange {
-            inner: VMRangeInner::new(0, 0, VMGrowthType::Fixed),
-            parent_range: 0 as *const VMRange,
-            sub_ranges: None,
-        }
-    }
-}
-
 #[derive(Clone, Copy)]
 pub struct VMRangeInner {
@@ -437,6 +411,8 @@ pub struct VMRangeInner {
 impl VMRangeInner {
     pub fn new(start: usize, end: usize, growth: VMGrowthType) -> VMRangeInner {
+        debug_assert!(start % PAGE_SIZE == 0);
+        debug_assert!(end % PAGE_SIZE == 0);
         VMRangeInner {
             start: start,
             end: end,

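The is_dealloced flag plus the panicking Drop above is what turns the second bug into a loud failure instead of a silent leak: any VMRange dropped without having been dealloc'ed by its parent now aborts. A toy model of this drop-guard (the types are stand-ins, not the real VMRange):

```rust
struct Range {
    is_dealloced: bool,
}

struct Parent;

impl Parent {
    // Only the parent may flip the flag, mirroring VMRange::dealloc_subrange.
    fn dealloc_subrange(&mut self, subrange: &mut Range) {
        subrange.is_dealloced = true;
    }
}

impl Drop for Range {
    fn drop(&mut self) {
        // Dropping a range that its parent never dealloc'ed is a logic error.
        if !self.is_dealloced {
            panic!("A range must be dealloc'ed before drop");
        }
    }
}

fn main() {
    let mut parent = Parent;
    let mut child = Range { is_dealloced: false };
    parent.dealloc_subrange(&mut child);
    // child now drops cleanly; skipping dealloc_subrange would panic here.
}
```
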
@@ -1,33 +0,0 @@
-use super::*;
-
-impl VMSpace {
-    pub unsafe fn new(
-        addr: usize,
-        size: usize,
-        guard_type: VMGuardAreaType,
-    ) -> Result<VMSpace, Error> {
-        let range = unsafe { VMRange::new(addr, addr + size, VMGrowthType::Fixed)? };
-        Ok(VMSpace { range, guard_type })
-    }
-
-    pub fn get_guard_type(&self) -> VMGuardAreaType {
-        self.guard_type
-    }
-
-    pub fn alloc_domain(&mut self, size: usize) -> Result<VMDomain, Error> {
-        let mut options = VMAllocOptions::new(size)?;
-        options.growth(VMGrowthType::Upward)?;
-        let new_range = self.range.alloc_subrange(&options)?;
-        Ok(VMDomain { range: new_range })
-    }
-
-    pub fn dealloc_domain(&mut self, domain: &mut VMDomain) {
-        self.range.dealloc_subrange(&mut domain.range)
-    }
-
-    pub fn resize_domain(&mut self, domain: &mut VMDomain, new_size: usize) -> Result<(), Error> {
-        let options = VMResizeOptions::new(new_size)?;
-        self.range.resize_subrange(&mut domain.range, &options)
-    }
-}