Make sure VMRange fills its memory with zeros if necessary

Tate, Hongliang Tian 2019-04-11 19:21:02 +08:00 committed by Tate Tian
parent 862601604c
commit 2957fa99d5
4 changed files with 70 additions and 35 deletions

@@ -68,6 +68,7 @@ pub struct VMAllocOptions {
     addr: VMAddrOption,
     growth: VMGrowthType,
     description: String,
+    fill_zeros: bool,
 }
 
 impl VMAllocOptions {
@@ -98,6 +99,11 @@ impl VMAllocOptions {
         self.description = description.to_owned();
         Ok(self)
     }
+
+    pub fn fill_zeros(&mut self, fill_zeros: bool) -> Result<&mut Self, Error> {
+        self.fill_zeros = fill_zeros;
+        Ok(self)
+    }
 }
 
 impl fmt::Debug for VMAllocOptions {
@@ -163,6 +169,7 @@ impl Default for VMGrowthType {
 pub struct VMResizeOptions {
     new_size: usize,
     new_addr: VMAddrOption,
+    fill_zeros: bool,
 }
 
 impl VMResizeOptions {
@@ -180,4 +187,9 @@ impl VMResizeOptions {
         self.new_addr = new_addr;
         self
     }
+
+    pub fn fill_zeros(&mut self, fill_zeros: bool) -> &mut Self {
+        self.fill_zeros = fill_zeros;
+        self
+    }
 }
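Both options types gain a fill_zeros knob that follows the existing builder style: the VMAllocOptions setter returns Result<&mut Self, Error> so it can be chained with ?, while the VMResizeOptions setter returns &mut Self. Below is a minimal standalone sketch of that chaining style; AllocOptions and its fields are simplified stand-ins invented for illustration, not the real Occlum types.

// Minimal sketch of the builder pattern used above, with hypothetical
// simplified types. Only the chainable fill_zeros flag is illustrated.
#[derive(Debug, Default)]
struct AllocOptions {
    size: usize,
    addr: Option<usize>, // stand-in for VMAddrOption
    fill_zeros: bool,
}

impl AllocOptions {
    fn new(size: usize) -> Result<Self, String> {
        Ok(AllocOptions { size, ..Default::default() })
    }

    fn addr(&mut self, addr: usize) -> Result<&mut Self, String> {
        self.addr = Some(addr);
        Ok(self)
    }

    fn fill_zeros(&mut self, fill_zeros: bool) -> Result<&mut Self, String> {
        self.fill_zeros = fill_zeros;
        Ok(self)
    }
}

fn main() -> Result<(), String> {
    let mut options = AllocOptions::new(4096)?;
    options.addr(0x1000)?
        .fill_zeros(true)?;
    println!("{:?}", options);
    Ok(())
}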

@@ -90,11 +90,12 @@ impl ProcessVM {
     ) -> Result<(VMArea, VMArea, VMArea, VMArea), Error> {
         let mut addr = data_domain.get_start();
         let mut alloc_vma_continuously =
-            |addr: &mut usize, size, flags, growth, desc| -> Result<_, Error> {
+            |addr: &mut usize, desc, size, flags, growth, fill_zeros| -> Result<_, Error> {
                 let mut options = VMAllocOptions::new(size)?;
                 options.addr(VMAddrOption::Fixed(*addr))?
                     .growth(growth)?
-                    .description(desc)?;
+                    .description(desc)?
+                    .fill_zeros(fill_zeros)?;
                 let new_vma = data_domain.alloc_area(&options, flags)?;
                 *addr += size;
                 Ok(new_vma)
@@ -103,14 +104,17 @@ impl ProcessVM {
         let rx_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_X);
         let rw_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_W);
-        let code_vma = alloc_vma_continuously(&mut addr, code_size, rx_flags, VMGrowthType::Fixed, "code_vma")?;
-        let data_vma = alloc_vma_continuously(&mut addr, data_size, rw_flags, VMGrowthType::Fixed, "data_vma")?;
-        let heap_vma = alloc_vma_continuously(&mut addr, 0, rw_flags, VMGrowthType::Upward, "heap_vma")?;
+        let code_vma = alloc_vma_continuously(&mut addr, "code_vma", code_size,
+            rx_flags, VMGrowthType::Fixed, true)?;
+        let data_vma = alloc_vma_continuously(&mut addr, "data_vma", data_size,
+            rw_flags, VMGrowthType::Fixed, true)?;
+        let heap_vma = alloc_vma_continuously(&mut addr, "heap_vma", 0,
+            rw_flags, VMGrowthType::Upward, true)?;
         // Preserve the space for heap
         addr += heap_size;
 
         // After the heap is the stack
-        let stack_vma =
-            alloc_vma_continuously(&mut addr, stack_size, rw_flags, VMGrowthType::Downward, "stack_vma")?;
+        let stack_vma = alloc_vma_continuously(&mut addr, "stack_vma", stack_size,
+            rw_flags, VMGrowthType::Downward, false)?;
 
         Ok((code_vma, data_vma, heap_vma, stack_vma))
     }
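The closure now takes the description first plus an explicit fill_zeros argument; code, data, and heap areas are requested zero-filled at allocation, while the stack area is not. The sketch below shows the contiguous-layout idea behind alloc_vma_continuously: a cursor hands out back-to-back ranges and advances by each size. Range and alloc_next are hypothetical stand-ins, not Occlum's API.

// Standalone sketch of laying out ranges back to back from a moving cursor.
#[derive(Debug)]
struct Range {
    start: usize,
    end: usize,
    zeroed: bool,
}

fn alloc_next(cursor: &mut usize, size: usize, fill_zeros: bool) -> Range {
    let range = Range { start: *cursor, end: *cursor + size, zeroed: fill_zeros };
    *cursor += size; // the next allocation begins where this one ends
    range
}

fn main() {
    let mut cursor = 0x1000_0000;
    let code = alloc_next(&mut cursor, 0x4000, true);   // code: zero-filled
    let data = alloc_next(&mut cursor, 0x2000, true);   // data: zero-filled
    let heap = alloc_next(&mut cursor, 0, true);        // heap starts empty, grows upward
    cursor += 0x10_0000;                                // reserve room for heap growth
    let stack = alloc_next(&mut cursor, 0x8000, false); // stack: not zero-filled here
    println!("{:?}\n{:?}\n{:?}\n{:?}", code, data, heap, stack);
}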
@@ -191,7 +195,7 @@ impl ProcessVM {
             .iter()
             .position(|vma| vma.get_start() == addr && vma.get_end() == addr + size);
         if mmap_vma_i.is_none() {
-            return Ok(());
+            return errno!(EINVAL, "memory area not found");
         }
         mmap_vma_i.unwrap()
     };
@@ -221,12 +225,12 @@ impl ProcessVM {
         } else if new_brk < heap_start {
             return errno!(EINVAL, "New brk address is too low");
         } else if new_brk > heap_end {
-            // TODO: init the memory with zeros for the expanded area
             let resize_options = {
-                let new_heap_end = align_up(new_brk, 4096);
+                let new_heap_end = align_up(new_brk, PAGE_SIZE);
                 let new_heap_size = new_heap_end - heap_start;
                 let mut options = VMResizeOptions::new(new_heap_size)?;
-                options.addr(VMAddrOption::Fixed(heap_start));
+                options.addr(VMAddrOption::Fixed(heap_start))
+                    .fill_zeros(true);
                 options
             };
             let heap_vma = self.heap_vma.as_mut().unwrap();
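The brk path rounds the requested break up to a page boundary and resizes the heap with fill_zeros(true), so the newly exposed bytes read as zero; this is what retires the TODO removed above. Below is a worked example of the page-alignment arithmetic, assuming the common power-of-two align_up idiom and PAGE_SIZE = 4096; the real align_up in the code base may differ in detail.

// Sketch of the new-heap-end calculation, under the assumptions stated above.
const PAGE_SIZE: usize = 4096;

fn align_up(addr: usize, align: usize) -> usize {
    // Requires `align` to be a power of two.
    (addr + align - 1) & !(align - 1)
}

fn main() {
    let heap_start = 0x1000_0000;
    let new_brk = 0x1000_2345;
    let new_heap_end = align_up(new_brk, PAGE_SIZE);
    let new_heap_size = new_heap_end - heap_start;
    assert_eq!(new_heap_end, 0x1000_3000); // rounded up to the next page
    assert_eq!(new_heap_size, 0x3000);
    println!("heap grows to {:#x} ({} bytes)", new_heap_end, new_heap_size);
}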

@@ -59,14 +59,6 @@ impl VMDomain {
         flags: VMAreaFlags,
     ) -> Result<VMArea, Error> {
         let new_range = self.range.alloc_subrange(options)?;
-
-        // Init the memory area with all zeros
-        unsafe {
-            let mem_ptr = new_range.get_start() as *mut c_void;
-            let mem_size = new_range.get_size() as size_t;
-            memset(mem_ptr, 0 as c_int, mem_size);
-        }
-
         Ok(VMArea {
             range: new_range,
             flags: flags,
@@ -82,16 +74,10 @@ impl VMDomain {
         area: &mut VMArea,
         options: &VMResizeOptions,
     ) -> Result<(), Error> {
-        // TODO: init memory with zeros when expanding!
         self.range.resize_subrange(&mut area.range, options)
     }
 }
 
-#[link(name = "sgx_tstdc")]
-extern "C" {
-    pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
-}
-
 #[derive(Debug)]
 pub struct VMArea {

@@ -81,6 +81,15 @@ impl VMRange {
         self.get_subranges_mut()
             .insert(new_subrange_idx, new_subrange_inner);
 
+        if options.fill_zeros {
+            // Init the memory area with all zeros
+            unsafe {
+                let mem_ptr = new_subrange_inner.get_start() as *mut c_void;
+                let mem_size = new_subrange_inner.get_size() as size_t;
+                memset(mem_ptr, 0 as c_int, mem_size);
+            }
+        }
+
         // Although there are two copies of the newly created VMRangeInner obj,
         // we can keep them in sync as all mutation on VMRange object must
         // be carried out through dealloc_subrange() and resize_subrange() that
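With this hunk, zeroing moves out of VMDomain::alloc_area and happens inside VMRange::alloc_subrange, and only when options.fill_zeros is set. For readers outside an SGX enclave, the same conditional zero-fill can be expressed with std::ptr::write_bytes instead of the linked-in memset. The sketch below uses an ordinary Vec as a stand-in for enclave memory; fill_zeros_if_needed is a hypothetical helper, not Occlum code.

// Standalone sketch of a conditional zero-fill over a freshly allocated subrange.
fn fill_zeros_if_needed(mem: &mut [u8], start: usize, size: usize, fill_zeros: bool) {
    if fill_zeros {
        unsafe {
            // Equivalent to memset(mem_ptr, 0, mem_size).
            let mem_ptr = mem.as_mut_ptr().add(start);
            std::ptr::write_bytes(mem_ptr, 0u8, size);
        }
    }
}

fn main() {
    let mut backing = vec![0xAAu8; 64]; // pretend this is previously used memory
    fill_zeros_if_needed(&mut backing, 16, 32, true);
    assert!(backing[16..48].iter().all(|&b| b == 0));
    assert!(backing[..16].iter().all(|&b| b == 0xAA)); // untouched bytes keep old contents
    println!("subrange [16, 48) zeroed, rest untouched");
}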
@@ -140,7 +149,7 @@ impl VMRange {
         }
         // Grow
         else {
-            self.grow_subrange_to(subrange, new_size)
+            self.grow_subrange_to(subrange, new_size, options.fill_zeros)
         }
     }
@@ -340,39 +349,58 @@ impl VMRange {
         Ok(())
     }
 
-    fn grow_subrange_to(&mut self, subrange: &mut VMRange, new_size: usize) -> Result<(), Error> {
+    fn grow_subrange_to(&mut self, subrange: &mut VMRange, new_size: usize, fill_zeros: bool) -> Result<(), Error> {
         let subrange_i = self.position_subrange(subrange);
         let subranges = self.get_subranges_mut();
 
+        let subrange_old_start = subrange.inner.start;
+        let subrange_old_end = subrange.inner.end;
+        let subrange_old_size = subrange.get_size();
+
         if subrange.inner.growth == VMGrowthType::Upward {
-            // Can we grow?
+            // Can we grow upward?
             let max_new_size = {
                 let next_subrange = &subranges[subrange_i + 1];
-                next_subrange.start - subrange.inner.start
+                next_subrange.start - subrange_old_start
             };
             if new_size > max_new_size {
                 return errno!(ENOMEM, "Cannot grow to new size");
             }
             // Do grow
-            let subrange_new_end = subrange.inner.start + new_size;
+            let subrange_new_end = subrange_old_start + new_size;
             subrange.inner.end = subrange_new_end;
             // Sync state
             subranges[subrange_i].end = subrange_new_end;
-        } else {
-            // self.growth == VMGrowthType::Downward
-            // Can we grow?
+            // Init memory
+            if fill_zeros {
+                unsafe {
+                    let mem_ptr = subrange_old_end as *mut c_void;
+                    let mem_size = (subrange_new_end - subrange_old_end) as size_t;
+                    memset(mem_ptr, 0 as c_int, mem_size);
+                }
+            }
+        } else { // self.growth == VMGrowthType::Downward
+            // Can we grow downward?
             let max_new_size = {
                 let pre_subrange = &subranges[subrange_i - 1];
-                subrange.inner.end - pre_subrange.end
+                subrange_old_end - pre_subrange.end
             };
             if new_size > max_new_size {
                 return errno!(ENOMEM, "Cannot grow to new size");
             }
             // Do grow
-            let subrange_new_start = subrange.inner.end - new_size;
+            let subrange_new_start = subrange_old_end - new_size;
             subrange.inner.start = subrange_new_start;
             // Sync state
             subranges[subrange_i].start = subrange_new_start;
+            // Init memory
+            if fill_zeros {
+                unsafe {
+                    let mem_ptr = subrange_new_start as *mut c_void;
+                    let mem_size = (subrange_old_start - subrange_new_start) as size_t;
+                    memset(mem_ptr, 0 as c_int, mem_size);
+                }
+            }
         }
         Ok(())
     }
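The key detail in grow_subrange_to is that only the newly exposed bytes are zeroed: for upward growth that is the gap [old_end, new_end), for downward growth it is [new_start, old_start). The sketch below reproduces just that arithmetic with plain integers standing in for the real VMRange bookkeeping; Growth and grow are hypothetical names used only for illustration.

// Which byte range gets zeroed when a subrange grows, matching the logic above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Growth {
    Upward,
    Downward,
}

// Returns (new_start, new_end, zero_from, zero_len).
fn grow(old_start: usize, old_end: usize, new_size: usize, growth: Growth) -> (usize, usize, usize, usize) {
    match growth {
        Growth::Upward => {
            let new_end = old_start + new_size;
            (old_start, new_end, old_end, new_end - old_end)
        }
        Growth::Downward => {
            let new_start = old_end - new_size;
            (new_start, old_end, new_start, old_start - new_start)
        }
    }
}

fn main() {
    // Heap-like range growing upward from [0x1000, 0x3000) to 0x4000 bytes.
    assert_eq!(grow(0x1000, 0x3000, 0x4000, Growth::Upward), (0x1000, 0x5000, 0x3000, 0x2000));
    // Stack-like range growing downward from [0x8000, 0x9000) to 0x3000 bytes.
    assert_eq!(grow(0x8000, 0x9000, 0x3000, Growth::Downward), (0x6000, 0x9000, 0x6000, 0x2000));
    println!("only the newly exposed bytes are zeroed");
}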
@@ -532,3 +560,8 @@ impl PartialOrd for FreeSpace {
         }
     }
 }
+
+#[link(name = "sgx_tstdc")]
+extern "C" {
+    pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
+}
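The memset declaration moves here and is linked from sgx_tstdc, the trusted C runtime of the Intel SGX SDK, which supplies libc-style routines inside the enclave. Outside an enclave the same FFI pattern resolves against the ordinary C library without a #[link] attribute; the snippet below is only an illustration of that pattern, not part of the commit.

// Illustrative FFI sketch against the regular C library (not sgx_tstdc).
use std::os::raw::{c_int, c_void};

extern "C" {
    fn memset(p: *mut c_void, c: c_int, n: usize) -> *mut c_void;
}

fn main() {
    let mut buf = [0xFFu8; 16];
    unsafe {
        memset(buf.as_mut_ptr() as *mut c_void, 0, buf.len());
    }
    assert!(buf.iter().all(|&b| b == 0));
    println!("zeroed via extern memset");
}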