Add support for Address Space Layout Randomization

ASLR is only enabled in release mode. Also manage the whole process
VM with VMManager.
This commit is contained in:
Hui, Chunyang 2021-01-21 12:21:54 +00:00 committed by Zongmin.Gu
parent d7b994bc7d
commit 79b264a6c8
8 changed files with 138 additions and 64 deletions

@ -15,7 +15,7 @@ cp ../test_per_process_config.sh image/bin
# Set process memory space size to very small values and will fail when running target script using default configuration
new_json="$(jq '.process.default_stack_size = "1MB" |
.process.default_heap_size = "1MB" |
.process.default_mmap_size = "6MB"' Occlum.json)" && \
.process.default_mmap_size = "8MB"' Occlum.json)" && \
echo "${new_json}" > Occlum.json
pushd image/bin

@ -2,7 +2,7 @@
ulimit -a
# ulimit defined below will override configuration in Occlum.json
ulimit -Sv 102400 # virtual memory size 100M (including heap, stack, mmap size)
ulimit -Sv 122880 # virtual memory size 120M (including heap, stack, mmap size)
ulimit -Ss 10240 # stack size 10M
ulimit -Sd 40960 # heap size 40M

@ -24,11 +24,11 @@ init_instance() {
rm -rf occlum_instance && mkdir occlum_instance
cd occlum_instance
occlum init
new_json="$(jq '.resource_limits.user_space_size = "1400MB" |
new_json="$(jq '.resource_limits.user_space_size = "1680MB" |
.resource_limits.kernel_space_heap_size="64MB" |
.resource_limits.max_num_of_threads = 64 |
.process.default_heap_size = "256MB" |
.process.default_mmap_size = "1120MB" |
.process.default_mmap_size = "1400MB" |
.entry_points = [ "/usr/lib/jvm/java-11-alibaba-dragonwell/jre/bin" ] |
.env.default = [ "LD_LIBRARY_PATH=/usr/lib/jvm/java-11-alibaba-dragonwell/jre/lib/server:/usr/lib/jvm/java-11-alibaba-dragonwell/jre/lib:/usr/lib/jvm/java-11-alibaba-dragonwell/jre/../lib" ]' Occlum.json)" && \
echo "${new_json}" > Occlum.json

@ -1,6 +1,6 @@
{
"resource_limits": {
"user_space_size": "256MB",
"user_space_size": "300MB",
"kernel_space_heap_size": "32MB",
"kernel_space_stack_size": "1MB",
"max_num_of_threads": 32
@ -8,7 +8,7 @@
"process": {
"default_stack_size": "4MB",
"default_heap_size": "32MB",
"default_mmap_size": "80MB"
"default_mmap_size": "100MB"
},
"entry_points": [
"/bin"

@ -9,6 +9,9 @@ use super::vm_manager::{
use super::vm_perms::VMPerms;
use std::sync::atomic::{AtomicUsize, Ordering};
// Used for heap and stack start address randomization.
const RANGE_FOR_RANDOMIZATION: usize = 256 * 4096; // 1M
#[derive(Debug, Clone)]
pub struct ProcessVMBuilder<'a, 'b> {
elfs: Vec<&'b ElfFile<'a>>,
@ -42,6 +45,21 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
self
}
// Generate a random offset for address randomization.
// Returns exactly `range` in debug builds (deterministic layout for easier
// debugging) and a value in [0, range) in release builds.
// Note: This function doesn't guarantee alignment.
fn get_randomize_offset(range: usize) -> usize {
if cfg!(debug_assertions) {
// Fixed, maximal offset in debug builds so layouts are reproducible.
return range;
}

// Guard against a divide-by-zero panic from `% 0` below; a zero-sized
// range can only yield a zero offset anyway.
if range == 0 {
return 0;
}

use crate::util::random;
trace!("entropy size = {}", range);
let mut random_buf: [u8; 8] = [0u8; 8]; // same length as usize
random::get_random(&mut random_buf).expect("failed to get random number");
let random_num: usize = u64::from_le_bytes(random_buf) as usize;
random_num % range
}
pub fn build(self) -> Result<ProcessVM> {
self.validate()?;
@ -73,6 +91,8 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
})
})
.collect();
// TODO: Make heap and stack 16-byte aligned instead of page aligned.
let other_layouts = vec![
VMLayout::new(heap_size, PAGE_SIZE)?,
VMLayout::new(stack_size, PAGE_SIZE)?,
@ -94,55 +114,75 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
USER_SPACE_VM_MANAGER.alloc(process_layout.align() + process_layout.size())?
};
let process_base = process_range.range().start();
// Init the memory for ELFs in the process
let elf_ranges: Vec<VMRange> = {
let mut min_elf_start = process_base;
elf_layouts
.iter()
.map(|elf_layout| {
let new_elf_range = VMRange::new_with_layout(elf_layout, min_elf_start);
min_elf_start = new_elf_range.end();
new_elf_range
})
.collect()
};
self.elfs
.iter()
.zip(elf_ranges.iter())
.try_for_each(|(elf, elf_range)| Self::init_elf_memory(elf_range, elf))?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let heap_min_start = {
let last_elf_range = elf_ranges.iter().last().unwrap();
last_elf_range.end()
};
let heap_range = VMRange::new_with_layout(heap_layout, heap_min_start);
let brk = AtomicUsize::new(heap_range.start());
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let stack_min_start = heap_range.end();
let stack_range = VMRange::new_with_layout(stack_layout, stack_min_start);
// Note: we do not need to fill zeros for stack
// Init the mmap memory in the process
let mmap_layout = &other_layouts[2];
let mmap_min_start = stack_range.end();
let mmap_range = VMRange::new_with_layout(mmap_layout, mmap_min_start);
let mmap_manager = VMManager::from(mmap_range.start(), mmap_range.size())?;
// Use the vm_manager to manage the whole process VM (including mmap region)
let mut vm_manager = VMManager::from(process_base, process_range.range().size())?;
// Note: we do not need to fill zeros of the mmap region.
// VMManager will fill zeros (if necessary) on mmap.
debug_assert!(elf_ranges
// Tracks the minimum start address for the next region to be mapped
let mut min_start =
process_base + Self::get_randomize_offset(process_range.range().size() >> 3);
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.all(|elf_range| process_range.range().is_superset_of(elf_range)));
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let desired_range = VMRange::new_with_layout(elf_layout, min_start);
let vm_option = VMMapOptionsBuilder::default()
.size(desired_range.size())
.addr(VMMapAddr::Need(desired_range.start()))
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::DoNothing())
.build()?;
let elf_start = vm_manager.mmap(vm_option)?;
debug_assert!(desired_range.start == elf_start);
debug_assert!(elf_start % elf_layout.align() == 0);
debug_assert!(process_range.range().is_superset_of(&desired_range));
Self::init_elf_memory(&desired_range, elf_file)?;
min_start = desired_range.end();
elf_ranges.push(desired_range);
trace!("elf range = {:?}", desired_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let heap_min_start = min_start + Self::get_randomize_offset(RANGE_FOR_RANDOMIZATION);
let heap_range = VMRange::new_with_layout(heap_layout, heap_min_start);
let vm_option = VMMapOptionsBuilder::default()
.size(heap_range.size())
.addr(VMMapAddr::Need(heap_range.start()))
.perms(VMPerms::READ | VMPerms::WRITE)
.build()?;
let heap_start = vm_manager.mmap(vm_option)?;
debug_assert!(heap_range.start == heap_start);
trace!("heap range = {:?}", heap_range);
let brk = AtomicUsize::new(heap_range.start());
min_start = heap_range.end();
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let stack_min_start = min_start + Self::get_randomize_offset(RANGE_FOR_RANDOMIZATION);
let stack_range = VMRange::new_with_layout(stack_layout, stack_min_start);
let vm_option = VMMapOptionsBuilder::default()
.size(stack_range.size())
.addr(VMMapAddr::Need(stack_range.start()))
.perms(VMPerms::READ | VMPerms::WRITE)
.build()?;
let stack_start = vm_manager.mmap(vm_option)?;
debug_assert!(stack_range.start == stack_start);
trace!("stack range = {:?}", stack_range);
min_start = stack_range.end();
// Note: we do not need to fill zeros for stack
debug_assert!(process_range.range().is_superset_of(&heap_range));
debug_assert!(process_range.range().is_superset_of(&stack_range));
debug_assert!(process_range.range().is_superset_of(&mmap_range));
let mmap_manager = SgxMutex::new(mmap_manager);
// Set mmap prefered start address
vm_manager.set_mmap_prefered_start_addr(min_start);
let vm_manager = SgxMutex::new(vm_manager);
Ok(ProcessVM {
process_range,
@ -150,7 +190,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
heap_range,
stack_range,
brk,
mmap_manager,
vm_manager,
})
}
@ -201,7 +241,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
empty_start_offset = empty_end_offset + file_size;
});
empty_offset_vec.push((empty_start_offset, elf_proc_buf.len() - 1));
empty_offset_vec.push((empty_start_offset, elf_proc_buf.len()));
// Set zero for the remain part of the buffer
empty_offset_vec
@ -219,7 +259,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
mmap_manager: SgxMutex<VMManager>,
vm_manager: SgxMutex<VMManager>, // manage the whole process VM
elf_ranges: Vec<VMRange>,
heap_range: VMRange,
stack_range: VMRange,
@ -241,7 +281,7 @@ impl Default for ProcessVM {
heap_range: Default::default(),
stack_range: Default::default(),
brk: Default::default(),
mmap_manager: Default::default(),
vm_manager: Default::default(),
}
}
}
@ -347,7 +387,7 @@ impl ProcessVM {
.initializer(initializer)
.writeback_file(writeback_file)
.build()?;
let mmap_addr = self.mmap_manager.lock().unwrap().mmap(mmap_options)?;
let mmap_addr = self.vm_manager.lock().unwrap().mmap(mmap_options)?;
Ok(mmap_addr)
}
@ -365,11 +405,11 @@ impl ProcessVM {
}
let mremap_option = VMRemapOptions::new(old_addr, old_size, new_size, flags)?;
self.mmap_manager.lock().unwrap().mremap(&mremap_option)
self.vm_manager.lock().unwrap().mremap(&mremap_option)
}
pub fn munmap(&self, addr: usize, size: usize) -> Result<()> {
self.mmap_manager.lock().unwrap().munmap(addr, size)
self.vm_manager.lock().unwrap().munmap(addr, size)
}
pub fn mprotect(&self, addr: usize, size: usize, perms: VMPerms) -> Result<()> {
@ -383,7 +423,7 @@ impl ProcessVM {
if !self.process_range.range().is_superset_of(&protect_range) {
return_errno!(ENOMEM, "invalid range");
}
let mut mmap_manager = self.mmap_manager.lock().unwrap();
let mut mmap_manager = self.vm_manager.lock().unwrap();
// TODO: support mprotect vm regions in addition to mmap
if !mmap_manager.range().is_superset_of(&protect_range) {
@ -396,18 +436,18 @@ impl ProcessVM {
pub fn msync(&self, addr: usize, size: usize) -> Result<()> {
let sync_range = VMRange::new_with_size(addr, size)?;
let mut mmap_manager = self.mmap_manager.lock().unwrap();
let mut mmap_manager = self.vm_manager.lock().unwrap();
mmap_manager.msync_by_range(&sync_range)
}
pub fn msync_by_file(&self, sync_file: &FileRef) {
let mut mmap_manager = self.mmap_manager.lock().unwrap();
let mut mmap_manager = self.vm_manager.lock().unwrap();
mmap_manager.msync_by_file(sync_file);
}
// Return: a copy of the found region
pub fn find_mmap_region(&self, addr: usize) -> Result<VMRange> {
self.mmap_manager
self.vm_manager
.lock()
.unwrap()
.find_mmap_region(addr)

@ -217,7 +217,8 @@ impl VMRemapOptions {
/// Memory manager.
///
/// VMManager provides useful memory management APIs such as mmap, munmap, mremap, etc.
/// VMManager provides useful memory management APIs such as mmap, munmap, mremap, etc. It also manages the whole
/// process VM including mmap, stack, heap, elf ranges.
///
/// # Invariants
///
@ -255,6 +256,7 @@ impl VMRemapOptions {
pub struct VMManager {
// The whole VM range this manager is responsible for.
range: VMRange,
// All VMAs within `range`, kept sorted; includes the start/end sentries.
vmas: Vec<VMArea>,
mmap_prefered_start_addr: usize, // Preferred start address for mmap allocations
}
impl VMManager {
@ -275,13 +277,26 @@ impl VMManager {
};
vec![start_sentry, end_sentry]
};
Ok(VMManager { range, vmas })
Ok(VMManager {
range,
vmas,
mmap_prefered_start_addr: addr, // make it the start of VMManager range by default
})
}
/// Returns the whole VM range managed by this `VMManager`.
pub fn range(&self) -> &VMRange {
&self.range
}
/// Returns all VMAs currently tracked by this `VMManager` (including sentries).
pub fn vmas(&self) -> &Vec<VMArea> {
&self.vmas
}
// Set the preferred start address for subsequent mmap allocations.
// Free ranges at or above this address are chosen preferentially.
pub fn set_mmap_prefered_start_addr(&mut self, addr: usize) {
self.mmap_prefered_start_addr = addr
}
pub fn mmap(&mut self, mut options: VMMapOptions) -> Result<usize> {
// TODO: respect options.align when mmap
let addr = *options.addr();
@ -623,6 +638,14 @@ impl VMManager {
.ok_or_else(|| errno!(ESRCH, "no mmap regions that contains the address"))
}
/// Returns the fraction of the managed range currently occupied by VMAs,
/// as a value in [0.0, 1.0].
pub fn usage_percentage(&self) -> f32 {
let total_size = self.range.size();
// Sum the sizes of all VMAs (sentries included, matching prior behavior).
let used_size: usize = self.vmas.iter().map(|vma| vma.size()).sum();
used_size as f32 / total_size as f32
}
// Find a VMA that contains the given range, returning the VMA's index
fn find_containing_vma_idx(&self, target_range: &VMRange) -> Option<usize> {
self.vmas
@ -644,7 +667,8 @@ impl VMManager {
// TODO: reduce the complexity from O(N) to O(log(N)), where N is
// the number of existing VMAs.
// Record the minimal free range that satisfies the constraints
let mmap_prefered_start_addr = self.mmap_prefered_start_addr;
// Record the minimal free range that satisfies the constraints
let mut result_free_range: Option<VMRange> = None;
let mut result_idx: Option<usize> = None;
@ -698,6 +722,9 @@ impl VMManager {
if result_free_range == None
|| result_free_range.as_ref().unwrap().size() > free_range.size()
// Preferentially alloc range above mmap_prefered_start_addr
|| (result_free_range.as_ref().unwrap().end() < mmap_prefered_start_addr
&& mmap_prefered_start_addr <= free_range.start())
{
result_free_range = Some(free_range);
result_idx = Some(idx);
@ -705,6 +732,12 @@ impl VMManager {
}
if result_free_range.is_none() {
let usage = self.usage_percentage();
debug!(
"Not enough memory to allocate {} bytes. Current memory usage is {}%",
size,
usage * 100 as f32
);
return_errno!(ENOMEM, "not enough memory");
}

@ -2,6 +2,7 @@ use super::*;
bitflags! {
pub struct VMPerms : u32 {
const NONE = 0x0;
const READ = 0x1;
const WRITE = 0x2;
const EXEC = 0x4;

@ -2,13 +2,13 @@
"resource_limits": {
"kernel_space_heap_size": "40MB",
"kernel_space_stack_size": "1MB",
"user_space_size": "320MB",
"user_space_size": "400MB",
"max_num_of_threads": 32
},
"process": {
"default_stack_size": "4MB",
"default_heap_size": "8MB",
"default_mmap_size": "80MB"
"default_mmap_size": "100MB"
},
"entry_points": [
"/bin"