Refactor mmap implementation

1. Add a full test suite for mmap
2. Implement file-backed memory mapping
3. Implement fixed, anonymous memory mapping
4. Implement hinted, anonymous memory mapping
5. Implement munmap that covers partial/multiple memory mappings
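A rough sketch of the three mapping modes, written against the new `vm::do_mmap` signature introduced below (hypothetical call sites, not part of this diff; error handling elided, and `FileDesc` assumed to be a plain `u32` alias):

    // Hinted, anonymous: `hint` is only a preference; the manager may pick
    // a different address if the hinted range is taken.
    let addr = vm::do_mmap(hint, size, VMPerms::READ | VMPerms::WRITE,
        MMapFlags::MAP_PRIVATE | MMapFlags::MAP_ANONYMOUS, 0, 0)?;
    // Fixed, anonymous: MAP_FIXED demands exactly `fixed_addr`, unmapping
    // any overlapping mappings first.
    let addr = vm::do_mmap(fixed_addr, size, VMPerms::READ | VMPerms::WRITE,
        MMapFlags::MAP_PRIVATE | MMapFlags::MAP_ANONYMOUS | MMapFlags::MAP_FIXED, 0, 0)?;
    // File-backed: the mapping is initialized from `fd` at `offset`; any
    // bytes past the data read from the file are zero-filled.
    let addr = vm::do_mmap(0, size, VMPerms::READ, MMapFlags::MAP_PRIVATE, fd, offset)?;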
Tate, Hongliang Tian 2019-06-27 05:24:30 +00:00 committed by Tate Tian
parent 3a38f68c69
commit 56c69b5f3c
16 changed files with 1547 additions and 1142 deletions

src/libos/Cargo.lock (generated): 114 lines changed

@@ -5,6 +5,7 @@ name = "Occlum"
version = "0.0.1"
dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"derive_builder 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rcore-fs 0.1.0",
@@ -30,6 +31,71 @@ name = "cfg-if"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "darling"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"darling_core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"darling_macro 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "darling_core"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"ident_case 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "darling_macro"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"darling_core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "derive_builder"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"darling 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"derive_builder_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "derive_builder_core"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"darling 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fnv"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.1.0"
@@ -47,6 +113,22 @@ dependencies = [
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "proc-macro2"
version = "0.4.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "quote"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rcore-fs"
version = "0.1.0"
@@ -130,6 +212,26 @@ name = "static_assertions"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "strsim"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "syn"
version = "0.15.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "unicode-xid"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "version_check"
version = "0.1.4"
@@ -151,9 +253,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum bitvec 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfadef5c4e2c2e64067b9ecc061179f12ac7ec65ba613b1f60f3972bbada1f5b"
"checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
"checksum darling 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fcfbcb0c5961907597a7d1148e3af036268f2b773886b8bb3eeb1e1281d3d3d6"
"checksum darling_core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6afc018370c3bff3eb51f89256a6bdb18b4fdcda72d577982a14954a7a0b402c"
"checksum darling_macro 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c6d8dac1c6f1d29a41c4712b4400f878cb4fcc4c7628f298dd75038e024998d1"
"checksum derive_builder 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ac53fa6a3cda160df823a9346442525dcaf1e171999a1cf23e67067e4fd64d4"
"checksum derive_builder_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0288a23da9333c246bb18c143426074a6ae96747995c5819d2947b64cd942b37"
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum ident_case 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
"checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759"
"checksum quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "faf4799c5d274f3868a4aae320a0a182cbd2baee377b378f080e16a23e9d80db"
"checksum spin 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "37b5646825922b96b5d7d676b5bb3458a54498e96ed7b0ce09dc43a07038fea4"
"checksum static_assertions 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "389ce475f424f267dbed6479cbd8f126c5e1afb053b0acdaa019c74305fc65d1"
"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
"checksum syn 0.15.38 (registry+https://github.com/rust-lang/crates.io-index)" = "37ea458a750f59ab679b47fef9b6722c586c5742f4cfe18a120bbc807e5e01fd"
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
"checksum version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7716c242968ee87e5542f8021178248f267f295a5c4803beae8b8b7fd9bc6051"
"checksum zero 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

@@ -10,6 +10,7 @@ crate-type = ["staticlib"]
bitflags = "1.0"
log = "0.4"
lazy_static = { version = "1.1.0", features = ["spin_no_std"] } # Implies nightly
derive_builder = "0.7.2"
rcore-fs = { path = "../../deps/sefs/rcore-fs" }
rcore-fs-sefs = { path = "../../deps/sefs/rcore-fs-sefs" }

@@ -24,6 +24,8 @@ extern crate lazy_static;
extern crate log;
extern crate rcore_fs;
extern crate rcore_fs_sefs;
#[macro_use]
extern crate derive_builder;
use sgx_trts::libc;
use sgx_types::*;

@@ -78,4 +78,4 @@ use self::task::Task;
use super::*;
use fs::{File, FileRef, FileTable};
use misc::ResourceLimitsRef;
use vm::{ProcessVM, VMRangeTrait};
use vm::{ProcessVM};

@@ -1,7 +1,7 @@ use super::task::Task;
use super::task::Task;
use super::*;
use fs::{File, FileRef, FileTable};
use vm::{ProcessVM, VMRangeTrait};
use vm::{ProcessVM};
lazy_static! {
// Dummy object to make all processes have a parent

@@ -6,7 +6,7 @@ use misc::ResourceLimitsRef;
use std::ffi::{CStr, CString};
use std::path::Path;
use std::sgxfs::SgxFile;
use vm::{ProcessVM, VMRangeTrait};
use vm::{ProcessVM};
use super::task::Task;
use super::*;
@@ -64,7 +64,7 @@ pub fn do_spawn<P: AsRef<Path>>(
let base_addr = vm.get_base_addr();
let program_entry = {
let program_entry = base_addr + elf_helper::get_start_address(&elf_file)?;
if !vm.get_code_vma().contains_obj(program_entry, 16) {
if !vm.get_code_range().contains(program_entry) {
return errno!(EINVAL, "Invalid program entry");
}
program_entry

@@ -15,7 +15,7 @@ use std::ffi::{CStr, CString};
use std::ptr;
use time::timeval_t;
use util::mem_util::from_user::*;
use vm::{VMAreaFlags, VMResizeOptions};
use vm::{VMPerms, MMapFlags};
use {fs, process, std, vm};
use super::*;
@@ -648,13 +648,14 @@ fn do_sync() -> Result<isize, Error> {
fn do_mmap(
addr: usize,
size: usize,
prot: i32,
perms: i32,
flags: i32,
fd: FileDesc,
offset: off_t,
) -> Result<isize, Error> {
let flags = VMAreaFlags(prot as u32);
let addr = vm::do_mmap(addr, size, flags)?;
let perms = VMPerms::from_u32(perms as u32)?;
let flags = MMapFlags::from_u32(flags as u32)?;
let addr = vm::do_mmap(addr, size, perms, flags, fd, offset as usize)?;
Ok(addr as isize)
}
@@ -670,10 +671,8 @@ fn do_mremap(
flags: i32,
new_addr: usize,
) -> Result<isize, Error> {
let mut options = VMResizeOptions::new(new_size)?;
// TODO: handle flags and new_addr
let ret_addr = vm::do_mremap(old_addr, old_size, &options)?;
Ok(ret_addr as isize)
warn!("mremap: not implemented!");
errno!(ENOSYS, "not supported yet")
}
fn do_mprotect(addr: usize, len: usize, prot: u32) -> Result<isize, Error> {

@@ -1,59 +1,54 @@
use fs::FileDesc;
use prelude::*;
use process::{get_current, Process, ProcessRef};
use fs::{FileDesc, FileRef, File};
use std::fmt;
#[macro_use]
mod vm_range;
mod vm_manager;
mod user_space_vm;
mod process_vm;
mod vm_area;
pub use self::process_vm::ProcessVM;
pub use self::vm_area::{
VMArea, VMAreaFlags, VMDomain, VMSpace, VM_AREA_FLAG_R, VM_AREA_FLAG_W, VM_AREA_FLAG_X,
};
pub use self::vm_range::{VMRange, VMRangeTrait};
pub use self::process_vm::{ProcessVM, MMapFlags, VMPerms};
// TODO: separate proc and flags
// TODO: accept fd and offset
pub fn do_mmap(addr: usize, size: usize, flags: VMAreaFlags) -> Result<usize, Error> {
pub fn do_mmap(
addr: usize,
size: usize,
perms: VMPerms,
flags: MMapFlags,
fd: FileDesc,
offset: usize,
) -> Result<usize, Error> {
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
info!(
"mmap: addr: {:#x}, size: {:#x}, flags: {:?}",
addr, size, flags
"mmap: addr: {:#x}, size: {:#x}, perms: {:?}, flags: {:?}",
addr, size, perms, flags,
);
} else {
info!(
"mmap: addr: {:#x}, size: {:#x}, perms: {:?}, flags: {:?}, fd: {:?}, offset: {:?}",
addr, size, perms, flags, fd, offset
);
}
let mut current_vm_ref = {
let current_ref = get_current();
let current_process = current_ref.lock().unwrap();
let current_vm_ref = current_process.get_vm();
current_process.get_vm().clone()
};
let mut current_vm = current_vm_ref.lock().unwrap();
current_vm.mmap(addr, size, flags)
current_vm.mmap(addr, size, perms, flags, fd, offset)
}
pub fn do_munmap(addr: usize, size: usize) -> Result<(), Error> {
info!("munmap: addr: {:#x}, size: {:#x}", addr, size);
let mut current_vm_ref = {
let current_ref = get_current();
let current_process = current_ref.lock().unwrap();
let current_vm_ref = current_process.get_vm();
current_process.get_vm().clone()
};
let mut current_vm = current_vm_ref.lock().unwrap();
current_vm.munmap(addr, size)
}
// TODO: accept flags
pub fn do_mremap(
old_addr: usize,
old_size: usize,
options: &VMResizeOptions,
) -> Result<usize, Error> {
info!(
"mremap: oldaddr: {:#x}, oldsize: {:#x}, options: {:?}",
old_addr, old_size, options
);
let current_ref = get_current();
let current_process = current_ref.lock().unwrap();
let current_vm_ref = current_process.get_vm();
let mut current_vm = current_vm_ref.lock().unwrap();
current_vm.mremap(old_addr, old_size, options)
}
pub fn do_brk(addr: usize) -> Result<usize, Error> {
info!("brk: addr: {:#x}", addr);
let current_ref = get_current();
@@ -64,139 +59,3 @@ pub fn do_brk(addr: usize) -> Result<usize, Error> {
}
pub const PAGE_SIZE: usize = 4096;
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMGuardAreaType {
None,
Static { size: usize, align: usize },
Dynamic { size: usize },
}
#[derive(Clone, PartialEq, Default)]
pub struct VMAllocOptions {
size: usize,
addr: VMAddrOption,
growth: VMGrowthType,
description: String,
fill_zeros: bool,
}
impl VMAllocOptions {
pub fn new(size: usize) -> Result<VMAllocOptions, Error> {
if size % PAGE_SIZE != 0 {
return errno!(EINVAL, "Size is not page-aligned");
}
Ok(VMAllocOptions {
size,
..Default::default()
})
}
pub fn addr(&mut self, addr: VMAddrOption) -> Result<&mut Self, Error> {
if addr.is_addr_given() && addr.get_addr() % PAGE_SIZE != 0 {
return errno!(EINVAL, "Invalid address");
}
self.addr = addr;
Ok(self)
}
pub fn growth(&mut self, growth: VMGrowthType) -> Result<&mut Self, Error> {
self.growth = growth;
Ok(self)
}
pub fn description(&mut self, description: &str) -> Result<&mut Self, Error> {
self.description = description.to_owned();
Ok(self)
}
pub fn fill_zeros(&mut self, fill_zeros: bool) -> Result<&mut Self, Error> {
self.fill_zeros = fill_zeros;
Ok(self)
}
}
impl fmt::Debug for VMAllocOptions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"VMAllocOptions {{ size: 0x{:X?}, addr: {:?}, growth: {:?} }}",
self.size, self.addr, self.growth
)
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMAddrOption {
Any, // Free to choose any address
Hint(usize), // Near the given address
Fixed(usize), // Must be the given address
Beyond(usize), // Must be greater or equal to the given address
}
impl Default for VMAddrOption {
fn default() -> VMAddrOption {
VMAddrOption::Any
}
}
impl VMAddrOption {
pub fn is_addr_given(&self) -> bool {
match self {
VMAddrOption::Any => false,
_ => true,
}
}
pub fn get_addr(&self) -> usize {
match self {
VMAddrOption::Hint(addr) | VMAddrOption::Fixed(addr) | VMAddrOption::Beyond(addr) => {
*addr
}
VMAddrOption::Any => panic!("No address given"),
}
}
}
/// How VMRange may grow:
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMGrowthType {
Fixed,
Upward, // e.g., mmapped regions grow upward
Downward, // e.g., stacks grow downward
}
impl Default for VMGrowthType {
fn default() -> VMGrowthType {
VMGrowthType::Fixed
}
}
#[derive(Clone, Debug, Default)]
pub struct VMResizeOptions {
new_size: usize,
new_addr: VMAddrOption,
fill_zeros: bool,
}
impl VMResizeOptions {
pub fn new(new_size: usize) -> Result<VMResizeOptions, Error> {
if new_size % PAGE_SIZE != 0 {
return errno!(EINVAL, "Size is not page-aligned");
}
Ok(VMResizeOptions {
new_size,
..Default::default()
})
}
pub fn addr(&mut self, new_addr: VMAddrOption) -> &mut Self {
self.new_addr = new_addr;
self
}
pub fn fill_zeros(&mut self, fill_zeros: bool) -> &mut Self {
self.fill_zeros = fill_zeros;
self
}
}

@@ -1,39 +1,32 @@
use super::*;
use super::vm_manager::{VMRange, VMManager, VMMapOptionsBuilder, VMMapOptions, VMMapAddr, VMInitializer};
use super::user_space_vm::{UserSpaceVMManager, UserSpaceVMRange, USER_SPACE_VM_MANAGER};
use std::slice;
// TODO: examine the ProcessVM code for memory leakage
lazy_static! {
static ref DATA_SPACE: SgxMutex<VMSpace> = {
let (addr, size) = {
let mut addr: usize = 0;
let mut size: usize = 0;
unsafe { vm_get_prealloced_data_space(&mut addr, &mut size) };
(addr, size)
};
let vm_space = unsafe {
match VMSpace::new(addr, size, VMGuardAreaType::None, "DATA_SPACE") {
Ok(vm_space) => vm_space,
Err(_) => panic!("Failed to create a VMSpace"),
}
};
SgxMutex::new(vm_space)
};
}
extern "C" {
pub fn vm_get_prealloced_data_space(addr: &mut usize, size: &mut usize);
}
#[derive(Debug, Default)]
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
//code_domain: VMDomain,
data_domain: Option<Box<VMDomain>>,
code_vma: Option<Box<VMArea>>,
data_vma: Option<Box<VMArea>>,
heap_vma: Option<Box<VMArea>>,
stack_vma: Option<Box<VMArea>>,
mmap_vmas: Vec<Box<VMArea>>,
process_range: UserSpaceVMRange,
code_range: VMRange,
data_range: VMRange,
heap_range: VMRange,
stack_range: VMRange,
brk: usize,
mmap_manager: VMManager,
}
impl Default for ProcessVM {
fn default() -> ProcessVM {
ProcessVM {
process_range: USER_SPACE_VM_MANAGER.alloc_dummy(),
code_range: VMRange::default(),
data_range: VMRange::default(),
heap_range: VMRange::default(),
stack_range: VMRange::default(),
brk: 0,
mmap_manager: VMManager::default(),
}
}
}
impl ProcessVM {
@@ -44,259 +37,213 @@ impl ProcessVM {
stack_size: usize,
mmap_size: usize,
) -> Result<ProcessVM, Error> {
// Allocate the data domain from the global data space
let mut data_domain = {
let data_domain_size = code_size + data_size + heap_size + stack_size + mmap_size;
let data_domain = DATA_SPACE
.lock()
.unwrap()
.alloc_domain(data_domain_size, "data_domain")?;
data_domain
let process_range = {
let vm_range_size = code_size + data_size + heap_size + stack_size + mmap_size;
USER_SPACE_VM_MANAGER.alloc(vm_range_size)?
};
// Allocate vmas from the data domain
let (code_vma, data_vma, heap_vma, stack_vma) = match ProcessVM::alloc_vmas(
&mut data_domain,
code_size,
data_size,
heap_size,
stack_size,
) {
Err(e) => {
// Note: we need to handle the error here so that we can
// deallocate the data domain explicitly.
DATA_SPACE.lock().unwrap().dealloc_domain(data_domain);
return Err(e);
}
Ok(vmas) => vmas,
};
// Initial value of the program break
let brk = heap_vma.get_start();
// No mmapped vmas initially
let mmap_vmas = Vec::new();
let process_addr = process_range.range().start();
let vm = ProcessVM {
data_domain: Some(Box::new(data_domain)),
code_vma: Some(Box::new(code_vma)),
data_vma: Some(Box::new(data_vma)),
heap_vma: Some(Box::new(heap_vma)),
stack_vma: Some(Box::new(stack_vma)),
mmap_vmas: mmap_vmas,
brk: brk,
};
Ok(vm)
let range_sizes = vec![code_size, data_size, heap_size, stack_size];
let mut curr_addr = process_addr;
let mut ranges = Vec::new();
for range_size in &range_sizes {
let range_start = curr_addr;
let range_end = curr_addr + range_size;
let range = VMRange::from(range_start, range_end)?;
ranges.push(range);
curr_addr = range_end;
}
let code_range = *&ranges[0];
let data_range = *&ranges[1];
let heap_range = *&ranges[2];
let stack_range = *&ranges[3];
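// Resulting layout inside the contiguous process_range:
// [ code | data | heap | stack | mmap ]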
unsafe {
fill_zeros(code_range.start(), code_range.size());
fill_zeros(data_range.start(), data_range.size());
}
fn alloc_vmas(
data_domain: &mut VMDomain,
code_size: usize,
data_size: usize,
heap_size: usize,
stack_size: usize,
) -> Result<(VMArea, VMArea, VMArea, VMArea), Error> {
let mut addr = data_domain.get_start();
let mut alloc_vma_continuously =
|addr: &mut usize, desc, size, flags, growth, fill_zeros| -> Result<_, Error> {
let mut options = VMAllocOptions::new(size)?;
options
.addr(VMAddrOption::Fixed(*addr))?
.growth(growth)?
.description(desc)?
.fill_zeros(fill_zeros)?;
let new_vma = data_domain.alloc_area(&options, flags)?;
*addr += size;
Ok(new_vma)
};
let brk = heap_range.start();
let rx_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_X);
let rw_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_W);
let mmap_addr = stack_range.end();
let mmap_manager = VMManager::from(mmap_addr, mmap_size)?;
let code_vma = alloc_vma_continuously(
&mut addr,
"code_vma",
code_size,
rx_flags,
VMGrowthType::Fixed,
!cfg!(feature = "integrity_only_opt"),
)?;
let data_vma = alloc_vma_continuously(
&mut addr,
"data_vma",
data_size,
rw_flags,
VMGrowthType::Fixed,
!cfg!(feature = "integrity_only_opt"),
)?;
let heap_vma = alloc_vma_continuously(
&mut addr,
"heap_vma",
0,
rw_flags,
VMGrowthType::Upward,
true,
)?;
// Preserve the space for heap
addr += heap_size;
// After the heap is the stack
let stack_vma = alloc_vma_continuously(
&mut addr,
"stack_vma",
stack_size,
rw_flags,
VMGrowthType::Downward,
false,
)?;
Ok((code_vma, data_vma, heap_vma, stack_vma))
Ok(ProcessVM {
process_range,
code_range,
data_range,
heap_range,
stack_range,
brk,
mmap_manager,
})
}
pub fn get_process_range(&self) -> &VMRange {
self.process_range.range()
}
pub fn get_code_range(&self) -> &VMRange {
&self.code_range
}
pub fn get_data_range(&self) -> &VMRange {
&self.data_range
}
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn get_base_addr(&self) -> usize {
self.get_code_vma().get_start()
}
pub fn get_code_vma(&self) -> &VMArea {
&self.code_vma.as_ref().unwrap()
}
pub fn get_data_vma(&self) -> &VMArea {
&self.data_vma.as_ref().unwrap()
}
pub fn get_heap_vma(&self) -> &VMArea {
&self.heap_vma.as_ref().unwrap()
}
pub fn get_stack_vma(&self) -> &VMArea {
&self.stack_vma.as_ref().unwrap()
self.get_process_range().start()
}
pub fn get_stack_top(&self) -> usize {
self.get_stack_vma().get_end()
}
pub fn get_mmap_vmas(&self) -> &[Box<VMArea>] {
&self.mmap_vmas[..]
}
pub fn get_brk_start(&self) -> usize {
self.get_heap_vma().get_start()
self.get_stack_range().end()
}
pub fn get_brk(&self) -> usize {
self.brk
}
pub fn get_mmap_start(&self) -> usize {
self.get_stack_vma().get_end()
}
// TODO: support overriding the mmapping of an already mmapped range
pub fn mmap(&mut self, addr: usize, size: usize, flags: VMAreaFlags) -> Result<usize, Error> {
let alloc_options = {
let mmap_start_addr = self.get_mmap_start();
let mut alloc_options = VMAllocOptions::new(size)?;
alloc_options
.addr(if addr == 0 {
VMAddrOption::Beyond(mmap_start_addr)
} else {
if addr < mmap_start_addr {
return errno!(EINVAL, "Beyond valid memory range");
}
// TODO: Fixed or Hint? Should handle mmap flags
VMAddrOption::Hint(addr)
})?
.growth(VMGrowthType::Upward)?;
alloc_options
};
// TODO: when failed, try to resize data_domain
let new_mmap_vma = self
.get_data_domain_mut()
.alloc_area(&alloc_options, flags)?;
let addr = new_mmap_vma.get_start();
self.mmap_vmas.push(Box::new(new_mmap_vma));
Ok(addr)
}
// TODO: handle the case when the given range [addr, addr + size)
// does not match exactly with any vma. For example, when this range
// cover multiple ranges or cover some range partially.
pub fn munmap(&mut self, addr: usize, size: usize) -> Result<(), Error> {
let mmap_vma_i = {
let mmap_vma_i = self
.get_mmap_vmas()
.iter()
.position(|vma| vma.get_start() == addr && vma.get_end() == addr + size);
if mmap_vma_i.is_none() {
return errno!(EINVAL, "memory area not found");
}
mmap_vma_i.unwrap()
};
let removed_mmap_vma = self.mmap_vmas.swap_remove(mmap_vma_i);
self.get_data_domain_mut()
.dealloc_area(unbox(removed_mmap_vma));
Ok(())
}
pub fn mremap(
&mut self,
old_addr: usize,
old_size: usize,
options: &VMResizeOptions,
) -> Result<usize, Error> {
// TODO: Implement this!
errno!(EINVAL, "Not implemented")
}
pub fn brk(&mut self, new_brk: usize) -> Result<usize, Error> {
let (heap_start, heap_end) = {
let heap_vma = self.heap_vma.as_ref().unwrap();
(heap_vma.get_start(), heap_vma.get_end())
};
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
if new_brk == 0 {
return Ok(self.get_brk());
} else if new_brk < heap_start {
return errno!(EINVAL, "New brk address is too low");
} else if new_brk > heap_end {
let resize_options = {
let new_heap_end = align_up(new_brk, PAGE_SIZE);
let new_heap_size = new_heap_end - heap_start;
let mut options = VMResizeOptions::new(new_heap_size)?;
options
.addr(VMAddrOption::Fixed(heap_start))
.fill_zeros(true);
options
};
let heap_vma = self.heap_vma.as_mut().unwrap();
let data_domain = self.data_domain.as_mut().unwrap();
data_domain.resize_area(heap_vma, &resize_options)?;
return errno!(EINVAL, "New brk address is too high");
}
if self.brk < new_brk {
unsafe { fill_zeros(self.brk, new_brk - self.brk) };
}
self.brk = new_brk;
return Ok(new_brk);
}
fn get_data_domain_mut(&mut self) -> &mut Box<VMDomain> {
self.data_domain.as_mut().unwrap()
pub fn mmap(
&mut self,
addr: usize,
size: usize,
perms: VMPerms,
flags: MMapFlags,
fd: FileDesc,
offset: usize
) -> Result<usize, Error> {
let addr_option = {
if flags.contains(MMapFlags::MAP_FIXED) {
if !self.process_range.range().contains(addr) {
return errno!(EINVAL, "Beyond valid memory range");
}
VMMapAddr::Fixed(addr)
} else {
if addr == 0 {
VMMapAddr::Any
} else {
VMMapAddr::Hint(addr)
}
}
};
let initializer = {
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
VMInitializer::FillZeros()
} else {
let current_ref = get_current();
let current_process = current_ref.lock().unwrap();
let file_ref = current_process.get_files().lock().unwrap().get(fd)?;
VMInitializer::LoadFromFile { file: file_ref, offset: offset }
}
};
let mmap_options = VMMapOptionsBuilder::default()
.size(size)
.addr(addr_option)
.initializer(initializer)
.build()?;
let mmap_addr = self.mmap_manager.mmap(&mmap_options)?;
Ok(mmap_addr)
}
pub fn munmap(&mut self, addr: usize, size: usize) -> Result<(), Error> {
self.mmap_manager.munmap(addr, size)
}
}
impl Drop for ProcessVM {
fn drop(&mut self) {
// Remove all vma from the domain
{
let data_domain = self.data_domain.as_mut().unwrap();
data_domain.dealloc_area(unbox(self.code_vma.take().unwrap()));
data_domain.dealloc_area(unbox(self.data_vma.take().unwrap()));
data_domain.dealloc_area(unbox(self.heap_vma.take().unwrap()));
data_domain.dealloc_area(unbox(self.stack_vma.take().unwrap()));
for mmap_vma in self.mmap_vmas.drain(..) {
data_domain.dealloc_area(unbox(mmap_vma));
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
// Remove the domain from its parent space
DATA_SPACE
.lock()
.unwrap()
.dealloc_domain(unbox(self.data_domain.take().unwrap()));
impl MMapFlags {
pub fn from_u32(bits: u32) -> Result<MMapFlags, Error> {
// TODO: detect non-supporting flags
MMapFlags::from_bits(bits)
.ok_or_else(|| (Errno::EINVAL, "Unknown mmap flags").into())
}
}
bitflags! {
pub struct VMPerms : u32 {
const READ = 0x1;
const WRITE = 0x2;
const EXEC = 0x4;
}
}
impl VMPerms {
pub fn can_read(&self) -> bool {
self.contains(VMPerms::READ)
}
pub fn can_write(&self) -> bool {
self.contains(VMPerms::WRITE)
}
pub fn can_execute(&self) -> bool {
self.contains(VMPerms::EXEC)
}
pub fn from_u32(bits: u32) -> Result<VMPerms, Error> {
VMPerms::from_bits(bits)
.ok_or_else(|| (Errno::EINVAL, "Unknown permission bits").into())
}
}
unsafe fn fill_zeros(addr: usize, size: usize) {
let ptr = addr as *mut u8;
let buf = slice::from_raw_parts_mut(ptr, size);
for b in buf {
*b = 0;
}
}

@@ -0,0 +1,89 @@
use super::*;
use super::vm_manager::{VMRange, VMManager, VMMapOptionsBuilder, VMMapOptions};
/// The virtual memory manager for the entire user space
#[derive(Debug)]
pub struct UserSpaceVMManager {
vm_manager: Arc<SgxMutex<VMManager>>,
}
impl UserSpaceVMManager {
pub unsafe fn from(addr: usize, size: usize) -> Result<UserSpaceVMManager, Error> {
let vm_manager = Arc::new(SgxMutex::new(VMManager::from(addr, size)?));
Ok(UserSpaceVMManager {
vm_manager,
})
}
pub fn alloc(&self, size: usize) -> Result<UserSpaceVMRange, Error> {
let user_vm_range = unsafe {
let mmap_options = VMMapOptionsBuilder::default()
.size(size)
.build()?;
let mut vm_manager = self.vm_manager.lock().unwrap();
let user_vm_addr = vm_manager.mmap(&mmap_options)?;
VMRange::from_unchecked(user_vm_addr, user_vm_addr + size)
};
Ok(UserSpaceVMRange::new(user_vm_range, self.vm_manager.clone()))
}
pub fn alloc_dummy(&self) -> UserSpaceVMRange {
let empty_user_vm_range = unsafe {
VMRange::from_unchecked(0, 0)
};
UserSpaceVMRange::new(empty_user_vm_range, self.vm_manager.clone())
}
}
lazy_static! {
pub static ref USER_SPACE_VM_MANAGER: UserSpaceVMManager = {
let (addr, size) = {
let mut addr: usize = 0;
let mut size: usize = 0;
unsafe { vm_get_prealloced_data_space(&mut addr, &mut size) };
(addr, size)
};
let user_space_vm_manager = unsafe {
match UserSpaceVMManager::from(addr, size) {
Ok(user_space_vm) => user_space_vm,
Err(_) => panic!("Failed to initialize the user space virtual memory"),
}
};
user_space_vm_manager
};
}
extern "C" {
pub fn vm_get_prealloced_data_space(addr: &mut usize, size: &mut usize);
}
#[derive(Debug)]
pub struct UserSpaceVMRange {
vm_range: VMRange,
vm_manager: Arc<SgxMutex<VMManager>>,
}
impl UserSpaceVMRange {
fn new(vm_range: VMRange, vm_manager: Arc<SgxMutex<VMManager>>) -> UserSpaceVMRange {
UserSpaceVMRange {
vm_range,
vm_manager,
}
}
pub fn range(&self) -> &VMRange {
&self.vm_range
}
}
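// RAII: dropping a UserSpaceVMRange hands its memory back to the shared
// user-space VM manager via the munmap call below.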
impl Drop for UserSpaceVMRange {
fn drop(&mut self) {
let addr = self.vm_range.start();
let size = self.vm_range.size();
if size == 0 { return; }
let mut vm_manager = self.vm_manager.lock().unwrap();
vm_manager.munmap(addr, size).expect("munmap should always succeed");
}
}

@@ -1,116 +0,0 @@
use super::*;
#[derive(Debug)]
pub struct VMSpace {
range: VMRange,
guard_type: VMGuardAreaType,
}
impl_vmrange_trait_for!(VMSpace, range);
impl VMSpace {
pub unsafe fn new(
addr: usize,
size: usize,
guard_type: VMGuardAreaType,
desc: &str,
) -> Result<VMSpace, Error> {
let addr = align_up(addr, PAGE_SIZE);
let size = align_down(size, PAGE_SIZE);
let range = unsafe { VMRange::new(addr, addr + size, VMGrowthType::Fixed, desc)? };
Ok(VMSpace { range, guard_type })
}
pub fn get_guard_type(&self) -> VMGuardAreaType {
self.guard_type
}
pub fn alloc_domain(&mut self, size: usize, desc: &str) -> Result<VMDomain, Error> {
let mut options = VMAllocOptions::new(size)?;
options.growth(VMGrowthType::Upward)?.description(desc)?;
let new_range = self.range.alloc_subrange(&options)?;
Ok(VMDomain { range: new_range })
}
pub fn dealloc_domain(&mut self, mut domain: VMDomain) {
self.range.dealloc_subrange(&mut domain.range)
}
pub fn resize_domain(&mut self, domain: &mut VMDomain, new_size: usize) -> Result<(), Error> {
let options = VMResizeOptions::new(new_size)?;
self.range.resize_subrange(&mut domain.range, &options)
}
}
#[derive(Debug)]
pub struct VMDomain {
range: VMRange,
}
impl_vmrange_trait_for!(VMDomain, range);
impl VMDomain {
pub fn alloc_area(
&mut self,
options: &VMAllocOptions,
flags: VMAreaFlags,
) -> Result<VMArea, Error> {
let new_range = self.range.alloc_subrange(options)?;
Ok(VMArea {
range: new_range,
flags: flags,
})
}
pub fn dealloc_area(&mut self, mut area: VMArea) {
self.range.dealloc_subrange(&mut area.range)
}
pub fn resize_area(
&mut self,
area: &mut VMArea,
options: &VMResizeOptions,
) -> Result<(), Error> {
self.range.resize_subrange(&mut area.range, options)
}
}
#[derive(Debug)]
pub struct VMArea {
range: VMRange,
flags: VMAreaFlags,
}
impl_vmrange_trait_for!(VMArea, range);
impl VMArea {
pub fn get_flags(&self) -> &VMAreaFlags {
&self.flags
}
pub fn get_flags_mut(&mut self) -> &mut VMAreaFlags {
&mut self.flags
}
}
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct VMAreaFlags(pub u32);
pub const VM_AREA_FLAG_R: u32 = 0x1;
pub const VM_AREA_FLAG_W: u32 = 0x2;
pub const VM_AREA_FLAG_X: u32 = 0x4;
impl VMAreaFlags {
pub fn can_execute(&self) -> bool {
self.0 & VM_AREA_FLAG_X == VM_AREA_FLAG_X
}
pub fn can_write(&self) -> bool {
self.0 & VM_AREA_FLAG_W == VM_AREA_FLAG_W
}
pub fn can_read(&self) -> bool {
self.0 & VM_AREA_FLAG_R == VM_AREA_FLAG_R
}
}

@@ -0,0 +1,406 @@
use super::*;
use std::{slice};
#[derive(Clone, Debug)]
pub enum VMInitializer {
DoNothing(),
FillZeros(),
LoadFromFile { file: FileRef, offset: usize },
}
impl Default for VMInitializer {
fn default() -> VMInitializer {
VMInitializer::DoNothing()
}
}
impl VMInitializer {
pub fn initialize(&self, buf: &mut [u8]) -> Result<(), Error> {
match self {
VMInitializer::DoNothing() => {
// Do nothing
},
VMInitializer::FillZeros() => {
for b in buf {
*b = 0;
}
},
VMInitializer::LoadFromFile { file, offset } => {
// TODO: make sure that read_at does not move the file cursor
let len = file.read_at(*offset, buf)?;
for b in &mut buf[len..] {
*b = 0;
}
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMMapAddr {
Any, // Free to choose any address
Hint(usize), // Prefer the given address
Fixed(usize), // Must be the given address
}
impl Default for VMMapAddr {
fn default() -> VMMapAddr {
VMMapAddr::Any
}
}
#[derive(Builder, Debug, Default)]
#[builder(build_fn(skip), no_std)]
pub struct VMMapOptions {
size: usize,
align: usize,
addr: VMMapAddr,
initializer: VMInitializer
}
// VMMapOptionsBuilder is generated automatically, except the build function
impl VMMapOptionsBuilder {
pub fn build(&self) -> Result<VMMapOptions, Error> {
let size = {
let size = self.size.ok_or_else(|| (Errno::EINVAL, "Invalid size for mmap"))?;
if size == 0 {
return errno!(EINVAL, "Invalid size for mmap");
}
align_up(size, PAGE_SIZE)
};
let align = {
let align = self.align.unwrap_or(PAGE_SIZE);
if align == 0 || align % PAGE_SIZE != 0 {
return errno!(EINVAL, "Invalid align for mmap");
}
align
};
let addr = {
let addr = self.addr.unwrap_or_default();
match addr {
// TODO: check addr + size overflow
VMMapAddr::Any => {
VMMapAddr::Any
}
VMMapAddr::Hint(addr) => {
let addr = align_down(addr, PAGE_SIZE);
VMMapAddr::Hint(addr)
}
VMMapAddr::Fixed(addr) => {
if addr % align != 0 {
return errno!(EINVAL, "Unaligned addr for fixed mmap");
}
VMMapAddr::Fixed(addr)
}
}
};
let initializer = match self.initializer.as_ref() {
Some(initializer) => { initializer.clone() }
None => { VMInitializer::default() }
};
Ok(VMMapOptions {
size,
align,
addr,
initializer,
})
}
}
impl VMMapOptions {
pub fn size(&self) -> &usize {
&self.size
}
pub fn addr(&self) -> &VMMapAddr {
&self.addr
}
pub fn initializer(&self) -> &VMInitializer {
&self.initializer
}
}
#[derive(Debug, Default)]
pub struct VMManager {
range: VMRange,
sub_ranges: Vec<VMRange>,
}
impl VMManager {
pub fn from(addr: usize, size: usize) -> Result<VMManager, Error> {
let range = VMRange::from(addr, addr + size)?;
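// Two zero-sized sentry sub-ranges at both ends guarantee that every free
// gap lies between two consecutive sub-ranges, keeping the window-based
// scan in find_free_subrange simple.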
let sub_ranges = {
let start = range.start();
let end = range.end();
let start_sentry = VMRange::from(start, start)?;
let end_sentry = VMRange::from(end, end)?;
vec![start_sentry, end_sentry]
};
Ok(VMManager {
range,
sub_ranges,
})
}
pub fn range(&self) -> &VMRange {
&self.range
}
pub fn mmap(
&mut self,
options: &VMMapOptions,
) -> Result<usize, Error> {
// TODO: respect options.align when mmapping
let addr = *options.addr();
let size = *options.size();
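// Linux-style MAP_FIXED semantics: unmap anything overlapping the
// requested range before allocating it.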
if let VMMapAddr::Fixed(addr) = addr {
self.munmap(addr, size)?;
}
// Allocate a new subrange for this mmap request
let (insert_idx, free_subrange) = self.find_free_subrange(size, addr)?;
let new_subrange = self.alloc_subrange_from(size, addr, &free_subrange);
let new_subrange_addr = new_subrange.start();
// Initialize the memory of the new subrange
unsafe {
let buf_ptr = new_subrange.start() as *mut u8;
let buf_size = new_subrange.size() as usize;
let buf = slice::from_raw_parts_mut(buf_ptr, buf_size);
options.initializer.initialize(buf)?;
}
// After initializing, we can safely add the new subrange
self.sub_ranges.insert(insert_idx, new_subrange);
Ok(new_subrange_addr)
}
pub fn munmap(&mut self, addr: usize, size: usize) -> Result<(), Error> {
let size = {
if size == 0 {
return errno!(EINVAL, "size of munmap must not be zero");
}
align_up(size, PAGE_SIZE)
};
let munmap_range = {
let munmap_range = VMRange::from(addr, addr + size)?;
let effective_munmap_range_opt = munmap_range.intersect(&self.range);
if effective_munmap_range_opt.is_none() {
return Ok(())
}
let effective_munmap_range = effective_munmap_range_opt.unwrap();
if effective_munmap_range.empty() {
return Ok(())
}
effective_munmap_range
};
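// Subtract the munmap range from every overlapping sub-range: a partial
// unmap leaves the remainder (possibly split in two), and one munmap may
// remove several mappings at once.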
let new_sub_ranges = self.sub_ranges
.iter()
.flat_map(|subrange| {
if subrange.size() > 0 {
subrange.subtract(&munmap_range)
} else { // Keep the two sentry subranges intact
vec![*subrange]
}
})
.collect();
self.sub_ranges = new_sub_ranges;
Ok(())
}
// Find the free subrange that satisfies the constraints of size and address
fn find_free_subrange(
&mut self,
size: usize,
addr: VMMapAddr
) -> Result<(usize, VMRange), Error> {
// TODO: reduce the complexity from O(N) to O(log(N)), where N is
// the number of existing subranges.
// Record the minimal free range that satisfies the constraints
let mut result_free_range: Option<VMRange> = None;
let mut result_idx: Option<usize> = None;
for (idx, range_pair) in self.sub_ranges.windows(2).enumerate() {
// Since we have two sentry sub_ranges at both ends, we can be sure that the free
// space only appears between two consecutive sub_ranges.
let pre_range = &range_pair[0];
let next_range = &range_pair[1];
let mut free_range = {
let free_range_start = pre_range.end();
let free_range_end = next_range.start();
let free_range_size = free_range_end - free_range_start;
if free_range_size < size {
continue;
}
unsafe {
VMRange::from_unchecked(free_range_start, free_range_end)
}
};
match addr {
// Want a minimal free_range
VMMapAddr::Any => {}
// Prefer to have free_range.start == addr
VMMapAddr::Hint(addr) => {
if free_range.contains(addr) {
if free_range.end() - addr >= size {
free_range.start = addr;
let insert_idx = idx + 1;
return Ok((insert_idx, free_range));
}
}
}
// Must have free_range.start == addr
VMMapAddr::Fixed(addr) => {
if free_range.start() > addr {
return errno!(ENOMEM, "Not enough memory for fixed mmap");
}
if !free_range.contains(addr) {
continue;
}
if free_range.end() - addr < size {
return errno!(ENOMEM, "Not enough memory for fixed mmap");
}
free_range.start = addr;
let insert_idx = idx + 1;
return Ok((insert_idx, free_range));
}
}
if result_free_range == None
|| result_free_range.as_ref().unwrap().size() > free_range.size() {
result_free_range = Some(free_range);
result_idx = Some(idx);
}
}
if result_free_range.is_none() {
return errno!(ENOMEM, "Cannot find enough memory");
}
let free_range = result_free_range.unwrap();
let insert_idx = result_idx.unwrap() + 1;
Ok((insert_idx, free_range))
}
fn alloc_subrange_from(
&self,
size: usize,
addr: VMMapAddr,
free_subrange: &VMRange
) -> VMRange {
debug_assert!(free_subrange.size() >= size);
let mut new_subrange = *free_subrange;
if let VMMapAddr::Fixed(addr) = addr {
debug_assert!(addr == new_subrange.start());
}
new_subrange.resize(size);
new_subrange
}
}
#[derive(Clone, Copy, Default, Debug, PartialEq)]
pub struct VMRange {
start: usize,
end: usize,
}
impl VMRange {
pub fn from(start: usize, end: usize) -> Result<VMRange, Error> {
if start % PAGE_SIZE != 0 || end % PAGE_SIZE != 0 || start > end {
return errno!(EINVAL, "invalid start or end");
}
Ok(VMRange {
start: start,
end: end,
})
}
pub unsafe fn from_unchecked(start: usize, end: usize) -> VMRange {
debug_assert!(start % PAGE_SIZE == 0);
debug_assert!(end % PAGE_SIZE == 0);
debug_assert!(start <= end);
VMRange {
start: start,
end: end,
}
}
pub fn start(&self) -> usize {
self.start
}
pub fn end(&self) -> usize {
self.end
}
pub fn size(&self) -> usize {
self.end - self.start
}
pub fn resize(&mut self, new_size: usize) {
self.end = self.start + new_size;
}
pub fn empty(&self) -> bool {
self.start == self.end
}
pub fn is_superset_of(&self, other: &VMRange) -> bool {
self.start() <= other.start() && other.end() <= self.end()
}
pub fn contains(&self, addr: usize) -> bool {
self.start() <= addr && addr < self.end()
}
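/// Returns the parts of `self` not covered by `other`. For example,
/// subtracting [0x3000, 0x5000) from [0x2000, 0x8000) yields the two
/// ranges [0x2000, 0x3000) and [0x5000, 0x8000).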
pub fn subtract(&self, other: &VMRange) -> Vec<VMRange> {
let self_start = self.start();
let self_end = self.end();
let other_start = other.start();
let other_end = other.end();
match (self_start < other_start, other_end < self_end) {
(false, false) => {
Vec::new()
}
(false, true) => unsafe {
vec![VMRange::from_unchecked(self_start.max(other_end), self_end)]
}
(true, false) => unsafe {
vec![VMRange::from_unchecked(self_start, self_end.min(other_start))]
}
(true, true) => unsafe {
vec![VMRange::from_unchecked(self_start, other_start),
VMRange::from_unchecked(other_end, self_end)]
}
}
}
pub fn intersect(&self, other: &VMRange) -> Option<VMRange> {
let intersection_start = self.start().max(other.start());
let intersection_end = self.end().min(other.end());
if intersection_start > intersection_end {
return None;
}
unsafe {
Some(VMRange::from_unchecked(intersection_start, intersection_end))
}
}
}

@@ -1,582 +0,0 @@
use super::*;
use std::fmt;
pub trait VMRangeTrait {
fn get_start(&self) -> usize;
fn get_end(&self) -> usize;
fn get_size(&self) -> usize;
fn get_growth(&self) -> VMGrowthType;
fn contains_obj(&self, ptr: usize, size: usize) -> bool;
}
macro_rules! impl_vmrange_trait_for {
($struct_name: ident, $field: ident) => {
impl VMRangeTrait for $struct_name {
fn get_start(&self) -> usize {
self.$field.get_start()
}
fn get_end(&self) -> usize {
self.$field.get_end()
}
fn get_size(&self) -> usize {
self.$field.get_end() - self.$field.get_start()
}
fn get_growth(&self) -> VMGrowthType {
self.$field.get_growth()
}
fn contains_obj(&self, ptr: usize, size: usize) -> bool {
self.$field.contains_obj(ptr, size)
}
}
};
}
#[derive(Debug)]
pub struct VMRange {
inner: VMRangeInner,
sub_ranges: Option<Vec<VMRangeInner>>,
is_dealloced: bool,
description: String,
}
impl_vmrange_trait_for!(VMRange, inner);
impl VMRange {
pub unsafe fn new(
start: usize,
end: usize,
growth: VMGrowthType,
description: &str,
) -> Result<VMRange, Error> {
if start % PAGE_SIZE != 0 || end % PAGE_SIZE != 0 {
return errno!(EINVAL, "Invalid start and/or end");
}
Ok(VMRange {
inner: VMRangeInner::new(start, end, growth),
sub_ranges: None,
is_dealloced: false,
description: description.to_owned(),
})
}
pub fn alloc_subrange(&mut self, options: &VMAllocOptions) -> Result<VMRange, Error> {
debug_assert!(!self.is_dealloced);
// Lazy initialize the subrange array upon the first allocation
if self.sub_ranges.is_none() {
self.init_subrange_array()?;
}
// Find a free space that satisfies the options
let free_space = self.look_for_free_space(options)?;
// Allocate a new subrange from the free space
let (new_subrange_idx, new_subrange_inner) = {
let (new_subrange_start, new_subrange_end) =
self.alloc_from_free_space(&free_space, options);
debug_assert!(free_space.contains(new_subrange_start));
debug_assert!(free_space.contains(new_subrange_end));
(
free_space.index_in_subranges,
VMRangeInner::new(new_subrange_start, new_subrange_end, options.growth),
)
};
self.get_subranges_mut()
.insert(new_subrange_idx, new_subrange_inner);
if options.fill_zeros {
// Init the memory area with all zeros
unsafe {
let mem_ptr = new_subrange_inner.get_start() as *mut c_void;
let mem_size = new_subrange_inner.get_size() as size_t;
memset(mem_ptr, 0 as c_int, mem_size);
}
}
// Although there are two copies of the newly created VMRangeInner obj,
// we can keep them in sync as all mutation on VMRange object must
// be carried out through dealloc_subrange() and resize_subrange() that
// takes both a (parent) range and its (child) sub-range as parameters.
// We update both copies of VMRangeInner, one in parent and the
// other in child, in dealloc_subrange and resize_subrange functions.
Ok(VMRange {
inner: new_subrange_inner,
sub_ranges: None,
is_dealloced: false,
description: options.description.clone(),
})
}
pub fn dealloc_subrange(&mut self, subrange: &mut VMRange) {
debug_assert!(!self.is_dealloced);
debug_assert!(!subrange.is_dealloced);
debug_assert!(self.sub_ranges.is_some());
// Remove the sub-range
let domain_i = self.position_subrange(subrange);
self.get_subranges_mut().remove(domain_i);
// When all sub-ranges are removed, remove the sub-range array
if self.get_subranges().len() == 2 {
// two sentinel sub-ranges excluded
self.sub_ranges = None;
}
subrange.inner.end = subrange.inner.start;
subrange.is_dealloced = true;
}
pub fn resize_subrange(
&mut self,
subrange: &mut VMRange,
options: &VMResizeOptions,
) -> Result<(), Error> {
debug_assert!(!self.is_dealloced);
debug_assert!(!subrange.is_dealloced);
debug_assert!(self.sub_ranges.is_some());
// Get valid parameters from options
let new_size = options.new_size;
let new_addr = options.new_addr;
// Handle no-resizing cases
if subrange.get_size() == new_size {
return Ok(());
}
if subrange.get_growth() == VMGrowthType::Fixed {
return errno!(EINVAL, "Cannot resize a fixed range");
}
// Shrink
if new_size < subrange.get_size() {
self.shrink_subrange_to(subrange, new_size)
}
// Grow
else {
self.grow_subrange_to(subrange, new_size, options.fill_zeros)
}
}
pub fn get_description(&self) -> &str {
&self.description
}
fn init_subrange_array(&mut self) -> Result<(), Error> {
// Use dummy VMRanges as sentinel objects at both ends to make the allocation
// and deallocation algorithm simpler
let start = self.get_start();
let end = self.get_end();
let start_sentry = VMRangeInner::new(start, start, VMGrowthType::Fixed);
let end_sentry = VMRangeInner::new(end, end, VMGrowthType::Fixed);
self.sub_ranges = Some(vec![start_sentry, end_sentry]);
Ok(())
}
// Find a free space for allocating a sub VMRange
fn look_for_free_space(&mut self, options: &VMAllocOptions) -> Result<FreeSpace, Error> {
// TODO: reduce the complexity from O(N) to O(log(N)), where N is
// the number of existing subranges.
// Get valid parameters from options
let size = options.size;
let addr = options.addr;
let growth = options.growth;
// Record the minimal free space that satisfies the options
let mut min_big_enough_free_space: Option<FreeSpace> = None;
let sub_ranges = self.get_subranges();
for (idx, range_pair) in sub_ranges.windows(2).enumerate() {
let pre_range = &range_pair[0];
let next_range = &range_pair[1];
let (free_range_start, free_range_end) = {
let free_range_start = pre_range.get_end();
let free_range_end = next_range.get_start();
let free_range_size = free_range_end - free_range_start;
if free_range_size < size {
continue;
}
(free_range_start, free_range_end)
};
let mut free_space = FreeSpace {
index_in_subranges: idx + 1,
start: free_range_start,
end: free_range_end,
may_neighbor_grow: (
pre_range.growth == VMGrowthType::Upward,
next_range.growth == VMGrowthType::Downward,
),
};
match addr {
// Want a minimal free_space
VMAddrOption::Any => {}
// Prefer to have free_space.start == addr
VMAddrOption::Hint(addr) => {
if free_space.contains(addr) {
if free_space.end - addr >= size {
free_space.start = addr;
return Ok(free_space);
}
}
}
// Must have free_space.start == addr
VMAddrOption::Fixed(addr) => {
if !free_space.contains(addr) {
continue;
}
if free_space.end - addr < size {
return errno!(ENOMEM, "not enough memory");
}
free_space.start = addr;
return Ok(free_space);
}
// Must have free_space.start >= addr
VMAddrOption::Beyond(addr) => {
if free_space.end < addr {
continue;
}
if free_space.contains(addr) {
free_space.start = addr;
if free_space.get_size() < size {
continue;
}
}
}
}
if min_big_enough_free_space == None
|| free_space < *min_big_enough_free_space.as_ref().unwrap()
{
min_big_enough_free_space = Some(free_space);
}
}
min_big_enough_free_space.ok_or_else(|| Error::new(Errno::ENOMEM, "not enough space"))
}
fn alloc_from_free_space(
&self,
free_space: &FreeSpace,
options: &VMAllocOptions,
) -> (usize, usize) {
// Get valid parameters from options
let size = options.size;
let addr_option = options.addr;
let growth = options.growth;
if let VMAddrOption::Fixed(addr) = addr_option {
return (addr, addr + size);
} else if let VMAddrOption::Hint(addr) = addr_option {
if free_space.start == addr {
return (addr, addr + size);
}
}
let should_no_gap_to_pre_domain =
free_space.may_neighbor_grow.0 == false && growth != VMGrowthType::Downward;
let should_no_gap_to_next_domain =
free_space.may_neighbor_grow.1 == false && growth != VMGrowthType::Upward;
let addr = if should_no_gap_to_pre_domain {
free_space.start
} else if should_no_gap_to_next_domain {
free_space.end - size
} else {
// We want to leave some space at both ends in case
// this sub-range or neighbor sub-range needs to grow later.
// As a simple heuristic, we put this sub-range near the
// center between the previous and next sub-ranges.
let offset = align_down((free_space.get_size() - size) / 2, PAGE_SIZE);
free_space.start + offset
};
(addr, addr + size)
}
fn position_subrange(&self, subrange: &VMRange) -> usize {
let sub_ranges = self.get_subranges();
sub_ranges
.iter()
.position(|d| d == &subrange.inner)
.unwrap()
}
fn get_subranges(&self) -> &Vec<VMRangeInner> {
self.sub_ranges.as_ref().unwrap()
}
fn get_subranges_mut(&mut self) -> &mut Vec<VMRangeInner> {
self.sub_ranges.as_mut().unwrap()
}
fn shrink_subrange_to(&mut self, subrange: &mut VMRange, new_size: usize) -> Result<(), Error> {
let subrange_i = self.position_subrange(subrange);
let subranges = self.get_subranges_mut();
if subrange.inner.growth == VMGrowthType::Upward {
// Can we do shrink?
let min_new_size = match subrange.sub_ranges.as_mut() {
Some(child_subranges) => {
let child_last_subrange = &child_subranges[child_subranges.len() - 2];
child_last_subrange.end - subrange.inner.start
}
None => 0,
};
if new_size < min_new_size {
return errno!(ENOMEM, "Cannot shrink to new size");
}
// Do shrink
let new_subrange_end = subrange.inner.start + new_size;
subrange.inner.end = new_subrange_end;
// Sync state
subranges[subrange_i].end = new_subrange_end;
} else {
// self.growth == VMGrowthType::Downward
// Can we do shrink?
let min_new_size = match subrange.sub_ranges.as_mut() {
Some(child_subranges) => {
let child_first_subrange = &child_subranges[1];
subrange.inner.end - child_first_subrange.start
}
None => 0,
};
if new_size < min_new_size {
return errno!(ENOMEM, "Cannot shrink to new size");
}
// Do shrink
let new_subrange_start = subrange.inner.end - new_size;
subrange.inner.start = new_subrange_start;
// Sync state
subranges[subrange_i].start = new_subrange_start;
}
Ok(())
}
fn grow_subrange_to(
&mut self,
subrange: &mut VMRange,
new_size: usize,
fill_zeros: bool,
) -> Result<(), Error> {
let subrange_i = self.position_subrange(subrange);
let subranges = self.get_subranges_mut();
let subrange_old_start = subrange.inner.start;
let subrange_old_end = subrange.inner.end;
let subrange_old_size = subrange.get_size();
if subrange.inner.growth == VMGrowthType::Upward {
// Can we grow upward?
let max_new_size = {
let next_subrange = &subranges[subrange_i + 1];
next_subrange.start - subrange_old_start
};
if new_size > max_new_size {
return errno!(ENOMEM, "Cannot grow to new size");
}
// Do grow
let subrange_new_end = subrange_old_start + new_size;
subrange.inner.end = subrange_new_end;
// Sync state
subranges[subrange_i].end = subrange_new_end;
// Init memory
if fill_zeros {
unsafe {
let mem_ptr = subrange_old_end as *mut c_void;
let mem_size = (subrange_new_end - subrange_old_end) as size_t;
memset(mem_ptr, 0 as c_int, mem_size);
}
}
} else {
// self.growth == VMGrowthType::Downward
// Can we grow downward?
let max_new_size = {
let pre_subrange = &subranges[subrange_i - 1];
subrange_old_end - pre_subrange.end
};
if new_size > max_new_size {
return errno!(ENOMEM, "Cannot grow to new size");
}
// Do grow
let subrange_new_start = subrange_old_end - new_size;
subrange.inner.start = subrange_new_start;
// Sync state
subranges[subrange_i].start = subrange_new_start;
// Init memory
if fill_zeros {
unsafe {
let mem_ptr = subrange_new_start as *mut c_void;
let mem_size = (subrange_old_start - subrange_new_start) as size_t;
memset(mem_ptr, 0 as c_int, mem_size);
}
}
}
Ok(())
}
}
impl PartialOrd for VMRange {
fn partial_cmp(&self, other: &VMRange) -> Option<Ordering> {
self.inner.partial_cmp(&other.inner)
}
}
impl PartialEq for VMRange {
fn eq(&self, other: &VMRange) -> bool {
self.inner.eq(&other.inner)
}
}
impl Drop for VMRange {
fn drop(&mut self) {
if !self.is_dealloced {
panic!("A range must be dealloc'ed before drop");
}
}
}
unsafe impl Send for VMRange {}
unsafe impl Sync for VMRange {}
#[derive(Clone, Copy)]
pub struct VMRangeInner {
start: usize,
end: usize,
growth: VMGrowthType,
}
impl VMRangeInner {
pub fn new(start: usize, end: usize, growth: VMGrowthType) -> VMRangeInner {
debug_assert!(start % PAGE_SIZE == 0);
debug_assert!(end % PAGE_SIZE == 0);
VMRangeInner {
start: start,
end: end,
growth: growth,
}
}
}
impl VMRangeTrait for VMRangeInner {
fn get_start(&self) -> usize {
self.start
}
fn get_end(&self) -> usize {
self.end
}
fn get_size(&self) -> usize {
self.end - self.start
}
fn get_growth(&self) -> VMGrowthType {
self.growth
}
fn contains_obj(&self, ptr: usize, size: usize) -> bool {
let obj_begin = ptr as usize;
let obj_end = obj_begin + size;
self.start <= obj_begin && obj_end < self.end
}
}
impl fmt::Debug for VMRangeInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"VMRangeInner {{ start: 0x{:X?}, end: 0x{:X?}, size: 0x{:X?}, growth: {:?} }}",
self.start,
self.end,
self.get_size(),
self.growth
)
}
}
impl PartialOrd for VMRangeInner {
fn partial_cmp(&self, other: &VMRangeInner) -> Option<Ordering> {
if self.end <= other.start {
return Some(Ordering::Less);
} else if self.start >= other.end {
return Some(Ordering::Greater);
} else if self.start == other.start && self.end == other.end {
return Some(Ordering::Equal);
} else {
return None;
}
}
}
impl PartialEq for VMRangeInner {
fn eq(&self, other: &VMRangeInner) -> bool {
self.start == other.start && self.end == other.end
}
}
#[derive(Debug)]
struct FreeSpace {
index_in_subranges: usize,
start: usize,
end: usize,
may_neighbor_grow: (bool, bool),
}
impl FreeSpace {
fn get_neighbor_pressure(&self) -> u32 {
let mut pressure = 0;
pressure += if self.may_neighbor_grow.0 { 1 } else { 0 };
pressure += if self.may_neighbor_grow.1 { 1 } else { 0 };
pressure
}
fn get_size(&self) -> usize {
self.end - self.start
}
fn contains(&self, addr: usize) -> bool {
self.start <= addr && addr < self.end
}
}
impl PartialEq for FreeSpace {
fn eq(&self, other: &FreeSpace) -> bool {
self.get_size() == other.get_size()
&& self.get_neighbor_pressure() == other.get_neighbor_pressure()
}
}
impl PartialOrd for FreeSpace {
fn partial_cmp(&self, other: &FreeSpace) -> Option<Ordering> {
let self_size = self.get_size();
let other_size = other.get_size();
if self_size < other_size {
Some(Ordering::Less)
} else if self_size > other_size {
Some(Ordering::Greater)
} else {
// The less neighbor pressure, the larger the free space
let self_neighbor_pressure = self.get_neighbor_pressure();
let other_neighbor_pressure = other.get_neighbor_pressure();
if self_neighbor_pressure > other_neighbor_pressure {
Some(Ordering::Less)
} else if self_neighbor_pressure < other_neighbor_pressure {
Some(Ordering::Greater)
} else {
Some(Ordering::Equal)
}
}
}
}
#[link(name = "sgx_tstdc")]
extern "C" {
pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
}

@@ -4,7 +4,7 @@ PROJECT_DIR := $(realpath $(CUR_DIR)/../)
# Dependencies: need to be compiled but not to run by any Makefile target
TEST_DEPS := dev_null
# Tests: need to be compiled and run by test-% target
TESTS := empty argv hello_world malloc file getpid spawn pipe time truncate readdir mkdir link tls pthread uname rlimit client server server_epoll unix_socket cout
TESTS := empty argv hello_world malloc mmap file getpid spawn pipe time truncate readdir mkdir link tls pthread uname rlimit client server server_epoll unix_socket cout
# Benchmarks: need to be compiled and run by bench-% target
BENCHES := spawn_and_exit_latency pipe_throughput unix_socket_throughput

test/mmap/Makefile (new file): 5 lines

@@ -0,0 +1,5 @@
include ../test_common.mk
EXTRA_C_FLAGS := -Wno-return-stack-address
EXTRA_LINK_FLAGS :=
BIN_ARGS :=

test/mmap/main.c (new file): 681 lines

@@ -0,0 +1,681 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
// ============================================================================
// Helper macros
// ============================================================================
#define KB (1024UL)
#define MB (1024 * 1024UL)
#define PAGE_SIZE (4 * KB)
#define _STR(x) #x
#define STR(x) _STR(x)
#define ARRAY_SIZE(array) (sizeof(array)/sizeof(array[0]))
#define ALIGN_DOWN(x, a) ((x) & ~(a-1)) // a must be a power of two
#define ALIGN_UP(x, a) ALIGN_DOWN((x+(a-1)), (a))
#define MIN(a, b) ((a) <= (b) ? (a) : (b))
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
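// Illustrative sanity checks of the alignment arithmetic (assuming a C11
// compiler for _Static_assert): with 4KB alignment, 0x1234 rounds down to
// 0x1000 and up to 0x2000.
_Static_assert(ALIGN_DOWN(0x1234, 0x1000) == 0x1000, "rounds down to the boundary");
_Static_assert(ALIGN_UP(0x1234, 0x1000) == 0x2000, "rounds up to the next boundary");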
#define throw_error(msg) while(1) { \
printf("ERROR: %s in func %s at line %d of file %s\n", \
(msg), __func__, __LINE__, __FILE__); \
return -1; \
}
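// Note: the while(1) wrapper runs exactly once -- the `return -1` leaves the
// enclosing test function on the first iteration -- so throw_error both
// reports the failure and aborts the current test case.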
#define MAX_MMAP_USED_MEMORY (4 * MB)
// ============================================================================
// Helper functions
// ============================================================================
static int fill_file_with_repeated_bytes(int fd, size_t len, int byte_val) {
char buf[PAGE_SIZE];
memset(buf, byte_val, sizeof(buf));
size_t remain_bytes = len;
while (remain_bytes > 0) {
int to_write_bytes = MIN(sizeof(buf), remain_bytes);
int written_bytes = write(fd, buf, to_write_bytes);
if (written_bytes != to_write_bytes) {
throw_error("file write failed");
}
remain_bytes -= written_bytes;
}
return 0;
}
static int check_bytes_in_buf(char* buf, size_t len, int expected_byte_val) {
for (size_t bi = 0; bi < len; bi++) {
if (buf[bi] != (char)expected_byte_val) {
printf("check_bytes_in_buf: expect %02X, but found %02X, at offset %lu\n",
(unsigned char)expected_byte_val, (unsigned char)buf[bi], bi);
return -1;
}
}
return 0;
}
static void* get_a_stack_ptr() {
volatile int a = 0;
return (void*) &a;
}
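// (get_a_stack_ptr returns the address of a local variable on purpose --
// hence -Wno-return-stack-address in the Makefile -- to obtain an address
// that is certainly occupied by the stack.)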
// ============================================================================
// Test suite initialization
// ============================================================================
// Get a valid range of address hints for mmap
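// The trick: mmap a large anonymous region, record where it lands, and
// munmap it right away. Hints inside that just-freed range are then likely
// to be honored by subsequent mmap calls.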
static int get_a_valid_range_of_hints(size_t *hint_begin, size_t *hint_end) {
size_t big_buf_len = MAX_MMAP_USED_MEMORY;
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
void* big_buf = mmap(NULL, big_buf_len, prot, flags, -1, 0);
if (big_buf == MAP_FAILED) {
throw_error("mmap failed");
}
int ret = munmap(big_buf, big_buf_len);
if (ret < 0) {
throw_error("munmap failed");
}
*hint_begin = (size_t)big_buf;
*hint_end = *hint_begin + big_buf_len;
return 0;
}
static size_t HINT_BEGIN, HINT_END;
int test_suite_init() {
if (get_a_valid_range_of_hints(&HINT_BEGIN, &HINT_END) < 0) {
throw_error("get_a_valid_range_of_hints failed");
}
return 0;
}
// ============================================================================
// Test cases for anonymous mmap
// ============================================================================
int test_anonymous_mmap() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
for (size_t len = PAGE_SIZE; len <= MAX_MMAP_USED_MEMORY; len *= 2) {
void* buf = mmap(NULL, len, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
throw_error("mmap failed");
}
if (check_bytes_in_buf(buf, len, 0) < 0) {
throw_error("the buffer is not initialized to zeros");
}
int ret = munmap(buf, len);
if (ret < 0) {
throw_error("munmap failed");
}
}
return 0;
}
int test_anonymous_mmap_randomly() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
void* bufs[16] = {NULL};
size_t lens[16];
size_t num_bufs = 0;
size_t used_memory = 0;
for (int i = 0; i < 5; i++) {
// Phase 1: do mmap with random sizes until there are no more buffers or memory
for (num_bufs = 0;
num_bufs < ARRAY_SIZE(bufs) && used_memory < MAX_MMAP_USED_MEMORY;
num_bufs++)
{
// Choose the mmap size randomly
size_t len = rand() % (MAX_MMAP_USED_MEMORY - used_memory) + 1;
len = ALIGN_UP(len, PAGE_SIZE);
// Do mmap
void* buf = mmap(NULL, len, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
throw_error("mmap failed");
}
bufs[num_bufs] = buf;
lens[num_bufs] = len;
// Update memory usage
used_memory += len;
}
// Phase 2: do munmap to free all the mapped memory
for (int bi = 0; bi < num_bufs; bi++) {
void* buf = bufs[bi];
size_t len = lens[bi];
int ret = munmap(buf, len);
if (ret < 0) {
throw_error("munmap failed");
}
bufs[bi] = NULL;
lens[bi] = 0;
}
num_bufs = 0;
used_memory = 0;
}
return 0;
}
int test_anonymous_mmap_randomly_with_good_hints() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
for (int i = 0; i < 10; i++) {
size_t hint = HINT_BEGIN + rand() % (HINT_END - HINT_BEGIN);
hint = ALIGN_DOWN(hint, PAGE_SIZE);
size_t len = rand() % (HINT_END - (size_t)hint);
len = ALIGN_UP(len+1, PAGE_SIZE);
void* addr = mmap((void*)hint, len, prot, flags, -1, 0);
if (addr != (void*)hint) {
throw_error("mmap with hint failed");
}
int ret = munmap(addr, len);
if (ret < 0) {
throw_error("munmap failed");
}
}
return 0;
}
int test_anonymous_mmap_with_bad_hints() {
size_t bad_hints[] = {
PAGE_SIZE, // too low!
0xffff800000000000UL, // too high!
ALIGN_DOWN((size_t)get_a_stack_ptr(), PAGE_SIZE), // overlapped with stack!
HINT_BEGIN + 123, // within the valid range, but not page-aligned!
};
int len = PAGE_SIZE;
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
for (int hi = 0; hi < ARRAY_SIZE(bad_hints); hi++) {
void* bad_hint = (void*)bad_hints[hi];
void* addr = mmap(bad_hint, len, prot, flags, -1, 0);
if (addr == MAP_FAILED) {
throw_error("mmap should have tolerated a bad hint");
}
if (addr == bad_hint) {
throw_error("mmap should not have accepted a bad hint");
}
int ret = munmap(addr, len);
if (ret < 0) {
throw_error("munmap failed");
}
}
return 0;
}
int test_anonymous_mmap_with_zero_len() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
int len = 0; // invalid!
void* buf = mmap(NULL, len, prot, flags, -1, 0);
if (buf != MAP_FAILED) {
throw_error("mmap with zero len should have been failed");
}
return 0;
}
int test_anonymous_mmap_with_non_page_aligned_len() {
int len = PAGE_SIZE + 17; // length need not be page aligned!
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
void* buf = mmap(NULL, len, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
throw_error("mmap with non-page aligned len should have worked");
}
// Even though the length is not page aligned, the mapping is still done in whole pages
if (check_bytes_in_buf(buf, ALIGN_UP(len, PAGE_SIZE), 0) < 0) {
throw_error("the buffer is not initialized to zeros");
}
int ret = munmap(buf, len);
if (ret < 0) {
throw_error("munmap failed");
}
return 0;
}
// ============================================================================
// Test cases for file-backed mmap
// ============================================================================
int test_file_mmap() {
const char* file_path = "mmap_file.data";
int fd = open(file_path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (fd < 0) {
throw_error("file creation failed");
}
int file_len = 12 * KB + 128;
int byte_val = 0xab;
fill_file_with_repeated_bytes(fd, file_len, byte_val);
close(fd);
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE;
fd = open(file_path, O_RDONLY);
if (fd < 0) {
throw_error("file open failed");
}
off_t offset = 0;
for (size_t len = PAGE_SIZE; len <= file_len; len *= 2) {
char* buf = mmap(NULL, len, prot, flags, fd, offset);
if (buf == MAP_FAILED) {
throw_error("mmap failed");
}
if (check_bytes_in_buf(buf, len, byte_val) < 0) {
throw_error("the buffer is not initialized according to the file");
}
int ret = munmap(buf, len);
if (ret < 0) {
throw_error("munmap failed");
}
}
close(fd);
unlink(file_path);
return 0;
}
int test_file_mmap_with_offset() {
const char* file_path = "mmap_file.data";
int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644);
if (fd < 0) {
throw_error("file creation failed");
}
size_t first_len = 4 * KB + 47;
int first_val = 0xab;
fill_file_with_repeated_bytes(fd, first_len, first_val);
size_t second_len = 9 * KB - 47;
int second_val = 0xcd;
fill_file_with_repeated_bytes(fd, second_len, second_val);
size_t file_len = first_len + second_len;
off_t offset = 4 * KB;
int len = file_len - offset + 1 * KB;
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE;
assert(offset <= first_len);
char* buf = mmap(NULL, len, prot, flags, fd, offset);
if (buf == MAP_FAILED) {
throw_error("mmap failed");
}
char* buf_cursor = buf;
if (check_bytes_in_buf(buf_cursor, first_len - offset, first_val) < 0) {
throw_error("the buffer is not initialized according to the file");
}
buf_cursor += first_len - offset;
if (check_bytes_in_buf(buf_cursor, second_len, second_val) < 0) {
throw_error("the buffer is not initialized according to the file");
}
buf_cursor += second_len;
if (check_bytes_in_buf(buf_cursor, ALIGN_UP(len, PAGE_SIZE) - (buf_cursor - buf), 0) < 0) {
throw_error("the remaining of the last page occupied by the buffer is not initialized to zeros");
}
int ret = munmap(buf, len);
if (ret < 0) {
throw_error("munmap failed");
}
close(fd);
unlink(file_path);
return 0;
}
int test_file_mmap_with_invalid_fd() {
size_t len = PAGE_SIZE;
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE;
int fd = 1234; // invalid!
off_t offset = 0;
void* buf = mmap(NULL, len, prot, flags, fd, offset);
if (buf != MAP_FAILED) {
throw_error("file mmap with an invalid fd should have been failed");
}
return 0;
}
int test_file_mmap_with_non_page_aligned_offset() {
const char* file_path = "mmap_file.data";
int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644);
if (fd < 0) {
throw_error("file creation failed");
}
int file_len = 12 * KB + 128;
int byte_val = 0xab;
fill_file_with_repeated_bytes(fd, file_len, byte_val);
size_t len = PAGE_SIZE;
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE; // file-backed, so no MAP_ANONYMOUS
off_t offset = PAGE_SIZE + 127; // Invalid!
void* buf = mmap(NULL, len, prot, flags, fd, offset);
if (buf != MAP_FAILED) {
throw_error("mmap with non-page aligned len should have been failed");
}
close(fd);
unlink(file_path);
return 0;
}
// TODO: what if offset > file size or offset + len > file size?
// ============================================================================
// Test cases for fixed mmap
// ============================================================================
int test_fixed_mmap_that_does_not_override_any_mmaping() {
size_t hint = HINT_BEGIN + (HINT_END - HINT_BEGIN) / 3;
hint = ALIGN_DOWN(hint, PAGE_SIZE);
size_t len = (HINT_END - HINT_BEGIN) / 3;
len = ALIGN_UP(len, PAGE_SIZE);
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
void* addr = mmap((void*)hint, len, prot, flags, -1, 0);
if (addr != (void*)hint) {
throw_error("mmap with fixed address failed");
}
int ret = munmap(addr, len);
if (ret < 0) {
throw_error("munmap failed");
}
return 0;
}
int test_fixed_mmap_that_overrides_existing_mmaping() {
// We're about to allocate two buffers: parent_buf and child_buf.
// The child_buf will override a range of memory that has already
// been allocated to the parent_buf.
size_t parent_len = 10 * PAGE_SIZE;
size_t pre_child_len = 2 * PAGE_SIZE, post_child_len = 3 * PAGE_SIZE;
size_t child_len = parent_len - pre_child_len - post_child_len;
// Allocate parent_buf
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
void* parent_buf = mmap(NULL, parent_len, prot, flags, -1, 0);
if (parent_buf == MAP_FAILED) {
throw_error("mmap for parent failed");
}
int parent_val = 0xab;
memset(parent_buf, parent_val, parent_len);
// Allocate child_buf
void* child_buf = (char*)parent_buf + pre_child_len;
if (mmap(child_buf, child_len, prot, flags | MAP_FIXED, -1, 0) != child_buf) {
throw_error("mmap with fixed address failed");
}
// Check that child_buf, which overrides parent_buf, is initialized to zeros
if (check_bytes_in_buf(child_buf, child_len, 0) < 0) {
throw_error("the content of child mmap memory is not initialized");
}
// Check that the rest of parent_buf is kept intact
if (check_bytes_in_buf((char*)child_buf - pre_child_len,
pre_child_len, parent_val) < 0 ||
check_bytes_in_buf((char*)child_buf + child_len,
post_child_len, parent_val) < 0) {
throw_error("the content of parent mmap memory is broken");
}
// Deallocate parent_buf along with child_buf
int ret = munmap(parent_buf, parent_len);
if (ret < 0) {
throw_error("munmap failed");
}
return 0;
}
int test_fixed_mmap_with_non_page_aligned_addr() {
size_t hint = HINT_BEGIN + 123; // Not aligned!
size_t len = 1 * PAGE_SIZE;
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
void* addr = mmap((void*)hint, len, prot, flags, -1, 0);
if (addr != MAP_FAILED) {
throw_error("fixed mmap with non-page aligned hint should have failed");
}
return 0;
}
// ============================================================================
// Test cases for munmap
// ============================================================================
static int check_buf_is_munmapped(void* target_addr, size_t len) {
// The trivial case of a zero-len memory region is considered unmapped
if (len == 0) return 0;
// If target_addr is not already mapped, then using it as a hint for mmap
// should return exactly that address.
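// (Note: this check is heuristic. It assumes the LibOS honors a hint
// whenever the hinted range is free, which the hint tests above rely on
// as well.)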
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
void* real_addr = mmap(target_addr, len, prot, flags, -1, 0);
if (real_addr != target_addr) {
throw_error("address is already mmaped");
}
munmap(target_addr, len);
return 0;
}
static int mmap_then_munmap(size_t mmap_len, ssize_t munmap_offset, size_t munmap_len) {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
// Make sure that we are manipulating memory between [HINT_BEGIN, HINT_END)
void* mmap_addr = (void*)(munmap_offset >= 0 ? HINT_BEGIN : HINT_BEGIN - munmap_offset);
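// E.g., munmap_offset == -2 pages shifts mmap_addr up to HINT_BEGIN + 2
// pages, so munmap_addr (= mmap_addr + munmap_offset) lands at HINT_BEGIN.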
if (mmap(mmap_addr, mmap_len, prot, flags, -1, 0) != mmap_addr) {
throw_error("mmap failed");
}
void* munmap_addr = (char*)mmap_addr + munmap_offset;
if (munmap(munmap_addr, munmap_len) < 0) {
throw_error("munmap failed");
}
if (check_buf_is_munmapped(munmap_addr, munmap_len) < 0) {
throw_error("munmap does not really free the memory");
}
// Make sure that when this function returns, there are no memory mappings
// within [HINT_BEGIN, HINT_END)
if (munmap((void*)HINT_BEGIN, HINT_END - HINT_BEGIN) < 0) {
throw_error("munmap failed");
}
return 0;
}
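// The munmap test cases below all drive this helper with different
// (mmap_len, munmap_offset, munmap_len) combinations.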
int test_munmap_whose_range_is_a_subset_of_a_mmap_region() {
size_t mmap_len = 4 * PAGE_SIZE;
ssize_t munmap_offset = 1 * PAGE_SIZE;
size_t munmap_len = 2 * PAGE_SIZE;
if (mmap_then_munmap(mmap_len, munmap_offset, munmap_len) < 0) {
throw_error("first mmap and then munmap failed");
}
return 0;
}
int test_munmap_whose_range_is_a_superset_of_a_mmap_region() {
size_t mmap_len = 4 * PAGE_SIZE;
ssize_t munmap_offset = -2 * PAGE_SIZE;
size_t munmap_len = 7 * PAGE_SIZE;
if (mmap_then_munmap(mmap_len, munmap_offset, munmap_len) < 0) {
throw_error("first mmap and then munmap failed");
}
return 0;
}
int test_munmap_whose_range_intersects_with_a_mmap_region() {
size_t mmap_len = 200 * PAGE_SIZE;
size_t munmap_offset = 100 * PAGE_SIZE + 10 * PAGE_SIZE;
size_t munmap_len = 4 * PAGE_SIZE;
if (mmap_then_munmap(mmap_len, munmap_offset, munmap_len) < 0) {
throw_error("first mmap and then munmap failed");
}
return 0;
}
int test_munmap_whose_range_intersects_with_no_mmap_regions() {
size_t mmap_len = 1 * PAGE_SIZE;
size_t munmap_offset = 1 * PAGE_SIZE;
size_t munmap_len = 1 * PAGE_SIZE;
if (mmap_then_munmap(mmap_len, munmap_offset, munmap_len) < 0) {
throw_error("first mmap and then munmap failed");
}
return 0;
}
int test_munmap_whose_range_intersects_with_multiple_mmap_regions() {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
size_t mmap_len1 = 100 * PAGE_SIZE;
void* mmap_addr1 = mmap(NULL, mmap_len1, prot, flags, -1, 0);
if (mmap_addr1 == MAP_FAILED) {
throw_error("mmap failed");
}
size_t mmap_len2 = 12 * PAGE_SIZE;
void* mmap_addr2 = mmap(NULL, mmap_len2, prot, flags, -1, 0);
if (mmap_addr2 == MAP_FAILED) {
throw_error("mmap failed");
}
size_t mmap_min = MIN((size_t)mmap_addr1, (size_t)mmap_addr2);
size_t mmap_max = MAX((size_t)mmap_addr1 + mmap_len1,
(size_t)mmap_addr2 + mmap_len2);
void* munmap_addr = (void*)mmap_min;
size_t munmap_len = mmap_max - mmap_min;
if (munmap(munmap_addr, munmap_len) < 0) {
throw_error("munmap failed");
}
if (check_buf_is_munmapped(munmap_addr, munmap_len) < 0) {
throw_error("munmap does not really free the memory");
}
return 0;
}
int test_munmap_with_null_addr() {
// Set the address for munmap to NULL!
//
// The man page of munmap states that "it is not an error if the
// indicated range does not contain any mapped pages". So munmapping an
// unmapped range, even one starting at NULL, is not an error!
void* munmap_addr = NULL;
size_t munmap_len = PAGE_SIZE;
if (munmap(munmap_addr, munmap_len) < 0) {
throw_error("munmap failed");
}
return 0;
}
int test_munmap_with_zero_len() {
void* munmap_addr = (void*)HINT_BEGIN;
// Set the length for munmap to 0! This is invalid!
size_t munmap_len = 0;
if (munmap(munmap_addr, munmap_len) == 0) {
throw_error("munmap with zero length should have failed");
}
return 0;
}
int test_munmap_with_non_page_aligned_len() {
size_t mmap_len = 2 * PAGE_SIZE;
size_t munmap_offset = 0;
// Set the length for munmap to a non-page aligned value!
//
// The man page of munmap states that "the address addr must be a
// multiple of the page size (but length need not be). All pages
// containing a part of the indicated range are unmapped". So this is
// not considered an error!
size_t munmap_len = 1 * PAGE_SIZE + 123;
if (mmap_then_munmap(mmap_len, munmap_offset, munmap_len) < 0) {
throw_error("first mmap and then munmap failed");
}
return 0;
}
// ============================================================================
// Test suite main
// ============================================================================
typedef int(*test_case_func_t)(void);
typedef struct {
const char* name;
test_case_func_t func;
} test_case_t;
#define TEST_CASE(name) { STR(name), name }
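// TEST_CASE(foo) expands to { "foo", foo }: STR turns the function name
// into a string literal, and the bare name supplies the function pointer.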
static test_case_t test_cases[] = {
TEST_CASE(test_anonymous_mmap),
TEST_CASE(test_anonymous_mmap_randomly),
TEST_CASE(test_anonymous_mmap_randomly_with_good_hints),
TEST_CASE(test_anonymous_mmap_with_bad_hints),
TEST_CASE(test_anonymous_mmap_with_zero_len),
TEST_CASE(test_anonymous_mmap_with_non_page_aligned_len),
TEST_CASE(test_file_mmap),
TEST_CASE(test_file_mmap_with_offset),
TEST_CASE(test_file_mmap_with_invalid_fd),
TEST_CASE(test_file_mmap_with_non_page_aligned_offset),
TEST_CASE(test_fixed_mmap_that_does_not_override_any_mmaping),
TEST_CASE(test_fixed_mmap_that_overrides_existing_mmaping),
TEST_CASE(test_fixed_mmap_with_non_page_aligned_addr),
TEST_CASE(test_munmap_whose_range_is_a_subset_of_a_mmap_region),
TEST_CASE(test_munmap_whose_range_is_a_superset_of_a_mmap_region),
TEST_CASE(test_munmap_whose_range_intersects_with_a_mmap_region),
TEST_CASE(test_munmap_whose_range_intersects_with_no_mmap_regions),
TEST_CASE(test_munmap_whose_range_intersects_with_multiple_mmap_regions),
TEST_CASE(test_munmap_with_null_addr),
TEST_CASE(test_munmap_with_zero_len),
TEST_CASE(test_munmap_with_non_page_aligned_len)
};
int main() {
if (test_suite_init() < 0) {
throw_error("test_suite_init failed");
}
for (int ti = 0; ti < ARRAY_SIZE(test_cases); ti++) {
test_case_t* tc = &test_cases[ti];
if (tc->func() < 0) {
printf(" func %s - [ERR]\n", tc->name);
return -1;
}
printf(" func %s - [OK]\n", tc->name);
}
return 0;
}