Add virtual memory subsystem

Tate, Hongliang Tian 2018-12-15 12:36:02 +08:00
parent 4107a65248
commit 852903de20
30 changed files with 1659 additions and 776 deletions

@ -6,17 +6,17 @@ LIBOS_ENCLAVE := libocclum.signed.so
 LIBOS_SO := libocclum.so # Link $(LIBOS_A), $(C_OBJS) and all dependencies
 LIBOS_A := liblibos.a # Built from Rust code
-RUST_SRCS := $(wildcard src/*.rs)
+RUST_SRCS := $(wildcard src/*.rs src/*/*.rs src/*/*/*.rs)
-C_SRCS := $(sort $(filter-out src/Enclave_t.c, $(wildcard src/*.c))) src/Enclave_t.c
+C_SRCS := $(sort $(filter-out src/Enclave_t.c, $(wildcard src/*.c src/*/*.c))) src/Enclave_t.c
 C_OBJS := $(C_SRCS:.c=.o)
-S_SRCS := $(wildcard src/*.S)
+S_SRCS := $(wildcard src/*.S src/*/*.S)
 S_OBJS := $(S_SRCS:.S=.o)
 EDL_C_SRCS := src/Enclave_t.c src/Enclave_t.h
 ENCLAVE_CONFIG := Enclave_config.xml
 ENCLAVE_KEY := Enclave_private.pem
-C_FLAGS := $(SGX_CFLAGS_T) -fno-stack-protector
+C_FLAGS := $(SGX_CFLAGS_T) -fno-stack-protector -I./include/
 _Other_Link_Flags := -L../../deps/rust-sgx-sdk/compiler-rt/ -L.
 _Other_Enclave_Libs := -lcompiler-rt-patch -llibos -lsgx_tprotected_fs
 LINK_FLAGS := $(SGX_LFLAGS_T)

@ -1,6 +1,8 @@
 use prelude::*;
 use std::{fmt, error, convert,};
+// TODO: remove errno.h
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct Error {
     pub errno: Errno,
@ -13,7 +15,6 @@ impl Error {
             errno,
             desc,
         };
-        println!("{}", ret);
         ret
     }
 }

@ -1,6 +1,7 @@
 use prelude::*;
 use {std};
 use std::{fmt};
+use std::borrow::BorrowMut;
 use std::sgxfs as fs_impl;

@ -50,7 +50,7 @@ pub fn do_open(path: &str, flags: u32, mode: u32) -> Result<FileDesc, Error> {
     let current_ref = process::get_current();
     let mut current_process = current_ref.lock().unwrap();
-    let fd = current_process.file_table.put(file_ref);
+    let fd = current_process.get_files_mut().put(file_ref);
     Ok(fd)
 }
@ -58,7 +58,7 @@ pub fn do_open(path: &str, flags: u32, mode: u32) -> Result<FileDesc, Error> {
 pub fn do_write(fd: FileDesc, buf: &[u8]) -> Result<usize, Error> {
     let current_ref = process::get_current();
     let current_process = current_ref.lock().unwrap();
-    let file_ref = current_process.file_table.get(fd)
+    let file_ref = current_process.get_files().get(fd)
         .ok_or_else(|| Error::new(Errno::EBADF, "Invalid file descriptor [do_write]"))?;
     file_ref.write(buf)
 }
@ -66,7 +66,7 @@ pub fn do_write(fd: FileDesc, buf: &[u8]) -> Result<usize, Error> {
 pub fn do_read(fd: FileDesc, buf: &mut [u8]) -> Result<usize, Error> {
     let current_ref = process::get_current();
     let current_process = current_ref.lock().unwrap();
-    let file_ref = current_process.file_table.get(fd)
+    let file_ref = current_process.get_files().get(fd)
         .ok_or_else(|| Error::new(Errno::EBADF, "Invalid file descriptor [do_read]"))?;
     file_ref.read(buf)
 }
@ -74,7 +74,7 @@ pub fn do_read(fd: FileDesc, buf: &mut [u8]) -> Result<usize, Error> {
 pub fn do_writev<'a, 'b>(fd: FileDesc, bufs: &'a [&'b [u8]]) -> Result<usize, Error> {
     let current_ref = process::get_current();
     let current_process = current_ref.lock().unwrap();
-    let file_ref = current_process.file_table.get(fd)
+    let file_ref = current_process.get_files().get(fd)
         .ok_or_else(|| Error::new(Errno::EBADF, "Invalid file descriptor [do_write]"))?;
     file_ref.writev(bufs)
 }
@ -82,7 +82,7 @@ pub fn do_writev<'a, 'b>(fd: FileDesc, bufs: &'a [&'b [u8]]) -> Result<usize, Er
 pub fn do_readv<'a, 'b>(fd: FileDesc, bufs: &'a mut [&'b mut [u8]]) -> Result<usize, Error> {
     let current_ref = process::get_current();
     let current_process = current_ref.lock().unwrap();
-    let file_ref = current_process.file_table.get(fd)
+    let file_ref = current_process.get_files().get(fd)
         .ok_or_else(|| Error::new(Errno::EBADF, "Invalid file descriptor [do_read]"))?;
     file_ref.readv(bufs)
 }
@ -90,7 +90,7 @@ pub fn do_readv<'a, 'b>(fd: FileDesc, bufs: &'a mut [&'b mut [u8]]) -> Result<us
 pub fn do_lseek<'a, 'b>(fd: FileDesc, offset: SeekFrom) -> Result<off_t, Error> {
     let current_ref = process::get_current();
     let current_process = current_ref.lock().unwrap();
-    let file_ref = current_process.file_table.get(fd)
+    let file_ref = current_process.get_files().get(fd)
         .ok_or_else(|| Error::new(Errno::EBADF, "Invalid file descriptor [do_lseek]"))?;
     file_ref.seek(offset)
 }
@ -98,7 +98,7 @@ pub fn do_lseek<'a, 'b>(fd: FileDesc, offset: SeekFrom) -> Result<off_t, Error>
 pub fn do_close(fd: FileDesc) -> Result<(), Error> {
     let current_ref = process::get_current();
     let mut current_process = current_ref.lock().unwrap();
-    let file_table = &mut current_process.file_table;
+    let file_table = current_process.get_files_mut();
     match file_table.del(fd) {
         Some(_) => Ok(()),
         None => Err(Error::new(Errno::EBADF, "Invalid file descriptor [do_close]")),

@ -7,6 +7,7 @@
 #![cfg_attr(target_env = "sgx", feature(rustc_private))]
 #![feature(allocator_api)]
 #![feature(integer_atomics)]
+#![feature(range_contains)]
 extern crate sgx_types;
 #[cfg(not(target_env = "sgx"))]
@ -26,16 +27,13 @@ use sgx_trts::libc;
 #[macro_use]
 mod prelude;
-mod elf_helper;
 mod errno;
 mod file;
 mod file_table;
 mod fs;
-mod mm;
 mod process;
 mod syscall;
-mod vma;
+mod vm;
-mod init_stack;
 /// Export system calls
 pub use syscall::*;

@ -1,81 +0,0 @@
use prelude::*;
use std::mem;
#[derive(Clone, Debug)]
pub struct MemObj {
mem_ptr: *mut c_void,
mem_size: usize,
mem_align: usize,
}
impl MemObj {
pub fn new(mem_size: usize, mem_align: usize)
-> Result<Self, Error>
{
if mem_size == 0 || !is_power_of_two(mem_align) ||
mem_align % mem::size_of::<*const c_void>() != 0 {
return Err((Errno::EINVAL, "Invalid argument").into());
}
let mem_ptr = unsafe { aligned_malloc(mem_size, mem_align) };
if mem_ptr == (0 as *mut c_void) {
return Err((Errno::ENOMEM, "Out of memory").into());
};
unsafe { memset(mem_ptr, 0 as c_int, mem_size as size_t) };
Ok(MemObj {
mem_ptr,
mem_size,
mem_align,
})
}
pub fn get_addr(&self) -> usize {
self.mem_ptr as usize
}
}
impl Default for MemObj {
fn default() -> Self {
MemObj {
mem_ptr: 0 as *mut c_void,
mem_size: 0,
mem_align: 1
}
}
}
impl Drop for MemObj {
fn drop(&mut self) {
if self.mem_ptr != (0 as *mut c_void) {
unsafe { free(self.mem_ptr); }
}
}
}
unsafe impl Send for MemObj {}
unsafe impl Sync for MemObj {}
fn is_power_of_two(x: usize) -> bool {
return (x != 0) && ((x & (x - 1)) == 0);
}
unsafe fn aligned_malloc(mem_size: usize, mem_align: usize) -> *mut c_void {
let mut mem_ptr = ::core::ptr::null_mut();
let ret = libc::posix_memalign(&mut mem_ptr, mem_align, mem_size);
if ret == 0 {
mem_ptr
} else {
0 as *mut c_void
}
}
unsafe fn free(mem_ptr: *mut c_void) {
libc::free(mem_ptr)
}
#[link(name = "sgx_tstdc")]
extern {
pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
}

@ -7,18 +7,23 @@ pub use sgx_trts::libc;
 pub use std::marker::{Sync, Send};
 pub use std::sync::{Arc, SgxMutex, SgxMutexGuard, SgxRwLock,
     SgxRwLockReadGuard, SgxRwLockWriteGuard};
-pub use std::cell::{Cell};
+pub use std::cell::{Cell, RefCell};
 pub use std::result::Result;
-pub use std::borrow::BorrowMut;
+//pub use std::borrow::BorrowMut;
 pub use std::boxed::Box;
 pub use std::vec::Vec;
 pub use std::string::{String};
 pub use std::collections::{HashMap, VecDeque};
 pub use std::fmt::{Debug, Display};
 pub use std::io::{Read, Write, Seek, SeekFrom};
+pub use std::rc::{Rc};
+pub use std::iter::{Iterator};
+pub use std::cmp::{Ordering, PartialOrd};
+pub use std::borrow::{ToOwned};
 pub use errno::Error as Error;
 pub use errno::Errno;
+pub use errno::Errno::{*};
 pub use fs::off_t;
@ -27,3 +32,20 @@ macro_rules! debug_trace {
         println!("> Line = {}, File = {}", line!(), file!())
     };
 }
+macro_rules! errno {
+    ($errno: ident, $msg: expr) => {
+        {
+            println!("ERROR: {} ({}, line {} in file {})", $errno, $msg, line!(), file!());
+            Err(Error::new($errno, $msg))
+        }
+    }
+}
+pub fn align_up(addr: usize, align: usize) -> usize {
+    (addr + (align - 1)) / align * align
+}
+pub fn align_down(addr: usize, align: usize) -> usize {
+    addr & !(align - 1)
+}

@ -1,417 +0,0 @@
use prelude::*;
use {std, elf_helper, vma, syscall, file, file_table};
use std::{io, mem};
use std::path::Path;
use std::io::{Read};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sgxfs::SgxFile;
use std::thread;
use std::cell::Cell;
use std::ffi::{CStr, CString};
use xmas_elf::{ElfFile, header, program, sections};
use xmas_elf::symbol_table::Entry;
use vma::Vma;
use file::{File, StdinFile, StdoutFile/*, StderrFile*/};
use file_table::{FileTable};
use init_stack::{StackBuf, AuxKey, AuxTable, do_init_process_stack};
lazy_static! {
static ref PROCESS_TABLE: SgxMutex<HashMap<u32, ProcessRef>> = {
SgxMutex::new(HashMap::new())
};
}
fn put_into_pid_table(pid: u32, process: ProcessRef) {
PROCESS_TABLE.lock().unwrap().insert(pid, process);
}
fn del_from_pid_table(pid: u32) {
PROCESS_TABLE.lock().unwrap().remove(&pid);
}
fn look_up_pid_table(pid: u32) -> Option<ProcessRef> {
PROCESS_TABLE.lock().unwrap().get(&pid).map(|pr| pr.clone())
}
static NEXT_PID : AtomicU32 = AtomicU32::new(1);
fn alloc_pid() -> u32 {
NEXT_PID.fetch_add(1, Ordering::SeqCst)
}
fn free_pid(pid: u32) {
// TODO:
}
pub fn do_spawn<P: AsRef<Path>>(elf_path: &P, argv: &[CString], envp: &[CString])
-> Result<u32, Error>
{
let elf_buf = open_elf(&elf_path)
.map_err(|e| (e.errno, "Failed to open the ELF file"))?;
let elf_file = {
let elf_file = ElfFile::new(&elf_buf)
.map_err(|e| (Errno::ENOEXEC, "Failed to parse the ELF file"))?;
header::sanity_check(&elf_file)
.map_err(|e| (Errno::ENOEXEC, "Failed to parse the ELF file"))?;
/*
elf_helper::print_program_headers(&elf_file)?;
elf_helper::print_sections(&elf_file)?;
elf_helper::print_pltrel_section(&elf_file)?;
*/
elf_file
};
let new_process = {
let mut new_process = Process::new(&elf_file, argv, envp)
.map_err(|e| (Errno::EUNDEF, "Failed to create the process"))?;
{
let file_table = &mut new_process.file_table;
//let stdin = Arc::new(SgxMutex::new(Box::new(StdinFile::new())));
let stdin : Arc<Box<File>> = Arc::new(Box::new(StdinFile::new()));
let stdout : Arc<Box<File>> = Arc::new(Box::new(StdoutFile::new()));
// TODO: implement and use a real stderr
let stderr = stdout.clone();
file_table.put(stdin);
file_table.put(stdout);
file_table.put(stderr);
};
new_process
};
let new_pid = new_process.pid;
let new_process_ref = Arc::new(SgxMutex::new(new_process));
enqueue_new_process(new_process_ref.clone());
put_into_pid_table(new_pid, new_process_ref.clone());
// FIXME: if ocall_new_task failed, then new_process will not be dropped
let mut ret = 0;
let ocall_status = unsafe { ocall_run_new_task(&mut ret) };
if ocall_status != sgx_status_t::SGX_SUCCESS || ret != 0 {
return Err((Errno::EUNDEF, "Failed to start the process").into());
}
Ok(new_pid)
}
thread_local! {
static _CURRENT_PROCESS_PTR: Cell<*const SgxMutex<Process>> =
Cell::new(0 as *const SgxMutex<Process>);
}
pub fn set_current(process: &ProcessRef) {
let process_ref_clone = process.clone();
let process_ptr = Arc::into_raw(process_ref_clone);
_CURRENT_PROCESS_PTR.with(|cp| {
cp.set(process_ptr);
});
}
pub fn reset_current() {
let mut process_ptr = 0 as *const SgxMutex<Process>;
_CURRENT_PROCESS_PTR.with(|cp| {
process_ptr = cp.get();
cp.set(0 as *const SgxMutex<Process>);
});
// Prevent memory leakage
unsafe { drop(Arc::from_raw(process_ptr)); }
}
pub fn get_current() -> &'static SgxMutex<Process> {
let mut process_ptr : *const SgxMutex<Process> = 0 as *const SgxMutex<Process>;
_CURRENT_PROCESS_PTR.with(|cp| {
process_ptr = cp.get();
});
unsafe {
mem::transmute::<*const SgxMutex<Process>, &'static SgxMutex<Process>>(process_ptr)
}
}
pub fn do_getpid() -> u32 {
let current_ref = get_current();
let current_process = current_ref.lock().unwrap();
current_process.pid
}
pub fn do_exit(exit_code: i32) {
{
let current_ref = get_current();
let mut current_process = current_ref.lock().unwrap();
current_process.exit_code = exit_code;
current_process.status = Status::ZOMBIE;
}
}
pub fn do_wait4(child_pid: u32) -> Result<i32, Error> {
let child_process = look_up_pid_table(child_pid)
.ok_or_else(|| (Errno::ECHILD, "Cannot find child process with the given PID"))?;
let mut exit_code = 0;
loop {
let guard = child_process.lock().unwrap();
if guard.status == Status::ZOMBIE {
exit_code = guard.exit_code;
break;
}
drop(guard);
}
let child_pid = child_process.lock().unwrap().pid;
del_from_pid_table(child_pid);
Ok(exit_code)
}
pub fn run_task() -> Result<(), Error> {
let new_process : ProcessRef = dequeue_new_process()
.ok_or_else(|| (Errno::EAGAIN, "No new processes to run"))?;
set_current(&new_process);
let pid;
let new_task;
{
let guard = new_process.lock().unwrap();
let process : &Process = &guard;
pid = process.pid;
//println!("Run process: {:#x?}", process);
//println!("Run process (pid = {})", process.pid);
new_task = &process.task as *const Task
};
unsafe { do_run_task(new_task as *const Task); }
// Init process does not have any parent, so it has to release itself
if pid == 1 {
del_from_pid_table(1);
}
reset_current();
Ok(())
}
fn open_elf<P: AsRef<Path>>(path: &P) -> Result<Vec<u8>, Error> {
let key : sgx_key_128bit_t = [0 as uint8_t; 16];
let mut elf_file = SgxFile::open_ex(path, &key)
.map_err(|e| (Errno::ENOENT, "Failed to open the SGX-protected file"))?;
let mut elf_buf = Vec::<u8>::new();
elf_file.read_to_end(&mut elf_buf);
Ok(elf_buf)
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct Process {
pub task: Task,
pub status: Status,
pub pid: u32,
pub exit_code: i32,
pub code_vma: Vma,
pub data_vma: Vma,
pub stack_vma: Vma,
pub program_base_addr: usize,
pub program_entry_addr: usize,
pub file_table: FileTable,
}
pub type ProcessRef = Arc<SgxMutex<Process>>;
impl Process {
pub fn new(elf_file: &ElfFile, argv: &[CString], envp: &[CString])
-> Result<Process, Error>
{
let mut new_process : Process = Default::default();
new_process.create_process_image(elf_file)?;
new_process.reloc_symbols(elf_file)?;
new_process.link_syscalls(elf_file)?;
new_process.mprotect()?;
//println!("Process image: {:#X?}", new_process);
new_process.task = Task {
user_stack_addr: new_process.init_stack(argv, envp)? as usize,
user_entry_addr: new_process.program_entry_addr,
fs_base_addr: 0,
.. Default::default()
};
new_process.pid = alloc_pid();
Ok(new_process)
}
fn init_stack(&mut self, argv: &[CString], envp: &[CString])
-> Result<*const u8, Error>
{
let stack = StackBuf::new(self.stack_vma.mem_end as *const u8,
self.stack_vma.mem_begin as *const u8)?;
let mut auxtbl = AuxTable::new();
auxtbl.set_val(AuxKey::AT_PAGESZ, 4096)?;
auxtbl.set_val(AuxKey::AT_UID, 0)?;
auxtbl.set_val(AuxKey::AT_GID, 0)?;
auxtbl.set_val(AuxKey::AT_EUID, 0)?;
auxtbl.set_val(AuxKey::AT_EGID, 0)?;
auxtbl.set_val(AuxKey::AT_SECURE, 0)?;
do_init_process_stack(&stack, &argv, &envp, &auxtbl)?;
Ok(stack.get_pos())
}
fn create_process_image(self: &mut Process, elf_file: &ElfFile)
-> Result<(), Error>
{
let code_ph = elf_helper::get_code_program_header(elf_file)
.map_err(|e| (Errno::ENOEXEC, "Failed to get the program header of code"))?;
let data_ph = elf_helper::get_data_program_header(elf_file)
.map_err(|e| (Errno::ENOEXEC, "Failed to get the program header of code"))?;
self.code_vma = Vma::from_program_header(&code_ph)?;
self.data_vma = Vma::from_program_header(&data_ph)?;
self.stack_vma = Vma::new(32 * 1024 * 1024, 4096,
vma::Perms(vma::PERM_R | vma::PERM_W))?;
self.program_base_addr = self.alloc_mem_for_vmas(elf_file)?;
self.program_entry_addr = self.program_base_addr +
elf_helper::get_start_address(elf_file)?;
if !self.code_vma.contains(self.program_entry_addr) {
return Err((Errno::EINVAL, "Entry address is out of the code segment").into());
}
Ok(())
}
fn alloc_mem_for_vmas(self: &mut Process, elf_file: &ElfFile)
-> Result<usize, Error>
{
let mut vma_list = vec![&mut self.code_vma, &mut self.data_vma, &mut self.stack_vma];
let base_addr = vma::malloc_batch(&mut vma_list, elf_file.input)?;
Ok(base_addr)
}
fn reloc_symbols(self: &mut Process, elf_file: &ElfFile)
-> Result<(), Error>
{
let rela_entries = elf_helper::get_rela_entries(&elf_file, ".rela.dyn")?;
for rela_entry in rela_entries {
/*
println!("\toffset: {:#X}, symbol index: {}, type: {}, addend: {:#X}",
rela_entry.get_offset(),
rela_entry.get_symbol_table_index(),
rela_entry.get_type(),
rela_entry.get_addend());
*/
/* reloc type == R_X86_64_RELATIVE */
match rela_entry.get_type() {
8 if rela_entry.get_symbol_table_index() == 0 => {
let rela_addr = self.program_base_addr + rela_entry.get_offset() as usize;
let rela_val = self.program_base_addr + rela_entry.get_addend() as usize;
unsafe {
std::ptr::write_unaligned(rela_addr as *mut usize, rela_val);
}
}
// TODO: need to handle other relocation types
_ => { }
}
}
Ok(())
}
fn link_syscalls(self: &mut Process, elf_file: &ElfFile)
-> Result<(), Error>
{
let syscall_addr = __occlum_syscall as *const () as usize;
let rela_entries = elf_helper::get_rela_entries(&elf_file, ".rela.plt")?;
let dynsym_entries = elf_helper::get_dynsym_entries(&elf_file)?;
for rela_entry in rela_entries {
let dynsym_idx = rela_entry.get_symbol_table_index() as usize;
let dynsym_entry = &dynsym_entries[dynsym_idx];
let dynsym_str = dynsym_entry.get_name(elf_file)
.map_err(|e| Error::new(Errno::ENOEXEC,
"Failed to get the name of dynamic symbol"))?;
if dynsym_str == "__occlum_syscall" {
let rela_addr = self.program_base_addr + rela_entry.get_offset() as usize;
unsafe {
std::ptr::write_unaligned(rela_addr as *mut usize, syscall_addr);
}
}
}
Ok(())
}
fn mprotect(self: &mut Process) -> Result<(), Error> {
let vma_list = vec![&self.code_vma, &self.data_vma, &self.stack_vma];
vma::mprotect_batch(&vma_list)
}
}
impl Drop for Process {
fn drop(&mut self) {
free_pid(self.pid);
}
}
/// Note: this definition must be in sync with task.h
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct Task {
pub syscall_stack_addr: usize,
pub user_stack_addr: usize,
pub user_entry_addr: usize,
pub fs_base_addr: usize,
pub saved_state: usize, // struct jmpbuf*
}
lazy_static! {
static ref new_process_queue: SgxMutex<VecDeque<ProcessRef>> = {
SgxMutex::new(VecDeque::new())
};
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Status {
RUNNING,
INTERRUPTIBLE,
ZOMBIE,
STOPPED,
}
impl Default for Status {
fn default() -> Status {
Status::RUNNING
}
}
fn dequeue_new_process() -> Option<ProcessRef> {
new_process_queue.lock().unwrap().pop_front()
}
fn enqueue_new_process(new_process: ProcessRef) {
new_process_queue.lock().unwrap().push_back(new_process)
}
extern {
fn ocall_run_new_task(ret: *mut i32) -> sgx_status_t;
fn do_run_task(task: *const Task) -> i32;
fn do_exit_task();
fn __occlum_syscall(num: i32, arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> i64;
}

@ -0,0 +1,45 @@
pub use self::process::{Process, ProcessRef, Status, pid_t};
pub use self::task::{get_current, run_task};
pub mod table {
pub use super::process_table::{get};
}
pub use self::spawn::{do_spawn};
pub fn do_getpid() -> pid_t {
let current_ref = get_current();
let current_process = current_ref.lock().unwrap();
current_process.get_pid()
}
pub fn do_exit(exit_code: i32) {
let current_ref = get_current();
let mut current_process = current_ref.lock().unwrap();
current_process.exit(exit_code);
}
pub fn do_wait4(child_pid: u32) -> Result<i32, Error> {
let child_process = process_table::get(child_pid)
.ok_or_else(|| (Errno::ECHILD, "Cannot find child process with the given PID"))?;
let mut exit_code = 0;
loop {
let guard = child_process.lock().unwrap();
if guard.get_status() == Status::ZOMBIE {
exit_code = guard.get_exit_code();
break;
}
drop(guard);
}
let child_pid = child_process.lock().unwrap().get_pid();
process_table::remove(child_pid);
Ok(exit_code)
}
mod task;
mod process;
mod process_table;
mod spawn;
use prelude::*;
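The new do_wait4 reaps a child by busy-polling its status under the process lock until the child turns ZOMBIE; there is no condition variable, so the guard must be dropped on every iteration or the child could never acquire the lock to exit. A std-only model of that loop (hypothetical types, not the libOS API):

    use std::sync::{Arc, Mutex};
    use std::thread;

    #[derive(Clone, Copy, PartialEq)]
    enum Status { Running, Zombie }

    struct Process { status: Status, exit_code: i32 }

    // Busy-wait until the child becomes a zombie, then take its exit code.
    fn wait4(child: &Arc<Mutex<Process>>) -> i32 {
        loop {
            let guard = child.lock().unwrap();
            if guard.status == Status::Zombie {
                return guard.exit_code;
            }
            drop(guard); // release the lock so the child thread can exit
        }
    }

    fn main() {
        let child = Arc::new(Mutex::new(Process { status: Status::Running, exit_code: 0 }));
        let child2 = child.clone();
        thread::spawn(move || {
            let mut p = child2.lock().unwrap();
            p.exit_code = 42;
            p.status = Status::Zombie; // the equivalent of Process::exit()
        });
        assert_eq!(wait4(&child), 42);
    }

A production implementation would block on a wait queue or condition variable rather than spinning; the commit keeps the simple spin loop.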

@ -0,0 +1,79 @@
use super::*;
use super::task::{Task};
use vm::{ProcessVM, VMRangeTrait};
use file_table::{FileTable};
use file::{File, FileRef,};
#[allow(non_camel_case_types)]
pub type pid_t = u32;
#[derive(Debug)]
pub struct Process {
task: Task,
status: Status,
pid: pid_t,
tgid: pid_t,
exit_code: i32,
exec_path: String,
vm: ProcessVM,
file_table: FileTable,
}
pub type ProcessRef = Arc<SgxMutex<Process>>;
impl Process {
pub fn new(exec_path: &str, task: Task, vm: ProcessVM, file_table: FileTable)
-> Result<(pid_t, ProcessRef), Error>
{
let new_pid = process_table::alloc_pid();
let new_process_ref = Arc::new(SgxMutex::new(Process {
task: task,
status: Default::default(),
pid: new_pid,
tgid: new_pid,
exec_path: exec_path.to_owned(),
exit_code: 0,
vm: vm,
file_table: file_table,
}));
Ok((new_pid, new_process_ref))
}
pub fn get_task(&self) -> &Task { &self.task }
pub fn get_task_mut(&mut self) -> &mut Task { &mut self.task }
pub fn get_pid(&self) -> pid_t { self.pid }
pub fn get_tgid(&self) -> pid_t { self.tgid }
pub fn get_status(&self) -> Status { self.status }
pub fn get_exit_code(&self) -> i32 { self.exit_code }
pub fn get_exec_path(&self) -> &str { &self.exec_path }
pub fn get_vm(&self) -> &ProcessVM { &self.vm }
pub fn get_vm_mut(&mut self) -> &mut ProcessVM { &mut self.vm }
pub fn get_files(&self) -> &FileTable { &self.file_table }
pub fn get_files_mut(&mut self) -> &mut FileTable { &mut self.file_table }
pub fn exit(&mut self, exit_code: i32) {
self.exit_code = exit_code;
self.status = Status::ZOMBIE;
}
}
impl Drop for Process {
fn drop(&mut self) {
process_table::free_pid(self.pid);
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Status {
RUNNING,
INTERRUPTIBLE,
ZOMBIE,
STOPPED,
}
impl Default for Status {
fn default() -> Status {
Status::RUNNING
}
}

@ -0,0 +1,32 @@
use super::*;
use std::sync::atomic::{AtomicU32, Ordering};
lazy_static! {
static ref PROCESS_TABLE: SgxMutex<HashMap<pid_t, ProcessRef>> = {
SgxMutex::new(HashMap::new())
};
}
pub fn put(pid: pid_t, process: ProcessRef) {
PROCESS_TABLE.lock().unwrap().insert(pid, process);
}
pub fn remove(pid: pid_t) {
PROCESS_TABLE.lock().unwrap().remove(&pid);
}
pub fn get(pid: pid_t) -> Option<ProcessRef> {
PROCESS_TABLE.lock().unwrap().get(&pid).map(|pr| pr.clone())
}
static NEXT_PID : AtomicU32 = AtomicU32::new(1);
pub fn alloc_pid() -> u32 {
NEXT_PID.fetch_add(1, Ordering::SeqCst)
}
pub fn free_pid(pid: u32) {
// TODO:
}
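The table itself is just a lazily initialized global HashMap behind a mutex plus an atomic PID counter; since free_pid is still a TODO, PIDs are never reused and the counter only moves forward. A plain-std sketch of the same bookkeeping:

    use std::collections::HashMap;
    use std::sync::atomic::{AtomicU32, Ordering};

    // Monotonic PID allocation, as in process_table::alloc_pid.
    static NEXT_PID: AtomicU32 = AtomicU32::new(1);

    fn alloc_pid() -> u32 {
        NEXT_PID.fetch_add(1, Ordering::SeqCst)
    }

    fn main() {
        // A &'static str stands in for ProcessRef here.
        let mut table: HashMap<u32, &'static str> = HashMap::new();
        let pid = alloc_pid();
        table.insert(pid, "init");
        assert_eq!(table.get(&pid), Some(&"init"));
        table.remove(&pid);
        assert!(table.get(&pid).is_none());
    }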

@ -1,4 +1,4 @@
-use prelude::*;
+use super::*;
 use xmas_elf::{sections, ElfFile, program, P64};
 use xmas_elf::symbol_table::{Entry64, DynEntry64};

@ -1,4 +1,5 @@
-use prelude::*;
+use super::*;
 use {std, std::mem, std::ptr};
 use std::ffi::{CStr, CString};
 use std::os::raw::c_char;
@ -46,17 +47,19 @@ use std::os::raw::c_char;
  *
  */
-pub fn do_init_process_stack<'a, 'b>(stack: &'a StackBuf,
-    argv: &'b [CString], envp: &'b [CString], auxtbl: &'b AuxTable)
-    -> Result<(), Error>
+pub fn do_init(stack_top: usize, init_area_size: usize,
+    argv: &[CString], envp: &[CString], auxtbl: &AuxTable)
+    -> Result<usize, Error>
 {
-    let envp_cloned = clone_cstrings_on_stack(stack, envp)?;
-    let argv_cloned = clone_cstrings_on_stack(stack, argv)?;
-    dump_auxtbl_on_stack(stack, auxtbl)?;
-    dump_cstrptrs_on_stack(stack, &envp_cloned);
-    dump_cstrptrs_on_stack(stack, &argv_cloned);
-    stack.put(argv.len() as u64);
-    Ok(())
+    let stack_buf = unsafe { StackBuf::new(stack_top, init_area_size)? };
+    let envp_cloned = clone_cstrings_on_stack(&stack_buf, envp)?;
+    let argv_cloned = clone_cstrings_on_stack(&stack_buf, argv)?;
+    dump_auxtbl_on_stack(&stack_buf, auxtbl)?;
+    dump_auxtbl_on_stack(&stack_buf, auxtbl)?;
+    dump_cstrptrs_on_stack(&stack_buf, &envp_cloned);
+    dump_cstrptrs_on_stack(&stack_buf, &argv_cloned);
+    stack_buf.put(argv.len() as u64);
+    Ok(stack_buf.get_pos())
 }
 /// StackBuf is a buffer that is filled in from high addresses to low
@ -64,29 +67,19 @@ pub fn do_init_process_stack<'a, 'b>(stack: &'a StackBuf,
 /// [self.bottom, self.top).
 #[derive(Debug)]
 pub struct StackBuf {
-    stack_top: *const u8,
-    stack_bottom: *const u8,
-    stack_pos: Cell<*const u8>,
+    stack_top: usize,
+    stack_bottom: usize,
+    stack_pos: Cell<usize>,
 }
-impl Default for StackBuf {
-    fn default() -> StackBuf {
-        StackBuf {
-            stack_top: 0 as *const u8,
-            stack_bottom: 0 as *const u8,
-            stack_pos: Cell::new(0 as *const u8),
-        }
-    }
-}
 impl StackBuf {
-    pub fn new(stack_top: *const u8, stack_bottom: *const u8) -> Result<StackBuf, Error> {
-        if stack_top as usize <= stack_bottom as usize {
-            return Err(Error::new(Errno::EINVAL, "Invalid stack range"));
+    pub unsafe fn new(stack_top: usize, stack_size: usize) -> Result<StackBuf, Error> {
+        if stack_top % 16 != 0 || stack_size == 0 || stack_top < stack_size {
+            return errno!(EINVAL, "Invalid stack range");
         };
         Ok(StackBuf {
             stack_top: stack_top,
-            stack_bottom: stack_bottom,
+            stack_bottom: stack_top - stack_size,
             stack_pos: Cell::new(stack_top),
         })
     }
@ -106,7 +99,11 @@ impl StackBuf {
     {
         let val_size = mem::size_of::<T>();
         let val_align = mem::align_of::<T>();
-        let total_size = val_size * vals.len();
+        let total_size = {
+            let num_vals = vals.len();
+            if num_vals == 0 { return Ok(self.get_pos() as *const T); }
+            val_size * num_vals
+        };
         let base_ptr = self.alloc(total_size, val_align)? as *mut T;
         let mut val_ptr = base_ptr;
@ -123,31 +120,18 @@ impl StackBuf {
         self.put_slice(bytes)
     }
-    pub fn get_pos(&self) -> *const u8 {
+    pub fn get_pos(&self) -> usize {
         self.stack_pos.get()
     }
-    fn alloc(&self, size: usize, align_power2: usize) -> Result<*mut u8, Error> {
-        if size == 0 || !align_power2.is_power_of_two() {
-            return Err(Error::new(Errno::EINVAL, "Invalid size or align"));
-        }
-        let old_pos = {
-            let old_pos = self.stack_pos.get() as usize;
-            let remain_size = old_pos - self.stack_bottom as usize;
-            if size > remain_size {
-                return Err(Error::new(Errno::ENOMEM, "No enough space in buffer"));
-            }
-            old_pos
-        };
+    fn alloc(&self, size: usize, align: usize) -> Result<*mut u8, Error> {
         let new_pos = {
-            let mask = (-(align_power2 as isize)) as usize;
-            let new_pos = (old_pos - size) & mask;
-            if new_pos < self.stack_bottom as usize {
+            let old_pos = self.stack_pos.get();
+            let new_pos = align_down(old_pos - size, align);
+            if new_pos < self.stack_bottom {
                 return Err(Error::new(Errno::ENOMEM, "No enough space in buffer"));
             }
-            new_pos as *const u8
+            new_pos
         };
         self.stack_pos.set(new_pos);
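The reworked StackBuf fills its buffer from the top down: alloc subtracts the requested size, aligns the new position down, and fails once it would cross stack_bottom. A minimal non-SGX model of that allocation rule (addresses are hypothetical):

    // The write position starts at the top and only moves down.
    fn align_down(addr: usize, align: usize) -> usize {
        addr & !(align - 1) // align must be a power of two
    }

    struct StackBuf { top: usize, bottom: usize, pos: usize }

    impl StackBuf {
        fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
            let new_pos = align_down(self.pos - size, align);
            if new_pos < self.bottom {
                return None; // would cross the low end: ENOMEM in the real code
            }
            self.pos = new_pos;
            Some(new_pos)
        }
    }

    fn main() {
        let mut sb = StackBuf { top: 0x8000, bottom: 0x7000, pos: 0x8000 };
        assert_eq!(sb.alloc(8, 8), Some(0x7ff8));   // 8 bytes, 8-byte aligned
        assert_eq!(sb.alloc(5, 16), Some(0x7ff0));  // 5 bytes, rounded down to 16
        assert_eq!(sb.alloc(0x1000, 8), None);      // a full page no longer fits
        println!("final pos {:#x} (top was {:#x})", sb.pos, sb.top);
    }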

@ -0,0 +1,106 @@
use super::*;
use std::{ptr};
use xmas_elf::{ElfFile, header, program, sections};
use self::segment::{*};
pub const DEFAULT_STACK_SIZE : usize = 1 * 1024 * 1024;
pub const DEFAULT_HEAP_SIZE : usize = 2 * 1024 * 1024;
pub const DEFAULT_MMAP_SIZE : usize = 2 * 1024 * 1024;
pub fn do_init(elf_file: &ElfFile, elf_buf: &[u8]) -> Result<ProcessVM, Error> {
let mut code_seg = get_code_segment(elf_file)?;
let mut data_seg = get_data_segment(elf_file)?;
// Alloc all virtual memory areas
let code_start = 0;
let code_end = align_down(data_seg.get_mem_addr(), data_seg.get_mem_align());
let data_start = code_end;
let data_end = align_up(data_seg.get_mem_addr() + data_seg.get_mem_size(), 4096);
let code_size = code_end - code_start;
let data_size = data_end - data_start;
let stack_size = DEFAULT_STACK_SIZE;
let heap_size = DEFAULT_HEAP_SIZE;
let mmap_size = DEFAULT_MMAP_SIZE;
let mut process_vm = ProcessVM::new(code_size, data_size, heap_size,
stack_size, mmap_size)?;
// Calculate the "real" addresses
let process_base_addr = process_vm.get_base_addr();
let code_start = process_base_addr + code_start;
let code_end = process_base_addr + code_end;
let data_start = process_base_addr + data_start;
let data_end = process_base_addr + data_end;
code_seg.set_runtime_info(process_base_addr, code_start, code_end);
data_seg.set_runtime_info(process_base_addr, data_start, data_end);
// Load code and data
code_seg.load_from_file(elf_buf);
data_seg.load_from_file(elf_buf);
// Relocate symbols
reloc_symbols(process_base_addr, elf_file)?;
link_syscalls(process_base_addr, elf_file)?;
// Make code executable
code_seg.mprotect(PERM_R | PERM_W | PERM_X);
Ok(process_vm)
}
fn reloc_symbols(process_base_addr: usize, elf_file: &ElfFile)
-> Result<(), Error>
{
let rela_entries = elf_helper::get_rela_entries(elf_file, ".rela.dyn")?;
for rela_entry in rela_entries {
/*
println!("\toffset: {:#X}, symbol index: {}, type: {}, addend: {:#X}",
rela_entry.get_offset(),
rela_entry.get_symbol_table_index(),
rela_entry.get_type(),
rela_entry.get_addend());
*/
/* reloc type == R_X86_64_RELATIVE */
match rela_entry.get_type() {
8 if rela_entry.get_symbol_table_index() == 0 => {
let rela_addr = process_base_addr + rela_entry.get_offset() as usize;
let rela_val = process_base_addr + rela_entry.get_addend() as usize;
unsafe {
ptr::write_unaligned(rela_addr as *mut usize, rela_val);
}
}
// TODO: need to handle other relocation types
_ => { }
}
}
Ok(())
}
fn link_syscalls(process_base_addr: usize, elf_file: &ElfFile)
-> Result<(), Error>
{
let syscall_addr = __occlum_syscall as *const () as usize;
let rela_entries = elf_helper::get_rela_entries(elf_file, ".rela.plt")?;
let dynsym_entries = elf_helper::get_dynsym_entries(elf_file)?;
for rela_entry in rela_entries {
let dynsym_idx = rela_entry.get_symbol_table_index() as usize;
let dynsym_entry = &dynsym_entries[dynsym_idx];
let dynsym_str = dynsym_entry.get_name(elf_file)
.map_err(|e| Error::new(Errno::ENOEXEC,
"Failed to get the name of dynamic symbol"))?;
if dynsym_str == "__occlum_syscall" {
let rela_addr = process_base_addr + rela_entry.get_offset() as usize;
unsafe {
ptr::write_unaligned(rela_addr as *mut usize, syscall_addr);
}
}
}
Ok(())
}
extern {
fn __occlum_syscall(num: i32, arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> i64;
}
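Both reloc_symbols and link_syscalls reduce to the same patch step: for a matching .rela entry, write a recomputed absolute address at base + offset with an unaligned store. A self-contained model of that fixup over a byte buffer (the buffer and offsets are hypothetical; assumes a 64-bit target, so usize is 8 bytes):

    use std::ptr;

    // One R_X86_64_RELATIVE-style fixup: *(base + offset) = base + addend.
    unsafe fn apply_relative_reloc(image: *mut u8, offset: usize, addend: usize) {
        let base = image as usize;
        // write_unaligned, because nothing guarantees 8-byte alignment at offset
        ptr::write_unaligned((base + offset) as *mut usize, base + addend);
    }

    fn main() {
        let mut image = vec![0u8; 64];
        unsafe { apply_relative_reloc(image.as_mut_ptr(), 8, 32) };
        let stored = usize::from_ne_bytes(image[8..16].try_into().unwrap());
        assert_eq!(stored, image.as_ptr() as usize + 32);
    }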

@ -0,0 +1,106 @@
use super::*;
use file::{File, StdinFile, StdoutFile/*, StderrFile*/};
use file_table::{FileTable};
use std::path::Path;
use std::ffi::{CStr, CString};
use std::sgxfs::SgxFile;
use xmas_elf::{ElfFile, header, program, sections};
use xmas_elf::symbol_table::Entry;
use self::init_stack::{AuxKey, AuxTable};
use super::task::{Task};
use vm::{ProcessVM, VMRangeTrait};
mod init_stack;
mod init_vm;
mod elf_helper;
mod segment;
pub fn do_spawn<P: AsRef<Path>>(elf_path: &P, argv: &[CString], envp: &[CString])
-> Result<u32, Error>
{
let mut elf_buf = {
let key : sgx_key_128bit_t = [0 as uint8_t; 16];
let mut sgx_file = SgxFile::open_ex(elf_path, &key)
.map_err(|e| (Errno::ENOENT, "Failed to open the SGX-protected file"))?;
let mut elf_buf = Vec::<u8>::new();
sgx_file.read_to_end(&mut elf_buf);
elf_buf
};
let elf_file = {
let elf_file = ElfFile::new(&elf_buf)
.map_err(|e| (Errno::ENOEXEC, "Failed to parse the ELF file"))?;
header::sanity_check(&elf_file)
.map_err(|e| (Errno::ENOEXEC, "Failed to parse the ELF file"))?;
/*
elf_helper::print_program_headers(&elf_file)?;
elf_helper::print_sections(&elf_file)?;
elf_helper::print_pltrel_section(&elf_file)?;
*/
elf_file
};
let (new_pid, new_process_ref) = {
let vm = init_vm::do_init(&elf_file, &elf_buf[..])?;
let task = {
let program_entry = {
let program_entry = vm.get_base_addr() +
elf_helper::get_start_address(&elf_file)?;
if !vm.get_code_vma().contains_obj(program_entry, 16) {
return Err(Error::new(Errno::EINVAL, "Invalid program entry"));
}
program_entry
};
let stack_top = vm.get_stack_top();
init_task(program_entry, stack_top, argv, envp)?
};
let files = init_files()?;
let exec_path = elf_path.as_ref().to_str().unwrap();
Process::new(exec_path, task, vm, files)?
};
process_table::put(new_pid, new_process_ref.clone());
task::enqueue_task(new_process_ref);
Ok(new_pid)
}
fn init_files() -> Result<FileTable, Error> {
let mut file_table = FileTable::new();
let stdin : Arc<Box<File>> = Arc::new(Box::new(StdinFile::new()));
let stdout : Arc<Box<File>> = Arc::new(Box::new(StdoutFile::new()));
// TODO: implement and use a real stderr
let stderr = stdout.clone();
file_table.put(stdin);
file_table.put(stdout);
file_table.put(stderr);
Ok(file_table)
}
fn init_task(user_entry: usize, stack_top: usize,
argv: &[CString], envp: &[CString])
-> Result<Task, Error>
{
let user_stack = init_stack(stack_top, argv, envp)?;
Ok(Task {
user_stack_addr: user_stack,
user_entry_addr: user_entry,
fs_base_addr: 0,
.. Default::default()
})
}
fn init_stack(stack_top: usize, argv: &[CString], envp: &[CString])
-> Result<usize, Error>
{
let mut auxtbl = AuxTable::new();
auxtbl.set_val(AuxKey::AT_PAGESZ, 4096)?;
auxtbl.set_val(AuxKey::AT_UID, 0)?;
auxtbl.set_val(AuxKey::AT_GID, 0)?;
auxtbl.set_val(AuxKey::AT_EUID, 0)?;
auxtbl.set_val(AuxKey::AT_EGID, 0)?;
auxtbl.set_val(AuxKey::AT_SECURE, 0)?;
init_stack::do_init(stack_top, 4096, argv, envp, &auxtbl)
}

@ -0,0 +1,102 @@
use super::*;
use std::{slice};
use xmas_elf::program::{ProgramHeader};
#[derive(Debug, Default)]
pub struct Segment {
// Static info from ELF
mem_addr: usize,
mem_align: usize,
mem_size: usize,
file_offset: usize,
file_size: usize,
// Runtime info after loaded
process_base_addr: usize,
start_addr: usize,
end_addr: usize,
}
pub const PERM_R: u32 = 0x1;
pub const PERM_W: u32 = 0x2;
pub const PERM_X: u32 = 0x4;
impl Segment {
pub fn get_mem_addr(&self) -> usize { self.mem_addr }
pub fn get_mem_align(&self) -> usize { self.mem_align }
pub fn get_mem_size(&self) -> usize { self.mem_size }
pub fn from_program_header(ph: &ProgramHeader) -> Result<Segment, Error> {
let ph64 = match ph {
ProgramHeader::Ph32(ph) => {
return Err((Errno::ENOEXEC, "Not support 32-bit ELF").into())
}
ProgramHeader::Ph64(ph64) => {
ph64
}
};
if ph64.align > 1 && ((ph64.offset % ph64.align) !=
(ph64.virtual_addr % ph64.align)) {
return Err((Errno::EINVAL,
"Memory address and file offset is not equal, per modulo").into());
}
if ph64.mem_size < ph64.file_size {
return Err((Errno::EINVAL,
"Memory size must be greater than file size").into());
}
if !ph64.align.is_power_of_two() {
return Err((Errno::EINVAL,
"Memory alignment must be a power of two").into());
}
Ok(Segment {
mem_addr: ph64.virtual_addr as usize,
mem_align: ph64.align as usize,
mem_size: ph64.mem_size as usize,
file_offset: ph64.offset as usize,
file_size: ph64.file_size as usize,
..Default::default()
})
}
pub fn load_from_file(&self, elf_buf: &[u8]) {
let mut target_buf = unsafe {
slice::from_raw_parts_mut((self.process_base_addr + self.mem_addr)
as *mut u8,
self.file_size)
};
target_buf.copy_from_slice(&elf_buf[self.file_offset..
(self.file_offset + self.file_size)]);
}
pub fn set_runtime_info(&mut self, process_base_addr: usize,
start_addr: usize, end_addr: usize) {
self.process_base_addr = process_base_addr;
self.start_addr = start_addr;
self.end_addr = end_addr;
}
pub fn mprotect(&mut self, perm: u32) {
unsafe {
trts_mprotect(self.start_addr, self.end_addr - self.start_addr,
perm as u64);
}
}
}
pub fn get_code_segment(elf_file: &ElfFile) -> Result<Segment, Error> {
let code_ph = elf_helper::get_code_program_header(elf_file)
.map_err(|e| (Errno::ENOEXEC, "Failed to get the program header of code"))?;
Segment::from_program_header(&code_ph)
}
pub fn get_data_segment(elf_file: &ElfFile) -> Result<Segment, Error> {
let data_ph = elf_helper::get_data_program_header(elf_file)
.map_err(|e| (Errno::ENOEXEC, "Failed to get the program header of code"))?;
Segment::from_program_header(&data_ph)
}
#[link(name = "sgx_trts")]
extern {
pub fn trts_mprotect(start: size_t, size: size_t, perms: uint64_t) -> sgx_status_t;
}
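Segment::from_program_header enforces three invariants on a loadable segment: the file offset and virtual address must agree modulo the alignment (so one page-aligned mapping can serve both), mem_size must be at least file_size (the difference is .bss), and the alignment must be a power of two. A standalone restating of those checks with hypothetical header values:

    // The three sanity checks, with plain u64 values standing in for ph64 fields.
    fn check_segment(offset: u64, vaddr: u64, file_size: u64, mem_size: u64, align: u64)
        -> Result<(), &'static str>
    {
        if align > 1 && (offset % align) != (vaddr % align) {
            return Err("offset and vaddr not congruent modulo align");
        }
        if mem_size < file_size {
            return Err("mem_size must cover file_size (plus .bss)");
        }
        if !align.is_power_of_two() {
            return Err("align must be a power of two");
        }
        Ok(())
    }

    fn main() {
        // Typical PT_LOAD: offset 0x1000 and vaddr 0x401000 agree mod 0x1000.
        assert!(check_segment(0x1000, 0x401000, 0x200, 0x800, 0x1000).is_ok());
        // A misaligned offset/vaddr pair is rejected.
        assert!(check_segment(0x1001, 0x401000, 0x200, 0x800, 0x1000).is_err());
    }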

@ -0,0 +1,100 @@
use super::*;
use std::{mem};
/// Note: this definition must be in sync with task.h
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct Task {
pub syscall_stack_addr: usize,
pub user_stack_addr: usize,
pub user_entry_addr: usize,
pub fs_base_addr: usize,
pub saved_state: usize, // struct jmpbuf*
}
lazy_static! {
static ref new_process_queue: SgxMutex<VecDeque<ProcessRef>> = {
SgxMutex::new(VecDeque::new())
};
}
pub fn enqueue_task(new_process: ProcessRef) {
new_process_queue.lock().unwrap().push_back(new_process);
let mut ret = 0;
let ocall_status = unsafe { ocall_run_new_task(&mut ret) };
if ocall_status != sgx_status_t::SGX_SUCCESS || ret != 0 {
panic!("Failed to start the process");
}
}
fn dequeue_task() -> Option<ProcessRef> {
new_process_queue.lock().unwrap().pop_front()
}
pub fn run_task() -> Result<(), Error> {
let new_process : ProcessRef = dequeue_task()
.ok_or_else(|| (Errno::EAGAIN, "No new processes to run"))?;
set_current(&new_process);
let (pid, task) = {
let mut process = new_process.lock().unwrap();
let pid = process.get_pid();
let task = process.get_task_mut() as *mut Task;
(pid, task)
};
unsafe {
// task may only be modified by this function; so no lock is needed
do_run_task(task);
}
// Init process does not have any parent, so it has to release itself
if pid == 1 {
process_table::remove(1);
}
reset_current();
Ok(())
}
thread_local! {
static _CURRENT_PROCESS_PTR: Cell<*const SgxMutex<Process>> =
Cell::new(0 as *const SgxMutex<Process>);
}
pub fn get_current() -> &'static SgxMutex<Process> {
let mut process_ptr = 0 as *const SgxMutex<Process>;
_CURRENT_PROCESS_PTR.with(|cp| {
process_ptr = cp.get();
});
unsafe { mem::transmute(process_ptr) }
}
fn set_current(process: &ProcessRef) {
let process_ref_clone = process.clone();
let process_ptr = Arc::into_raw(process_ref_clone);
_CURRENT_PROCESS_PTR.with(|cp| {
cp.set(process_ptr);
});
}
fn reset_current() {
let mut process_ptr = 0 as *const SgxMutex<Process>;
_CURRENT_PROCESS_PTR.with(|cp| {
process_ptr = cp.get();
cp.set(0 as *const SgxMutex<Process>);
});
// Prevent memory leakage
unsafe { drop(Arc::from_raw(process_ptr)); }
}
extern {
fn ocall_run_new_task(ret: *mut i32) -> sgx_status_t;
fn do_run_task(task: *mut Task) -> i32;
}
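set_current/reset_current park one Arc reference count in a thread-local raw pointer: Arc::into_raw leaks a clone into the Cell, and Arc::from_raw on reset rebuilds the Arc so that count is released. A minimal model of the pattern, with a plain Mutex<i32> standing in for SgxMutex<Process>:

    use std::cell::Cell;
    use std::sync::{Arc, Mutex};

    thread_local! {
        // Null means "no current process on this thread".
        static CURRENT: Cell<*const Mutex<i32>> = Cell::new(std::ptr::null());
    }

    // Stash one Arc refcount in the thread-local slot.
    fn set_current(p: &Arc<Mutex<i32>>) {
        CURRENT.with(|c| c.set(Arc::into_raw(p.clone())));
    }

    // Take the pointer back and rebuild the Arc so the refcount is released.
    fn reset_current() {
        CURRENT.with(|c| {
            let ptr = c.replace(std::ptr::null());
            if !ptr.is_null() {
                unsafe { drop(Arc::from_raw(ptr)) };
            }
        });
    }

    fn main() {
        let p = Arc::new(Mutex::new(7));
        set_current(&p);
        assert_eq!(Arc::strong_count(&p), 2); // one count parked in the thread-local
        reset_current();
        assert_eq!(Arc::strong_count(&p), 1); // parked count released, no leak
    }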

src/libos/src/vm/mod.rs (new file, 178 lines)

@ -0,0 +1,178 @@
use prelude::*;
use std::{fmt};
// TODO: Rename VMSpace to VMUniverse
#[macro_use]
mod vm_range;
mod vm_space;
mod vm_domain;
mod vm_area;
mod process_vm;
pub use self::vm_range::{VMRange, VMRangeTrait};
pub use self::process_vm::{ProcessVM};
pub const PAGE_SIZE : usize = 4096;
#[derive(Debug)]
pub struct VMSpace {
range: VMRange,
guard_type: VMGuardAreaType,
}
#[derive(Debug)]
pub struct VMDomain {
range: VMRange,
}
#[derive(Debug)]
pub struct VMArea {
range: VMRange,
flags: VMAreaFlags,
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMGuardAreaType {
None,
Static { size: usize, align: usize },
Dynamic { size: usize },
}
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct VMAreaFlags(pub u32);
pub const VM_AREA_FLAG_R: u32 = 0x1;
pub const VM_AREA_FLAG_W: u32 = 0x2;
pub const VM_AREA_FLAG_X: u32 = 0x4;
impl VMAreaFlags {
pub fn can_execute(&self) -> bool {
self.0 & VM_AREA_FLAG_X == VM_AREA_FLAG_X
}
pub fn can_write(&self) -> bool {
self.0 & VM_AREA_FLAG_W == VM_AREA_FLAG_W
}
pub fn can_read(&self) -> bool {
self.0 & VM_AREA_FLAG_R == VM_AREA_FLAG_R
}
}
#[derive(Clone, Copy, PartialEq)]
pub struct VMAllocOptions {
size: usize,
addr: VMAddrOption,
growth: Option<VMGrowthType>,
}
impl VMAllocOptions {
pub fn new(size: usize) -> Result<VMAllocOptions, Error> {
if size % PAGE_SIZE != 0 {
return Err(Error::new(Errno::EINVAL, "Size is not page-aligned"));
}
Ok(VMAllocOptions { size, ..Default::default() })
}
pub fn addr(&mut self, addr: VMAddrOption) -> Result<&mut Self, Error> {
if addr.is_addr_given() && addr.get_addr() % PAGE_SIZE != 0 {
return Err(Error::new(Errno::EINVAL, "Invalid address"));
}
self.addr = addr;
Ok(self)
}
pub fn growth(&mut self, growth: VMGrowthType) -> Result<&mut Self, Error> {
self.growth = Some(growth);
Ok(self)
}
}
impl fmt::Debug for VMAllocOptions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VMAllocOptions {{ size: 0x{:X?}, addr: {:?}, growth: {:?} }}",
self.size, self.addr, self.growth)
}
}
impl Default for VMAllocOptions {
fn default() -> VMAllocOptions{
VMAllocOptions {
size: 0,
addr: VMAddrOption::Any,
growth: None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMAddrOption {
Any, // Free to choose any address
Hint(usize), // Near the given address
Fixed(usize), // Must be the given address
Beyond(usize), // Must be greater or equal to the given address
}
impl VMAddrOption {
pub fn is_addr_given(&self) -> bool {
match self {
VMAddrOption::Any => false,
_ => true,
}
}
pub fn get_addr(&self) -> usize {
match self {
VMAddrOption::Hint(addr) |
VMAddrOption::Fixed(addr) |
VMAddrOption::Beyond(addr) => *addr,
VMAddrOption::Any => panic!("No address given"),
}
}
}
/// How VMRange may grow:
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VMGrowthType {
Upward, // e.g., mmaped regions grow upward
Downward, // e.g., stacks grows downward
Fixed,
}
#[derive(Clone, Debug)]
pub struct VMResizeOptions {
new_size: usize,
new_addr: Option<VMAddrOption>,
}
impl VMResizeOptions {
pub fn new(new_size: usize) -> Result<VMResizeOptions, Error> {
if new_size % PAGE_SIZE != 0 {
return Err(Error::new(Errno::EINVAL, "Size is not page-aligned"));
}
Ok(VMResizeOptions { new_size, ..Default::default() })
}
pub fn addr(&mut self, new_addr: VMAddrOption) -> &mut Self {
self.new_addr = Some(new_addr);
self
}
}
impl Default for VMResizeOptions {
fn default() -> VMResizeOptions{
VMResizeOptions {
new_size: 0,
new_addr: None,
}
}
}
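VMAllocOptions is a fallible builder: every setter validates its argument and returns Result<&mut Self, Error>, so configuration chains with the ? operator, as ProcessVM::alloc_vmas does below. A compact std-only re-declaration showing the intended call pattern (Hint and growth omitted for brevity):

    const PAGE_SIZE: usize = 4096;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum VMAddrOption { Any, Fixed(usize), Beyond(usize) }

    #[derive(Debug)]
    struct VMAllocOptions { size: usize, addr: VMAddrOption }

    impl VMAllocOptions {
        fn new(size: usize) -> Result<Self, &'static str> {
            if size % PAGE_SIZE != 0 { return Err("size is not page-aligned"); }
            Ok(VMAllocOptions { size, addr: VMAddrOption::Any })
        }
        fn addr(&mut self, addr: VMAddrOption) -> Result<&mut Self, &'static str> {
            if let VMAddrOption::Fixed(a) | VMAddrOption::Beyond(a) = addr {
                if a % PAGE_SIZE != 0 { return Err("address is not page-aligned"); }
            }
            self.addr = addr;
            Ok(self)
        }
    }

    fn main() -> Result<(), &'static str> {
        // Each setter validates, so a bad value surfaces at the exact call site.
        let mut opts = VMAllocOptions::new(16 * PAGE_SIZE)?;
        opts.addr(VMAddrOption::Fixed(0x10000))?;
        println!("{:?}", opts);
        Ok(())
    }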

@ -0,0 +1,245 @@
use super::*;
const DATA_SPACE_SIZE : usize = 12 * 1024 * 1024; // 12MB
lazy_static! {
static ref DATA_SPACE: SgxMutex<VMSpace> = {
let size = DATA_SPACE_SIZE;
let addr = {
let ptr = unsafe { aligned_malloc(size, PAGE_SIZE) };
if ptr == (0 as *mut c_void) {
panic!("Out of memory");
};
ptr as usize
};
let vm_space = unsafe {
match VMSpace::new(addr, size, VMGuardAreaType::None) {
Ok(vm_space) => vm_space,
Err(_) => panic!("Failed to create a VMSpace"),
}
};
SgxMutex::new(vm_space)
};
}
unsafe fn aligned_malloc(mem_size: usize, mem_align: usize) -> *mut c_void {
let mut mem_ptr = ::core::ptr::null_mut();
let ret = libc::posix_memalign(&mut mem_ptr, mem_align, mem_size);
if ret == 0 {
mem_ptr
} else {
0 as *mut c_void
}
}
#[derive(Debug)]
pub struct ProcessVM {
//code_domain: VMDomain,
data_domain: VMDomain,
code_vma: VMArea,
data_vma: VMArea,
heap_vma: VMArea,
stack_vma: VMArea,
mmap_vmas: Vec<Box<VMArea>>,
}
impl ProcessVM {
pub fn new(code_size: usize, data_size: usize,
heap_size: usize, stack_size: usize,
mmap_size: usize)
-> Result<ProcessVM, Error>
{
let data_domain_size = code_size + data_size + heap_size + stack_size + mmap_size;
let mut data_domain = DATA_SPACE.lock().unwrap().
alloc_domain(data_domain_size)?;
let (code_vma, data_vma, heap_vma, stack_vma) =
ProcessVM::alloc_vmas(&mut data_domain, code_size, data_size,
heap_size, stack_size)?;
// No mmapped vmas initially
let mmap_vmas = Vec::new();
Ok(ProcessVM {
data_domain,
code_vma,
data_vma,
heap_vma,
stack_vma,
mmap_vmas,
})
}
fn alloc_vmas(data_domain: &mut VMDomain,
code_size: usize, data_size: usize,
heap_size: usize, stack_size: usize)
-> Result<(VMArea, VMArea, VMArea, VMArea), Error>
{
let mut addr = data_domain.get_start();
let mut alloc_vma_continuously = |addr: &mut usize, size, flags, growth| -> Result<_, Error> {
let mut options = VMAllocOptions::new(size)?;
options.addr(VMAddrOption::Fixed(*addr))?.growth(growth)?;
let new_vma = data_domain.alloc_area(&options, flags)?;
*addr += size;
Ok(new_vma)
};
let rx_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_X);
let rw_flags = VMAreaFlags(VM_AREA_FLAG_R | VM_AREA_FLAG_W);
let code_vma = alloc_vma_continuously(&mut addr,
code_size, rx_flags,
VMGrowthType::Fixed)?;
let data_vma = alloc_vma_continuously(&mut addr,
data_size, rw_flags,
VMGrowthType::Fixed)?;
let heap_vma = alloc_vma_continuously(&mut addr,
0, rw_flags,
VMGrowthType::Upward)?;
// Preserve the space for heap
addr += heap_size;
// After the heap is the stack
let stack_vma = alloc_vma_continuously(&mut addr,
stack_size, rw_flags,
VMGrowthType::Downward)?;
Ok((code_vma, data_vma, heap_vma, stack_vma))
}
pub fn get_base_addr(&self) -> usize {
self.code_vma.get_start()
}
pub fn get_code_vma(&self) -> &VMArea {
&self.code_vma
}
pub fn get_data_vma(&self) -> &VMArea {
&self.data_vma
}
pub fn get_heap_vma(&self) -> &VMArea {
&self.heap_vma
}
pub fn get_stack_vma(&self) -> &VMArea {
&self.stack_vma
}
pub fn get_stack_top(&self) -> usize {
self.stack_vma.get_end()
}
pub fn get_mmap_vmas(&self) -> &[Box<VMArea>] {
&self.mmap_vmas[..]
}
pub fn get_brk_start(&self) -> usize {
self.get_heap_vma().get_start()
}
pub fn get_brk(&self) -> usize {
self.get_heap_vma().get_end()
}
pub fn get_mmap_start(&self) -> usize {
self.get_stack_vma().get_end()
}
pub fn mmap(&mut self, addr: usize, size: usize, flags: VMAreaFlags)
-> Result<usize, Error>
{
let alloc_options = {
let mmap_start_addr = self.get_mmap_start();
let mut alloc_options = VMAllocOptions::new(size)?;
alloc_options.addr(
if addr == 0 {
VMAddrOption::Beyond(mmap_start_addr)
}
else {
if addr < mmap_start_addr {
return Err(Error::new(Errno::EINVAL,
"Beyond valid memory range"));
}
VMAddrOption::Fixed(addr)
}
)?.growth(VMGrowthType::Upward)?;
alloc_options
};
// TODO: when failed, try to resize data_domain
let new_mmap_vma = self.data_domain.alloc_area(&alloc_options, flags)?;
let addr = new_mmap_vma.get_start();
self.mmap_vmas.push(Box::new(new_mmap_vma));
Ok(addr)
}
pub fn munmap(&mut self, addr: usize, size: usize)
-> Result<(), Error>
{
// TODO: handle the case when the given range [addr, addr + size)
// does not match exactly with any vma. For example, when this range
// cover multiple ranges or cover some range partially.
let mmap_vma_i = {
let mmap_vma_i = self.get_mmap_vmas().iter().position(|vma| {
vma.get_start() == addr && vma.get_end() == addr + size
});
if mmap_vma_i.is_none() { return Ok(()) }
mmap_vma_i.unwrap()
};
let mut removed_mmap_vma = self.mmap_vmas.swap_remove(mmap_vma_i);
self.data_domain.dealloc_area(&mut removed_mmap_vma);
Ok(())
}
pub fn mremap(&mut self, old_addr: usize, old_size: usize,
options: &VMResizeOptions)
-> Result<usize, Error>
{
// TODO: Implement this!
Err(Error::new(Errno::EINVAL, "Not implemented"))
}
pub fn brk(&mut self, new_brk: usize) -> Result<usize, Error> {
if new_brk == 0 {
return Ok(self.get_brk());
}
let resize_options = {
let brk_start = self.get_brk_start();
let new_heap_size = {
if new_brk < brk_start {
return Err(Error::new(Errno::EINVAL, "Invalid brk"));
}
new_brk - brk_start
};
let mut options = VMResizeOptions::new(new_heap_size)?;
options.addr(VMAddrOption::Fixed(brk_start));
options
};
self.data_domain.resize_area(&mut self.heap_vma, &resize_options)?;
Ok(new_brk)
}
}
impl Drop for ProcessVM {
fn drop(&mut self) {
let data_domain = &mut self.data_domain;
// Remove all vma from the domain
data_domain.dealloc_area(&mut self.code_vma);
data_domain.dealloc_area(&mut self.data_vma);
data_domain.dealloc_area(&mut self.heap_vma);
data_domain.dealloc_area(&mut self.stack_vma);
for mmap_vma in &mut self.mmap_vmas {
data_domain.dealloc_area(mmap_vma);
}
// Remove the domain from its parent space
DATA_SPACE.lock().unwrap().dealloc_domain(data_domain);
}
}
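ProcessVM::alloc_vmas bump-allocates the regions back to back inside one contiguous domain: code, data, an initially empty heap whose full extent is nevertheless reserved (brk grows it upward later), then the stack, with mmap space beyond that. A sketch that just computes such a layout, using the defaults from init_vm.rs and hypothetical base and segment sizes:

    // Bump a cursor through the domain; returns the (start, end) of each region.
    fn take(addr: &mut usize, size: usize) -> (usize, usize) {
        let start = *addr;
        *addr += size;
        (start, start + size)
    }

    fn main() {
        let base = 0x100000usize;                      // hypothetical domain start
        let (code_size, data_size) = (0x4000, 0x2000); // hypothetical ELF sizes
        let heap_size = 2 * 1024 * 1024;               // DEFAULT_HEAP_SIZE
        let stack_size = 1 * 1024 * 1024;              // DEFAULT_STACK_SIZE

        let mut addr = base;
        let code = take(&mut addr, code_size);
        let data = take(&mut addr, data_size);
        let heap = take(&mut addr, 0); // heap VMA starts empty...
        addr += heap_size;             // ...but its full extent stays reserved
        let stack = take(&mut addr, stack_size); // grows downward toward the heap

        println!("code  {:#x}..{:#x}", code.0, code.1);
        println!("data  {:#x}..{:#x}", data.0, data.1);
        println!("heap  {:#x}..{:#x} ({:#x} bytes reserved)", heap.0, heap.1, heap_size);
        println!("stack {:#x}..{:#x} (stack top = end)", stack.0, stack.1);
    }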

@ -0,0 +1,11 @@
use super::*;
impl super::VMArea {
pub fn get_flags(&self) -> &VMAreaFlags {
&self.flags
}
pub fn get_flags_mut(&mut self) -> &mut VMAreaFlags {
&mut self.flags
}
}

@ -0,0 +1,31 @@
use super::*;
impl VMDomain {
pub fn alloc_area(&mut self, options: &VMAllocOptions, flags: VMAreaFlags) -> Result<VMArea, Error> {
let new_range = self.range.alloc_subrange(options)?;
// Init the memory area with all zeros
unsafe {
let mem_ptr = new_range.get_start() as *mut c_void;
let mem_size = new_range.get_size() as size_t;
memset(mem_ptr, 0 as c_int, mem_size);
}
Ok(VMArea { range: new_range, flags: flags })
}
pub fn dealloc_area(&mut self, area: &mut VMArea) {
self.range.dealloc_subrange(&mut area.range)
}
pub fn resize_area(&mut self, area: &mut VMArea, options: &VMResizeOptions)
-> Result<(), Error>
{
self.range.resize_subrange(&mut area.range, options)
}
}
#[link(name = "sgx_tstdc")]
extern {
pub fn memset(p: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
}

@ -0,0 +1,512 @@
use super::*;
use std::{fmt};
pub trait VMRangeTrait {
fn get_start(&self) -> usize;
fn get_end(&self) -> usize;
fn get_size(&self) -> usize;
fn get_growth(&self) -> VMGrowthType;
fn contains_obj(&self, ptr: usize, size: usize) -> bool;
}
macro_rules! impl_vmrange_trait_for {
($struct_name: ident, $field: ident) => {
impl VMRangeTrait for $struct_name {
fn get_start(&self) -> usize {
self.$field.get_start()
}
fn get_end(&self) -> usize {
self.$field.get_end()
}
fn get_size(&self) -> usize {
self.$field.get_end() - self.$field.get_start()
}
fn get_growth(&self) -> VMGrowthType {
self.$field.get_growth()
}
fn contains_obj(&self, ptr: usize, size: usize) -> bool {
self.$field.contains_obj(ptr, size)
}
}
}
}
impl_vmrange_trait_for!(VMRange, inner);
impl_vmrange_trait_for!(VMSpace, range);
impl_vmrange_trait_for!(VMDomain, range);
impl_vmrange_trait_for!(VMArea, range);
#[derive(Debug)]
pub struct VMRange {
inner: VMRangeInner,
parent_range: *const VMRange,
sub_ranges: Option<Vec<VMRangeInner>>,
}
impl VMRange {
pub unsafe fn new(start: usize, end: usize, growth: VMGrowthType) -> Result<VMRange, Error> {
if start % PAGE_SIZE != 0 || end % PAGE_SIZE != 0 {
return errno!(EINVAL, "Invalid start and/or end");
}
Ok(VMRange {
inner: VMRangeInner::new(start, end, growth),
parent_range: 0 as *const VMRange,
sub_ranges: None,
})
}
pub fn alloc_subrange(&mut self, options: &VMAllocOptions) -> Result<VMRange, Error> {
// Get valid parameters from options
let size = options.size;
let addr = options.addr;
let growth = options.growth.unwrap_or(VMGrowthType::Fixed);
// Lazy initialize the subrange array upon the first allocation
if !self.has_subranges() {
self.init_subranges()?;
}
// Find a free space for allocating a VMRange
let free_space = {
// Look for the minimal big-enough free space
let mut min_big_enough_free_space : Option<FreeSpace> = None;
let sub_ranges = self.get_subranges();
for (idx, range_pair) in sub_ranges.windows(2).enumerate() {
let pre_range = &range_pair[0];
let next_range = &range_pair[1];
let mut free_range = {
let free_range_start = pre_range.get_end();
let free_range_end = next_range.get_start();
let free_range_size = free_range_end - free_range_start;
if free_range_size < size { continue }
free_range_start..free_range_end
};
match addr {
VMAddrOption::Hint(addr) | VMAddrOption::Fixed(addr) => {
if !free_range.contains(&addr) { continue }
free_range.start = addr;
}
VMAddrOption::Beyond(addr) => {
if free_range.start < addr { continue }
}
_ => {}
}
let free_space = Some(FreeSpace {
index_in_subranges: idx + 1,
start: free_range.start,
end: free_range.end,
may_neighbor_grow: (pre_range.growth == VMGrowthType::Upward,
next_range.growth == VMGrowthType::Downward),
});
if min_big_enough_free_space == None ||
free_space < min_big_enough_free_space
{
min_big_enough_free_space = free_space;
match addr {
VMAddrOption::Hint(addr) | VMAddrOption::Fixed(addr) => {
break
}
_ => {},
}
}
}
if min_big_enough_free_space.is_none() {
return errno!(ENOMEM, "No enough space");
}
min_big_enough_free_space.unwrap()
};
// Given the free space, determine the start and end of the sub-range
let (new_subrange_start, new_subrange_end) = match addr {
VMAddrOption::Any | VMAddrOption::Beyond(_) => {
let should_no_gap_to_pre_domain =
free_space.may_neighbor_grow.0 == false &&
growth != VMGrowthType::Downward;
let should_no_gap_to_next_domain =
free_space.may_neighbor_grow.1 == false &&
growth != VMGrowthType::Upward;
let domain_start = if should_no_gap_to_pre_domain {
free_space.start
}
else if should_no_gap_to_next_domain {
free_space.end - size
}
else {
// We want to leave some space at both ends in case
// this sub-range or neighbor sub-range needs to grow later.
// As a simple heuristic, we put this sub-range near the
// center between the previous and next sub-ranges.
free_space.start + (free_space.get_size() - size) / 2
};
(domain_start, domain_start + size)
}
VMAddrOption::Fixed(addr) => {
(addr, addr + size)
}
VMAddrOption::Hint(addr) => {
return errno!(EINVAL, "Not implemented");
}
};
let new_subrange_inner = VMRangeInner::new(new_subrange_start,
new_subrange_end, growth);
self.get_subranges_mut().insert(free_space.index_in_subranges,
new_subrange_inner);
// Although there are two copies of the newly created VMRangeInner obj,
// we can keep them in sync as all mutation on VMRange object must
// be carried out through dealloc_subrange() and resize_subrange() that
// takes both a (parent) range and its (child) sub-range as parameters.
// We update both copies of VMRangeInner, one in parent and the
// other in child, in dealloc_subrange and resize_subrange functions.
Ok(VMRange {
inner: new_subrange_inner,
parent_range: self as *const VMRange,
sub_ranges: None,
})
}
pub fn dealloc_subrange(&mut self, subrange: &mut VMRange) {
self.ensure_subrange_is_a_child(subrange);
if subrange.has_subranges() {
panic!("A range can only be dealloc'ed when it has no sub-ranges");
}
// Remove the sub-range
let domain_i = self.position_subrange(subrange);
self.get_subranges_mut().remove(domain_i);
// When all sub-ranges are removed, remove the sub-range array
if self.get_subranges().len() == 2 { // two sentinel sub-ranges excluded
self.sub_ranges = None;
}
// Mark a range as dealloc'ed
subrange.mark_as_dealloced();
}
pub fn resize_subrange(&mut self, subrange: &mut VMRange, options: &VMResizeOptions)
-> Result<(), Error> {
self.ensure_subrange_is_a_child(subrange);
// Get valid parameters from options
let new_size = options.new_size;
let new_addr = options.new_addr;
// Handle no-resizing cases
if subrange.get_size() == new_size {
return Ok(());
}
if subrange.get_growth() == VMGrowthType::Fixed {
return errno!(EINVAL, "Cannot resize a fixed range");
}
// Shrink
if new_size < subrange.get_size() {
self.shrink_subrange_to(subrange, new_size)
}
// Grow
else {
self.grow_subrange_to(subrange, new_size)
}
}
fn init_subranges(&mut self) -> Result<(), Error> {
// Use dummy VMRange as sentinel object at both ends to make the allocation
// and deallocation algorithm simpler
let start = self.get_start();
let end = self.get_end();
let start_sentry = VMRangeInner::new(start, start, VMGrowthType::Fixed);
let end_sentry = VMRangeInner::new(end, end, VMGrowthType::Fixed);
self.sub_ranges = Some(vec![start_sentry, end_sentry]);
Ok(())
}
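// With the sentinels, a parent range [S, E) holding live sub-ranges A and B
// is stored as [ (S..S, Fixed), A, B, (E..E, Fixed) ], so every candidate
// gap lies between two adjacent array entries and the allocation loop never
// special-cases the gap before the first or after the last sub-range.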
fn ensure_subrange_is_a_child(&self, subrange: &VMRange) {
// FIXME:
/*if subrange.parent_range != self as *const VMRange {
panic!("This range does not contain the given sub-range");
}*/
}
fn position_subrange(&self, subrange: &VMRange) -> usize {
let sub_ranges = self.get_subranges();
sub_ranges.iter().position(|d| d == &subrange.inner).unwrap()
}
fn get_subranges(&self) -> &Vec<VMRangeInner> {
self.sub_ranges.as_ref().unwrap()
}
fn get_subranges_mut(&mut self) -> &mut Vec<VMRangeInner> {
self.sub_ranges.as_mut().unwrap()
}
fn has_subranges(&self) -> bool {
self.sub_ranges.is_some()
}
fn shrink_subrange_to(&mut self, subrange: &mut VMRange, new_size: usize)
-> Result<(), Error>
{
let subrange_i = self.position_subrange(subrange);
let subranges = self.get_subranges_mut();
if subrange.inner.growth == VMGrowthType::Upward {
// Can we do shrink?
let min_new_size = match subrange.sub_ranges.as_mut() {
Some(child_subranges) => {
let child_last_subrange = &child_subranges[
child_subranges.len() - 2];
child_last_subrange.end - subrange.inner.start
}
None => {
0
}
};
if new_size < min_new_size {
return errno!(ENOMEM, "Cannot shrink to new size");
}
// Do shrink
let new_subrange_end = subrange.inner.start + new_size;
subrange.inner.end = new_subrange_end;
// Sync state
subranges[subrange_i].end = new_subrange_end;
}
else { // self.growth == VMGrowthType::Downward
// Can we do shrink?
let min_new_size = match subrange.sub_ranges.as_mut() {
Some(child_subranges) => {
let child_first_subrange = &child_subranges[1];
subrange.inner.end - child_first_subrange.start
}
None => {
0
}
};
if new_size < min_new_size {
return errno!(ENOMEM, "Cannot shrink to new size");
}
// Do shrink
let new_subrange_start = subrange.inner.end - new_size;
subrange.inner.start = new_subrange_start;
// Sync state
subranges[subrange_i].start = new_subrange_start;
}
Ok(())
}
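// Worked example (illustrative numbers): shrinking an upward-growing
// sub-range [0x1000, 0x5000) to new_size = 0x2000 keeps the start fixed
// and moves the end down to 0x3000; a downward-growing one keeps the end
// fixed and moves the start up to 0x3000 instead.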
fn grow_subrange_to(&mut self, subrange: &mut VMRange, new_size: usize)
-> Result<(), Error>
{
let subrange_i = self.position_subrange(subrange);
let subranges = self.get_subranges_mut();
if subrange.inner.growth == VMGrowthType::Upward {
// Can we grow?
let max_new_size = {
let next_subrange = &subranges[subrange_i + 1];
next_subrange.start - subrange.inner.start
};
if new_size > max_new_size {
return errno!(ENOMEM, "Cannot grow to new size");
}
// Do grow
let subrange_new_end = subrange.inner.start + new_size;
subrange.inner.end = subrange_new_end;
// Sync state
subranges[subrange_i].end = subrange_new_end;
}
else { // self.growth == VMGrowthType::Downward
// Can we grow?
let max_new_size = {
let pre_subrange = &subranges[subrange_i - 1];
subrange.inner.end - pre_subrange.end
};
if new_size > max_new_size {
return errno!(ENOMEM, "Cannot grow to new size");
}
// Do grow
let subrange_new_start = subrange.inner.end - new_size;
subrange.inner.start = subrange_new_start;
// Sync state
subranges[subrange_i].start = subrange_new_start;
}
Ok(())
}
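// Worked example (illustrative numbers): growing an upward-growing
// sub-range [0x1000, 0x3000) to new_size = 0x4000 moves the end up to
// 0x5000, which succeeds only if the next sub-range starts at or above
// 0x5000.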
fn mark_as_dealloced(&mut self) {
self.parent_range = 0 as *const VMRange;
self.inner.start = self.inner.end;
}
fn is_dealloced(&self) -> bool {
self.parent_range.is_null()
}
}
impl PartialOrd for VMRange {
fn partial_cmp(&self, other: &VMRange) -> Option<Ordering> {
self.inner.partial_cmp(&other.inner)
}
}
impl PartialEq for VMRange {
fn eq(&self, other: &VMRange) -> bool {
self.inner.eq(&other.inner)
}
}
impl Drop for VMRange {
fn drop(&mut self) {
if !self.is_dealloced() {
panic!("A range must be dealloc'ed before drop");
}
if self.has_subranges() {
panic!("All sub-ranges must be removed explicitly before drop");
}
}
}
unsafe impl Send for VMRange {}
unsafe impl Sync for VMRange {}
#[derive(Clone, Copy)]
pub struct VMRangeInner {
start: usize,
end: usize,
growth: VMGrowthType,
}
impl VMRangeInner {
pub fn new(start: usize, end: usize, growth: VMGrowthType) -> VMRangeInner
{
VMRangeInner {
start: start,
end: end,
growth: growth,
}
}
}
impl VMRangeTrait for VMRangeInner {
fn get_start(&self) -> usize {
self.start
}
fn get_end(&self) -> usize {
self.end
}
fn get_size(&self) -> usize {
self.end - self.start
}
fn get_growth(&self) -> VMGrowthType {
self.growth
}
fn contains_obj(&self, ptr: usize, size: usize) -> bool {
let obj_begin = ptr;
let obj_end = obj_begin + size;
// The range is half-open, so an object may end exactly at `end`
self.start <= obj_begin && obj_end <= self.end
}
}
impl fmt::Debug for VMRangeInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VMRangeInner {{ start: 0x{:X?}, end: 0x{:X?}, size: 0x{:X?}, growth: {:?} }}",
self.start, self.end, self.get_size(), self.growth)
}
}
impl PartialOrd for VMRangeInner {
fn partial_cmp(&self, other: &VMRangeInner) -> Option<Ordering> {
if self.end <= other.start {
return Some(Ordering::Less);
}
else if self.start >= other.end {
return Some(Ordering::Greater);
}
else if self.start == other.start && self.end == other.end {
return Some(Ordering::Equal);
}
else {
return None;
}
}
}
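// Note: partially overlapping, unequal ranges are unordered; e.g.,
// [0x1000, 0x3000) vs [0x2000, 0x4000) compares as None.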
impl PartialEq for VMRangeInner {
fn eq(&self, other: &VMRangeInner) -> bool {
self.start == other.start && self.end == other.end
}
}
#[derive(Debug)]
struct FreeSpace {
index_in_subranges: usize,
start: usize,
end: usize,
may_neighbor_grow: (bool, bool),
}
impl FreeSpace {
fn get_neighbor_pressure(&self) -> u32 {
let mut pressure = 0;
pressure += if self.may_neighbor_grow.0 { 1 } else { 0 };
pressure += if self.may_neighbor_grow.1 { 1 } else { 0 };
pressure
}
fn get_size(&self) -> usize {
self.end - self.start
}
}
impl PartialEq for FreeSpace {
fn eq(&self, other: &FreeSpace) -> bool {
self.get_size() == other.get_size() &&
self.get_neighbor_pressure() == other.get_neighbor_pressure()
}
}
impl PartialOrd for FreeSpace {
fn partial_cmp(&self, other: &FreeSpace) -> Option<Ordering> {
let self_size = self.get_size();
let other_size = other.get_size();
if self_size < other_size {
Some(Ordering::Less)
}
else if self_size > other_size {
Some(Ordering::Greater)
}
else {
// The less neighbor pressure, the larger the free space
let self_neighbor_pressure = self.get_neighbor_pressure();
let other_neighbor_pressure = other.get_neighbor_pressure();
if self_neighbor_pressure > other_neighbor_pressure {
Some(Ordering::Less)
}
else if self_neighbor_pressure < other_neighbor_pressure {
Some(Ordering::Greater)
}
else {
Some(Ordering::Equal)
}
}
}
}
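// Minimal sanity checks for the ordering above (an illustrative sketch,
// assuming the crate can also be built for a host-side `cargo test` run):
// with equal sizes, the space under more neighbor pressure compares as
// smaller, so the allocator steers away from it.
#[cfg(test)]
mod free_space_ordering_tests {
    use super::*;

    #[test]
    fn smaller_size_compares_less() {
        let a = FreeSpace { index_in_subranges: 1, start: 0x1000, end: 0x2000, may_neighbor_grow: (false, false) };
        let b = FreeSpace { index_in_subranges: 1, start: 0x1000, end: 0x3000, may_neighbor_grow: (false, false) };
        assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
    }

    #[test]
    fn equal_size_more_pressure_compares_less() {
        let a = FreeSpace { index_in_subranges: 1, start: 0x1000, end: 0x2000, may_neighbor_grow: (true, true) };
        let b = FreeSpace { index_in_subranges: 1, start: 0x1000, end: 0x2000, may_neighbor_grow: (false, false) };
        assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
    }
}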

@ -0,0 +1,39 @@
use super::*;
impl VMSpace {
pub unsafe fn new(addr: usize, size: usize, guard_type: VMGuardAreaType)
-> Result<VMSpace, Error>
{
let range = unsafe {
VMRange::new(addr, addr + size, VMGrowthType::Fixed)?
};
Ok(VMSpace { range, guard_type })
}
pub fn get_guard_type(&self) -> VMGuardAreaType {
self.guard_type
}
pub fn alloc_domain(&mut self, size: usize)
-> Result<VMDomain, Error>
{
let mut options = VMAllocOptions::new(size)?;
options.growth(VMGrowthType::Upward)?;
let new_range = self.range.alloc_subrange(&options)?;
Ok(VMDomain {
range: new_range,
})
}
pub fn dealloc_domain(&mut self, domain: &mut VMDomain) {
self.range.dealloc_subrange(&mut domain.range)
}
pub fn resize_domain(&mut self, domain: &mut VMDomain, new_size: usize)
-> Result<(), Error>
{
let options = VMResizeOptions::new(new_size)?;
self.range.resize_subrange(&mut domain.range, &options)
}
}
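// Illustrative usage (hypothetical values; `addr` must be a real, suitably
// aligned buffer owned by the caller, and the `VMGuardAreaType::None`
// variant is an assumption of this example):
//
//     let mut space = unsafe { VMSpace::new(addr, size, VMGuardAreaType::None)? };
//     let mut domain = space.alloc_domain(8 * 1024 * 1024)?;
//     space.resize_domain(&mut domain, 16 * 1024 * 1024)?;
//     space.dealloc_domain(&mut domain);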

@ -1,211 +0,0 @@
/// Virtual Memory Area (VMA)
use prelude::*;
use {std};
use xmas_elf::program;
use xmas_elf::program::{ProgramHeader};
use mm::MemObj;
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct Vma {
/// Basic info
pub mem_size: usize,
pub mem_align: usize,
pub mem_flags: Perms,
/// File mapping
pub file_is_mapped: bool,
pub mem_addr: usize,
pub file_offset: usize,
pub file_size: usize,
/// Memory allocation
pub mem_begin: usize,
pub mem_end: usize,
underlying: Arc<MemObj>,
}
const VMA_MIN_MEM_ALIGN: usize = (4 * 1024);
impl Vma {
pub fn from_program_header<'a>(ph: &ProgramHeader<'a>)
-> Result<Vma, Error>
{
let ph64 = match ph {
ProgramHeader::Ph32(_) => {
return Err((Errno::ENOEXEC, "32-bit ELF is not supported").into())
}
ProgramHeader::Ph64(ph64) => {
ph64
}
};
if ph64.align > 1 && ((ph64.offset % ph64.align) !=
(ph64.virtual_addr % ph64.align)) {
return Err((Errno::EINVAL,
"File offset and memory address are not congruent modulo the alignment").into());
}
if ph64.mem_size < ph64.file_size {
return Err((Errno::EINVAL,
"Memory size must be no less than file size").into());
}
let mut new_vma = Vma::new(ph64.mem_size as usize,
ph64.align as usize,
Perms::from(&ph64.flags))?;
new_vma.mem_addr = ph64.virtual_addr as usize;
new_vma.file_is_mapped = true;
new_vma.file_offset = ph64.offset as usize;
new_vma.file_size = ph64.file_size as usize;
Ok(new_vma)
}
pub fn new(mem_size: usize, mem_align: usize, mem_flags: Perms)
-> Result<Self, Error>
{
if mem_align == 0 || mem_align % VMA_MIN_MEM_ALIGN != 0 {
return Err((Errno::EINVAL,
"Memory alignment is not a multiple of 4KB").into());
}
if mem_size == 0 {
return Err((Errno::EINVAL,
"Memory size must be greater than zero").into());
}
Ok(Vma {
mem_size: mem_size,
mem_align: mem_align,
mem_flags: mem_flags,
.. Default::default()
})
}
pub fn contains(&self, mem_addr: usize) -> bool {
// mem_end is exclusive, so the upper bound check is strict
self.mem_begin <= mem_addr && mem_addr < self.mem_end
}
}
pub fn malloc_batch(vma_list: &mut [&mut Vma], mapped_data: &[u8])
-> Result<usize, Error>
{
let mut max_align = VMA_MIN_MEM_ALIGN;
let mut total_size = 0;
for vma in vma_list.into_iter() {
let mem_begin = align_up(total_size, vma.mem_align);
let mem_end = mem_begin + align_up(vma.mem_size, vma.mem_align);
if vma.file_is_mapped {
if vma.mem_addr < mem_begin ||
vma.mem_addr + vma.mem_size > mem_end {
return Err((Errno::EINVAL,
"Impossible memory layout for the VMA").into());
}
if vma.file_offset > mapped_data.len() ||
vma.file_offset + vma.file_size > mapped_data.len() {
return Err((Errno::EINVAL,
"Impossible to load data from file").into());
}
}
total_size = mem_end;
if vma.mem_align > max_align {
max_align = vma.mem_align;
}
}
let memobj = Arc::new(MemObj::new(total_size, max_align)?);
let program_base_addr = memobj.get_addr();
let mut mem_cur = program_base_addr;
for vma in vma_list.into_iter() {
vma.mem_begin = align_up(mem_cur, vma.mem_align);
vma.mem_end = vma.mem_begin + align_up(vma.mem_size, vma.mem_align);
vma.mem_addr += program_base_addr;
vma.underlying = memobj.clone();
if vma.file_is_mapped {
let mut vma_data = unsafe {
std::slice::from_raw_parts_mut(vma.mem_addr as *mut u8, vma.file_size)
};
vma_data.copy_from_slice(&mapped_data[vma.file_offset..
vma.file_offset + vma.file_size]);
}
mem_cur = vma.mem_end;
}
Ok(program_base_addr)
}
pub fn mprotect_batch(vma_list: &[&Vma])
-> Result<(), Error>
{
for vma in vma_list.into_iter() {
// Skip this VMA if its memory permissions need not change
if vma.mem_flags == Perms(PERM_R | PERM_W) {
continue;
}
let start = align_down(vma.mem_addr, 4096);
let size = align_up(vma.mem_size, 4096);
let perms = vma.mem_flags.0 as uint64_t;
let status = unsafe {
//TODO: use proper permissions
//TODO: reset the permissions when drop VMA
//trts_mprotect(start, size, perms)
//println!("trts_mprotect: start = {}, size = {}", start, size);
trts_mprotect(start, size, (PERM_R | PERM_W | PERM_X) as uint64_t)
};
if status != sgx_status_t::SGX_SUCCESS {
return Err((Errno::EACCES, "trts_mprotect failed").into());
}
}
Ok(())
}
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct Perms(pub u32);
pub const PERM_R: u32 = 0x1;
pub const PERM_W: u32 = 0x2;
pub const PERM_X: u32 = 0x4;
impl Perms {
pub fn is_execute(&self) -> bool {
self.0 & PERM_X == PERM_X
}
pub fn is_write(&self) -> bool {
self.0 & PERM_W == PERM_W
}
pub fn is_read(&self) -> bool {
self.0 & PERM_R == PERM_R
}
}
impl<'a> From<&'a program::Flags> for Perms {
fn from(flags: &'a program::Flags) -> Self {
let mut val = 0;
if flags.is_execute() { val |= PERM_X; }
if flags.is_read() { val |= PERM_R; }
if flags.is_write() { val |= PERM_W; }
Perms(val)
}
}
fn align_up(addr: usize, align: usize) -> usize {
(addr + (align - 1)) / align * align
}
fn align_down(addr: usize, align: usize) -> usize {
addr & !(align - 1)
}
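// E.g., align_up(0x1234, 0x1000) == 0x2000 and align_down(0x1234, 0x1000)
// == 0x1000. Note that align_down assumes a power-of-two `align` (it uses
// bit masking), while align_up works for any non-zero alignment.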
#[link(name = "sgx_trts")]
extern {
pub fn trts_mprotect(start: size_t, size: size_t, perms: uint64_t) -> sgx_status_t;
}