Use special exception registration API and replace sgx_tprotect_rsrv_mem with low-level APIs

Author: Hui, Chunyang, 2023-11-02 03:05:28 +00:00; committed by volcano
parent ca4bcbf8fe
commit aae9b6d940
15 changed files with 454 additions and 380 deletions

@@ -48,7 +48,6 @@ dcap = [] # DCAP support. The compilation relies on DCAP package.
 cov = ["sgx_cov"] # Enable coverage collection.
 hyper_mode = [] # For running in hyper mode.
 pku = [] # PKU Support
-sim_mode = [] # For running in SGX simulation mode
 
 [target.'cfg(not(target_env = "sgx"))'.dependencies]
 sgx_types = { path = "../../deps/rust-sgx-sdk/sgx_types" }

@@ -61,12 +61,7 @@ else
 endif
 
 LIBOS_CORE_A := $(OBJ_DIR)/libos/lib/lib$(LIBOS_CORE_LIB_NAME).a
-ifeq ($(SGX_MODE), SIM)
-LIBOS_CORE_RS_A := $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs_sim.a
-else
-LIBOS_CORE_RS_A := $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs.a
-endif
+LIBOS_CORE_RS_A := $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs.a
 
 # All source code
 RUST_SRCS := $(wildcard src/*.rs src/*/*.rs src/*/*/*.rs src/*/*/*/*.rs src/*/*/*/*/*.rs)
@@ -145,27 +140,20 @@ ifeq ($(SGX_MODE), HYPER)
 LIBOS_FEATURES += hyper_mode
 endif
 
-ifeq ($(SGX_MODE), SIM)
-LIBOS_FEATURES += sim_mode
-endif
-
 # Release build is for production use. We enable code coverage only for debug
 # build. It also simplifies the implementation as the release and debug build
 # have different output paths.
 ifeq ($(OCCLUM_RELEASE_BUILD), 1)
 $(LIBOS_CORE_RS_A): $(RUST_SRCS)
 	@RUSTC_BOOTSTRAP=1 RUSTC_WRAPPER=$(RUSTC_WRAPPER) cargo build --release --target-dir=$(RUST_TARGET_DIR) -Z unstable-options --out-dir=$(RUST_OUT_DIR) --features "$(LIBOS_FEATURES)"
-	@mv $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs.a $@ || true
 	@echo "CARGO (release) => $@"
 else ifneq ($(OCCLUM_COV),)
 $(LIBOS_CORE_RS_A): $(RUST_SRCS)
 	@CARGO_INCREMENTAL=0 RUSTC_BOOTSTRAP=1 RUSTFLAGS=$(COV_FLAGS) cargo build --target-dir=$(RUST_TARGET_DIR) -Z unstable-options --out-dir=$(RUST_OUT_DIR) --features "$(LIBOS_FEATURES)"
-	@mv $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs.a $@ || true
 	@echo "CARGO (debug + cov) => $@"
 else
 $(LIBOS_CORE_RS_A): $(RUST_SRCS)
 	@RUSTC_BOOTSTRAP=1 RUSTC_WRAPPER=$(RUSTC_WRAPPER) cargo build --target-dir=$(RUST_TARGET_DIR) -Z unstable-options --out-dir=$(RUST_OUT_DIR) --features "$(LIBOS_FEATURES)"
-	@mv $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs.a $@ || true
 	@echo "CARGO (debug) => $@"
 endif

@@ -87,9 +87,6 @@ pub extern "C" fn occlum_ecall_init(
         }
     }
 
-    // Register exception handlers (support cpuid & rdtsc for now)
-    register_exception_handlers();
-
     unsafe {
         let dir_str: &str = CStr::from_ptr(instance_dir).to_str().unwrap();
         INSTANCE_DIR.push_str(dir_str);
@@ -99,11 +96,16 @@ pub extern "C" fn occlum_ecall_init(
 
     interrupt::init();
 
-    HAS_INIT.store(true, Ordering::Release);
-
     // Init boot up time stamp here.
     time::up_time::init();
 
+    vm::init_user_space();
+
+    // Register exception handlers (support cpuid & rdtsc for now)
+    register_exception_handlers();
+
+    HAS_INIT.store(true, Ordering::Release);
+
     // Enable global backtrace
     unsafe { backtrace::enable_backtrace(&ENCLAVE_PATH, PrintFormat::Short) };

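A note on the reordering in this hunk: `register_exception_handlers()` moved below `vm::init_user_space()` because handler registration now consumes the user-space ranges held by the lazily initialized VM manager. A minimal sketch of that ordering constraint, with illustrative stand-ins rather than the actual Occlum items:

```rust
use lazy_static::lazy_static;

struct VmManager; // stand-in for UserSpaceVMManager
impl VmManager {
    fn get_user_space_ranges(&self) -> [(usize, usize); 2] {
        [(0x10_0000, 0x20_0000), (0x30_0000, 0x40_0000)] // placeholder ranges
    }
}

lazy_static! {
    // The expensive setup (EPC allocation etc.) runs on first deref.
    static ref MANAGER: VmManager = VmManager;
}

fn init_user_space() {
    // Force the one-time initialization up front.
    let _ = &*MANAGER;
}

fn register_exception_handlers() {
    // Safe to consume the ranges now: the manager is already initialized.
    let _ranges = MANAGER.get_user_space_ranges();
}

fn main() {
    init_user_space(); // must come first
    register_exception_handlers();
}
```
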
@@ -7,7 +7,7 @@ use super::*;
 use crate::signal::{FaultSignal, SigSet};
 use crate::syscall::exception_interrupt_syscall_c_abi;
 use crate::syscall::{CpuContext, ExtraContext, SyscallNum};
-use crate::vm::{enclave_page_fault_handler, USER_SPACE_VM_MANAGER};
+use crate::vm::{enclave_page_fault_handler, is_page_committed, VMRange, USER_SPACE_VM_MANAGER};
 use sgx_types::*;
 use sgx_types::{sgx_exception_type_t, sgx_exception_vector_t};
@@ -21,12 +21,22 @@ mod rdtsc;
 mod syscall;
 
 pub fn register_exception_handlers() {
-    setup_cpuid_info();
-    // Register handlers whose priorities go from low to high
-    unsafe {
-        let is_first = 1;
-        sgx_register_exception_handler(is_first, handle_exception);
-    }
+    extern "C" {
+        fn sgx_register_exception_handler_for_occlum_user_space(
+            user_space_ranges: *const [VMRange; 2],
+            handler: sgx_exception_handler_t,
+        ) -> sgx_status_t;
+    }
+
+    setup_cpuid_info();
+    let user_space_ranges: [VMRange; 2] = USER_SPACE_VM_MANAGER.get_user_space_ranges();
+    let ret = unsafe {
+        sgx_register_exception_handler_for_occlum_user_space(
+            &user_space_ranges as *const _,
+            handle_exception,
+        )
+    };
+    assert!(ret == sgx_status_t::SGX_SUCCESS);
 }
 
 fn try_handle_kernel_exception(info: &sgx_exception_info_t) -> i32 {
@@ -51,6 +61,12 @@ fn try_handle_kernel_exception(info: &sgx_exception_info_t) -> i32 {
             return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;
         }
 
+        // Check spurious #PF
+        // FIXME: We can reconsider this check when we know the root cause
+        if is_page_committed(pf_addr) {
+            return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;
+        }
+
         // If the triggered code is not user's code and the #PF address is in the userspace, then it is a
         // kernel-triggered #PF that we can handle. This can happen e.g. when read syscall triggers user buffer #PF
         info!("kernel code triggers #PF");

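The new `is_page_committed` guard handles faults that are already resolved by the time the handler runs: another thread may have committed the page between the #PF being raised and this check. A condensed, hypothetical sketch of the triage order this hunk implements (the constant and callbacks are simplified stand-ins):

```rust
const CONTINUE_EXECUTION: i32 = -1; // stand-in for SGX_MM_EXCEPTION_CONTINUE_EXECUTION

fn triage_kernel_page_fault(
    pf_addr: usize,
    is_page_committed: impl Fn(usize) -> bool,
    is_in_user_space: impl Fn(usize) -> bool,
) -> Option<i32> {
    // 1. Spurious #PF: another thread committed the page after the fault
    //    was raised, so simply retry the faulting instruction.
    if is_page_committed(pf_addr) {
        return Some(CONTINUE_EXECUTION);
    }
    // 2. Kernel-triggered #PF on a user buffer (e.g. a read syscall touching
    //    an uncommitted mmap region): commit the page, then retry.
    if is_in_user_space(pf_addr) {
        // ... commit the page here ...
        return Some(CONTINUE_EXECUTION);
    }
    // 3. Not ours: let the remaining handlers (or the signal path) decide.
    None
}

fn main() {
    let verdict = triage_kernel_page_fault(0x7000_0000, |_| true, |_| true);
    assert_eq!(verdict, Some(CONTINUE_EXECUTION));
}
```
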
@@ -26,6 +26,8 @@
 #![feature(strict_provenance)]
 // for VMArea::can_merge_vmas
 #![feature(is_some_and)]
+// for edmm_api macro
+#![feature(linkage)]
 
 #[macro_use]
 extern crate alloc;

@@ -90,6 +90,18 @@ pub use self::vm_perms::VMPerms;
 pub use self::vm_range::VMRange;
 pub use self::vm_util::{VMInitializer, VMMapOptionsBuilder};
 
+pub fn init_user_space() {
+    // Lazy initialize
+    let _ = &USER_SPACE_VM_MANAGER;
+}
+
+pub fn is_page_committed(addr: usize) -> bool {
+    page_tracker::USER_SPACE_PAGE_CHUNK_MANAGER
+        .read()
+        .unwrap()
+        .is_committed(addr)
+}
+
 pub fn do_mmap(
     addr: usize,
     size: usize,

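`init_user_space()` works because a `lazy_static` value runs its initializer the first time it is dereferenced; touching the static once during boot moves that cost (and any failure) out of the page-fault path. A tiny demonstration of the mechanism, independent of Occlum:

```rust
use lazy_static::lazy_static;

lazy_static! {
    static ref EXPENSIVE: Vec<u8> = {
        println!("initializing"); // printed exactly once
        vec![0u8; 4096]
    };
}

fn main() {
    let _ = &*EXPENSIVE; // first deref: runs the initializer
    let _ = &*EXPENSIVE; // second deref: plain read, no work
}
```
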
@@ -4,7 +4,7 @@ use super::user_space_vm::USER_SPACE_VM_MANAGER;
 use super::vm_util::{GB, KB, MB};
 use bitvec::vec::BitVec;
 use util::sync::RwLock;
-use vm_epc::EPCMemType;
+use vm_epc::{EPCAllocator, EPCMemType, UserRegionMem};
 
 // In SGX v2, there is no upper limit for the size of EPC. If the user configures 1 TB of memory,
 // and we only use one bit to track whether a page is committed, that's 1 TB / 4 kB / 8 bit = 32 MB of memory.
@@ -53,6 +53,23 @@ impl PageChunkManager {
             inner: HashMap::new(),
         }
     }
+
+    pub fn is_committed(&self, mem_addr: usize) -> bool {
+        let page_start_addr = align_down(mem_addr, PAGE_SIZE);
+        let page_chunk_start_addr = get_page_chunk_start_addr(page_start_addr);
+        if let Some(global_page_chunk) = self.inner.get(&page_chunk_start_addr) {
+            if let Some(page_tracker) = &global_page_chunk.tracker {
+                let page_id = (page_start_addr - page_chunk_start_addr) / PAGE_SIZE;
+                page_tracker.read().unwrap().inner[page_id] == true
+            } else {
+                debug_assert!(global_page_chunk.fully_committed == true);
+                return true;
+            }
+        } else {
+            // the whole global page chunk is not committed
+            false
+        }
+    }
 }
 
 #[derive(Debug)]
@@ -288,12 +305,16 @@ impl PageTracker {
         // Commit EPC
         if self.is_reserved_only() {
-            vm_epc::commit_memory(self.range().start(), self.range().size(), Some(perms)).unwrap();
+            UserRegionMem
+                .commit_memory(self.range().start(), self.range().size(), Some(perms))
+                .unwrap();
         } else {
             debug_assert!(self.is_partially_committed());
 
             let uncommitted_ranges = self.get_ranges(false);
             for range in uncommitted_ranges {
-                vm_epc::commit_memory(range.start(), range.size(), Some(perms)).unwrap();
+                UserRegionMem
+                    .commit_memory(range.start(), range.size(), Some(perms))
+                    .unwrap();
             }
         }
@@ -311,7 +332,7 @@
         debug_assert!(self.type_ == TrackerType::VMATracker);
         debug_assert!(self.range().is_superset_of(range));
 
-        vm_epc::commit_memory(range.start(), range.size(), new_perms)?;
+        UserRegionMem.commit_memory(range.start(), range.size(), new_perms)?;
 
         self.commit_pages_common(range.start(), range.size());
         self.set_committed_pages_for_global_tracker(range.start(), range.size());
@@ -319,24 +340,16 @@
         Ok(())
     }
 
-    pub fn commit_memory_and_init_with_file(
+    pub fn commit_memory_with_data(
         &mut self,
         range: &VMRange,
-        file: &FileRef,
-        file_offset: usize,
+        data: &[u8],
         new_perms: VMPerms,
     ) -> Result<()> {
         debug_assert!(self.type_ == TrackerType::VMATracker);
         debug_assert!(self.range().is_superset_of(range));
 
-        vm_epc::commit_memory_and_init_with_file(
-            range.start(),
-            range.size(),
-            file,
-            file_offset,
-            new_perms,
-        )?;
+        UserRegionMem.commit_memory_with_data(range.start(), data, new_perms)?;
 
         self.commit_pages_common(range.start(), range.size());
         self.set_committed_pages_for_global_tracker(range.start(), range.size());

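For the sizing comment near the top of this file's diff, the arithmetic checks out; a quick worked version of the 1 TB figure:

```rust
fn main() {
    let user_space: usize = 1 << 40; // 1 TB of configured user space
    let page_size: usize = 4096;     // 4 kB pages
    let pages = user_space / page_size; // 268,435,456 pages
    let bitmap_bytes = pages / 8;       // one bit per page
    assert_eq!(bitmap_bytes, 32 << 20); // 32 MB of tracking memory
}
```
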
@@ -54,6 +54,27 @@ impl UserSpaceVMManager {
     pub fn get_total_size(&self) -> usize {
         self.range().size()
     }
+
+    pub fn get_user_space_ranges(&self) -> [VMRange; 2] {
+        let total_user_space_range = self.range();
+        let gap_range = self.gap_range();
+        if let Some(gap) = gap_range {
+            // There are two parts of user space
+            let (part_a_start_addr, part_b_end_addr) =
+                (total_user_space_range.start(), total_user_space_range.end());
+            let (part_a_end_addr, part_b_start_addr) = (gap.start(), gap.end());
+            let user_space_range_a = VMRange::new(part_a_start_addr, part_a_end_addr).unwrap();
+            let user_space_range_b = VMRange::new(part_b_start_addr, part_b_end_addr).unwrap();
+            [user_space_range_a, user_space_range_b]
+        } else {
+            // There is no gap. Thus set the part B memory range to zero
+            let (part_a_start_addr, part_a_end_addr) =
+                (total_user_space_range.start(), total_user_space_range.end());
+            let user_space_range_a = VMRange::new(part_a_start_addr, part_a_end_addr).unwrap();
+            let user_space_range_b = unsafe { VMRange::from_unchecked(0, 0) };
+            [user_space_range_a, user_space_range_b]
+        }
+    }
 }
 
 // This provides a module teardown function attribute similar to `__attribute__((destructor))` in C/C++ and will

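The `[VMRange; 2]` returned here mirrors the EDMM layout that `SGXPlatform` sets up: part A is the reserved (fully committed) memory and part B the user region committed on demand, with the gap between them excluded. A rough sketch with made-up addresses:

```rust
fn main() {
    // Illustrative addresses only; the real values come from the allocator.
    let (space_start, space_end) = (0x10_0000usize, 0x50_0000usize);
    let (gap_start, gap_end) = (0x20_0000usize, 0x30_0000usize);

    let part_a = (space_start, gap_start); // reserved memory
    let part_b = (gap_end, space_end);     // EDMM user region

    // Without a gap, part B collapses to the empty (0, 0) range, which the
    // exception registration can treat as "no second range".
    println!("A = {:#x?}, B = {:#x?}", part_a, part_b);
}
```
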
@@ -4,7 +4,9 @@ use super::page_tracker::PageTracker;
 use super::vm_epc::EPCMemType;
 use super::vm_perms::VMPerms;
 use super::vm_range::VMRange;
-use super::vm_util::{FileBacked, PagePolicy, VMInitializer, VMMapOptions, GB, KB, MB};
+use super::vm_util::{
+    AlignedZeroPage, FileBacked, PagePolicy, VMInitializer, VMMapOptions, GB, KB, MB,
+};
 use intrusive_collections::rbtree::{Link, RBTree};
 use intrusive_collections::{intrusive_adapter, KeyAdapter};
 use std::ops::{Deref, DerefMut};
@@ -231,7 +233,7 @@ impl VMArea {
             // Set memory permissions
             if !options.perms().is_default() {
-                vm_area.modify_protection_force(None, VMPerms::DEFAULT, vm_area.perms());
+                vm_area.modify_permission_force(None, VMPerms::DEFAULT, vm_area.perms());
             }
         }
 
         // Do nothing if this vma has no committed memory
@@ -274,7 +276,7 @@ impl VMArea {
         debug_assert!(self.range().is_superset_of(target_range));
         let buf = unsafe { target_range.as_slice_mut() };
         if !self.perms().is_default() {
-            self.modify_protection_force(Some(&target_range), self.perms(), VMPerms::default());
+            self.modify_permission_force(Some(&target_range), self.perms(), VMPerms::default());
         }
 
         if need_flush {
@@ -296,17 +298,13 @@ impl VMArea {
         }
     }
 
-    pub fn modify_permissions_for_committed_pages(
-        &self,
-        current_perms: VMPerms,
-        new_perms: VMPerms,
-    ) {
+    pub fn modify_permissions_for_committed_pages(&self, curr_perms: VMPerms, new_perms: VMPerms) {
         if self.is_fully_committed() {
-            self.modify_protection_force(None, current_perms, new_perms);
+            self.modify_permission_force(None, curr_perms, new_perms);
         } else if self.is_partially_committed() {
             let committed = true;
             for range in self.pages().get_ranges(committed) {
-                self.modify_protection_force(Some(&range), current_perms, new_perms);
+                self.modify_permission_force(Some(&range), curr_perms, new_perms);
             }
         }
     }
@@ -642,7 +640,7 @@ impl VMArea {
     // The current implementation with "unwrap()" helps us find errors quickly by panicking directly. Also, restoring the VM state
     // when this function fails would require some work and is not that simple.
     // TODO: Return a Result instead of using "unwrap()" in this function.
-    fn modify_protection_force(
+    fn modify_permission_force(
         &self,
         protect_range: Option<&VMRange>,
         current_perms: VMPerms,
@@ -651,7 +649,8 @@ impl VMArea {
         let protect_range = protect_range.unwrap_or_else(|| self.range());
 
         self.epc_type
-            .modify_protection(
+            .epc_allocator()
+            .modify_permission(
                 protect_range.start(),
                 protect_range.size(),
                 current_perms,
@@ -682,7 +681,7 @@ impl VMArea {
                 }
                 VMInitializer::DoNothing() => {
                     if !self.perms().is_default() {
-                        self.modify_protection_force(Some(target_range), VMPerms::DEFAULT, perms);
+                        self.modify_permission_force(Some(target_range), VMPerms::DEFAULT, perms);
                     }
                 }
                 VMInitializer::FillZeros() => {
@@ -691,37 +690,14 @@ impl VMArea {
                         buf.iter_mut().for_each(|b| *b = 0);
                     }
                     if !perms.is_default() {
-                        self.modify_protection_force(Some(target_range), VMPerms::DEFAULT, perms);
+                        self.modify_permission_force(Some(target_range), VMPerms::DEFAULT, perms);
                     }
                 }
                 _ => todo!(),
             }
         } else {
             // No initializer, #PF triggered.
-            let init_file = self
-                .backed_file()
-                .map(|(file, offset)| (file.clone(), offset));
-            if let Some((file, offset)) = init_file {
-                let vma_range_start = self.range.start();
-                let init_file_offset = offset + (target_range.start() - vma_range_start);
-                self.pages
-                    .as_mut()
-                    .unwrap()
-                    .commit_memory_and_init_with_file(
-                        target_range,
-                        &file,
-                        init_file_offset,
-                        perms,
-                    )?;
-            } else {
-                // PF triggered, no file-backed memory, just modify protection
-                self.pages
-                    .as_mut()
-                    .unwrap()
-                    .commit_range(target_range, Some(perms))?;
-            }
+            self.init_memory_for_page_fault(target_range)?;
         }
 
         Ok(())
@@ -746,12 +722,41 @@ impl VMArea {
             .map_err(|_| errno!(EACCES, "failed to init memory from file"))?;
 
         if !new_perm.is_default() {
-            self.modify_protection_force(Some(target_range), VMPerms::DEFAULT, new_perm);
+            self.modify_permission_force(Some(target_range), VMPerms::DEFAULT, new_perm);
         }
 
         Ok(())
     }
 
+    fn init_memory_for_page_fault(&mut self, target_range: &VMRange) -> Result<()> {
+        let perms = self.perms;
+        let init_file = self
+            .backed_file()
+            .map(|(file, offset)| (file.clone(), offset));
+        if let Some((file, offset)) = init_file {
+            let vma_range_start = self.range.start();
+            let init_file_offset = offset + (target_range.start() - vma_range_start);
+            let mut data = AlignedZeroPage::new_page_aligned_vec(target_range.size());
+            let _ = file
+                .read_at(init_file_offset, data.as_mut_slice())
+                .map_err(|_| errno!(EACCES, "failed to init memory from file"))?;
+            self.pages.as_mut().unwrap().commit_memory_with_data(
+                target_range,
+                data.as_slice(),
+                perms,
+            )?;
+        } else {
+            // PF triggered, no file-backed memory, just modify protection
+            self.pages
+                .as_mut()
+                .unwrap()
+                .commit_range(target_range, Some(perms))?;
+        }
+        Ok(())
+    }
+
     fn get_commit_once_size(&self) -> usize {
         COMMIT_SIZE_UNIT
     }

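The extracted `init_memory_for_page_fault` keeps the property the removed `vm_epc::commit_memory_and_init_with_file` had: page contents and their final permissions are applied in one EACCEPTCOPY-based commit rather than committing RW and calling mprotect afterwards, which would open a data race. A stubbed-out sketch of the flow (the helpers here are placeholders, not Occlum APIs):

```rust
fn fault_in_file_backed(range_start: usize, range_size: usize, file_offset: usize) {
    // Real code uses AlignedZeroPage::new_page_aligned_vec: EACCEPTCOPY works
    // on whole pages, so the staging buffer is page-sized and page-aligned.
    let mut data = vec![0u8; range_size];
    read_at(file_offset, &mut data); // short reads leave trailing zeros
    // One step: copy the bytes in and set the final permissions.
    commit_with_data(range_start, &data);
}

fn read_at(_offset: usize, _buf: &mut [u8]) { /* placeholder file read */ }
fn commit_with_data(_addr: usize, _data: &[u8]) { /* placeholder EACCEPTCOPY commit */ }

fn main() {
    fault_in_file_backed(0x40_0000, 8192, 0);
}
```
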
@@ -293,8 +293,7 @@ impl ChunkManager {
             if intersection_vma.range() == containing_vma.range() {
                 // The whole containing_vma is mprotected
                 containing_vma.set_perms(new_perms);
-                containing_vma
-                    .modify_permissions_for_committed_pages(old_perms, containing_vma.perms());
+                containing_vma.modify_permissions_for_committed_pages(old_perms, new_perms);
                 containing_vmas.replace_with(VMAObj::new_vma_obj(containing_vma));
                 containing_vmas.move_next();
                 continue;
@@ -318,10 +317,7 @@ impl ChunkManager {
                     new_perms,
                     VMAccess::Private(current_pid),
                 );
-                new_vma.modify_permissions_for_committed_pages(
-                    containing_vma.perms(),
-                    new_vma.perms(),
-                );
+                new_vma.modify_permissions_for_committed_pages(old_perms, new_perms);
                 let new_vma = VMAObj::new_vma_obj(new_vma);
 
                 // Another new VMA
@@ -355,10 +351,7 @@ impl ChunkManager {
                     VMAccess::Private(current_pid),
                 );
-                new_vma.modify_permissions_for_committed_pages(
-                    containing_vma.perms(),
-                    new_vma.perms(),
-                );
+                new_vma.modify_permissions_for_committed_pages(old_perms, new_perms);
 
                 if remain_vma.start() == containing_vma.start() {
                     // mprotect right side of the vma

@@ -1,5 +1,7 @@
 // This file contains EPC related APIs and definitions.
+use super::vm_util::AlignedZeroPage;
 use super::*;
+use edmm_api::EDMMLocalApi;
 use sgx_trts::emm::{
     AllocAddr, AllocFlags, AllocOptions, EmmAlloc, HandleResult, PageFaultHandler, Perm,
 };
@@ -29,60 +31,49 @@ pub enum EPCMemType {
 pub struct ReservedMem;
 pub struct UserRegionMem;
 
-#[repr(C, align(4096))]
-#[derive(Clone)]
-struct ZeroPage([u8; PAGE_SIZE]);
-
-impl ZeroPage {
-    fn new() -> Self {
-        Self([0; PAGE_SIZE])
-    }
-
-    fn new_page_aligned_vec(size: usize) -> Vec<u8> {
-        debug_assert!(size % PAGE_SIZE == 0);
-        let page_num = size / PAGE_SIZE;
-        let mut page_vec = vec![Self::new(); page_num];
-
-        let ptr = page_vec.as_mut_ptr();
-
-        let size = page_num * std::mem::size_of::<Self>();
-        std::mem::forget(page_vec);
-
-        unsafe { Vec::from_raw_parts(ptr as *mut u8, size, size) }
-    }
-}
-
 lazy_static! {
-    static ref ZERO_PAGE: Vec<u8> = ZeroPage::new_page_aligned_vec(PAGE_SIZE);
+    static ref ZERO_PAGE: Vec<u8> = AlignedZeroPage::new_page_aligned_vec(PAGE_SIZE);
 }
 
 pub trait EPCAllocator {
-    fn alloc(size: usize) -> Result<usize> {
+    fn alloc(&self, size: usize) -> Result<usize> {
         return_errno!(ENOSYS, "operation not supported");
     }
 
-    fn alloc_with_addr(addr: usize, size: usize) -> Result<usize> {
+    fn alloc_with_addr(&self, addr: usize, size: usize) -> Result<usize> {
         return_errno!(ENOSYS, "operation not supported");
     }
 
-    fn free(addr: usize, size: usize) -> Result<()> {
+    fn free(&self, addr: usize, size: usize) -> Result<()> {
         return_errno!(ENOSYS, "operation not supported");
     }
 
-    fn modify_protection(
+    fn modify_permission(
+        &self,
         addr: usize,
         length: usize,
-        current_protection: VMPerms,
-        new_protection: VMPerms,
+        curr_perms: VMPerms,
+        new_perms: VMPerms,
     ) -> Result<()> {
         return_errno!(ENOSYS, "operation not supported");
     }
 
-    fn mem_type() -> EPCMemType;
+    fn commit_memory(&self, start_addr: usize, size: usize, perms: Option<VMPerms>) -> Result<()> {
+        return_errno!(ENOSYS, "operation not supported");
+    }
+
+    fn commit_memory_with_data(
+        &self,
+        start_addr: usize,
+        data: &[u8],
+        perms: VMPerms,
+    ) -> Result<()> {
+        return_errno!(ENOSYS, "operation not supported");
+    }
 }
 
 impl EPCAllocator for ReservedMem {
-    fn alloc(size: usize) -> Result<usize> {
+    fn alloc(&self, size: usize) -> Result<usize> {
         let ptr = unsafe { sgx_alloc_rsrv_mem(size) };
         if ptr.is_null() {
             return_errno!(ENOMEM, "run out of reserved memory");
@@ -90,7 +81,7 @@ impl EPCAllocator for ReservedMem {
         Ok(ptr as usize)
     }
 
-    fn alloc_with_addr(addr: usize, size: usize) -> Result<usize> {
+    fn alloc_with_addr(&self, addr: usize, size: usize) -> Result<usize> {
         let ptr = unsafe { sgx_alloc_rsrv_mem_ex(addr as *const c_void, size) };
         if ptr.is_null() {
             return_errno!(ENOMEM, "can't allocate reserved memory at desired address");
@@ -98,32 +89,50 @@ impl EPCAllocator for ReservedMem {
         Ok(ptr as usize)
     }
 
-    fn free(addr: usize, size: usize) -> Result<()> {
+    fn free(&self, addr: usize, size: usize) -> Result<()> {
         let ret = unsafe { sgx_free_rsrv_mem(addr as *const c_void, size) };
         assert!(ret == 0);
         Ok(())
     }
 
-    fn modify_protection(
+    fn modify_permission(
+        &self,
         addr: usize,
         length: usize,
-        current_protection: VMPerms,
-        new_protection: VMPerms,
+        curr_perms: VMPerms,
+        new_perms: VMPerms,
     ) -> Result<()> {
+        // PT_GROWSDOWN should only be applied to a stack segment or a segment mapped with the MAP_GROWSDOWN flag set.
+        // Since the memory is managed by ourselves, the mprotect ocall shouldn't use this flag. Otherwise, EINVAL will be thrown.
+        let mut new_perms = new_perms.clone();
+        new_perms.remove(VMPerms::GROWSDOWN);
+
         let mut ret_val = 0;
         let ret = if rsgx_is_supported_EDMM() {
-            unsafe {
-                sgx_tprotect_rsrv_mem(addr as *const c_void, length, new_protection.bits() as i32)
-            }
+            // Use the raw implementation to reduce the SDK's overhead
+            trace!(
+                "raw modify_permission curr_perms: {:?}, new_perms: {:?}",
+                curr_perms,
+                new_perms
+            );
+            EDMMLocalApi::modify_permissions(addr, length, curr_perms, new_perms).unwrap();
+            sgx_status_t::SGX_SUCCESS
         } else {
             // For platforms without EDMM, sgx_tprotect_rsrv_mem is actually useless.
             // However, at least we can set pages to desired protections in the host kernel page table.
+            extern "C" {
+                fn occlum_ocall_mprotect(
+                    retval: *mut i32,
+                    addr: *const c_void,
+                    len: usize,
+                    prot: i32,
+                ) -> sgx_status_t;
+            }
+
             unsafe {
                 occlum_ocall_mprotect(
                     &mut ret_val as *mut i32,
                     addr as *const c_void,
                     length,
-                    new_protection.bits() as i32,
+                    new_perms.bits() as i32,
                 )
             }
         };
@@ -135,14 +144,10 @@ impl EPCAllocator for ReservedMem {
 
         Ok(())
     }
-
-    fn mem_type() -> EPCMemType {
-        EPCMemType::Reserved
-    }
 }
 
 impl EPCAllocator for UserRegionMem {
-    fn alloc(size: usize) -> Result<usize> {
+    fn alloc(&self, size: usize) -> Result<usize> {
         let alloc_options = AllocOptions::new()
             .set_flags(AllocFlags::COMMIT_ON_DEMAND)
             .set_handler(enclave_page_fault_handler_dummy, 0);
@@ -152,94 +157,58 @@ impl EPCAllocator for UserRegionMem {
         Ok(ptr.addr().get())
     }
 
-    fn free(addr: usize, size: usize) -> Result<()> {
+    fn free(&self, addr: usize, size: usize) -> Result<()> {
         let ptr = NonNull::<u8>::new(addr as *mut u8).unwrap();
         unsafe { EmmAlloc.dealloc(ptr, size) }.map_err(|e| errno!(Errno::from(e as u32)))?;
         Ok(())
     }
 
-    fn modify_protection(
+    fn modify_permission(
+        &self,
         addr: usize,
         length: usize,
-        current_protection: VMPerms,
-        new_protection: VMPerms,
+        curr_perms: VMPerms,
+        new_perms: VMPerms,
     ) -> Result<()> {
+        // PT_GROWSDOWN should only be applied to a stack segment or a segment mapped with the MAP_GROWSDOWN flag set.
+        // Since the memory is managed by ourselves, the mprotect ocall shouldn't use this flag. Otherwise, EINVAL will be thrown.
+        let mut new_perms = new_perms.clone();
+        new_perms.remove(VMPerms::GROWSDOWN);
+
         trace!(
             "user region modify protection, protection = {:?}, range = {:?}",
-            new_protection,
+            new_perms,
             VMRange::new_with_size(addr, length).unwrap()
         );
 
-        // Simulation mode doesn't have the symbol used here
-        #[cfg(not(feature = "sim_mode"))]
-        {
-            EDMMLocalApi::modify_permissions(addr, length, current_protection, new_protection)?;
-        }
-        #[cfg(feature = "sim_mode")]
-        unreachable!();
+        EDMMLocalApi::modify_permissions(addr, length, curr_perms, new_perms)?;
 
         Ok(())
     }
 
-    fn mem_type() -> EPCMemType {
-        EPCMemType::UserRegion
-    }
-}
-
-impl UserRegionMem {
-    fn commit_memory(start_addr: usize, size: usize) -> Result<()> {
-        #[cfg(not(feature = "sim_mode"))]
-        EDMMLocalApi::commit_memory(start_addr, size)?;
-
-        #[cfg(feature = "sim_mode")]
-        unreachable!();
-
-        Ok(())
-    }
-
-    fn commit_memory_with_new_permission(
-        start_addr: usize,
-        size: usize,
-        new_perms: VMPerms,
-    ) -> Result<()> {
-        #[cfg(not(feature = "sim_mode"))]
-        {
-            if size == PAGE_SIZE {
-                EDMMLocalApi::commit_with_data(start_addr, ZERO_PAGE.as_slice(), new_perms)?;
-            } else {
-                let data = ZeroPage::new_page_aligned_vec(size);
-                EDMMLocalApi::commit_with_data(start_addr, data.as_slice(), new_perms)?;
-            }
-        }
-        #[cfg(feature = "sim_mode")]
-        unreachable!();
-
-        Ok(())
-    }
-
-    fn commit_memory_and_init_with_file(
-        start_addr: usize,
-        size: usize,
-        file: &FileRef,
-        file_offset: usize,
-        new_perms: VMPerms,
-    ) -> Result<()> {
-        #[cfg(not(feature = "sim_mode"))]
-        {
-            let mut data = ZeroPage::new_page_aligned_vec(size);
-            let len = file
-                .read_at(file_offset, data.as_mut_slice())
-                .map_err(|_| errno!(EACCES, "failed to init memory from file"))?;
-            EDMMLocalApi::commit_with_data(start_addr, data.as_slice(), new_perms)?;
-        }
-        #[cfg(feature = "sim_mode")]
-        unreachable!();
-
-        Ok(())
-    }
+    fn commit_memory(&self, start_addr: usize, size: usize, perms: Option<VMPerms>) -> Result<()> {
+        match perms {
+            Some(perms) if perms != VMPerms::DEFAULT => {
+                if size == PAGE_SIZE {
+                    EDMMLocalApi::commit_with_data(start_addr, ZERO_PAGE.as_slice(), perms)?;
+                } else {
+                    let data = AlignedZeroPage::new_page_aligned_vec(size);
+                    EDMMLocalApi::commit_with_data(start_addr, data.as_slice(), perms)?;
+                }
+            }
+            _ => EDMMLocalApi::commit_memory(start_addr, size)?,
+        }
+        Ok(())
+    }
+
+    fn commit_memory_with_data(
+        &self,
+        start_addr: usize,
+        data: &[u8],
+        new_perms: VMPerms,
+    ) -> Result<()> {
+        EDMMLocalApi::commit_with_data(start_addr, data, new_perms)
+    }
 }
@@ -260,9 +229,9 @@ impl SGXPlatform {
         if matches!(self, SGXPlatform::WithEDMM) && max_size > init_size {
             let user_region_size = max_size - init_size;
 
-            let reserved_mem_start_addr = ReservedMem::alloc(init_size)?;
+            let reserved_mem_start_addr = ReservedMem.alloc(init_size)?;
 
-            let user_region_start_addr = UserRegionMem::alloc(user_region_size)?;
+            let user_region_start_addr = UserRegionMem.alloc(user_region_size)?;
 
             let total_user_space_range = VMRange::new(
                 reserved_mem_start_addr,
@@ -280,7 +249,7 @@ impl SGXPlatform {
             Ok((total_user_space_range, Some(gap_range)))
         } else {
             // For platforms without EDMM support, or when max_size equals init_size, use reserved memory for the whole userspace
-            let reserved_mem_start_addr = ReservedMem::alloc(max_size)?;
+            let reserved_mem_start_addr = ReservedMem.alloc(max_size)?;
 
             let total_user_space_range =
                 VMRange::new(reserved_mem_start_addr, reserved_mem_start_addr + max_size)?;
@@ -304,13 +273,19 @@ impl SGXPlatform {
             debug_assert!(matches!(self, SGXPlatform::WithEDMM));
             let reserved_mem = user_space_ranges[0];
             let user_region_mem = user_space_ranges[1];
-            ReservedMem::free(reserved_mem.start(), reserved_mem.size()).unwrap();
-            UserRegionMem::free(user_region_mem.start(), user_region_mem.size()).unwrap();
+            ReservedMem
+                .free(reserved_mem.start(), reserved_mem.size())
+                .unwrap();
+            UserRegionMem
+                .free(user_region_mem.start(), user_region_mem.size())
+                .unwrap();
         } else {
             // For platforms with EDMM where max_size equals init_size, or platforms without EDMM, there is no gap range.
             debug_assert!(user_space_ranges.len() == 1);
             let reserved_mem = user_space_ranges[0];
-            ReservedMem::free(reserved_mem.start(), reserved_mem.size()).unwrap();
+            ReservedMem
+                .free(reserved_mem.start(), reserved_mem.size())
+                .unwrap();
         }
     }
 }
@@ -354,58 +329,14 @@ impl EPCMemType {
         }
     }
 
-    pub fn modify_protection(
-        &self,
-        addr: usize,
-        length: usize,
-        current_protection: VMPerms,
-        new_protection: VMPerms,
-    ) -> Result<()> {
-        // PT_GROWSDOWN should only be applied to stack segment or a segment mapped with the MAP_GROWSDOWN flag set.
-        // Since the memory are managed by our own, mprotect ocall shouldn't use this flag. Otherwise, EINVAL will be thrown.
-        let mut prot = new_protection;
-        let mut current_prot = current_protection;
-        prot.remove(VMPerms::GROWSDOWN);
-        current_prot.remove(VMPerms::GROWSDOWN);
-
+    pub fn epc_allocator(&self) -> &dyn EPCAllocator {
         match self {
-            EPCMemType::Reserved => {
-                ReservedMem::modify_protection(addr, length, current_prot, prot)
-            }
-            EPCMemType::UserRegion => {
-                UserRegionMem::modify_protection(addr, length, current_prot, prot)
-            }
+            EPCMemType::Reserved => &ReservedMem,
+            EPCMemType::UserRegion => &UserRegionMem,
         }
     }
 }
 
-pub fn commit_memory(start_addr: usize, size: usize, new_perms: Option<VMPerms>) -> Result<()> {
-    debug!(
-        "commit epc: {:?}, new permission: {:?}",
-        VMRange::new_with_size(start_addr, size).unwrap(),
-        new_perms
-    );
-
-    // We should make memory commit and permission change atomic to prevent data races. Thus, if the new perms
-    // are not the default permission (RW), we implement a different function by calling EACCEPTCOPY
-    match new_perms {
-        Some(perms) if perms != VMPerms::DEFAULT => {
-            UserRegionMem::commit_memory_with_new_permission(start_addr, size, perms)
-        }
-        _ => UserRegionMem::commit_memory(start_addr, size),
-    }
-}
-
-pub fn commit_memory_and_init_with_file(
-    start_addr: usize,
-    size: usize,
-    file: &FileRef,
-    file_offset: usize,
-    new_perms: VMPerms,
-) -> Result<()> {
-    UserRegionMem::commit_memory_and_init_with_file(start_addr, size, file, file_offset, new_perms)
-}
-
 // This is a dummy function for sgx_mm_alloc. The real handler is "enclave_page_fault_handler" shown below.
 extern "C" fn enclave_page_fault_handler_dummy(
     pfinfo: &sgx_pfinfo,
@@ -432,146 +363,199 @@ pub fn enclave_page_fault_handler(
     Ok(())
 }
 
-extern "C" {
-    fn occlum_ocall_mprotect(
-        retval: *mut i32,
-        addr: *const c_void,
-        len: usize,
-        prot: i32,
-    ) -> sgx_status_t;
-
-    fn sgx_mm_modify_ocall(addr: usize, size: usize, flags_from: i32, flags_to: i32) -> i32;
-
-    // EACCEPT
-    fn do_eaccept(si: *const sec_info_t, addr: usize) -> i32;
-
-    // EMODPE
-    fn do_emodpe(si: *const sec_info_t, addr: usize) -> i32;
-
-    // EACCEPTCOPY
-    fn do_eacceptcopy(si: *const sec_info_t, dest: usize, src: usize) -> i32;
-}
-
-#[allow(non_camel_case_types)]
-#[repr(C, align(512))]
-struct sec_info_t {
-    flags: u64,
-    reserved: [u64; 7],
-}
-
-impl sec_info_t {
-    const SGX_EMA_STATE_PENDING: u64 = 0x08; // pending state
-    const SGX_EMA_STATE_PR: u64 = 0x20; // permission restriction state
-
-    fn new_for_modify_permission(new_protection: &VMPerms) -> Self {
-        Self {
-            flags: ((new_protection.bits() | SGX_EMA_PAGE_TYPE_REG) as u64)
-                | Self::SGX_EMA_STATE_PR,
-            reserved: [0; 7],
-        }
-    }
-
-    fn new_for_commit_memory() -> Self {
-        Self {
-            flags: ((VMPerms::DEFAULT.bits() | SGX_EMA_PAGE_TYPE_REG) as u64)
-                | Self::SGX_EMA_STATE_PENDING,
-            reserved: [0; 7],
-        }
-    }
-
-    fn new_for_commit_with_data(protection: &VMPerms) -> Self {
-        Self {
-            flags: (protection.bits() | SGX_EMA_PAGE_TYPE_REG) as u64,
-            reserved: [0; 7],
-        }
-    }
-}
-
-#[cfg(not(feature = "sim_mode"))]
-struct EDMMLocalApi;
-
-#[cfg(not(feature = "sim_mode"))]
-impl EDMMLocalApi {
-    // To replace sgx_mm_commit
-    fn commit_memory(start_addr: usize, size: usize) -> Result<()> {
-        let si = sec_info_t::new_for_commit_memory();
-        for page in (start_addr..start_addr + size).step_by(PAGE_SIZE) {
-            let ret = unsafe { do_eaccept(&si as *const sec_info_t, page) };
-            if ret != 0 {
-                return_errno!(EFAULT, "do_eaccept failure");
-            }
-        }
-        Ok(())
-    }
-
-    // To replace sgx_mm_commit_data
-    fn commit_with_data(addr: usize, data: &[u8], perm: VMPerms) -> Result<()> {
-        let si = sec_info_t::new_for_commit_with_data(&perm);
-        let size = data.len();
-        let mut src_raw_ptr = data.as_ptr() as usize;
-        for dest_page in (addr..addr + size).step_by(PAGE_SIZE) {
-            let ret = unsafe { do_eacceptcopy(&si as *const sec_info_t, dest_page, src_raw_ptr) };
-            if ret != 0 {
-                return_errno!(EFAULT, "do_eacceptcopy failure");
-            }
-            Self::modify_permissions(dest_page, PAGE_SIZE, VMPerms::DEFAULT, perm)?;
-            src_raw_ptr += PAGE_SIZE;
-        }
-        Ok(())
-    }
-
-    // To replace sgx_mm_modify_permissions
-    fn modify_permissions(
-        addr: usize,
-        length: usize,
-        current_protection: VMPerms,
-        new_protection: VMPerms,
-    ) -> Result<()> {
-        if current_protection == new_protection {
-            return Ok(());
-        }
-
-        let flags_from = current_protection.bits() | SGX_EMA_PAGE_TYPE_REG;
-        let flags_to = new_protection.bits() | SGX_EMA_PAGE_TYPE_REG;
-        let ret = unsafe { sgx_mm_modify_ocall(addr, length, flags_from as i32, flags_to as i32) };
-        if ret != 0 {
-            return_errno!(EFAULT, "sgx_mm_modify_ocall failure");
-        }
-
-        let si = sec_info_t::new_for_modify_permission(&new_protection);
-        for page in (addr..addr + length).step_by(PAGE_SIZE) {
-            debug_assert!(page % PAGE_SIZE == 0);
-            if new_protection.bits() | current_protection.bits() != current_protection.bits() {
-                unsafe { do_emodpe(&si as *const sec_info_t, page) };
-                // Check this return value is useless. RAX is set to SE_EMODPE which is 6 defined in SDK.
-            }
-            // If new permission is RWX, no EMODPR needed in untrusted part, hence no EACCEPT
-            if new_protection != VMPerms::ALL {
-                let ret = unsafe { do_eaccept(&si, page) };
-                if ret != 0 {
-                    return_errno!(EFAULT, "do_eaccept failure");
-                }
-            }
-        }
-
-        // ???
-        if new_protection == VMPerms::NONE {
-            let ret = unsafe {
-                sgx_mm_modify_ocall(
-                    addr,
-                    length,
-                    (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PROT_NONE) as i32,
-                    (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PROT_NONE) as i32,
-                )
-            };
-            if ret != 0 {
-                return_errno!(EFAULT, "sgx_mm_modify_ocall failure for permission None");
-            }
-        }
-        Ok(())
-    }
-}
+mod edmm_api {
+    use super::*;
+    use std::marker::PhantomData;
+    use std::mem;
+
+    pub(super) struct EDMMLocalApi;
+
+    impl EDMMLocalApi {
+        // To replace sgx_mm_commit
+        pub(super) fn commit_memory(start_addr: usize, size: usize) -> Result<()> {
+            let si = sec_info_t::new_for_commit_memory();
+            for page in (start_addr..start_addr + size).step_by(PAGE_SIZE) {
+                let ret = unsafe { do_eaccept(&si as *const sec_info_t, page) };
+                if ret != 0 {
+                    return_errno!(EFAULT, "do_eaccept failure");
+                }
+            }
+            Ok(())
+        }
+
+        // To replace sgx_mm_commit_data
+        pub(super) fn commit_with_data(addr: usize, data: &[u8], perm: VMPerms) -> Result<()> {
+            let si = sec_info_t::new_for_commit_with_data(&perm);
+            let size = data.len();
+            let mut src_raw_ptr = data.as_ptr() as usize;
+            for dest_page in (addr..addr + size).step_by(PAGE_SIZE) {
+                let ret =
+                    unsafe { do_eacceptcopy(&si as *const sec_info_t, dest_page, src_raw_ptr) };
+                if ret != 0 {
+                    return_errno!(EFAULT, "do_eacceptcopy failure");
+                }
+                src_raw_ptr += PAGE_SIZE;
+            }
+
+            Self::modify_permissions(addr, size, VMPerms::DEFAULT, perm)?;
+            Ok(())
+        }
+
+        // To replace sgx_mm_modify_permissions
+        pub(super) fn modify_permissions(
+            addr: usize,
+            length: usize,
+            curr_perms: VMPerms,
+            new_perms: VMPerms,
+        ) -> Result<()> {
+            if curr_perms == new_perms {
+                return Ok(());
+            }
+
+            let flags_from = curr_perms.bits() | SGX_EMA_PAGE_TYPE_REG;
+            let flags_to = new_perms.bits() | SGX_EMA_PAGE_TYPE_REG;
+            let ret =
+                unsafe { sgx_mm_modify_ocall(addr, length, flags_from as i32, flags_to as i32) };
+            if ret != 0 {
+                return_errno!(EFAULT, "sgx_mm_modify_ocall failure");
+            }
+
+            let si = sec_info_t::new_for_modify_permission(&new_perms);
+            for page in (addr..addr + length).step_by(PAGE_SIZE) {
+                debug_assert!(page % PAGE_SIZE == 0);
+                if new_perms.bits() | curr_perms.bits() != curr_perms.bits() {
+                    unsafe { do_emodpe(&si as *const sec_info_t, page) };
+                    // Checking this return value is useless. RAX is set to SE_EMODPE, which is 6, defined in the SDK.
+                }
+                // If the new permission is RWX, no EMODPR is needed in the untrusted part, hence no EACCEPT
+                if new_perms != VMPerms::ALL {
+                    let ret = unsafe { do_eaccept(&si, page) };
+                    if ret != 0 {
+                        return_errno!(EFAULT, "do_eaccept failure");
+                    }
+                }
+            }
+
+            if new_perms == VMPerms::NONE {
+                let ret = unsafe {
+                    sgx_mm_modify_ocall(
+                        addr,
+                        length,
+                        (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PROT_NONE) as i32,
+                        (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PROT_NONE) as i32,
+                    )
+                };
+                if ret != 0 {
+                    return_errno!(EFAULT, "sgx_mm_modify_ocall failure for permission None");
+                }
+            }
+            Ok(())
+        }
+    }
+
+    extern "C" {
+        // EACCEPT
+        fn do_eaccept(si: *const sec_info_t, addr: usize) -> i32;
+
+        // EMODPE
+        fn do_emodpe(si: *const sec_info_t, addr: usize) -> i32;
+
+        // EACCEPTCOPY
+        fn do_eacceptcopy(si: *const sec_info_t, dest: usize, src: usize) -> i32;
+    }
+
+    #[allow(non_camel_case_types)]
+    #[repr(C, align(512))]
+    struct sec_info_t {
+        flags: u64,
+        reserved: [u64; 7],
+    }
+
+    impl sec_info_t {
+        const SGX_EMA_STATE_PENDING: u64 = 0x08; // pending state
+        const SGX_EMA_STATE_PR: u64 = 0x20; // permission restriction state
+
+        fn new_for_modify_permission(new_perms: &VMPerms) -> Self {
+            Self {
+                flags: ((new_perms.bits() | SGX_EMA_PAGE_TYPE_REG) as u64) | Self::SGX_EMA_STATE_PR,
+                reserved: [0; 7],
+            }
+        }
+
+        fn new_for_commit_memory() -> Self {
+            Self {
+                flags: ((VMPerms::DEFAULT.bits() | SGX_EMA_PAGE_TYPE_REG) as u64)
+                    | Self::SGX_EMA_STATE_PENDING,
+                reserved: [0; 7],
+            }
+        }
+
+        fn new_for_commit_with_data(protection: &VMPerms) -> Self {
+            Self {
+                flags: (protection.bits() | SGX_EMA_PAGE_TYPE_REG) as u64,
+                reserved: [0; 7],
+            }
+        }
+    }
+
+    macro_rules! weak {
+        (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+            let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
+                extern "C" {
+                    #[linkage = "extern_weak"]
+                    static $name: *const c_void;
+                }
+                #[allow(unused_unsafe)]
+                ExternWeak::new(unsafe { $name })
+            };
+        )
+    }
+
+    pub struct ExternWeak<F> {
+        weak_ptr: *const c_void,
+        _marker: PhantomData<F>,
+    }
+
+    impl<F> ExternWeak<F> {
+        #[inline]
+        pub fn new(weak_ptr: *const c_void) -> Self {
+            ExternWeak {
+                weak_ptr,
+                _marker: PhantomData,
+            }
+        }
+
+        #[inline]
+        pub fn get(&self) -> Option<F> {
+            unsafe {
+                if self.weak_ptr.is_null() {
+                    None
+                } else {
+                    Some(mem::transmute_copy::<*const c_void, F>(&self.weak_ptr))
+                }
+            }
+        }
+    }
+
+    macro_rules! edmm_api {
+        (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => (
+            unsafe fn $name($($arg_name:$t),*) -> $ret {
+                weak! { fn $name($($t),*) -> $ret }
+                if let Some(fun) = $name.get() {
+                    fun($($arg_name),*)
+                } else {
+                    (ENOSYS) as $ret
+                }
+            }
+        )
+    }
+
+    // Special symbol which is not defined in sim mode
+    edmm_api! {
+        fn sgx_mm_modify_ocall(addr: usize, size: usize, flags_from: i32, flags_to: i32) -> i32
+    }
+}

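The `weak!`/`edmm_api!` pair at the end replaces the former hard `extern` declaration of `sgx_mm_modify_ocall`. With `#[linkage = "extern_weak"]` the symbol resolves to a null pointer when it is absent (as in SGX simulation mode), so the wrapper can return `ENOSYS` instead of failing at link time; this is why the `sim_mode` feature and its `cfg` branches could be dropped elsewhere in this commit. A minimal, self-contained sketch of the pattern (nightly-only; the two-argument signature is a simplification, not the real ocall):

```rust
#![feature(linkage)]

use std::ffi::c_void;

unsafe fn my_weak_call(addr: usize, size: usize) -> i32 {
    extern "C" {
        #[linkage = "extern_weak"]
        static sgx_mm_modify_ocall: *const c_void;
    }
    if sgx_mm_modify_ocall.is_null() {
        // Symbol not linked in (e.g. simulation mode): fail soft.
        return 38; // ENOSYS
    }
    // Reinterpret the non-null symbol address as the expected fn pointer.
    let f: unsafe extern "C" fn(usize, usize) -> i32 =
        std::mem::transmute(sgx_mm_modify_ocall);
    f(addr, size)
}

fn main() {
    // On a build where the symbol is absent, this returns ENOSYS (38).
    let ret = unsafe { my_weak_call(0, 0) };
    println!("ret = {ret}");
}
```
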
@@ -1,5 +1,6 @@
 use super::*;
 
+#[repr(C)]
 #[derive(Clone, Copy, Default, Eq, PartialEq, Hash)]
 pub struct VMRange {
     pub(super) start: usize,
@@ -638,3 +638,26 @@ pub trait VMRemapParser {
 
     fn is_free_range(&self, request_range: &VMRange) -> bool;
 }
+
+#[repr(C, align(4096))]
+#[derive(Clone)]
+pub struct AlignedZeroPage([u8; PAGE_SIZE]);
+
+impl AlignedZeroPage {
+    fn new() -> Self {
+        Self([0; PAGE_SIZE])
+    }
+
+    pub fn new_page_aligned_vec(size: usize) -> Vec<u8> {
+        debug_assert!(size % PAGE_SIZE == 0);
+        let page_num = size / PAGE_SIZE;
+        let mut page_vec = vec![Self::new(); page_num];
+
+        let ptr = page_vec.as_mut_ptr();
+
+        let size = page_num * std::mem::size_of::<Self>();
+        std::mem::forget(page_vec);
+
+        unsafe { Vec::from_raw_parts(ptr as *mut u8, size, size) }
+    }
+}

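A note on the trick above (shared with the old private `ZeroPage` it replaces): `vec![Self::new(); page_num]` allocates with `AlignedZeroPage`'s 4096-byte alignment, and `Vec::from_raw_parts` re-types that same allocation as a `Vec<u8>`, so the byte buffer stays page-aligned; `mem::forget` is what prevents a double free. A hypothetical quick check:

```rust
fn main() {
    let buf = AlignedZeroPage::new_page_aligned_vec(4 * 4096);
    assert_eq!(buf.as_ptr() as usize % 4096, 0); // page-aligned as required
    assert!(buf.iter().all(|&b| b == 0));        // and zero-filled
}
```

One subtlety worth flagging: when the resulting `Vec<u8>` is eventually dropped, it deallocates with `u8`'s layout rather than the original 4096-byte alignment, which the Rust allocator contract technically disallows; long-lived buffers like `ZERO_PAGE` sidestep the issue by never being freed.
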
@@ -1,5 +1,5 @@
 include ../test_common.mk
 
-EXTRA_C_FLAGS := -Wno-return-stack-address -Wno-unused-but-set-variable
+EXTRA_C_FLAGS := -Wno-return-stack-address -Wno-unused-but-set-variable -g
 EXTRA_LINK_FLAGS := -lpthread
 BIN_ARGS :=

@@ -365,6 +365,21 @@ int test_handle_sigsegv() {
     printf("Signal handler successfully jumped over a null-dereferencing instruction\n");
 
+    void *ptr = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    if (ptr == MAP_FAILED) {
+        THROW_ERROR("mmap failure");
+    }
+
+    int ret = mprotect(ptr, 8192, PROT_NONE);
+    if (ret < 0) {
+        THROW_ERROR("mprotect failure");
+    }
+
+    val = read_maybe_null(ptr);
+    (void)val; // to suppress "unused variables" warning
+    printf("Signal handler successfully jumped over a PROT_NONE-visit instruction\n");
+
     if (sigaction(SIGSEGV, &old_action, NULL) < 0) {
         THROW_ERROR("restoring old signal handler failed");
     }