Add msync system call and flush updates to shared file memory mappings

1. Add msync-related methods to VMManager;
2. Add msync system call;
3. Fix munmap, fsync, and fdatasync system calls;
4. Add test cases.
This commit is contained in:
Tate, Hongliang Tian 2020-07-08 11:07:47 +00:00
parent c85163ec0a
commit a6dbce21cc
8 changed files with 386 additions and 58 deletions

@ -3,6 +3,7 @@ use super::*;
pub fn do_fsync(fd: FileDesc) -> Result<()> { pub fn do_fsync(fd: FileDesc) -> Result<()> {
debug!("fsync: fd: {}", fd); debug!("fsync: fd: {}", fd);
let file_ref = current!().file(fd)?; let file_ref = current!().file(fd)?;
flush_vm_backed_by(&file_ref);
file_ref.sync_all()?; file_ref.sync_all()?;
Ok(()) Ok(())
} }
@ -10,6 +11,11 @@ pub fn do_fsync(fd: FileDesc) -> Result<()> {
pub fn do_fdatasync(fd: FileDesc) -> Result<()> { pub fn do_fdatasync(fd: FileDesc) -> Result<()> {
debug!("fdatasync: fd: {}", fd); debug!("fdatasync: fd: {}", fd);
let file_ref = current!().file(fd)?; let file_ref = current!().file(fd)?;
flush_vm_backed_by(&file_ref);
file_ref.sync_data()?; file_ref.sync_data()?;
Ok(()) Ok(())
} }
fn flush_vm_backed_by(file: &FileRef) {
current!().vm().msync_by_file(file);
}

@ -45,7 +45,7 @@ use crate::signal::{
do_kill, do_rt_sigaction, do_rt_sigpending, do_rt_sigprocmask, do_rt_sigreturn, do_sigaltstack, do_kill, do_rt_sigaction, do_rt_sigpending, do_rt_sigprocmask, do_rt_sigreturn, do_sigaltstack,
do_tgkill, do_tkill, sigaction_t, sigset_t, stack_t, do_tgkill, do_tkill, sigaction_t, sigset_t, stack_t,
}; };
use crate::vm::{MMapFlags, MRemapFlags, VMPerms}; use crate::vm::{MMapFlags, MRemapFlags, MSyncFlags, VMPerms};
use crate::{fs, process, std, vm}; use crate::{fs, process, std, vm};
use super::*; use super::*;
@ -105,7 +105,7 @@ macro_rules! process_syscall_table_with_callback {
(Select = 23) => do_select(nfds: c_int, readfds: *mut libc::fd_set, writefds: *mut libc::fd_set, exceptfds: *mut libc::fd_set, timeout: *mut timeval_t), (Select = 23) => do_select(nfds: c_int, readfds: *mut libc::fd_set, writefds: *mut libc::fd_set, exceptfds: *mut libc::fd_set, timeout: *mut timeval_t),
(SchedYield = 24) => do_sched_yield(), (SchedYield = 24) => do_sched_yield(),
(Mremap = 25) => do_mremap(old_addr: usize, old_size: usize, new_size: usize, flags: i32, new_addr: usize), (Mremap = 25) => do_mremap(old_addr: usize, old_size: usize, new_size: usize, flags: i32, new_addr: usize),
(Msync = 26) => handle_unsupported(), (Msync = 26) => do_msync(addr: usize, size: usize, flags: u32),
(Mincore = 27) => handle_unsupported(), (Mincore = 27) => handle_unsupported(),
(Madvise = 28) => handle_unsupported(), (Madvise = 28) => handle_unsupported(),
(Shmget = 29) => handle_unsupported(), (Shmget = 29) => handle_unsupported(),
@ -772,6 +772,12 @@ fn do_brk(new_brk_addr: usize) -> Result<isize> {
Ok(ret_brk_addr as isize) Ok(ret_brk_addr as isize)
} }
fn do_msync(addr: usize, size: usize, flags: u32) -> Result<isize> {
let flags = MSyncFlags::from_u32(flags)?;
vm::do_msync(addr, size, flags)?;
Ok(0)
}
fn do_sysinfo(info: *mut sysinfo_t) -> Result<isize> { fn do_sysinfo(info: *mut sysinfo_t) -> Result<isize> {
check_mut_ptr(info)?; check_mut_ptr(info)?;
let info = unsafe { &mut *info }; let info = unsafe { &mut *info };

@ -14,7 +14,7 @@ mod vm_range;
use self::vm_layout::VMLayout; use self::vm_layout::VMLayout;
use self::vm_manager::{VMManager, VMMapOptionsBuilder}; use self::vm_manager::{VMManager, VMMapOptionsBuilder};
pub use self::process_vm::{MMapFlags, MRemapFlags, ProcessVM, ProcessVMBuilder}; pub use self::process_vm::{MMapFlags, MRemapFlags, MSyncFlags, ProcessVM, ProcessVMBuilder};
pub use self::user_space_vm::USER_SPACE_VM_MANAGER; pub use self::user_space_vm::USER_SPACE_VM_MANAGER;
pub use self::vm_perms::VMPerms; pub use self::vm_perms::VMPerms;
pub use self::vm_range::VMRange; pub use self::vm_range::VMRange;
@ -74,4 +74,18 @@ pub fn do_brk(addr: usize) -> Result<usize> {
current!().vm().brk(addr) current!().vm().brk(addr)
} }
pub fn do_msync(addr: usize, size: usize, flags: MSyncFlags) -> Result<()> {
debug!(
"msync: addr: {:#x}, size: {:#x}, flags: {:?}",
addr, size, flags
);
if flags.contains(MSyncFlags::MS_INVALIDATE) {
return_errno!(EINVAL, "not support MS_INVALIDATE");
}
if flags.contains(MSyncFlags::MS_ASYNC) {
warn!("not support MS_ASYNC");
}
current!().vm().msync(addr, size)
}
pub const PAGE_SIZE: usize = 4096; pub const PAGE_SIZE: usize = 4096;

@ -311,13 +311,24 @@ impl ProcessVM {
} }
} }
}; };
// Only shared, file-backed memory mappings have write-back files
let writeback_file = if flags.contains(MMapFlags::MAP_SHARED) {
if let VMInitializer::LoadFromFile { file, offset } = &initializer {
Some((file.clone(), *offset))
} else {
None
}
} else {
None
};
let mmap_options = VMMapOptionsBuilder::default() let mmap_options = VMMapOptionsBuilder::default()
.size(size) .size(size)
.addr(addr_option) .addr(addr_option)
.perms(perms) .perms(perms)
.initializer(initializer) .initializer(initializer)
.writeback_file(writeback_file)
.build()?; .build()?;
let mmap_addr = self.mmap_manager.lock().unwrap().mmap(&mmap_options)?; let mmap_addr = self.mmap_manager.lock().unwrap().mmap(mmap_options)?;
Ok(mmap_addr) Ok(mmap_addr)
} }
@ -358,6 +369,17 @@ impl ProcessVM {
mmap_manager.mprotect(addr, size, perms) mmap_manager.mprotect(addr, size, perms)
} }
pub fn msync(&self, addr: usize, size: usize) -> Result<()> {
let sync_range = VMRange::new_with_size(addr, size)?;
let mut mmap_manager = self.mmap_manager.lock().unwrap();
mmap_manager.msync_by_range(&sync_range)
}
pub fn msync_by_file(&self, sync_file: &FileRef) {
let mut mmap_manager = self.mmap_manager.lock().unwrap();
mmap_manager.msync_by_file(sync_file);
}
// Return: a copy of the found region // Return: a copy of the found region
pub fn find_mmap_region(&self, addr: usize) -> Result<VMRange> { pub fn find_mmap_region(&self, addr: usize) -> Result<VMRange> {
self.mmap_manager self.mmap_manager
@ -434,3 +456,22 @@ impl Default for MRemapFlags {
MRemapFlags::None MRemapFlags::None
} }
} }
bitflags! {
pub struct MSyncFlags : u32 {
const MS_ASYNC = 0x1;
const MS_INVALIDATE = 0x2;
const MS_SYNC = 0x4;
}
}
impl MSyncFlags {
pub fn from_u32(bits: u32) -> Result<Self> {
let flags =
MSyncFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "containing unknown bits"))?;
if flags.contains(Self::MS_ASYNC | Self::MS_SYNC) {
return_errno!(EINVAL, "must be either sync or async");
}
Ok(flags)
}
}

@ -4,15 +4,39 @@ use super::vm_perms::VMPerms;
use super::vm_range::VMRange; use super::vm_range::VMRange;
use super::*; use super::*;
#[derive(Clone, Copy, Debug, Default, PartialEq)] #[derive(Clone, Debug, Default)]
pub struct VMArea { pub struct VMArea {
range: VMRange, range: VMRange,
perms: VMPerms, perms: VMPerms,
writeback_file: Option<(FileRef, usize)>,
} }
impl VMArea { impl VMArea {
pub fn new(range: VMRange, perms: VMPerms) -> Self { pub fn new(range: VMRange, perms: VMPerms, writeback_file: Option<(FileRef, usize)>) -> Self {
Self { range, perms } Self {
range,
perms,
writeback_file,
}
}
/// Create a new VMArea object that inherits the write-back file (if any), but has
/// a new range and permissions.
pub fn inherits_file_from(vma: &VMArea, new_range: VMRange, new_perms: VMPerms) -> Self {
let new_writeback_file = vma.writeback_file.as_ref().map(|(file, file_offset)| {
let new_file = file.clone();
let new_file_offset = if vma.start() < new_range.start() {
let vma_offset = new_range.start() - vma.start();
*file_offset + vma_offset
} else {
let vma_offset = vma.start() - new_range.start();
debug_assert!(*file_offset >= vma_offset);
*file_offset - vma_offset
};
(new_file, new_file_offset)
});
Self::new(new_range, new_perms, new_writeback_file)
} }
pub fn perms(&self) -> VMPerms { pub fn perms(&self) -> VMPerms {
@ -23,6 +47,10 @@ impl VMArea {
&self.range &self.range
} }
pub fn writeback_file(&self) -> &Option<(FileRef, usize)> {
&self.writeback_file
}
pub fn set_perms(&mut self, new_perms: VMPerms) { pub fn set_perms(&mut self, new_perms: VMPerms) {
self.perms = new_perms; self.perms = new_perms;
} }
@ -30,10 +58,48 @@ impl VMArea {
pub fn subtract(&self, other: &VMRange) -> Vec<VMArea> { pub fn subtract(&self, other: &VMRange) -> Vec<VMArea> {
self.deref() self.deref()
.subtract(other) .subtract(other)
.iter() .into_iter()
.map(|range| VMArea::new(*range, self.perms())) .map(|range| Self::inherits_file_from(self, range, self.perms()))
.collect() .collect()
} }
// Returns a non-empty intersection if there is any
pub fn intersect(&self, other: &VMRange) -> Option<VMArea> {
let new_range = {
let new_range = self.range().intersect(other);
if new_range.is_none() {
return None;
}
new_range.unwrap()
};
let new_vma = VMArea::inherits_file_from(self, new_range, self.perms());
Some(new_vma)
}
pub fn resize(&mut self, new_size: usize) {
self.range.resize(new_size)
}
pub fn set_start(&mut self, new_start: usize) {
let old_start = self.start();
self.range.set_start(new_start);
// If the updates to the VMA need to be written back to a file, then the
// file offset must be adjusted according to the new start address.
if let Some((_, offset)) = self.writeback_file.as_mut() {
if old_start < new_start {
*offset += new_start - old_start;
} else {
// The caller must guarantee that the new start makes sense
debug_assert!(*offset >= old_start - new_start);
*offset -= old_start - new_start;
}
}
}
pub fn set_end(&mut self, new_end: usize) {
self.range.set_end(new_end);
}
} }
impl Deref for VMArea { impl Deref for VMArea {
@ -43,9 +109,3 @@ impl Deref for VMArea {
&self.range &self.range
} }
} }
impl DerefMut for VMArea {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.range
}
}

@ -64,19 +64,21 @@ impl Default for VMMapAddr {
} }
} }
#[derive(Builder, Debug, Default)] #[derive(Builder, Debug)]
#[builder(build_fn(skip), no_std)] #[builder(pattern = "owned", build_fn(skip), no_std)]
pub struct VMMapOptions { pub struct VMMapOptions {
size: usize, size: usize,
align: usize, align: usize,
perms: VMPerms, perms: VMPerms,
addr: VMMapAddr, addr: VMMapAddr,
initializer: VMInitializer, initializer: VMInitializer,
// The content of the VMA can be written back to a given file at a given offset
writeback_file: Option<(FileRef, usize)>,
} }
// VMMapOptionsBuilder is generated automatically, except the build function // VMMapOptionsBuilder is generated automatically, except the build function
impl VMMapOptionsBuilder { impl VMMapOptionsBuilder {
pub fn build(&self) -> Result<VMMapOptions> { pub fn build(mut self) -> Result<VMMapOptions> {
let size = { let size = {
let size = self let size = self
.size .size
@ -117,12 +119,14 @@ impl VMMapOptionsBuilder {
Some(initializer) => initializer.clone(), Some(initializer) => initializer.clone(),
None => VMInitializer::default(), None => VMInitializer::default(),
}; };
let writeback_file = self.writeback_file.take().unwrap_or_default();
Ok(VMMapOptions { Ok(VMMapOptions {
size, size,
align, align,
perms, perms,
addr, addr,
initializer, initializer,
writeback_file,
}) })
} }
} }
@ -143,6 +147,10 @@ impl VMMapOptions {
pub fn initializer(&self) -> &VMInitializer { pub fn initializer(&self) -> &VMInitializer {
&self.initializer &self.initializer
} }
pub fn writeback_file(&self) -> &Option<(FileRef, usize)> {
&self.writeback_file
}
} }
#[derive(Debug)] #[derive(Debug)]
@ -258,12 +266,12 @@ impl VMManager {
let start_sentry = { let start_sentry = {
let range = VMRange::new_empty(start)?; let range = VMRange::new_empty(start)?;
let perms = VMPerms::empty(); let perms = VMPerms::empty();
VMArea::new(range, perms) VMArea::new(range, perms, None)
}; };
let end_sentry = { let end_sentry = {
let range = VMRange::new_empty(end)?; let range = VMRange::new_empty(end)?;
let perms = VMPerms::empty(); let perms = VMPerms::empty();
VMArea::new(range, perms) VMArea::new(range, perms, None)
}; };
vec![start_sentry, end_sentry] vec![start_sentry, end_sentry]
}; };
@ -274,7 +282,7 @@ impl VMManager {
&self.range &self.range
} }
pub fn mmap(&mut self, options: &VMMapOptions) -> Result<usize> { pub fn mmap(&mut self, mut options: VMMapOptions) -> Result<usize> {
// TODO: respect options.align when mmap // TODO: respect options.align when mmap
let addr = *options.addr(); let addr = *options.addr();
let size = *options.size(); let size = *options.size();
@ -287,7 +295,8 @@ impl VMManager {
let (insert_idx, free_range) = self.find_free_range(size, addr)?; let (insert_idx, free_range) = self.find_free_range(size, addr)?;
let new_range = self.alloc_range_from(size, addr, &free_range); let new_range = self.alloc_range_from(size, addr, &free_range);
let new_addr = new_range.start(); let new_addr = new_range.start();
let new_vma = VMArea::new(new_range, *options.perms()); let writeback_file = options.writeback_file.take();
let new_vma = VMArea::new(new_range, *options.perms(), writeback_file);
// Initialize the memory of the new range // Initialize the memory of the new range
unsafe { unsafe {
@ -325,24 +334,31 @@ impl VMManager {
effective_munmap_range effective_munmap_range
}; };
let new_vmas = self let old_vmas = {
.vmas let mut old_vmas = Vec::new();
.iter() std::mem::swap(&mut self.vmas, &mut old_vmas);
old_vmas
};
let new_vmas = old_vmas
.into_iter()
.flat_map(|vma| { .flat_map(|vma| {
// Keep the two sentry VMA intact // Keep the two sentry VMA intact
if vma.size() == 0 { if vma.size() == 0 {
return vec![*vma]; return vec![vma];
} }
let intersection_range = match vma.intersect(&munmap_range) { let intersection_vma = match vma.intersect(&munmap_range) {
None => return vec![*vma], None => return vec![vma],
Some(intersection_range) => intersection_range, Some(intersection_vma) => intersection_vma,
}; };
// Reset memory permissions // File-backed VMA needs to be flushed upon munmap
Self::apply_perms(&intersection_range, VMPerms::default()); Self::flush_file_vma(&intersection_vma);
vma.subtract(&intersection_range) // Reset memory permissions
Self::apply_perms(&intersection_vma, VMPerms::default());
vma.subtract(&intersection_vma)
}) })
.collect(); .collect();
self.vmas = new_vmas; self.vmas = new_vmas;
@ -465,7 +481,7 @@ impl VMManager {
// Perform mmap and munmap if needed // Perform mmap and munmap if needed
if let Some(mmap_options) = need_mmap { if let Some(mmap_options) = need_mmap {
let mmap_addr = self.mmap(&mmap_options)?; let mmap_addr = self.mmap(mmap_options)?;
if ret_addr.is_none() { if ret_addr.is_none() {
ret_addr = Some(mmap_addr); ret_addr = Some(mmap_addr);
@ -506,14 +522,14 @@ impl VMManager {
(false, true) => { (false, true) => {
containing_vma.set_end(protect_range.start()); containing_vma.set_end(protect_range.start());
let new_vma = VMArea::new(protect_range, new_perms); let new_vma = VMArea::inherits_file_from(containing_vma, protect_range, new_perms);
Self::apply_perms(&new_vma, new_vma.perms()); Self::apply_perms(&new_vma, new_vma.perms());
self.insert_new_vma(containing_idx + 1, new_vma); self.insert_new_vma(containing_idx + 1, new_vma);
} }
(true, false) => { (true, false) => {
containing_vma.set_start(protect_range.end()); containing_vma.set_start(protect_range.end());
let new_vma = VMArea::new(protect_range, new_perms); let new_vma = VMArea::inherits_file_from(containing_vma, protect_range, new_perms);
Self::apply_perms(&new_vma, new_vma.perms()); Self::apply_perms(&new_vma, new_vma.perms());
self.insert_new_vma(containing_idx, new_vma); self.insert_new_vma(containing_idx, new_vma);
} }
@ -530,15 +546,17 @@ impl VMManager {
containing_vma.set_end(protect_range.start()); containing_vma.set_end(protect_range.start());
// New VMA // New VMA
let new_vma = VMArea::new(protect_range, new_perms); let new_vma = VMArea::inherits_file_from(containing_vma, protect_range, new_perms);
Self::apply_perms(&new_vma, new_vma.perms()); Self::apply_perms(&new_vma, new_vma.perms());
self.insert_new_vma(containing_idx + 1, new_vma);
// Another new VMA // Another new VMA
let new_vma2 = { let new_vma2 = {
let range = VMRange::new(protect_end, old_end).unwrap(); let range = VMRange::new(protect_end, old_end).unwrap();
VMArea::new(range, old_perms) VMArea::inherits_file_from(containing_vma, range, old_perms)
}; };
drop(containing_vma);
self.insert_new_vma(containing_idx + 1, new_vma);
self.insert_new_vma(containing_idx + 2, new_vma2); self.insert_new_vma(containing_idx + 2, new_vma2);
} }
} }
@ -546,6 +564,57 @@ impl VMManager {
Ok(()) Ok(())
} }
/// Sync all shared, file-backed memory mappings in the given range by flushing the
/// memory content to its underlying file.
pub fn msync_by_range(&mut self, sync_range: &VMRange) -> Result<()> {
if !self.range().is_superset_of(&sync_range) {
return_errno!(ENOMEM, "invalid range");
}
// FIXME: check if sync_range covers unmapped memory
for vma in &self.vmas {
let vma = match vma.intersect(sync_range) {
None => continue,
Some(vma) => vma,
};
Self::flush_file_vma(&vma);
}
Ok(())
}
/// Sync all shared, file-backed memory mappings of the given file by flushing
/// the memory content to the file.
pub fn msync_by_file(&mut self, sync_file: &FileRef) {
for vma in &self.vmas {
let is_same_file = |file: &FileRef| -> bool { Arc::ptr_eq(&file, &sync_file) };
Self::flush_file_vma_with_cond(vma, is_same_file);
}
}
/// Flush a file-backed VMA to its file. This has no effect on anonymous VMA.
fn flush_file_vma(vma: &VMArea) {
Self::flush_file_vma_with_cond(vma, |_| true)
}
/// Same as flush_vma, except that an extra condition on the file needs to satisfy.
fn flush_file_vma_with_cond<F: Fn(&FileRef) -> bool>(vma: &VMArea, cond_fn: F) {
let (file, file_offset) = match vma.writeback_file().as_ref() {
None => return,
Some((file_and_offset)) => file_and_offset,
};
let file_writable = file
.get_access_mode()
.map(|ac| ac.writable())
.unwrap_or_default();
if !file_writable {
return;
}
if !cond_fn(file) {
return;
}
file.write_at(*file_offset, unsafe { vma.as_slice() });
}
pub fn find_mmap_region(&self, addr: usize) -> Result<&VMRange> { pub fn find_mmap_region(&self, addr: usize) -> Result<&VMRange> {
self.vmas self.vmas
.iter() .iter()
@ -702,12 +771,33 @@ impl VMManager {
fn can_merge_vmas(left: &VMArea, right: &VMArea) -> bool { fn can_merge_vmas(left: &VMArea, right: &VMArea) -> bool {
debug_assert!(left.end() <= right.start()); debug_assert!(left.end() <= right.start());
// Both of the two VMAs are not sentry (whose size == 0) // Both of the two VMAs must not be sentry (whose size == 0)
left.size() > 0 && right.size() > 0 && if left.size() == 0 || right.size() == 0 {
// Two VMAs must border with each other return false;
left.end() == right.start() && }
// Two VMAs must have the same memory permissions // The two VMAs must border with each other
left.perms() == right.perms() if left.end() != right.start() {
return false;
}
// The two VMAs must have the same memory permissions
if left.perms() != right.perms() {
return false;
}
// If the two VMAs have write-back files, the files must be the same and
// the two file regions must be continuous.
let left_writeback_file = left.writeback_file();
let right_writeback_file = right.writeback_file();
match (left_writeback_file, right_writeback_file) {
(None, None) => true,
(Some(_), None) => false,
(None, Some(_)) => false,
(Some((left_file, left_offset)), Some((right_file, right_offset))) => {
Arc::ptr_eq(&left_file, &right_file)
&& right_offset > left_offset
&& right_offset - left_offset == left.size()
}
}
} }
fn apply_perms(protect_range: &VMRange, perms: VMPerms) { fn apply_perms(protect_range: &VMRange, perms: VMPerms) {

@ -64,14 +64,14 @@ impl VMRange {
self.end = self.start + new_size; self.end = self.start + new_size;
} }
pub fn set_start(&mut self, start: usize) { pub fn set_start(&mut self, new_start: usize) {
debug_assert!(start % PAGE_SIZE == 0 && start <= self.end); debug_assert!(new_start % PAGE_SIZE == 0 && new_start <= self.end);
self.start = start; self.start = new_start;
} }
pub fn set_end(&mut self, end: usize) { pub fn set_end(&mut self, new_end: usize) {
debug_assert!(end % PAGE_SIZE == 0 && end >= self.start); debug_assert!(new_end % PAGE_SIZE == 0 && new_end >= self.start);
self.end = end; self.end = new_end;
} }
pub fn empty(&self) -> bool { pub fn empty(&self) -> bool {

@ -56,6 +56,28 @@ static int check_bytes_in_buf(char *buf, size_t len, int expected_byte_val) {
return 0; return 0;
} }
static int check_file_with_repeated_bytes(int fd, size_t len, int expected_byte_val) {
size_t remain = len;
char read_buf[512];
while (remain > 0) {
int read_nbytes = read(fd, read_buf, sizeof(read_buf));
if (read_nbytes < 0) {
// I/O error
return -1;
}
remain -= read_nbytes;
if (read_nbytes == 0 && remain > 0) {
// Not enough data in the file
return -1;
}
if (check_bytes_in_buf(read_buf, read_nbytes, expected_byte_val) < 0) {
// Incorrect data
return -1;
}
}
return 0;
}
static void *get_a_stack_ptr() { static void *get_a_stack_ptr() {
volatile int a = 0; volatile int a = 0;
return (void *) &a; return (void *) &a;
@ -254,7 +276,7 @@ int test_anonymous_mmap_with_non_page_aligned_len() {
// Test cases for file-backed mmap // Test cases for file-backed mmap
// ============================================================================ // ============================================================================
int test_file_mmap() { int test_private_file_mmap() {
const char *file_path = "/root/mmap_file.data"; const char *file_path = "/root/mmap_file.data";
int fd = open(file_path, O_CREAT | O_TRUNC | O_WRONLY, 0644); int fd = open(file_path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (fd < 0) { if (fd < 0) {
@ -292,7 +314,7 @@ int test_file_mmap() {
return 0; return 0;
} }
int test_file_mmap_with_offset() { int test_private_file_mmap_with_offset() {
const char *file_path = "/root/mmap_file.data"; const char *file_path = "/root/mmap_file.data";
int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644); int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644);
if (fd < 0) { if (fd < 0) {
@ -340,7 +362,7 @@ int test_file_mmap_with_offset() {
return 0; return 0;
} }
int test_file_mmap_with_invalid_fd() { int test_private_file_mmap_with_invalid_fd() {
size_t len = PAGE_SIZE; size_t len = PAGE_SIZE;
int prot = PROT_READ | PROT_WRITE; int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE; int flags = MAP_PRIVATE;
@ -353,7 +375,7 @@ int test_file_mmap_with_invalid_fd() {
return 0; return 0;
} }
int test_file_mmap_with_non_page_aligned_offset() { int test_private_file_mmap_with_non_page_aligned_offset() {
const char *file_path = "/root/mmap_file.data"; const char *file_path = "/root/mmap_file.data";
int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644); int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644);
if (fd < 0) { if (fd < 0) {
@ -379,6 +401,91 @@ int test_file_mmap_with_non_page_aligned_offset() {
// TODO: what if offset > file size or offset + len > file size? // TODO: what if offset > file size or offset + len > file size?
typedef int (*flush_file_mmap_func_t)(int /*fd*/, void * /*addr*/, size_t /*size*/);

// Common driver for the shared-file-mmap flush tests: write to a shared,
// file-backed memory mapping, flush it with the given flush_fn (which may be
// msync, munmap, fsync, or fdatasync), then re-read the file through the fd
// interface to check that the updates became durable.
static int __test_shared_file_mmap_flushing_file(flush_file_mmap_func_t flush_fn) {
    // Update a file by writing to its file-backed memory mapping
    const char *file_path = "/root/mmap_file.data";
    int fd = open(file_path, O_CREAT | O_TRUNC | O_RDWR, 0644);
    if (fd < 0) {
        THROW_ERROR("file creation failed");
    }
    if (fill_file_with_repeated_bytes(fd, PAGE_SIZE, 0) < 0) {
        THROW_ERROR("file init failed");
    }

    int byte_val = 0xde;
    char *write_buf = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (write_buf == MAP_FAILED) {
        THROW_ERROR("mmap failed");
    }
    for (int i = 0; i < PAGE_SIZE; i++) { write_buf[i] = byte_val; }
    int ret = flush_fn(fd, write_buf, PAGE_SIZE);
    if (ret < 0) {
        // The flush function is not necessarily fdatasync; keep the
        // diagnostic generic.
        THROW_ERROR("flush failed");
    }
    close(fd);
    // NOTE(review): when flush_fn is not munmap, the mapping is leaked here;
    // harmless for a short-lived test process, but worth confirming.

    // Read the file back to see if the updates are durable
    fd = open(file_path, O_RDONLY);
    if (fd < 0) {
        THROW_ERROR("file open failed");
    }
    if (check_file_with_repeated_bytes(fd, PAGE_SIZE, byte_val) < 0) {
        THROW_ERROR("unexpected file content");
    }
    close(fd);
    unlink(file_path);
    return 0;
}
static int flush_shared_file_mmap_with_msync(int _fd, void *addr, size_t size) {
return msync(addr, size, MS_SYNC);
}
static int flush_shared_file_mmap_with_munmap(int _fd, void *addr, size_t size) {
return munmap(addr, size);
}
// Flush the shared file mapping through the file descriptor with fdatasync(2).
// BUGFIX: this helper previously called fsync(fd), swapping behavior with its
// _with_fsync sibling, so the fdatasync path was never actually tested.
static int flush_shared_file_mmap_with_fdatasync(int fd, void *_addr, size_t _size) {
    return fdatasync(fd);
}
// Flush the shared file mapping through the file descriptor with fsync(2).
// BUGFIX: this helper previously called fdatasync(fd), swapping behavior with
// its _with_fdatasync sibling, so the fsync path was never actually tested.
static int flush_shared_file_mmap_with_fsync(int fd, void *_addr, size_t _size) {
    return fsync(fd);
}
int test_shared_file_mmap_flushing_with_msync(void) {
if (__test_shared_file_mmap_flushing_file(flush_shared_file_mmap_with_msync)) {
THROW_ERROR("unexpected file content");
}
return 0;
}
int test_shared_file_mmap_flushing_with_munmap(void) {
if (__test_shared_file_mmap_flushing_file(flush_shared_file_mmap_with_munmap)) {
THROW_ERROR("unexpected file content");
}
return 0;
}
int test_shared_file_mmap_flushing_with_fdatasync(void) {
if (__test_shared_file_mmap_flushing_file(flush_shared_file_mmap_with_fdatasync)) {
THROW_ERROR("unexpected file content");
}
return 0;
}
int test_shared_file_mmap_flushing_with_fsync(void) {
if (__test_shared_file_mmap_flushing_file(flush_shared_file_mmap_with_fsync)) {
THROW_ERROR("unexpected file content");
}
return 0;
}
// ============================================================================ // ============================================================================
// Test cases for fixed mmap // Test cases for fixed mmap
// ============================================================================ // ============================================================================
@ -965,10 +1072,14 @@ static test_case_t test_cases[] = {
TEST_CASE(test_anonymous_mmap_with_bad_hints), TEST_CASE(test_anonymous_mmap_with_bad_hints),
TEST_CASE(test_anonymous_mmap_with_zero_len), TEST_CASE(test_anonymous_mmap_with_zero_len),
TEST_CASE(test_anonymous_mmap_with_non_page_aligned_len), TEST_CASE(test_anonymous_mmap_with_non_page_aligned_len),
TEST_CASE(test_file_mmap), TEST_CASE(test_private_file_mmap),
TEST_CASE(test_file_mmap_with_offset), TEST_CASE(test_private_file_mmap_with_offset),
TEST_CASE(test_file_mmap_with_invalid_fd), TEST_CASE(test_private_file_mmap_with_invalid_fd),
TEST_CASE(test_file_mmap_with_non_page_aligned_offset), TEST_CASE(test_private_file_mmap_with_non_page_aligned_offset),
TEST_CASE(test_shared_file_mmap_flushing_with_msync),
TEST_CASE(test_shared_file_mmap_flushing_with_munmap),
TEST_CASE(test_shared_file_mmap_flushing_with_fdatasync),
TEST_CASE(test_shared_file_mmap_flushing_with_fsync),
TEST_CASE(test_fixed_mmap_that_does_not_override_any_mmaping), TEST_CASE(test_fixed_mmap_that_does_not_override_any_mmaping),
TEST_CASE(test_fixed_mmap_that_overrides_existing_mmaping), TEST_CASE(test_fixed_mmap_that_overrides_existing_mmaping),
TEST_CASE(test_fixed_mmap_with_non_page_aligned_addr), TEST_CASE(test_fixed_mmap_with_non_page_aligned_addr),