[vm] Avoid early munmap of shm
commit 2e608cdf47
parent ad317e61f6
@@ -4,7 +4,8 @@ use crate::fs::FileMode;
 use crate::process::{do_getegid, do_geteuid, gid_t, uid_t, ThreadRef};
 use crate::time::{do_gettimeofday, time_t};
 use crate::vm::{
-    ChunkRef, VMInitializer, VMMapOptionsBuilder, VMPerms, VMRange, USER_SPACE_VM_MANAGER,
+    ChunkRef, MunmapChunkFlag, VMInitializer, VMMapOptionsBuilder, VMPerms, VMRange,
+    USER_SPACE_VM_MANAGER,
 };
 use std::collections::{HashMap, HashSet};

@@ -212,7 +213,7 @@ impl Drop for ShmSegment {
         assert!(self.process_set.len() == 0);
         USER_SPACE_VM_MANAGER
             .internal()
-            .munmap_chunk(&self.chunk, None, false);
+            .munmap_chunk(&self.chunk, None, MunmapChunkFlag::Default);
     }
 }

@@ -80,6 +80,7 @@ pub use self::chunk::{ChunkRef, ChunkType};
 pub use self::process_vm::{MMapFlags, MRemapFlags, MSyncFlags, ProcessVM, ProcessVMBuilder};
 pub use self::user_space_vm::USER_SPACE_VM_MANAGER;
 pub use self::vm_area::VMArea;
+pub use self::vm_manager::MunmapChunkFlag;
 pub use self::vm_perms::VMPerms;
 pub use self::vm_range::VMRange;
 pub use self::vm_util::{VMInitializer, VMMapOptionsBuilder};
@@ -3,6 +3,7 @@ use super::*;
 use super::chunk::*;
 use super::user_space_vm::USER_SPACE_VM_MANAGER;
 use super::vm_area::VMArea;
+use super::vm_manager::MunmapChunkFlag;
 use super::vm_perms::VMPerms;
 use super::vm_util::{
     FileBacked, VMInitializer, VMMapAddr, VMMapOptions, VMMapOptionsBuilder, VMRemapOptions,
@@ -214,7 +215,7 @@ impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
         chunks.iter().for_each(|chunk| {
             USER_SPACE_VM_MANAGER
                 .internal()
-                .munmap_chunk(chunk, None, false);
+                .munmap_chunk(chunk, None, MunmapChunkFlag::Default);
         });
     }

@@ -312,9 +313,11 @@ impl Drop for ProcessVM {
         mem_chunks
             .drain_filter(|chunk| chunk.is_single_vma())
             .for_each(|chunk| {
-                USER_SPACE_VM_MANAGER
-                    .internal()
-                    .munmap_chunk(&chunk, None, false);
+                USER_SPACE_VM_MANAGER.internal().munmap_chunk(
+                    &chunk,
+                    None,
+                    MunmapChunkFlag::OnProcessExit,
+                );
             });

         assert!(mem_chunks.len() == 0);
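
Process teardown now passes MunmapChunkFlag::OnProcessExit instead of a bare bool, so a shared chunk is fully detached from the dying process even when its per-process mapping count is still above one. A minimal standalone sketch of the flag-to-detach mapping; it mirrors the match that the shm manager hunk below introduces, not a new API:

    // Sketch: only `Default` honors the per-process reference count;
    // the other two variants force the process out of the share table.
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    enum MunmapChunkFlag {
        Default,
        Force,
        OnProcessExit,
    }

    fn force_detach(flag: MunmapChunkFlag) -> bool {
        match flag {
            MunmapChunkFlag::Default => false,
            MunmapChunkFlag::Force | MunmapChunkFlag::OnProcessExit => true,
        }
    }

    fn main() {
        assert!(!force_detach(MunmapChunkFlag::Default));
        assert!(force_detach(MunmapChunkFlag::OnProcessExit));
    }
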
@@ -1,12 +1,12 @@
 //! Shared memory manager. (POSIX)
 use super::*;

-use super::vm_manager::InternalVMManager;
+use super::vm_manager::{InternalVMManager, MunmapChunkFlag};
 use super::vm_util::VMMapOptions;
 use crate::process::ThreadStatus;

 use rcore_fs::vfs::{FileType, Metadata};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::sync::{Arc, Weak};

 type InodeId = usize;
@@ -120,8 +120,8 @@ impl ShmManager {
         };

         Self::apply_new_perms_if_higher(&mut shared_vma, *options.perms());
-        if !contained {
-            shared_vma.attach_shared_process(current_pid)?;
+        shared_vma.attach_shared_process(current_pid)?;
+        if !contained {
             current.vm().add_mem_chunk(shared_chunk.clone());
         }

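
The attach is now unconditional: every mmap of the shared chunk bumps the calling process's reference count, while the chunk itself is registered in the process's chunk list only on first contact. A simplified sketch of that bookkeeping (the names `table` and `mem_chunks` stand in for VMAccess::Shared and the process's chunk list; this is not the Occlum API):

    use std::collections::{HashMap, HashSet};

    fn mmap_shared(
        table: &mut HashMap<u32, u32>,
        mem_chunks: &mut HashSet<u64>,
        pid: u32,
        chunk_id: u64,
    ) {
        *table.entry(pid).or_insert(0) += 1; // attach_shared_process()
        mem_chunks.insert(chunk_id); // add_mem_chunk(), idempotent like the `!contained` check
    }

    fn main() {
        let (mut table, mut chunks) = (HashMap::new(), HashSet::new());
        mmap_shared(&mut table, &mut chunks, 7, 0xA000);
        mmap_shared(&mut table, &mut chunks, 7, 0xA000); // same process maps again
        assert_eq!(table[&7], 2); // two live mappings counted
        assert_eq!(chunks.len(), 1); // but the chunk is tracked once
    }
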
@@ -132,7 +132,7 @@ impl ShmManager {
         &mut self,
         chunk: &ChunkRef,
         unmap_range: &VMRange,
-        force_unmap: bool,
+        flag: MunmapChunkFlag,
     ) -> Result<MunmapSharedResult> {
         debug_assert!(chunk.is_shared());
         let mut shared_vma = Self::vma_of(chunk);
@@ -141,7 +141,9 @@
         let partial_unmap = !unmap_range.is_superset_of(shared_range);

         // Fails when force unmap a partial of shared chunk which is still shared by other process
-        if force_unmap && (partial_unmap || !shared_vma.exclusive_by(current_pid)) {
+        if flag == MunmapChunkFlag::Force
+            && (partial_unmap || !shared_vma.exclusive_by(current_pid))
+        {
             return_errno!(EINVAL, "force unmap shared chunk failed");
         }

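
A force unmap is rejected unless the request covers the whole shared range and the caller is the chunk's only user. A small sketch of the partial-unmap test, with a hypothetical Range type standing in for VMRange:

    // Hypothetical minimal stand-in for VMRange, only to illustrate
    // `partial_unmap = !unmap_range.is_superset_of(shared_range)`.
    #[derive(Clone, Copy)]
    struct Range {
        start: usize,
        end: usize,
    }

    impl Range {
        fn is_superset_of(&self, other: &Range) -> bool {
            self.start <= other.start && self.end >= other.end
        }
    }

    fn main() {
        let shared = Range { start: 0x1000, end: 0x3000 };
        let unmap = Range { start: 0x1000, end: 0x2000 };
        let partial_unmap = !unmap.is_superset_of(&shared);
        assert!(partial_unmap); // forcing this unmap would return EINVAL
    }
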
@@ -150,7 +152,11 @@ impl ShmManager {
             return Ok(MunmapSharedResult::StillInUse);
         }

-        if shared_vma.detach_shared_process(current_pid)? {
+        let force_detach = match flag {
+            MunmapChunkFlag::Default => false,
+            MunmapChunkFlag::Force | MunmapChunkFlag::OnProcessExit => true,
+        };
+        if shared_vma.detach_shared_process(current_pid, force_detach)? {
             self.shared_chunks.remove(&Self::inode_id_of(&shared_vma));
             Ok(MunmapSharedResult::Freeable)
         } else {
@@ -6,7 +6,6 @@ use super::vm_util::FileBacked;

 use intrusive_collections::rbtree::{Link, RBTree};
 use intrusive_collections::{intrusive_adapter, KeyAdapter};
-use std::collections::HashSet;
 use std::ops::{Deref, DerefMut};

 #[derive(Clone, Debug, Default)]
@@ -19,8 +18,11 @@ pub struct VMArea {
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub enum VMAccess {
     /// Can only be accessed by one single process
     Private(pid_t),
-    Shared(HashSet<pid_t>),
+    /// Can be accessed by multi processes, also a reference counter
+    /// to record sharings within each process(like thread)
+    Shared(HashMap<pid_t, u32>),
 }

 impl VMArea {
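
This is the core data-structure change: each sharing process now carries a mapping count, so VMAccess can tell a process's last munmap apart from an intermediate one. A self-contained model of the new variant (pid_t is aliased to u32 here, and the real VMArea carries more state than this):

    use std::collections::HashMap;

    type Pid = u32; // stands in for pid_t

    enum Access {
        Private(Pid),
        // One entry per sharing process; the count records how many
        // mappings within that process still reference the area.
        Shared(HashMap<Pid, u32>),
    }

    impl Access {
        // Mirrors the new exclusive_by(): sole process *and* sole mapping.
        fn exclusive_by(&self, pid: Pid) -> bool {
            match self {
                Access::Private(p) => *p == pid,
                Access::Shared(table) => {
                    table.len() == 1 && table.get(&pid).copied() == Some(1)
                }
            }
        }
    }

    fn main() {
        let access = Access::Shared(HashMap::from([(42, 2)]));
        // One process, but two live mappings: not exclusive yet.
        assert!(!access.exclusive_by(42));
    }
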
@@ -88,14 +90,18 @@ impl VMArea {
     pub fn belong_to(&self, target_pid: pid_t) -> bool {
         match &self.access {
             VMAccess::Private(pid) => *pid == target_pid,
-            VMAccess::Shared(pid_set) => pid_set.contains(&target_pid),
+            VMAccess::Shared(pid_table) => pid_table.contains_key(&target_pid),
         }
     }

     pub fn exclusive_by(&self, target_pid: pid_t) -> bool {
         match &self.access {
             VMAccess::Private(pid) => *pid == target_pid,
-            VMAccess::Shared(pid_set) => pid_set.len() == 1 && pid_set.contains(&target_pid),
+            VMAccess::Shared(pid_table) => {
+                pid_table.len() == 1
+                    && pid_table.contains_key(&target_pid)
+                    && *pid_table.get(&target_pid).unwrap() == 1
+            }
         }
     }

@@ -264,7 +270,7 @@ impl VMArea {

     pub fn mark_shared(&mut self) {
         let access = match self.access {
-            VMAccess::Private(pid) => VMAccess::Shared(HashSet::from([pid])),
+            VMAccess::Private(pid) => VMAccess::Shared(HashMap::from([(pid, 1)])),
             VMAccess::Shared(_) => {
                 return;
             }
@@ -272,29 +278,31 @@ impl VMArea {
         self.access = access;
     }

-    pub fn shared_process_set(&self) -> Result<&HashSet<pid_t>> {
-        match &self.access {
-            VMAccess::Private(_) => Err(errno!(EINVAL, "not a shared vma")),
-            VMAccess::Shared(pid_set) => Ok(pid_set),
-        }
-    }
-
     pub fn attach_shared_process(&mut self, pid: pid_t) -> Result<()> {
         match &mut self.access {
             VMAccess::Private(_) => Err(errno!(EINVAL, "not a shared vma")),
-            VMAccess::Shared(pid_set) => {
-                pid_set.insert(pid);
+            VMAccess::Shared(pid_table) => {
+                if let Some(mut ref_ctr) = pid_table.get_mut(&pid) {
+                    *ref_ctr += 1;
+                } else {
+                    let _ = pid_table.insert(pid, 1);
+                }
                 Ok(())
             }
         }
     }

-    pub fn detach_shared_process(&mut self, pid: pid_t) -> Result<bool> {
+    pub fn detach_shared_process(&mut self, pid: pid_t, force_detach: bool) -> Result<bool> {
         match &mut self.access {
             VMAccess::Private(_) => Err(errno!(EINVAL, "not a shared vma")),
-            VMAccess::Shared(pid_set) => {
-                pid_set.remove(&pid);
-                Ok(pid_set.is_empty())
+            VMAccess::Shared(pid_table) => {
+                if let Some(mut ref_ctr) = pid_table.get_mut(&pid) {
+                    *ref_ctr -= 1;
+                    if *ref_ctr == 0 || force_detach {
+                        let _ = pid_table.remove(&pid);
+                    }
+                }
+                Ok(pid_table.is_empty())
            }
        }
    }
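
The scenario this commit fixes, replayed against a model of the two methods above: a second mapping in the same process (for example from another thread) no longer lets the first munmap free the backing memory. A hedged sketch, not the Occlum API itself:

    use std::collections::HashMap;

    struct SharedAccess(HashMap<u32, u32>);

    impl SharedAccess {
        fn attach(&mut self, pid: u32) {
            *self.0.entry(pid).or_insert(0) += 1;
        }
        // Returns true when the area has no users left (freeable).
        fn detach(&mut self, pid: u32, force_detach: bool) -> bool {
            if let Some(ctr) = self.0.get_mut(&pid) {
                *ctr -= 1;
                if *ctr == 0 || force_detach {
                    self.0.remove(&pid);
                }
            }
            self.0.is_empty()
        }
    }

    fn main() {
        let mut access = SharedAccess(HashMap::from([(42, 1)]));
        access.attach(42); // second mmap by the same process

        // With the old HashSet, the first munmap would already remove
        // pid 42 and report the chunk freeable -- the "early munmap" bug.
        assert!(!access.detach(42, false)); // now: one mapping still live
        assert!(access.detach(42, false)); // last munmap frees it

        // On process exit the refcount is bypassed entirely:
        let mut access = SharedAccess(HashMap::from([(42, 3)]));
        assert!(access.detach(42, true)); // force_detach drops pid 42 outright
    }
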
@@ -224,9 +224,11 @@ impl VMManager {

         for chunk in overlapping_chunks.iter() {
             match chunk.internal() {
-                ChunkType::SingleVMA(_) => {
-                    internal_manager.munmap_chunk(chunk, Some(&munmap_range), false)?
-                }
+                ChunkType::SingleVMA(_) => internal_manager.munmap_chunk(
+                    chunk,
+                    Some(&munmap_range),
+                    MunmapChunkFlag::Default,
+                )?,
                 ChunkType::MultiVMA(manager) => manager
                     .lock()
                     .unwrap()
@@ -262,7 +264,7 @@ impl VMManager {
             return internal_manager.munmap_chunk(
                 &overlapping_chunk,
                 Some(&munmap_range),
-                false,
+                MunmapChunkFlag::Default,
             );
         } else {
             warn!("no overlapping chunks anymore");
@@ -512,7 +514,7 @@ impl VMManager {
         let mut mem_chunks = thread.vm().mem_chunks().write().unwrap();

         mem_chunks.iter().for_each(|chunk| {
-            internal_manager.munmap_chunk(&chunk, None, false);
+            internal_manager.munmap_chunk(&chunk, None, MunmapChunkFlag::OnProcessExit);
         });
         mem_chunks.clear();

@@ -588,7 +590,7 @@ impl InternalVMManager {
         &mut self,
         chunk: &ChunkRef,
         munmap_range: Option<&VMRange>,
-        force_unmap: bool,
+        flag: MunmapChunkFlag,
     ) -> Result<()> {
         trace!(
             "munmap_chunk range = {:?}, munmap_range = {:?}",
@@ -619,11 +621,11 @@ impl InternalVMManager {

         if chunk.is_shared() {
             trace!(
-                "munmap_shared_chunk, chunk_range: {:?}, munmap_range = {:?}",
+                "munmap_shared_chunk, chunk_range = {:?}, munmap_range = {:?}",
                 chunk.range(),
                 munmap_range,
             );
-            return self.munmap_shared_chunk(chunk, munmap_range, force_unmap);
+            return self.munmap_shared_chunk(chunk, munmap_range, flag);
         }

         // Either the munmap range is a subset of the chunk range or the munmap range is
@@ -743,7 +745,7 @@ impl InternalVMManager {
         &mut self,
         chunk: &ChunkRef,
         munmap_range: &VMRange,
-        force_unmap: bool,
+        flag: MunmapChunkFlag,
     ) -> Result<()> {
         if !chunk.is_shared() {
             return_errno!(EINVAL, "not a shared chunk");
@@ -754,7 +756,7 @@ impl InternalVMManager {

         if self
             .shm_manager
-            .munmap_shared_chunk(chunk, munmap_range, force_unmap)?
+            .munmap_shared_chunk(chunk, munmap_range, flag)?
             == MunmapSharedResult::Freeable
         {
             let vma = chunk.get_vma_for_single_vma_chunk();
@@ -1025,7 +1027,7 @@ impl InternalVMManager {
                 }

                 // Munmap the corresponding single vma chunk
-                self.munmap_chunk(&chunk, Some(&target_range), true)?;
+                self.munmap_chunk(&chunk, Some(&target_range), MunmapChunkFlag::Force)?;
             }
             VMMapAddr::Any => unreachable!(),
         }
@@ -1147,6 +1149,17 @@ impl InternalVMManager {
     }
 }

+/// Flags used by `munmap_chunk()` and `munmap_shared_chunk()`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum MunmapChunkFlag {
+    /// Indicates normal behavior when munamp a shared chunk
+    Default,
+    /// Indicates the shared chunk must be freed entirely
+    Force,
+    /// Indicates the shared chunk must detach current process totally
+    OnProcessExit,
+}
+
 impl VMRemapParser for InternalVMManager {
     fn is_free_range(&self, request_range: &VMRange) -> bool {
         self.free_manager.is_free_range(request_range)
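
Taken together, the call sites in this commit choose the flag by context: the regular munmap paths pass Default, the fixed-address mmap path that must reclaim its target range (the VMMapAddr hunk above) passes Force, and the exit/teardown paths pass OnProcessExit. A compact sketch of that dispatch; CallSite is a descriptive stand-in for this summary, not an Occlum type:

    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    enum MunmapChunkFlag {
        Default,
        Force,
        OnProcessExit,
    }

    // Descriptive stand-in for the call sites touched by this commit.
    enum CallSite {
        MunmapSyscall,   // VMManager::munmap on overlapping chunks
        FixedAddrMmap,   // reclaiming the target range of a fixed-address mmap
        ProcessTeardown, // ProcessVM::drop and exit cleanup
    }

    fn flag_for(site: CallSite) -> MunmapChunkFlag {
        match site {
            CallSite::MunmapSyscall => MunmapChunkFlag::Default,
            CallSite::FixedAddrMmap => MunmapChunkFlag::Force,
            CallSite::ProcessTeardown => MunmapChunkFlag::OnProcessExit,
        }
    }

    fn main() {
        for site in [
            CallSite::MunmapSyscall,
            CallSite::FixedAddrMmap,
            CallSite::ProcessTeardown,
        ] {
            println!("{:?}", flag_for(site));
        }
    }
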