use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;
/// Resource-usage accounting and limits for the supervised process tree.
pub struct ResourceState {
    /// Number of processes currently accounted for.
    pub proc_count: u32,
    /// Upper bound on concurrent processes.
    pub max_processes: u32,
    /// Memory currently attributed to the tree, in bytes.
    pub mem_used: u64,
    /// Memory ceiling, in bytes.
    pub max_memory_bytes: u64,
    /// When set, new forks are being held — NOTE(review): exact hold semantics
    /// live outside this file; confirm against the seccomp notify handler.
    pub hold_forks: bool,
    /// Notification ids held while forks are paused — presumably seccomp
    /// notification ids; verify against the code that drains this list.
    pub held_notif_ids: Vec<u64>,
    /// Load-average state (see `crate::procfs::LoadAvg`).
    pub load_avg: crate::procfs::LoadAvg,
    /// Instant this state was created (set in `new()`); used as a time origin.
    pub start_instant: std::time::Instant,
}
impl ResourceState {
    /// Builds a fresh accounting state with the given limits, zeroed usage
    /// counters, no held forks, and the creation instant captured now.
    pub fn new(max_memory_bytes: u64, max_processes: u32) -> Self {
        Self {
            max_processes,
            max_memory_bytes,
            proc_count: 0,
            mem_used: 0,
            hold_forks: false,
            held_notif_ids: Vec::new(),
            load_avg: crate::procfs::LoadAvg::new(),
            start_instant: std::time::Instant::now(),
        }
    }
}
/// Procfs-related patch bookkeeping.
///
/// Derives `Default` (0 means "not patched") for consistency with
/// `ProcessIndex`, which also provides `Default`; this silences clippy's
/// `new_without_default` without changing behavior.
#[derive(Debug, Default)]
pub struct ProcfsState {
    /// Address of the patched vDSO mapping; 0 until a patch is recorded.
    pub vdso_patched_addr: u64,
}

impl ProcfsState {
    /// Creates a state with no vDSO patch recorded (address 0).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Identity of a process that survives pid recycling: the pid paired with the
/// kernel-reported start time read from `/proc/<pid>/stat`. Two entries with
/// the same pid but different start times are different processes.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PidKey {
    pub pid: i32,
    /// Start time as parsed by `read_pid_start_time` (stat field 22).
    pub start_time: u64,
}
/// Reads the kernel start time of `pid` from `/proc/<pid>/stat`.
///
/// Splitting on the final `") "` skips past the comm field, which may itself
/// contain spaces and parentheses; the start time is then the 20th
/// whitespace-separated field of the remainder (field 22 of the full line).
/// Returns `None` if the process is gone or the line cannot be parsed.
pub(crate) fn read_pid_start_time(pid: i32) -> Option<u64> {
    let contents = std::fs::read_to_string(format!("/proc/{}/stat", pid)).ok()?;
    let (_, after_comm) = contents.rsplit_once(") ")?;
    let raw = after_comm.split_whitespace().nth(19)?;
    raw.parse::<u64>().ok()
}
/// Mutable state tracked individually for each supervised process.
#[derive(Default)]
pub struct PerProcessState {
    /// Virtualized working directory, when it diverges from the real one —
    /// NOTE(review): exact semantics set by callers elsewhere; confirm.
    pub virtual_cwd: Option<String>,
    /// Program-break base — presumably captured on first `brk` interception;
    /// verify against the syscall handler.
    pub brk_base: Option<u64>,
    /// Copy-on-write directory listing cache: key is assumed to be an fd,
    /// value is (directory path, raw entry names) — TODO confirm with users.
    pub cow_dir_cache: HashMap<u32, (String, Vec<Vec<u8>>)>,
    /// Synthesized procfs directory entries keyed by (fd?, path) — TODO
    /// confirm key meaning against the procfs emulation code.
    pub procfs_dir_cache: HashMap<(u32, String), Vec<Vec<u8>>>,
}
/// Thread-safe index of supervised processes: pid → (identity key, shared
/// per-process state). Guarded by a `RwLock`; read methods degrade gracefully
/// (absent/empty) if the lock is poisoned.
pub struct ProcessIndex {
    inner: std::sync::RwLock<HashMap<i32, ProcessEntry>>,
}
/// One registered process: its recycling-proof identity plus a shared,
/// async-mutex-guarded handle to its mutable state.
#[derive(Clone)]
struct ProcessEntry {
    key: PidKey,
    state: Arc<AsyncMutex<PerProcessState>>,
}
impl ProcessIndex {
    /// Creates an empty index.
    pub fn new() -> Self {
        Self {
            inner: std::sync::RwLock::new(HashMap::new()),
        }
    }
    /// Registers `pid` under a freshly read start time, replacing any existing
    /// entry for that pid (handles pid recycling). Returns `None` when the
    /// process has already exited or the lock is poisoned.
    pub fn register(&self, pid: i32) -> Option<PidKey> {
        let start_time = read_pid_start_time(pid)?;
        let key = PidKey { pid, start_time };
        let state = Arc::new(AsyncMutex::new(PerProcessState::default()));
        self.inner.write().ok()?.insert(pid, ProcessEntry { key, state });
        Some(key)
    }
    /// Looks up the identity key recorded for `pid`.
    pub fn key_for(&self, pid: i32) -> Option<PidKey> {
        let guard = self.inner.read().ok()?;
        guard.get(&pid).map(|entry| entry.key)
    }
    /// Returns the identity key together with a cloned handle to the
    /// per-process state, so callers can lock it without holding the index.
    pub fn entry_for(&self, pid: i32) -> Option<(PidKey, Arc<AsyncMutex<PerProcessState>>)> {
        let guard = self.inner.read().ok()?;
        guard
            .get(&pid)
            .map(|entry| (entry.key, Arc::clone(&entry.state)))
    }
    /// Whether `pid` is currently registered (a poisoned lock reads as absent).
    pub fn contains(&self, pid: i32) -> bool {
        match self.inner.read() {
            Ok(guard) => guard.contains_key(&pid),
            Err(_) => false,
        }
    }
    /// Number of registered processes (0 on a poisoned lock).
    pub fn len(&self) -> usize {
        match self.inner.read() {
            Ok(guard) => guard.len(),
            Err(_) => 0,
        }
    }
    /// Highest registered pid, if any.
    pub fn max_pid(&self) -> Option<i32> {
        let guard = self.inner.read().ok()?;
        guard.keys().copied().max()
    }
    /// Snapshot of all registered pids, independent of later mutations.
    pub fn pids_snapshot(&self) -> HashSet<i32> {
        match self.inner.read() {
            Ok(guard) => guard.keys().copied().collect(),
            Err(_) => HashSet::new(),
        }
    }
    /// Removes the entry for `key`, but only if the stored key still matches —
    /// a recycled pid registered after `key` was taken is left untouched.
    pub fn unregister(&self, key: PidKey) {
        if let Ok(mut guard) = self.inner.write() {
            let stored = guard.get(&key.pid).map(|entry| entry.key);
            if stored == Some(key) {
                guard.remove(&key.pid);
            }
        }
    }
    /// Drops entries whose process has exited or whose pid was recycled (the
    /// live start time no longer matches the stored key). Start times are
    /// probed outside the lock; keys are re-checked under the write lock so
    /// entries re-registered in between are preserved.
    pub fn prune_dead(&self) {
        let snapshot: Vec<(i32, PidKey)> = match self.inner.read() {
            Ok(guard) => guard.iter().map(|(pid, entry)| (*pid, entry.key)).collect(),
            Err(_) => return,
        };
        let stale: Vec<PidKey> = snapshot
            .into_iter()
            .filter(|(pid, key)| read_pid_start_time(*pid) != Some(key.start_time))
            .map(|(_, key)| key)
            .collect();
        if stale.is_empty() {
            return;
        }
        if let Ok(mut guard) = self.inner.write() {
            for key in stale {
                if guard.get(&key.pid).map(|entry| entry.key) == Some(key) {
                    guard.remove(&key.pid);
                }
            }
        }
    }
}
impl Default for ProcessIndex {
fn default() -> Self {
Self::new()
}
}
/// Copy-on-write interception state.
///
/// Derives `Default` (no active branch) for consistency with `ProcessIndex`,
/// which also provides `Default`; `Option<T>` is `Default` for any `T`, so
/// this is purely additive (clippy `new_without_default`).
#[derive(Default)]
pub struct CowState {
    /// Active seccomp-based CoW branch, if one has been started.
    pub branch: Option<crate::cow::seccomp::SeccompCowBranch>,
}

impl CowState {
    /// Creates a state with no active branch.
    pub fn new() -> Self {
        Self::default()
    }
}
/// Network policy and address-translation state for the sandbox.
pub struct NetworkState {
    /// Baseline policy, used when no per-pid or live-policy override applies
    /// (see `effective_network_policy`).
    pub network_policy: crate::seccomp::notif::NetworkPolicy,
    /// Port remapping table (see `crate::port_remap`).
    pub port_map: crate::port_remap::PortMap,
    /// Per-pid IP allow-list overrides; when a pid has an entry it takes
    /// precedence over every other policy source.
    pub pid_ip_overrides: std::sync::Arc<std::sync::RwLock<HashMap<u32, HashSet<std::net::IpAddr>>>>,
    /// Address of the HTTP ACL proxy, if one is running — TODO confirm role.
    pub http_acl_addr: Option<std::net::SocketAddr>,
    /// Ports subject to the HTTP ACL — presumably redirected to the proxy;
    /// verify against the interception code.
    pub http_acl_ports: HashSet<u16>,
    /// Original-destination map for HTTP ACL'd connections, if enabled.
    pub http_acl_orig_dest: Option<crate::http_acl::OrigDestMap>,
}
impl NetworkState {
    /// Creates a state with unrestricted networking, an empty port map, and no
    /// overrides or HTTP ACL configuration.
    pub fn new() -> Self {
        Self {
            network_policy: crate::seccomp::notif::NetworkPolicy::Unrestricted,
            port_map: crate::port_remap::PortMap::new(),
            pid_ip_overrides: std::sync::Arc::new(std::sync::RwLock::new(HashMap::new())),
            http_acl_addr: None,
            http_acl_ports: HashSet::new(),
            http_acl_orig_dest: None,
        }
    }
    /// Resolves the policy that applies to `pid`, in precedence order:
    /// 1. a per-pid IP override, 2. a non-empty live-policy allow list,
    /// 3. the baseline `network_policy`.
    pub fn effective_network_policy(
        &self,
        pid: u32,
        live_policy: Option<&std::sync::Arc<std::sync::RwLock<crate::policy_fn::LivePolicy>>>,
    ) -> crate::seccomp::notif::NetworkPolicy {
        // Highest precedence: an explicit allow list registered for this pid.
        let override_ips = self
            .pid_ip_overrides
            .read()
            .ok()
            .and_then(|map| map.get(&pid).cloned());
        if let Some(ips) = override_ips {
            return crate::seccomp::notif::NetworkPolicy::AllowList(ips);
        }
        // Next: a live policy with a non-empty allow list.
        if let Some(shared) = live_policy {
            if let Ok(policy) = shared.read() {
                if !policy.allowed_ips.is_empty() {
                    return crate::seccomp::notif::NetworkPolicy::AllowList(
                        policy.allowed_ips.clone(),
                    );
                }
            }
        }
        // Fallback: the configured baseline.
        self.network_policy.clone()
    }
}
/// Optional virtualized-time and deterministic-randomness configuration.
pub struct TimeRandomState {
    /// Offset applied to reported time — NOTE(review): units (seconds vs.
    /// nanoseconds) are not visible here; confirm against the time handlers.
    pub time_offset: Option<i64>,
    /// Seeded RNG for deterministic randomness, when enabled.
    pub random_state: Option<rand_chacha::ChaCha8Rng>,
}
impl TimeRandomState {
    /// Creates a state with the given (possibly absent) offset and RNG.
    pub fn new(time_offset: Option<i64>, random_state: Option<rand_chacha::ChaCha8Rng>) -> Self {
        Self { time_offset, random_state }
    }
}
/// State for the dynamic policy-function machinery.
pub struct PolicyFnState {
    /// Channel for emitting policy events to a consumer, when wired up.
    pub event_tx: Option<tokio::sync::mpsc::UnboundedSender<crate::policy_fn::PolicyEvent>>,
    /// Shared, updatable policy — consulted by `NetworkState::effective_network_policy`.
    pub live_policy: Option<std::sync::Arc<std::sync::RwLock<crate::policy_fn::LivePolicy>>>,
    /// Path prefixes that are denied; queried by `is_path_denied`.
    pub denied_paths: std::sync::Arc<std::sync::RwLock<HashSet<String>>>,
}
impl PolicyFnState {
    /// Creates a state with no event channel, no live policy, and an empty
    /// denied-path set.
    pub fn new() -> Self {
        Self {
            event_tx: None,
            live_policy: None,
            denied_paths: std::sync::Arc::new(std::sync::RwLock::new(HashSet::new())),
        }
    }
    /// Whether `path` falls under any denied prefix, using component-wise
    /// `Path::starts_with` (so `/foo` denies `/foo/bar` but not `/foobar`).
    pub fn is_path_denied(&self, path: &str) -> bool {
        let target = std::path::Path::new(path);
        match self.denied_paths.read() {
            Ok(denied) => denied
                .iter()
                .any(|prefix| target.starts_with(std::path::Path::new(prefix))),
            // Poisoned lock: treat as not denied, matching the original
            // fail-open behavior.
            Err(_) => false,
        }
    }
}
/// Chroot-related state.
///
/// Derives `Default` (no executable recorded) for consistency with
/// `ProcessIndex`, which also provides `Default` (clippy `new_without_default`).
#[derive(Debug, Default)]
pub struct ChrootState {
    /// Executable associated with the chroot, once known — NOTE(review): the
    /// setter lives outside this file; confirm its exact meaning there.
    pub chroot_exe: Option<std::path::PathBuf>,
}

impl ChrootState {
    /// Creates a state with no chroot executable recorded.
    pub fn new() -> Self {
        Self::default()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Happy-path lifecycle: register the current (live) pid, verify lookups
    // and counters, then unregister and verify everything is cleared.
    #[test]
    fn process_index_register_lookup_unregister() {
        let self_pid = unsafe { libc::getpid() };
        let idx = ProcessIndex::new();
        let key = idx
            .register(self_pid)
            .expect("register should succeed for live pid");
        assert_eq!(key.pid, self_pid);
        assert_eq!(idx.key_for(self_pid), Some(key));
        assert!(idx.contains(self_pid));
        // A pid that was never registered must not resolve.
        assert_eq!(idx.key_for(self_pid + 999_999), None);
        assert!(!idx.contains(self_pid + 999_999));
        assert_eq!(idx.len(), 1);
        assert_eq!(idx.max_pid(), Some(self_pid));
        idx.unregister(key);
        assert_eq!(idx.key_for(self_pid), None);
        assert!(!idx.contains(self_pid));
        assert_eq!(idx.len(), 0);
        assert_eq!(idx.max_pid(), None);
    }
    // Pid recycling: a stale entry (start_time 0) is overwritten by register,
    // and unregister with the stale key must NOT remove the fresh entry.
    #[test]
    fn process_index_register_overwrites_stale_entry_for_recycled_pid() {
        let self_pid = unsafe { libc::getpid() };
        let idx = ProcessIndex::new();
        {
            // Plant a fake entry simulating a dead process that had this pid.
            let stale_key = PidKey { pid: self_pid, start_time: 0 };
            let stale = ProcessEntry {
                key: stale_key,
                state: Arc::new(AsyncMutex::new(PerProcessState::default())),
            };
            idx.inner.write().unwrap().insert(self_pid, stale);
        }
        let new_key = idx.register(self_pid).unwrap();
        assert_ne!(new_key.start_time, 0);
        assert_eq!(idx.key_for(self_pid), Some(new_key));
        // Unregistering with the stale key is a no-op for the fresh entry.
        let stale_key = PidKey { pid: self_pid, start_time: 0 };
        idx.unregister(stale_key);
        assert_eq!(idx.key_for(self_pid), Some(new_key));
    }
    // entry_for must hand out clones of the SAME Arc: a write through one
    // handle is visible through the other, and the handle outlives unregister.
    #[tokio::test]
    async fn process_index_entry_for_returns_shared_handle() {
        let self_pid = unsafe { libc::getpid() };
        let idx = ProcessIndex::new();
        let key = idx.register(self_pid).unwrap();
        let (k1, s1) = idx.entry_for(self_pid).unwrap();
        let (k2, s2) = idx.entry_for(self_pid).unwrap();
        assert_eq!(k1, key);
        assert_eq!(k2, key);
        s1.lock().await.brk_base = Some(0xdead_beef);
        assert_eq!(s2.lock().await.brk_base, Some(0xdead_beef));
        idx.unregister(key);
        assert!(idx.entry_for(self_pid).is_none());
        // Previously obtained handles keep the state alive after removal.
        assert_eq!(s1.lock().await.brk_base, Some(0xdead_beef));
    }
    // The snapshot is an owned copy: mutating the index afterwards must not
    // change it.
    #[test]
    fn process_index_pids_snapshot_is_independent() {
        let self_pid = unsafe { libc::getpid() };
        let idx = ProcessIndex::new();
        let key = idx.register(self_pid).unwrap();
        let snap = idx.pids_snapshot();
        idx.unregister(key);
        assert!(snap.contains(&self_pid));
        assert!(!idx.contains(self_pid));
    }
    // prune_dead removes an entry whose start_time no longer matches the live
    // process (i.e. the pid was recycled).
    #[test]
    fn process_index_prune_dead_drops_recycled_entries() {
        let self_pid = unsafe { libc::getpid() };
        let idx = ProcessIndex::new();
        let stale_key = PidKey { pid: self_pid, start_time: 0 };
        let stale = ProcessEntry {
            key: stale_key,
            state: Arc::new(AsyncMutex::new(PerProcessState::default())),
        };
        idx.inner.write().unwrap().insert(self_pid, stale);
        idx.prune_dead();
        assert!(!idx.contains(self_pid));
    }
    // prune_dead must keep an entry whose start_time still matches.
    #[test]
    fn process_index_prune_dead_keeps_live_entries() {
        let self_pid = unsafe { libc::getpid() };
        let idx = ProcessIndex::new();
        let key = idx.register(self_pid).unwrap();
        idx.prune_dead();
        assert_eq!(idx.key_for(self_pid), Some(key));
    }
}