pas-external 4.0.2

Ppoppo Accounts System (PAS) external SDK -- OAuth2 PKCE, PASETO verification, Axum middleware, session liveness
Documentation
//! Building blocks for `sv` claim validation
//! (STANDARDS_AUTH_INVALIDATION §5).
//!
//! In 4.0.0 the validator itself moved into the middleware layer
//! (see [`SvAwareSessionResolver`](crate::middleware::SvAwareSessionResolver))
//! so consumers get sv enforcement by default. This module hosts the
//! reusable primitives:
//!
//! - [`SessionVersionCache`] — pluggable cache trait (default impl:
//!   in-memory [`MemorySessionVersionCache`], 60 s TTL). Consumers with
//!   shared substrates (KVRocks, Redis) can implement this trait to
//!   converge break-glass across pods within network RTT instead of
//!   waiting for the per-pod TTL; see the adapter sketch in the
//!   [`SessionVersionCache`] docs and the usage example at the end of
//!   this module doc.
//! - [`SV_CACHE_KEY_PREFIX`] / [`SV_CACHE_TTL`] — the cache contract
//!   constants. They match PAS's `crates/ppoppo-token::sv_cache_key`, so
//!   a custom KVRocks cache adapter can read the same key namespace as
//!   PCS chat-auth.
//!
//! See [`SvAwareSessionResolver`](crate::middleware::SvAwareSessionResolver)
//! for the resolver entry point.
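//!
//! # Example
//!
//! Direct use of the cache primitives (a sketch: the `use` path and the
//! `ppnum_id` value are illustrative, so adjust the path to wherever
//! this module is exposed in your build):
//!
//! ```ignore
//! use pas_external::sv::{
//!     MemorySessionVersionCache, SessionVersionCache,
//!     SV_CACHE_KEY_PREFIX, SV_CACHE_TTL,
//! };
//!
//! async fn demo() {
//!     let cache = MemorySessionVersionCache::new();
//!     // Keys follow the shared `sv:{ppnum_id}` namespace.
//!     let key = format!("{SV_CACHE_KEY_PREFIX}ppnum_123");
//!     cache.set(&key, 7, SV_CACHE_TTL).await;
//!     assert_eq!(cache.get(&key).await, Some(7));
//! }
//! ```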

use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};

use async_trait::async_trait;
use tokio::sync::RwLock;

/// Namespace prefix for cache keys. Matches chat-auth and is_admin caches.
pub const SV_CACHE_KEY_PREFIX: &str = "sv:";

/// TTL per `paseto-sv-claim.md §R5`. 60 s, non-configurable by design.
pub const SV_CACHE_TTL: Duration = Duration::from_secs(60);

// Memory bound for [`MemorySessionVersionCache`]. With `SV_CACHE_TTL = 60 s`,
// an unbounded HashMap leaks one entry per unique `ppnum_id` ever resolved on
// the pod. 10_000 entries × ~80 bytes ≈ 800 KB worst case — comfortable for
// the SDK's "small to mid per-pod" sweet spot. Consumers needing higher caps
// should plug in their own `SessionVersionCache` via `resolver_with_cache`.
const MAX_ENTRIES: usize = 10_000;

/// Cache abstraction for `sv:{ppnum_id}` lookups.
///
/// Default implementation is [`MemorySessionVersionCache`]. Consumers
/// that already run KVRocks/Redis can write their own adapter — the
/// `get` / `set` contract is minimal.
///
/// `get` returns `None` on cache miss OR any transient backend error.
/// `set` is best-effort and swallows failures internally (a failed
/// `set` only costs one extra fetch on the next validation).
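///
/// # Example
///
/// A minimal adapter sketch, assuming the consumer runs the `redis`
/// crate's async `ConnectionManager` (the connection setup, crate
/// version, and struct name here are illustrative, not part of this
/// SDK):
///
/// ```ignore
/// use std::time::Duration;
///
/// use async_trait::async_trait;
/// use redis::AsyncCommands;
/// // `SessionVersionCache` comes from this SDK; the import path
/// // depends on how the consumer pulls it in.
///
/// struct RedisSessionVersionCache {
///     conn: redis::aio::ConnectionManager,
/// }
///
/// #[async_trait]
/// impl SessionVersionCache for RedisSessionVersionCache {
///     async fn get(&self, key: &str) -> Option<i64> {
///         // Per the contract: a miss and a transient backend error
///         // both collapse to `None`.
///         let mut conn = self.conn.clone();
///         conn.get::<_, Option<i64>>(key).await.ok().flatten()
///     }
///
///     async fn set(&self, key: &str, sv: i64, ttl: Duration) {
///         // Best-effort: a failed SETEX only costs one extra fetch on
///         // the next validation.
///         let mut conn = self.conn.clone();
///         let _: Result<(), _> = conn.set_ex(key, sv, ttl.as_secs()).await;
///     }
/// }
/// ```
///
/// A shared adapter like this converges break-glass across pods at
/// backend RTT rather than the per-pod 60 s TTL; pass the key through
/// unchanged so it stays in the `sv:{ppnum_id}` namespace.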
#[async_trait]
pub trait SessionVersionCache: Send + Sync {
    /// Look up the cached `sv` for `key`. `None` on miss or backend error.
    async fn get(&self, key: &str) -> Option<i64>;

    /// Best-effort write; implementations swallow backend failures.
    async fn set(&self, key: &str, sv: i64, ttl: Duration);
}

/// In-memory [`SessionVersionCache`]. Default choice for SDK consumers.
///
/// `tokio::sync::RwLock<HashMap<String, (sv, Instant)>>` with lazy
/// expiry on read (entries past their TTL read as a miss; actual
/// removal happens during `set`-time pruning) plus a 10 000-entry cap
/// with opportunistic pruning on `set` (see source for the exact
/// policy). Production consumers with many pods may want to plug in a
/// shared cache (Redis, KVRocks) so a break-glass on one pod converges
/// on all pods within the same 60 s window; the in-memory default is
/// per-pod.
pub struct MemorySessionVersionCache {
    inner: Arc<RwLock<HashMap<String, (i64, Instant)>>>,
}

impl MemorySessionVersionCache {
    #[must_use]
    pub fn new() -> Self {
        Self {
            inner: Arc::new(RwLock::new(HashMap::new())),
        }
    }
}

impl Default for MemorySessionVersionCache {
    fn default() -> Self {
        Self::new()
    }
}

#[async_trait]
impl SessionVersionCache for MemorySessionVersionCache {
    async fn get(&self, key: &str) -> Option<i64> {
        let guard = self.inner.read().await;
        let (sv, written_at) = guard.get(key)?;
        if written_at.elapsed() >= SV_CACHE_TTL {
            return None;
        }
        Some(*sv)
    }

    async fn set(&self, key: &str, sv: i64, _ttl: Duration) {
        // TTL is governed by the SV_CACHE_TTL constant; ignore the param
        // so callers can't accidentally drift this substrate's TTL away
        // from the contract.
        let mut guard = self.inner.write().await;
        if guard.len() >= MAX_ENTRIES && !guard.contains_key(key) {
            // First, free expired slots cheaply.
            guard.retain(|_, (_, written_at)| written_at.elapsed() < SV_CACHE_TTL);
            // Still full? Evict the single oldest by write time (FIFO).
            // Under TTL=60s this is effectively LRU — entries don't live
            // long enough for hot-vs-cold patterns to develop.
            if guard.len() >= MAX_ENTRIES {
                let oldest_key = guard
                    .iter()
                    .min_by_key(|(_, (_, written))| *written)
                    .map(|(k, _)| k.clone());
                if let Some(k) = oldest_key {
                    guard.remove(&k);
                }
            }
        }
        guard.insert(key.to_string(), (sv, Instant::now()));
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn memory_cache_respects_ttl() {
        // Exercises MemorySessionVersionCache's lazy expiry on read.
        // The wall clock can't be advanced here (the cache uses std
        // `Instant`, which ignores tokio's test clock), so this only
        // proves that within-TTL reads hit; the expiry branch holds by
        // construction (written_at.elapsed() >= SV_CACHE_TTL → None).
        let cache = MemorySessionVersionCache::new();
        cache.set("sv:abc", 42, SV_CACHE_TTL).await;
        assert_eq!(cache.get("sv:abc").await, Some(42));
        assert_eq!(cache.get("sv:missing").await, None);
    }

    #[tokio::test]
    async fn memory_cache_overwrite() {
        // A second set with the same key replaces the prior value.
        // Exercises the SvAwareSessionResolver refresh path that updates
        // the cache after picking up a newer sv.
        let cache = MemorySessionVersionCache::new();
        cache.set("sv:xyz", 1, SV_CACHE_TTL).await;
        cache.set("sv:xyz", 2, SV_CACHE_TTL).await;
        assert_eq!(cache.get("sv:xyz").await, Some(2));
    }

    #[tokio::test]
    async fn memory_cache_bounded_by_max_entries() {
        // Insert MAX_ENTRIES + N unique keys within TTL; cap must hold.
        // Without bounding, a long-lived consumer pod would leak one
        // entry per unique ppnum_id ever resolved.
        let cache = MemorySessionVersionCache::new();
        for i in 0..(MAX_ENTRIES + 100) {
            cache.set(&format!("sv:{i}"), i as i64, SV_CACHE_TTL).await;
        }
        let len = cache.inner.read().await.len();
        assert!(
            len <= MAX_ENTRIES,
            "cache exceeded cap: {len} > {MAX_ENTRIES}"
        );
        // Most-recently-written keys must still be present (FIFO eviction
        // drops oldest first).
        let last_key = format!("sv:{}", MAX_ENTRIES + 99);
        assert_eq!(
            cache.get(&last_key).await,
            Some((MAX_ENTRIES + 99) as i64),
            "newest entry must survive eviction"
        );
    }
}