Skip to main content

pas_external/
session_version.rs

//! Building blocks for `sv` claim validation
//! (STANDARDS_AUTH_INVALIDATION §5).
//!
//! In 4.0.0 the validator itself moved into the middleware layer
//! (see [`SvAwareSessionResolver`](crate::middleware::SvAwareSessionResolver))
//! so consumers get sv enforcement by default. This module hosts the
//! reusable primitives:
//!
//! - [`SessionVersionCache`] — pluggable cache trait (default impl:
//!   in-memory [`MemorySessionVersionCache`], 60 s TTL). Consumers with
//!   shared substrates (KVRocks, Redis) can implement this trait to
//!   converge break-glass across pods within network RTT instead of
//!   waiting for the per-pod TTL.
//! - [`SV_CACHE_KEY_PREFIX`] / [`SV_CACHE_TTL`] — the cache contract
//!   constants. Match PAS's `crates/ppoppo-token::sv_cache_key` so a
//!   custom KVRocks cache adapter can read the same key namespace as
//!   PCS chat-auth.
//!
//! See [`SvAwareSessionResolver`](crate::middleware::SvAwareSessionResolver)
//! for the resolver entry point.

22use std::collections::HashMap;
23use std::sync::Arc;
24use std::time::{Duration, Instant};
25
26use async_trait::async_trait;
27use tokio::sync::RwLock;
28
/// Namespace prefix for cache keys. Matches the chat-auth and is_admin
/// caches so a shared-substrate adapter can read the same key namespace.
pub const SV_CACHE_KEY_PREFIX: &str = "sv:";

/// TTL per `paseto-sv-claim.md §R5`. 60 s, non-configurable by design —
/// [`MemorySessionVersionCache::set`] deliberately ignores its `ttl`
/// parameter in favor of this constant.
pub const SV_CACHE_TTL: Duration = Duration::from_secs(60);

// Memory bound for [`MemorySessionVersionCache`]. With `SV_CACHE_TTL = 60 s`,
// an unbounded HashMap leaks one entry per unique `ppnum_id` ever resolved on
// the pod. 10_000 entries × ~80 bytes ≈ 800 KB worst case — comfortable for
// the SDK's "small to mid per-pod" sweet spot. Consumers needing higher caps
// should plug in their own `SessionVersionCache` via `resolver_with_cache`.
const MAX_ENTRIES: usize = 10_000;
41
/// Cache abstraction for `sv:{ppnum_id}` lookups.
///
/// Default implementation is [`MemorySessionVersionCache`]. Consumers
/// that already run KVRocks/Redis can write their own adapter — the
/// `get` / `set` contract is minimal.
///
/// `get` returns `None` on cache miss OR any transient backend error.
/// `set` is best-effort and swallows failures internally (a failed set
/// only costs us one extra fetch on the next validate).
#[async_trait]
pub trait SessionVersionCache: Send + Sync {
    /// Returns the cached session version for `key`, or `None` on miss,
    /// expiry, or any transient backend error.
    async fn get(&self, key: &str) -> Option<i64>;
    /// Stores `sv` under `key`, best-effort. `ttl` is advisory — an
    /// implementation may pin its own TTL (the in-memory default pins
    /// [`SV_CACHE_TTL`] and ignores the parameter).
    async fn set(&self, key: &str, sv: i64, ttl: Duration);
}
56
57/// In-memory [`SessionVersionCache`]. Default choice for SDK consumers.
58///
59/// `tokio::sync::RwLock<HashMap<String, (sv, Instant)>>` with lazy
60/// eviction on read (entries past their TTL are treated as miss) plus a
61/// 10 000-entry cap with opportunistic pruning on `set` (see source for
62/// the exact policy). Production consumers with many pods may want to
63/// plug in a shared cache (Redis, KVRocks) so a break-glass on one pod
64/// converges on all pods within the same 60 s window; the in-memory
65/// default is per-pod.
66pub struct MemorySessionVersionCache {
67    inner: Arc<RwLock<HashMap<String, (i64, Instant)>>>,
68}
69
70impl MemorySessionVersionCache {
71    #[must_use]
72    pub fn new() -> Self {
73        Self {
74            inner: Arc::new(RwLock::new(HashMap::new())),
75        }
76    }
77}
78
79impl Default for MemorySessionVersionCache {
80    fn default() -> Self {
81        Self::new()
82    }
83}
84
85#[async_trait]
86impl SessionVersionCache for MemorySessionVersionCache {
87    async fn get(&self, key: &str) -> Option<i64> {
88        let guard = self.inner.read().await;
89        let (sv, written_at) = guard.get(key)?;
90        if written_at.elapsed() >= SV_CACHE_TTL {
91            return None;
92        }
93        Some(*sv)
94    }
95
96    async fn set(&self, key: &str, sv: i64, _ttl: Duration) {
97        // TTL is governed by the SV_CACHE_TTL constant; ignore the param
98        // so callers can't accidentally drift this substrate's TTL away
99        // from the contract.
100        let mut guard = self.inner.write().await;
101        if guard.len() >= MAX_ENTRIES && !guard.contains_key(key) {
102            // First, free expired slots cheaply.
103            guard.retain(|_, (_, written_at)| written_at.elapsed() < SV_CACHE_TTL);
104            // Still full? Evict the single oldest by write time (FIFO).
105            // Under TTL=60s this is effectively LRU — entries don't live
106            // long enough for hot-vs-cold patterns to develop.
107            if guard.len() >= MAX_ENTRIES {
108                let oldest_key = guard
109                    .iter()
110                    .min_by_key(|(_, (_, written))| *written)
111                    .map(|(k, _)| k.clone());
112                if let Some(k) = oldest_key {
113                    guard.remove(&k);
114                }
115            }
116        }
117        guard.insert(key.to_string(), (sv, Instant::now()));
118    }
119}
120
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn memory_cache_respects_ttl() {
        // Lazy-eviction-on-read path. The wall clock cannot be advanced
        // here, so this proves only that within-TTL reads hit; the
        // expiry branch holds by construction
        // (written_at.elapsed() >= SV_CACHE_TTL → None).
        let cache = MemorySessionVersionCache::new();
        cache.set("sv:abc", 42, SV_CACHE_TTL).await;
        let hit = cache.get("sv:abc").await;
        assert_eq!(hit, Some(42));
        let miss = cache.get("sv:missing").await;
        assert_eq!(miss, None);
    }

    #[tokio::test]
    async fn memory_cache_overwrite() {
        // Re-setting an existing key replaces its value — this is the
        // SvAwareSessionResolver refresh path that writes a newer sv
        // into the cache after picking it up.
        let cache = MemorySessionVersionCache::new();
        for sv in [1, 2] {
            cache.set("sv:xyz", sv, SV_CACHE_TTL).await;
        }
        assert_eq!(cache.get("sv:xyz").await, Some(2));
    }

    #[tokio::test]
    async fn memory_cache_bounded_by_max_entries() {
        // MAX_ENTRIES + N unique in-TTL keys must not exceed the cap;
        // without bounding, a long-lived consumer pod would leak one
        // entry per unique ppnum_id ever resolved.
        let cache = MemorySessionVersionCache::new();
        let total = MAX_ENTRIES + 100;
        for i in 0..total {
            let key = format!("sv:{i}");
            cache.set(&key, i as i64, SV_CACHE_TTL).await;
        }
        let len = cache.inner.read().await.len();
        assert!(
            len <= MAX_ENTRIES,
            "cache exceeded cap: {len} > {MAX_ENTRIES}"
        );
        // FIFO eviction drops oldest first, so the most recently written
        // key must still be present.
        let newest = total - 1;
        let last_key = format!("sv:{newest}");
        assert_eq!(
            cache.get(&last_key).await,
            Some(newest as i64),
            "newest entry must survive eviction"
        );
    }
}