//! pas-external 0.12.0
//!
//! Ppoppo Accounts System (PAS) external SDK — OAuth2 PKCE, JWT
//! verification port, Axum middleware, session liveness.
//! [`SharedCacheCache`] — sv-axis cache adapter over `ppoppo_infra::Cache`.

use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use ppoppo_infra::{Cache as InfraCache, CacheExt as _};

use super::Cache;

/// Adapter implementing the SDK's [`super::Cache`] (sv-specific shape,
/// `Option<i64>`) over any substrate that implements
/// [`ppoppo_infra::Cache`] (the workspace cache trait used by
/// `ppoppo-kvrocks` in production and any in-memory impl in tests).
///
/// # Promoted from chat-api in 0.10.0
///
/// chat-api shipped this exact bridge as
/// `chat_api::session_version::KvCache` since Phase 11.Z Slice 3
/// (`71af9f45`). Promoting to the SDK in 0.10.0 closes the duplication:
/// every consumer reading the canonical `STANDARDS_SHARED_CACHE.md §3.1`
/// `sv:{sub}` namespace uses one shared adapter against any
/// `ppoppo_infra::Cache` impl. Replaces, doesn't layer (see
/// RFC_2026-05-08 §4.1 lock).
///
/// # Substrate-agnostic by construction
///
/// The SDK depends on the `ppoppo-infra` trait crate (small, Redis-free)
/// and never on the substrate crate (`ppoppo-kvrocks` pulls Redis client
/// deps). Consumers wire whatever cache they want — `Arc<KvCache>` cast
/// to `Arc<dyn ppoppo_infra::Cache>` for production, in-memory mocks
/// for tests. Hexagonal Category 3 (Remote-but-owned).
///
/// # Best-effort contract preserved
///
/// [`super::Cache::get`] returns `None` on miss OR transient cache
/// error; [`super::Cache::set`] is fire-and-forget. Errors from the
/// underlying `ppoppo_infra::Cache` are swallowed per the
/// [`super::CompositeEpochRevocation`] documented fall-through-to-fetcher
/// policy.
///
/// # TTL clamping
///
/// `ppoppo_infra::Cache::set` accepts `Option<i32>` seconds. Sub-second
/// `Duration` values would round to 0 and trigger immediate-delete on
/// KVRocks (`EXPIRE 0` semantics) — clamped to `1s` minimum so the
/// entry is at least visible to the very next request. Upper-bound
/// clamps to `i32::MAX` guard against a future `SV_CACHE_TTL > 68 years`
/// silently truncating.
pub struct SharedCacheCache {
    /// The wrapped substrate; every `get`/`set` delegates straight here.
    inner: Arc<dyn InfraCache>,
}

impl SharedCacheCache {
    /// Build from any `Arc<dyn ppoppo_infra::Cache>`. Production
    /// consumers wire an `Arc<KvCache>` cast at the call site:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    /// use pas_external::epoch::SharedCacheCache;
    /// use ppoppo_infra::Cache as InfraCache;
    /// use ppoppo_kvrocks::KvCache;
    /// # async fn wire(client: ppoppo_kvrocks::KvClient) -> Arc<dyn pas_external::epoch::Cache> {
    /// let kv: Arc<dyn InfraCache> = Arc::new(KvCache::new(client));
    /// Arc::new(SharedCacheCache::new(kv))
    /// # }
    /// ```
    #[must_use]
    pub fn new(inner: Arc<dyn InfraCache>) -> Self {
        Self { inner }
    }
}

impl std::fmt::Debug for SharedCacheCache {
    /// Opaque debug form — the trait-object substrate carries nothing
    /// printable, so this renders as `SharedCacheCache { .. }`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut repr = f.debug_struct("SharedCacheCache");
        repr.finish_non_exhaustive()
    }
}

#[async_trait]
impl Cache for SharedCacheCache {
    /// Best-effort read: a substrate error is indistinguishable from a
    /// miss at this layer, so both collapse to `None` and the composer
    /// falls through to the fetcher.
    async fn get(&self, key: &str) -> Option<i64> {
        match self.inner.get_typed::<i64>(key).await {
            Ok(found) => found,
            Err(_) => None,
        }
    }

    /// Fire-and-forget write. The TTL is clamped into `1..=i32::MAX`
    /// seconds: a sub-second `Duration` would truncate to 0 (immediate
    /// delete under `EXPIRE 0` semantics), and anything past `i32::MAX`
    /// would otherwise truncate silently on the cast.
    async fn set(&self, key: &str, sv: i64, ttl: Duration) {
        let ttl_secs = ttl.as_secs().clamp(1, i32::MAX as u64) as i32;
        let _ = self.inner.set_typed(key, &sv, Some(ttl_secs)).await;
    }
}

#[cfg(test)]
#[allow(
    clippy::unwrap_used,
    clippy::expect_used,
    clippy::panic,
    clippy::unimplemented
)]
mod tests {
    //! Local invariants of the adapter: `i64` round-trip via JSON, the
    //! TTL clamp at the sub-second floor, and error swallowing. The
    //! composer integration is covered by the boundary tests in
    //! `tests/epoch_shared_cache_boundary.rs`; nothing here reaches
    //! past the adapter itself.
    use super::*;
    use async_trait::async_trait;
    use ppoppo_infra::Result as InfraResult;
    use ppoppo_token::sv_cache_key;
    use serde_json::Value as Json;
    use std::collections::HashMap;
    use std::sync::Mutex;

    /// In-memory `ppoppo_infra::Cache` stand-in. Remembers the TTL of
    /// the most recent `set`, and can be flipped into a mode where every
    /// `get` fails to exercise the error-swallowing path.
    #[derive(Default)]
    struct FakeSubstrate {
        entries: Mutex<HashMap<String, Json>>,
        seen_ttl: Mutex<Option<i32>>,
        fail_gets: Mutex<bool>,
    }

    #[async_trait]
    impl InfraCache for FakeSubstrate {
        async fn get(&self, key: &str) -> InfraResult<Option<Json>> {
            if *self.fail_gets.lock().unwrap() {
                return Err(ppoppo_infra::Error::NotFound("simulated".into()));
            }
            let entries = self.entries.lock().unwrap();
            Ok(entries.get(key).cloned())
        }

        async fn set(
            &self,
            key: &str,
            value: &Json,
            ttl_seconds: Option<i32>,
        ) -> InfraResult<()> {
            *self.seen_ttl.lock().unwrap() = ttl_seconds;
            let mut entries = self.entries.lock().unwrap();
            entries.insert(key.to_string(), value.clone());
            Ok(())
        }

        async fn del(&self, _key: &str) -> InfraResult<bool> {
            unimplemented!("not exercised by SharedCacheCache")
        }
        async fn exists(&self, _key: &str) -> InfraResult<bool> {
            unimplemented!("not exercised by SharedCacheCache")
        }
        async fn ttl(&self, _key: &str) -> InfraResult<Option<i32>> {
            unimplemented!("not exercised by SharedCacheCache")
        }
        async fn mset(
            &self,
            _entries: &[(&str, Json, Option<i32>)],
        ) -> InfraResult<usize> {
            unimplemented!("not exercised by SharedCacheCache")
        }
        async fn mget(&self, _keys: &[&str]) -> InfraResult<Vec<(String, Option<Json>)>> {
            unimplemented!("not exercised by SharedCacheCache")
        }
        async fn mdel(&self, _keys: &[&str]) -> InfraResult<usize> {
            unimplemented!("not exercised by SharedCacheCache")
        }
        async fn keys(&self, _pattern: &str, _limit: i32) -> InfraResult<Vec<String>> {
            unimplemented!("not exercised by SharedCacheCache")
        }
    }

    /// ULID-shaped subject used to build `sv:{sub}` keys in every test.
    const SUB: &str = "01HSAB00000000000000000000";

    /// Wire a fresh fake substrate into the adapter, returning both so
    /// tests can inspect the substrate directly.
    fn adapter() -> (Arc<FakeSubstrate>, SharedCacheCache) {
        let fake = Arc::new(FakeSubstrate::default());
        let cache = SharedCacheCache::new(fake.clone() as Arc<dyn InfraCache>);
        (fake, cache)
    }

    #[tokio::test]
    async fn set_writes_through_with_ttl_in_seconds() {
        let (fake, cache) = adapter();
        let key = sv_cache_key(SUB);

        cache.set(&key, 42, Duration::from_secs(60)).await;

        assert_eq!(
            fake.entries.lock().unwrap().get(&key).cloned(),
            Some(serde_json::json!(42))
        );
        assert_eq!(*fake.seen_ttl.lock().unwrap(), Some(60));
    }

    #[tokio::test]
    async fn set_clamps_subsecond_ttl_to_one() {
        // A 500ms TTL truncates to 0 whole seconds, which KVRocks
        // treats as an immediate delete; the adapter floors it at 1s so
        // the very next request can still see the entry.
        let (fake, cache) = adapter();

        cache
            .set(&sv_cache_key(SUB), 7, Duration::from_millis(500))
            .await;

        assert_eq!(*fake.seen_ttl.lock().unwrap(), Some(1));
    }

    #[tokio::test]
    async fn get_returns_stored_i64() {
        let (fake, cache) = adapter();
        let key = sv_cache_key(SUB);
        fake.entries
            .lock()
            .unwrap()
            .insert(key.clone(), serde_json::json!(13));

        assert_eq!(cache.get(&key).await, Some(13));
    }

    #[tokio::test]
    async fn get_returns_none_on_miss() {
        let (_fake, cache) = adapter();
        assert_eq!(cache.get("sv:nonexistent").await, None);
    }

    #[tokio::test]
    async fn get_swallows_substrate_errors_as_none() {
        // Cache::get contract: None on miss OR transient cache error.
        // The composer falls through to the fetcher in either case, so
        // surfacing a substrate error here would double-report.
        let (fake, cache) = adapter();
        *fake.fail_gets.lock().unwrap() = true;
        assert_eq!(cache.get("sv:abc").await, None);
    }
}