whatsapp_rust/cache_config.rs
1use std::fmt::Display;
2use std::sync::Arc;
3use std::time::Duration;
4
5use crate::cache::Cache;
6use serde::{Serialize, de::DeserializeOwned};
7
8use crate::cache_store::TypedCache;
9pub use wacore::store::cache::CacheStore;
10
/// Configuration for a single cache instance.
///
/// Controls the expiry timeout and maximum capacity of a moka cache.
/// The `timeout` field is used as either TTL (`build_with_ttl`) or TTI
/// (`build_with_tti`) depending on which builder method is called.
/// Set `timeout` to `None` to disable time-based expiry (entries stay until
/// evicted by capacity).
#[derive(Debug, Clone)]
pub struct CacheEntryConfig {
    /// Expiry timeout duration. `None` means no time-based expiry.
    /// Interpreted as TTL or TTI depending on the builder method used.
    pub timeout: Option<Duration>,
    /// Maximum number of entries (passed to moka's `max_capacity`).
    pub capacity: u64,
}
26
27impl CacheEntryConfig {
28 pub fn new(timeout: Option<Duration>, capacity: u64) -> Self {
29 Self { timeout, capacity }
30 }
31
32 /// Build a Cache using time_to_live semantics.
33 pub(crate) fn build_with_ttl<K, V>(&self) -> Cache<K, V>
34 where
35 K: std::hash::Hash + Eq + Clone + Send + Sync + 'static,
36 V: Clone + Send + Sync + 'static,
37 {
38 let mut builder = Cache::builder().max_capacity(self.capacity);
39 if let Some(timeout) = self.timeout {
40 builder = builder.time_to_live(timeout);
41 }
42 builder.build()
43 }
44
45 /// Build a [`TypedCache`] with TTL semantics, using the custom store if
46 /// provided or falling back to an in-process cache.
47 pub(crate) fn build_typed_ttl<K, V>(
48 &self,
49 store: Option<Arc<dyn CacheStore>>,
50 namespace: &'static str,
51 ) -> TypedCache<K, V>
52 where
53 K: std::hash::Hash + Eq + Clone + Display + Send + Sync + 'static,
54 V: Clone + Serialize + DeserializeOwned + Send + Sync + 'static,
55 {
56 match store {
57 Some(s) => TypedCache::from_store(s, namespace, self.timeout),
58 None => TypedCache::from_moka(self.build_with_ttl()),
59 }
60 }
61
62 /// Build a Cache using time_to_idle semantics.
63 pub(crate) fn build_with_tti<K, V>(&self) -> Cache<K, V>
64 where
65 K: std::hash::Hash + Eq + Clone + Send + Sync + 'static,
66 V: Clone + Send + Sync + 'static,
67 {
68 let mut builder = Cache::builder().max_capacity(self.capacity);
69 if let Some(timeout) = self.timeout {
70 builder = builder.time_to_idle(timeout);
71 }
72 builder.build()
73 }
74}
75
/// Per-cache custom store overrides.
///
/// Each field is an optional [`CacheStore`] for that specific cache. When
/// `None`, the default in-process moka cache is used for that cache.
///
/// # Example — only group and device on Redis
///
/// ```rust,ignore
/// let redis = Arc::new(MyRedisCacheStore::new("redis://localhost:6379"));
/// let config = CacheConfig {
///     cache_stores: CacheStores {
///         group_cache: Some(redis.clone()),
///         device_cache: Some(redis.clone()),
///         ..Default::default()
///     },
///     ..Default::default()
/// };
/// ```
#[derive(Default, Clone)]
pub struct CacheStores {
    /// Custom store for the group metadata cache.
    pub group_cache: Option<Arc<dyn CacheStore>>,
    /// Custom store for the device list cache.
    pub device_cache: Option<Arc<dyn CacheStore>>,
    /// Custom store for the device registry cache.
    pub device_registry_cache: Option<Arc<dyn CacheStore>>,
    /// Custom store for the LID-PN bidirectional mapping cache.
    pub lid_pn_cache: Option<Arc<dyn CacheStore>>,
}
105
106impl CacheStores {
107 /// Set the same [`CacheStore`] for all pluggable caches at once.
108 ///
109 /// Coordination caches (`session_locks`, `message_queues`, etc.) and the
110 /// signal write-behind cache always remain in-process regardless of this
111 /// setting.
112 ///
113 /// # Example
114 ///
115 /// ```rust,ignore
116 /// let stores = CacheStores::all(Arc::new(MyRedisCacheStore::new("redis://localhost:6379")));
117 /// ```
118 pub fn all(store: Arc<dyn CacheStore>) -> Self {
119 Self {
120 group_cache: Some(store.clone()),
121 device_cache: Some(store.clone()),
122 device_registry_cache: Some(store.clone()),
123 lid_pn_cache: Some(store),
124 }
125 }
126}
127
/// Configuration for all client caches and resource pools.
///
/// All fields default to WhatsApp Web behavior. Use `..Default::default()` to
/// override only specific settings.
///
/// # Example — tune TTL/capacity
///
/// ```rust,ignore
/// use whatsapp_rust::{CacheConfig, CacheEntryConfig};
/// use std::time::Duration;
///
/// let config = CacheConfig {
///     group_cache: CacheEntryConfig::new(None, 1_000), // no TTL
///     ..Default::default()
/// };
/// ```
///
/// # Example — Redis for group and device caches only
///
/// ```rust,ignore
/// use std::sync::Arc;
/// use whatsapp_rust::{CacheConfig, CacheStores};
///
/// let redis = Arc::new(MyRedisCacheStore::new("redis://localhost:6379"));
/// let config = CacheConfig {
///     cache_stores: CacheStores {
///         group_cache: Some(redis.clone()),
///         device_cache: Some(redis.clone()),
///         ..Default::default()
///     },
///     ..Default::default()
/// };
/// ```
#[derive(Clone)]
pub struct CacheConfig {
    /// Group metadata cache (time_to_live). Default: 1h TTL, 250 entries.
    pub group_cache: CacheEntryConfig,
    /// Device list cache (time_to_live). Default: 1h TTL, 5000 entries.
    pub device_cache: CacheEntryConfig,
    /// Device registry cache (time_to_live). Default: 1h TTL, 5000 entries.
    pub device_registry_cache: CacheEntryConfig,
    /// LID-to-phone cache (time_to_idle). Default: 1h timeout, 10000 entries.
    pub lid_pn_cache: CacheEntryConfig,
    /// Retried group messages tracker (time_to_live). Default: 5m TTL, 2000 entries.
    pub retried_group_messages: CacheEntryConfig,
    /// Optional L1 in-memory cache for sent messages (retry support).
    /// Default: capacity 0 (disabled — DB-only, matching WA Web).
    /// Set capacity > 0 to enable a fast in-memory cache in front of the DB.
    pub recent_messages: CacheEntryConfig,
    /// Message retry counts (time_to_live). Default: 5m TTL, 1000 entries.
    pub message_retry_counts: CacheEntryConfig,
    /// PDO pending requests (time_to_live). Default: 30s TTL, 500 entries.
    pub pdo_pending_requests: CacheEntryConfig,

    // --- Coordination caches (capacity-only, no TTL) ---
    /// Per-device Signal session lock capacity. Default: 2000.
    pub session_locks_capacity: u64,
    /// Per-chat message processing queue capacity. Default: 2000.
    pub message_queues_capacity: u64,
    /// Per-chat message enqueue lock capacity. Default: 2000.
    pub message_enqueue_locks_capacity: u64,

    // --- Sent message DB cleanup ---
    /// TTL in seconds for sent messages in DB before periodic cleanup.
    /// 0 = no automatic cleanup. Default: 300 (5 minutes).
    pub sent_message_ttl_secs: u64,

    // --- Custom store overrides ---
    /// Per-cache custom store overrides.
    ///
    /// For each field set to `Some(store)`, the corresponding cache uses that
    /// backend instead of the default in-process moka cache. Fields left as
    /// `None` keep the default moka behaviour.
    ///
    /// Coordination caches (`session_locks`, `message_queues`,
    /// `message_enqueue_locks`), the signal write-behind cache, and
    /// `pdo_pending_requests` always stay in-process — they hold live Rust
    /// objects (mutexes, channel senders, oneshot senders) that cannot be
    /// serialised to an external store.
    pub cache_stores: CacheStores,
}
209
210impl std::fmt::Debug for CacheConfig {
211 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
212 f.debug_struct("CacheConfig")
213 .field("group_cache", &self.group_cache)
214 .field("device_cache", &self.device_cache)
215 .field("device_registry_cache", &self.device_registry_cache)
216 .field("lid_pn_cache", &self.lid_pn_cache)
217 .field("retried_group_messages", &self.retried_group_messages)
218 .field("recent_messages", &self.recent_messages)
219 .field("message_retry_counts", &self.message_retry_counts)
220 .field("pdo_pending_requests", &self.pdo_pending_requests)
221 .field("session_locks_capacity", &self.session_locks_capacity)
222 .field("message_queues_capacity", &self.message_queues_capacity)
223 .field(
224 "message_enqueue_locks_capacity",
225 &self.message_enqueue_locks_capacity,
226 )
227 .field("sent_message_ttl_secs", &self.sent_message_ttl_secs)
228 .field(
229 "cache_stores.group_cache",
230 &self.cache_stores.group_cache.is_some(),
231 )
232 .field(
233 "cache_stores.device_cache",
234 &self.cache_stores.device_cache.is_some(),
235 )
236 .field(
237 "cache_stores.device_registry_cache",
238 &self.cache_stores.device_registry_cache.is_some(),
239 )
240 .field(
241 "cache_stores.lid_pn_cache",
242 &self.cache_stores.lid_pn_cache.is_some(),
243 )
244 .finish()
245 }
246}
247
248impl Default for CacheConfig {
249 fn default() -> Self {
250 let one_hour = Some(Duration::from_secs(3600));
251 let five_min = Some(Duration::from_secs(300));
252
253 Self {
254 group_cache: CacheEntryConfig::new(one_hour, 250),
255 device_cache: CacheEntryConfig::new(one_hour, 5_000),
256 device_registry_cache: CacheEntryConfig::new(one_hour, 5_000),
257 lid_pn_cache: CacheEntryConfig::new(one_hour, 10_000),
258 retried_group_messages: CacheEntryConfig::new(five_min, 2_000),
259 recent_messages: CacheEntryConfig::new(five_min, 0),
260 message_retry_counts: CacheEntryConfig::new(five_min, 1_000),
261 pdo_pending_requests: CacheEntryConfig::new(Some(Duration::from_secs(30)), 500),
262 session_locks_capacity: 2_000,
263 message_queues_capacity: 2_000,
264 message_enqueue_locks_capacity: 2_000,
265 sent_message_ttl_secs: 300,
266 cache_stores: CacheStores::default(),
267 }
268 }
269}