// fbc_starter/cache/batch_cache.rs
//! Batch cache operation traits.
//!
//! Defines a common interface for batch cache operations, supporting single
//! and batch get/delete operations.
//!
//! The associated types `Key` and `Value` define the cache key and value types.
//!
//! # Example
//! ```rust,no_run
//! use fbc_starter::cache::BatchCache;
//! use fbc_starter::AppResult;
//! use std::collections::HashMap;
//!
//! struct MyCache;
//!
//! #[async_trait::async_trait]
//! impl BatchCache for MyCache {
//!     type Key = i64;
//!     type Value = String;
//!
//!     async fn get(&self, req: Self::Key) -> AppResult<Option<Self::Value>> {
//!         // implement single-get logic here
//!         Ok(None)
//!     }
//!
//!     async fn get_batch(&self, req: &[Self::Key]) -> AppResult<HashMap<Self::Key, Self::Value>> {
//!         // implement batch-get logic here
//!         Ok(HashMap::new())
//!     }
//!
//!     async fn delete(&self, req: Self::Key) -> AppResult<()> {
//!         // implement single-delete logic here
//!         Ok(())
//!     }
//!
//!     async fn delete_batch(&self, req: &[Self::Key]) -> AppResult<()> {
//!         // implement batch-delete logic here
//!         Ok(())
//!     }
//! }
//! ```
41use async_trait::async_trait;
42use std::collections::HashMap;
43use std::hash::Hash;
44
45#[cfg(feature = "redis")]
46use deadpool_redis::redis::{self, AsyncCommands};
47#[cfg(feature = "redis")]
48use serde::{Deserialize, Serialize};
49
50use crate::error::AppResult;
51
/// Batch cache operation trait.
///
/// Provides a unified interface for single and batch cache operations.
///
/// # Associated types
/// - `Key`: cache key type; must implement `Send + Sync + Clone + Hash + Eq`
/// - `Value`: cache value type; must implement `Send + Sync + Clone` (plus
///   serde serialization when using the Redis-backed default implementations
///   from the `RedisBatchCache` extension trait)
#[async_trait]
pub trait BatchCache {
    /// Cache key type.
    type Key: Send + Sync + Clone + Hash + Eq;
    /// Cache value type.
    type Value: Send + Sync + Clone;

    /// Fetches a single cache entry.
    ///
    /// # Parameters
    /// - `req`: the cache key
    ///
    /// # Returns
    /// - `Ok(Some(value))`: the cached value was found
    /// - `Ok(None)`: no entry exists for this key
    /// - `Err(e)`: the operation failed
    async fn get(&self, req: Self::Key) -> AppResult<Option<Self::Value>>;

    /// Fetches multiple cache entries at once.
    ///
    /// # Parameters
    /// - `req`: the list of cache keys
    ///
    /// # Returns
    /// - `Ok(map)`: key/value map containing only the entries that exist
    /// - `Err(e)`: the operation failed
    async fn get_batch(&self, req: &[Self::Key]) -> AppResult<HashMap<Self::Key, Self::Value>>;

    /// Deletes a single cache entry.
    ///
    /// # Parameters
    /// - `req`: the cache key
    ///
    /// # Returns
    /// - `Ok(())`: deletion succeeded (whether or not the entry existed)
    /// - `Err(e)`: the operation failed
    async fn delete(&self, req: Self::Key) -> AppResult<()>;

    /// Deletes multiple cache entries at once.
    ///
    /// # Parameters
    /// - `req`: the list of cache keys
    ///
    /// # Returns
    /// - `Ok(())`: deletion succeeded (whether or not the entries existed)
    /// - `Err(e)`: the operation failed
    async fn delete_batch(&self, req: &[Self::Key]) -> AppResult<()>;

    /// Refreshes the cache in batch (evicts every key contained in the map).
    ///
    /// # Parameters
    /// - `req`: a `HashMap` whose keys identify the entries to evict
    ///   (the values are ignored; only the keys are used)
    ///
    /// # Returns
    /// - `Ok(())`: eviction succeeded (whether or not the entries existed)
    /// - `Err(e)`: the operation failed
    ///
    /// # Example
    /// ```rust,no_run
    /// use fbc_starter::cache::BatchCache;
    /// use fbc_starter::AppResult;
    /// use std::collections::HashMap;
    ///
    /// # async fn example<T: BatchCache<Key = i64, Value = String>>(cache: &T) -> AppResult<()> {
    /// let mut map = HashMap::new();
    /// map.insert(1, "value1".to_string());
    /// map.insert(2, "value2".to_string());
    /// map.insert(3, "value3".to_string());
    ///
    /// // Evict every key present in the map.
    /// cache.refresh(&map).await?;
    /// # Ok(())
    /// # }
    /// ```
    async fn refresh(&self, req: &HashMap<Self::Key, Self::Value>) -> AppResult<()> {
        // Default implementation: collect all keys and delete them in batch.
        let keys: Vec<Self::Key> = req.keys().cloned().collect();
        self.delete_batch(&keys).await
    }
}
140
/// Redis-backed batch cache extension trait.
///
/// Provides Redis-based default implementations for types that implement
/// `BatchCache` and can supply a Redis connection. Implementing this trait
/// gives access to helper methods such as `get_from_redis` and `set_to_redis`.
///
/// # Usage example
/// ```rust,ignore
/// use fbc_starter::cache::{BatchCache, RedisBatchCache};
/// use fbc_starter::AppResult;
/// use std::sync::Arc;
/// use deadpool_redis::Pool;
///
/// struct MyRedisCache {
///     redis_pool: Arc<Pool>,
/// }
///
/// #[async_trait::async_trait]
/// impl BatchCache for MyRedisCache {
///     type Key = i64;
///     type Value = String;
///     // ... implement the remaining required methods
/// }
///
/// #[async_trait::async_trait]
/// impl RedisBatchCache for MyRedisCache {
///     async fn get_redis_connection(&self) -> AppResult<deadpool_redis::Connection> {
///         use fbc_starter::error::AppError;
///         Ok(self.redis_pool.get().await.map_err(|e| AppError::Internal(anyhow::anyhow!("failed to get Redis connection: {}", e)))?)
///     }
///
///     fn build_cache_key(&self, key: &Self::Key) -> String {
///         format!("cache:{}", key)
///     }
///
///     fn cache_expire(&self) -> Option<u64> {
///         Some(300) // 5 minutes
///     }
/// }
/// ```
#[cfg(feature = "redis")]
#[async_trait]
pub trait RedisBatchCache: BatchCache
where
    Self::Value: Serialize + for<'de> Deserialize<'de>,
{
    /// Obtains a Redis connection.
    async fn get_redis_connection(&self) -> AppResult<deadpool_redis::Connection>;

    /// Builds the cache key.
    ///
    /// Converts a `Key` into its string representation used in Redis.
    fn build_cache_key(&self, key: &Self::Key) -> String;

    /// Cache expiration time in seconds.
    ///
    /// Returns `Some(seconds)` when an expiry is configured, or `None` for
    /// entries that never expire (the default).
    fn cache_expire(&self) -> Option<u64> {
        None
    }

    /// Redis-based default implementation: fetch a single cache entry.
    async fn get_from_redis(&self, req: Self::Key) -> AppResult<Option<Self::Value>> {
        use crate::error::AppError;
        let key = self.build_cache_key(&req);
        let mut conn = self.get_redis_connection().await?;

        // Try to read the JSON string from Redis.
        let value: Option<String> = conn.get(&key).await.map_err(AppError::Redis)?;
        if let Some(json_str) = value {
            let value: Self::Value = serde_json::from_str(&json_str)
                .map_err(|e| AppError::Internal(anyhow::anyhow!("JSON 反序列化失败: {}", e)))?;
            return Ok(Some(value));
        }

        Ok(None)
    }

    /// Redis-based default implementation: fetch multiple cache entries.
    async fn get_batch_from_redis(
        &self,
        req: &[Self::Key],
    ) -> AppResult<HashMap<Self::Key, Self::Value>> {
        use crate::error::AppError;
        if req.is_empty() {
            return Ok(HashMap::new());
        }

        let mut conn = self.get_redis_connection().await?;
        let mut result = HashMap::new();

        // Build the Redis key for every requested logical key.
        let keys: Vec<String> = req.iter().map(|k| self.build_cache_key(k)).collect();

        // Fetch all values in one round trip via MGET; missing keys yield None.
        let values: Vec<Option<String>> = conn.mget(&keys).await.map_err(AppError::Redis)?;

        // MGET preserves order, so zip the original keys with the responses.
        // Entries that fail to deserialize are treated as cache misses.
        for (key, value_opt) in req.iter().zip(values.iter()) {
            if let Some(json_str) = value_opt {
                if let Ok(value) = serde_json::from_str::<Self::Value>(json_str) {
                    result.insert(key.clone(), value);
                }
            }
        }

        Ok(result)
    }

    /// Redis-based default implementation: delete a single cache entry.
    async fn delete_from_redis(&self, req: Self::Key) -> AppResult<()> {
        use crate::error::AppError;
        let key = self.build_cache_key(&req);
        let mut conn = self.get_redis_connection().await?;
        let _: () = conn.del(&key).await.map_err(AppError::Redis)?;
        Ok(())
    }

    /// Redis-based default implementation: delete multiple cache entries.
    async fn delete_batch_from_redis(&self, req: &[Self::Key]) -> AppResult<()> {
        use crate::error::AppError;
        if req.is_empty() {
            return Ok(());
        }

        let mut conn = self.get_redis_connection().await?;
        // `req` is non-empty here, so `keys` is too; DEL accepts multiple keys
        // in a single command.
        let keys: Vec<String> = req.iter().map(|k| self.build_cache_key(k)).collect();
        let _: () = conn.del(&keys).await.map_err(AppError::Redis)?;

        Ok(())
    }

    /// Writes a single entry to Redis.
    ///
    /// Note: for bulk writes, prefer `set_batch_to_redis` for better performance.
    async fn set_to_redis(&self, key: Self::Key, value: &Self::Value) -> AppResult<()> {
        use crate::error::AppError;
        let cache_key = self.build_cache_key(&key);
        let json_str = serde_json::to_string(value)
            .map_err(|e| AppError::Internal(anyhow::anyhow!("JSON 序列化失败: {}", e)))?;
        let mut conn = self.get_redis_connection().await?;

        // SET EX when an expiry is configured, plain SET otherwise.
        if let Some(expire) = self.cache_expire() {
            let _: () = conn
                .set_ex(&cache_key, &json_str, expire)
                .await
                .map_err(AppError::Redis)?;
        } else {
            let _: () = conn
                .set(&cache_key, &json_str)
                .await
                .map_err(AppError::Redis)?;
        }

        Ok(())
    }

    /// Writes multiple entries to Redis (higher throughput).
    ///
    /// Uses a Redis pipeline to batch the writes, which is much faster than
    /// issuing one command per entry.
    ///
    /// # Parameters
    /// - `items`: key/value map to write
    ///
    /// # Returns
    /// - `Ok(())`: write succeeded
    /// - `Err(e)`: the operation failed
    ///
    /// # Example
    /// ```rust,no_run
    /// use fbc_starter::cache::RedisBatchCache;
    /// use fbc_starter::AppResult;
    /// use std::collections::HashMap;
    ///
    /// # async fn example<T: RedisBatchCache<Key = i64, Value = String>>(cache: &T) -> AppResult<()> {
    /// let mut items = HashMap::new();
    /// items.insert(1, "value1".to_string());
    /// items.insert(2, "value2".to_string());
    /// items.insert(3, "value3".to_string());
    ///
    /// // Write all entries in one pipeline.
    /// cache.set_batch_to_redis(&items).await?;
    /// # Ok(())
    /// # }
    /// ```
    async fn set_batch_to_redis(&self, items: &HashMap<Self::Key, Self::Value>) -> AppResult<()> {
        use crate::error::AppError;
        if items.is_empty() {
            return Ok(());
        }

        let mut conn = self.get_redis_connection().await?;
        let expire = self.cache_expire();

        // Batch all commands in an atomic (MULTI/EXEC) pipeline.
        let mut pipe = redis::pipe();
        pipe.atomic();

        for (key, value) in items {
            let cache_key = self.build_cache_key(key);
            let json_str = serde_json::to_string(value)
                .map_err(|e| AppError::Internal(anyhow::anyhow!("JSON 序列化失败: {}", e)))?;

            if let Some(expire_secs) = expire {
                pipe.set_ex(&cache_key, &json_str, expire_secs);
            } else {
                pipe.set(&cache_key, &json_str);
            }
        }

        pipe.query_async::<()>(&mut conn)
            .await
            .map_err(AppError::Redis)?;
        Ok(())
    }
}
357
/// Local in-memory batch cache extension trait (similar to Caffeine).
///
/// Provides local-memory default implementations for types that implement
/// `BatchCache` and can supply a local cache instance. Backed by the `moka`
/// crate, which offers a high-performance asynchronous in-process cache.
///
/// # Usage example
/// ```rust,no_run
/// use fbc_starter::cache::{BatchCache, LocalBatchCache};
/// use fbc_starter::AppResult;
/// use std::sync::Arc;
/// use std::collections::HashMap;
/// use moka::future::Cache;
/// use std::time::Duration;
///
/// struct MyLocalCache {
///     cache: Arc<Cache<String, String>>,
/// }
///
/// impl MyLocalCache {
///     fn new() -> Self {
///         // Build the cache with a capacity bound and a TTL.
///         let cache = Cache::builder()
///             .max_capacity(10_000)
///             .time_to_live(Duration::from_secs(300)) // expire after 5 minutes
///             .build();
///
///         Self {
///             cache: Arc::new(cache),
///         }
///     }
/// }
///
/// #[async_trait::async_trait]
/// impl BatchCache for MyLocalCache {
///     type Key = i64;
///     type Value = String;
///
///     async fn get(&self, req: Self::Key) -> AppResult<Option<Self::Value>> {
///         // Delegate to the local-cache default implementation.
///         self.get_from_local(req).await
///     }
///
///     async fn get_batch(&self, req: &[Self::Key]) -> AppResult<HashMap<Self::Key, Self::Value>> {
///         // Delegate to the local-cache default implementation.
///         self.get_batch_from_local(req).await
///     }
///
///     async fn delete(&self, req: Self::Key) -> AppResult<()> {
///         // Delegate to the local-cache default implementation.
///         self.delete_from_local(req).await
///     }
///
///     async fn delete_batch(&self, req: &[Self::Key]) -> AppResult<()> {
///         // Delegate to the local-cache default implementation.
///         self.delete_batch_from_local(req).await
///     }
/// }
///
/// #[async_trait::async_trait]
/// impl LocalBatchCache for MyLocalCache {
///     fn get_local_cache(&self) -> Arc<Cache<String, Self::Value>> {
///         self.cache.clone()
///     }
///
///     fn build_cache_key(&self, key: &Self::Key) -> String {
///         format!("cache:{}", key)
///     }
/// }
/// ```
#[cfg(feature = "local_cache")]
#[async_trait]
pub trait LocalBatchCache: BatchCache
where
    Self::Value: Send + Sync + Clone + 'static,
    Self::Key: Send + Sync + Clone + Hash + Eq + 'static,
{
    /// Returns the local cache instance.
    ///
    /// Returns an `Arc`-wrapped `moka::future::Cache` used for the actual
    /// cache operations.
    fn get_local_cache(&self) -> std::sync::Arc<moka::future::Cache<String, Self::Value>>;

    /// Builds the cache key.
    ///
    /// Converts a `Key` into its string representation used in the cache.
    fn build_cache_key(&self, key: &Self::Key) -> String;

    /// Local-cache default implementation: fetch a single cache entry.
    async fn get_from_local(&self, req: Self::Key) -> AppResult<Option<Self::Value>> {
        let cache = self.get_local_cache();
        let key = self.build_cache_key(&req);
        let value = cache.get(&key).await;
        Ok(value)
    }

    /// Local-cache default implementation: fetch multiple cache entries.
    async fn get_batch_from_local(
        &self,
        req: &[Self::Key],
    ) -> AppResult<HashMap<Self::Key, Self::Value>> {
        if req.is_empty() {
            return Ok(HashMap::new());
        }

        let cache = self.get_local_cache();
        let mut result = HashMap::new();

        // Look up each key; only entries present in the cache are returned.
        for key in req {
            let cache_key = self.build_cache_key(key);
            if let Some(value) = cache.get(&cache_key).await {
                result.insert(key.clone(), value);
            }
        }

        Ok(result)
    }

    /// Local-cache default implementation: delete a single cache entry.
    async fn delete_from_local(&self, req: Self::Key) -> AppResult<()> {
        let cache = self.get_local_cache();
        let key = self.build_cache_key(&req);
        cache.invalidate(&key).await;
        Ok(())
    }

    /// Local-cache default implementation: delete multiple cache entries.
    async fn delete_batch_from_local(&self, req: &[Self::Key]) -> AppResult<()> {
        if req.is_empty() {
            return Ok(());
        }

        let cache = self.get_local_cache();

        // Invalidate each entry directly; no need to collect the keys into an
        // intermediate Vec first.
        for key in req {
            cache.invalidate(&self.build_cache_key(key)).await;
        }

        Ok(())
    }

    /// Writes a single entry to the local in-memory cache.
    ///
    /// Note: for bulk writes, prefer `set_batch_to_local` for better performance.
    async fn set_to_local(&self, key: Self::Key, value: &Self::Value) -> AppResult<()> {
        let cache = self.get_local_cache();
        let cache_key = self.build_cache_key(&key);
        cache.insert(cache_key, value.clone()).await;
        Ok(())
    }

    /// Writes multiple entries to the local in-memory cache (higher throughput).
    ///
    /// Inserts all entries in one pass, faster than calling `set_to_local`
    /// repeatedly.
    ///
    /// # Parameters
    /// - `items`: key/value map to write
    ///
    /// # Returns
    /// - `Ok(())`: write succeeded
    /// - `Err(e)`: the operation failed
    ///
    /// # Example
    /// ```rust,no_run
    /// use fbc_starter::cache::LocalBatchCache;
    /// use fbc_starter::AppResult;
    /// use std::collections::HashMap;
    ///
    /// # async fn example<T: LocalBatchCache<Key = i64, Value = String>>(cache: &T) -> AppResult<()> {
    /// let mut items = HashMap::new();
    /// items.insert(1, "value1".to_string());
    /// items.insert(2, "value2".to_string());
    /// items.insert(3, "value3".to_string());
    ///
    /// // Write all entries in one batch.
    /// cache.set_batch_to_local(&items).await?;
    /// # Ok(())
    /// # }
    /// ```
    async fn set_batch_to_local(&self, items: &HashMap<Self::Key, Self::Value>) -> AppResult<()> {
        if items.is_empty() {
            return Ok(());
        }

        let cache = self.get_local_cache();

        // Insert every entry; values are cloned because moka takes ownership.
        for (key, value) in items {
            let cache_key = self.build_cache_key(key);
            cache.insert(cache_key, value.clone()).await;
        }

        Ok(())
    }
}
554
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    /// Minimal in-memory `BatchCache` implementation used by the tests below.
    /// Reads come from a plain `HashMap`; deletes are intentional no-ops.
    struct TestCache {
        data: HashMap<i64, String>,
    }

    #[async_trait]
    impl BatchCache for TestCache {
        type Key = i64;
        type Value = String;

        async fn get(&self, req: Self::Key) -> AppResult<Option<Self::Value>> {
            Ok(self.data.get(&req).cloned())
        }

        async fn get_batch(&self, req: &[Self::Key]) -> AppResult<HashMap<Self::Key, Self::Value>> {
            // Collect only the keys that are present in the backing map.
            Ok(req
                .iter()
                .filter_map(|k| self.data.get(k).map(|v| (*k, v.clone())))
                .collect())
        }

        async fn delete(&self, _req: Self::Key) -> AppResult<()> {
            // Test stub: no actual deletion is performed.
            Ok(())
        }

        async fn delete_batch(&self, _req: &[Self::Key]) -> AppResult<()> {
            // Test stub: no actual deletion is performed.
            Ok(())
        }
    }

    /// Builds a `TestCache` preloaded with entries `i -> "value{i}"` for `1..=count`.
    fn cache_with_values(count: i64) -> TestCache {
        let data = (1..=count).map(|i| (i, format!("value{}", i))).collect();
        TestCache { data }
    }

    #[tokio::test]
    async fn test_get() {
        let cache = cache_with_values(2);

        assert_eq!(cache.get(1).await.unwrap(), Some("value1".to_string()));
        assert_eq!(cache.get(3).await.unwrap(), None);
    }

    #[tokio::test]
    async fn test_get_batch() {
        let cache = cache_with_values(3);

        let found = cache.get_batch(&[1, 2, 4]).await.unwrap();
        assert_eq!(found.len(), 2);
        assert_eq!(found.get(&1), Some(&"value1".to_string()));
        assert_eq!(found.get(&2), Some(&"value2".to_string()));
        assert_eq!(found.get(&4), None);
    }

    #[tokio::test]
    async fn test_delete() {
        let cache = cache_with_values(0);

        assert!(cache.delete(1).await.is_ok());
    }

    #[tokio::test]
    async fn test_delete_batch() {
        let cache = cache_with_values(0);

        assert!(cache.delete_batch(&[1, 2, 3]).await.is_ok());
    }

    #[tokio::test]
    async fn test_refresh() {
        let cache = cache_with_values(3);

        // `refresh` only uses the map's keys; the values are ignored.
        let to_refresh: HashMap<i64, String> = [(1, "ignored"), (2, "ignored")]
            .iter()
            .map(|(k, v)| (*k, v.to_string()))
            .collect();

        assert!(cache.refresh(&to_refresh).await.is_ok());
    }
}
660}