Skip to main content

nydus_storage/cache/
dummycache.rs

1// Copyright 2020 Ant Group. All rights reserved.
2// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
3//
4// SPDX-License-Identifier: Apache-2.0
5
//! A dummy implementation of the [BlobCacheMgr](trait.BlobCacheMgr.html) trait.
//!
//! The [DummyCacheMgr](struct.DummyCacheMgr.html) is a dummy implementation of the
//! [BlobCacheMgr](../trait.BlobCacheMgr.html) trait, which doesn't really cache any data.
//! Instead it just reads data from the backend, uncompresses it if needed and then passes
//! the data on to the clients.
//!
//! There are two possible usage modes of the [DummyCacheMgr]:
//! - Read compressed/uncompressed data from a remote Registry/OSS backend without caching
//!   the uncompressed data on local storage. The
//!   [is_chunk_cached()](../trait.BlobCache.html#tymethod.is_chunk_cached)
//!   method always returns false to disable data prefetching.
//! - Read uncompressed data from local disk with no need to double cache the data.
//!   The [is_chunk_cached()](../trait.BlobCache.html#tymethod.is_chunk_cached) method always
//!   returns true to enable data prefetching.
21use std::io::Result;
22use std::sync::atomic::{AtomicBool, Ordering};
23use std::sync::Arc;
24
25use fuse_backend_rs::file_buf::FileVolatileSlice;
26use nydus_api::CacheConfigV2;
27use nydus_utils::crypt::{Algorithm, Cipher, CipherContext};
28use nydus_utils::{compress, digest};
29
30use crate::backend::{BlobBackend, BlobReader};
31use crate::cache::state::{ChunkMap, NoopChunkMap};
32use crate::cache::{BlobCache, BlobCacheMgr};
33use crate::device::{
34    BlobChunkInfo, BlobFeatures, BlobInfo, BlobIoDesc, BlobIoVec, BlobPrefetchRequest,
35};
36use crate::utils::{alloc_buf, copyv};
37use crate::{StorageError, StorageResult};
38
/// A dummy blob cache object which forwards all data requests to the storage backend
/// instead of serving them from a local cache.
struct DummyCache {
    // Id of the data blob served by this cache object.
    blob_id: String,
    // Metadata of the data blob.
    blob_info: Arc<BlobInfo>,
    // No-op chunk map which reports every chunk as cached or not cached,
    // according to the manager's `cached` configuration.
    chunk_map: Arc<dyn ChunkMap>,
    // Reader to fetch blob data from the storage backend.
    reader: Arc<dyn BlobReader>,
    // Compression algorithm applied to the blob's data.
    compressor: compress::Algorithm,
    // Digest algorithm used for the blob's chunks.
    digester: digest::Algorithm,
    // True if the blob is in legacy stargz format.
    is_legacy_stargz: bool,
    // Whether data read from the backend should be validated.
    need_validation: bool,
}
49
// `BlobCache` implementation which never caches: every read goes straight to the
// backend reader, and prefetching is unsupported.
impl BlobCache for DummyCache {
    fn blob_id(&self) -> &str {
        &self.blob_id
    }

    fn blob_uncompressed_size(&self) -> Result<u64> {
        Ok(self.blob_info.uncompressed_size())
    }

    fn blob_compressed_size(&self) -> Result<u64> {
        // Ask the backend for the actual blob size rather than using metadata.
        self.reader.blob_size().map_err(|e| eother!(e))
    }

    fn blob_compressor(&self) -> compress::Algorithm {
        self.compressor
    }

    fn blob_cipher(&self) -> Algorithm {
        self.blob_info.cipher()
    }

    fn blob_cipher_object(&self) -> Arc<Cipher> {
        self.blob_info.cipher_object()
    }

    fn blob_cipher_context(&self) -> Option<CipherContext> {
        self.blob_info.cipher_context()
    }

    fn blob_digester(&self) -> digest::Algorithm {
        self.digester
    }

    fn is_legacy_stargz(&self) -> bool {
        self.is_legacy_stargz
    }

    fn need_validation(&self) -> bool {
        self.need_validation
    }

    fn reader(&self) -> &dyn BlobReader {
        &*self.reader
    }

    fn get_chunk_map(&self) -> &Arc<dyn ChunkMap> {
        &self.chunk_map
    }

    // The dummy cache keeps no per-chunk metadata.
    fn get_chunk_info(&self, _chunk_index: u32) -> Option<Arc<dyn BlobChunkInfo>> {
        None
    }

    // Prefetch start/stop are accepted as no-ops so callers need no special casing.
    fn start_prefetch(&self) -> StorageResult<()> {
        Ok(())
    }

    fn stop_prefetch(&self) -> StorageResult<()> {
        Ok(())
    }

    fn is_prefetch_active(&self) -> bool {
        false
    }

    // Prefetching data into a cache is meaningless without a cache; always refuse.
    fn prefetch(
        &self,
        _blob_cache: Arc<dyn BlobCache>,
        _prefetches: &[BlobPrefetchRequest],
        _bios: &[BlobIoDesc],
    ) -> StorageResult<usize> {
        Err(StorageError::Unsupported)
    }

    /// Serve an IO vector by fetching every referenced chunk from the backend.
    ///
    /// Fast path: a single bio targeting a single destination buffer, starting at
    /// offset 0 and fitting entirely, is read directly into the caller's buffer.
    /// Otherwise each user chunk is fetched into a temporary buffer and the
    /// requested ranges are scattered into `bufs` via `copyv()`.
    fn read(&self, iovec: &mut BlobIoVec, bufs: &[FileVolatileSlice]) -> Result<usize> {
        let bios = &iovec.bi_vec;

        if iovec.size() == 0 || bios.is_empty() {
            return Err(einval!("parameter `bios` is empty"));
        }

        let bios_len = bios.len();
        let offset = bios[0].offset;
        let d_size = bios[0].chunkinfo.uncompressed_size() as usize;
        // Use the destination buffer to receive the uncompressed data if possible.
        if bufs.len() == 1 && bios_len == 1 && offset == 0 && bufs[0].len() >= d_size {
            if !bios[0].user_io {
                // Internal (non-user) IO: nothing needs to be copied out.
                return Ok(0);
            }
            // SAFETY: the branch condition above guarantees `bufs[0].len() >= d_size`,
            // and `FileVolatileSlice` is assumed to describe at least `len()` valid,
            // writable bytes at `as_ptr()`.
            let buf = unsafe { std::slice::from_raw_parts_mut(bufs[0].as_ptr(), d_size) };
            self.read_chunk_from_backend(&bios[0].chunkinfo, buf)?;
            return Ok(buf.len());
        }

        // Slow path: fetch each user chunk into its own temporary buffer, then
        // scatter the requested byte ranges into the destination buffers.
        let mut user_size = 0;
        let mut buffer_holder: Vec<Vec<u8>> = Vec::with_capacity(bios.len());
        for bio in bios.iter() {
            if bio.user_io {
                let mut d = alloc_buf(bio.chunkinfo.uncompressed_size() as usize);
                self.read_chunk_from_backend(&bio.chunkinfo, d.as_mut_slice())?;
                buffer_holder.push(d);
                // Even a merged IO can hardly reach u32::MAX. So this is safe
                user_size += bio.size;
            }
        }

        copyv(
            &buffer_holder,
            bufs,
            offset as usize,
            user_size as usize,
            0,
            0,
        )
        .map(|(n, _)| n)
        .map_err(|e| eother!(e))
    }
}
168
/// A dummy implementation of [BlobCacheMgr](../trait.BlobCacheMgr.html), simply reporting each
/// chunk as cached or not cached according to configuration.
///
/// The `DummyCacheMgr` is a dummy implementation of the `BlobCacheMgr`, which doesn't really cache
/// data. Instead it just reads data from the backend, uncompresses it if needed and then passes
/// the data on to the clients.
pub struct DummyCacheMgr {
    // Storage backend used to fetch blob data.
    backend: Arc<dyn BlobBackend>,
    // Whether chunks should be reported as already cached (see module docs:
    // this toggles data prefetching at upper layers).
    cached: bool,
    // Whether data read from the backend should be validated.
    need_validation: bool,
    // Set once `destroy()` has shut down the backend.
    closed: AtomicBool,
}
181
182impl DummyCacheMgr {
183    /// Create a new instance of `DummyCacheMgr`.
184    pub fn new(
185        config: &CacheConfigV2,
186        backend: Arc<dyn BlobBackend>,
187        cached: bool,
188    ) -> Result<DummyCacheMgr> {
189        Ok(DummyCacheMgr {
190            backend,
191            cached,
192            need_validation: config.cache_validate,
193            closed: AtomicBool::new(false),
194        })
195    }
196}
197
198impl BlobCacheMgr for DummyCacheMgr {
199    fn init(&self) -> Result<()> {
200        Ok(())
201    }
202
203    fn destroy(&self) {
204        if !self.closed.load(Ordering::Acquire) {
205            self.closed.store(true, Ordering::Release);
206            self.backend().shutdown();
207        }
208    }
209
210    fn gc(&self, _id: Option<&str>) -> bool {
211        false
212    }
213
214    fn backend(&self) -> &dyn BlobBackend {
215        self.backend.as_ref()
216    }
217
218    fn get_blob_cache(&self, blob_info: &Arc<BlobInfo>) -> Result<Arc<dyn BlobCache>> {
219        if blob_info.has_feature(BlobFeatures::ZRAN) {
220            return Err(einval!(
221                "BlobCacheMgr doesn't support ZRan based RAFS data blobs"
222            ));
223        }
224
225        let blob_id = blob_info.blob_id();
226        let reader = self.backend.get_reader(&blob_id).map_err(|e| eother!(e))?;
227
228        Ok(Arc::new(DummyCache {
229            blob_id,
230            blob_info: blob_info.clone(),
231            chunk_map: Arc::new(NoopChunkMap::new(self.cached)),
232            reader,
233            compressor: blob_info.compressor(),
234            digester: blob_info.digester(),
235            is_legacy_stargz: blob_info.is_legacy_stargz(),
236            need_validation: self.need_validation && !blob_info.is_legacy_stargz(),
237        }))
238    }
239
240    fn check_stat(&self) {}
241}
242
impl Drop for DummyCacheMgr {
    // Ensure the backend gets shut down when the manager is dropped;
    // `destroy()` is a no-op if it has already been called explicitly.
    fn drop(&mut self) {
        self.destroy();
    }
}
248
#[cfg(test)]
mod tests {
    use std::fs::OpenOptions;

    use nydus_api::ConfigV2;
    use nydus_utils::metrics::BackendMetrics;
    use vmm_sys_util::tempdir::TempDir;

    use crate::{
        cache::state::IndexedChunkMap,
        device::{BlobIoChunk, BlobIoRange},
        meta::tests::DummyBlobReader,
        test::{MockBackend, MockChunkInfo},
    };

    use super::*;

    // Exercise the `BlobCache` implementation of `DummyCache`: trivial accessors,
    // the prefetch stubs, the single-chunk fast path of `read()`, and the
    // multi-chunk slow path going through `copyv()`.
    #[test]
    fn test_dummy_cache() {
        let info = BlobInfo::new(
            0,
            "blob-0".to_string(),
            800,
            0,
            8,
            100,
            BlobFeatures::empty(),
        );
        let dir = TempDir::new().unwrap();
        let blob_path = dir
            .as_path()
            .join("blob-0")
            .as_os_str()
            .to_str()
            .unwrap()
            .to_string();
        let chunkmap = IndexedChunkMap::new(blob_path.as_str(), 100, true).unwrap();
        let chunkmap_unuse = IndexedChunkMap::new(blob_path.as_str(), 100, true).unwrap();

        // Back the reader with an 800-byte (sparse) file so reads can succeed.
        let f = OpenOptions::new()
            .truncate(true)
            .create(true)
            .write(true)
            .read(true)
            .open(blob_path.as_str())
            .unwrap();
        assert!(f.set_len(800).is_ok());
        let reader: Arc<dyn BlobReader> = Arc::new(DummyBlobReader {
            metrics: BackendMetrics::new("dummy", "localfs"),
            file: f,
        });
        let cache = DummyCache {
            blob_id: "0".to_string(),
            blob_info: Arc::new(info.clone()),
            chunk_map: Arc::new(chunkmap),
            reader: reader.clone(),
            compressor: compress::Algorithm::None,
            digester: digest::Algorithm::Blake3,
            is_legacy_stargz: false,
            need_validation: false,
        };

        // Second cache object, only consumed as the `blob_cache` argument of
        // `prefetch()` below.
        let cache_unuse = DummyCache {
            blob_id: "1".to_string(),
            blob_info: Arc::new(info.clone()),
            chunk_map: Arc::new(chunkmap_unuse),
            reader,
            compressor: compress::Algorithm::None,
            digester: digest::Algorithm::Blake3,
            is_legacy_stargz: false,
            need_validation: false,
        };

        // Accessors and capability probes.
        assert!(cache.get_legacy_stargz_size(0, 100).is_ok());
        assert!(!cache.is_zran());
        assert!(!cache.is_batch());
        assert!(cache.get_blob_object().is_none());
        assert!(cache.prefetch_range(&BlobIoRange::default()).is_err());
        assert_eq!(cache.blob_id, "0");
        assert_eq!(cache.blob_uncompressed_size().unwrap(), 800);
        assert_eq!(cache.blob_compressed_size().unwrap(), 0);
        assert_eq!(cache.blob_compressor(), compress::Algorithm::None);
        assert_eq!(cache.blob_cipher(), Algorithm::None);
        match cache.blob_cipher_object().as_ref() {
            Cipher::None => {}
            _ => panic!(),
        }
        assert!(cache.blob_cipher_context().is_none());
        assert_eq!(cache.blob_digester(), digest::Algorithm::Blake3);
        assert!(!cache.is_legacy_stargz());
        assert!(!cache.need_validation());
        let _r = cache.reader();
        let _m = cache.get_chunk_map();
        assert!(cache.get_chunk_info(0).is_none());

        // Prefetch must be refused by the dummy cache even between start/stop.
        assert!(cache.start_prefetch().is_ok());
        let reqs = BlobPrefetchRequest {
            blob_id: "blob-0".to_string(),
            offset: 0,
            len: 10,
        };
        let iovec_arr: &[BlobIoDesc] = &[];
        let reqs = &[reqs];

        assert!(cache
            .prefetch(Arc::new(cache_unuse), reqs, iovec_arr)
            .is_err());
        assert!(cache.stop_prefetch().is_ok());
        // Single bio + single destination buffer at offset 0: hits the fast path
        // of `read()`, which returns the chunk's full uncompressed size.
        let mut iovec = BlobIoVec::new(Arc::new(info.clone()));
        let chunk: Arc<dyn BlobChunkInfo> = Arc::new(MockChunkInfo {
            block_id: Default::default(),
            blob_index: 0,
            flags: Default::default(),
            compress_size: 0,
            uncompress_size: 800,
            compress_offset: 0,
            uncompress_offset: 0,
            file_offset: 0,
            index: 0,
            crc32: 0,
        });
        iovec.push(BlobIoDesc::new(
            Arc::new(info.clone()),
            BlobIoChunk::from(chunk.clone()),
            0,
            10,
            true,
        ));

        let mut dst_buf1 = vec![0x0u8; 800];
        let volatile_slice_1 =
            unsafe { FileVolatileSlice::from_raw_ptr(dst_buf1.as_mut_ptr(), dst_buf1.len()) };
        let bufs: &[FileVolatileSlice] = &[volatile_slice_1];
        assert_eq!(cache.read(&mut iovec, bufs).unwrap(), 800);

        // Two user chunks: forces the slow path, which fetches each chunk into a
        // temporary buffer and scatters the result via `copyv()`.
        let chunk2: Arc<dyn BlobChunkInfo> = Arc::new(MockChunkInfo {
            block_id: Default::default(),
            blob_index: 0,
            flags: Default::default(),
            compress_size: 0,
            uncompress_size: 100,
            compress_offset: 0,
            uncompress_offset: 0,
            file_offset: 0,
            index: 0,
            crc32: 0,
        });

        let chunk3: Arc<dyn BlobChunkInfo> = Arc::new(MockChunkInfo {
            block_id: Default::default(),
            blob_index: 0,
            flags: Default::default(),
            compress_size: 0,
            uncompress_size: 100,
            compress_offset: 100,
            uncompress_offset: 0,
            file_offset: 0,
            index: 0,
            crc32: 0,
        });

        let mut iovec = BlobIoVec::new(Arc::new(info.clone()));

        iovec.push(BlobIoDesc::new(
            Arc::new(info.clone()),
            BlobIoChunk::from(chunk2.clone()),
            0,
            100,
            true,
        ));

        iovec.push(BlobIoDesc::new(
            Arc::new(info),
            BlobIoChunk::from(chunk3.clone()),
            100,
            100,
            true,
        ));

        let mut dst_buf2 = vec![0x0u8; 100];
        let mut dst_buf3 = vec![0x0u8; 100];
        let volatile_slice_2 =
            unsafe { FileVolatileSlice::from_raw_ptr(dst_buf2.as_mut_ptr(), dst_buf2.len()) };

        let volatile_slice_3 =
            unsafe { FileVolatileSlice::from_raw_ptr(dst_buf3.as_mut_ptr(), dst_buf3.len()) };
        let bufs: &[FileVolatileSlice] = &[volatile_slice_2, volatile_slice_3];
        assert_eq!(cache.read(&mut iovec, bufs).unwrap(), 200);
    }

    // Exercise the `DummyCacheMgr` lifecycle: creation from a TOML config,
    // no-op init/gc/check_stat, and destroy (which `Drop` invokes again —
    // exercising the "already closed" path).
    #[test]
    fn test_dummy_cache_mgr() {
        let content = r#"version=2
        id = "my_id"
        metadata_path = "meta_path"
        [backend]
        type = "localfs"
        [backend.localfs]
        blob_file = "/tmp/nydus.blob.data"
        dir = "/tmp"
        alt_dirs = ["/var/nydus/cache"]
        [cache]
        type = "filecache"
        compressed = true
        validate = true
        [cache.filecache]
        work_dir = "/tmp"
        "#;

        let cfg: ConfigV2 = toml::from_str(content).unwrap();
        let backend = MockBackend {
            metrics: BackendMetrics::new("dummy", "localfs"),
        };
        let mgr =
            DummyCacheMgr::new(cfg.get_cache_config().unwrap(), Arc::new(backend), false).unwrap();
        assert!(mgr.init().is_ok());
        assert!(!mgr.gc(Some("blob-0")));
        let _bak = mgr.backend();
        mgr.check_stat();
        mgr.destroy();
        assert!(mgr.closed.load(Ordering::Acquire));
        drop(mgr);
    }
}