pub trait BlobCache: Send + Sync {
// 29 methods total: 18 required, 11 provided
// Required methods
fn blob_id(&self) -> &str;
fn blob_uncompressed_size(&self) -> Result<u64>;
fn blob_compressed_size(&self) -> Result<u64>;
fn blob_compressor(&self) -> Algorithm;
fn blob_cipher(&self) -> Algorithm;
fn blob_cipher_object(&self) -> Arc<Cipher>;
fn blob_cipher_context(&self) -> Option<CipherContext>;
fn blob_digester(&self) -> Algorithm;
fn is_legacy_stargz(&self) -> bool;
fn need_validation(&self) -> bool;
fn reader(&self) -> &dyn BlobReader;
fn get_chunk_map(&self) -> &Arc<dyn ChunkMap>;
fn get_chunk_info(&self, chunk_index: u32) -> Option<Arc<dyn BlobChunkInfo>>;
fn start_prefetch(&self) -> StorageResult<()>;
fn stop_prefetch(&self) -> StorageResult<()>;
fn is_prefetch_active(&self) -> bool;
fn prefetch(
&self,
cache: Arc<dyn BlobCache>,
prefetches: &[BlobPrefetchRequest],
bios: &[BlobIoDesc],
) -> StorageResult<usize>;
fn read(
&self,
iovec: &mut BlobIoVec,
buffers: &[FileVolatileSlice<'_>],
) -> Result<usize>;
// Provided methods
fn get_legacy_stargz_size(
&self,
offset: u64,
uncomp_size: usize,
) -> Result<usize> { ... }
fn is_zran(&self) -> bool { ... }
fn is_batch(&self) -> bool { ... }
fn get_blob_object(&self) -> Option<&dyn BlobObject> { ... }
fn prefetch_range(&self, _range: &BlobIoRange) -> Result<usize> { ... }
fn read_chunks_from_backend<'a, 'b>(
&'a self,
blob_offset: u64,
blob_size: usize,
chunks: &'b [Arc<dyn BlobChunkInfo>],
prefetch: bool,
) -> Result<ChunkDecompressState<'a, 'b>>
where Self: Sized { ... }
fn read_chunk_from_backend(
&self,
chunk: &dyn BlobChunkInfo,
buffer: &mut [u8],
) -> Result<Option<Vec<u8>>> { ... }
fn decompress_chunk_data(
&self,
raw_buffer: &[u8],
buffer: &mut [u8],
is_compressed: bool,
) -> Result<()> { ... }
fn validate_chunk_data(
&self,
chunk: &dyn BlobChunkInfo,
buffer: &[u8],
force_validation: bool,
) -> Result<usize> { ... }
fn check_digest(&self, chunk: &dyn BlobChunkInfo, buffer: &[u8]) -> bool { ... }
fn get_blob_meta_info(
&self,
) -> Result<Option<Arc<BlobCompressionContextInfo>>> { ... }
}

Description
Trait representing a cache object for a blob on backend storage.
The caller may use the BlobCache trait to access blob data on backend storage, with an
optional intermediate cache layer to improve performance.
Required Methods§
fn blob_uncompressed_size(&self) -> Result<u64>
Get size of the decompressed blob object.
fn blob_compressed_size(&self) -> Result<u64>
Get size of the compressed blob object.
fn blob_compressor(&self) -> Algorithm
Get data compression algorithm to handle chunks in the blob.
fn blob_cipher(&self) -> Algorithm
Get data encryption algorithm to handle chunks in the blob.
fn blob_cipher_object(&self) -> Arc<Cipher>
Cipher object to encrypt/decrypt chunk data.
fn blob_cipher_context(&self) -> Option<CipherContext>
Cipher context to encrypt/decrypt chunk data.
fn blob_digester(&self) -> Algorithm
Get message digest algorithm to handle chunks in the blob.
fn is_legacy_stargz(&self) -> bool
Check whether the cache object is for an stargz image with legacy chunk format.
fn need_validation(&self) -> bool
Check whether need to validate the data chunk by digest value.
fn reader(&self) -> &dyn BlobReader
Get the BlobReader to read data from storage backend.
fn get_chunk_map(&self) -> &Arc<dyn ChunkMap>
Get the underlying ChunkMap object.
fn get_chunk_info(&self, chunk_index: u32) -> Option<Arc<dyn BlobChunkInfo>>
Get the BlobChunkInfo object corresponding to chunk_index.
fn start_prefetch(&self) -> StorageResult<()>
Enable prefetching blob data in background.
It should be paired with stop_prefetch().
fn stop_prefetch(&self) -> StorageResult<()>
Stop prefetching blob data in background.
It should be paired with start_prefetch().
fn is_prefetch_active(&self) -> bool
Check whether background blob data prefetching is currently active.
fn prefetch(
    &self,
    cache: Arc<dyn BlobCache>,
    prefetches: &[BlobPrefetchRequest],
    bios: &[BlobIoDesc],
) -> StorageResult<usize>
Start to prefetch requested data in background.
Provided Methods§
fn get_legacy_stargz_size(
    &self,
    offset: u64,
    uncomp_size: usize,
) -> Result<usize>
Get maximum size of gzip compressed data.
fn get_blob_object(&self) -> Option<&dyn BlobObject>
Get a BlobObject instance to directly access uncompressed blob file.
fn prefetch_range(&self, _range: &BlobIoRange) -> Result<usize>
Execute filesystem data prefetch.
fn read_chunks_from_backend<'a, 'b>(
    &'a self,
    blob_offset: u64,
    blob_size: usize,
    chunks: &'b [Arc<dyn BlobChunkInfo>],
    prefetch: bool,
) -> Result<ChunkDecompressState<'a, 'b>>
where
    Self: Sized,
Read multiple chunks from the blob cache in batch mode.
This is an interface to optimize chunk data fetch performance by merging multiple continuous
chunks into one backend request. Callers must ensure that chunks in chunks covers a
continuous range, and the range exactly matches [blob_offset..blob_offset + blob_size].
Function read_chunks_from_backend() returns one buffer containing decompressed chunk data
for each entry in the chunks array in corresponding order.
This method returns success only if all requested data are successfully fetched.
fn read_chunk_from_backend(
    &self,
    chunk: &dyn BlobChunkInfo,
    buffer: &mut [u8],
) -> Result<Option<Vec<u8>>>
Read a whole chunk directly from the storage backend.
The fetched chunk data may be compressed or encrypted or not, which depends on chunk information
from chunk. Moreover, chunk data from backend storage may be validated per user’s configuration.
fn decompress_chunk_data(
    &self,
    raw_buffer: &[u8],
    buffer: &mut [u8],
    is_compressed: bool,
) -> Result<()>
Decompress chunk data.
fn validate_chunk_data(
    &self,
    chunk: &dyn BlobChunkInfo,
    buffer: &[u8],
    force_validation: bool,
) -> Result<usize>
Validate chunk data.