stable_fs/storage/
stable.rs

1use std::{collections::HashMap, ops::Range};
2
3use crate::storage::types::ZEROES;
4use ic_cdk::api::stable::WASM_PAGE_SIZE_IN_BYTES;
5use ic_stable_structures::{
6    BTreeMap, Cell, Memory,
7    memory_manager::{MemoryId, MemoryManager, VirtualMemory},
8};
9
10use crate::{
11    runtime::structure_helpers::{read_obj, write_obj},
12    storage::ptr_cache::CachedChunkPtr,
13};
14
15use crate::{
16    error::Error,
17    runtime::{
18        structure_helpers::{get_chunk_infos, grow_memory},
19        types::ChunkSize,
20        types::ChunkType,
21    },
22};
23
24use super::{
25    Storage,
26    allocator::ChunkPtrAllocator,
27    chunk_iterator::ChunkV2Iterator,
28    metadata_provider::MetadataProvider,
29    ptr_cache::PtrCache,
30    types::{
31        DUMMY_DOT_DOT_ENTRY, DUMMY_DOT_DOT_ENTRY_INDEX, DUMMY_DOT_ENTRY, DUMMY_DOT_ENTRY_INDEX,
32        DirEntry, DirEntryIndex, FILE_CHUNK_SIZE_V1, FileChunk, FileChunkIndex, FileChunkPtr,
33        FileSize, FileType, Header, MAX_FILE_CHUNK_COUNT, MAX_FILE_ENTRY_INDEX, MAX_FILE_SIZE,
34        Metadata, Node, Times,
35    },
36};
37
// Node id of the filesystem root directory.
pub const ROOT_NODE: Node = 0;
// On-disk format version stored in the header; loading any other version panics.
const FS_VERSION: u32 = 1;

// first memory id used when the storage creates its own memory manager (see `new`)
const DEFAULT_FIRST_MEMORY_INDEX: u8 = 229;

// the maximum index accepted as the end range
const MAX_MEMORY_INDEX: u8 = 254;

// the number of memory indices used by the file system
// (currently 9 ids in StorageMemoryIdx plus one reserved id)
const MEMORY_INDEX_COUNT: u8 = 10;

// byte offset inside the cache journal memory containing cached metadata (deprecated)
const MOUNTED_META_PTR: u64 = 16;
51
// Sub-memory ids of the file system, relative to the first index of the
// range passed to `new_with_memory_manager`.
enum StorageMemoryIdx {
    Header = 0,
    Metadata = 1,

    DirEntries = 2,
    FileChunksV1 = 3,

    // metadata for mounted files
    MountedMetadata = 4,

    // V2 chunks
    FileChunksV2 = 5,
    ChunkAllocatorV2 = 6,
    FileChunksMemoryV2 = 7,

    // caching helper
    CacheJournal = 8,
}
70
// Bundle of the virtual memories backing every part of the storage;
// one memory per StorageMemoryIdx entry.
struct StorageMemories<M: Memory> {
    header_memory: VirtualMemory<M>,
    metadata_memory: VirtualMemory<M>,
    direntry_memory: VirtualMemory<M>,
    filechunk_memory: VirtualMemory<M>,

    // metadata of mounted files (kept separately from regular metadata)
    mounted_meta_memory: VirtualMemory<M>,

    // V2 chunk storage: pointer map, raw chunk bytes, and the allocator state
    v2_chunk_ptr_memory: VirtualMemory<M>,
    v2_chunks_memory: VirtualMemory<M>,
    v2_allocator_memory: VirtualMemory<M>,

    // deprecated journal, only read on startup to recover old mounted metadata
    cache_journal: VirtualMemory<M>,
}
85
#[repr(C)]
pub struct V2FileChunks<M: Memory> {
    // the file chunk storage V2, we only store pointers to reduce serialization overheads.
    pub(crate) v2_chunk_ptr: BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory<M>>,
    // the actual storage of the chunks,
    // * we can read and write small fragments of data, no need to read and write in chunk-sized blocks
    // * the pointers in the BTreeMap (Node, FileChunkIndex) -> FileChunkPtr are static,
    //   this allows caching to avoid chunk search overheads.
    pub(crate) v2_chunks: VirtualMemory<M>,
    // keeps information on the chunks currently available.
    // it can be setup to work with different chunk sizes.
    // 4K - the same as chunks V1, 16K - the default, 64K - the biggest chunk size available.
    // the increased chunk size reduces the number of BTree insertions, and increases the performance.
    pub(crate) v2_allocator: ChunkPtrAllocator<M>,
}
101
#[repr(C)]
pub struct StableStorage<M: Memory> {
    // some static-sized filesystem data, contains version number and the next node id.
    header: Cell<Header, VirtualMemory<M>>,
    // information about the directory structure.
    direntry: BTreeMap<(Node, DirEntryIndex), DirEntry, VirtualMemory<M>>,
    // actual file data stored in chunks inside the BTreeMap (legacy V1 layout).
    filechunk: BTreeMap<(Node, FileChunkIndex), FileChunk, VirtualMemory<M>>,

    // file data stored in V2 file chunks
    pub(crate) v2_filechunk: V2FileChunks<M>,

    // helper object managing file metadata access of all types
    meta_provider: MetadataProvider<M>,

    // It is not used directly, but is needed to keep the memories alive.
    _memory_manager: Option<MemoryManager<M>>,
    // active mounts: node id -> memory serving the file contents instead of chunks.
    active_mounts: HashMap<Node, Box<dyn Memory>>,

    // chunk type to use when creating new files.
    chunk_type: ChunkType,

    // chunk pointer cache. This cache reduces chunk search overhead when reading a file,
    // or writing a file over existing data. (new files still need to insert new pointers
    // into the treemap, hence that path is slower)
    pub(crate) ptr_cache: PtrCache,
}
129
130impl<M: Memory> StableStorage<M> {
131    pub fn new(memory: M) -> Self {
132        let memory_manager = MemoryManager::init(memory);
133
134        let mut storage = Self::new_with_memory_manager(
135            &memory_manager,
136            DEFAULT_FIRST_MEMORY_INDEX..DEFAULT_FIRST_MEMORY_INDEX + MEMORY_INDEX_COUNT,
137        );
138
139        storage._memory_manager = Some(memory_manager);
140
141        storage
142    }
143
144    pub fn new_with_memory_manager(
145        memory_manager: &MemoryManager<M>,
146        memory_indices: Range<u8>,
147    ) -> StableStorage<M> {
148        if memory_indices.end - memory_indices.start < MEMORY_INDEX_COUNT {
149            panic!(
150                "The memory index range must include at least {} incides",
151                MEMORY_INDEX_COUNT
152            );
153        }
154
155        if memory_indices.end > MAX_MEMORY_INDEX {
156            panic!(
157                "Last memory index must be less than or equal to {}",
158                MAX_MEMORY_INDEX
159            );
160        }
161
162        let header_memory = memory_manager.get(MemoryId::new(
163            memory_indices.start + StorageMemoryIdx::Header as u8,
164        ));
165        let metadata_memory = memory_manager.get(MemoryId::new(
166            memory_indices.start + StorageMemoryIdx::Metadata as u8,
167        ));
168        let direntry_memory = memory_manager.get(MemoryId::new(
169            memory_indices.start + StorageMemoryIdx::DirEntries as u8,
170        ));
171        let filechunk_memory = memory_manager.get(MemoryId::new(
172            memory_indices.start + StorageMemoryIdx::FileChunksV1 as u8,
173        ));
174        let mounted_meta_memory = memory_manager.get(MemoryId::new(
175            memory_indices.start + StorageMemoryIdx::MountedMetadata as u8,
176        ));
177
178        let v2_chunk_ptr_memory = memory_manager.get(MemoryId::new(
179            memory_indices.start + StorageMemoryIdx::FileChunksV2 as u8,
180        ));
181        let v2_allocator_memory = memory_manager.get(MemoryId::new(
182            memory_indices.start + StorageMemoryIdx::ChunkAllocatorV2 as u8,
183        ));
184        let v2_chunks_memory = memory_manager.get(MemoryId::new(
185            memory_indices.start + StorageMemoryIdx::FileChunksMemoryV2 as u8,
186        ));
187
188        let cache_journal = memory_manager.get(MemoryId::new(
189            memory_indices.start + StorageMemoryIdx::CacheJournal as u8,
190        ));
191
192        let memories = StorageMemories {
193            header_memory,
194            metadata_memory,
195            direntry_memory,
196            filechunk_memory,
197            mounted_meta_memory,
198            v2_chunk_ptr_memory,
199            v2_chunks_memory,
200            v2_allocator_memory,
201            cache_journal,
202        };
203
204        Self::new_with_custom_memories(memories)
205    }
206
    // Support deprecated storage: recover mounted file metadata cached in the
    // (now unused) cache journal memory and migrate it into the metadata provider.
    //
    // The journal stores, at MOUNTED_META_PTR, the mounted node id (u64) followed
    // by a legacy-layout metadata record. After a successful migration the node id
    // slot is overwritten with u64::MAX so the migration runs only once.
    fn init_size_from_cache_journal(&mut self, journal: &VirtualMemory<M>) {
        // re-define old Metadata type for correct reading
        #[derive(Clone, Default, PartialEq)]
        pub struct MetadataLegacy {
            pub node: Node,
            pub file_type: FileType,
            pub link_count: u64,
            pub size: FileSize,
            pub times: Times,
            pub first_dir_entry: Option<DirEntryIndex>,
            pub last_dir_entry: Option<DirEntryIndex>,
            pub chunk_type: Option<ChunkType>,
        }

        // try recover stored mounted metadata (if any);
        // a zero-sized journal memory means there is nothing to migrate
        if journal.size() > 0 {
            let mut mounted_node = 0u64;
            let mut mounted_meta = MetadataLegacy::default();

            // node id first, then the legacy metadata record 8 bytes later
            read_obj(journal, MOUNTED_META_PTR, &mut mounted_node);

            read_obj(journal, MOUNTED_META_PTR + 8, &mut mounted_meta);

            // convert the legacy record into the current Metadata layout
            // (the new maximum_size_allowed field did not exist back then)
            let meta_read = Metadata {
                node: mounted_meta.node,
                file_type: FileType::RegularFile,
                link_count: mounted_meta.link_count,
                size: mounted_meta.size,
                times: mounted_meta.times,
                first_dir_entry: mounted_meta.first_dir_entry,
                last_dir_entry: mounted_meta.last_dir_entry,
                chunk_type: mounted_meta.chunk_type,
                maximum_size_allowed: None,
            };

            // u64::MAX marks an already-migrated (or empty) journal;
            // the node ids must agree for the record to be considered valid
            if mounted_node != u64::MAX && mounted_node == mounted_meta.node {
                // immediately store the recovered metadata
                self.meta_provider.put_metadata(
                    mounted_node,
                    true,
                    &meta_read,
                    None,
                    &mut self.v2_filechunk,
                );

                // reset cached metadata so the migration never repeats
                write_obj(journal, MOUNTED_META_PTR, &(u64::MAX as Node));
            }
        }
    }
259
    // Assemble the storage from the prepared virtual memories, run the legacy
    // journal migration, and validate the stored file system version.
    //
    // # Panics
    // Panics if the header holds a version other than FS_VERSION.
    fn new_with_custom_memories(memories: StorageMemories<M>) -> Self {
        // header written on first use: current version, node ids start after the root
        let default_header_value = Header {
            version: FS_VERSION,
            next_node: ROOT_NODE + 1,
        };

        let v2_allocator = ChunkPtrAllocator::new(memories.v2_allocator_memory).unwrap();
        let ptr_cache = PtrCache::new();
        let v2_chunk_ptr = BTreeMap::init(memories.v2_chunk_ptr_memory);

        let meta_provider =
            MetadataProvider::new(memories.metadata_memory, memories.mounted_meta_memory);

        let mut result = Self {
            header: Cell::init(memories.header_memory, default_header_value).unwrap(),
            direntry: BTreeMap::init(memories.direntry_memory),
            filechunk: BTreeMap::init(memories.filechunk_memory),

            v2_filechunk: V2FileChunks {
                v2_chunk_ptr,
                v2_chunks: memories.v2_chunks_memory,
                v2_allocator,
            },

            // transient runtime data
            _memory_manager: None,
            active_mounts: HashMap::new(),

            // default chunk type is V2
            chunk_type: ChunkType::V2,

            ptr_cache,

            meta_provider,
        };

        // init mounted drive (migrate deprecated cached metadata, if present)
        result.init_size_from_cache_journal(&memories.cache_journal);

        // NOTE(review): the version check runs after the journal migration above;
        // presumably the journal format is version-independent — confirm
        let version = result.header.get().version;

        if version != FS_VERSION {
            panic!("Unsupported file system version");
        }

        result
    }
307
308    // write into mounted memory
309    fn write_mounted(&self, memory: &dyn Memory, offset: FileSize, buf: &[u8]) -> FileSize {
310        let length_to_write = buf.len() as FileSize;
311
312        // grow memory if needed
313        let max_address = offset as FileSize + length_to_write;
314
315        grow_memory(memory, max_address);
316
317        memory.write(offset, buf);
318
319        length_to_write
320    }
321
322    // Insert of update a selected file chunk with the data provided in a buffer.
323    fn write_filechunk_v1(
324        &mut self,
325        node: Node,
326        index: FileChunkIndex,
327        offset: FileSize,
328        buf: &[u8],
329    ) {
330        let mut entry = self.filechunk.get(&(node, index)).unwrap_or_default();
331        entry.bytes[offset as usize..offset as usize + buf.len()].copy_from_slice(buf);
332        self.filechunk.insert((node, index), entry);
333    }
334
    // Write `buf` at `offset` into the V2 chunks of `node`, allocating and
    // zero-padding any chunks that do not exist yet.
    //
    // Returns the number of bytes written.
    fn write_chunks_v2(
        &mut self,
        node: Node,
        offset: FileSize,
        buf: &[u8],
    ) -> Result<FileSize, Error> {
        let mut remainder = buf.len() as FileSize;
        let last_address = offset + remainder;

        let chunk_size = self.chunk_size();

        let start_index = (offset / chunk_size as FileSize) as FileChunkIndex;

        // offset within the first touched chunk
        let mut chunk_offset = offset - start_index as FileSize * chunk_size as FileSize;

        let mut size_written: FileSize = 0;

        // iterate cached chunk pointers over the affected index range
        let write_iter = ChunkV2Iterator::new(
            node,
            offset,
            last_address,
            self.chunk_size() as FileSize,
            &mut self.ptr_cache,
            &mut self.v2_filechunk.v2_chunk_ptr,
        );

        // collect first: the loop body needs mutable access to the fields the
        // iterator borrows (ptr_cache, v2_chunk_ptr)
        let write_iter: Vec<_> = write_iter.collect();

        for ((nd, index), chunk_ptr) in write_iter {
            assert!(nd == node);

            if remainder == 0 {
                break;
            }

            // bytes to place into this chunk: limited by the remaining request,
            // the space left in the chunk, and the bytes left in buf
            let to_write = remainder
                .min(chunk_size as FileSize - chunk_offset)
                .min(buf.len() as FileSize - size_written);

            let write_buf =
                &buf[size_written as usize..(size_written as usize + to_write as usize)];

            let chunk_ptr = if let CachedChunkPtr::ChunkExists(ptr) = chunk_ptr {
                ptr
            } else {
                // insert new chunk
                let ptr = self.v2_filechunk.v2_allocator.allocate();

                grow_memory(&self.v2_filechunk.v2_chunks, ptr + chunk_size as FileSize);

                // fill new chunk with zeroes (apart from the area that will be overwritten)

                // fill before written content
                self.v2_filechunk
                    .v2_chunks
                    .write(ptr, &ZEROES[0..chunk_offset as usize]);

                // fill after written content
                self.v2_filechunk.v2_chunks.write(
                    ptr + chunk_offset + to_write as FileSize,
                    &ZEROES[0..(chunk_size - chunk_offset as usize - to_write as usize)],
                );

                // register new chunk pointer
                self.v2_filechunk.v2_chunk_ptr.insert((node, index), ptr);

                // remember the new pointer so subsequent accesses skip the BTree lookup
                self.ptr_cache
                    .add(vec![((node, index), CachedChunkPtr::ChunkExists(ptr))]);

                ptr
            };

            // growing here is not required: the memory was grown either above
            // (new chunk) or when the chunk was first allocated
            self.v2_filechunk
                .v2_chunks
                .write(chunk_ptr + chunk_offset, write_buf);

            // only the first chunk starts mid-chunk; later chunks start at 0
            chunk_offset = 0;
            size_written += to_write;
            remainder -= to_write;
        }

        Ok(size_written)
    }
421
    // Read file contents stored in V1 chunks into `buf`, starting at `offset`.
    // Gaps (missing chunks) read as zeroes. Returns the number of bytes read,
    // bounded by both the buffer length and `file_size`.
    fn read_chunks_v1(
        &self,
        node: Node,
        offset: FileSize,
        file_size: FileSize,
        buf: &mut [u8],
    ) -> Result<FileSize, Error> {
        let start_index = (offset / FILE_CHUNK_SIZE_V1 as FileSize) as FileChunkIndex;
        let end_index = ((offset + buf.len() as FileSize) / FILE_CHUNK_SIZE_V1 as FileSize + 1)
            as FileChunkIndex;

        // offset within the first touched chunk
        let mut chunk_offset = offset - start_index as FileSize * FILE_CHUNK_SIZE_V1 as FileSize;

        let range = (node, start_index)..(node, MAX_FILE_CHUNK_COUNT);

        let mut size_read: FileSize = 0;
        let mut remainder = file_size - offset;

        // single ordered scan over the stored chunks; cur_fetched holds the
        // chunk most recently pulled from the iterator but not yet consumed
        let mut iter = self.filechunk.range(range);
        let mut cur_fetched = None;

        for cur_index in start_index..end_index {
            let chunk_space = FILE_CHUNK_SIZE_V1 as FileSize - chunk_offset;

            let to_read = remainder
                .min(chunk_space)
                .min(buf.len() as FileSize - size_read);

            // finished reading, buffer full
            if size_read == buf.len() as FileSize {
                break;
            }

            if cur_fetched.is_none() {
                cur_fetched = iter.next();
            }

            let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];

            if let Some(((nd, idx), ref value)) = cur_fetched {
                if idx == cur_index {
                    assert!(nd == node);

                    read_buf.copy_from_slice(
                        &value.bytes
                            [chunk_offset as usize..chunk_offset as usize + to_read as usize],
                    );

                    // consume token: the next loop iteration fetches a new chunk
                    cur_fetched = None;
                } else {
                    // chunk at cur_index is missing (sparse file): fill up with zeroes
                    read_buf.iter_mut().for_each(|m| *m = 0)
                }
            } else {
                // no chunks left at all: fill up with zeroes
                read_buf.iter_mut().for_each(|m| *m = 0)
            }

            // only the first chunk starts mid-chunk
            chunk_offset = 0;
            size_read += to_read;
            remainder -= to_read;
        }

        Ok(size_read)
    }
488
    // Read file contents stored in V2 chunks into `buf`, starting at `offset`.
    // Missing chunks (sparse areas) read as zeroes. Returns the number of bytes
    // read, bounded by both the buffer length and `file_size`.
    //
    // Takes &mut self because the chunk pointer cache is updated while reading.
    fn read_chunks_v2(
        &mut self,
        node: Node,
        offset: FileSize,
        file_size: FileSize,
        buf: &mut [u8],
    ) -> Result<FileSize, Error> {
        // early exit if nothing left to read
        if offset >= file_size {
            return Ok(0 as FileSize);
        }

        // compute remainder to read
        let mut remainder = file_size - offset;

        let chunk_size = self.chunk_size();

        let start_index = (offset / chunk_size as FileSize) as FileChunkIndex;

        // offset within the first touched chunk
        let mut chunk_offset = offset - start_index as FileSize * chunk_size as FileSize;

        //let end_index = ((offset + buf.len() as FileSize) / chunk_size as FileSize + 1) as FileChunkIndex;
        //let mut range = (node, start_index)..(node, end_index);

        let mut size_read: FileSize = 0;

        // iterate cached chunk pointers over the affected index range
        let read_iter = ChunkV2Iterator::new(
            node,
            offset,
            file_size,
            chunk_size as FileSize,
            &mut self.ptr_cache,
            &mut self.v2_filechunk.v2_chunk_ptr,
        );

        for ((nd, _idx), cached_chunk) in read_iter {
            assert!(nd == node);

            // finished reading, buffer full
            if size_read == buf.len() as FileSize {
                break;
            }

            let chunk_space = chunk_size as FileSize - chunk_offset;

            let to_read = remainder
                .min(chunk_space)
                .min(buf.len() as FileSize - size_read);

            let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];

            if let CachedChunkPtr::ChunkExists(cptr) = cached_chunk {
                self.v2_filechunk
                    .v2_chunks
                    .read(cptr + chunk_offset, read_buf);
            } else {
                // chunk missing (sparse area): fill read buffer with 0
                read_buf.iter_mut().for_each(|m| *m = 0)
            }

            // only the first chunk starts mid-chunk
            chunk_offset = 0;
            size_read += to_read;
            remainder -= to_read;
        }

        Ok(size_read)
    }
556
557    fn use_v2(&mut self, metadata: &Metadata, node: u64) -> bool {
558        // decide if we use v2 chunks for reading/writing
559        let use_v2 = match metadata.chunk_type {
560            Some(ChunkType::V2) => true,
561            Some(ChunkType::V1) => false,
562
563            // try to figure out, which chunk type to use
564            None => {
565                if metadata.size > 0 {
566                    // try to find any v2 node, othersize use v1
567                    let ptr = self
568                        .v2_filechunk
569                        .v2_chunk_ptr
570                        .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
571                        .next();
572
573                    ptr.is_some()
574                } else {
575                    self.chunk_type() == ChunkType::V2
576                }
577            }
578        };
579        use_v2
580    }
581
582    fn validate_metadata_update(
583        old_meta: Option<&Metadata>,
584        new_meta: &Metadata,
585    ) -> Result<(), Error> {
586        if let Some(old_meta) = old_meta {
587            // do not allow changing file type
588            if old_meta.file_type != new_meta.file_type {
589                return Err(Error::FunctionNotSupported);
590            }
591        }
592
593        if let Some(old_meta) = old_meta {
594            // changing node is not allowed
595            if old_meta.node != new_meta.node {
596                return Err(Error::IllegalByteSequence);
597            }
598        }
599
600        if let Some(max_size) = new_meta.maximum_size_allowed {
601            if new_meta.size > max_size {
602                return Err(Error::FileTooLarge);
603            }
604        }
605
606        Ok(())
607    }
608
609    fn resize_file_internal(&mut self, node: Node, new_size: FileSize) -> Result<(), Error> {
610        // anyone calling this function should also clear pointer cache for this node
611
612        if self.is_mounted(node) {
613            // for the mounted node we only update file size in the metadata (no need to delete chunks)
614            return Ok(());
615        }
616
617        // delete v1 chunks
618        let chunk_size = FILE_CHUNK_SIZE_V1;
619
620        let first_deletable_index = (new_size.div_ceil(chunk_size as FileSize)) as FileChunkIndex;
621
622        let range = (node, first_deletable_index)..(node, MAX_FILE_CHUNK_COUNT);
623
624        let mut chunks: Vec<(Node, FileChunkIndex)> = Vec::new();
625
626        for (k, _v) in self.filechunk.range(range) {
627            chunks.push(k);
628        }
629
630        for (nd, idx) in chunks.into_iter() {
631            assert!(nd == node);
632            self.filechunk.remove(&(node, idx));
633        }
634
635        // fill with zeros the last chunk memory above the file size
636        if first_deletable_index > 0 {
637            let offset = new_size as FileSize % chunk_size as FileSize;
638
639            self.write_filechunk_v1(
640                node,
641                first_deletable_index - 1,
642                offset,
643                &ZEROES[0..(chunk_size - offset as usize)],
644            );
645        }
646
647        // delete v2 chunks
648
649        let chunk_size = self.chunk_size();
650
651        let first_deletable_index = (new_size.div_ceil(chunk_size as FileSize)) as FileChunkIndex;
652
653        let range = (node, first_deletable_index)..(node, MAX_FILE_CHUNK_COUNT);
654        let mut chunks: Vec<(Node, FileChunkIndex)> = Vec::new();
655        for (k, _v) in self.v2_filechunk.v2_chunk_ptr.range(range) {
656            chunks.push(k);
657        }
658
659        for (nd, idx) in chunks.into_iter() {
660            assert!(nd == node);
661            let removed = self.v2_filechunk.v2_chunk_ptr.remove(&(node, idx));
662
663            if let Some(removed) = removed {
664                self.v2_filechunk.v2_allocator.free(removed);
665            }
666        }
667
668        // fill with zeros the last chunk memory above the file size
669        if first_deletable_index > 0 {
670            let offset = new_size as FileSize % chunk_size as FileSize;
671            self.write_chunks_v2(node, new_size, &ZEROES[0..(chunk_size - offset as usize)])?;
672        }
673
674        Ok(())
675    }
676}
677
678impl<M: Memory> Storage for StableStorage<M> {
    // Get the root node ID of the storage (always ROOT_NODE).
    fn root_node(&self) -> Node {
        ROOT_NODE
    }
683
684    // Generate the next available node ID.
685    fn new_node(&mut self) -> Node {
686        let mut header = self.header.get().clone();
687
688        let result = header.next_node;
689
690        header.next_node += 1;
691
692        self.header.set(header).unwrap();
693
694        result
695    }
696
697    fn get_version(&self) -> u32 {
698        let header = self.header.get();
699        header.version
700    }
701
702    // Get the metadata associated with the node.
703    fn get_metadata(&self, node: Node) -> Result<Metadata, Error> {
704        self.meta_provider
705            .get_metadata(
706                node,
707                self.is_mounted(node),
708                &self.v2_filechunk.v2_chunk_ptr,
709                &self.v2_filechunk.v2_chunks,
710            )
711            .map(|x| x.0)
712            .ok_or(Error::NoSuchFileOrDirectory)
713    }
714
    // Update the metadata associated with the node.
    //
    // Validates the update against the existing record (file type and node id
    // must not change, the optional size limit must hold). If the file size
    // shrinks, the chunks above the new size are deleted first.
    fn put_metadata(&mut self, node: Node, metadata: &Metadata) -> Result<(), Error> {
        let is_mounted = self.is_mounted(node);

        let meta_rec = self.meta_provider.get_metadata(
            node,
            is_mounted,
            &self.v2_filechunk.v2_chunk_ptr,
            &self.v2_filechunk.v2_chunks,
        );

        // split the record into the old metadata and its storage pointer
        let (old_meta, meta_ptr) = match meta_rec.as_ref() {
            Some((m, p)) => (Some(m), *p),
            None => (None, None),
        };

        Self::validate_metadata_update(old_meta, metadata)?;

        if let Some(old_meta) = old_meta {
            // if the size was reduced, we need to delete the file chunks above the file size
            if metadata.size < old_meta.size {
                // cached chunk pointers may refer to deleted chunks afterwards
                self.ptr_cache.clear();
                self.resize_file_internal(node, metadata.size)?;
            }
        }

        // passing meta_ptr lets the provider overwrite the record in place
        self.meta_provider.put_metadata(
            node,
            is_mounted,
            metadata,
            meta_ptr,
            &mut self.v2_filechunk,
        );

        Ok(())
    }
751
752    // Retrieve the DirEntry instance given the Node and DirEntryIndex.
753    fn get_direntry(&self, node: Node, index: DirEntryIndex) -> Result<DirEntry, Error> {
754        self.direntry
755            .get(&(node, index))
756            .ok_or(Error::NoSuchFileOrDirectory)
757    }
758
    // List the directory entries of `node` starting from `initial_index`
    // (or from the beginning when None), prepending the synthetic "." and ".."
    // entries where the starting index calls for them.
    fn get_direntries(
        &self,
        node: Node,
        initial_index: Option<DirEntryIndex>,
    ) -> Result<Vec<(DirEntryIndex, DirEntry)>, Error> {
        let mut res = Vec::new();

        // listing from the start: emit "." (pointing at this node) and ".."
        if initial_index.is_none() {
            let mut dot_entry = DUMMY_DOT_ENTRY;
            dot_entry.1.node = node;
            res.push(dot_entry);
            res.push(DUMMY_DOT_DOT_ENTRY);
        }

        let initial_index = initial_index.unwrap_or(0);

        // NOTE(review): if DUMMY_DOT_ENTRY_INDEX were 0, a None initial_index
        // would emit the dot entries twice (here and above) — presumably the
        // dummy indices are non-zero sentinels; confirm against types.rs
        if initial_index == DUMMY_DOT_ENTRY_INDEX {
            let mut dot_entry = DUMMY_DOT_ENTRY;
            dot_entry.1.node = node;
            res.push(dot_entry);
            res.push(DUMMY_DOT_DOT_ENTRY);
        }

        // resuming exactly at "..": emit only that entry
        if initial_index == DUMMY_DOT_DOT_ENTRY_INDEX {
            res.push(DUMMY_DOT_DOT_ENTRY);
        }

        let max_index = MAX_FILE_ENTRY_INDEX;

        // append the real entries stored for this node, in index order
        for ((_node, index), entry) in self
            .direntry
            .range((node, initial_index)..(node, max_index))
        {
            res.push((index, entry));
        }

        Ok(res)
    }
797
    // Update or insert the DirEntry instance given the Node and DirEntryIndex.
    fn put_direntry(&mut self, node: Node, index: DirEntryIndex, entry: DirEntry) {
        self.direntry.insert((node, index), entry);
    }
802
    // Remove the DirEntry instance given the Node and DirEntryIndex.
    fn rm_direntry(&mut self, node: Node, index: DirEntryIndex) {
        self.direntry.remove(&(node, index));
    }
807
    // Fill the buffer contents with data of a chosen data range.
    //
    // Reads from the mounted memory when the node is mounted, otherwise from
    // V1 or V2 chunks depending on the node's chunk type. Returns the number
    // of bytes read (0 when `offset` is at or past the end of the file).
    fn read(&mut self, node: Node, offset: FileSize, buf: &mut [u8]) -> Result<FileSize, Error> {
        let metadata = self.get_metadata(node)?;

        // never read past the configured maximum file size
        let max_size = metadata.maximum_size_allowed.unwrap_or(MAX_FILE_SIZE);
        let file_size = metadata.size.min(max_size);

        if offset >= file_size {
            return Ok(0);
        }

        let size_read = if let Some(memory) = self.active_mounts.get(&node) {
            let remainder = file_size - offset;
            let to_read = remainder.min(buf.len() as FileSize);

            // grow memory also for reading (the mounted memory may be shorter
            // than the recorded file size)
            grow_memory(memory.as_ref(), offset + to_read);

            memory.read(offset, &mut buf[..to_read as usize]);
            to_read
        } else {
            let use_v2 = self.use_v2(&metadata, node);

            if use_v2 {
                self.read_chunks_v2(node, offset, file_size, buf)?
            } else {
                self.read_chunks_v1(node, offset, file_size, buf)?
            }
        };

        Ok(size_read)
    }
840
    // Write file at the current file cursor, the cursor position will NOT be updated after writing.
    //
    // Writes into the mounted memory when the node is mounted, otherwise into
    // V1 or V2 chunks depending on the node's chunk type. Grows the recorded
    // file size when the write extends past the current end.
    //
    // Returns Error::FileTooLarge when the write would exceed the node's
    // maximum allowed size.
    fn write(&mut self, node: Node, offset: FileSize, buf: &[u8]) -> Result<FileSize, Error> {
        let mut metadata = self.get_metadata(node)?;

        // do not attempt to write 0 bytes to avoid file resize (when writing above file size)
        if buf.is_empty() {
            return Ok(0);
        }

        let max_size = metadata.maximum_size_allowed.unwrap_or(MAX_FILE_SIZE);

        if offset + buf.len() as FileSize > max_size {
            return Err(Error::FileTooLarge);
        }

        let written_size = if let Some(memory) = self.get_mounted_memory(node) {
            self.write_mounted(memory, offset, buf);

            buf.len() as FileSize
        } else {
            let end = offset + buf.len() as FileSize;

            let use_v2 = self.use_v2(&metadata, node);

            if use_v2 {
                self.write_chunks_v2(node, offset, buf)?
            } else {
                // split the write range into per-chunk segments
                let chunk_infos = get_chunk_infos(offset, end, FILE_CHUNK_SIZE_V1);

                let mut written = 0usize;

                for chunk in chunk_infos.into_iter() {
                    self.write_filechunk_v1(
                        node,
                        chunk.index,
                        chunk.offset,
                        &buf[written..(written + chunk.len as usize)],
                    );

                    written += chunk.len as usize;
                }

                written as FileSize
            }
        };

        // extend the recorded file size if the write went past the old end
        let end = offset + buf.len() as FileSize;
        if end > metadata.size {
            metadata.size = end;
            self.put_metadata(node, &metadata)?;
        }

        Ok(written_size)
    }
895
896    fn resize_file(&mut self, node: Node, new_size: FileSize) -> Result<(), Error> {
897        let mut meta = self.get_metadata(node)?;
898
899        meta.size = new_size;
900
901        self.put_metadata(node, &meta)
902    }
903
    // Delete the file: truncate its contents to zero, then remove its metadata.
    // Fails with DeviceOrResourceBusy while the node is mounted.
    fn rm_file(&mut self, node: Node) -> Result<(), Error> {
        if self.is_mounted(node) {
            return Err(Error::DeviceOrResourceBusy);
        }

        // truncating first releases all chunks of the file
        self.resize_file(node, 0)?;

        self.meta_provider.remove_metadata(
            node,
            &mut self.ptr_cache,
            &mut self.filechunk,
            &mut self.v2_filechunk.v2_chunk_ptr,
            &mut self.v2_filechunk.v2_allocator,
        );

        Ok(())
    }
922
923    fn mount_node(&mut self, node: Node, memory: Box<dyn Memory>) -> Result<(), Error> {
924        if self.is_mounted(node) {
925            return Err(Error::DeviceOrResourceBusy);
926        }
927
928        // do extra meta preparation
929        // get the file metadata (we are not mounted at this point)
930        let mut file_meta = self.get_metadata(node)?;
931
932        // activate mount
933        self.active_mounts.insert(node, memory);
934
935        if let Ok(_old_mounted_meta) = self.get_metadata(node) {
936            // do nothing, we already have the metadata
937        } else {
938            // take a copy of the file meta, set the size to 0 by default
939            file_meta.size = 0;
940
941            // update mounted metadata
942            self.put_metadata(node, &file_meta)?;
943        };
944
945        Ok(())
946    }
947
948    fn unmount_node(&mut self, node: Node) -> Result<Box<dyn Memory>, Error> {
949        let memory = self.active_mounts.remove(&node);
950
951        memory.ok_or(Error::NoSuchDevice)
952    }
953
954    fn is_mounted(&self, node: Node) -> bool {
955        self.active_mounts.contains_key(&node)
956    }
957
958    fn get_mounted_memory(&self, node: Node) -> Option<&dyn Memory> {
959        let res: Option<&Box<dyn Memory>> = self.active_mounts.get(&node);
960
961        res.map(|b| b.as_ref())
962    }
963
964    fn init_mounted_memory(&mut self, node: Node) -> Result<(), Error> {
965        // temporary disable mount to activate access to the original file
966        let memory = self.unmount_node(node)?;
967
968        let meta = self.get_metadata(node)?;
969        let file_size = meta.size;
970
971        // grow memory if needed
972        grow_memory(memory.as_ref(), file_size);
973
974        let mut remainder = file_size;
975
976        let mut buf = [0u8; WASM_PAGE_SIZE_IN_BYTES as usize];
977
978        let mut offset = 0;
979
980        while remainder > 0 {
981            let to_read = remainder.min(buf.len() as FileSize);
982
983            self.read(node, offset, &mut buf[..to_read as usize])?;
984
985            memory.write(offset, &buf[..to_read as usize]);
986
987            offset += to_read;
988            remainder -= to_read;
989        }
990
991        self.mount_node(node, memory)?;
992
993        self.put_metadata(node, &meta)?;
994
995        Ok(())
996    }
997
998    fn store_mounted_memory(&mut self, node: Node) -> Result<(), Error> {
999        // get current size of the mounted memory
1000        let meta = self.get_metadata(node)?;
1001        let file_size = meta.size;
1002
1003        // temporary disable mount to activate access to the original file
1004        let memory = self.unmount_node(node)?;
1005
1006        // grow memory if needed
1007        grow_memory(memory.as_ref(), file_size);
1008
1009        let mut remainder = file_size;
1010
1011        let mut buf = [0u8; WASM_PAGE_SIZE_IN_BYTES as usize];
1012
1013        let mut offset = 0;
1014
1015        while remainder > 0 {
1016            let to_read = remainder.min(buf.len() as FileSize);
1017
1018            // grow memory also for reading
1019            grow_memory(memory.as_ref(), offset + to_read);
1020
1021            memory.read(offset, &mut buf[..to_read as usize]);
1022
1023            self.write(node, offset, &buf[..to_read as usize])?;
1024
1025            offset += to_read;
1026            remainder -= to_read;
1027        }
1028
1029        self.put_metadata(node, &meta)?;
1030
1031        self.mount_node(node, memory)?;
1032
1033        Ok(())
1034    }
1035
1036    fn set_chunk_size(&mut self, chunk_size: ChunkSize) -> Result<(), Error> {
1037        self.v2_filechunk
1038            .v2_allocator
1039            .set_chunk_size(chunk_size as usize)
1040    }
1041
    /// Currently configured V2 chunk size in bytes.
    fn chunk_size(&self) -> usize {
        self.v2_filechunk.v2_allocator.chunk_size()
    }
1045
    /// Select the chunk format (V1 or V2) used for newly written files.
    fn set_chunk_type(&mut self, chunk_type: ChunkType) {
        self.chunk_type = chunk_type;
    }
1049
    /// Chunk format currently used for newly written files.
    fn chunk_type(&self) -> ChunkType {
        self.chunk_type
    }
1053
    /// Intentional no-op: writes are persisted immediately, so there is
    /// no buffered state to flush for any node.
    fn flush(&mut self, _node: Node) {
        // nothing to flush, the system immediately stores data on write
    }
1057}
1058
#[cfg(test)]
mod tests {

    use ic_stable_structures::DefaultMemoryImpl;

    use crate::storage::types::FileName;

    use super::*;

    // Metadata written with put_metadata must round-trip unchanged, and
    // file contents written must read back byte-for-byte.
    #[test]
    fn read_and_write_filechunk() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = storage.new_node();
        storage
            .put_metadata(
                node,
                &Metadata {
                    node,
                    file_type: FileType::RegularFile,
                    link_count: 1,
                    size: 10,
                    times: Times::default(),
                    first_dir_entry: Some(42),
                    last_dir_entry: Some(24),
                    chunk_type: Some(storage.chunk_type()),
                    maximum_size_allowed: None,
                },
            )
            .unwrap();
        let metadata = storage.get_metadata(node).unwrap();
        assert_eq!(metadata.node, node);
        assert_eq!(metadata.file_type, FileType::RegularFile);
        assert_eq!(metadata.link_count, 1);
        assert_eq!(metadata.first_dir_entry, Some(42));
        assert_eq!(metadata.last_dir_entry, Some(24));
        storage.write(node, 0, &[42; 10]).unwrap();

        let mut buf = [0; 10];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(buf, [42; 10]);
    }

    // Directory entries (name and sibling links) must round-trip through
    // put_direntry/get_direntry.
    #[test]
    fn read_and_write_direntry() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = storage.new_node();
        storage.put_direntry(
            node,
            7,
            DirEntry {
                node,
                name: FileName::new("test".as_bytes()).unwrap(),
                next_entry: Some(42),
                prev_entry: Some(24),
            },
        );
        let direntry = storage.get_direntry(node, 7).unwrap();
        assert_eq!(direntry.node, node);
        assert_eq!(
            direntry.name.bytes,
            FileName::new("test".as_bytes()).unwrap().bytes
        );
        assert_eq!(direntry.next_entry, Some(42));
        assert_eq!(direntry.prev_entry, Some(24));
    }

    // Helper: create an empty regular file using the storage's current
    // chunk type and return its node.
    fn new_file<M: Memory>(storage: &mut StableStorage<M>) -> Node {
        let node = storage.new_node();

        storage
            .put_metadata(
                node,
                &Metadata {
                    node,
                    file_type: FileType::RegularFile,
                    link_count: 1,
                    size: 0,
                    times: Times::default(),
                    first_dir_entry: None,
                    last_dir_entry: None,
                    chunk_type: Some(storage.chunk_type()),
                    maximum_size_allowed: None,
                },
            )
            .unwrap();

        node
    }

    // Reading past EOF returns only the bytes up to the file size and
    // leaves the rest of the caller's buffer untouched.
    #[test]
    fn read_beyond_file_size() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());

        let node = new_file(&mut storage);

        storage.write(node, 0, b"hello").unwrap();

        let mut buf = [0u8; 10];
        let bytes_read = storage.read(node, 3, &mut buf).unwrap();

        assert_eq!(bytes_read, 2);
        assert_eq!(&buf[..2], b"lo");

        assert_eq!(buf[2..], [0; 8]);
    }

    // Files created under V1 and V2 chunk types coexist; each remembers
    // the chunk type it was created with.
    #[test]
    fn switch_chunk_types() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());

        storage.set_chunk_type(ChunkType::V1);

        let node_v1 = new_file(&mut storage);

        storage.write(node_v1, 0, b"v1_data").unwrap();

        storage.set_chunk_type(ChunkType::V2);

        let node_v2 = new_file(&mut storage);

        // Write data
        storage.write(node_v2, 0, b"v2_data").unwrap();

        // Confirm reads
        let mut buf_v1 = [0u8; 7];
        storage.read(node_v1, 0, &mut buf_v1).unwrap();
        assert_eq!(&buf_v1, b"v1_data");
        let meta = storage.get_metadata(node_v1).unwrap();
        assert_eq!(meta.chunk_type.unwrap(), ChunkType::V1);

        let mut buf_v2 = [0u8; 7];
        storage.read(node_v2, 0, &mut buf_v2).unwrap();
        assert_eq!(&buf_v2, b"v2_data");
        let meta = storage.get_metadata(node_v2).unwrap();
        assert_eq!(meta.chunk_type.unwrap(), ChunkType::V2);
    }

    // Shrinking truncates readable content; growing via metadata alone
    // exposes zero bytes in the newly added region.
    #[test]
    fn resize_file_shrink_and_grow() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = new_file(&mut storage);

        storage.write(node, 0, b"1234567890").unwrap();
        let mut buf = [0u8; 10];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(&buf, b"1234567890");

        // Shrink to 5 bytes
        storage.resize_file(node, 5).unwrap();

        let meta = storage.get_metadata(node).unwrap();
        assert_eq!(meta.size, 5); // Check the metadata reflects new size

        // Reading the file now
        let mut buf_small = [0u8; 10];
        let bytes_read = storage.read(node, 0, &mut buf_small).unwrap();
        assert_eq!(bytes_read, 5);
        assert_eq!(&buf_small[..5], b"12345");
        assert_eq!(&buf_small[5..], [0; 5]);

        // check zero fill
        let mut meta = storage.get_metadata(node).unwrap();
        meta.size = 10;
        storage.put_metadata(node, &meta).unwrap();

        // Confirm new bytes are zeroed or remain uninitialized, depending on design
        let mut buf_grow = [0u8; 10];
        storage.read(node, 0, &mut buf_grow).unwrap();
        // First 5 bytes should remain "12345", rest must be zero:
        assert_eq!(&buf_grow[..5], b"12345");
        assert_eq!(&buf_grow[5..], [0; 5]);
    }

    // Shrinking a V2 file must free the chunks beyond the new size;
    // shrinking to zero must free them all.
    #[test]
    fn resize_file_shrink_deletes_v2_chunks() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = new_file(&mut storage);

        let chunk_size = storage.chunk_size() as FileSize;

        // write something into the second chunk
        storage.write(node, chunk_size + 4, b"1234567890").unwrap();

        // write something into the first chunk
        storage.write(node, 4, b"1234567890").unwrap();

        let mut buf = [0u8; 10];
        // read second chunk
        storage.read(node, chunk_size + 9, &mut buf).unwrap();

        assert_eq!(&buf, b"67890\0\0\0\0\0");

        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, 5))
            .collect();
        assert_eq!(chunks.len(), 2);

        // Shrink to 5 bytes
        storage.resize_file(node, 5).unwrap();

        let meta = storage.get_metadata(node).unwrap();

        // only one chunk should be present
        assert_eq!(meta.size, 5); // Check the metadata reflects new size

        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, 5))
            .collect();
        assert_eq!(chunks.len(), 1);

        // for 0 size, all chunks have to be deleted
        storage.resize_file(node, 0).unwrap();
        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, 5))
            .collect();
        assert_eq!(chunks.len(), 0);
    }

    // Same shrink semantics as above, but for the legacy V1 chunk store.
    #[test]
    fn resize_file_shrink_deletes_v1_chunks() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        storage.set_chunk_type(ChunkType::V1);

        let node = new_file(&mut storage);

        let chunk_size = FILE_CHUNK_SIZE_V1 as FileSize;

        // write something into the second chunk
        storage.write(node, chunk_size + 4, b"1234567890").unwrap();

        // write something into the first chunk
        storage.write(node, 4, b"1234567890").unwrap();

        let mut buf = [0u8; 10];
        // read second chunk
        storage.read(node, chunk_size + 9, &mut buf).unwrap();

        assert_eq!(&buf, b"67890\0\0\0\0\0");

        let chunks: Vec<_> = storage.filechunk.range((node, 0)..(node, 5)).collect();
        assert_eq!(chunks.len(), 2);

        // Shrink to 5 bytes
        storage.resize_file(node, 5).unwrap();

        let meta = storage.get_metadata(node).unwrap();

        // only one chunk should be present
        assert_eq!(meta.size, 5); // Check the metadata reflects new size

        let chunks: Vec<_> = storage.filechunk.range((node, 0)..(node, 5)).collect();
        assert_eq!(chunks.len(), 1);

        // for 0 size, all chunks have to be deleted
        storage.resize_file(node, 0).unwrap();
        let chunks: Vec<_> = storage.filechunk.range((node, 0)..(node, 5)).collect();
        assert_eq!(chunks.len(), 0);
    }

    // rm_file on a V2 file must drop its metadata and every V2 chunk,
    // while leaving other files' chunks untouched.
    #[test]
    fn remove_file_chunks_v2() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());

        let chunk_size = storage.chunk_size() as FileSize;

        // some other files present
        let node_other = new_file(&mut storage);
        storage.write(node_other, 0, b"some data").unwrap();

        let node = new_file(&mut storage);

        // write into 5 chunks + 1 metadata chunk, expect to find 6 chunks
        storage.write(node, 0, b"some data").unwrap();
        storage.write(node, chunk_size, b"some data").unwrap();
        storage.write(node, chunk_size * 2, b"some data").unwrap();
        // write into two chunks with one call
        storage
            .write(node, chunk_size * 5 - 2, b"some data")
            .unwrap();

        // some other files present
        let node_other2 = new_file(&mut storage);
        storage.write(node_other2, 0, b"some data").unwrap();

        // check chunk count
        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, u32::MAX))
            .collect();

        // chunks of the given node
        assert_eq!(chunks.len(), 6);

        // check the allocator is also holding 6 chunks of the main file, and 4 chunks from the two other files
        assert_eq!(
            storage.v2_filechunk.v2_allocator.get_current_max_ptr(),
            (6 + 4) * chunk_size
        );

        // Remove file
        storage.rm_file(node).unwrap();

        // Confirm reading fails or returns NotFound
        let mut buf = [0u8; 9];
        let res = storage.read(node, 0, &mut buf);
        assert!(matches!(res, Err(Error::NoSuchFileOrDirectory)));

        // Confirm metadata is removed
        let meta_res = storage.get_metadata(node);
        assert!(matches!(meta_res, Err(Error::NoSuchFileOrDirectory)));

        // check there are no chunks left after deleting the node
        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
            .collect();

        assert_eq!(chunks.len(), 0);
    }

    // Same removal semantics for V1 data chunks; metadata still lives in
    // V2 chunks, hence the allocator pointer check below.
    #[test]
    fn remove_file_chunks_v1() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        storage.set_chunk_type(ChunkType::V1);

        let chunk_size = FILE_CHUNK_SIZE_V1 as FileSize;

        // some other files present
        let node_other = new_file(&mut storage);
        storage.write(node_other, 0, b"some data").unwrap();

        let node = new_file(&mut storage);

        // write into 5 chunks
        storage.write(node, 0, b"some data").unwrap();
        storage.write(node, chunk_size, b"some data").unwrap();
        storage.write(node, chunk_size * 2, b"some data").unwrap();
        // write into two chunks with one call
        storage
            .write(node, chunk_size * 5 - 2, b"some data")
            .unwrap();

        // some other files present
        let node_other2 = new_file(&mut storage);
        storage.write(node_other2, 0, b"some data").unwrap();

        // check chunk count
        let chunks: Vec<_> = storage
            .filechunk
            .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
            .collect();

        // chunks of the given node
        assert_eq!(chunks.len(), 5);

        // check the allocator is holding three chunks for 3 stored metadata
        assert_eq!(
            storage.v2_filechunk.v2_allocator.get_current_max_ptr(),
            storage.chunk_size() as FileSize * 3
        );

        // Remove file
        storage.rm_file(node).unwrap();

        // Confirm reading fails or returns NotFound
        let mut buf = [0u8; 9];
        let res = storage.read(node, 0, &mut buf);
        assert!(matches!(res, Err(Error::NoSuchFileOrDirectory)));

        // Confirm metadata is removed
        let meta_res = storage.get_metadata(node);
        assert!(matches!(meta_res, Err(Error::NoSuchFileOrDirectory)));

        // check there are no chunks left after deleting the node
        let chunks: Vec<_> = storage
            .filechunk
            .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
            .collect();

        assert_eq!(chunks.len(), 0);
    }
}