stable_fs/storage/stable.rs

use std::{collections::HashMap, ops::Range};

use crate::storage::types::{FileName, MountedFileSizePolicy, ZEROES};
use ic_cdk::stable::WASM_PAGE_SIZE_IN_BYTES;
use ic_stable_structures::{
    BTreeMap, Cell, Memory,
    memory_manager::{MemoryId, MemoryManager, VirtualMemory},
};

use crate::{
    runtime::structure_helpers::{read_obj, write_obj},
    storage::ptr_cache::CachedChunkPtr,
};

use crate::{
    error::Error,
    runtime::{
        structure_helpers::{get_chunk_infos, grow_memory},
        types::ChunkSize,
        types::ChunkType,
    },
};

use super::{
    Storage,
    allocator::ChunkPtrAllocator,
    chunk_iterator::ChunkV2Iterator,
    metadata_provider::MetadataProvider,
    ptr_cache::PtrCache,
    types::{
        DUMMY_DOT_DOT_ENTRY, DUMMY_DOT_DOT_ENTRY_INDEX, DUMMY_DOT_ENTRY, DUMMY_DOT_ENTRY_INDEX,
        DirEntry, DirEntryIndex, FILE_CHUNK_SIZE_V1, FileChunk, FileChunkIndex, FileChunkPtr,
        FileSize, FileType, Header, MAX_FILE_CHUNK_COUNT, MAX_FILE_ENTRY_INDEX, MAX_FILE_SIZE,
        Metadata, Node, Times,
    },
};

pub const ROOT_NODE: Node = 0;
const FS_VERSION: u32 = 1;

const DEFAULT_FIRST_MEMORY_INDEX: u8 = 229;

/// the maximum index accepted as the end of the memory index range
const MAX_MEMORY_INDEX: u8 = 254;

/// the number of memory indices used by the file system (currently 10)
const MEMORY_INDEX_COUNT: u8 = 10;

/// offset in the cache journal holding the cached mounted-file metadata (deprecated)
const MOUNTED_META_PTR: u64 = 16;
enum StorageMemoryIdx {
    Header = 0,
    Metadata = 1,

    DirEntries = 2,
    FileChunksV1 = 3,

    /// metadata for mounted files
    MountedMetadata = 4,

    /// V2 chunks
    FileChunksV2 = 5,
    ChunkAllocatorV2 = 6,
    FileChunksMemoryV2 = 7,

    /// caching helper
    CacheJournal = 8,

    /// dir entry lookup map
    DirEntryLookup = 9,
}

struct StorageMemories<M: Memory> {
    header_memory: VirtualMemory<M>,
    metadata_memory: VirtualMemory<M>,
    direntry_memory: VirtualMemory<M>,
    filechunk_memory: VirtualMemory<M>,

    mounted_meta_memory: VirtualMemory<M>,

    v2_chunk_ptr_memory: VirtualMemory<M>,
    v2_chunks_memory: VirtualMemory<M>,
    v2_allocator_memory: VirtualMemory<M>,

    cache_journal: VirtualMemory<M>, // is deprecated, will be removed in the future

    direntry_lookup_memory: VirtualMemory<M>,
}

#[repr(C)]
pub struct V2FileChunks<M: Memory> {
    // The file chunk storage V2. Only chunk pointers are stored in the BTreeMap, to reduce serialization overhead.
    pub(crate) v2_chunk_ptr: BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory<M>>,
    // The actual storage of the chunks:
    // * small fragments of data can be read and written directly, there is no need to work in chunk-sized blocks
    // * the pointers in the BTreeMap (Node, FileChunkIndex) -> FileChunkPtr are static,
    //   which allows caching to avoid chunk search overheads.
    pub(crate) v2_chunks: VirtualMemory<M>,
    // Keeps information on the chunks currently available.
    // It can be set up to work with different chunk sizes:
    // 4K - the same as chunks V1, 16K - the default, 64K - the biggest chunk size available.
    // A larger chunk size reduces the number of BTree insertions and increases performance.
    pub(crate) v2_allocator: ChunkPtrAllocator<M>,
}
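
// For illustration, a read of file bytes through the V2 storage resolves in two steps
// (this is a sketch of what `read_chunks_v2` below does, not additional functionality):
//
//     let ptr = v2_chunk_ptr.get(&(node, chunk_index));   // locate the chunk in v2_chunks
//     v2_chunks.read(ptr + offset_in_chunk, buf);         // copy bytes out of that chunk
//
// `chunk_index` and `offset_in_chunk` are illustrative names derived from the byte offset
// being read; see the arithmetic at the start of `write_chunks_v2`.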

#[repr(C)]
pub struct StableStorage<M: Memory> {
    /// static-sized filesystem data: the version number and the next node id.
    header: Cell<Header, VirtualMemory<M>>,
    /// information about the directory structure.
    direntry: BTreeMap<(Node, DirEntryIndex), DirEntry, VirtualMemory<M>>,
    /// actual file data stored in chunks inside a BTreeMap.
    filechunk: BTreeMap<(Node, FileChunkIndex), FileChunk, VirtualMemory<M>>,

    /// lookup map to quickly find the DirEntryIndex for a file name
    direntry_lookup: BTreeMap<(Node, FileName), FileChunkIndex, VirtualMemory<M>>,

    /// file data stored in V2 file chunks
    pub(crate) v2_filechunk: V2FileChunks<M>,

    /// helper object managing file metadata access of all types
    meta_provider: MetadataProvider<M>,

    /// not used directly, but needed to keep the memories alive.
    _memory_manager: Option<MemoryManager<M>>,

    /// active mounts.
    active_mounts: HashMap<Node, Box<dyn Memory>>,

    /// chunk type to use when creating new files.
    chunk_type: ChunkType,

    /// chunk pointer cache. This cache reduces chunk search overhead when reading a file
    /// or writing over existing data. (New files still need to insert new pointers into the BTreeMap, hence writing them is slower.)
    pub(crate) ptr_cache: PtrCache,
}

impl<M: Memory> StableStorage<M> {
    pub fn new(memory: M) -> Self {
        let memory_manager = MemoryManager::init(memory);

        let mut storage = Self::new_with_memory_manager(
            &memory_manager,
            DEFAULT_FIRST_MEMORY_INDEX..DEFAULT_FIRST_MEMORY_INDEX + MEMORY_INDEX_COUNT,
        );

        storage._memory_manager = Some(memory_manager);

        storage
    }

    pub fn new_with_memory_manager(
        memory_manager: &MemoryManager<M>,
        memory_indices: Range<u8>,
    ) -> StableStorage<M> {
        if memory_indices.end - memory_indices.start < MEMORY_INDEX_COUNT {
            panic!("The memory index range must include at least {MEMORY_INDEX_COUNT} indices");
        }

        if memory_indices.end > MAX_MEMORY_INDEX {
            panic!("The end of the memory index range must be less than or equal to {MAX_MEMORY_INDEX}");
        }

        let header_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::Header as u8,
        ));
        let metadata_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::Metadata as u8,
        ));
        let direntry_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::DirEntries as u8,
        ));
        let filechunk_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::FileChunksV1 as u8,
        ));
        let mounted_meta_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::MountedMetadata as u8,
        ));

        let v2_chunk_ptr_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::FileChunksV2 as u8,
        ));
        let v2_allocator_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::ChunkAllocatorV2 as u8,
        ));
        let v2_chunks_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::FileChunksMemoryV2 as u8,
        ));

        let cache_journal = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::CacheJournal as u8,
        ));
        let direntry_lookup_memory = memory_manager.get(MemoryId::new(
            memory_indices.start + StorageMemoryIdx::DirEntryLookup as u8,
        ));

        let memories = StorageMemories {
            header_memory,
            metadata_memory,
            direntry_memory,
            filechunk_memory,
            mounted_meta_memory,
            v2_chunk_ptr_memory,
            v2_chunks_memory,
            v2_allocator_memory,
            cache_journal,
            direntry_lookup_memory,
        };

        Self::new_with_custom_memories(memories)
    }

    // Support the deprecated storage layout: recover mounted-file metadata that was
    // stored in the cache journal by earlier versions.
    fn init_size_from_cache_journal(&mut self, journal: &VirtualMemory<M>) {
        // re-define the old Metadata type so the legacy record can be read correctly
        #[derive(Clone, Default, PartialEq)]
        pub struct MetadataLegacy {
            pub node: Node,
            pub file_type: FileType,
            pub link_count: u64,
            pub size: FileSize,
            pub times: Times,
            pub first_dir_entry: Option<DirEntryIndex>,
            pub last_dir_entry: Option<DirEntryIndex>,
            pub chunk_type: Option<ChunkType>,
        }
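
        // Journal layout (legacy; this describes the reads below rather than a separate spec):
        // the mounted node id (u64) is stored at offset MOUNTED_META_PTR, immediately followed
        // at MOUNTED_META_PTR + 8 by the serialized MetadataLegacy record.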

        // try to recover stored mounted metadata (if any)
        if journal.size() > 0 {
            let mut mounted_node = 0u64;
            let mut mounted_meta = MetadataLegacy::default();

            read_obj(journal, MOUNTED_META_PTR, &mut mounted_node);

            read_obj(journal, MOUNTED_META_PTR + 8, &mut mounted_meta);

            let meta_read = Metadata {
                node: mounted_meta.node,
                file_type: FileType::RegularFile,
                link_count: mounted_meta.link_count,
                size: mounted_meta.size,
                times: mounted_meta.times,
                chunk_type: mounted_meta.chunk_type,
                maximum_size_allowed: None,
                first_dir_entry: None,
                last_dir_entry: None,
            };

            if mounted_node != u64::MAX && mounted_node == mounted_meta.node {
                // immediately store the recovered metadata
                self.meta_provider.put_metadata(
                    mounted_node,
                    true,
                    &meta_read,
                    None,
                    &mut self.v2_filechunk,
                );

                // reset the cached metadata
                write_obj(journal, MOUNTED_META_PTR, &(u64::MAX as Node));
            }
        }
    }

    fn new_with_custom_memories(memories: StorageMemories<M>) -> Self {
        let default_header_value = Header {
            version: FS_VERSION,
            next_node: ROOT_NODE + 1,
        };

        let v2_allocator = ChunkPtrAllocator::new(memories.v2_allocator_memory).unwrap();
        let ptr_cache = PtrCache::new();
        let v2_chunk_ptr = BTreeMap::init(memories.v2_chunk_ptr_memory);

        let meta_provider =
            MetadataProvider::new(memories.metadata_memory, memories.mounted_meta_memory);

        let mut result = Self {
            header: Cell::init(memories.header_memory, default_header_value),
            direntry: BTreeMap::init(memories.direntry_memory),
            filechunk: BTreeMap::init(memories.filechunk_memory),

            direntry_lookup: BTreeMap::init(memories.direntry_lookup_memory),

            v2_filechunk: V2FileChunks {
                v2_chunk_ptr,
                v2_chunks: memories.v2_chunks_memory,
                v2_allocator,
            },

            // transient runtime data
            _memory_manager: None,
            active_mounts: HashMap::new(),

            // default chunk type is V2
            chunk_type: ChunkType::V2,

            ptr_cache,

            meta_provider,
        };

        // init mounted drive
        result.init_size_from_cache_journal(&memories.cache_journal);

        let version = result.header.get().version;

        if version != FS_VERSION {
            panic!("Unsupported file system version");
        }

        result
    }

    // write into mounted memory
    fn write_mounted(&self, memory: &dyn Memory, offset: FileSize, buf: &[u8]) -> FileSize {
        let length_to_write = buf.len() as FileSize;

        // grow memory if needed
        let max_address = offset as FileSize + length_to_write;

        grow_memory(memory, max_address);

        memory.write(offset, buf);

        length_to_write
    }

    // Insert or update the selected file chunk with the data provided in a buffer.
    fn write_filechunk_v1(
        &mut self,
        node: Node,
        index: FileChunkIndex,
        offset: FileSize,
        buf: &[u8],
    ) {
        let mut entry = self.filechunk.get(&(node, index)).unwrap_or_default();
        entry.bytes[offset as usize..offset as usize + buf.len()].copy_from_slice(buf);
        self.filechunk.insert((node, index), entry);
    }

    fn write_chunks_v2(
        &mut self,
        node: Node,
        offset: FileSize,
        buf: &[u8],
    ) -> Result<FileSize, Error> {
        let mut remainder = buf.len() as FileSize;
        let last_address = offset + remainder;

        let chunk_size = self.chunk_size();

        let start_index = (offset / chunk_size as FileSize) as FileChunkIndex;

        let mut chunk_offset = offset - start_index as FileSize * chunk_size as FileSize;
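        // Worked example (assuming the default 16 KiB chunk size): for offset = 20_000,
        // start_index = 20_000 / 16_384 = 1 and chunk_offset = 20_000 - 1 * 16_384 = 3_616,
        // i.e. the write starts 3_616 bytes into the second chunk of the file.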

        let mut size_written: FileSize = 0;

        let write_iter = ChunkV2Iterator::new(
            node,
            offset,
            last_address,
            self.chunk_size() as FileSize,
            &mut self.ptr_cache,
            &mut self.v2_filechunk.v2_chunk_ptr,
        );

        let write_iter: Vec<_> = write_iter.collect();
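        // Note: collecting first releases the mutable borrows held by the iterator
        // (on ptr_cache and v2_chunk_ptr), so the loop below can mutate them again
        // when it allocates and registers new chunks.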

        for ((nd, index), chunk_ptr) in write_iter {
            assert!(nd == node);

            if remainder == 0 {
                break;
            }

            let to_write = remainder
                .min(chunk_size as FileSize - chunk_offset)
                .min(buf.len() as FileSize - size_written);

            let write_buf =
                &buf[size_written as usize..(size_written as usize + to_write as usize)];

            let chunk_ptr = if let CachedChunkPtr::ChunkExists(ptr) = chunk_ptr {
                ptr
            } else {
                // insert a new chunk
                let ptr = self.v2_filechunk.v2_allocator.allocate();

                grow_memory(&self.v2_filechunk.v2_chunks, ptr + chunk_size as FileSize);

                // fill the new chunk with zeroes (apart from the area that will be overwritten)

                // fill before the written content
                self.v2_filechunk
                    .v2_chunks
                    .write(ptr, &ZEROES[0..chunk_offset as usize]);

                // fill after the written content
                self.v2_filechunk.v2_chunks.write(
                    ptr + chunk_offset + to_write as FileSize,
                    &ZEROES[0..(chunk_size - chunk_offset as usize - to_write as usize)],
                );

                // register the new chunk pointer
                self.v2_filechunk.v2_chunk_ptr.insert((node, index), ptr);

                // cache the new chunk pointer
                self.ptr_cache
                    .add(vec![((node, index), CachedChunkPtr::ChunkExists(ptr))]);

                ptr
            };

            // no extra grow is needed here: the memory was already grown when the chunk was allocated
            // grow_memory(&self.v2_chunks, chunk_ptr + offset + buf.len() as FileSize);
            self.v2_filechunk
                .v2_chunks
                .write(chunk_ptr + chunk_offset, write_buf);

            chunk_offset = 0;
            size_written += to_write;
            remainder -= to_write;
        }

        Ok(size_written)
    }

    fn read_chunks_v1(
        &self,
        node: Node,
        offset: FileSize,
        file_size: FileSize,
        buf: &mut [u8],
    ) -> Result<FileSize, Error> {
        let start_index = (offset / FILE_CHUNK_SIZE_V1 as FileSize) as FileChunkIndex;
        let end_index = ((offset + buf.len() as FileSize) / FILE_CHUNK_SIZE_V1 as FileSize + 1)
            as FileChunkIndex;

        let mut chunk_offset = offset - start_index as FileSize * FILE_CHUNK_SIZE_V1 as FileSize;

        let range = (node, start_index)..(node, MAX_FILE_CHUNK_COUNT);

        let mut size_read: FileSize = 0;
        let mut remainder = file_size - offset;

        let mut iter = self.filechunk.range(range);
        let mut cur_fetched = None;

        for cur_index in start_index..end_index {
            let chunk_space = FILE_CHUNK_SIZE_V1 as FileSize - chunk_offset;

            let to_read = remainder
                .min(chunk_space)
                .min(buf.len() as FileSize - size_read);

            // finished reading, buffer full
            if size_read == buf.len() as FileSize {
                break;
            }

            if cur_fetched.is_none() {
                cur_fetched = iter.next();
            }

            let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];

            if let Some(ref en) = cur_fetched {
                let (nd, idx) = *en.key();
                let value = en.value();

                if idx == cur_index {
                    assert!(nd == node);

                    read_buf.copy_from_slice(
                        &value.bytes
                            [chunk_offset as usize..chunk_offset as usize + to_read as usize],
                    );

                    // consume token
                    cur_fetched = None;
                } else {
                    // fill up with zeroes
                    read_buf.iter_mut().for_each(|m| *m = 0)
                }
            } else {
                // fill up with zeroes
                read_buf.iter_mut().for_each(|m| *m = 0)
            }

            chunk_offset = 0;
            size_read += to_read;
            remainder -= to_read;
        }

        Ok(size_read)
    }

    fn read_chunks_v2(
        &mut self,
        node: Node,
        offset: FileSize,
        file_size: FileSize,
        buf: &mut [u8],
    ) -> Result<FileSize, Error> {
        // early exit if nothing left to read
        if offset >= file_size {
            return Ok(0 as FileSize);
        }

        // compute remainder to read
        let mut remainder = file_size - offset;

        let chunk_size = self.chunk_size();

        let start_index = (offset / chunk_size as FileSize) as FileChunkIndex;

        let mut chunk_offset = offset - start_index as FileSize * chunk_size as FileSize;

        //let end_index = ((offset + buf.len() as FileSize) / chunk_size as FileSize + 1) as FileChunkIndex;
        //let mut range = (node, start_index)..(node, end_index);

        let mut size_read: FileSize = 0;

        let read_iter = ChunkV2Iterator::new(
            node,
            offset,
            file_size,
            chunk_size as FileSize,
            &mut self.ptr_cache,
            &mut self.v2_filechunk.v2_chunk_ptr,
        );

        for ((nd, _idx), cached_chunk) in read_iter {
            assert!(nd == node);

            // finished reading, buffer full
            if size_read == buf.len() as FileSize {
                break;
            }

            let chunk_space = chunk_size as FileSize - chunk_offset;

            let to_read = remainder
                .min(chunk_space)
                .min(buf.len() as FileSize - size_read);

            let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];

            if let CachedChunkPtr::ChunkExists(cptr) = cached_chunk {
                self.v2_filechunk
                    .v2_chunks
                    .read(cptr + chunk_offset, read_buf);
            } else {
                // fill read buffer with 0
                read_buf.iter_mut().for_each(|m| *m = 0)
            }

            chunk_offset = 0;
            size_read += to_read;
            remainder -= to_read;
        }

        Ok(size_read)
    }

    fn use_v2(&mut self, metadata: &Metadata, node: u64) -> bool {
        // decide if we use v2 chunks for reading/writing
        match metadata.chunk_type {
            Some(ChunkType::V2) => true,
            Some(ChunkType::V1) => false,

            // try to figure out which chunk type to use
            None => {
                if metadata.size > 0 {
                    // try to find any v2 chunk, otherwise use v1
                    let ptr = self
                        .v2_filechunk
                        .v2_chunk_ptr
                        .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
                        .next();

                    ptr.is_some()
                } else {
                    self.chunk_type() == ChunkType::V2
                }
            }
        }
    }

    fn validate_metadata_update(
        old_meta: Option<&Metadata>,
        new_meta: &Metadata,
    ) -> Result<(), Error> {
        if let Some(old_meta) = old_meta {
            // do not allow changing file type
            if old_meta.file_type != new_meta.file_type {
                return Err(Error::FunctionNotSupported);
            }
        }

        if let Some(old_meta) = old_meta {
            // changing node is not allowed
            if old_meta.node != new_meta.node {
                return Err(Error::IllegalByteSequence);
            }
        }

        if let Some(max_size) = new_meta.maximum_size_allowed
            && new_meta.size > max_size
        {
            return Err(Error::FileTooLarge);
        }

        Ok(())
    }

    fn resize_file_internal(&mut self, node: Node, new_size: FileSize) -> Result<(), Error> {
        // anyone calling this function should also clear the pointer cache for this node

        if self.is_mounted(node) {
            // for a mounted node we only update the file size in the metadata (no need to delete chunks)
            return Ok(());
        }

        // delete v1 chunks
        let chunk_size = FILE_CHUNK_SIZE_V1;

        let first_deletable_index = (new_size.div_ceil(chunk_size as FileSize)) as FileChunkIndex;

        let range = (node, first_deletable_index)..(node, MAX_FILE_CHUNK_COUNT);

        let mut chunks: Vec<(Node, FileChunkIndex)> = Vec::new();

        for k in self.filechunk.keys_range(range) {
            chunks.push(k);
        }

        for (nd, idx) in chunks.into_iter() {
            assert!(nd == node);
            self.filechunk.remove(&(node, idx));
        }

        // fill with zeros the last chunk memory above the file size
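        // (for example, with 4 KiB V1 chunks and new_size = 5, chunk 0 is kept and its bytes 5..4096 are zeroed)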
        if first_deletable_index > 0 {
            let offset = new_size as FileSize % chunk_size as FileSize;

            self.write_filechunk_v1(
                node,
                first_deletable_index - 1,
                offset,
                &ZEROES[0..(chunk_size - offset as usize)],
            );
        }

        // delete v2 chunks

        let chunk_size = self.chunk_size();

        let first_deletable_index = (new_size.div_ceil(chunk_size as FileSize)) as FileChunkIndex;

        let range = (node, first_deletable_index)..(node, MAX_FILE_CHUNK_COUNT);
        let mut chunks: Vec<(Node, FileChunkIndex)> = Vec::new();
        for k in self.v2_filechunk.v2_chunk_ptr.keys_range(range) {
            chunks.push(k);
        }

        for (nd, idx) in chunks.into_iter() {
            assert!(nd == node);
            let removed = self.v2_filechunk.v2_chunk_ptr.remove(&(node, idx));

            if let Some(removed) = removed {
                self.v2_filechunk.v2_allocator.free(removed);
            }
        }

        // fill with zeros the last chunk memory above the file size
        if first_deletable_index > 0 {
            let offset = new_size as FileSize % chunk_size as FileSize;
            self.write_chunks_v2(node, new_size, &ZEROES[0..(chunk_size - offset as usize)])?;
        }

        Ok(())
    }
}

impl<M: Memory> Storage for StableStorage<M> {
    // Get the root node ID of the storage.
    fn root_node(&self) -> Node {
        ROOT_NODE
    }

    // Generate the next available node ID.
    fn new_node(&mut self) -> Node {
        let mut header = self.header.get().clone();

        let result = header.next_node;

        header.next_node += 1;

        self.header.set(header);

        result
    }

    fn get_version(&self) -> u32 {
        let header = self.header.get();
        header.version
    }

    // Get the metadata associated with the node.
    fn get_metadata(&self, node: Node) -> Result<Metadata, Error> {
        self.meta_provider
            .get_metadata(
                node,
                self.is_mounted(node),
                &self.v2_filechunk.v2_chunk_ptr,
                &self.v2_filechunk.v2_chunks,
            )
            .map(|x| x.0)
            .ok_or(Error::NoSuchFileOrDirectory)
    }

    // Update the metadata associated with the node.
    fn put_metadata(&mut self, node: Node, metadata: &Metadata) -> Result<(), Error> {
        let is_mounted = self.is_mounted(node);

        let meta_rec = self.meta_provider.get_metadata(
            node,
            is_mounted,
            &self.v2_filechunk.v2_chunk_ptr,
            &self.v2_filechunk.v2_chunks,
        );

        let (old_meta, meta_ptr) = match meta_rec.as_ref() {
            Some((m, p)) => (Some(m), *p),
            None => (None, None),
        };

        Self::validate_metadata_update(old_meta, metadata)?;

        if let Some(old_meta) = old_meta {
            // if the size was reduced, we need to delete the file chunks above the file size
            if metadata.size < old_meta.size {
                self.ptr_cache.clear();
                self.resize_file_internal(node, metadata.size)?;
            }
        }

        self.meta_provider.put_metadata(
            node,
            is_mounted,
            metadata,
            meta_ptr,
            &mut self.v2_filechunk,
        );

        Ok(())
    }

    // Retrieve the DirEntry instance given the Node and DirEntryIndex.
    fn get_direntry(&self, node: Node, index: DirEntryIndex) -> Result<DirEntry, Error> {
        self.direntry
            .get(&(node, index))
            .ok_or(Error::NoSuchFileOrDirectory)
    }

    fn get_direntry_index_by_name(&self, el: &(Node, FileName)) -> Option<DirEntryIndex> {
        self.direntry_lookup.get(el)
    }

    fn new_direntry_index(&self, node: Node) -> DirEntryIndex {
        let start = (node, 0);
        let end = (node, u32::MAX);

        // Iterate in that range and take the last element
        let last = self.direntry.range(start..=end).next_back();

        if let Some(l) = last {
            let key = l.key();
            if key.1 == u32::MAX {
                panic!("Cannot insert a new directory entry, the directory is full!");
            }

            return key.1 + 1;
        }

        // empty list, return 1 as the first index
        1
    }

    fn with_direntries(
        &self,
        node: Node,
        initial_index: Option<DirEntryIndex>,
        f: &mut dyn FnMut(&DirEntryIndex, &DirEntry) -> bool,
    ) {
        if initial_index.is_none() {
            let mut dot_entry = DUMMY_DOT_ENTRY;
            dot_entry.1.node = node;

            if !f(&dot_entry.0, &dot_entry.1) {
                return;
            }

            if !f(&DUMMY_DOT_DOT_ENTRY.0, &DUMMY_DOT_DOT_ENTRY.1) {
                return;
            }
        }

        let initial_index = initial_index.unwrap_or(0);

        if initial_index == DUMMY_DOT_ENTRY_INDEX {
            let mut dot_entry = DUMMY_DOT_ENTRY;
            dot_entry.1.node = node;

            if !f(&dot_entry.0, &dot_entry.1) {
                return;
            }

            if !f(&DUMMY_DOT_DOT_ENTRY.0, &DUMMY_DOT_DOT_ENTRY.1) {
                return;
            }
        }

        if initial_index == DUMMY_DOT_DOT_ENTRY_INDEX
            && !f(&DUMMY_DOT_DOT_ENTRY.0, &DUMMY_DOT_DOT_ENTRY.1)
        {
            return;
        }

        let max_index = MAX_FILE_ENTRY_INDEX;

        for en in self
            .direntry
            .range((node, initial_index)..(node, max_index))
        {
            let (_node, index) = *en.key();
            let entry = en.value();

            if !f(&index, &entry) {
                return;
            }
        }
    }

    // Update or insert the DirEntry instance given the Node and DirEntryIndex.
    fn put_direntry(&mut self, node: Node, index: DirEntryIndex, entry: DirEntry) {
        let name = entry.name.clone();

        // main direntry
        self.direntry.insert((node, index), entry);

        // direntry lookup
        self.direntry_lookup.insert((node, name), index);
    }

    // Remove the DirEntry instance given the Node and DirEntryIndex.
    fn rm_direntry(&mut self, node: Node, index: DirEntryIndex) {
        let r = self.direntry.remove(&(node, index));
        if let Some(v) = r {
            self.direntry_lookup.remove(&(node, v.name));
        }
    }

    // Fill the buffer contents with data of a chosen data range.
    fn read(&mut self, node: Node, offset: FileSize, buf: &mut [u8]) -> Result<FileSize, Error> {
        let metadata = self.get_metadata(node)?;

        let max_size = metadata.maximum_size_allowed.unwrap_or(MAX_FILE_SIZE);
        let file_size = metadata.size.min(max_size);

        if offset >= file_size {
            return Ok(0);
        }

        let size_read = if let Some(memory) = self.active_mounts.get(&node) {
            let remainder = file_size - offset;
            let to_read = remainder.min(buf.len() as FileSize);

            // grow memory also for reading
            grow_memory(memory.as_ref(), offset + to_read);

            memory.read(offset, &mut buf[..to_read as usize]);
            to_read
        } else {
            let use_v2 = self.use_v2(&metadata, node);

            if use_v2 {
                self.read_chunks_v2(node, offset, file_size, buf)?
            } else {
                self.read_chunks_v1(node, offset, file_size, buf)?
            }
        };

        Ok(size_read)
    }

    // Write the buffer into the file at the given offset; the file cursor is NOT updated by this call.
    fn write(&mut self, node: Node, offset: FileSize, buf: &[u8]) -> Result<FileSize, Error> {
        let mut metadata = self.get_metadata(node)?;

        // do not attempt to write 0 bytes to avoid a file resize (when writing above the file size)
        if buf.is_empty() {
            return Ok(0);
        }

        let max_size = metadata.maximum_size_allowed.unwrap_or(MAX_FILE_SIZE);

        if offset + buf.len() as FileSize > max_size {
            return Err(Error::FileTooLarge);
        }

        let written_size = if let Some(memory) = self.get_mounted_memory(node) {
            self.write_mounted(memory, offset, buf);

            buf.len() as FileSize
        } else {
            let end = offset + buf.len() as FileSize;

            let use_v2 = self.use_v2(&metadata, node);

            if use_v2 {
                self.write_chunks_v2(node, offset, buf)?
            } else {
                let chunk_infos = get_chunk_infos(offset, end, FILE_CHUNK_SIZE_V1);

                let mut written = 0usize;

                for chunk in chunk_infos.into_iter() {
                    self.write_filechunk_v1(
                        node,
                        chunk.index,
                        chunk.offset,
                        &buf[written..(written + chunk.len as usize)],
                    );

                    written += chunk.len as usize;
                }

                written as FileSize
            }
        };

        let end = offset + buf.len() as FileSize;
        if end > metadata.size {
            metadata.size = end;
            self.put_metadata(node, &metadata)?;
        }

        Ok(written_size)
    }

    fn resize_file(&mut self, node: Node, new_size: FileSize) -> Result<(), Error> {
        let mut meta = self.get_metadata(node)?;

        meta.size = new_size;

        self.put_metadata(node, &meta)
    }

    // Remove the file and free its metadata and chunks.
    fn rm_file(&mut self, node: Node) -> Result<(), Error> {
        if self.is_mounted(node) {
            return Err(Error::DeviceOrResourceBusy);
        }

        self.resize_file(node, 0)?;

        self.meta_provider.remove_metadata(
            node,
            &mut self.ptr_cache,
            &mut self.filechunk,
            &mut self.v2_filechunk.v2_chunk_ptr,
            &mut self.v2_filechunk.v2_allocator,
        );

        Ok(())
    }

    fn mount_node(
        &mut self,
        node: Node,
        memory: Box<dyn Memory>,
        policy: MountedFileSizePolicy,
    ) -> Result<(), Error> {
        if self.is_mounted(node) {
            return Err(Error::DeviceOrResourceBusy);
        }

        // do extra meta preparation
        // get the file metadata (we are not mounted at this point)
        let mut file_meta = self.get_metadata(node)?;

        let memory_size = memory.size();

        // activate mount, we use mounted metadata after this line
        self.active_mounts.insert(node, memory);

        let old_size = if let Ok(old_mounted_meta) = self.get_metadata(node) {
            let size = old_mounted_meta.size;
            file_meta = old_mounted_meta;
            Some(size)
        } else {
            None
        };

        let new_size = policy.get_mounted_file_size(old_size, memory_size);

        file_meta.size = new_size;

        // update mounted metadata
        self.put_metadata(node, &file_meta)?;

        Ok(())
    }

    fn unmount_node(&mut self, node: Node) -> Result<Box<dyn Memory>, Error> {
        let memory = self.active_mounts.remove(&node);

        memory.ok_or(Error::NoSuchDevice)
    }

    fn is_mounted(&self, node: Node) -> bool {
        self.active_mounts.contains_key(&node)
    }

    fn get_mounted_memory(&self, node: Node) -> Option<&dyn Memory> {
        let res: Option<&Box<dyn Memory>> = self.active_mounts.get(&node);

        res.map(|b| b.as_ref())
    }

    fn init_mounted_memory(&mut self, node: Node) -> Result<(), Error> {
        // temporarily disable the mount to activate access to the original file
        let memory: Box<dyn Memory> = self.unmount_node(node)?;

        let meta = self.get_metadata(node)?;
        let file_size = meta.size;

        // grow memory if needed
        grow_memory(memory.as_ref(), file_size);

        let mut remainder = file_size;

        let mut buf = [0u8; WASM_PAGE_SIZE_IN_BYTES as usize];
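        // copy the original file contents into the mounted memory in WASM page-sized blocks (64 KiB at a time)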

        let mut offset = 0;

        while remainder > 0 {
            let to_read = remainder.min(buf.len() as FileSize);

            self.read(node, offset, &mut buf[..to_read as usize])?;

            memory.write(offset, &buf[..to_read as usize]);

            offset += to_read;
            remainder -= to_read;
        }

        self.mount_node(node, memory, MountedFileSizePolicy::PreviousOrZero)?;

        self.put_metadata(node, &meta)?;

        Ok(())
    }

    fn store_mounted_memory(&mut self, node: Node) -> Result<(), Error> {
        // get the current size of the mounted memory
        let meta = self.get_metadata(node)?;
        let file_size = meta.size;

        // temporarily disable the mount to activate access to the original file
        let memory: Box<dyn Memory> = self.unmount_node(node)?;

        // grow memory if needed
        grow_memory(memory.as_ref(), file_size);

        let mut remainder = file_size;

        let mut buf = [0u8; WASM_PAGE_SIZE_IN_BYTES as usize];

        let mut offset = 0;

        while remainder > 0 {
            let to_read = remainder.min(buf.len() as FileSize);

            // grow memory also for reading
            grow_memory(memory.as_ref(), offset + to_read);

            memory.read(offset, &mut buf[..to_read as usize]);

            self.write(node, offset, &buf[..to_read as usize])?;

            offset += to_read;
            remainder -= to_read;
        }

        self.put_metadata(node, &meta)?;

        self.mount_node(node, memory, MountedFileSizePolicy::PreviousOrZero)?;

        Ok(())
    }

    fn set_chunk_size(&mut self, chunk_size: ChunkSize) -> Result<(), Error> {
        self.v2_filechunk
            .v2_allocator
            .set_chunk_size(chunk_size as usize)
    }

    fn chunk_size(&self) -> usize {
        self.v2_filechunk.v2_allocator.chunk_size()
    }

    fn set_chunk_type(&mut self, chunk_type: ChunkType) {
        self.chunk_type = chunk_type;
    }

    fn chunk_type(&self) -> ChunkType {
        self.chunk_type
    }

    fn flush(&mut self, _node: Node) {
        // nothing to flush, the system immediately stores data on write
    }
}

#[cfg(test)]
mod tests {

    use ic_stable_structures::DefaultMemoryImpl;

    use crate::storage::types::FileName;

    use super::*;

    #[test]
    fn read_and_write_filechunk() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = storage.new_node();
        storage
            .put_metadata(
                node,
                &Metadata {
                    node,
                    file_type: FileType::RegularFile,
                    link_count: 1,
                    size: 10,
                    times: Times::default(),
                    chunk_type: Some(storage.chunk_type()),
                    maximum_size_allowed: None,
                    first_dir_entry: None,
                    last_dir_entry: None,
                },
            )
            .unwrap();
        let metadata = storage.get_metadata(node).unwrap();
        assert_eq!(metadata.node, node);
        assert_eq!(metadata.file_type, FileType::RegularFile);
        assert_eq!(metadata.link_count, 1);
        storage.write(node, 0, &[42; 10]).unwrap();

        let mut buf = [0; 10];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(buf, [42; 10]);
    }

    #[test]
    fn read_and_write_direntry() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = storage.new_node();
        storage.put_direntry(
            node,
            7,
            DirEntry {
                node,
                name: FileName::new("test".as_bytes()).unwrap(),
                entry_type: None,
            },
        );
        let direntry = storage.get_direntry(node, 7).unwrap();
        assert_eq!(direntry.node, node);
        assert_eq!(
            direntry.name.bytes,
            FileName::new("test".as_bytes()).unwrap().bytes
        );
    }
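
    // Additional check (a small sketch building on the test above): `put_direntry` also
    // populates the name-to-index lookup map, and `rm_direntry` clears it again.
    #[test]
    fn direntry_lookup_by_name() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = storage.new_node();
        storage.put_direntry(
            node,
            7,
            DirEntry {
                node,
                name: FileName::new("test".as_bytes()).unwrap(),
                entry_type: None,
            },
        );

        // the lookup map resolves the name to the directory entry index
        let key = (node, FileName::new("test".as_bytes()).unwrap());
        assert_eq!(storage.get_direntry_index_by_name(&key), Some(7));

        // removing the entry also removes it from the lookup map
        storage.rm_direntry(node, 7);
        assert_eq!(storage.get_direntry_index_by_name(&key), None);
    }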

    fn new_file<M: Memory>(storage: &mut StableStorage<M>) -> Node {
        let node = storage.new_node();

        storage
            .put_metadata(
                node,
                &Metadata {
                    node,
                    file_type: FileType::RegularFile,
                    link_count: 1,
                    size: 0,
                    times: Times::default(),
                    first_dir_entry: None,
                    last_dir_entry: None,
                    chunk_type: Some(storage.chunk_type()),
                    maximum_size_allowed: None,
                },
            )
            .unwrap();

        node
    }

    #[test]
    fn read_beyond_file_size() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());

        let node = new_file(&mut storage);

        storage.write(node, 0, b"hello").unwrap();

        let mut buf = [0u8; 10];
        let bytes_read = storage.read(node, 3, &mut buf).unwrap();

        assert_eq!(bytes_read, 2);
        assert_eq!(&buf[..2], b"lo");

        assert_eq!(buf[2..], [0; 8]);
    }

    #[test]
    fn switch_chunk_types() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());

        storage.set_chunk_type(ChunkType::V1);

        let node_v1 = new_file(&mut storage);

        storage.write(node_v1, 0, b"v1_data").unwrap();

        storage.set_chunk_type(ChunkType::V2);

        let node_v2 = new_file(&mut storage);

        // Write data
        storage.write(node_v2, 0, b"v2_data").unwrap();

        // Confirm reads
        let mut buf_v1 = [0u8; 7];
        storage.read(node_v1, 0, &mut buf_v1).unwrap();
        assert_eq!(&buf_v1, b"v1_data");
        let meta = storage.get_metadata(node_v1).unwrap();
        assert_eq!(meta.chunk_type.unwrap(), ChunkType::V1);

        let mut buf_v2 = [0u8; 7];
        storage.read(node_v2, 0, &mut buf_v2).unwrap();
        assert_eq!(&buf_v2, b"v2_data");
        let meta = storage.get_metadata(node_v2).unwrap();
        assert_eq!(meta.chunk_type.unwrap(), ChunkType::V2);
    }

    #[test]
    fn resize_file_shrink_and_grow() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = new_file(&mut storage);

        storage.write(node, 0, b"1234567890").unwrap();
        let mut buf = [0u8; 10];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(&buf, b"1234567890");

        // Shrink to 5 bytes
        storage.resize_file(node, 5).unwrap();

        let meta = storage.get_metadata(node).unwrap();
        assert_eq!(meta.size, 5); // Check the metadata reflects the new size

        // Reading the file now
        let mut buf_small = [0u8; 10];
        let bytes_read = storage.read(node, 0, &mut buf_small).unwrap();
        assert_eq!(bytes_read, 5);
        assert_eq!(&buf_small[..5], b"12345");
        assert_eq!(&buf_small[5..], [0; 5]);

        // check zero fill
        let mut meta = storage.get_metadata(node).unwrap();
        meta.size = 10;
        storage.put_metadata(node, &meta).unwrap();

        // Confirm the new bytes are zeroed
        let mut buf_grow = [0u8; 10];
        storage.read(node, 0, &mut buf_grow).unwrap();
        // First 5 bytes should remain "12345", rest must be zero:
        assert_eq!(&buf_grow[..5], b"12345");
        assert_eq!(&buf_grow[5..], [0; 5]);
    }

    #[test]
    fn resize_file_shrink_deletes_v2_chunks() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        let node = new_file(&mut storage);

        let chunk_size = storage.chunk_size() as FileSize;

        // write something into the second chunk
        storage.write(node, chunk_size + 4, b"1234567890").unwrap();

        // write something into the first chunk
        storage.write(node, 4, b"1234567890").unwrap();

        let mut buf = [0u8; 10];
        // read second chunk
        storage.read(node, chunk_size + 9, &mut buf).unwrap();

        assert_eq!(&buf, b"67890\0\0\0\0\0");

        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, 5))
            .collect();
        assert_eq!(chunks.len(), 2);

        // Shrink to 5 bytes
        storage.resize_file(node, 5).unwrap();

        let meta = storage.get_metadata(node).unwrap();

        // only one chunk should be present
        assert_eq!(meta.size, 5); // Check the metadata reflects new size

        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, 5))
            .collect();
        assert_eq!(chunks.len(), 1);

        // for 0 size, all chunks have to be deleted
        storage.resize_file(node, 0).unwrap();
        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, 5))
            .collect();
        assert_eq!(chunks.len(), 0);
    }

    #[test]
    fn resize_file_shrink_deletes_v1_chunks() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        storage.set_chunk_type(ChunkType::V1);

        let node = new_file(&mut storage);

        let chunk_size = FILE_CHUNK_SIZE_V1 as FileSize;

        // write something into the second chunk
        storage.write(node, chunk_size + 4, b"1234567890").unwrap();

        // write something into the first chunk
        storage.write(node, 4, b"1234567890").unwrap();

        let mut buf = [0u8; 10];
        // read second chunk
        storage.read(node, chunk_size + 9, &mut buf).unwrap();

        assert_eq!(&buf, b"67890\0\0\0\0\0");

        let chunks: Vec<_> = storage.filechunk.range((node, 0)..(node, 5)).collect();
        assert_eq!(chunks.len(), 2);

        // Shrink to 5 bytes
        storage.resize_file(node, 5).unwrap();

        let meta = storage.get_metadata(node).unwrap();

        // only one chunk should be present
        assert_eq!(meta.size, 5); // Check the metadata reflects new size

        let chunks: Vec<_> = storage.filechunk.range((node, 0)..(node, 5)).collect();
        assert_eq!(chunks.len(), 1);

        // for 0 size, all chunks have to be deleted
        storage.resize_file(node, 0).unwrap();
        let chunks: Vec<_> = storage.filechunk.range((node, 0)..(node, 5)).collect();
        assert_eq!(chunks.len(), 0);
    }

    #[test]
    fn remove_file_chunks_v2() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());

        let chunk_size = storage.chunk_size() as FileSize;

        // some other files present
        let node_other = new_file(&mut storage);
        storage.write(node_other, 0, b"some data").unwrap();

        let node = new_file(&mut storage);

        // write into 5 chunks + 1 metadata chunk, expect to find 6 chunks
        storage.write(node, 0, b"some data").unwrap();
        storage.write(node, chunk_size, b"some data").unwrap();
        storage.write(node, chunk_size * 2, b"some data").unwrap();
        // write into two chunks with one call
        storage
            .write(node, chunk_size * 5 - 2, b"some data")
            .unwrap();

        // some other files present
        let node_other2 = new_file(&mut storage);
        storage.write(node_other2, 0, b"some data").unwrap();

        // check chunk count
        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, u32::MAX))
            .collect();

        // chunks of the given node
        assert_eq!(chunks.len(), 6);

        // check the allocator is also holding 6 chunks of the main file, and 4 chunks from the two other files
        assert_eq!(
            storage.v2_filechunk.v2_allocator.get_current_max_ptr(),
            (6 + 4) * chunk_size
        );

        // Remove file
        storage.rm_file(node).unwrap();

        // Confirm reading fails or returns NotFound
        let mut buf = [0u8; 9];
        let res = storage.read(node, 0, &mut buf);
        assert!(matches!(res, Err(Error::NoSuchFileOrDirectory)));

        // Confirm metadata is removed
        let meta_res = storage.get_metadata(node);
        assert!(matches!(meta_res, Err(Error::NoSuchFileOrDirectory)));

        // check there are no chunks left after deleting the node
        let chunks: Vec<_> = storage
            .v2_filechunk
            .v2_chunk_ptr
            .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
            .collect();

        assert_eq!(chunks.len(), 0);
    }

    #[test]
    fn remove_file_chunks_v1() {
        let mut storage = StableStorage::new(DefaultMemoryImpl::default());
        storage.set_chunk_type(ChunkType::V1);

        let chunk_size = FILE_CHUNK_SIZE_V1 as FileSize;

        // some other files present
        let node_other = new_file(&mut storage);
        storage.write(node_other, 0, b"some data").unwrap();

        let node = new_file(&mut storage);

        // write into 5 chunks
        storage.write(node, 0, b"some data").unwrap();
        storage.write(node, chunk_size, b"some data").unwrap();
        storage.write(node, chunk_size * 2, b"some data").unwrap();
        // write into two chunks with one call
        storage
            .write(node, chunk_size * 5 - 2, b"some data")
            .unwrap();

        // some other files present
        let node_other2 = new_file(&mut storage);
        storage.write(node_other2, 0, b"some data").unwrap();

        // check chunk count
        let chunks: Vec<_> = storage
            .filechunk
            .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
            .collect();

        // chunks of the given node
        assert_eq!(chunks.len(), 5);

        // check the allocator is holding three chunks for 3 stored metadata
        assert_eq!(
            storage.v2_filechunk.v2_allocator.get_current_max_ptr(),
            storage.chunk_size() as FileSize * 3
        );

        // Remove file
        storage.rm_file(node).unwrap();

        // Confirm reading fails or returns NotFound
        let mut buf = [0u8; 9];
        let res = storage.read(node, 0, &mut buf);
        assert!(matches!(res, Err(Error::NoSuchFileOrDirectory)));

        // Confirm metadata is removed
        let meta_res = storage.get_metadata(node);
        assert!(matches!(meta_res, Err(Error::NoSuchFileOrDirectory)));

        // check there are no chunks left after deleting the node
        let chunks: Vec<_> = storage
            .filechunk
            .range((node, 0)..(node, MAX_FILE_CHUNK_COUNT))
            .collect();

        assert_eq!(chunks.len(), 0);
    }
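
    // Usage sketch (not part of the original test suite): the storage can share a MemoryManager
    // with other stable structures by reserving a custom range of memory indices. The range
    // below is an arbitrary example; it only has to provide MEMORY_INDEX_COUNT consecutive
    // indices and stay within MAX_MEMORY_INDEX.
    #[test]
    fn storage_with_custom_memory_index_range() {
        let memory_manager = MemoryManager::init(DefaultMemoryImpl::default());

        // reserve indices 200..210 (10 indices, one per StorageMemoryIdx entry)
        let mut storage = StableStorage::new_with_memory_manager(&memory_manager, 200..210);

        let node = new_file(&mut storage);
        storage.write(node, 0, b"hello").unwrap();

        let mut buf = [0u8; 5];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(&buf, b"hello");
    }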
}