stable_fs/storage/transient.rs

use std::collections::{BTreeMap, HashMap};

use ic_cdk::stable::WASM_PAGE_SIZE_IN_BYTES;
use ic_stable_structures::Memory;

use crate::{
    error::Error,
    fs::{ChunkSize, ChunkType},
    runtime::structure_helpers::{get_chunk_infos, grow_memory},
    storage::{
        Storage,
        types::{
            DirEntry, DirEntryIndex, FileChunk, FileChunkIndex, FileName, FileSize, FileType,
            Metadata, MountedFileSizePolicy, Node, Times, ZEROES,
        },
    },
};

use super::types::{
    DUMMY_DOT_DOT_ENTRY, DUMMY_DOT_DOT_ENTRY_INDEX, DUMMY_DOT_ENTRY, DUMMY_DOT_ENTRY_INDEX,
    FILE_CHUNK_SIZE_V1, Header, MAX_FILE_CHUNK_COUNT, MAX_FILE_ENTRY_INDEX,
};

// The root node ID.
const ROOT_NODE: Node = 0;

const FS_TRANSIENT_VERSION: u32 = 1;

// Transient storage representation.
#[derive(Default)]
pub struct TransientStorage {
    header: Header,
    // Node metadata information.
    metadata: BTreeMap<Node, Metadata>,
    // Directory entries for each directory node.
    direntry: BTreeMap<(Node, DirEntryIndex), DirEntry>,

    // Quick lookup of directory entries by name.
    direntry_lookup: BTreeMap<(Node, FileName), DirEntryIndex>,

    // File contents for each file node.
    filechunk: BTreeMap<(Node, FileChunkIndex), FileChunk>,
    // Metadata for mounted memory nodes.
    mounted_meta: BTreeMap<Node, Metadata>,
    // Active mounts.
    active_mounts: HashMap<Node, Box<dyn Memory>>,
}

impl TransientStorage {
    // Initializes a new TransientStorage.
    pub fn new() -> Self {
        let metadata = Metadata {
            node: ROOT_NODE,
            file_type: FileType::Directory,
            link_count: 1,
            size: 0,
            times: Times::default(),
            chunk_type: None,
            maximum_size_allowed: None,
            first_dir_entry: None,
            last_dir_entry: None,
        };

        let mut result = Self {
            header: Header {
                version: 1,
                next_node: ROOT_NODE + 1,
            },
            metadata: Default::default(),
            direntry: Default::default(),
            filechunk: Default::default(),
            mounted_meta: Default::default(),
            active_mounts: Default::default(),
            direntry_lookup: Default::default(),
        };

        result
            .put_metadata(ROOT_NODE, &metadata)
            .expect("Failed to create metadata");

        result
    }

    // Insert or update the selected file chunk with the data provided in the buffer.
    fn write_filechunk(&mut self, node: Node, index: FileChunkIndex, offset: FileSize, buf: &[u8]) {
        if let Some(memory) = self.get_mounted_memory(node) {
            // grow memory if needed
            let max_address = index as FileSize * FILE_CHUNK_SIZE_V1 as FileSize
                + offset as FileSize
                + buf.len() as FileSize;

            grow_memory(memory, max_address);

            // store data
            let address = index as FileSize * FILE_CHUNK_SIZE_V1 as FileSize + offset as FileSize;
            memory.write(address, buf);
        } else {
            let entry = self.filechunk.entry((node, index)).or_default();
            entry.bytes[offset as usize..offset as usize + buf.len()].copy_from_slice(buf)
        }
    }
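
    // A worked example of the mounted-memory addressing above (a sketch, assuming the
    // usual FILE_CHUNK_SIZE_V1 of 4096 bytes): writing 10 bytes at chunk index 2 with an
    // in-chunk offset of 100 targets linear address 2 * 4096 + 100 = 8292, and the memory
    // is grown, if needed, to cover addresses up to 8292 + 10 = 8302 before the write.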

    fn validate_metadata_update(
        old_meta: Option<&Metadata>,
        new_meta: &Metadata,
    ) -> Result<(), Error> {
        if let Some(max_size) = new_meta.maximum_size_allowed
            && new_meta.size > max_size
        {
            return Err(Error::FileTooLarge);
        }

        if let Some(old_meta) = old_meta
            && old_meta.node != new_meta.node
        {
            return Err(Error::InvalidArgument);
        }

        Ok(())
    }
}

impl Storage for TransientStorage {
    // Get the root node ID of the storage.
    fn root_node(&self) -> Node {
        ROOT_NODE
    }

    // Get the version of the file system.
    fn get_version(&self) -> u32 {
        FS_TRANSIENT_VERSION
    }

    // Generate the next available node ID.
    fn new_node(&mut self) -> Node {
        let result = self.header.next_node;
        self.header.next_node += 1;
        result
    }

    // Get the metadata associated with the node.
    fn get_metadata(&self, node: Node) -> Result<Metadata, Error> {
        let meta = if self.is_mounted(node) {
            self.mounted_meta
                .get(&node)
                .ok_or(Error::NoSuchFileOrDirectory)?
        } else {
            self.metadata
                .get(&node)
                .ok_or(Error::NoSuchFileOrDirectory)?
        };

        Ok(meta.clone())
    }

    // Update the metadata associated with the node.
    fn put_metadata(&mut self, node: Node, metadata: &Metadata) -> Result<(), Error> {
        let meta_storage = if self.is_mounted(node) {
            &mut self.mounted_meta
        } else {
            &mut self.metadata
        };

        Self::validate_metadata_update(meta_storage.get(&node), metadata)?;

        meta_storage.insert(node, metadata.clone());

        Ok(())
    }

    // Retrieve the DirEntry instance given the Node and DirEntryIndex.
    fn get_direntry(&self, node: Node, index: DirEntryIndex) -> Result<DirEntry, Error> {
        let value = self
            .direntry
            .get(&(node, index))
            .ok_or(Error::NoSuchFileOrDirectory)?;
        Ok(value.clone())
    }

    fn get_direntry_index_by_name(&self, entry: &(Node, FileName)) -> Option<DirEntryIndex> {
        self.direntry_lookup.get(entry).copied()
    }

    // Update or insert the DirEntry instance given the Node and DirEntryIndex.
    fn put_direntry(&mut self, node: Node, index: DirEntryIndex, entry: DirEntry) {
        // keep the name-based lookup in sync with the directory entries
        self.direntry_lookup
            .insert((node, entry.name.clone()), index);
        self.direntry.insert((node, index), entry);
    }

    // Remove the DirEntry instance given the Node and DirEntryIndex.
    fn rm_direntry(&mut self, node: Node, index: DirEntryIndex) {
        if let Some(entry) = self.direntry.remove(&(node, index)) {
            self.direntry_lookup.remove(&(node, entry.name));
        }
    }

    // Read file data into the buffer, starting at the given offset.
    fn read(&mut self, node: Node, offset: FileSize, buf: &mut [u8]) -> Result<FileSize, Error> {
        let file_size = self.get_metadata(node)?.size;

        if offset >= file_size {
            return Ok(0);
        }

        let size_read = if let Some(memory) = self.active_mounts.get(&node) {
            let remainder = file_size - offset;
            let to_read = remainder.min(buf.len() as FileSize);

            // grow memory also for reading
            grow_memory(memory.as_ref(), offset + to_read);

            memory.read(offset, &mut buf[..to_read as usize]);
            to_read
        } else {
            let start_index = (offset / FILE_CHUNK_SIZE_V1 as FileSize) as FileChunkIndex;

            let end_index = ((offset + buf.len() as FileSize) / FILE_CHUNK_SIZE_V1 as FileSize + 1)
                as FileChunkIndex;

            let mut chunk_offset =
                offset - start_index as FileSize * FILE_CHUNK_SIZE_V1 as FileSize;

            let range = (node, start_index)..(node, MAX_FILE_CHUNK_COUNT);

            let mut size_read: FileSize = 0;
            let mut remainder = file_size - offset;

            let mut iter = self.filechunk.range(range);
            let mut cur_fetched = None;

            for cur_index in start_index..end_index {
                let chunk_space = FILE_CHUNK_SIZE_V1 as FileSize - chunk_offset;

                let to_read = remainder
                    .min(chunk_space)
                    .min(buf.len() as FileSize - size_read);

                // finished reading, buffer full
                if size_read == buf.len() as FileSize {
                    break;
                }

                if cur_fetched.is_none() {
                    cur_fetched = iter.next();
                }

                let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];

                if let Some(((nd, idx), value)) = cur_fetched {
                    if *idx == cur_index {
                        assert!(*nd == node);

                        read_buf.copy_from_slice(
                            &value.bytes
                                [chunk_offset as usize..chunk_offset as usize + to_read as usize],
                        );

                        // consume the fetched chunk
                        cur_fetched = None;
                    } else {
                        // missing chunk (sparse hole): fill with zeroes
                        read_buf.iter_mut().for_each(|m| *m = 0)
                    }
                } else {
                    // missing chunk (sparse hole): fill with zeroes
                    read_buf.iter_mut().for_each(|m| *m = 0)
                }

                chunk_offset = 0;
                size_read += to_read;
                remainder -= to_read;
            }

            size_read
        };

        Ok(size_read)
    }

    fn resize_file(&mut self, node: Node, new_size: FileSize) -> Result<(), Error> {
        let chunk_size = FILE_CHUNK_SIZE_V1;

        // the first chunk index that lies entirely above the new file size
        let first_deletable_index = (new_size.div_ceil(chunk_size as FileSize)) as FileChunkIndex;

        // delete the v1 chunks above the new file size
        let range = (node, first_deletable_index)..(node, MAX_FILE_CHUNK_COUNT);

        let mut chunks: Vec<(Node, FileChunkIndex)> = Vec::new();
        for (k, _v) in self.filechunk.range(range) {
            chunks.push((k.0, k.1));
        }

        for (nd, idx) in chunks.into_iter() {
            assert!(nd == node);
            self.filechunk.remove(&(node, idx));
        }

        // zero the tail of the last kept chunk above the new file size
        let tail_offset = new_size % chunk_size as FileSize;

        if first_deletable_index > 0 && tail_offset > 0 {
            self.write_filechunk(
                node,
                first_deletable_index - 1,
                tail_offset,
                &ZEROES[0..(chunk_size - tail_offset as usize)],
            );
        }

        Ok(())
    }
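
    // For example (assuming FILE_CHUNK_SIZE_V1 is 4096 bytes): resizing a file down to
    // 5000 bytes keeps chunks 0 and 1, removes chunk 2 and above, and zeroes bytes
    // 904..4096 of chunk 1 so that stale bytes are not exposed if the file grows again.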

    fn rm_file(&mut self, node: Node) -> Result<(), Error> {
        if self.is_mounted(node) {
            return Err(Error::DeviceOrResourceBusy);
        }

        self.resize_file(node, 0)?;

        // remove metadata
        self.mounted_meta.remove(&node);
        self.metadata.remove(&node);

        Ok(())
    }

    fn mount_node(
        &mut self,
        node: Node,
        memory: Box<dyn Memory>,
        policy: MountedFileSizePolicy,
    ) -> Result<(), Error> {
        if self.is_mounted(node) {
            return Err(Error::DeviceOrResourceBusy);
        }

        // fetch the file metadata (the node is not mounted yet, so this reads the regular metadata)
        let mut file_meta = self.get_metadata(node)?;

        let memory_size = memory.size();

        // activate the mount; from this point on the mounted metadata is used
        self.active_mounts.insert(node, memory);

        // reuse the old mounted metadata (and its size) if it exists
        let old_size = if let Ok(old_mounted_meta) = self.get_metadata(node) {
            let size = old_mounted_meta.size;
            file_meta = old_mounted_meta;
            Some(size)
        } else {
            None
        };

        let new_size = policy.get_mounted_file_size(old_size, memory_size);

        file_meta.size = new_size;

        // update the mounted metadata
        self.put_metadata(node, &file_meta)?;

        Ok(())
    }

    fn unmount_node(&mut self, node: Node) -> Result<Box<dyn Memory>, Error> {
        let memory = self.active_mounts.remove(&node);

        memory.ok_or(Error::NoSuchDevice)
    }

    fn is_mounted(&self, node: Node) -> bool {
        self.active_mounts.contains_key(&node)
    }

    fn get_mounted_memory(&self, node: Node) -> Option<&dyn Memory> {
        let res = self.active_mounts.get(&node);

        res.map(|b| b.as_ref())
    }

    fn init_mounted_memory(&mut self, node: Node) -> Result<(), Error> {
        // temporarily disable the mount to access the original file contents
        let memory: Box<dyn Memory> = self.unmount_node(node)?;

        let meta = self.get_metadata(node)?;
        let file_size = meta.size;

        // grow memory if needed
        grow_memory(memory.as_ref(), file_size);

        let mut remainder = file_size;

        let mut buf = [0u8; WASM_PAGE_SIZE_IN_BYTES as usize];

        let mut offset = 0;

        while remainder > 0 {
            let to_read = remainder.min(buf.len() as FileSize);

            self.read(node, offset, &mut buf[..to_read as usize])?;

            memory.write(offset, &buf[..to_read as usize]);

            offset += to_read;
            remainder -= to_read;
        }

        self.mount_node(node, memory, MountedFileSizePolicy::PreviousOrZero)?;

        self.put_metadata(node, &meta)?;

        Ok(())
    }

    fn store_mounted_memory(&mut self, node: Node) -> Result<(), Error> {
        // get the current size of the mounted file
        let meta = self.get_metadata(node)?;
        let file_size = meta.size;

        // temporarily disable the mount to access the original file contents
        let memory: Box<dyn Memory> = self.unmount_node(node)?;

        // grow memory if needed
        grow_memory(memory.as_ref(), file_size);

        let mut remainder = file_size;

        let mut buf = [0u8; WASM_PAGE_SIZE_IN_BYTES as usize];

        let mut offset = 0;

        while remainder > 0 {
            let to_read = remainder.min(buf.len() as FileSize);

            memory.read(offset, &mut buf[..to_read as usize]);

            self.write(node, offset, &buf[..to_read as usize])?;

            offset += to_read;
            remainder -= to_read;
        }

        self.put_metadata(node, &meta)?;

        self.mount_node(node, memory, MountedFileSizePolicy::PreviousOrZero)?;

        Ok(())
    }

    fn write(&mut self, node: Node, offset: FileSize, buf: &[u8]) -> Result<FileSize, Error> {
        let mut metadata = self.get_metadata(node)?;

        // do not attempt to write 0 bytes, to avoid resizing the file when writing above its size
        if buf.is_empty() {
            return Ok(0);
        }

        let end = offset + buf.len() as FileSize;

        if let Some(max_size) = metadata.maximum_size_allowed
            && end > max_size
        {
            return Err(Error::FileTooLarge);
        }

        let chunk_infos = get_chunk_infos(offset, end, FILE_CHUNK_SIZE_V1);
        let mut written_size = 0;
        for chunk in chunk_infos.into_iter() {
            self.write_filechunk(
                node,
                chunk.index,
                chunk.offset,
                &buf[written_size..written_size + chunk.len as usize],
            );
            written_size += chunk.len as usize;
        }

        if end > metadata.size {
            metadata.size = end;
            self.put_metadata(node, &metadata)?;
        }

        Ok(written_size as FileSize)
    }
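
    // A sketch of how a boundary-crossing write is expected to be split by `get_chunk_infos`
    // above (assuming FILE_CHUNK_SIZE_V1 is 4096 bytes): writing 10 bytes at file offset 4090
    // should produce two chunk descriptors, roughly { index: 0, offset: 4090, len: 6 } and
    // { index: 1, offset: 0, len: 4 }, which are then applied in order by write_filechunk.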

    fn set_chunk_size(&mut self, _chunk_size: ChunkSize) -> Result<(), Error> {
        // Noop
        Ok(())
    }

    fn chunk_size(&self) -> usize {
        FILE_CHUNK_SIZE_V1
    }

    fn set_chunk_type(&mut self, _chunk_type: ChunkType) {
        // Noop
    }

    fn chunk_type(&self) -> ChunkType {
        ChunkType::V1
    }

    fn flush(&mut self, _node: Node) {
        // Noop
    }

    fn new_direntry_index(&self, node: Node) -> DirEntryIndex {
        let start = (node, 0);
        let end = (node, u32::MAX);

        // iterate over that range and take the last element
        let last = self.direntry.range(start..=end).next_back();

        if let Some(l) = last {
            let key = l.0;
            if key.1 == u32::MAX {
                panic!("Cannot insert a new directory entry, the directory is full!");
            }

            return key.1 + 1;
        }

        // empty directory, return 1 as the first index
        1
    }

    fn with_direntries(
        &self,
        node: Node,
        initial_index: Option<DirEntryIndex>,
        f: &mut dyn FnMut(&DirEntryIndex, &DirEntry) -> bool,
    ) {
        if initial_index.is_none() {
            let mut dot_entry = DUMMY_DOT_ENTRY;
            dot_entry.1.node = node;
            if !f(&dot_entry.0, &dot_entry.1) {
                return;
            }

            if !f(&DUMMY_DOT_DOT_ENTRY.0, &DUMMY_DOT_DOT_ENTRY.1) {
                return;
            }
        }

        let initial_index = initial_index.unwrap_or(0);

        if initial_index == DUMMY_DOT_ENTRY_INDEX {
            let mut dot_entry = DUMMY_DOT_ENTRY;
            dot_entry.1.node = node;

            if !f(&dot_entry.0, &dot_entry.1) {
                return;
            }

            if !f(&DUMMY_DOT_DOT_ENTRY.0, &DUMMY_DOT_DOT_ENTRY.1) {
                return;
            }
        }

        if initial_index == DUMMY_DOT_DOT_ENTRY_INDEX
            && !f(&DUMMY_DOT_DOT_ENTRY.0, &DUMMY_DOT_DOT_ENTRY.1)
        {
            return;
        }

        let max_index = MAX_FILE_ENTRY_INDEX;

        for ((_node, index), entry) in self
            .direntry
            .range((node, initial_index)..(node, max_index))
        {
            if !f(index, entry) {
                return;
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

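    // A mount/unmount round-trip sketch. It assumes `ic_stable_structures::VectorMemory`
    // (the crate's simple in-memory `Memory` implementation) is available here, and that
    // `MountedFileSizePolicy::PreviousOrZero` starts a freshly mounted file at size 0.
    #[test]
    fn mounted_memory_write_read_roundtrip() {
        use ic_stable_structures::VectorMemory;

        let mut storage = TransientStorage::default();
        let node = storage.new_node();
        storage
            .put_metadata(
                node,
                &Metadata {
                    node,
                    file_type: FileType::RegularFile,
                    link_count: 1,
                    size: 0,
                    times: Times::default(),
                    chunk_type: Some(storage.chunk_type()),
                    maximum_size_allowed: None,
                    first_dir_entry: None,
                    last_dir_entry: None,
                },
            )
            .unwrap();

        // mount an in-memory region and write/read through it
        storage
            .mount_node(
                node,
                Box::new(VectorMemory::default()),
                MountedFileSizePolicy::PreviousOrZero,
            )
            .unwrap();
        storage.write(node, 0, &[7; 5]).unwrap();

        let mut buf = [0; 5];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(buf, [7; 5]);

        // unmounting returns the backing memory; a second unmount must fail
        let _memory = storage.unmount_node(node).unwrap();
        assert!(storage.unmount_node(node).is_err());
    }
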
    #[test]
    fn read_and_write_filechunk() {
        let mut storage = TransientStorage::default();
        let node = storage.new_node();
        storage
            .put_metadata(
                node,
                &Metadata {
                    node,
                    file_type: FileType::RegularFile,
                    link_count: 1,
                    size: 10,
                    times: Times::default(),
                    chunk_type: Some(storage.chunk_type()),
                    maximum_size_allowed: None,
                    first_dir_entry: None,
                    last_dir_entry: None,
                },
            )
            .unwrap();
        storage.write(node, 0, &[42; 10]).unwrap();
        let mut buf = [0; 10];
        storage.read(node, 0, &mut buf).unwrap();
        assert_eq!(buf, [42; 10]);
    }
609}