syscall/dirent.rs

use core::{
    mem::size_of,
    ops::{Deref, DerefMut},
    slice,
};

use crate::error::{Error, Result, EINVAL, ENAMETOOLONG};

#[derive(Clone, Copy, Debug, Default)]
#[repr(packed)]
pub struct DirentHeader {
    pub inode: u64,
    /// A filesystem-specific opaque value used to uniquely identify directory entries. The value
    /// of this field in the last entry returned by a SYS_GETDENTS invocation shall be passed to
    /// the next call (see the resumption sketch after the `Iterator` impl below).
    pub next_opaque_id: u64,
    // This struct intentionally does not include a "next" offset field, unlike Linux, to easily
    // guarantee the iterator will be reasonably deterministic, even if the scheme is adversarial.
    pub record_len: u16,
    /// A `DirentKind`.
    ///
    /// May not be directly available (`Unspecified`), in which case it needs to be looked up
    /// using fstat.
    pub kind: u8,
}

impl Deref for DirentHeader {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        // Safety: DirentHeader is repr(packed), so it has no padding bytes, and all fields are
        // plain integers, making a byte view of it sound.
        unsafe { slice::from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }
}

impl DerefMut for DirentHeader {
    fn deref_mut(&mut self) -> &mut [u8] {
        // Safety: as above; additionally, every byte pattern is a valid DirentHeader, so
        // mutation through the byte view cannot produce an invalid value.
        unsafe { slice::from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }
}

// Note: Must match relibc/include/bits/dirent.h. The discriminants follow the conventional
// d_type DT_* values (DT_CHR = 2, DT_DIR = 4, DT_BLK = 6, DT_REG = 8, DT_LNK = 10,
// DT_SOCK = 12).
#[derive(Clone, Copy, Debug, Default)]
#[repr(u8)]
pub enum DirentKind {
    #[default]
    Unspecified = 0,

    CharDev = 2,
    Directory = 4,
    BlockDev = 6,
    Regular = 8,
    Symlink = 10,
    Socket = 12,
}

impl DirentKind {
    // TODO: derive(FromPrimitive)
    pub fn try_from_raw(raw: u8) -> Option<Self> {
        Some(match raw {
            0 => Self::Unspecified,

            2 => Self::CharDev,
            4 => Self::Directory,
            6 => Self::BlockDev,
            8 => Self::Regular,
            10 => Self::Symlink,
            12 => Self::Socket,

            _ => return None,
        })
    }
}

pub struct DirentIter<'a>(&'a [u8]);

impl<'a> DirentIter<'a> {
    pub const fn new(buffer: &'a [u8]) -> Self {
        Self(buffer)
    }
}
#[derive(Debug)]
pub struct Invalid;

impl<'a> Iterator for DirentIter<'a> {
    type Item = Result<(&'a DirentHeader, &'a [u8]), Invalid>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.0.len() < size_of::<DirentHeader>() {
            return None;
        }
        // Safety: DirentHeader is repr(packed) (alignment 1) and every byte pattern is valid for
        // it, so it can be read at any offset once the length check above has passed.
        let header = unsafe { &*(self.0.as_ptr().cast::<DirentHeader>()) };
        let record_len = usize::from(header.record_len);
        // A record must be large enough to hold at least the header and the name's NUL
        // terminator, and must not extend past the buffer; reject anything else as malformed
        // (possibly adversarial). Without the lower bound, a record_len smaller than the header
        // would panic on the slicing below.
        if record_len < size_of::<DirentHeader>() + 1 || record_len > self.0.len() {
            return Some(Err(Invalid));
        }
        let (this, remaining) = self.0.split_at(record_len);
        self.0 = remaining;

        let name_and_nul = &this[size_of::<DirentHeader>()..];
        let name = &name_and_nul[..name_and_nul.len() - 1];

        Some(Ok((header, name)))
    }
}
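
// Illustrative sketch (not part of the crate API): how a consumer might drive SYS_GETDENTS
// using `next_opaque_id`. The `read_all_entries` name and the `getdents` closure are
// hypothetical stand-ins for the actual syscall wrapper, and starting from opaque id 0 is an
// assumption here; only the resumption protocol is the point.
#[allow(dead_code)]
fn read_all_entries<F>(mut getdents: F, buf: &mut [u8]) -> Result<()>
where
    // Hypothetical wrapper: fills `buf` with entries starting after `opaque`, returning the
    // number of bytes written, or 0 once the directory is exhausted.
    F: FnMut(&mut [u8], u64) -> Result<usize>,
{
    let mut opaque = 0;
    loop {
        let filled = getdents(&mut *buf, opaque)?;
        if filled == 0 {
            return Ok(());
        }
        for item in DirentIter::new(&buf[..filled]) {
            let (header, name_bytes) = item.map_err(|_| Error::new(EINVAL))?;
            // Fall back to fstat if the scheme reported Unspecified (fallback not shown here).
            let _kind = DirentKind::try_from_raw(header.kind).unwrap_or_default();
            let _name = name_bytes; // the iterator has already stripped the trailing NUL
            // Resume the next SYS_GETDENTS call after the last entry we saw.
            opaque = header.next_opaque_id;
        }
    }
}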

#[derive(Debug)]
pub struct DirentBuf<B> {
    buffer: B,

    // Exists in order to allow future extensions to the DirentHeader struct.
    //
    // TODO: Might add an upper bound to protect against cache-miss DoS. The kernel currently
    // forbids any value other than size_of::<DirentHeader>().
    header_size: u16,

    written: usize,
}
/// Abstraction between `&mut [u8]` and the kernel's `UserSliceWo`.
pub trait Buffer<'a>: Sized + 'a {
    fn empty() -> Self;
    fn length(&self) -> usize;

    /// Split all of `self` into two disjoint contiguous subbuffers of lengths `index` and
    /// `length - index`, respectively.
    ///
    /// Returns None if and only if `index > length`.
    fn split_at(self, index: usize) -> Option<[Self; 2]>;

    /// Copy from `src`; the lengths must match exactly.
    ///
    /// Allowed to overwrite subsequent buffer space, for performance reasons. Can be changed in
    /// the future if too restrictive.
    fn copy_from_slice_exact(self, src: &[u8]) -> Result<()>;

    /// Write zeroes to this part of the buffer.
    ///
    /// Allowed to overwrite subsequent buffer space, for performance reasons. Can be changed in
    /// the future if too restrictive.
    fn zero_out(self) -> Result<()>;
}
impl<'a> Buffer<'a> for &'a mut [u8] {
    fn empty() -> Self {
        &mut []
    }
    fn length(&self) -> usize {
        self.len()
    }

    fn split_at(self, index: usize) -> Option<[Self; 2]> {
        self.split_at_mut_checked(index).map(|(a, b)| [a, b])
    }
    fn copy_from_slice_exact(self, src: &[u8]) -> Result<()> {
        self.copy_from_slice(src);
        Ok(())
    }
    fn zero_out(self) -> Result<()> {
        self.fill(0);
        Ok(())
    }
}
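
// Illustrative sketch (not part of the crate API): `Buffer` exists so the same `DirentBuf`
// writer can target both `&mut [u8]` and the kernel's `UserSliceWo`. As a third, hypothetical
// instance, a buffer that merely *measures* can compute how many bytes a set of entries would
// need, without storing anything. `SizingBuffer` is an invented name for this example.
#[allow(dead_code)]
struct SizingBuffer(usize);

impl Buffer<'static> for SizingBuffer {
    fn empty() -> Self {
        SizingBuffer(0)
    }
    fn length(&self) -> usize {
        self.0
    }
    fn split_at(self, index: usize) -> Option<[Self; 2]> {
        // Mirrors the slice impl: fails iff index > length.
        let rest = self.0.checked_sub(index)?;
        Some([SizingBuffer(index), SizingBuffer(rest)])
    }
    fn copy_from_slice_exact(self, src: &[u8]) -> Result<()> {
        debug_assert_eq!(self.0, src.len());
        Ok(())
    }
    fn zero_out(self) -> Result<()> {
        Ok(())
    }
}
// Usage (hypothetical): `DirentBuf::new(SizingBuffer(usize::MAX), header_size)` accepts the
// same `entry` calls as a real buffer, and `finalize` then reports the byte count required.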

pub struct DirEntry<'name> {
    pub inode: u64,
    pub next_opaque_id: u64,
    pub name: &'name str,
    pub kind: DirentKind,
}

impl<'a, B: Buffer<'a>> DirentBuf<B> {
    pub fn new(buffer: B, header_size: u16) -> Option<Self> {
        if usize::from(header_size) < size_of::<DirentHeader>() {
            return None;
        }

        Some(Self {
            buffer,
            header_size,
            written: 0,
        })
    }
    pub fn entry(&mut self, entry: DirEntry<'_>) -> Result<()> {
        let name16 = u16::try_from(entry.name.len()).map_err(|_| Error::new(EINVAL))?;
        let record_len = self
            .header_size
            .checked_add(name16)
            // XXX: NUL byte. Unfortunately this is probably the only performant way to be
            // compatible with C.
            .and_then(|l| l.checked_add(1))
            .ok_or(Error::new(ENAMETOOLONG))?;

        let [this, remaining] = core::mem::replace(&mut self.buffer, B::empty())
            .split_at(usize::from(record_len))
            .ok_or(Error::new(EINVAL))?;

        let [this_header_variable, this_name_and_nul] = this
            .split_at(usize::from(self.header_size))
            .expect("already know record_len >= header_size");

        let [this_name, this_name_nul] = this_name_and_nul
            .split_at(usize::from(name16))
            .expect("already know name.len() <= name.len() + 1");

        // Every write here is currently sequential, allowing the buffer trait to do optimizations
        // where subbuffer writes are out-of-bounds (but inside the total buffer).

        let [this_header, this_header_extra] = this_header_variable
            .split_at(size_of::<DirentHeader>())
            .expect("already checked size_of::<DirentHeader>() <= header_size in new()");

        this_header.copy_from_slice_exact(&DirentHeader {
            record_len,
            next_opaque_id: entry.next_opaque_id,
            inode: entry.inode,
            kind: entry.kind as u8,
        })?;
        this_header_extra.zero_out()?;
        this_name.copy_from_slice_exact(entry.name.as_bytes())?;
        this_name_nul.copy_from_slice_exact(&[0])?;

        self.written += usize::from(record_len);
        self.buffer = remaining;

        Ok(())
    }
    pub fn finalize(self) -> usize {
        self.written
    }
}
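
// A minimal self-check (illustrative; the upstream crate may organize its tests differently):
// entries written through `DirentBuf` should round-trip through `DirentIter`, with the default
// header size of size_of::<DirentHeader>().
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn roundtrip() {
        let mut storage = [0_u8; 128];
        let header_size = size_of::<DirentHeader>() as u16;

        let mut buf = DirentBuf::new(&mut storage[..], header_size).unwrap();
        buf.entry(DirEntry {
            inode: 1,
            next_opaque_id: 42,
            name: "foo",
            kind: DirentKind::Regular,
        })
        .unwrap();
        buf.entry(DirEntry {
            inode: 2,
            next_opaque_id: 43,
            name: "bar",
            kind: DirentKind::Directory,
        })
        .unwrap();
        let written = buf.finalize();

        let mut names = [""; 2];
        let mut count = 0;
        for item in DirentIter::new(&storage[..written]) {
            let (header, name) = item.unwrap();
            // record_len covers the header, the name, and the NUL terminator.
            let record_len = usize::from(header.record_len);
            assert_eq!(record_len, size_of::<DirentHeader>() + name.len() + 1);
            names[count] = core::str::from_utf8(name).unwrap();
            count += 1;
        }
        assert_eq!(count, 2);
        assert_eq!(names, ["foo", "bar"]);
    }
}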