syscall/dirent.rs

use core::{
    mem::size_of,
    ops::{Deref, DerefMut},
    slice,
};

use crate::{
    error::{Error, Result, EINVAL},
    ENAMETOOLONG,
};

#[derive(Clone, Copy, Debug, Default)]
#[repr(packed)]
pub struct DirentHeader {
    pub inode: u64,
    /// A filesystem-specific opaque value used to uniquely identify directory entries. This value,
    /// in the last returned entry from a SYS_GETDENTS invocation, shall be passed to the next
    /// call (see the sketch following this struct).
    pub next_opaque_id: u64,
    // This struct intentionally does not include a "next" offset field, unlike Linux, to easily
    // guarantee the iterator will be reasonably deterministic, even if the scheme is adversarial.
    pub record_len: u16,
    /// A `DirentKind`.
    ///
    /// May not be directly available (Unspecified), and if so needs to be looked up using fstat.
    pub kind: u8,
}
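
// Illustrative sketch of the pagination contract described above: the `next_opaque_id` of the
// last returned entry is fed into the next SYS_GETDENTS invocation. The `getdents` wrapper name,
// its argument order, and the initial `opaque` value are assumptions made only for this example.
//
//     let mut opaque = 0;
//     loop {
//         let bytes = getdents(fd, &mut buf, opaque)?;
//         if bytes == 0 {
//             break;
//         }
//         for item in DirentIter::new(&buf[..bytes]) {
//             let (header, _name) = item.map_err(|_| Error::new(EINVAL))?;
//             opaque = header.next_opaque_id;
//         }
//     }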

// These impls expose a `DirentHeader` as its raw byte representation, which is how it is copied
// into the destination buffer by `DirentBuf::entry` below.
impl Deref for DirentHeader {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }
}

impl DerefMut for DirentHeader {
    fn deref_mut(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }
}
#[derive(Clone, Copy, Debug, Default)]
#[repr(u8)]
pub enum DirentKind {
    #[default]
    Unspecified = 0,

    Regular = 1,
    Directory = 2,
    Symlink = 3,
    BlockDev = 4,
    CharDev = 5,
    Socket = 6,
}
impl DirentKind {
    // TODO: derive(FromPrimitive)
    pub fn try_from_raw(raw: u8) -> Option<Self> {
        Some(match raw {
            0 => Self::Unspecified,

            1 => Self::Regular,
            2 => Self::Directory,
            3 => Self::Symlink,
            4 => Self::BlockDev,
            5 => Self::CharDev,
            6 => Self::Socket,

            _ => return None,
        })
    }
}

pub struct DirentIter<'a>(&'a [u8]);

impl<'a> DirentIter<'a> {
    pub const fn new(buffer: &'a [u8]) -> Self {
        Self(buffer)
    }
}
#[derive(Debug)]
pub struct Invalid;

impl<'a> Iterator for DirentIter<'a> {
    type Item = Result<(&'a DirentHeader, &'a [u8]), Invalid>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.0.len() < size_of::<DirentHeader>() {
            return None;
        }
        let header = unsafe { &*(self.0.as_ptr().cast::<DirentHeader>()) };
        // A record must at least hold the header plus the name's NUL terminator; anything shorter
        // would make the slicing below panic, so treat it as malformed input instead.
        if usize::from(header.record_len) < size_of::<DirentHeader>() + 1 {
            return Some(Err(Invalid));
        }
        if self.0.len() < usize::from(header.record_len) {
            return Some(Err(Invalid));
        }
        let (this, remaining) = self.0.split_at(usize::from(header.record_len));
        self.0 = remaining;

        let name_and_nul = &this[size_of::<DirentHeader>()..];
        let name = &name_and_nul[..name_and_nul.len() - 1];

        Some(Ok((header, name)))
    }
}
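
// Illustrative sketch of consuming a buffer filled by a SYS_GETDENTS call; `buf` and `bytes` are
// assumptions standing in for the destination buffer and the syscall's return value.
//
//     for item in DirentIter::new(&buf[..bytes]) {
//         let (header, name_bytes) = item.map_err(|_| Error::new(EINVAL))?;
//         let name = core::str::from_utf8(name_bytes).map_err(|_| Error::new(EINVAL))?;
//         match DirentKind::try_from_raw(header.kind) {
//             // Kind not provided by the scheme (or unknown); fall back to fstat on the entry.
//             Some(DirentKind::Unspecified) | None => {}
//             // Kind known without an extra syscall.
//             Some(_kind) => {}
//         }
//     }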

pub struct DirentBuf<B> {
    buffer: B,

    // Exists in order to allow future extensions to the DirentHeader struct.

    // TODO: Might add an upper bound to protect against cache miss DoS. The kernel currently
    // forbids any other value than size_of::<DirentHeader>().
    header_size: u16,

    written: usize,
}
/// Abstraction between `&mut [u8]` and the kernel's `UserSliceWo`.
pub trait Buffer<'a>: Sized + 'a {
    fn empty() -> Self;
    fn length(&self) -> usize;

    /// Split all of `self` into two disjoint contiguous subbuffers of lengths `index` and
    /// `length - index`, respectively.
    ///
    /// Returns None if and only if `index > length`.
    fn split_at(self, index: usize) -> Option<[Self; 2]>;

    /// Copy from `src`; lengths must match exactly.
    ///
    /// Allowed to overwrite subsequent buffer space, for performance reasons. Can be changed in
    /// the future if too restrictive.
    fn copy_from_slice_exact(self, src: &[u8]) -> Result<()>;

    /// Write zeroes to this part of the buffer.
    ///
    /// Allowed to overwrite subsequent buffer space, for performance reasons. Can be changed in
    /// the future if too restrictive.
    fn zero_out(self) -> Result<()>;
}
impl<'a> Buffer<'a> for &'a mut [u8] {
    fn empty() -> Self {
        &mut []
    }
    fn length(&self) -> usize {
        self.len()
    }

    fn split_at(self, index: usize) -> Option<[Self; 2]> {
        self.split_at_mut_checked(index).map(|(a, b)| [a, b])
    }
    fn copy_from_slice_exact(self, src: &[u8]) -> Result<()> {
        self.copy_from_slice(src);
        Ok(())
    }
    fn zero_out(self) -> Result<()> {
        self.fill(0);
        Ok(())
    }
}

pub struct DirEntry<'name> {
    pub inode: u64,
    pub next_opaque_id: u64,
    pub name: &'name str,
    pub kind: DirentKind,
}

impl<'a, B: Buffer<'a>> DirentBuf<B> {
    pub fn new(buffer: B, header_size: u16) -> Option<Self> {
        if usize::from(header_size) < size_of::<DirentHeader>() {
            return None;
        }

        Some(Self {
            buffer,
            header_size,
            written: 0,
        })
    }
    pub fn entry(&mut self, entry: DirEntry<'_>) -> Result<()> {
        let name16 = u16::try_from(entry.name.len()).map_err(|_| Error::new(EINVAL))?;
        let record_len = self
            .header_size
            .checked_add(name16)
            // XXX: NUL byte. Unfortunately this is probably the only performant way to be
            // compatible with C.
            .and_then(|l| l.checked_add(1))
            .ok_or(Error::new(ENAMETOOLONG))?;

        let [this, remaining] = core::mem::replace(&mut self.buffer, B::empty())
            .split_at(usize::from(record_len))
            .ok_or(Error::new(EINVAL))?;

        let [this_header_variable, this_name_and_nul] = this
            .split_at(usize::from(self.header_size))
            .expect("already know header_size + ... >= header_size");

        let [this_name, this_name_nul] = this_name_and_nul
            .split_at(usize::from(name16))
            .expect("already know name.len() <= name.len() + 1");

        // Every write here is currently sequential, allowing the buffer trait to do optimizations
        // where subbuffer writes are out-of-bounds (but inside the total buffer).

        let [this_header, this_header_extra] = this_header_variable
            .split_at(size_of::<DirentHeader>())
            .expect("already checked size_of::<DirentHeader>() <= header_size");

        this_header.copy_from_slice_exact(&DirentHeader {
            record_len,
            next_opaque_id: entry.next_opaque_id,
            inode: entry.inode,
            kind: entry.kind as u8,
        })?;
        this_header_extra.zero_out()?;
        this_name.copy_from_slice_exact(entry.name.as_bytes())?;
        this_name_nul.copy_from_slice_exact(&[0])?;

        self.written += usize::from(record_len);
        self.buffer = remaining;

        Ok(())
    }
    pub fn finalize(self) -> usize {
        self.written
    }
}
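
#[cfg(test)]
mod tests {
    use core::mem::size_of;

    use super::*;

    // Round-trip sketch: produce two records with `DirentBuf` over a plain byte slice, then parse
    // them back with `DirentIter`. The buffer size and the entry values are arbitrary choices made
    // only for this example.
    #[test]
    fn roundtrip_through_byte_buffer() {
        let mut storage = [0_u8; 256];
        let mut buf = DirentBuf::new(&mut storage[..], size_of::<DirentHeader>() as u16)
            .expect("header_size is exactly size_of::<DirentHeader>() here");

        buf.entry(DirEntry {
            inode: 1,
            next_opaque_id: 1,
            name: "foo",
            kind: DirentKind::Regular,
        })
        .expect("buffer has room for the first entry");
        buf.entry(DirEntry {
            inode: 2,
            next_opaque_id: 2,
            name: "bar",
            kind: DirentKind::Directory,
        })
        .expect("buffer has room for the second entry");

        let written = buf.finalize();
        let mut iter = DirentIter::new(&storage[..written]);

        let (header, name) = iter.next().unwrap().unwrap();
        // The braces copy the value out of the packed struct, avoiding a reference to a packed field.
        assert_eq!({ header.inode }, 1);
        assert_eq!(name, b"foo");

        let (header, name) = iter.next().unwrap().unwrap();
        assert_eq!({ header.inode }, 2);
        assert_eq!(name, b"bar");

        assert!(iter.next().is_none());
    }
}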