wasmtime_slab/lib.rs
//! A very simple, uniformly-typed slab arena that supports deallocation and
//! reusing deallocated entries' space.
//!
//! The free list of vacant entries in the slab is stored inline in the slab's
//! existing storage.
//!
//! # Example
//!
//! ```
//! use wasmtime_slab::{Id, Slab};
//!
//! let mut slab = Slab::new();
//!
//! // Insert some values into the slab.
//! let rza = slab.alloc("Robert Fitzgerald Diggs");
//! let gza = slab.alloc("Gary Grice");
//! let bill = slab.alloc("Bill Gates");
//!
//! // Allocated elements can be accessed infallibly via indexing (indexing a
//! // missing or deallocated entry will panic).
//! assert_eq!(slab[rza], "Robert Fitzgerald Diggs");
//!
//! // Alternatively, the `get` and `get_mut` methods provide fallible lookup.
//! if let Some(genius) = slab.get(gza) {
//!     println!("The gza gza genius: {}", genius);
//! }
//! if let Some(val) = slab.get_mut(bill) {
//!     *val = "Bill Gates doesn't belong in this set...";
//! }
//!
//! // We can remove values from the slab.
//! slab.dealloc(bill);
//!
//! // Allocate a new entry.
//! let bill = slab.alloc("Bill Murray");
//! ```
//!
//! # Using `Id`s with the Wrong `Slab`
//!
//! `Slab` does NOT check that `Id`s used to access previously-allocated values
//! came from the current `Slab` instance (as opposed to a different `Slab`
//! instance). Using `Id`s from a different `Slab` is safe, but will yield an
//! unrelated value, if any at all.
//!
//! If you desire checking that an `Id` came from the correct `Slab` instance,
//! it should be easy to layer that functionality on top of this crate by
//! wrapping `Slab` and `Id` in types that additionally maintain a slab instance
//! identifier.
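//!
//! For example, a minimal sketch of such a wrapper might look like the
//! following (the `CheckedSlab` and `CheckedId` names are illustrative and not
//! part of this crate):
//!
//! ```rust
//! use core::sync::atomic::{AtomicU32, Ordering};
//!
//! /// A process-wide counter used to give each slab instance a unique identifier.
//! static NEXT_SLAB_ID: AtomicU32 = AtomicU32::new(0);
//!
//! pub struct CheckedId {
//!     id: wasmtime_slab::Id,
//!     slab_id: u32,
//! }
//!
//! pub struct CheckedSlab<T> {
//!     slab: wasmtime_slab::Slab<T>,
//!     slab_id: u32,
//! }
//!
//! impl<T> CheckedSlab<T> {
//!     pub fn new() -> Self {
//!         CheckedSlab {
//!             slab: wasmtime_slab::Slab::new(),
//!             slab_id: NEXT_SLAB_ID.fetch_add(1, Ordering::Relaxed),
//!         }
//!     }
//!
//!     pub fn alloc(&mut self, value: T) -> CheckedId {
//!         let id = self.slab.alloc(value);
//!         CheckedId { id, slab_id: self.slab_id }
//!     }
//!
//!     pub fn get(&self, id: CheckedId) -> Option<&T> {
//!         // Check that the id was created by this exact slab instance, else
//!         // the caller mixed up their slabs. (Alternatively, return `None`
//!         // instead of panicking.)
//!         assert_eq!(id.slab_id, self.slab_id);
//!         self.slab.get(id.id)
//!     }
//! }
//! ```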
//!
//! # The ABA Problem
//!
//! This `Slab` type does NOT protect against ABA bugs, such as the following
//! sequence:
//!
//! * Value `A` is allocated into the slab, yielding id `i`.
//!
//! * `A` is deallocated, and so `i`'s associated entry is added to the slab's
//!   free list.
//!
//! * Value `B` is allocated into the slab, reusing `i`'s associated entry,
//!   yielding id `i`.
//!
//! * The "original" id `i` is used to access the arena, expecting the
//!   deallocated value `A`, but getting the new value `B`.
//!
//! That is, it does not detect or prevent the memory-safe version of
//! use-after-free bugs.
//!
//! If you need to protect against ABA bugs, it should be easy to layer that
//! functionality on top of this crate by wrapping `Slab` with something like
//! the following:
//!
//! ```rust
//! pub struct GenerationalId {
//!     id: wasmtime_slab::Id,
//!     generation: u32,
//! }
//!
//! struct GenerationalEntry<T> {
//!     value: T,
//!     generation: u32,
//! }
//!
//! pub struct GenerationalSlab<T> {
//!     slab: wasmtime_slab::Slab<GenerationalEntry<T>>,
//!     generation: u32,
//! }
//!
//! impl<T> GenerationalSlab<T> {
//!     pub fn alloc(&mut self, value: T) -> GenerationalId {
//!         let generation = self.generation;
//!         let id = self.slab.alloc(GenerationalEntry { value, generation });
//!         GenerationalId { id, generation }
//!     }
//!
//!     pub fn get(&self, id: GenerationalId) -> Option<&T> {
//!         let entry = self.slab.get(id.id)?;
//!
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, return `None` instead
//!         // of panicking.)
//!         assert_eq!(id.generation, entry.generation);
//!
//!         Some(&entry.value)
//!     }
//!
//!     pub fn dealloc(&mut self, id: GenerationalId) {
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, silently return on
//!         // double-free instead of panicking.)
//!         assert_eq!(id.generation, self.slab[id.id].generation);
//!
//!         self.slab.dealloc(id.id);
//!
//!         // Increment our generation whenever we deallocate so that any new
//!         // value placed in this same entry will have a different generation
//!         // and we can detect ABA bugs.
//!         self.generation += 1;
//!     }
//! }
//! ```

#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, missing_debug_implementations)]

extern crate alloc;

use alloc::vec::Vec;
use core::fmt;
use core::num::NonZeroU32;

/// An identifier for an allocated value inside a `Slab`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct Id(EntryIndex);

impl fmt::Debug for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Id").field(&self.0.index()).finish()
    }
}

impl Id {
    /// Get the raw underlying representation of this `Id`.
    #[inline]
    pub fn into_raw(self) -> u32 {
        u32::try_from(self.0.index()).unwrap()
    }

    /// Construct an `Id` from its raw underlying representation.
    ///
    /// `raw` should be a value that was previously created via
    /// `Id::into_raw`. May panic if given arbitrary values.
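    ///
    /// # Example
    ///
    /// Round-tripping an `Id` through its raw representation:
    ///
    /// ```
    /// use wasmtime_slab::{Id, Slab};
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("hello");
    ///
    /// let raw = id.into_raw();
    /// let same = Id::from_raw(raw);
    /// assert_eq!(slab[same], "hello");
    /// ```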
    #[inline]
    pub fn from_raw(raw: u32) -> Self {
        let raw = usize::try_from(raw).unwrap();
        Self(EntryIndex::new(raw))
    }
}

/// A simple, uni-typed slab arena.
pub struct Slab<T> {
    /// The slab's entries, each is either occupied and holding a `T` or vacant
    /// and is a link in the free list.
    entries: Vec<Entry<T>>,

    /// The index of the first free entry in the free list.
    free: Option<EntryIndex>,

    /// The number of occupied entries in this slab.
    len: u32,
}

impl<T> fmt::Debug for Slab<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_map().entries(self.iter()).finish()
    }
}

enum Entry<T> {
    /// An occupied entry holding a `T`.
    Occupied(T),

    /// A vacant entry.
    Free {
        /// A link in the slab's free list, pointing to the next free entry, if
        /// any.
        next_free: Option<EntryIndex>,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
struct EntryIndex(NonZeroU32);

impl EntryIndex {
    #[inline]
    fn new(index: usize) -> Self {
        assert!(index <= Slab::<()>::MAX_CAPACITY);
        let x = u32::try_from(index + 1).unwrap();
        Self(NonZeroU32::new(x).unwrap())
    }

    #[inline]
    fn index(&self) -> usize {
        let index = self.0.get() - 1;
        usize::try_from(index).unwrap()
    }
}

impl<T> Default for Slab<T> {
    #[inline]
    fn default() -> Self {
        Self {
            entries: Vec::default(),
            free: None,
            len: 0,
        }
    }
}

impl<T> core::ops::Index<Id> for Slab<T> {
    type Output = T;

    #[inline]
    fn index(&self, id: Id) -> &Self::Output {
        self.get(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> core::ops::IndexMut<Id> for Slab<T> {
    #[inline]
    fn index_mut(&mut self, id: Id) -> &mut Self::Output {
        self.get_mut(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> Slab<T> {
    /// The maximum capacity any `Slab` can have: `u32::MAX - 1`.
    pub const MAX_CAPACITY: usize = (u32::MAX - 1) as usize;

    /// Construct a new, empty slab.
    #[inline]
    pub fn new() -> Self {
        Slab::default()
    }

    /// Construct a new, empty slab, pre-reserving space for at least `capacity`
    /// elements.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let mut slab = Self::new();
        slab.reserve(capacity);
        slab
    }

    /// Ensure that there is space for at least `additional` elements in this
    /// slab.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.
    pub fn reserve(&mut self, additional: usize) {
        let cap = self.capacity();
        let len = self.len();
        assert!(cap >= len);
        if cap - len >= additional {
            // Already have `additional` capacity available.
            return;
        }

        self.entries.reserve(additional);

        // Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`
        // in `self.entries`.
        assert!(self.entries.capacity() <= Self::MAX_CAPACITY);
    }

    fn double_capacity(&mut self) {
        // Double our capacity to amortize the cost of resizing. But make sure
        // we add some amount of minimum additional capacity, since doubling
        // zero capacity isn't useful.
        const MIN_CAPACITY: usize = 16;
        let additional = core::cmp::max(self.entries.capacity(), MIN_CAPACITY);
        self.reserve(additional);
    }

    /// What is the capacity of this slab? That is, how many entries can it
    /// contain within its current underlying storage?
    #[inline]
    pub fn capacity(&self) -> usize {
        self.entries.capacity()
    }

    /// How many values are currently allocated within this slab?
    #[inline]
    pub fn len(&self) -> usize {
        usize::try_from(self.len).unwrap()
    }

    /// Are there zero allocated values within this slab?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Try to allocate a `T` value within this slab.
    ///
    /// If there is no available capacity, ownership of the given value is
    /// returned via `Err(value)`.
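    ///
    /// # Example
    ///
    /// A small sketch of handling both outcomes:
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::with_capacity(1);
    /// match slab.try_alloc("hello") {
    ///     Ok(id) => assert_eq!(slab[id], "hello"),
    ///     // With no spare capacity, ownership of the value comes back to us.
    ///     Err(value) => assert_eq!(value, "hello"),
    /// }
    /// ```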
    #[inline]
    pub fn try_alloc(&mut self, value: T) -> Result<Id, T> {
        if let Some(index) = self.try_alloc_index() {
            let next_free = match self.entries[index.index()] {
                Entry::Free { next_free } => next_free,
                Entry::Occupied { .. } => unreachable!(),
            };
            self.free = next_free;
            self.entries[index.index()] = Entry::Occupied(value);
            self.len += 1;
            Ok(Id(index))
        } else {
            Err(value)
        }
    }

    #[inline]
    fn try_alloc_index(&mut self) -> Option<EntryIndex> {
        self.free.take().or_else(|| {
            if self.entries.len() < self.entries.capacity() {
                let index = EntryIndex::new(self.entries.len());
                self.entries.push(Entry::Free { next_free: None });
                Some(index)
            } else {
                None
            }
        })
    }

    /// Allocate a `T` value within this slab, allocating additional underlying
    /// storage if there is no available capacity.
    ///
    /// # Panics
    ///
    /// Panics if allocating this value requires reallocating the underlying
    /// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.
    #[inline]
    pub fn alloc(&mut self, value: T) -> Id {
        self.try_alloc(value)
            .unwrap_or_else(|value| self.alloc_slow(value))
    }

    /// Get the `Id` that will be returned for the next allocation in this slab.
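    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    ///
    /// // `next_id` reports the id that the following allocation will receive.
    /// let expected = slab.next_id();
    /// let actual = slab.alloc("hello");
    /// assert_eq!(expected, actual);
    /// ```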
    #[inline]
    pub fn next_id(&self) -> Id {
        let index = self.free.unwrap_or_else(|| EntryIndex::new(self.len()));
        Id(index)
    }

    #[inline(never)]
    #[cold]
    fn alloc_slow(&mut self, value: T) -> Id {
        // Reserve additional capacity, since we didn't have space for the
        // allocation.
        self.double_capacity();
        // After which the allocation will succeed.
        self.try_alloc(value).ok().unwrap()
    }

    /// Get a shared borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
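    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc(42);
    /// assert_eq!(slab.get(id), Some(&42));
    ///
    /// // Once the value is deallocated, lookups return `None`.
    /// slab.dealloc(id);
    /// assert!(slab.get(id).is_none());
    /// ```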
    #[inline]
    pub fn get(&self, id: Id) -> Option<&T> {
        match self
            .entries
            .get(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Get an exclusive borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {
        match self
            .entries
            .get_mut(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Does this slab contain an allocated value for `id`?
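    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("hello");
    /// assert!(slab.contains(id));
    ///
    /// slab.dealloc(id);
    /// assert!(!slab.contains(id));
    /// ```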
    #[inline]
    pub fn contains(&self, id: Id) -> bool {
        match self.entries.get(id.0.index()) {
            Some(Entry::Occupied(_)) => true,
            None | Some(Entry::Free { .. }) => false,
        }
    }

    /// Deallocate the value associated with the given `id`.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic or
    /// deallocate an arbitrary value.
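    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("hello");
    ///
    /// // Deallocation returns ownership of the removed value.
    /// let value = slab.dealloc(id);
    /// assert_eq!(value, "hello");
    /// assert!(!slab.contains(id));
    /// ```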
    #[inline]
    pub fn dealloc(&mut self, id: Id) -> T {
        let entry = core::mem::replace(
            self.entries
                .get_mut(id.0.index())
                .expect("id from a different slab"),
            Entry::Free { next_free: None },
        );
        match entry {
            Entry::Free { .. } => panic!("attempt to deallocate an entry that is already vacant"),
            Entry::Occupied(value) => {
                let next_free = core::mem::replace(&mut self.free, Some(id.0));
                self.entries[id.0.index()] = Entry::Free { next_free };
                self.len -= 1;
                value
            }
        }
    }

    /// Iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
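    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let a = slab.alloc("a");
    /// let b = slab.alloc("b");
    ///
    /// for (id, value) in slab.iter() {
    ///     assert!(id == a || id == b);
    ///     assert!(*value == "a" || *value == "b");
    /// }
    /// ```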
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }

    /// Mutably iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
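    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let a = slab.alloc(1);
    /// let b = slab.alloc(2);
    ///
    /// // Update every allocated value in place.
    /// for (_id, value) in slab.iter_mut() {
    ///     *value *= 10;
    /// }
    ///
    /// assert_eq!(slab[a], 10);
    /// assert_eq!(slab[b], 20);
    /// ```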
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id, &mut T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter_mut()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }

    /// Iterate over and remove all entries in this slab.
    ///
    /// The slab will be empty after calling this method.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
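    ///
    /// # Example
    ///
    /// ```
    /// use wasmtime_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// slab.alloc(1);
    /// slab.alloc(2);
    ///
    /// let mut values: Vec<_> = slab.drain().map(|(_id, value)| value).collect();
    /// values.sort();
    /// assert_eq!(values, [1, 2]);
    ///
    /// // The slab is now empty.
    /// assert!(slab.is_empty());
    /// ```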
    #[inline]
    pub fn drain(&mut self) -> impl Iterator<Item = (Id, T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.len = 0;
        self.free = None;
        self.entries
            .drain(..)
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }
}