dyn_stack/alloc.rs
// copied from libcore/liballoc

use core::alloc::Layout;
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ptr;
use core::ptr::NonNull;

extern crate alloc;

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;

#[cfg(any(feature = "std", feature = "core-error"))]
impl crate::Error for AllocError {}

impl fmt::Display for AllocError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("memory allocation failed")
    }
}

/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
/// data described via [`Layout`][].
///
/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers, because an
/// allocator like `MyAllocator([u8; N])` cannot be moved without updating the pointers to the
/// allocated memory.
///
/// Unlike [`alloc::alloc::GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an
/// underlying allocator does not support them (like jemalloc) or returns a null pointer for them
/// (such as `libc::malloc`), this must be caught by the implementation.
///
/// ### Currently allocated memory
///
/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
/// means that:
///
/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
///   [`shrink`], and
///
/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
///   directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
///   [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
///   remains valid.
///
/// [`allocate`]: Allocator::allocate
/// [`grow`]: Allocator::grow
/// [`shrink`]: Allocator::shrink
/// [`deallocate`]: Allocator::deallocate
///
/// ### Memory fitting
///
/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
/// "fit" a memory block (or equivalently, for a memory block to "fit" a layout) is that the
/// following conditions must hold:
///
/// * The block must be allocated with the same alignment as [`layout.align()`], and
///
/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
///   - `min` is the size of the layout most recently used to allocate the block, and
///   - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
///
/// [`layout.align()`]: Layout::align
/// [`layout.size()`]: Layout::size
///
/// # Safety
///
/// * Memory blocks returned from an allocator that are [*currently allocated*] must point to
///   valid memory and retain their validity while they are [*currently allocated*], at most for
///   the borrow-checker lifetime of the allocator type itself.
///
/// * Any pointer to a memory block which is [*currently allocated*] may be passed to any other
///   method of the allocator.
///
/// [*currently allocated*]: #currently-allocated-memory
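///
/// # Examples
///
/// A minimal sketch of an allocate/deallocate round trip through this trait (illustrative only;
/// it assumes the `alloc` feature, which provides the [`Global`] allocator defined below):
///
/// ```ignore
/// use core::alloc::Layout;
///
/// let a = Global;
/// let layout = Layout::array::<u64>(8).unwrap();
///
/// let block = a.allocate(layout).expect("allocation failed");
/// // The returned slice pointer records the actual size, which may exceed the request.
/// assert!(block.len() >= layout.size());
///
/// // SAFETY: `block` is currently allocated by `a` and `layout` fits it.
/// unsafe { a.deallocate(block.cast(), layout) };
/// ```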
pub unsafe trait Allocator {
    /// Attempts to allocate a block of memory.
    ///
    /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
    ///
    /// The returned block may have a larger size than specified by `layout.size()`, and may or may
    /// not have its contents initialized.
    ///
    /// The returned block of memory remains valid as long as it is [*currently allocated*], at
    /// most for the borrow-checker lifetime of the allocator type itself.
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
    /// allocator's size or alignment constraints.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    /// [*currently allocated*]: #currently-allocated-memory
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;

    /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
    /// allocator's size or alignment constraints.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let ptr = self.allocate(layout)?;
        // SAFETY: `allocate` returned a valid memory block of `ptr.len()` bytes.
        unsafe { (ptr.as_ptr() as *mut u8).write_bytes(0, ptr.len()) }
        Ok(ptr)
    }

    /// Deallocates the memory referenced by `ptr`.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
    /// * `layout` must [*fit*] that block of memory.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);

    /// Attempts to extend the memory block.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
    /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
    ///
    /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
    /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
    /// allocation was grown in-place. The newly returned pointer is the only valid pointer
    /// for accessing this memory now.
    ///
    /// If this method returns `Err`, then ownership of the memory block has not been transferred to
    /// this allocator, and the contents of the memory block are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
    /// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit it).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returns `Err` if the new layout does not meet the allocator's size and alignment
    /// constraints, or if growing otherwise fails.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
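    ///
    /// # Examples
    ///
    /// A minimal sketch of the intended call pattern (illustrative only; it assumes an allocator
    /// `a` and a live allocation `ptr` made with `old_layout`):
    ///
    /// ```ignore
    /// let new_layout =
    ///     Layout::from_size_align(old_layout.size() * 2, old_layout.align()).unwrap();
    /// // SAFETY: `ptr` is currently allocated by `a`, `old_layout` fits it, and
    /// // `new_layout.size() >= old_layout.size()`.
    /// let grown = unsafe { a.grow(ptr, old_layout, new_layout)? };
    /// // `ptr` must no longer be used; `grown` is now the only valid pointer.
    /// ```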
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        let new_ptr = self.allocate(new_layout)?;

        // SAFETY: because `new_layout.size()` must be greater than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for reads and
        // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
        // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
        // safe. The safety contract for `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size());
            self.deallocate(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Behaves like `grow`, but also ensures that the new contents are set to zero before being
    /// returned.
    ///
    /// The memory block will contain the following contents after a successful call to
    /// `grow_zeroed`:
    /// * Bytes `0..old_layout.size()` are preserved from the original allocation.
    /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
    ///   the allocator implementation. `old_size` refers to the size of the memory block prior
    ///   to the `grow_zeroed` call, which may be larger than the size that was originally
    ///   requested when it was allocated.
    /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
    ///   block returned by the `grow_zeroed` call.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
    /// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit it).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returns `Err` if the new layout does not meet the allocator's size and alignment
    /// constraints, or if growing otherwise fails.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        let new_ptr = self.allocate_zeroed(new_layout)?;

        // SAFETY: because `new_layout.size()` must be greater than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for reads and
        // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
        // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
        // safe. The safety contract for `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size());
            self.deallocate(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Attempts to shrink the memory block.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
    /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
    ///
    /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
    /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
    /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer
    /// for accessing this memory now.
    ///
    /// If this method returns `Err`, then ownership of the memory block has not been transferred to
    /// this allocator, and the contents of the memory block are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
    /// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit it).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returns `Err` if the new layout does not meet the allocator's size and alignment
    /// constraints, or if shrinking otherwise fails.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        let new_ptr = self.allocate(new_layout)?;

        // SAFETY: because `new_layout.size()` must be lower than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for reads and
        // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
        // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
        // safe. The safety contract for `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, new_layout.size());
            self.deallocate(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Creates a "by reference" adapter for this instance of `Allocator`.
    ///
    /// The returned adapter also implements `Allocator` and will simply borrow this.
    #[inline(always)]
    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}

unsafe impl<T: ?Sized + Allocator> Allocator for &T {
    #[inline(always)]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate(layout)
    }

    #[inline(always)]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        (**self).deallocate(ptr, layout)
    }

    #[inline(always)]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate_zeroed(layout)
    }

    #[inline(always)]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).grow(ptr, old_layout, new_layout)
    }

    #[inline(always)]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).grow_zeroed(ptr, old_layout, new_layout)
    }

    #[inline(always)]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).shrink(ptr, old_layout, new_layout)
    }
}

unsafe impl<T: ?Sized + Allocator> Allocator for &mut T {
    #[inline(always)]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate(layout)
    }

    #[inline(always)]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        (**self).deallocate(ptr, layout)
    }

    #[inline(always)]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate_zeroed(layout)
    }

    #[inline(always)]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).grow(ptr, old_layout, new_layout)
    }

    #[inline(always)]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).grow_zeroed(ptr, old_layout, new_layout)
    }

    #[inline(always)]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).shrink(ptr, old_layout, new_layout)
    }
}

#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
unsafe impl<T: ?Sized + Allocator> Allocator for alloc::boxed::Box<T> {
    #[inline(always)]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate(layout)
    }

    #[inline(always)]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        (**self).deallocate(ptr, layout)
    }

    #[inline(always)]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate_zeroed(layout)
    }

    #[inline(always)]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).grow(ptr, old_layout, new_layout)
    }

    #[inline(always)]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).grow_zeroed(ptr, old_layout, new_layout)
    }

    #[inline(always)]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        (**self).shrink(ptr, old_layout, new_layout)
    }
}

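/// A handle to the global allocator, forwarding to `alloc::alloc::{alloc, alloc_zeroed,
/// realloc, dealloc}`, with explicit support for zero-sized allocations.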
#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
pub struct Global;

#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
unsafe impl Allocator for Global {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
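        // The global allocator does not support zero-sized allocations, so for a zero-sized
        // `layout` we hand out a dangling, well-aligned, non-null pointer instead of calling it.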
        let ptr = if layout.size() == 0 {
            core::ptr::null_mut::<u8>().wrapping_add(layout.align())
        } else {
            unsafe { alloc::alloc::alloc(layout) }
        };

        if ptr.is_null() {
            Err(AllocError)
        } else {
            Ok(unsafe {
                NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, layout.size()))
            })
        }
    }

    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let ptr = if layout.size() == 0 {
            core::ptr::null_mut::<u8>().wrapping_add(layout.align())
        } else {
            unsafe { alloc::alloc::alloc_zeroed(layout) }
        };

        if ptr.is_null() {
            Err(AllocError)
        } else {
            Ok(unsafe {
                NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, layout.size()))
            })
        }
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        if layout.size() != 0 {
            alloc::alloc::dealloc(ptr.as_ptr(), layout);
        }
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        core::debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        if old_layout.align() == new_layout.align() {
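            // `realloc` cannot change the alignment of an allocation, so this fast path is only
            // taken when the alignment stays the same; otherwise fall back to
            // allocate + copy + deallocate below.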
            let ptr = if new_layout.size() == 0 {
                core::ptr::null_mut::<u8>().wrapping_add(new_layout.align())
            } else {
                alloc::alloc::realloc(ptr.as_ptr(), old_layout, new_layout.size())
            };
            if ptr.is_null() {
                Err(AllocError)
            } else {
                Ok(unsafe {
                    NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
                        ptr,
                        new_layout.size(),
                    ))
                })
            }
        } else {
            let new_ptr = self.allocate(new_layout)?;

            // SAFETY: because `new_layout.size()` must be greater than or equal to
            // `old_layout.size()`, both the old and new memory allocation are valid for reads and
            // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
            // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
            // safe. The safety contract for `dealloc` must be upheld by the caller.
            unsafe {
                ptr::copy_nonoverlapping(
                    ptr.as_ptr(),
                    new_ptr.as_ptr() as *mut u8,
                    old_layout.size(),
                );
                self.deallocate(ptr, old_layout);
            }

            Ok(new_ptr)
        }
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        core::debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

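        // As in `grow`, `realloc` cannot change the alignment of an allocation, so it is only
        // used when the alignment stays the same.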
        if old_layout.align() == new_layout.align() {
            let ptr = if new_layout.size() == 0 {
                // Shrinking to zero: the global allocator cannot represent zero-sized blocks,
                // so free the old allocation and hand out a dangling, aligned pointer.
                if old_layout.size() != 0 {
                    alloc::alloc::dealloc(ptr.as_ptr(), old_layout);
                }
                core::ptr::null_mut::<u8>().wrapping_add(new_layout.align())
            } else {
                alloc::alloc::realloc(ptr.as_ptr(), old_layout, new_layout.size())
            };

            if ptr.is_null() {
                Err(AllocError)
            } else {
                Ok(unsafe {
                    NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
                        ptr,
                        new_layout.size(),
                    ))
                })
            }
        } else {
            let new_ptr = self.allocate(new_layout)?;

            // SAFETY: because `new_layout.size()` must be lower than or equal to
            // `old_layout.size()`, both the old and new memory allocation are valid for reads and
            // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
            // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
            // safe. The safety contract for `dealloc` must be upheld by the caller.
            unsafe {
                ptr::copy_nonoverlapping(
                    ptr.as_ptr(),
                    new_ptr.as_ptr() as *mut u8,
                    new_layout.size(),
                );
                self.deallocate(ptr, old_layout);
            }

            Ok(new_ptr)
        }
    }
}

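/// Manually built vtable for a type-erased [`Allocator`] whose state is stored inline in a
/// pointer-sized slot (see [`DynAlloc`]). The first argument of each entry is a pointer to
/// that slot.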
#[derive(Copy, Clone, Debug)]
pub(crate) struct VTable {
    pub allocate: unsafe fn(*const (), Layout) -> Result<NonNull<[u8]>, AllocError>,
    pub allocate_zeroed: unsafe fn(*const (), Layout) -> Result<NonNull<[u8]>, AllocError>,
    pub deallocate: unsafe fn(*const (), ptr: NonNull<u8>, Layout),
    pub grow:
        unsafe fn(*const (), NonNull<u8>, Layout, Layout) -> Result<NonNull<[u8]>, AllocError>,
    pub grow_zeroed:
        unsafe fn(*const (), NonNull<u8>, Layout, Layout) -> Result<NonNull<[u8]>, AllocError>,
    pub shrink:
        unsafe fn(*const (), NonNull<u8>, Layout, Layout) -> Result<NonNull<[u8]>, AllocError>,

    pub clone: Option<unsafe fn(*mut (), *const ())>,
    pub drop: unsafe fn(*mut ()),
}

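/// Type-erased allocator. The concrete allocator is stored inline in a pointer-sized,
/// pointer-aligned slot and its methods are reached through a static [`VTable`]; `'a` ties the
/// handle to any borrows held by the erased allocator.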
pub struct DynAlloc<'a> {
    pub(crate) alloc: UnsafeCell<MaybeUninit<*const ()>>,
    pub(crate) vtable: &'static VTable,
    __marker: PhantomData<&'a ()>,
}

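// SAFETY: `DynAlloc` can only be constructed from allocators that are `Send` (see
// `try_new_unclone`/`try_new_clone`), so it is sound to send it to another thread.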
unsafe impl Send for DynAlloc<'_> {}

unsafe impl Allocator for DynAlloc<'_> {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        unsafe { (self.vtable.allocate)(core::ptr::addr_of!(self.alloc) as *const (), layout) }
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        unsafe {
            (self.vtable.deallocate)(core::ptr::addr_of!(self.alloc) as *const (), ptr, layout)
        }
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        unsafe {
            (self.vtable.allocate_zeroed)(core::ptr::addr_of!(self.alloc) as *const (), layout)
        }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        unsafe {
            (self.vtable.grow)(
                core::ptr::addr_of!(self.alloc) as *const (),
                ptr,
                old_layout,
                new_layout,
            )
        }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        unsafe {
            (self.vtable.grow_zeroed)(
                core::ptr::addr_of!(self.alloc) as *const (),
                ptr,
                old_layout,
                new_layout,
            )
        }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        unsafe {
            (self.vtable.shrink)(
                core::ptr::addr_of!(self.alloc) as *const (),
                ptr,
                old_layout,
                new_layout,
            )
        }
    }
}

impl Drop for DynAlloc<'_> {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(core::ptr::addr_of_mut!(self.alloc) as *mut ()) }
    }
}

impl Clone for DynAlloc<'_> {
    #[inline]
    fn clone(&self) -> Self {
        let mut alloc = UnsafeCell::new(MaybeUninit::uninit());
        unsafe {
            self.vtable.clone.unwrap()(
                core::ptr::addr_of_mut!(alloc) as *mut (),
                core::ptr::addr_of!(self.alloc) as *const (),
            );
        }

        Self {
            alloc,
            vtable: self.vtable,
            __marker: PhantomData,
        }
    }
}

impl<'a> DynAlloc<'a> {
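    /// Type-erases `alloc` without requiring `Clone`; the resulting `DynAlloc` panics if
    /// cloned. Returns `Err(alloc)` if the allocator does not fit in a pointer-sized,
    /// pointer-aligned slot.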
    #[inline]
    pub fn try_new_unclone<A: 'a + Allocator + Send>(alloc: A) -> Result<Self, A> {
        if core::mem::size_of::<A>() <= core::mem::size_of::<*const ()>()
            && core::mem::align_of::<A>() <= core::mem::align_of::<*const ()>()
        {
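            // Each vtable entry is the concrete method of `A`, with its `&Self` receiver
            // transmuted to `*const ()`. This is sound because callers always pass a pointer
            // to the slot in which `A` is stored inline.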
            trait AllocUnclone: Allocator + Send {
                const VTABLE: &'static VTable = &unsafe {
                    VTable {
                        allocate: core::mem::transmute(Self::allocate as fn(&Self, _) -> _),
                        allocate_zeroed: core::mem::transmute(
                            Self::allocate_zeroed as fn(&Self, _) -> _,
                        ),
                        deallocate: core::mem::transmute(
                            Self::deallocate as unsafe fn(&Self, _, _) -> _,
                        ),
                        grow: core::mem::transmute(Self::grow as unsafe fn(&Self, _, _, _) -> _),
                        grow_zeroed: core::mem::transmute(
                            Self::grow_zeroed as unsafe fn(&Self, _, _, _) -> _,
                        ),
                        shrink: core::mem::transmute(
                            Self::shrink as unsafe fn(&Self, _, _, _) -> _,
                        ),

                        clone: None,
                        drop: core::mem::transmute(
                            core::ptr::drop_in_place::<Self> as unsafe fn(_) -> _,
                        ),
                    }
                };
            }
            impl<A: Allocator + Send> AllocUnclone for A {}

            Ok(Self {
                alloc: unsafe { core::mem::transmute_copy(&core::mem::ManuallyDrop::new(alloc)) },
                vtable: <A as AllocUnclone>::VTABLE,
                __marker: PhantomData,
            })
        } else {
            Err(alloc)
        }
    }

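    /// Type-erases `alloc`, preserving the ability to clone the resulting `DynAlloc`. Returns
    /// `Err(alloc)` if the allocator does not fit in a pointer-sized, pointer-aligned slot.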
    #[inline]
    pub fn try_new_clone<A: 'a + Clone + Allocator + Send>(alloc: A) -> Result<Self, A> {
        if core::mem::size_of::<A>() <= core::mem::size_of::<*const ()>()
            && core::mem::align_of::<A>() <= core::mem::align_of::<*const ()>()
        {
            trait AllocClone: Allocator + Send + Clone {
                const VTABLE: &'static VTable = &unsafe {
                    VTable {
                        allocate: core::mem::transmute(Self::allocate as fn(_, _) -> _),
                        allocate_zeroed: core::mem::transmute(
                            Self::allocate_zeroed as fn(_, _) -> _,
                        ),
                        deallocate: core::mem::transmute(
                            Self::deallocate as unsafe fn(_, _, _) -> _,
                        ),
                        grow: core::mem::transmute(Self::grow as unsafe fn(_, _, _, _) -> _),
                        grow_zeroed: core::mem::transmute(
                            Self::grow_zeroed as unsafe fn(_, _, _, _) -> _,
                        ),
                        shrink: core::mem::transmute(Self::shrink as unsafe fn(_, _, _, _) -> _),

                        clone: Some(|dst: *mut (), src: *const ()| {
                            (dst as *mut Self).write((*(src as *const Self)).clone())
                        }),
                        drop: core::mem::transmute(
                            core::ptr::drop_in_place::<Self> as unsafe fn(_) -> _,
                        ),
                    }
                };
            }
            impl<A: Allocator + Send + Clone> AllocClone for A {}

            Ok(Self {
                alloc: unsafe { core::mem::transmute_copy(&core::mem::ManuallyDrop::new(alloc)) },
                vtable: <A as AllocClone>::VTABLE,
                __marker: PhantomData,
            })
        } else {
            Err(alloc)
        }
    }

    #[inline]
    pub fn from_ref<A: Allocator + Sync>(alloc: &'a A) -> Self {
        match Self::try_new_clone(alloc) {
            Ok(me) => me,
            Err(_) => unreachable!(),
        }
    }

    #[inline]
    pub fn from_mut<A: Allocator + Send>(alloc: &'a mut A) -> Self {
        match Self::try_new_unclone(alloc) {
            Ok(me) => me,
            Err(_) => unreachable!(),
        }
    }

    #[inline]
    pub fn by_mut(&mut self) -> DynAlloc<'_> {
        DynAlloc::from_mut(self)
    }

    #[inline]
    pub fn cloneable(&self) -> bool {
        self.vtable.clone.is_some()
    }
}
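
// A minimal sketch (not part of the original file) exercising `DynAlloc` end to end with the
// `Global` allocator: type-erase it, allocate, and deallocate.
#[cfg(all(test, feature = "alloc"))]
mod tests {
    use super::*;
    use core::alloc::Layout;

    #[test]
    fn dyn_alloc_round_trip() {
        let mut global = Global;
        let alloc = DynAlloc::from_mut(&mut global);
        let layout = Layout::array::<u64>(4).unwrap();

        let block = alloc.allocate(layout).unwrap();
        // The returned slice pointer records the actual size, which may exceed the request.
        assert!(block.len() >= layout.size());

        // SAFETY: `block` is currently allocated by `alloc` and `layout` fits it.
        unsafe { alloc.deallocate(block.cast(), layout) };
    }
}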