// sized_chunks/inline_array/mod.rs

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//! A fixed capacity array sized to match some other type `T`.
//!
//! See [`InlineArray`](struct.InlineArray.html)
9use core::borrow::{Borrow, BorrowMut};
10use core::cmp::Ordering;
11use core::fmt::{Debug, Error, Formatter};
12use core::hash::{Hash, Hasher};
13use core::iter::FromIterator;
14use core::marker::PhantomData;
15use core::mem::{self, MaybeUninit};
16use core::ops::{Deref, DerefMut};
17use core::ptr;
18use core::ptr::NonNull;
19use core::slice::{from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut};
20
21mod iter;
22pub use self::iter::{Drain, Iter};
23
/// A fixed capacity array sized to match some other type `T`.
///
/// This works like a vector, but allocated on the stack (and thus marginally
/// faster than `Vec`), with the allocated space exactly matching the size of
/// the given type `T`. The vector consists of a `usize` tracking its current
/// length and zero or more elements of type `A`. The capacity is thus
/// `( size_of::<T>() - size_of::<usize>() ) / size_of::<A>()`. This could lead
/// to situations where the capacity is zero, if `size_of::<A>()` is greater
/// than `size_of::<T>() - size_of::<usize>()`, which is not an error and
/// handled properly by the data structure.
///
/// If `size_of::<T>()` is less than `size_of::<usize>()`, meaning the vector
/// has no space to store its length, `InlineArray::new()` will panic.
///
/// This is meant to facilitate optimisations where a list data structure
/// allocates a fairly large struct for itself, allowing you to replace it with
/// an `InlineArray` until it grows beyond its capacity. This not only gives you
/// a performance boost at very small sizes, it also saves you from having to
/// allocate anything on the heap until absolutely necessary.
///
/// For instance, `im::Vector<A>` in its final form currently looks like this
/// (approximately):
///
/// ```rust, ignore
/// struct RRB<A> {
///     length: usize,
///     tree_height: usize,
///     outer_head: Rc<Chunk<A>>,
///     inner_head: Rc<Chunk<A>>,
///     tree: Rc<TreeNode<A>>,
///     inner_tail: Rc<Chunk<A>>,
///     outer_tail: Rc<Chunk<A>>,
/// }
/// ```
///
/// That's two `usize`s and five `Rc`s, which comes in at 56 bytes on x86_64
/// architectures. With `InlineArray`, that leaves us with 56 -
/// `size_of::<usize>()` = 48 bytes we can use before having to expand into the
/// full data structure. If `A` is `u8`, that's 48 elements, and even if `A` is a
/// pointer we can still keep 6 of them inline before we run out of capacity.
///
/// We can declare an enum like this:
///
/// ```rust, ignore
/// enum VectorWrapper<A> {
///     Inline(InlineArray<A, RRB<A>>),
///     Full(RRB<A>),
/// }
/// ```
///
/// Both of these will have the same size, and we can swap the `Inline` case out
/// with the `Full` case once the `InlineArray` runs out of capacity.
#[repr(C)]
pub struct InlineArray<A, T> {
    // Alignment tricks
    //
    // We need both the `_header_align` and `data` to be properly aligned in memory. We do a few tricks
    // to handle that.
    //
    // * An alignment is always power of 2. Therefore, with a pair of alignments, one is always
    //   a multiple of the other (one way or the other).
    // * A struct is aligned to at least the max alignment of each of its fields.
    // * A `repr(C)` struct follows the order of fields and pushes each as close to the previous one
    //   as allowed by alignment.
    //
    // By placing two "fake" fields that have 0 size, but an alignment first, we make sure that all
    // 3 start at the beginning of the struct and that all of them are aligned to their maximum
    // alignment.
    //
    // Unfortunately, we can't use `[A; 0]` to align to actual alignment of the type `A`, because
    // it prevents use of `InlineArray` in recursive types.
    // We rely on alignment of `u64`/`usize` or `T` to be sufficient, and panic otherwise. We use
    // `u64` to handle all common types on 32-bit systems too.
    //
    // Furthermore, because we don't know if `u64` or `A` has bigger alignment, we decide on case by
    // case basis if the header or the elements go first. By placing the one with higher alignment
    // requirements first, we align that one and the other one will be aligned "automatically" when
    // placed just after it.
    //
    // To the best of our knowledge, this is all guaranteed by the compiler. But just to make sure,
    // we have bunch of asserts in the constructor to check; as these are invariants enforced by
    // the compiler, it should be trivial for it to remove the checks so they are for free (if we
    // are correct) or will save us (if we are not).

    // Zero-sized; only contributes the larger of `u64`/`usize` alignment to the struct.
    _header_align: [(u64, usize); 0],
    // Marks logical ownership of `A` values for variance/drop-check purposes;
    // no `A` is stored directly in any field.
    _phantom: PhantomData<A>,
    // Raw storage. The `usize` length header and the element slots both live
    // inside this `MaybeUninit<T>`; only `new()` and the unsafe accessors in
    // the impl below ever read or write it.
    data: MaybeUninit<T>,
}
111
/// Compute how many elements of the given size/alignment fit in the host
/// type's storage after the `usize` length header.
///
/// Returns 0 whenever the element alignment exceeds the container alignment,
/// because storing even a single element could not be done soundly; this now
/// also applies to zero-sized element types with an oversized alignment
/// (`#[repr(align)]` ZSTs), which previously reported `usize::MAX` and then
/// tripped the alignment assertions in `InlineArray::new()`. Zero-sized
/// elements whose alignment is satisfiable occupy no space, so any number of
/// them fit.
const fn capacity(
    host_size: usize,
    header_size: usize,
    element_size: usize,
    element_align: usize,
    container_align: usize,
) -> usize {
    if element_align > container_align {
        0 // larger alignment can't be guaranteed, so it'd be unsafe to store any elements
    } else if element_size == 0 {
        usize::MAX
    } else if host_size > header_size {
        (host_size - header_size) / element_size
    } else {
        0 // no room left after the length header
    }
}
127
128impl<A, T> InlineArray<A, T> {
129    const HOST_SIZE: usize = mem::size_of::<T>();
130    const ELEMENT_SIZE: usize = mem::size_of::<A>();
131    const HEADER_SIZE: usize = mem::size_of::<usize>();
132    // Do we place the header before the elements or the other way around?
133    const HEADER_FIRST: bool = mem::align_of::<usize>() >= mem::align_of::<A>();
134    // Note: one of the following is always 0
135    // How many usizes to skip before the first element?
136    const ELEMENT_SKIP: usize = Self::HEADER_FIRST as usize;
137    // How many elements to skip before the header
138    const HEADER_SKIP: usize = Self::CAPACITY * (1 - Self::ELEMENT_SKIP);
139
140    /// The maximum number of elements the `InlineArray` can hold.
141    pub const CAPACITY: usize = capacity(
142        Self::HOST_SIZE,
143        Self::HEADER_SIZE,
144        Self::ELEMENT_SIZE,
145        mem::align_of::<A>(),
146        mem::align_of::<Self>(),
147    );
148
149    #[inline]
150    #[must_use]
151    unsafe fn len_const(&self) -> *const usize {
152        let ptr = self
153            .data
154            .as_ptr()
155            .cast::<A>()
156            .add(Self::HEADER_SKIP)
157            .cast::<usize>();
158        debug_assert!(ptr as usize % mem::align_of::<usize>() == 0);
159        ptr
160    }
161
162    #[inline]
163    #[must_use]
164    pub(crate) unsafe fn len_mut(&mut self) -> *mut usize {
165        let ptr = self
166            .data
167            .as_mut_ptr()
168            .cast::<A>()
169            .add(Self::HEADER_SKIP)
170            .cast::<usize>();
171        debug_assert!(ptr as usize % mem::align_of::<usize>() == 0);
172        ptr
173    }
174
175    #[inline]
176    #[must_use]
177    pub(crate) unsafe fn data(&self) -> *const A {
178        if Self::CAPACITY == 0 {
179            return NonNull::<A>::dangling().as_ptr();
180        }
181        let ptr = self
182            .data
183            .as_ptr()
184            .cast::<usize>()
185            .add(Self::ELEMENT_SKIP)
186            .cast::<A>();
187        debug_assert!(ptr as usize % mem::align_of::<A>() == 0);
188        ptr
189    }
190
191    #[inline]
192    #[must_use]
193    unsafe fn data_mut(&mut self) -> *mut A {
194        if Self::CAPACITY == 0 {
195            return NonNull::<A>::dangling().as_ptr();
196        }
197        let ptr = self
198            .data
199            .as_mut_ptr()
200            .cast::<usize>()
201            .add(Self::ELEMENT_SKIP)
202            .cast::<A>();
203        debug_assert!(ptr as usize % mem::align_of::<A>() == 0);
204        ptr
205    }
206
207    #[inline]
208    #[must_use]
209    unsafe fn ptr_at(&self, index: usize) -> *const A {
210        debug_assert!(index < Self::CAPACITY);
211        self.data().add(index)
212    }
213
214    #[inline]
215    #[must_use]
216    unsafe fn ptr_at_mut(&mut self, index: usize) -> *mut A {
217        debug_assert!(index < Self::CAPACITY);
218        self.data_mut().add(index)
219    }
220
221    #[inline]
222    unsafe fn read_at(&self, index: usize) -> A {
223        ptr::read(self.ptr_at(index))
224    }
225
226    #[inline]
227    unsafe fn write_at(&mut self, index: usize, value: A) {
228        ptr::write(self.ptr_at_mut(index), value);
229    }
230
231    /// Get the length of the array.
232    #[inline]
233    #[must_use]
234    pub fn len(&self) -> usize {
235        unsafe { *self.len_const() }
236    }
237
238    /// Test if the array is empty.
239    #[inline]
240    #[must_use]
241    pub fn is_empty(&self) -> bool {
242        self.len() == 0
243    }
244
245    /// Test if the array is at capacity.
246    #[inline]
247    #[must_use]
248    pub fn is_full(&self) -> bool {
249        self.len() >= Self::CAPACITY
250    }
251
252    /// Construct a new empty array.
253    ///
254    /// # Panics
255    ///
256    /// If the element type requires large alignment, which is larger than
257    /// both alignment of `usize` and alignment of the type that provides the capacity.
258    #[inline]
259    #[must_use]
260    pub fn new() -> Self {
261        assert!(Self::HOST_SIZE > Self::HEADER_SIZE);
262        assert!(
263            (Self::CAPACITY == 0) || (mem::align_of::<Self>() % mem::align_of::<A>() == 0),
264            "InlineArray can't satisfy alignment of {}",
265            core::any::type_name::<A>()
266        );
267
268        let mut self_ = Self {
269            _header_align: [],
270            _phantom: PhantomData,
271            data: MaybeUninit::uninit(),
272        };
273        // Sanity check our assumptions about what is guaranteed by the compiler. If we are right,
274        // these should completely optimize out of the resulting binary.
275        assert_eq!(
276            &self_ as *const _ as usize,
277            self_.data.as_ptr() as usize,
278            "Padding at the start of struct",
279        );
280        assert_eq!(
281            self_.data.as_ptr() as usize % mem::align_of::<usize>(),
282            0,
283            "Unaligned header"
284        );
285        assert!(mem::size_of::<Self>() == mem::size_of::<T>() || mem::align_of::<T>() < mem::align_of::<Self>());
286        assert_eq!(0, unsafe { self_.data() } as usize % mem::align_of::<A>());
287        assert_eq!(0, unsafe { self_.data_mut() } as usize % mem::align_of::<A>());
288        assert!(Self::ELEMENT_SKIP == 0 || Self::HEADER_SKIP == 0);
289        unsafe { ptr::write(self_.len_mut(), 0usize) };
290        self_
291    }
292
293    /// Push an item to the back of the array.
294    ///
295    /// Panics if the capacity of the array is exceeded.
296    ///
297    /// Time: O(1)
298    pub fn push(&mut self, value: A) {
299        if self.is_full() {
300            panic!("InlineArray::push: chunk size overflow");
301        }
302        unsafe {
303            self.write_at(self.len(), value);
304            *self.len_mut() += 1;
305        }
306    }
307
308    /// Pop an item from the back of the array.
309    ///
310    /// Returns `None` if the array is empty.
311    ///
312    /// Time: O(1)
313    pub fn pop(&mut self) -> Option<A> {
314        if self.is_empty() {
315            None
316        } else {
317            unsafe {
318                *self.len_mut() -= 1;
319            }
320            Some(unsafe { self.read_at(self.len()) })
321        }
322    }
323
324    /// Insert a new value at index `index`, shifting all the following values
325    /// to the right.
326    ///
327    /// Panics if the index is out of bounds or the array is at capacity.
328    ///
329    /// Time: O(n) for the number of items shifted
330    pub fn insert(&mut self, index: usize, value: A) {
331        if self.is_full() {
332            panic!("InlineArray::push: chunk size overflow");
333        }
334        if index > self.len() {
335            panic!("InlineArray::insert: index out of bounds");
336        }
337        unsafe {
338            let src = self.ptr_at_mut(index);
339            ptr::copy(src, src.add(1), self.len() - index);
340            ptr::write(src, value);
341            *self.len_mut() += 1;
342        }
343    }
344
345    /// Remove the value at index `index`, shifting all the following values to
346    /// the left.
347    ///
348    /// Returns the removed value, or `None` if the array is empty or the index
349    /// is out of bounds.
350    ///
351    /// Time: O(n) for the number of items shifted
352    pub fn remove(&mut self, index: usize) -> Option<A> {
353        if index >= self.len() {
354            None
355        } else {
356            unsafe {
357                let src = self.ptr_at_mut(index);
358                let value = ptr::read(src);
359                *self.len_mut() -= 1;
360                ptr::copy(src.add(1), src, self.len() - index);
361                Some(value)
362            }
363        }
364    }
365
366    /// Split an array into two, the original array containing
367    /// everything up to `index` and the returned array containing
368    /// everything from `index` onwards.
369    ///
370    /// Panics if `index` is out of bounds.
371    ///
372    /// Time: O(n) for the number of items in the new chunk
373    pub fn split_off(&mut self, index: usize) -> Self {
374        if index > self.len() {
375            panic!("InlineArray::split_off: index out of bounds");
376        }
377        let mut out = Self::new();
378        if index < self.len() {
379            unsafe {
380                ptr::copy(self.ptr_at(index), out.data_mut(), self.len() - index);
381                *out.len_mut() = self.len() - index;
382                *self.len_mut() = index;
383            }
384        }
385        out
386    }
387
388    #[inline]
389    unsafe fn drop_contents(&mut self) {
390        ptr::drop_in_place::<[A]>(&mut **self) // uses DerefMut
391    }
392
393    /// Discard the contents of the array.
394    ///
395    /// Time: O(n)
396    pub fn clear(&mut self) {
397        unsafe {
398            self.drop_contents();
399            *self.len_mut() = 0;
400        }
401    }
402
403    /// Construct an iterator that drains values from the front of the array.
404    pub fn drain(&mut self) -> Drain<'_, A, T> {
405        Drain { array: self }
406    }
407}
408
impl<A, T> Drop for InlineArray<A, T> {
    /// Drop all live elements when the array itself is dropped.
    fn drop(&mut self) {
        // SAFETY: only the first `len()` slots are initialised, and
        // `drop_contents` drops exactly that slice (via DerefMut). The length
        // is not reset afterwards, which is fine as the array is going away.
        unsafe { self.drop_contents() }
    }
}
414
impl<A, T> Default for InlineArray<A, T> {
    /// Construct an empty array; equivalent to [`InlineArray::new`],
    /// including its panic conditions.
    fn default() -> Self {
        Self::new()
    }
}
420
// WANT:
// impl<A, T> Copy for InlineArray<A, T> where A: Copy {}
// (Currently impossible: `Copy` cannot be implemented for a type that also
// implements `Drop`, and `InlineArray` needs its `Drop` impl to release the
// elements it owns.)
423
424impl<A, T> Clone for InlineArray<A, T>
425where
426    A: Clone,
427{
428    fn clone(&self) -> Self {
429        let mut copy = Self::new();
430        for i in 0..self.len() {
431            unsafe {
432                copy.write_at(i, self.get_unchecked(i).clone());
433            }
434        }
435        unsafe {
436            *copy.len_mut() = self.len();
437        }
438        copy
439    }
440}
441
impl<A, T> Deref for InlineArray<A, T> {
    type Target = [A];
    /// View the array's contents as a slice.
    fn deref(&self) -> &Self::Target {
        // SAFETY: `data()` is either a pointer to the first element slot or a
        // well-aligned dangling pointer when capacity is zero (valid for an
        // empty slice), and the first `len()` slots are always initialised.
        unsafe { from_raw_parts(self.data(), self.len()) }
    }
}
448
impl<A, T> DerefMut for InlineArray<A, T> {
    /// View the array's contents as a mutable slice.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same invariants as `deref`; `&mut self` guarantees
        // exclusive access to the underlying storage.
        unsafe { from_raw_parts_mut(self.data_mut(), self.len()) }
    }
}
454
455impl<A, T> Borrow<[A]> for InlineArray<A, T> {
456    fn borrow(&self) -> &[A] {
457        self.deref()
458    }
459}
460
461impl<A, T> BorrowMut<[A]> for InlineArray<A, T> {
462    fn borrow_mut(&mut self) -> &mut [A] {
463        self.deref_mut()
464    }
465}
466
467impl<A, T> AsRef<[A]> for InlineArray<A, T> {
468    fn as_ref(&self) -> &[A] {
469        self.deref()
470    }
471}
472
473impl<A, T> AsMut<[A]> for InlineArray<A, T> {
474    fn as_mut(&mut self) -> &mut [A] {
475        self.deref_mut()
476    }
477}
478impl<A, T, Slice> PartialEq<Slice> for InlineArray<A, T>
479where
480    Slice: Borrow<[A]>,
481    A: PartialEq,
482{
483    fn eq(&self, other: &Slice) -> bool {
484        self.deref() == other.borrow()
485    }
486}
487
// Equality is total whenever the element type's equality is total.
impl<A, T> Eq for InlineArray<A, T> where A: Eq {}
489
490impl<A, T> PartialOrd for InlineArray<A, T>
491where
492    A: PartialOrd,
493{
494    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
495        self.iter().partial_cmp(other.iter())
496    }
497}
498
499impl<A, T> Ord for InlineArray<A, T>
500where
501    A: Ord,
502{
503    fn cmp(&self, other: &Self) -> Ordering {
504        self.iter().cmp(other.iter())
505    }
506}
507
508impl<A, T> Debug for InlineArray<A, T>
509where
510    A: Debug,
511{
512    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
513        f.write_str("Chunk")?;
514        f.debug_list().entries(self.iter()).finish()
515    }
516}
517
518impl<A, T> Hash for InlineArray<A, T>
519where
520    A: Hash,
521{
522    fn hash<H>(&self, hasher: &mut H)
523    where
524        H: Hasher,
525    {
526        for item in self {
527            item.hash(hasher)
528        }
529    }
530}
531
impl<A, T> IntoIterator for InlineArray<A, T> {
    type Item = A;
    type IntoIter = Iter<A, T>;
    /// Consume the array into an iterator over its values.
    fn into_iter(self) -> Self::IntoIter {
        Iter { array: self }
    }
}
539
540impl<A, T> FromIterator<A> for InlineArray<A, T> {
541    fn from_iter<I>(it: I) -> Self
542    where
543        I: IntoIterator<Item = A>,
544    {
545        let mut chunk = Self::new();
546        for item in it {
547            chunk.push(item);
548        }
549        chunk
550    }
551}
552
impl<'a, A, T> IntoIterator for &'a InlineArray<A, T> {
    type Item = &'a A;
    type IntoIter = SliceIter<'a, A>;
    /// Iterate over shared references to the array's contents.
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
560
impl<'a, A, T> IntoIterator for &'a mut InlineArray<A, T> {
    type Item = &'a mut A;
    type IntoIter = SliceIterMut<'a, A>;
    /// Iterate over mutable references to the array's contents.
    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}
568
569impl<A, T> Extend<A> for InlineArray<A, T> {
570    /// Append the contents of the iterator to the back of the array.
571    ///
572    /// Panics if the array exceeds its capacity.
573    ///
574    /// Time: O(n) for the length of the iterator
575    fn extend<I>(&mut self, it: I)
576    where
577        I: IntoIterator<Item = A>,
578    {
579        for item in it {
580            self.push(item);
581        }
582    }
583}
584
585impl<'a, A, T> Extend<&'a A> for InlineArray<A, T>
586where
587    A: 'a + Copy,
588{
589    /// Append the contents of the iterator to the back of the array.
590    ///
591    /// Panics if the array exceeds its capacity.
592    ///
593    /// Time: O(n) for the length of the iterator
594    fn extend<I>(&mut self, it: I)
595    where
596        I: IntoIterator<Item = &'a A>,
597    {
598        for item in it {
599            self.push(*item);
600        }
601    }
602}
603
#[cfg(test)]
mod test {
    use super::*;
    use crate::tests::DropTest;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Elements must be dropped exactly once: on pop, and again for the
    // remainder when the array itself is dropped.
    #[test]
    fn dropping() {
        let counter = AtomicUsize::new(0);
        {
            let mut chunk: InlineArray<DropTest<'_>, [usize; 32]> = InlineArray::new();
            for _i in 0..16 {
                chunk.push(DropTest::new(&counter));
            }
            assert_eq!(16, counter.load(Ordering::Relaxed));
            for _i in 0..8 {
                chunk.pop();
            }
            assert_eq!(8, counter.load(Ordering::Relaxed));
        }
        assert_eq!(0, counter.load(Ordering::Relaxed));
    }

    // Zero-sized elements give effectively unlimited capacity.
    #[test]
    fn zero_sized_values() {
        let mut chunk: InlineArray<(), [usize; 32]> = InlineArray::new();
        for _i in 0..65536 {
            chunk.push(());
        }
        assert_eq!(65536, chunk.len());
        assert_eq!(Some(()), chunk.pop());
    }

    // A host type with smaller alignment than the elements still works as
    // long as the overall struct alignment suffices.
    #[test]
    fn low_align_base() {
        let mut chunk: InlineArray<String, [u8; 512]> = InlineArray::new();
        chunk.push("Hello".to_owned());
        assert_eq!(chunk[0], "Hello");

        let mut chunk: InlineArray<String, [u16; 512]> = InlineArray::new();
        chunk.push("Hello".to_owned());
        assert_eq!(chunk[0], "Hello");
    }

    // f64 elements (8-byte alignment) in byte-aligned hosts, including an
    // odd-sized host.
    #[test]
    fn float_align() {
        let mut chunk: InlineArray<f64, [u8; 16]> = InlineArray::new();
        chunk.push(1234.);
        assert_eq!(chunk[0], 1234.);

        let mut chunk: InlineArray<f64, [u8; 17]> = InlineArray::new();
        chunk.push(1234.);
        assert_eq!(chunk[0], 1234.);
    }

    // Compile-time check: `InlineArray` must be usable in recursive types
    // (this is why the struct can't use `[A; 0]` for alignment).
    #[test]
    fn recursive_types_compile() {
        #[allow(dead_code)]
        enum Recursive {
            A(InlineArray<Recursive, u64>),
            B,
        }
    }

    // Elements whose alignment exceeds the container's must yield capacity 0.
    #[test]
    fn insufficient_alignment1() {
        #[repr(align(256))]
        struct BigAlign(u8);
        #[repr(align(32))]
        struct MediumAlign(u8);

        assert_eq!(0, InlineArray::<BigAlign, [usize; 256]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [u64; 256]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [f64; 256]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [MediumAlign; 256]>::CAPACITY);
    }

    // A capacity-0 array must still support the whole API safely.
    #[test]
    fn insufficient_alignment2() {
        #[repr(align(256))]
        struct BigAlign(usize);

        let mut bad: InlineArray<BigAlign, [usize; 256]> = InlineArray::new();
        assert_eq!(0, InlineArray::<BigAlign, [usize; 256]>::CAPACITY);
        assert_eq!(0, bad.len());
        assert_eq!(0, bad[..].len());
        assert_eq!(true, bad.is_full());
        assert_eq!(0, bad.drain().count());
        assert!(bad.pop().is_none());
        assert!(bad.remove(0).is_none());
        assert!(bad.split_off(0).is_full());
        bad.clear();
    }

    // Over-aligned elements are fine when the host type provides that
    // alignment itself; stored elements must land on aligned addresses.
    #[test]
    fn sufficient_alignment1() {
        #[repr(align(256))]
        struct BigAlign(u8);

        assert_eq!(13, InlineArray::<BigAlign, [BigAlign; 14]>::CAPACITY);
        assert_eq!(1, InlineArray::<BigAlign, [BigAlign; 2]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [BigAlign; 1]>::CAPACITY);

        let mut chunk: InlineArray<BigAlign, [BigAlign; 2]> = InlineArray::new();
        chunk.push(BigAlign(42));
        assert_eq!(
            chunk.get(0).unwrap() as *const _ as usize % mem::align_of::<BigAlign>(),
            0
        );
    }

    // The host's alignment may exceed the element's; capacity and element
    // placement must still be computed correctly.
    #[test]
    fn sufficient_alignment2() {
        #[repr(align(128))]
        struct BigAlign([u8; 64]);
        #[repr(align(256))]
        struct BiggerAlign(u8);
        assert_eq!(128, mem::align_of::<BigAlign>());
        assert_eq!(256, mem::align_of::<BiggerAlign>());

        assert_eq!(199, InlineArray::<BigAlign, [BiggerAlign; 100]>::CAPACITY);
        assert_eq!(3, InlineArray::<BigAlign, [BiggerAlign; 2]>::CAPACITY);
        assert_eq!(1, InlineArray::<BigAlign, [BiggerAlign; 1]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [BiggerAlign; 0]>::CAPACITY);

        let mut chunk: InlineArray<BigAlign, [BiggerAlign; 1]> = InlineArray::new();
        chunk.push(BigAlign([0; 64]));
        assert_eq!(
            chunk.get(0).unwrap() as *const _ as usize % mem::align_of::<BigAlign>(),
            0
        );
    }
}