//! A newtype with alignment of at least `A` bytes
//!
//! Wrapping a value in [`Aligned`] raises its alignment to that of the marker
//! type `A` (one of [`A2`] through [`A64`]) without otherwise changing its
//! layout. Additional traits allow splitting, chunking, and reinterpreting
//! aligned byte buffers in place.

#![deny(missing_docs)]
#![deny(warnings)]
#![cfg_attr(not(test), no_std)]

use core::{
    cmp::Ordering,
    fmt::{Debug, Display},
    hash::{Hash, Hasher},
    iter::{FromIterator, IntoIterator},
    ops,
};

use generic_array::{typenum, ArrayLength, GenericArray};
use typenum::{Diff, IsGreaterOrEqual, IsLessOrEqual, PartialDiv, Unsigned, B1, U8};

#[cfg(feature = "subtle")]
pub use subtle;
#[cfg(feature = "subtle")]
use subtle::{Choice, ConstantTimeEq};

mod sealed;

/// 2-byte alignment
#[repr(C, align(2))]
pub struct A2;

/// 4-byte alignment
#[repr(C, align(4))]
pub struct A4;

/// 8-byte alignment
#[repr(C, align(8))]
pub struct A8;

/// 16-byte alignment
#[repr(C, align(16))]
pub struct A16;

/// 32-byte alignment
#[repr(C, align(32))]
pub struct A32;

/// 64-byte alignment
#[repr(C, align(64))]
pub struct A64;

/// A newtype with alignment of at least `A` bytes
#[repr(C)]
pub struct Aligned<A, T>
where
    T: ?Sized,
{
    // `[A; 0]` is zero-sized but forces the alignment of `A` onto the struct.
    _alignment: [A; 0],
    value: T,
}

/// Changes the alignment of `value` to be at least `A` bytes
#[allow(non_snake_case)]
pub const fn Aligned<A, T>(value: T) -> Aligned<A, T> {
    Aligned {
        _alignment: [],
        value,
    }
}
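
// A minimal usage sketch (ours, not part of the original API surface):
// wrapping a value in `Aligned` raises its alignment to that of the marker
// type without changing its size.
#[cfg(test)]
mod constructor_example {
    use super::*;
    use core::mem;

    #[test]
    fn wrapping_raises_alignment() {
        // A bare [u8; 16] has alignment 1; `Aligned<A16, _>` guarantees 16.
        let buf: Aligned<A16, [u8; 16]> = Aligned([0u8; 16]);
        assert_eq!(mem::size_of_val(&buf), 16);
        assert_eq!(mem::align_of_val(&buf), 16);
        assert_eq!(buf.as_ptr() as usize % 16, 0);
    }
}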

impl<A, T> ops::Deref for Aligned<A, T>
where
    A: sealed::Alignment,
    T: ?Sized,
{
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.value
    }
}

impl<A, T> ops::DerefMut for Aligned<A, T>
where
    A: sealed::Alignment,
    T: ?Sized,
{
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<A, T> ops::Index<ops::RangeTo<usize>> for Aligned<A, [T]>
where
    A: sealed::Alignment,
{
    type Output = Aligned<A, [T]>;

    fn index(&self, range: ops::RangeTo<usize>) -> &Aligned<A, [T]> {
        // A prefix of an aligned slice starts at the same address, so it is
        // still aligned.
        unsafe { &*(&self.value[range] as *const [T] as *const Aligned<A, [T]>) }
    }
}
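
// A minimal sketch (ours): taking a prefix with `..` keeps the `Aligned`
// wrapper, since truncating on the right cannot move the start of the buffer.
#[cfg(test)]
mod index_example {
    use super::*;

    #[test]
    fn prefix_keeps_alignment() {
        let buf: Aligned<A8, [u8; 8]> = Aligned([0u8; 8]);
        // Unsize to an aligned slice first, then index.
        let slice: &Aligned<A8, [u8]> = &buf;
        let prefix: &Aligned<A8, [u8]> = &slice[..4];
        assert_eq!(prefix.len(), 4);
        assert_eq!(prefix.as_ptr() as usize % 8, 0);
    }
}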

impl<A, T> Clone for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Clone,
{
    #[inline]
    fn clone(&self) -> Self {
        Self {
            _alignment: [],
            value: self.value.clone(),
        }
    }
}

impl<A, T> Default for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Default,
{
    #[inline]
    fn default() -> Self {
        Self {
            _alignment: [],
            value: Default::default(),
        }
    }
}

impl<A, T> Debug for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Debug,
{
    #[inline]
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.value.fmt(f)
    }
}

impl<A, T> Display for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Display,
{
    #[inline]
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.value.fmt(f)
    }
}

impl<A, T> PartialEq for Aligned<A, T>
where
    A: sealed::Alignment,
    T: PartialEq,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}

impl<A, T> Eq for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Eq,
{
}

impl<A, T> Hash for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Hash,
{
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.value.hash(state);
    }
}

impl<A, T> Ord for Aligned<A, T>
where
    A: sealed::Alignment,
    T: Ord,
{
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.value.cmp(&other.value)
    }
}

impl<A, T> PartialOrd for Aligned<A, T>
where
    A: sealed::Alignment,
    T: PartialOrd,
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.value.partial_cmp(&other.value)
    }
}

impl<A, T, V> FromIterator<V> for Aligned<A, T>
where
    A: sealed::Alignment,
    T: FromIterator<V>,
{
    fn from_iter<U: IntoIterator<Item = V>>(iter: U) -> Self {
        Aligned(T::from_iter(iter))
    }
}

impl<A, T> IntoIterator for Aligned<A, T>
where
    A: sealed::Alignment,
    T: IntoIterator,
{
    type Item = T::Item;
    type IntoIter = T::IntoIter;

    fn into_iter(self) -> Self::IntoIter {
        self.value.into_iter()
    }
}

impl<'a, A, T> IntoIterator for &'a Aligned<A, T>
where
    A: sealed::Alignment,
    &'a T: IntoIterator,
{
    type Item = <&'a T as IntoIterator>::Item;
    type IntoIter = <&'a T as IntoIterator>::IntoIter;

    fn into_iter(self) -> Self::IntoIter {
        self.value.into_iter()
    }
}

impl<'a, A, T> IntoIterator for &'a mut Aligned<A, T>
where
    A: sealed::Alignment,
    &'a mut T: IntoIterator,
{
    type Item = <&'a mut T as IntoIterator>::Item;
    type IntoIter = <&'a mut T as IntoIterator>::IntoIter;

    fn into_iter(self) -> Self::IntoIter {
        self.value.into_iter()
    }
}
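
// A minimal sketch (ours) of `FromIterator`/`IntoIterator`: collect straight
// into an aligned generic array, then iterate it by reference.
#[cfg(test)]
mod collect_example {
    use super::*;
    use generic_array::typenum::U4;

    #[test]
    fn collect_and_sum() {
        // Collecting panics if the iterator length does not match `U4`.
        let x: Aligned<A8, GenericArray<u8, U4>> = (0u8..4).collect();
        let total: u8 = (&x).into_iter().copied().sum();
        assert_eq!(total, 6);
    }
}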

// Weaken the alignment of a reference: the target alignment `A` must not
// exceed the source alignment `A2`.
impl<'a, A, A2, T> AsRef<Aligned<A, T>> for &'a Aligned<A2, T>
where
    A: sealed::Alignment,
    A2: sealed::Alignment,
    A::Num: IsLessOrEqual<A2::Num, Output = B1>,
{
    #[inline]
    fn as_ref(&self) -> &Aligned<A, T> {
        assert_aligned(*self)
    }
}

// Weaken the alignment of a mutable reference: the target alignment `A` must
// not exceed the source alignment `A2`.
impl<'a, A, A2, T> AsMut<Aligned<A, T>> for &'a mut Aligned<A2, T>
where
    A: sealed::Alignment,
    A2: sealed::Alignment,
    A::Num: IsLessOrEqual<A2::Num, Output = B1>,
{
    #[inline]
    fn as_mut(&mut self) -> &mut Aligned<A, T> {
        assert_aligned_mut(*self)
    }
}
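
// A minimal sketch (ours): alignment can be weakened but not strengthened.
// An `A8`-aligned reference can be viewed as `A2`-aligned; the typenum bound
// rejects the opposite direction at compile time.
#[cfg(test)]
mod as_ref_example {
    use super::*;

    #[test]
    fn weaken_alignment() {
        let x: Aligned<A8, [u8; 4]> = Aligned([1, 2, 3, 4]);
        let xr: &Aligned<A8, [u8; 4]> = &x;
        // Reborrow at the weaker `A2` alignment; the payload is untouched.
        let r: &Aligned<A2, [u8; 4]> = xr.as_ref();
        assert_eq!(**r, [1, 2, 3, 4]);
    }
}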

// Aligned arrays implement `GenericSequence` by generating the inner array
// and wrapping it.
unsafe impl<A, T, N> generic_array::sequence::GenericSequence<T> for Aligned<A, GenericArray<T, N>>
where
    N: ArrayLength<T>,
    A: sealed::Alignment,
{
    type Length = N;
    type Sequence = Self;

    #[inline]
    fn generate<F>(f: F) -> Self::Sequence
    where
        F: FnMut(usize) -> T,
    {
        Aligned(GenericArray::generate(f))
    }
}
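
// A minimal sketch (ours) of `GenericSequence::generate`: building an aligned
// array element-by-element from an index function.
#[cfg(test)]
mod generate_example {
    use super::*;
    use generic_array::{sequence::GenericSequence, typenum::U8};

    type Buf = Aligned<A8, GenericArray<u8, U8>>;

    #[test]
    fn generate_aligned() {
        let x = <Buf as GenericSequence<u8>>::generate(|i| (i * 2) as u8);
        assert_eq!(x[3], 6);
        assert_eq!(core::mem::align_of_val(&x), 8);
    }
}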

// Splitting an aligned array yields two aligned arrays, provided the split
// point `K` is a multiple of the alignment (enforced by the
// `K: PartialDiv<A::Num>` bound).
unsafe impl<'a, A, N, K> generic_array::sequence::Split<u8, K>
    for &'a Aligned<A, GenericArray<u8, N>>
where
    A: sealed::Alignment,
    N: ArrayLength<u8> + ops::Sub<K>,
    K: ArrayLength<u8> + PartialDiv<A::Num> + 'static,
    Diff<N, K>: ArrayLength<u8>,
{
    type First = &'a Aligned<A, GenericArray<u8, K>>;
    type Second = &'a Aligned<A, GenericArray<u8, Diff<N, K>>>;
    #[inline]
    fn split(self) -> (Self::First, Self::Second) {
        // Split the inner array, then re-wrap both halves; `assert_aligned`
        // checks the alignment of each half at runtime.
        let (first, second): (&GenericArray<u8, K>, &GenericArray<u8, Diff<N, K>>) =
            (&self.value).split();
        (assert_aligned(first), assert_aligned(second))
    }
}

unsafe impl<'a, A, N, K> generic_array::sequence::Split<u8, K>
    for &'a mut Aligned<A, GenericArray<u8, N>>
where
    A: sealed::Alignment,
    N: ArrayLength<u8> + ops::Sub<K>,
    K: ArrayLength<u8> + PartialDiv<A::Num> + 'static,
    Diff<N, K>: ArrayLength<u8>,
{
    type First = &'a mut Aligned<A, GenericArray<u8, K>>;
    type Second = &'a mut Aligned<A, GenericArray<u8, Diff<N, K>>>;
    #[inline]
    fn split(self) -> (Self::First, Self::Second) {
        let (first, second): (&mut GenericArray<u8, K>, &mut GenericArray<u8, Diff<N, K>>) =
            (&mut self.value).split();
        (assert_aligned_mut(first), assert_aligned_mut(second))
    }
}
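
// A minimal sketch (ours) of aligned splitting: with `K = U8` and `A = A8`,
// the split point is a multiple of the alignment, so both halves keep their
// `Aligned` wrappers.
#[cfg(test)]
mod split_example {
    use super::*;
    use generic_array::{
        arr,
        sequence::Split,
        typenum::{U16, U8},
    };

    #[test]
    fn split_preserves_alignment() {
        let x: Aligned<A8, GenericArray<u8, U16>> =
            Aligned(arr![u8; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
        let (lo, hi) = <&Aligned<A8, GenericArray<u8, U16>> as Split<u8, U8>>::split(&x);
        assert_eq!(lo, &Aligned(arr![u8; 0, 1, 2, 3, 4, 5, 6, 7]));
        assert_eq!(hi, &Aligned(arr![u8; 8, 9, 10, 11, 12, 13, 14, 15]));
    }
}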

/// Cast `&T` to `&Aligned<A, T>`, checking at runtime that the reference is
/// actually `A`-aligned.
#[inline]
fn assert_aligned<A: sealed::Alignment, T>(t: &T) -> &Aligned<A, T> {
    unsafe {
        let ptr: *const T = t;
        assert!(ptr.align_offset(A::Num::USIZE) == 0);
        &*(ptr as *const Aligned<A, T>)
    }
}

/// Cast `&mut T` to `&mut Aligned<A, T>`, checking at runtime that the
/// reference is actually `A`-aligned.
#[inline]
fn assert_aligned_mut<A: sealed::Alignment, T>(t: &mut T) -> &mut Aligned<A, T> {
    unsafe {
        let ptr: *mut T = t;
        assert!(ptr.align_offset(A::Num::USIZE) == 0);
        &mut *(ptr as *mut Aligned<A, T>)
    }
}

/// Trait for types that can be viewed in place as slices of native-endian
/// integers, without copying.
pub trait AsNeSlice {
    /// Represent the value as a slice of native-endian `u16`
    fn as_ne_u16_slice(&self) -> &[u16];
    /// Represent the value as a mutable slice of native-endian `u16`
    fn as_mut_ne_u16_slice(&mut self) -> &mut [u16];
    /// Represent the value as a slice of native-endian `u32`
    fn as_ne_u32_slice(&self) -> &[u32];
    /// Represent the value as a mutable slice of native-endian `u32`
    fn as_mut_ne_u32_slice(&mut self) -> &mut [u32];
    /// Represent the value as a slice of native-endian `u64`
    fn as_ne_u64_slice(&self) -> &[u64];
    /// Represent the value as a mutable slice of native-endian `u64`
    fn as_mut_ne_u64_slice(&mut self) -> &mut [u64];
}

// Since the buffer is at least 8-byte aligned and its length is a multiple
// of 8 bytes, `align_to` never produces a non-empty unaligned prefix or
// suffix for u16, u32, or u64.
impl<A, N> AsNeSlice for Aligned<A, GenericArray<u8, N>>
where
    A: sealed::Alignment,
    A::Num: IsGreaterOrEqual<U8, Output = B1>,
    N: ArrayLength<u8> + PartialDiv<U8>,
{
    #[inline]
    fn as_ne_u16_slice(&self) -> &[u16] {
        let (l, result, r) = unsafe { self.as_slice().align_to::<u16>() };
        debug_assert!(l.is_empty());
        debug_assert!(r.is_empty());
        result
    }

    #[inline]
    fn as_mut_ne_u16_slice(&mut self) -> &mut [u16] {
        let (l, result, r) = unsafe { self.as_mut_slice().align_to_mut::<u16>() };
        debug_assert!(l.is_empty());
        debug_assert!(r.is_empty());
        result
    }

    #[inline]
    fn as_ne_u32_slice(&self) -> &[u32] {
        let (l, result, r) = unsafe { self.as_slice().align_to::<u32>() };
        debug_assert!(l.is_empty());
        debug_assert!(r.is_empty());
        result
    }

    #[inline]
    fn as_mut_ne_u32_slice(&mut self) -> &mut [u32] {
        let (l, result, r) = unsafe { self.as_mut_slice().align_to_mut::<u32>() };
        debug_assert!(l.is_empty());
        debug_assert!(r.is_empty());
        result
    }

    #[inline]
    fn as_ne_u64_slice(&self) -> &[u64] {
        let (l, result, r) = unsafe { self.as_slice().align_to::<u64>() };
        debug_assert!(l.is_empty());
        debug_assert!(r.is_empty());
        result
    }

    #[inline]
    fn as_mut_ne_u64_slice(&mut self) -> &mut [u64] {
        let (l, result, r) = unsafe { self.as_mut_slice().align_to_mut::<u64>() };
        debug_assert!(l.is_empty());
        debug_assert!(r.is_empty());
        result
    }
}
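
// A minimal sketch (ours) of native-endian reinterpretation: the buffer is
// viewed in place as wider integers. The expected word values assume a
// little-endian target, hence the cfg gate.
#[cfg(all(test, target_endian = "little"))]
mod ne_slice_example {
    use super::*;
    use generic_array::typenum::U16;

    #[test]
    fn bytes_as_words() {
        let mut buf: Aligned<A8, GenericArray<u8, U16>> = Default::default();
        buf[0] = 1;
        assert_eq!(buf.as_ne_u16_slice()[0], 1u16);
        assert_eq!(buf.as_ne_u32_slice()[0], 1u32);
        assert_eq!(buf.as_ne_u64_slice()[0], 1u64);
    }
}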

// With the "subtle" feature, aligned byte buffers can be compared in
// constant time, eight bytes at a time.
#[cfg(feature = "subtle")]
impl<A, N> ConstantTimeEq for Aligned<A, GenericArray<u8, N>>
where
    A: sealed::Alignment,
    A::Num: IsGreaterOrEqual<U8, Output = B1>,
    N: ArrayLength<u8> + PartialDiv<U8>,
{
    #[inline]
    fn ct_eq(&self, other: &Self) -> Choice {
        self.as_ne_u64_slice().ct_eq(other.as_ne_u64_slice())
    }
}
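
// A minimal sketch (ours, compiled only with the "subtle" feature) of the
// constant-time comparison: equal buffers give a true `Choice`, and a single
// flipped byte anywhere gives a false one.
#[cfg(all(test, feature = "subtle"))]
mod ct_eq_example {
    use super::*;
    use generic_array::typenum::U16;

    #[test]
    fn ct_eq_detects_difference() {
        let a: Aligned<A8, GenericArray<u8, U16>> = Default::default();
        let mut b: Aligned<A8, GenericArray<u8, U16>> = Default::default();
        assert_eq!(a.ct_eq(&b).unwrap_u8(), 1);
        b[3] = 1;
        assert_eq!(a.ct_eq(&b).unwrap_u8(), 0);
    }
}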

/// Trait for viewing a byte buffer as a sequence of aligned chunks of a
/// fixed size `M`. Trailing bytes that do not fill a whole chunk are not
/// exposed.
pub trait AsAlignedChunks<A: sealed::Alignment, M: ArrayLength<u8> + PartialDiv<A::Num>> {
    /// Break self into aligned chunks of size `M`
    fn as_aligned_chunks(&self) -> &[Aligned<A, GenericArray<u8, M>>];
    /// Break self into mutable aligned chunks of size `M`
    fn as_mut_aligned_chunks(&mut self) -> &mut [Aligned<A, GenericArray<u8, M>>];
}

// Any aligned buffer can be viewed as chunks at the same or weaker alignment
// `A2`, provided the chunk size `M` is a multiple of `A2`.
impl<A, A2, N, M> AsAlignedChunks<A2, M> for Aligned<A, GenericArray<u8, N>>
where
    A: sealed::Alignment,
    A2: sealed::Alignment,
    A2::Num: IsLessOrEqual<A::Num, Output = B1>,
    N: ArrayLength<u8>,
    M: ArrayLength<u8> + PartialDiv<A2::Num>,
{
    #[inline]
    fn as_aligned_chunks(&self) -> &[Aligned<A2, GenericArray<u8, M>>] {
        unsafe {
            let ptr = self as *const Aligned<A, GenericArray<u8, N>>
                as *const Aligned<A2, GenericArray<u8, M>>;
            assert!(ptr.align_offset(A::Num::USIZE) == 0);
            assert!(M::USIZE > 0, "Division by zero");
            // Integer division rounds down, so trailing bytes that don't
            // fill a whole chunk are not exposed.
            core::slice::from_raw_parts(ptr, N::USIZE / M::USIZE)
        }
    }
    #[inline]
    fn as_mut_aligned_chunks(&mut self) -> &mut [Aligned<A2, GenericArray<u8, M>>] {
        unsafe {
            let ptr = self as *mut Aligned<A, GenericArray<u8, N>>
                as *mut Aligned<A2, GenericArray<u8, M>>;
            assert!(ptr.align_offset(A::Num::USIZE) == 0);
            assert!(M::USIZE > 0, "Division by zero");
            core::slice::from_raw_parts_mut(ptr, N::USIZE / M::USIZE)
        }
    }
}
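
// A minimal sketch (ours) of in-place chunking: a 64-byte, 8-aligned buffer
// is viewed as four aligned 16-byte chunks; writes through a chunk land in
// the underlying buffer.
#[cfg(test)]
mod chunks_example {
    use super::*;
    use generic_array::typenum::{U16, U64};

    #[test]
    fn chunks_alias_the_buffer() {
        let mut buf: Aligned<A8, GenericArray<u8, U64>> = Default::default();
        let chunks: &mut [Aligned<A8, GenericArray<u8, U16>>] = buf.as_mut_aligned_chunks();
        assert_eq!(chunks.len(), 4);
        chunks[1][0] = 0xff;
        assert_eq!(buf[16], 0xff);
    }
}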

#[cfg(test)]
mod testing {
    use super::*;
    use generic_array::arr;

    use core::mem;
    use generic_array::{
        sequence::Split,
        typenum::{U128, U16, U192, U24, U32, U64, U8, U96},
    };

    // Shorthand for the aligned byte-buffer types used throughout the tests.
    type A8Bytes<N> = Aligned<A8, GenericArray<u8, N>>;
    type A64Bytes<N> = Aligned<A64, GenericArray<u8, N>>;

    #[test]
    fn sanity() {
        let x: Aligned<A2, _> = Aligned([0u8; 3]);
        let y: Aligned<A4, _> = Aligned([0u8; 3]);
        let z: Aligned<A8, _> = Aligned([0u8; 3]);
        let w: Aligned<A16, _> = Aligned([0u8; 3]);

        // The wrapper takes the alignment of the marker type.
        assert_eq!(mem::align_of_val(&x), 2);
        assert_eq!(mem::align_of_val(&y), 4);
        assert_eq!(mem::align_of_val(&z), 8);
        assert_eq!(mem::align_of_val(&w), 16);

        assert!(x.as_ptr() as usize % 2 == 0);
        assert!(y.as_ptr() as usize % 4 == 0);
        assert!(z.as_ptr() as usize % 8 == 0);
        assert!(w.as_ptr() as usize % 16 == 0);

        // The size of the payload is unchanged.
        assert_eq!(x.len(), 3);
        assert_eq!(y.len(), 3);
        assert_eq!(z.len(), 3);
        assert_eq!(w.len(), 3);

        // Unsize to aligned slices, then check that prefix indexing
        // preserves alignment.
        let x: &Aligned<_, [_]> = &x;
        let y: &Aligned<_, [_]> = &y;
        let z: &Aligned<_, [_]> = &z;
        let w: &Aligned<_, [_]> = &w;

        let x: &Aligned<_, _> = &x[..2];
        let y: &Aligned<_, _> = &y[..2];
        let z: &Aligned<_, _> = &z[..2];
        let w: &Aligned<_, _> = &w[..2];

        assert!(x.as_ptr() as usize % 2 == 0);
        assert!(y.as_ptr() as usize % 4 == 0);
        assert!(z.as_ptr() as usize % 8 == 0);
        assert!(w.as_ptr() as usize % 16 == 0);

        // Aligned unsized payloads also work behind a `Box`.
        let x: Box<Aligned<A2, [u8]>> = Box::new(Aligned([0u8; 3]));
        let y: Box<Aligned<A4, [u8]>> = Box::new(Aligned([0u8; 3]));
        let z: Box<Aligned<A8, [u8]>> = Box::new(Aligned([0u8; 3]));
        let w: Box<Aligned<A16, [u8]>> = Box::new(Aligned([0u8; 3]));

        assert_eq!(mem::align_of_val(&*x), 2);
        assert_eq!(mem::align_of_val(&*y), 4);
        assert_eq!(mem::align_of_val(&*z), 8);
        assert_eq!(mem::align_of_val(&*w), 16);

        // `&Aligned<A, [u8; 3]>` coerces to `&Aligned<A, [u8]>` and then
        // to `&[u8]`.
        let x: Aligned<A2, _> = Aligned([0u8; 3]);
        let y: &Aligned<A2, [u8]> = &x;
        let _: &[u8] = y;
    }

    #[test]
    fn aligned_split() {
        let x: A8Bytes<U24> = Aligned(
            arr![u8; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
        );

        let (y, z) = <&A8Bytes<U24> as Split<u8, U8>>::split(&x);
        assert_eq!(y, &Aligned(arr![u8; 0, 1, 2, 3, 4, 5, 6, 7]));
        assert_eq!(
            z,
            &Aligned(arr![u8; 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
        );

        let (v, w) = <&A8Bytes<U24> as Split<u8, U16>>::split(&x);
        assert_eq!(
            v,
            &Aligned(arr![u8; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
        );
        assert_eq!(w, &Aligned(arr![u8; 16, 17, 18, 19, 20, 21, 22, 23]));
    }

    #[test]
    fn aligned_split_64() {
        let mut x = A64Bytes::<U192>::default();
        for (idx, byte) in x.iter_mut().enumerate() {
            *byte = idx as u8;
        }

        let (y, z) = <&A64Bytes<U192> as Split<u8, U64>>::split(&x);
        for (idx, byte) in y.iter().enumerate() {
            assert_eq!(*byte, idx as u8);
        }
        for (idx, byte) in z.iter().enumerate() {
            assert_eq!(*byte, 64 + idx as u8);
        }

        let (v, w) = <&A64Bytes<U192> as Split<u8, U128>>::split(&x);
        for (idx, byte) in v.iter().enumerate() {
            assert_eq!(*byte, idx as u8);
        }
        for (idx, byte) in w.iter().enumerate() {
            assert_eq!(*byte, 128 + idx as u8);
        }
    }

    #[test]
    fn test_aligned_chunks() {
        let buff = A8Bytes::<U32>::default();
        let chunks = AsAlignedChunks::<A8, U16>::as_aligned_chunks(&buff);
        assert_eq!(chunks.len(), 2);

        let buff = A8Bytes::<U64>::default();
        let chunks = AsAlignedChunks::<A8, U16>::as_aligned_chunks(&buff);
        assert_eq!(chunks.len(), 4);

        let buff = A8Bytes::<U96>::default();
        let chunks = AsAlignedChunks::<A8, U8>::as_aligned_chunks(&buff);
        assert_eq!(chunks.len(), 12);
    }

    #[test]
    fn test_aligned_chunks_64() {
        let buff = A64Bytes::<U128>::default();
        let chunks = AsAlignedChunks::<A64, U64>::as_aligned_chunks(&buff);
        assert_eq!(chunks.len(), 2);

        let buff = A64Bytes::<U64>::default();
        let chunks = AsAlignedChunks::<A8, U8>::as_aligned_chunks(&buff);
        assert_eq!(chunks.len(), 8);

        let buff = A64Bytes::<U96>::default();
        let chunks = AsAlignedChunks::<A32, U32>::as_aligned_chunks(&buff);
        assert_eq!(chunks.len(), 3);
    }

    // The expected word values below assume little-endian byte order, hence
    // the x86_64 gate.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_as_ne_slice() {
        let mut buff = A8Bytes::<U32>::default();
        {
            let u16s = buff.as_ne_u16_slice();
            assert_eq!(u16s.len(), 16);
            for num in u16s.iter() {
                assert_eq!(*num, 0u16);
            }
        }

        {
            let u32s = buff.as_ne_u32_slice();
            assert_eq!(u32s.len(), 8);
            for num in u32s.iter() {
                assert_eq!(*num, 0u32);
            }
        }

        {
            let u64s = buff.as_mut_ne_u64_slice();
            assert_eq!(u64s.len(), 4);
            for num in u64s.iter() {
                assert_eq!(*num, 0u64);
            }

            u64s[2] = !7;
        }

        {
            let u64s = buff.as_ne_u64_slice();
            assert_eq!(u64s.len(), 4);
            assert_eq!(u64s[0], 0u64);
            assert_eq!(u64s[1], 0u64);
            assert_eq!(u64s[2], !7u64);
            assert_eq!(u64s[3], 0u64);
        }

        {
            let u32s = buff.as_ne_u32_slice();
            assert_eq!(u32s.len(), 8);
            assert_eq!(u32s[0], 0u32);
            assert_eq!(u32s[1], 0u32);
            assert_eq!(u32s[2], 0u32);
            assert_eq!(u32s[3], 0u32);
            assert_eq!(u32s[4], !7u32);
            assert_eq!(u32s[5], !0u32);
            assert_eq!(u32s[6], 0u32);
            assert_eq!(u32s[7], 0u32);
        }

        {
            let u16s = buff.as_ne_u16_slice();
            assert_eq!(u16s.len(), 16);
            assert_eq!(u16s[0], 0u16);
            assert_eq!(u16s[1], 0u16);
            assert_eq!(u16s[2], 0u16);
            assert_eq!(u16s[3], 0u16);
            assert_eq!(u16s[4], 0u16);
            assert_eq!(u16s[5], 0u16);
            assert_eq!(u16s[6], 0u16);
            assert_eq!(u16s[7], 0u16);
            assert_eq!(u16s[8], !7u16);
            assert_eq!(u16s[9], !0u16);
            assert_eq!(u16s[10], !0u16);
            assert_eq!(u16s[11], !0u16);
            assert_eq!(u16s[12], 0u16);
            assert_eq!(u16s[13], 0u16);
            assert_eq!(u16s[14], 0u16);
            assert_eq!(u16s[15], 0u16);
        }

        {
            let u16s = buff.as_mut_ne_u16_slice();
            u16s[2] = !5u16;
        }

        {
            let u32s = buff.as_ne_u32_slice();
            assert_eq!(u32s.len(), 8);
            assert_eq!(u32s[0], 0u32);
            assert_eq!(u32s[1], !5u16 as u32);
            assert_eq!(u32s[2], 0u32);
            assert_eq!(u32s[3], 0u32);
            assert_eq!(u32s[4], !7u32);
            assert_eq!(u32s[5], !0u32);
            assert_eq!(u32s[6], 0u32);
            assert_eq!(u32s[7], 0u32);
        }

        {
            let u64s = buff.as_ne_u64_slice();
            assert_eq!(u64s.len(), 4);
            assert_eq!(u64s[0], (!5u16 as u64) << 32);
            assert_eq!(u64s[1], 0u64);
            assert_eq!(u64s[2], !7u64);
            assert_eq!(u64s[3], 0u64);
        }
    }
}