use core::borrow::Borrow;
use core::cmp::Ordering;
use core::fmt::{Debug, Error, Formatter};
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::mem::{replace, MaybeUninit};
use core::ops::{Bound, Range, RangeBounds};
use core::ops::{Index, IndexMut};

pub use array_ops::{Array, ArrayMut, HasLength};

mod index;
use index::{IndexIter, RawIndex};

mod iter;
pub use iter::{Drain, Iter, IterMut, OwnedIter};

mod slice;
pub use slice::{Slice, SliceMut};

#[cfg(feature = "refpool")]
mod refpool;

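/// A fixed capacity ring buffer.
///
/// An array-like data structure holding up to `N` elements, which can be
/// pushed to and popped from both ends in constant time by letting its
/// contents wrap around the edges of the underlying storage.
///
/// A minimal usage sketch; the `sized_chunks::ring_buffer` import path here
/// and in the examples below is an assumption for illustration:
///
/// ```rust
/// use sized_chunks::ring_buffer::RingBuffer;
///
/// let mut buffer = RingBuffer::<i32, 8>::new();
/// buffer.push_back(1);
/// buffer.push_front(0);
/// assert_eq!(Some(0), buffer.pop_front());
/// assert_eq!(Some(1), buffer.pop_front());
/// assert_eq!(None, buffer.pop_front());
/// ```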
pub struct RingBuffer<A, const N: usize> {
    /// Raw index of the first live element in `data`.
    origin: RawIndex<N>,
    /// Number of live elements in the buffer.
    length: usize,
    /// Backing storage; only `length` slots starting at `origin` (wrapping
    /// modulo `N`) are initialised.
    data: MaybeUninit<[A; N]>,
}

impl<A, const N: usize> Drop for RingBuffer<A, N> {
    #[inline]
    fn drop(&mut self) {
        if core::mem::needs_drop::<A>() {
            for i in self.range() {
                unsafe { self.force_drop(i) }
            }
        }
    }
}

impl<A, const N: usize> HasLength for RingBuffer<A, N> {
    /// Get the number of elements in the buffer.
    #[inline]
    #[must_use]
    fn len(&self) -> usize {
        self.length
    }
}

impl<A, const N: usize> Array for RingBuffer<A, N> {
    /// Get a reference to the element at the given index, or `None` if the
    /// index is out of bounds.
    #[must_use]
    fn get(&self, index: usize) -> Option<&A> {
        if index >= self.len() {
            None
        } else {
            Some(unsafe { self.get_unchecked(index) })
        }
    }
}

impl<A, const N: usize> ArrayMut for RingBuffer<A, N> {
    /// Get a mutable reference to the element at the given index, or `None`
    /// if the index is out of bounds.
    #[must_use]
    fn get_mut(&mut self, index: usize) -> Option<&mut A> {
        if index >= self.len() {
            None
        } else {
            Some(unsafe { self.get_unchecked_mut(index) })
        }
    }
}

impl<A, const N: usize> RingBuffer<A, N> {
    /// The capacity of this ring buffer, as a `usize`.
    pub const CAPACITY: usize = N;

    /// Convert a logical index into a raw index into the data array.
    #[inline]
    fn raw(&self, index: usize) -> RawIndex<N> {
        self.origin + index
    }

    /// Get a pointer to the slot at the given raw index.
    #[inline]
    unsafe fn ptr(&self, index: RawIndex<N>) -> *const A {
        debug_assert!(index.to_usize() < Self::CAPACITY);
        (&self.data as *const _ as *const A).add(index.to_usize())
    }

    /// Get a mutable pointer to the slot at the given raw index.
    #[inline]
    unsafe fn mut_ptr(&mut self, index: RawIndex<N>) -> *mut A {
        debug_assert!(index.to_usize() < Self::CAPACITY);
        (&mut self.data as *mut _ as *mut A).add(index.to_usize())
    }

    /// Drop the value at a raw index in place.
    #[inline]
    unsafe fn force_drop(&mut self, index: RawIndex<N>) {
        core::ptr::drop_in_place(self.mut_ptr(index))
    }

    /// Copy the value out of a raw index, leaving its slot unchanged.
    #[inline]
    unsafe fn force_read(&self, index: RawIndex<N>) -> A {
        core::ptr::read(self.ptr(index))
    }

    /// Write a value to a raw index without reading or dropping the old value.
    #[inline]
    unsafe fn force_write(&mut self, index: RawIndex<N>, value: A) {
        core::ptr::write(self.mut_ptr(index), value)
    }

    /// Copy a range of values from another buffer, handling wraparound on
    /// both the source and the target side.
    unsafe fn copy_from(
        &mut self,
        source: &mut Self,
        from: RawIndex<N>,
        to: RawIndex<N>,
        count: usize,
    ) {
        #[inline]
        unsafe fn force_copy_to<A, const N: usize>(
            source: &mut RingBuffer<A, N>,
            from: RawIndex<N>,
            target: &mut RingBuffer<A, N>,
            to: RawIndex<N>,
            count: usize,
        ) {
            if count > 0 {
                debug_assert!(from.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
                debug_assert!(to.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
                core::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count)
            }
        }

        if from.to_usize() + count > Self::CAPACITY {
            // The source range wraps: copy its tail, then its wrapped head.
            let first_length = Self::CAPACITY - from.to_usize();
            let last_length = count - first_length;
            self.copy_from(source, from, to, first_length);
            self.copy_from(source, 0.into(), to + first_length, last_length);
        } else if to.to_usize() + count > Self::CAPACITY {
            // The target range wraps: split the copy at the capacity boundary.
            let first_length = Self::CAPACITY - to.to_usize();
            let last_length = count - first_length;
            force_copy_to(source, from, self, to, first_length);
            force_copy_to(source, from + first_length, self, 0.into(), last_length);
        } else {
            force_copy_to(source, from, self, to, count);
        }
    }

    /// Copy values from a slice into the buffer starting at a raw index,
    /// wrapping around the end of the storage if necessary.
    ///
    /// Note: the original assertion here (`to.to_usize() + count <=
    /// Self::CAPACITY`) contradicted the wraparound branch below, which
    /// exists precisely to handle that case; it is relaxed to checking the
    /// total count against capacity.
    #[allow(dead_code)]
    unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex<N>) {
        let count = source.len();
        debug_assert!(count <= Self::CAPACITY);
        if to.to_usize() + count > Self::CAPACITY {
            // The target range wraps around the end of the buffer.
            let first_length = Self::CAPACITY - to.to_usize();
            let first_slice = &source[..first_length];
            let last_slice = &source[first_length..];
            core::ptr::copy_nonoverlapping(
                first_slice.as_ptr(),
                self.mut_ptr(to),
                first_slice.len(),
            );
            core::ptr::copy_nonoverlapping(
                last_slice.as_ptr(),
                self.mut_ptr(0.into()),
                last_slice.len(),
            );
        } else {
            core::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count)
        }
    }

    /// Get an iterator over the raw indices of the buffer's live elements.
    #[inline]
    fn range(&self) -> IndexIter<N> {
        IndexIter {
            remaining: self.len(),
            left_index: self.origin,
            right_index: self.origin + self.len(),
        }
    }

    /// Construct an empty ring buffer.
    #[inline]
    #[must_use]
    pub fn new() -> Self {
        Self {
            origin: 0.into(),
            length: 0,
            data: MaybeUninit::uninit(),
        }
    }

    /// Construct a ring buffer with a single element.
    ///
    /// Panics if the capacity is zero.
    #[inline]
    #[must_use]
    pub fn unit(value: A) -> Self {
        assert!(Self::CAPACITY >= 1);
        let mut buffer = Self {
            origin: 0.into(),
            length: 1,
            data: MaybeUninit::uninit(),
        };
        unsafe {
            buffer.force_write(0.into(), value);
        }
        buffer
    }

    /// Construct a ring buffer with two elements.
    ///
    /// Panics if the capacity is less than two.
    #[inline]
    #[must_use]
    pub fn pair(value1: A, value2: A) -> Self {
        assert!(Self::CAPACITY >= 2);
        let mut buffer = Self {
            origin: 0.into(),
            length: 2,
            data: MaybeUninit::uninit(),
        };
        unsafe {
            buffer.force_write(0.into(), value1);
            buffer.force_write(1.into(), value2);
        }
        buffer
    }

    /// Construct a new ring buffer by draining all the elements from
    /// `other`, leaving it empty.
    #[inline]
    #[must_use]
    pub fn drain_from(other: &mut Self) -> Self {
        Self::from_front(other, other.len())
    }

    /// Construct a new ring buffer containing the first `count` elements
    /// pulled from `iter`.
    ///
    /// Panics if the iterator yields fewer than `count` elements.
    #[must_use]
    pub fn collect_from<I>(iter: &mut I, count: usize) -> Self
    where
        I: Iterator<Item = A>,
    {
        let buffer = Self::from_iter(iter.take(count));
        if buffer.len() < count {
            panic!("RingBuffer::collect_from: underfull iterator");
        }
        buffer
    }

    /// Construct a new ring buffer by draining `count` elements from the
    /// front of `other`.
    #[must_use]
    pub fn from_front(other: &mut Self, count: usize) -> Self {
        let mut buffer = Self::new();
        buffer.drain_from_front(other, count);
        buffer
    }

    /// Construct a new ring buffer by draining `count` elements from the
    /// back of `other`.
    #[must_use]
    pub fn from_back(other: &mut Self, count: usize) -> Self {
        let mut buffer = Self::new();
        buffer.drain_from_back(other, count);
        buffer
    }

    /// Test if the buffer is at capacity.
    #[inline]
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.len() == Self::CAPACITY
    }

    /// Get an iterator over references to the buffer's contents, in order.
    #[inline]
    #[must_use]
    pub fn iter(&self) -> Iter<'_, A, N> {
        Iter {
            buffer: self,
            left_index: self.origin,
            right_index: self.origin + self.len(),
            remaining: self.len(),
        }
    }

    /// Get an iterator over mutable references to the buffer's contents,
    /// in order.
    #[inline]
    #[must_use]
    pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
        IterMut::new(self)
    }

    /// Resolve a `RangeBounds` into a concrete `Range` over the buffer's
    /// indices, panicking if the range is out of bounds or inverted.
    #[must_use]
    fn parse_range<R: RangeBounds<usize>>(&self, range: R) -> Range<usize> {
        let new_range = Range {
            start: match range.start_bound() {
                Bound::Unbounded => 0,
                Bound::Included(index) => *index,
                Bound::Excluded(_) => unimplemented!(),
            },
            end: match range.end_bound() {
                Bound::Unbounded => self.len(),
                Bound::Included(index) => *index + 1,
                Bound::Excluded(index) => *index,
            },
        };
        if new_range.end > self.len() || new_range.start > new_range.end {
            panic!("RingBuffer::parse_range: index out of bounds");
        }
        new_range
    }

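    /// Get a `Slice` view into a subset of the buffer's indices.
    ///
    /// Panics if the range is out of bounds.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let buffer: RingBuffer<i32, 64> = (0..8).collect();
    /// let expected: RingBuffer<i32, 64> = (2..6).collect();
    /// assert!(expected == buffer.slice(2..6));
    /// ```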
    #[must_use]
    pub fn slice<R: RangeBounds<usize>>(&self, range: R) -> Slice<'_, A, N> {
        Slice {
            buffer: self,
            range: self.parse_range(range),
        }
    }

    /// Get a `SliceMut` view into a subset of the buffer's indices.
    ///
    /// Panics if the range is out of bounds.
    #[must_use]
    pub fn slice_mut<R: RangeBounds<usize>>(&mut self, range: R) -> SliceMut<'_, A, N> {
        SliceMut {
            range: self.parse_range(range),
            buffer: self,
        }
    }

    /// Get an unchecked reference to the value at the given index.
    ///
    /// # Safety
    ///
    /// The index must be within bounds, or this will read uninitialised memory.
    #[must_use]
    pub unsafe fn get_unchecked(&self, index: usize) -> &A {
        &*self.ptr(self.raw(index))
    }

    /// Get an unchecked mutable reference to the value at the given index.
    ///
    /// # Safety
    ///
    /// The index must be within bounds, or this will read uninitialised memory.
    #[must_use]
    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
        &mut *self.mut_ptr(self.raw(index))
    }

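    /// Push a value to the back of the buffer.
    ///
    /// Panics if the buffer is full.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer = RingBuffer::<i32, 4>::new();
    /// buffer.push_back(5);
    /// assert_eq!(5, buffer[0]);
    /// ```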
    pub fn push_back(&mut self, value: A) {
        if self.is_full() {
            panic!("RingBuffer::push_back: can't push to a full buffer")
        } else {
            unsafe { self.force_write(self.raw(self.length), value) }
            self.length += 1;
        }
    }

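    /// Push a value to the front of the buffer.
    ///
    /// Panics if the buffer is full.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer = RingBuffer::<i32, 4>::unit(1);
    /// buffer.push_front(0);
    /// assert_eq!(0, buffer[0]);
    /// assert_eq!(1, buffer[1]);
    /// ```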
    pub fn push_front(&mut self, value: A) {
        if self.is_full() {
            panic!("RingBuffer::push_front: can't push to a full buffer")
        } else {
            let origin = self.origin.dec();
            self.length += 1;
            unsafe { self.force_write(origin, value) }
        }
    }

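    /// Remove the last value from the buffer and return it, or `None` if
    /// the buffer is empty.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer = RingBuffer::<i32, 4>::pair(1, 2);
    /// assert_eq!(Some(2), buffer.pop_back());
    /// assert_eq!(Some(1), buffer.pop_back());
    /// assert_eq!(None, buffer.pop_back());
    /// ```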
    pub fn pop_back(&mut self) -> Option<A> {
        if self.is_empty() {
            None
        } else {
            self.length -= 1;
            Some(unsafe { self.force_read(self.raw(self.length)) })
        }
    }

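    /// Remove the first value from the buffer and return it, or `None` if
    /// the buffer is empty.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer = RingBuffer::<i32, 4>::pair(1, 2);
    /// assert_eq!(Some(1), buffer.pop_front());
    /// assert_eq!(Some(2), buffer.pop_front());
    /// assert_eq!(None, buffer.pop_front());
    /// ```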
    pub fn pop_front(&mut self) -> Option<A> {
        if self.is_empty() {
            None
        } else {
            self.length -= 1;
            let index = self.origin.inc();
            Some(unsafe { self.force_read(index) })
        }
    }

    /// Discard the first `index` elements, dropping them in place.
    ///
    /// Panics if `index` is out of bounds.
    pub fn drop_left(&mut self, index: usize) {
        if index > 0 {
            if index > self.len() {
                panic!("RingBuffer::drop_left: index out of bounds");
            }
            for i in self.range().take(index) {
                unsafe { self.force_drop(i) }
            }
            self.origin += index;
            self.length -= index;
        }
    }

    /// Discard all elements from `index` onward, dropping them in place.
    ///
    /// Panics if `index` is out of bounds.
    pub fn drop_right(&mut self, index: usize) {
        if index > self.len() {
            panic!("RingBuffer::drop_right: index out of bounds");
        }
        if index == self.len() {
            return;
        }
        for i in self.range().skip(index) {
            unsafe { self.force_drop(i) }
        }
        self.length = index;
    }

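    /// Split the buffer at `index`, returning a new buffer holding the
    /// elements from `index` onward and leaving the rest in `self`.
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut left: RingBuffer<i32, 8> = (0..6).collect();
    /// let right = left.split_off(4);
    /// assert_eq!(left, vec![0, 1, 2, 3]);
    /// assert_eq!(right, vec![4, 5]);
    /// ```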
    #[must_use]
    pub fn split_off(&mut self, index: usize) -> Self {
        if index > self.len() {
            panic!("RingBuffer::split_off: index out of bounds");
        }
        if index == self.len() {
            return Self::new();
        }
        let mut right = Self::new();
        let length = self.length - index;
        unsafe { right.copy_from(self, self.raw(index), 0.into(), length) };
        self.length = index;
        right.length = length;
        right
    }

    /// Remove all elements from `other` and append them to the back of
    /// `self`, preserving their order.
    ///
    /// Panics if the capacity of `self` would be exceeded.
    #[inline]
    pub fn append(&mut self, other: &mut Self) {
        self.drain_from_front(other, other.len());
    }

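    /// Remove `count` elements from the front of `other` and append them to
    /// the back of `self`, preserving their order.
    ///
    /// Panics if `self` cannot fit `count` more elements, or if `other` has
    /// fewer than `count` elements.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut front: RingBuffer<i32, 8> = (0..3).collect();
    /// let mut back: RingBuffer<i32, 8> = (3..6).collect();
    /// front.drain_from_front(&mut back, 2);
    /// assert_eq!(front, vec![0, 1, 2, 3, 4]);
    /// assert_eq!(back, vec![5]);
    /// ```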
    pub fn drain_from_front(&mut self, other: &mut Self, count: usize) {
        let self_len = self.len();
        let other_len = other.len();
        if self_len + count > Self::CAPACITY {
            panic!("RingBuffer::drain_from_front: chunk size overflow");
        }
        if other_len < count {
            panic!("RingBuffer::drain_from_front: index out of bounds");
        }
        unsafe { self.copy_from(other, other.origin, self.raw(self.len()), count) };
        other.origin += count;
        other.length -= count;
        self.length += count;
    }

    /// Remove `count` elements from the back of `other` and insert them at
    /// the front of `self`, preserving their order.
    ///
    /// Panics if `self` cannot fit `count` more elements, or if `other` has
    /// fewer than `count` elements.
    pub fn drain_from_back(&mut self, other: &mut Self, count: usize) {
        let self_len = self.len();
        let other_len = other.len();
        if self_len + count > Self::CAPACITY {
            panic!("RingBuffer::drain_from_back: chunk size overflow");
        }
        if other_len < count {
            panic!("RingBuffer::drain_from_back: index out of bounds");
        }
        self.origin -= count;
        let source_index = other.origin + (other.len() - count);
        unsafe { self.copy_from(other, source_index, self.origin, count) };
        other.length -= count;
        self.length += count;
    }

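    /// Insert a value at `index`, shifting either the following values one
    /// step to the right or the preceding values one step to the left,
    /// whichever moves fewer elements.
    ///
    /// Panics if the buffer is full or if `index` is out of bounds.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer: RingBuffer<i32, 8> = (0..5).collect();
    /// buffer.insert(2, 99);
    /// assert_eq!(buffer, vec![0, 1, 99, 2, 3, 4]);
    /// ```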
    pub fn insert(&mut self, index: usize, value: A) {
        if self.is_full() {
            panic!("RingBuffer::insert: chunk size overflow");
        }
        if index > self.len() {
            panic!("RingBuffer::insert: index out of bounds");
        }
        if index == 0 {
            return self.push_front(value);
        }
        if index == self.len() {
            return self.push_back(value);
        }
        let right_count = self.len() - index;
        if right_count < index {
            // Shift the right side of the buffer one step to the right.
            let mut i = self.raw(self.len() - 1);
            let target = self.raw(index);
            while i != target {
                unsafe { self.force_write(i + 1, self.force_read(i)) };
                i -= 1;
            }
            unsafe { self.force_write(target + 1, self.force_read(target)) };
            self.length += 1;
        } else {
            // Shift the left side of the buffer one step to the left.
            self.origin -= 1;
            self.length += 1;
            for i in self.range().take(index) {
                unsafe { self.force_write(i, self.force_read(i + 1)) };
            }
        }
        unsafe { self.force_write(self.raw(index), value) };
    }

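    /// Insert a value into a sorted buffer at the position where it belongs,
    /// found by binary search.
    ///
    /// Panics if the buffer is full.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer: RingBuffer<i32, 8> = vec![1, 3, 5, 7].into_iter().collect();
    /// buffer.insert_ordered(4);
    /// assert_eq!(buffer, vec![1, 3, 4, 5, 7]);
    /// ```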
    pub fn insert_ordered(&mut self, value: A)
    where
        A: Ord,
    {
        if self.is_full() {
            panic!("RingBuffer::insert_ordered: buffer is full");
        }
        // Both search outcomes give the correct insertion point.
        match self.slice(..).binary_search(&value) {
            Ok(index) | Err(index) => self.insert(index, value),
        }
    }

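    /// Insert multiple values at `index`, shifting the surrounding values
    /// out of the way. The iterator must be of known, exact length.
    ///
    /// Panics if the values don't fit in the buffer, if `index` is out of
    /// bounds, or if the iterator yields fewer elements than it claims.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer: RingBuffer<i32, 8> = (0..3).collect();
    /// buffer.insert_from(1, vec![8, 9]);
    /// assert_eq!(buffer, vec![0, 8, 9, 1, 2]);
    /// ```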
    pub fn insert_from<Iterable, I>(&mut self, index: usize, iter: Iterable)
    where
        Iterable: IntoIterator<Item = A, IntoIter = I>,
        I: ExactSizeIterator<Item = A>,
    {
        let iter = iter.into_iter();
        let insert_size = iter.len();
        if self.len() + insert_size > Self::CAPACITY {
            panic!(
                "RingBuffer::insert_from: buffer cannot fit {} elements",
                insert_size
            );
        }
        if index > self.len() {
            panic!("RingBuffer::insert_from: index out of bounds");
        }
        if index == self.len() {
            self.extend(iter);
            return;
        }
        let right_count = self.len() - index;
        if right_count < index {
            // Shift the right side of the buffer `insert_size` steps to the right.
            let mut i = self.raw(self.len() - 1);
            let target = self.raw(index);
            while i != target {
                unsafe { self.force_write(i + insert_size, self.force_read(i)) };
                i -= 1;
            }
            unsafe { self.force_write(target + insert_size, self.force_read(target)) };
            self.length += insert_size;
        } else {
            // Shift the left side of the buffer `insert_size` steps to the left.
            self.origin -= insert_size;
            self.length += insert_size;
            for i in self.range().take(index) {
                unsafe { self.force_write(i, self.force_read(i + insert_size)) };
            }
        }
        let mut index = self.raw(index);
        let mut inserted = 0;
        // Panic safety: zero the length while writing, so that if the
        // iterator panics, the destructor won't drop the uninitialised gap.
        let length = replace(&mut self.length, 0);
        for value in iter.take(insert_size) {
            unsafe { self.force_write(index, value) };
            index += 1;
            inserted += 1;
        }
        assert_eq!(
            inserted, insert_size,
            "Iterator has fewer elements than advertised",
        );
        self.length = length;
    }

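    /// Remove the value at `index`, shifting either the following values one
    /// step to the left or the preceding values one step to the right,
    /// whichever moves fewer elements, and return the removed value.
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer: RingBuffer<i32, 8> = (0..5).collect();
    /// assert_eq!(1, buffer.remove(1));
    /// assert_eq!(buffer, vec![0, 2, 3, 4]);
    /// ```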
    pub fn remove(&mut self, index: usize) -> A {
        if index >= self.len() {
            panic!("RingBuffer::remove: index out of bounds");
        }
        let value = unsafe { self.force_read(self.raw(index)) };
        let right_count = self.len() - index;
        if right_count < index {
            // Shift the right side of the buffer one step to the left.
            self.length -= 1;
            let mut i = self.raw(index);
            let target = self.raw(self.len());
            while i != target {
                unsafe { self.force_write(i, self.force_read(i + 1)) };
                i += 1;
            }
        } else {
            // Shift the left side of the buffer one step to the right.
            let mut i = self.raw(index);
            while i != self.origin {
                unsafe { self.force_write(i, self.force_read(i - 1)) };
                i -= 1;
            }
            self.origin += 1;
            self.length -= 1;
        }
        value
    }

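    /// Get a draining iterator over the buffer, removing elements from the
    /// front as it yields them; elements it doesn't consume stay in place.
    ///
    /// Example (import path assumed as above):
    ///
    /// ```rust
    /// use sized_chunks::ring_buffer::RingBuffer;
    ///
    /// let mut buffer: RingBuffer<i32, 8> = (0..4).collect();
    /// let drained: Vec<i32> = buffer.drain().take(2).collect();
    /// assert_eq!(drained, vec![0, 1]);
    /// assert_eq!(buffer, vec![2, 3]);
    /// ```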
    pub fn drain(&mut self) -> Drain<'_, A, N> {
        Drain { buffer: self }
    }

    /// Discard the contents of the buffer, dropping them in place.
    pub fn clear(&mut self) {
        for i in self.range() {
            unsafe { self.force_drop(i) };
        }
        self.origin = 0.into();
        self.length = 0;
    }
}

impl<A, const N: usize> Default for RingBuffer<A, N> {
    #[inline]
    #[must_use]
    fn default() -> Self {
        Self::new()
    }
}

impl<A: Clone, const N: usize> Clone for RingBuffer<A, N> {
    fn clone(&self) -> Self {
        let mut out = Self::new();
        out.origin = self.origin;
        out.length = self.length;
        let range = self.range();
        // Panic safety: count the length up as we clone, so that if a clone
        // panics, the destructor only drops the elements written so far.
        out.length = 0;
        for index in range {
            unsafe { out.force_write(index, (&*self.ptr(index)).clone()) };
            out.length += 1;
        }
        out
    }
}

impl<A, const N: usize> Index<usize> for RingBuffer<A, N> {
    type Output = A;

    #[must_use]
    fn index(&self, index: usize) -> &Self::Output {
        if index >= self.len() {
            panic!(
                "RingBuffer::index: index out of bounds {} >= {}",
                index,
                self.len()
            );
        }
        unsafe { &*self.ptr(self.raw(index)) }
    }
}

impl<A, const N: usize> IndexMut<usize> for RingBuffer<A, N> {
    #[must_use]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        if index >= self.len() {
            panic!(
                "RingBuffer::index_mut: index out of bounds {} >= {}",
                index,
                self.len()
            );
        }
        unsafe { &mut *self.mut_ptr(self.raw(index)) }
    }
}

impl<A: PartialEq, const N: usize> PartialEq for RingBuffer<A, N> {
    #[inline]
    #[must_use]
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len() && self.iter().eq(other.iter())
    }
}

impl<A, PrimSlice, const N: usize> PartialEq<PrimSlice> for RingBuffer<A, N>
where
    PrimSlice: Borrow<[A]>,
    A: PartialEq,
{
    #[inline]
    #[must_use]
    fn eq(&self, other: &PrimSlice) -> bool {
        let other = other.borrow();
        self.len() == other.len() && self.iter().eq(other.iter())
    }
}

impl<A, const N: usize> PartialEq<Slice<'_, A, N>> for RingBuffer<A, N>
where
    A: PartialEq,
{
    fn eq(&self, other: &Slice<'_, A, N>) -> bool {
        self.len() == other.len() && self.iter().eq(other.iter())
    }
}

impl<A, const N: usize> PartialEq<SliceMut<'_, A, N>> for RingBuffer<A, N>
where
    A: PartialEq,
{
    fn eq(&self, other: &SliceMut<'_, A, N>) -> bool {
        self.len() == other.len() && self.iter().eq(other.iter())
    }
}

impl<A: Eq, const N: usize> Eq for RingBuffer<A, N> {}

impl<A: PartialOrd, const N: usize> PartialOrd for RingBuffer<A, N> {
    #[inline]
    #[must_use]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}

impl<A: Ord, const N: usize> Ord for RingBuffer<A, N> {
    #[inline]
    #[must_use]
    fn cmp(&self, other: &Self) -> Ordering {
        self.iter().cmp(other.iter())
    }
}

impl<A, const N: usize> Extend<A> for RingBuffer<A, N> {
    /// Append the contents of the iterator to the back of the buffer.
    ///
    /// Panics if the capacity of the buffer is exceeded.
    #[inline]
    fn extend<I: IntoIterator<Item = A>>(&mut self, iter: I) {
        for item in iter {
            self.push_back(item);
        }
    }
}

impl<'a, A: Clone + 'a, const N: usize> Extend<&'a A> for RingBuffer<A, N> {
    /// Append the cloned contents of the iterator to the back of the buffer.
    ///
    /// Panics if the capacity of the buffer is exceeded.
    #[inline]
    fn extend<I: IntoIterator<Item = &'a A>>(&mut self, iter: I) {
        for item in iter {
            self.push_back(item.clone());
        }
    }
}

impl<A: Debug, const N: usize> Debug for RingBuffer<A, N> {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.write_str("RingBuffer")?;
        f.debug_list().entries(self.iter()).finish()
    }
}

impl<A: Hash, const N: usize> Hash for RingBuffer<A, N> {
    #[inline]
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        for item in self {
            item.hash(hasher)
        }
    }
}

#[cfg(feature = "std")]
impl<const N: usize> std::io::Write for RingBuffer<u8, N> {
    fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> {
        // Truncate the write to the buffer's remaining capacity.
        let max_new = Self::CAPACITY - self.len();
        if buf.len() > max_new {
            buf = &buf[..max_new];
        }
        unsafe { self.copy_from_slice(buf, self.origin + self.len()) };
        self.length += buf.len();
        Ok(buf.len())
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

#[cfg(feature = "std")]
impl<const N: usize> std::io::Read for RingBuffer<u8, N> {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let read_size = buf.len().min(self.len());
        if read_size == 0 {
            Ok(0)
        } else {
            for p in buf.iter_mut().take(read_size) {
                *p = self.pop_front().unwrap();
            }
            Ok(read_size)
        }
    }
}

impl<A, const N: usize> FromIterator<A> for RingBuffer<A, N> {
    #[must_use]
    fn from_iter<I: IntoIterator<Item = A>>(iter: I) -> Self {
        let mut buffer = RingBuffer::new();
        buffer.extend(iter);
        buffer
    }
}

impl<A, const N: usize> IntoIterator for RingBuffer<A, N> {
    type Item = A;
    type IntoIter = OwnedIter<A, N>;

    #[inline]
    #[must_use]
    fn into_iter(self) -> Self::IntoIter {
        OwnedIter { buffer: self }
    }
}

impl<'a, A, const N: usize> IntoIterator for &'a RingBuffer<A, N> {
    type Item = &'a A;
    type IntoIter = Iter<'a, A, N>;

    #[inline]
    #[must_use]
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, A, const N: usize> IntoIterator for &'a mut RingBuffer<A, N> {
    type Item = &'a mut A;
    type IntoIter = IterMut<'a, A, N>;

    #[inline]
    #[must_use]
    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn validity_invariant() {
        assert!(Some(RingBuffer::<Box<()>, 64>::new()).is_some());
    }

    #[test]
    fn is_full() {
        let mut chunk = RingBuffer::<_, 64>::new();
        for i in 0..64 {
            assert!(!chunk.is_full());
            chunk.push_back(i);
        }
        assert!(chunk.is_full());
    }

    #[test]
    fn ref_iter() {
        let chunk: RingBuffer<i32, 64> = (0..64).collect();
        let out_vec: Vec<&i32> = chunk.iter().collect();
        let should_vec_p: Vec<i32> = (0..64).collect();
        let should_vec: Vec<&i32> = should_vec_p.iter().collect();
        assert_eq!(should_vec, out_vec);
    }

    #[test]
    fn mut_ref_iter() {
        let mut chunk: RingBuffer<i32, 64> = (0..64).collect();
        let out_vec: Vec<&mut i32> = chunk.iter_mut().collect();
        let mut should_vec_p: Vec<i32> = (0..64).collect();
        let should_vec: Vec<&mut i32> = should_vec_p.iter_mut().collect();
        assert_eq!(should_vec, out_vec);
    }

    #[test]
    fn consuming_iter() {
        let chunk: RingBuffer<i32, 64> = (0..64).collect();
        let out_vec: Vec<i32> = chunk.into_iter().collect();
        let should_vec: Vec<i32> = (0..64).collect();
        assert_eq!(should_vec, out_vec);
    }

    #[test]
    fn draining_iter() {
        let mut chunk: RingBuffer<i32, 64> = (0..64).collect();
        let mut half: RingBuffer<i32, 64> = chunk.drain().take(16).collect();
        half.extend(chunk.drain().rev().take(16));
        let should: Vec<i32> = (16..48).collect();
        assert_eq!(chunk, should);
        let should: Vec<i32> = (0..16).chain((48..64).rev()).collect();
        assert_eq!(half, should);
    }

    #[cfg(feature = "std")]
    #[test]
    fn io_write() {
        use std::io::Write;
        let mut buffer: RingBuffer<u8, 64> = (0..32).collect();
        let to_write: Vec<u8> = (32..128).collect();
        assert_eq!(32, buffer.write(&to_write).unwrap());
        assert_eq!(buffer, (0..64).collect::<Vec<u8>>());
    }

    #[cfg(feature = "std")]
    #[test]
    fn io_read() {
        use std::io::Read;
        let mut buffer: RingBuffer<u8, 64> = (16..48).collect();
        let mut read_buf: Vec<u8> = (0..16).collect();
        assert_eq!(16, buffer.read(&mut read_buf).unwrap());
        assert_eq!(read_buf, (16..32).collect::<Vec<u8>>());
        assert_eq!(buffer, (32..48).collect::<Vec<u8>>());
        assert_eq!(16, buffer.read(&mut read_buf).unwrap());
        assert_eq!(read_buf, (32..48).collect::<Vec<u8>>());
        assert_eq!(buffer, vec![]);
        assert_eq!(0, buffer.read(&mut read_buf).unwrap());
    }

    #[test]
    fn clone() {
        let buffer: RingBuffer<u32, 64> = (0..50).collect();
        assert_eq!(buffer, buffer.clone());
    }

    #[test]
    fn failing() {
        let mut buffer: RingBuffer<u32, 64> = RingBuffer::new();
        buffer.push_front(0);
        let mut add: RingBuffer<u32, 64> = vec![1, 0, 0, 0, 0, 0].into_iter().collect();
        buffer.append(&mut add);
        assert_eq!(1, buffer.remove(1));
        let expected = vec![0, 0, 0, 0, 0, 0];
        assert_eq!(buffer, expected);
    }

    use crate::tests::DropTest;
    use std::sync::atomic::{AtomicUsize, Ordering};

    #[test]
    fn dropping() {
        let counter = AtomicUsize::new(0);
        {
            let mut chunk: RingBuffer<DropTest<'_>, 64> = RingBuffer::new();
            for _i in 0..20 {
                chunk.push_back(DropTest::new(&counter))
            }
            for _i in 0..20 {
                chunk.push_front(DropTest::new(&counter))
            }
            assert_eq!(40, counter.load(Ordering::Relaxed));
            for _i in 0..10 {
                chunk.pop_back();
            }
            assert_eq!(30, counter.load(Ordering::Relaxed));
        }
        assert_eq!(0, counter.load(Ordering::Relaxed));
    }

    #[test]
    #[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")]
    fn unit_on_empty() {
        let _ = RingBuffer::<usize, 0>::unit(1);
    }

    #[test]
    #[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")]
    fn pair_on_empty() {
        let _ = RingBuffer::<usize, 0>::pair(1, 2);
    }
}