// sized_chunks/ring_buffer/mod.rs

1// This Source Code Form is subject to the terms of the Mozilla Public
2// License, v. 2.0. If a copy of the MPL was not distributed with this
3// file, You can obtain one at http://mozilla.org/MPL/2.0/.
4
5//! A fixed capacity ring buffer.
6//!
7//! See [`RingBuffer`](struct.RingBuffer.html)
8
9use core::borrow::Borrow;
10use core::cmp::Ordering;
11use core::fmt::{Debug, Error, Formatter};
12use core::hash::{Hash, Hasher};
13use core::iter::FromIterator;
14use core::mem::{replace, MaybeUninit};
15use core::ops::{Bound, Range, RangeBounds};
16use core::ops::{Index, IndexMut};
17
18pub use array_ops::{Array, ArrayMut, HasLength};
19
20mod index;
21use index::{IndexIter, RawIndex};
22
23mod iter;
24pub use iter::{Drain, Iter, IterMut, OwnedIter};
25
26mod slice;
27pub use slice::{Slice, SliceMut};
28
29#[cfg(feature = "refpool")]
30mod refpool;
31
32/// A fixed capacity ring buffer.
33///
34/// A ring buffer is an array where the first logical index is at some arbitrary
35/// location inside the array, and the indices wrap around to the start of the
36/// array once they overflow its bounds.
37///
38/// This gives us the ability to push to either the front or the end of the
39/// array in constant time, at the cost of losing the ability to get a single
40/// contiguous slice reference to the contents.
41///
42/// It differs from the [`Chunk`][Chunk] in that the latter will have mostly
43/// constant time pushes, but may occasionally need to shift its contents around
44/// to make room. They both have constant time pop, and they both have linear
45/// time insert and remove.
46///
47/// The `RingBuffer` offers its own [`Slice`][Slice] and [`SliceMut`][SliceMut]
48/// types to compensate for the loss of being able to take a slice, but they're
49/// somewhat less efficient, so the general rule should be that you shouldn't
50/// choose a `RingBuffer` if you rely heavily on slices - but if you don't,
51/// it's probably a marginally better choice overall than [`Chunk`][Chunk].
52///
53/// # Feature Flag
54///
55/// To use this data structure, you need to enable the `ringbuffer` feature.
56///
57/// [Chunk]: ../sized_chunk/struct.Chunk.html
58/// [Slice]: struct.Slice.html
59/// [SliceMut]: struct.SliceMut.html
pub struct RingBuffer<A, const N: usize> {
    /// Raw (physical) index in `data` of the element at logical index 0.
    origin: RawIndex<N>,
    /// Number of initialised elements currently stored.
    length: usize,
    /// Backing storage. Only the `length` slots starting at `origin` and
    /// wrapping around the end of the array are initialised; all other
    /// slots are uninitialised memory and must never be read or dropped.
    data: MaybeUninit<[A; N]>,
}
65
66impl<A, const N: usize> Drop for RingBuffer<A, N> {
67    #[inline]
68    fn drop(&mut self) {
69        if core::mem::needs_drop::<A>() {
70            for i in self.range() {
71                unsafe { self.force_drop(i) }
72            }
73        }
74    }
75}
76
77impl<A, const N: usize> HasLength for RingBuffer<A, N> {
78    /// Get the length of the ring buffer.
79    #[inline]
80    #[must_use]
81    fn len(&self) -> usize {
82        self.length
83    }
84}
85
86impl<A, const N: usize> Array for RingBuffer<A, N> {
87    /// Get a reference to the value at a given index.
88    #[must_use]
89    fn get(&self, index: usize) -> Option<&A> {
90        if index >= self.len() {
91            None
92        } else {
93            Some(unsafe { self.get_unchecked(index) })
94        }
95    }
96}
97
98impl<A, const N: usize> ArrayMut for RingBuffer<A, N> {
99    /// Get a mutable reference to the value at a given index.
100    #[must_use]
101    fn get_mut(&mut self, index: usize) -> Option<&mut A> {
102        if index >= self.len() {
103            None
104        } else {
105            Some(unsafe { self.get_unchecked_mut(index) })
106        }
107    }
108}
109
110impl<A, const N: usize> RingBuffer<A, N> {
111    /// The capacity of this ring buffer, as a `usize`.
112    pub const CAPACITY: usize = N;
113
114    /// Get the raw index for a logical index.
115    #[inline]
116    fn raw(&self, index: usize) -> RawIndex<N> {
117        self.origin + index
118    }
119
120    #[inline]
121    unsafe fn ptr(&self, index: RawIndex<N>) -> *const A {
122        debug_assert!(index.to_usize() < Self::CAPACITY);
123        (&self.data as *const _ as *const A).add(index.to_usize())
124    }
125
126    #[inline]
127    unsafe fn mut_ptr(&mut self, index: RawIndex<N>) -> *mut A {
128        debug_assert!(index.to_usize() < Self::CAPACITY);
129        (&mut self.data as *mut _ as *mut A).add(index.to_usize())
130    }
131
132    /// Drop the value at a raw index.
133    #[inline]
134    unsafe fn force_drop(&mut self, index: RawIndex<N>) {
135        core::ptr::drop_in_place(self.mut_ptr(index))
136    }
137
138    /// Copy the value at a raw index, discarding ownership of the copied value
139    #[inline]
140    unsafe fn force_read(&self, index: RawIndex<N>) -> A {
141        core::ptr::read(self.ptr(index))
142    }
143
144    /// Write a value at a raw index without trying to drop what's already there
145    #[inline]
146    unsafe fn force_write(&mut self, index: RawIndex<N>, value: A) {
147        core::ptr::write(self.mut_ptr(index), value)
148    }
149
150    /// Copy a range of raw indices from another buffer.
151    unsafe fn copy_from(
152        &mut self,
153        source: &mut Self,
154        from: RawIndex<N>,
155        to: RawIndex<N>,
156        count: usize,
157    ) {
158        #[inline]
159        unsafe fn force_copy_to<A, const N: usize>(
160            source: &mut RingBuffer<A, N>,
161            from: RawIndex<N>,
162            target: &mut RingBuffer<A, N>,
163            to: RawIndex<N>,
164            count: usize,
165        ) {
166            if count > 0 {
167                debug_assert!(from.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
168                debug_assert!(to.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
169                core::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count)
170            }
171        }
172
173        if from.to_usize() + count > Self::CAPACITY {
174            let first_length = Self::CAPACITY - from.to_usize();
175            let last_length = count - first_length;
176            self.copy_from(source, from, to, first_length);
177            self.copy_from(source, 0.into(), to + first_length, last_length);
178        } else if to.to_usize() + count > Self::CAPACITY {
179            let first_length = Self::CAPACITY - to.to_usize();
180            let last_length = count - first_length;
181            force_copy_to(source, from, self, to, first_length);
182            force_copy_to(source, from + first_length, self, 0.into(), last_length);
183        } else {
184            force_copy_to(source, from, self, to, count);
185        }
186    }
187
188    /// Copy values from a slice.
189    #[allow(dead_code)]
190    unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex<N>) {
191        let count = source.len();
192        debug_assert!(to.to_usize() + count <= Self::CAPACITY);
193        if to.to_usize() + count > Self::CAPACITY {
194            let first_length = Self::CAPACITY - to.to_usize();
195            let first_slice = &source[..first_length];
196            let last_slice = &source[first_length..];
197            core::ptr::copy_nonoverlapping(
198                first_slice.as_ptr(),
199                self.mut_ptr(to),
200                first_slice.len(),
201            );
202            core::ptr::copy_nonoverlapping(
203                last_slice.as_ptr(),
204                self.mut_ptr(0.into()),
205                last_slice.len(),
206            );
207        } else {
208            core::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count)
209        }
210    }
211
212    /// Get an iterator over the raw indices of the buffer from left to right.
213    #[inline]
214    fn range(&self) -> IndexIter<N> {
215        IndexIter {
216            remaining: self.len(),
217            left_index: self.origin,
218            right_index: self.origin + self.len(),
219        }
220    }
221
222    /// Construct an empty ring buffer.
223    #[inline]
224    #[must_use]
225    pub fn new() -> Self {
226        Self {
227            origin: 0.into(),
228            length: 0,
229            data: MaybeUninit::uninit(),
230        }
231    }
232
233    /// Construct a ring buffer with a single item.
234    #[inline]
235    #[must_use]
236    pub fn unit(value: A) -> Self {
237        assert!(Self::CAPACITY >= 1);
238        let mut buffer = Self {
239            origin: 0.into(),
240            length: 1,
241            data: MaybeUninit::uninit(),
242        };
243        unsafe {
244            buffer.force_write(0.into(), value);
245        }
246        buffer
247    }
248
249    /// Construct a ring buffer with two items.
250    #[inline]
251    #[must_use]
252    pub fn pair(value1: A, value2: A) -> Self {
253        assert!(Self::CAPACITY >= 2);
254        let mut buffer = Self {
255            origin: 0.into(),
256            length: 2,
257            data: MaybeUninit::uninit(),
258        };
259        unsafe {
260            buffer.force_write(0.into(), value1);
261            buffer.force_write(1.into(), value2);
262        }
263        buffer
264    }
265
266    /// Construct a new ring buffer and move every item from `other` into the
267    /// new buffer.
268    ///
269    /// Time: O(n)
270    #[inline]
271    #[must_use]
272    pub fn drain_from(other: &mut Self) -> Self {
273        Self::from_front(other, other.len())
274    }
275
276    /// Construct a new ring buffer and populate it by taking `count` items from
277    /// the iterator `iter`.
278    ///
279    /// Panics if the iterator contains less than `count` items.
280    ///
281    /// Time: O(n)
282    #[must_use]
283    pub fn collect_from<I>(iter: &mut I, count: usize) -> Self
284    where
285        I: Iterator<Item = A>,
286    {
287        let buffer = Self::from_iter(iter.take(count));
288        if buffer.len() < count {
289            panic!("RingBuffer::collect_from: underfull iterator");
290        }
291        buffer
292    }
293
294    /// Construct a new ring buffer and populate it by taking `count` items from
295    /// the front of `other`.
296    ///
297    /// Time: O(n) for the number of items moved
298    #[must_use]
299    pub fn from_front(other: &mut Self, count: usize) -> Self {
300        let mut buffer = Self::new();
301        buffer.drain_from_front(other, count);
302        buffer
303    }
304
305    /// Construct a new ring buffer and populate it by taking `count` items from
306    /// the back of `other`.
307    ///
308    /// Time: O(n) for the number of items moved
309    #[must_use]
310    pub fn from_back(other: &mut Self, count: usize) -> Self {
311        let mut buffer = Self::new();
312        buffer.drain_from_back(other, count);
313        buffer
314    }
315
316    /// Test if the ring buffer is full.
317    #[inline]
318    #[must_use]
319    pub fn is_full(&self) -> bool {
320        self.len() == Self::CAPACITY
321    }
322
323    /// Get an iterator over references to the items in the ring buffer in
324    /// order.
325    #[inline]
326    #[must_use]
327    pub fn iter(&self) -> Iter<'_, A, N> {
328        Iter {
329            buffer: self,
330            left_index: self.origin,
331            right_index: self.origin + self.len(),
332            remaining: self.len(),
333        }
334    }
335
336    /// Get an iterator over mutable references to the items in the ring buffer
337    /// in order.
338    #[inline]
339    #[must_use]
340    pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
341        IterMut::new(self)
342    }
343
344    #[must_use]
345    fn parse_range<R: RangeBounds<usize>>(&self, range: R) -> Range<usize> {
346        let new_range = Range {
347            start: match range.start_bound() {
348                Bound::Unbounded => 0,
349                Bound::Included(index) => *index,
350                Bound::Excluded(_) => unimplemented!(),
351            },
352            end: match range.end_bound() {
353                Bound::Unbounded => self.len(),
354                Bound::Included(index) => *index + 1,
355                Bound::Excluded(index) => *index,
356            },
357        };
358        if new_range.end > self.len() || new_range.start > new_range.end {
359            panic!("Slice::parse_range: index out of bounds");
360        }
361        new_range
362    }
363
364    /// Get a `Slice` for a subset of the ring buffer.
365    #[must_use]
366    pub fn slice<R: RangeBounds<usize>>(&self, range: R) -> Slice<'_, A, N> {
367        Slice {
368            buffer: self,
369            range: self.parse_range(range),
370        }
371    }
372
373    /// Get a `SliceMut` for a subset of the ring buffer.
374    #[must_use]
375    pub fn slice_mut<R: RangeBounds<usize>>(&mut self, range: R) -> SliceMut<'_, A, N> {
376        SliceMut {
377            range: self.parse_range(range),
378            buffer: self,
379        }
380    }
381
382    /// Get an unchecked reference to the value at the given index.
383    ///
384    /// # Safety
385    ///
386    /// You must ensure the index is not out of bounds.
387    #[must_use]
388    pub unsafe fn get_unchecked(&self, index: usize) -> &A {
389        &*self.ptr(self.raw(index))
390    }
391
392    /// Get an unchecked mutable reference to the value at the given index.
393    ///
394    /// # Safety
395    ///
396    /// You must ensure the index is not out of bounds.
397    #[must_use]
398    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
399        &mut *self.mut_ptr(self.raw(index))
400    }
401
402    /// Push a value to the back of the buffer.
403    ///
404    /// Panics if the capacity of the buffer is exceeded.
405    ///
406    /// Time: O(1)
407    pub fn push_back(&mut self, value: A) {
408        if self.is_full() {
409            panic!("RingBuffer::push_back: can't push to a full buffer")
410        } else {
411            unsafe { self.force_write(self.raw(self.length), value) }
412            self.length += 1;
413        }
414    }
415
416    /// Push a value to the front of the buffer.
417    ///
418    /// Panics if the capacity of the buffer is exceeded.
419    ///
420    /// Time: O(1)
421    pub fn push_front(&mut self, value: A) {
422        if self.is_full() {
423            panic!("RingBuffer::push_front: can't push to a full buffer")
424        } else {
425            let origin = self.origin.dec();
426            self.length += 1;
427            unsafe { self.force_write(origin, value) }
428        }
429    }
430
431    /// Pop a value from the back of the buffer.
432    ///
433    /// Returns `None` if the buffer is empty.
434    ///
435    /// Time: O(1)
436    pub fn pop_back(&mut self) -> Option<A> {
437        if self.is_empty() {
438            None
439        } else {
440            self.length -= 1;
441            Some(unsafe { self.force_read(self.raw(self.length)) })
442        }
443    }
444
445    /// Pop a value from the front of the buffer.
446    ///
447    /// Returns `None` if the buffer is empty.
448    ///
449    /// Time: O(1)
450    pub fn pop_front(&mut self) -> Option<A> {
451        if self.is_empty() {
452            None
453        } else {
454            self.length -= 1;
455            let index = self.origin.inc();
456            Some(unsafe { self.force_read(index) })
457        }
458    }
459
460    /// Discard all items up to but not including `index`.
461    ///
462    /// Panics if `index` is out of bounds.
463    ///
464    /// Time: O(n) for the number of items dropped
465    pub fn drop_left(&mut self, index: usize) {
466        if index > 0 {
467            if index > self.len() {
468                panic!("RingBuffer::drop_left: index out of bounds");
469            }
470            for i in self.range().take(index) {
471                unsafe { self.force_drop(i) }
472            }
473            self.origin += index;
474            self.length -= index;
475        }
476    }
477
478    /// Discard all items from `index` onward.
479    ///
480    /// Panics if `index` is out of bounds.
481    ///
482    /// Time: O(n) for the number of items dropped
483    pub fn drop_right(&mut self, index: usize) {
484        if index > self.len() {
485            panic!("RingBuffer::drop_right: index out of bounds");
486        }
487        if index == self.len() {
488            return;
489        }
490        for i in self.range().skip(index) {
491            unsafe { self.force_drop(i) }
492        }
493        self.length = index;
494    }
495
496    /// Split a buffer into two, the original buffer containing
497    /// everything up to `index` and the returned buffer containing
498    /// everything from `index` onwards.
499    ///
500    /// Panics if `index` is out of bounds.
501    ///
502    /// Time: O(n) for the number of items in the new buffer
503    #[must_use]
504    pub fn split_off(&mut self, index: usize) -> Self {
505        if index > self.len() {
506            panic!("RingBuffer::split: index out of bounds");
507        }
508        if index == self.len() {
509            return Self::new();
510        }
511        let mut right = Self::new();
512        let length = self.length - index;
513        unsafe { right.copy_from(self, self.raw(index), 0.into(), length) };
514        self.length = index;
515        right.length = length;
516        right
517    }
518
519    /// Remove all items from `other` and append them to the back of `self`.
520    ///
521    /// Panics if the capacity of `self` is exceeded.
522    ///
523    /// `other` will be an empty buffer after this operation.
524    ///
525    /// Time: O(n) for the number of items moved
526    #[inline]
527    pub fn append(&mut self, other: &mut Self) {
528        self.drain_from_front(other, other.len());
529    }
530
531    /// Remove `count` items from the front of `other` and append them to the
532    /// back of `self`.
533    ///
534    /// Panics if `self` doesn't have `count` items left, or if `other` has
535    /// fewer than `count` items.
536    ///
537    /// Time: O(n) for the number of items moved
538    pub fn drain_from_front(&mut self, other: &mut Self, count: usize) {
539        let self_len = self.len();
540        let other_len = other.len();
541        if self_len + count > Self::CAPACITY {
542            panic!("RingBuffer::drain_from_front: chunk size overflow");
543        }
544        if other_len < count {
545            panic!("RingBuffer::drain_from_front: index out of bounds");
546        }
547        unsafe { self.copy_from(other, other.origin, self.raw(self.len()), count) };
548        other.origin += count;
549        other.length -= count;
550        self.length += count;
551    }
552
553    /// Remove `count` items from the back of `other` and append them to the
554    /// front of `self`.
555    ///
556    /// Panics if `self` doesn't have `count` items left, or if `other` has
557    /// fewer than `count` items.
558    ///
559    /// Time: O(n) for the number of items moved
560    pub fn drain_from_back(&mut self, other: &mut Self, count: usize) {
561        let self_len = self.len();
562        let other_len = other.len();
563        if self_len + count > Self::CAPACITY {
564            panic!("RingBuffer::drain_from_back: chunk size overflow");
565        }
566        if other_len < count {
567            panic!("RingBuffer::drain_from_back: index out of bounds");
568        }
569        self.origin -= count;
570        let source_index = other.origin + (other.len() - count);
571        unsafe { self.copy_from(other, source_index, self.origin, count) };
572        other.length -= count;
573        self.length += count;
574    }
575
576    /// Insert a new value at index `index`, shifting all the following values
577    /// to the right.
578    ///
579    /// Panics if the index is out of bounds.
580    ///
581    /// Time: O(n) for the number of items shifted
582    pub fn insert(&mut self, index: usize, value: A) {
583        if self.is_full() {
584            panic!("RingBuffer::insert: chunk size overflow");
585        }
586        if index > self.len() {
587            panic!("RingBuffer::insert: index out of bounds");
588        }
589        if index == 0 {
590            return self.push_front(value);
591        }
592        if index == self.len() {
593            return self.push_back(value);
594        }
595        let right_count = self.len() - index;
596        // Check which side has fewer elements to shift.
597        if right_count < index {
598            // Shift to the right.
599            let mut i = self.raw(self.len() - 1);
600            let target = self.raw(index);
601            while i != target {
602                unsafe { self.force_write(i + 1, self.force_read(i)) };
603                i -= 1;
604            }
605            unsafe { self.force_write(target + 1, self.force_read(target)) };
606            self.length += 1;
607        } else {
608            // Shift to the left.
609            self.origin -= 1;
610            self.length += 1;
611            for i in self.range().take(index) {
612                unsafe { self.force_write(i, self.force_read(i + 1)) };
613            }
614        }
615        unsafe { self.force_write(self.raw(index), value) };
616    }
617
618    /// Insert a new value into the buffer in sorted order.
619    ///
620    /// This assumes every element of the buffer is already in sorted order.
621    /// If not, the value will still be inserted but the ordering is not
622    /// guaranteed.
623    ///
624    /// Time: O(log n) to find the insert position, then O(n) for the number
625    /// of elements shifted.
626    pub fn insert_ordered(&mut self, value: A)
627    where
628        A: Ord,
629    {
630        if self.is_full() {
631            panic!("Chunk::insert: chunk is full");
632        }
633        match self.slice(..).binary_search(&value) {
634            Ok(index) => self.insert(index, value),
635            Err(index) => self.insert(index, value),
636        }
637    }
638
639    /// Insert multiple values at index `index`, shifting all the following values
640    /// to the right.
641    ///
642    /// Panics if the index is out of bounds or the chunk doesn't have room for
643    /// all the values.
644    ///
645    /// Time: O(m+n) where m is the number of elements inserted and n is the number
646    /// of elements following the insertion index. Calling `insert`
647    /// repeatedly would be O(m*n).
648    pub fn insert_from<Iterable, I>(&mut self, index: usize, iter: Iterable)
649    where
650        Iterable: IntoIterator<Item = A, IntoIter = I>,
651        I: ExactSizeIterator<Item = A>,
652    {
653        let iter = iter.into_iter();
654        let insert_size = iter.len();
655        if self.len() + insert_size > Self::CAPACITY {
656            panic!(
657                "Chunk::insert_from: chunk cannot fit {} elements",
658                insert_size
659            );
660        }
661        if index > self.len() {
662            panic!("Chunk::insert_from: index out of bounds");
663        }
664        if index == self.len() {
665            self.extend(iter);
666            return;
667        }
668        let right_count = self.len() - index;
669        // Check which side has fewer elements to shift.
670        if right_count < index {
671            // Shift to the right.
672            let mut i = self.raw(self.len() - 1);
673            let target = self.raw(index);
674            while i != target {
675                unsafe { self.force_write(i + insert_size, self.force_read(i)) };
676                i -= 1;
677            }
678            unsafe { self.force_write(target + insert_size, self.force_read(target)) };
679            self.length += insert_size;
680        } else {
681            // Shift to the left.
682            self.origin -= insert_size;
683            self.length += insert_size;
684            for i in self.range().take(index) {
685                unsafe { self.force_write(i, self.force_read(i + insert_size)) };
686            }
687        }
688        let mut index = self.raw(index);
689        // Panic safety: unless and until we fill it fully, there's a hole somewhere in the middle
690        // and the destructor would drop non-existing elements. Therefore we pretend to be empty
691        // for a while (and leak the elements instead in case something bad happens).
692        let mut inserted = 0;
693        let length = replace(&mut self.length, 0);
694        for value in iter.take(insert_size) {
695            unsafe { self.force_write(index, value) };
696            index += 1;
697            inserted += 1;
698        }
699        // This would/could create a hole in the middle if it was less
700        assert_eq!(
701            inserted, insert_size,
702            "Iterator has fewer elements than advertised",
703        );
704        self.length = length;
705    }
706
707    /// Remove the value at index `index`, shifting all the following values to
708    /// the left.
709    ///
710    /// Returns the removed value.
711    ///
712    /// Panics if the index is out of bounds.
713    ///
714    /// Time: O(n) for the number of items shifted
715    pub fn remove(&mut self, index: usize) -> A {
716        if index >= self.len() {
717            panic!("RingBuffer::remove: index out of bounds");
718        }
719        let value = unsafe { self.force_read(self.raw(index)) };
720        let right_count = self.len() - index;
721        // Check which side has fewer elements to shift.
722        if right_count < index {
723            // Shift from the right.
724            self.length -= 1;
725            let mut i = self.raw(index);
726            let target = self.raw(self.len());
727            while i != target {
728                unsafe { self.force_write(i, self.force_read(i + 1)) };
729                i += 1;
730            }
731        } else {
732            // Shift from the left.
733            let mut i = self.raw(index);
734            while i != self.origin {
735                unsafe { self.force_write(i, self.force_read(i - 1)) };
736                i -= 1;
737            }
738            self.origin += 1;
739            self.length -= 1;
740        }
741        value
742    }
743
744    /// Construct an iterator that drains values from the front of the buffer.
745    pub fn drain(&mut self) -> Drain<'_, A, N> {
746        Drain { buffer: self }
747    }
748
749    /// Discard the contents of the buffer.
750    ///
751    /// Time: O(n)
752    pub fn clear(&mut self) {
753        for i in self.range() {
754            unsafe { self.force_drop(i) };
755        }
756        self.origin = 0.into();
757        self.length = 0;
758    }
759}
760
761impl<A, const N: usize> Default for RingBuffer<A, N> {
762    #[inline]
763    #[must_use]
764    fn default() -> Self {
765        Self::new()
766    }
767}
768
impl<A: Clone, const N: usize> Clone for RingBuffer<A, N> {
    /// Clone the buffer element by element, preserving the original's
    /// origin so raw indices line up between the two buffers.
    fn clone(&self) -> Self {
        let mut out = Self::new();
        out.origin = self.origin;
        // NOTE(review): this assignment is immediately overwritten below;
        // it appears redundant but is left as-is to preserve behaviour.
        out.length = self.length;
        let range = self.range();
        // Panic safety. If we panic, we don't want to drop more than we have initialized.
        out.length = 0;
        for index in range {
            // Write the clone into the matching raw slot, then bump the
            // length so `out`'s destructor only ever sees initialised slots.
            unsafe { out.force_write(index, (&*self.ptr(index)).clone()) };
            out.length += 1;
        }
        out
    }
}
784
785impl<A, const N: usize> Index<usize> for RingBuffer<A, N> {
786    type Output = A;
787
788    #[must_use]
789    fn index(&self, index: usize) -> &Self::Output {
790        if index >= self.len() {
791            panic!(
792                "RingBuffer::index: index out of bounds {} >= {}",
793                index,
794                self.len()
795            );
796        }
797        unsafe { &*self.ptr(self.raw(index)) }
798    }
799}
800
801impl<A, const N: usize> IndexMut<usize> for RingBuffer<A, N> {
802    #[must_use]
803    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
804        if index >= self.len() {
805            panic!(
806                "RingBuffer::index_mut: index out of bounds {} >= {}",
807                index,
808                self.len()
809            );
810        }
811        unsafe { &mut *self.mut_ptr(self.raw(index)) }
812    }
813}
814
815impl<A: PartialEq, const N: usize> PartialEq for RingBuffer<A, N> {
816    #[inline]
817    #[must_use]
818    fn eq(&self, other: &Self) -> bool {
819        self.len() == other.len() && self.iter().eq(other.iter())
820    }
821}
822
823impl<A, PrimSlice, const N: usize> PartialEq<PrimSlice> for RingBuffer<A, N>
824where
825    PrimSlice: Borrow<[A]>,
826    A: PartialEq,
827{
828    #[inline]
829    #[must_use]
830    fn eq(&self, other: &PrimSlice) -> bool {
831        let other = other.borrow();
832        self.len() == other.len() && self.iter().eq(other.iter())
833    }
834}
835
836impl<A, const N: usize> PartialEq<Slice<'_, A, N>> for RingBuffer<A, N>
837where
838    A: PartialEq,
839{
840    fn eq(&self, other: &Slice<'_, A, N>) -> bool {
841        self.len() == other.len() && self.iter().eq(other.iter())
842    }
843}
844
845impl<A, const N: usize> PartialEq<SliceMut<'_, A, N>> for RingBuffer<A, N>
846where
847    A: PartialEq,
848{
849    fn eq(&self, other: &SliceMut<'_, A, N>) -> bool {
850        self.len() == other.len() && self.iter().eq(other.iter())
851    }
852}
853
// Equality is element-wise, so `RingBuffer` is `Eq` whenever `A` is.
impl<A: Eq, const N: usize> Eq for RingBuffer<A, N> {}
855
856impl<A: PartialOrd, const N: usize> PartialOrd for RingBuffer<A, N> {
857    #[inline]
858    #[must_use]
859    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
860        self.iter().partial_cmp(other.iter())
861    }
862}
863
864impl<A: Ord, const N: usize> Ord for RingBuffer<A, N> {
865    #[inline]
866    #[must_use]
867    fn cmp(&self, other: &Self) -> Ordering {
868        self.iter().cmp(other.iter())
869    }
870}
871
872impl<A, const N: usize> Extend<A> for RingBuffer<A, N> {
873    #[inline]
874    fn extend<I: IntoIterator<Item = A>>(&mut self, iter: I) {
875        for item in iter {
876            self.push_back(item);
877        }
878    }
879}
880
881impl<'a, A: Clone + 'a, const N: usize> Extend<&'a A> for RingBuffer<A, N> {
882    #[inline]
883    fn extend<I: IntoIterator<Item = &'a A>>(&mut self, iter: I) {
884        for item in iter {
885            self.push_back(item.clone());
886        }
887    }
888}
889
890impl<A: Debug, const N: usize> Debug for RingBuffer<A, N> {
891    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
892        f.write_str("RingBuffer")?;
893        f.debug_list().entries(self.iter()).finish()
894    }
895}
896
897impl<A: Hash, const N: usize> Hash for RingBuffer<A, N> {
898    #[inline]
899    fn hash<H: Hasher>(&self, hasher: &mut H) {
900        for item in self {
901            item.hash(hasher)
902        }
903    }
904}
905
#[cfg(feature = "std")]
impl<const N: usize> std::io::Write for RingBuffer<u8, N> {
    /// Write as many bytes from `buf` as will fit in the remaining
    /// capacity, returning the number actually written.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let available = Self::CAPACITY - self.len();
        let write_size = buf.len().min(available);
        // SAFETY: the slots after the last element are uninitialised, and
        // `write_size` never exceeds the free capacity.
        unsafe { self.copy_from_slice(&buf[..write_size], self.origin + self.len()) };
        self.length += write_size;
        Ok(write_size)
    }

    /// Writes go straight into the buffer, so flushing is a no-op.
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
923
#[cfg(feature = "std")]
impl<const N: usize> std::io::Read for RingBuffer<u8, N> {
    /// Pop bytes off the front of the buffer into `buf`, returning how
    /// many were read (zero when the buffer is empty).
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let read_size = self.len().min(buf.len());
        for slot in buf.iter_mut().take(read_size) {
            // The take() bound guarantees the buffer still has bytes here.
            *slot = self.pop_front().unwrap();
        }
        Ok(read_size)
    }
}
938
939impl<A, const N: usize> FromIterator<A> for RingBuffer<A, N> {
940    #[must_use]
941    fn from_iter<I: IntoIterator<Item = A>>(iter: I) -> Self {
942        let mut buffer = RingBuffer::new();
943        buffer.extend(iter);
944        buffer
945    }
946}
947
948impl<A, const N: usize> IntoIterator for RingBuffer<A, N> {
949    type Item = A;
950    type IntoIter = OwnedIter<A, N>;
951
952    #[inline]
953    #[must_use]
954    fn into_iter(self) -> Self::IntoIter {
955        OwnedIter { buffer: self }
956    }
957}
958
959impl<'a, A, const N: usize> IntoIterator for &'a RingBuffer<A, N> {
960    type Item = &'a A;
961    type IntoIter = Iter<'a, A, N>;
962
963    #[inline]
964    #[must_use]
965    fn into_iter(self) -> Self::IntoIter {
966        self.iter()
967    }
968}
969
970impl<'a, A, const N: usize> IntoIterator for &'a mut RingBuffer<A, N> {
971    type Item = &'a mut A;
972    type IntoIter = IterMut<'a, A, N>;
973
974    #[inline]
975    #[must_use]
976    fn into_iter(self) -> Self::IntoIter {
977        self.iter_mut()
978    }
979}
980
981// Tests
982
#[cfg(test)]
mod test {
    use super::*;

    // Smoke test: constructing a buffer of a type with a validity
    // invariant (`Box`) and wrapping it in `Some` must be sound — the
    // uninitialised backing storage must never be treated as values.
    #[test]
    fn validity_invariant() {
        assert!(Some(RingBuffer::<Box<()>, 64>::new()).is_some());
    }

    // `is_full` flips to true exactly when the 64th element lands.
    #[test]
    fn is_full() {
        let mut chunk = RingBuffer::<_, 64>::new();
        for i in 0..64 {
            assert_eq!(false, chunk.is_full());
            chunk.push_back(i);
        }
        assert_eq!(true, chunk.is_full());
    }

    // `iter()` yields shared references in front-to-back order.
    #[test]
    fn ref_iter() {
        let chunk: RingBuffer<i32, 64> = (0..64).collect();
        let out_vec: Vec<&i32> = chunk.iter().collect();
        let should_vec_p: Vec<i32> = (0..64).collect();
        let should_vec: Vec<&i32> = should_vec_p.iter().collect();
        assert_eq!(should_vec, out_vec);
    }

    // `iter_mut()` yields mutable references in front-to-back order.
    #[test]
    fn mut_ref_iter() {
        let mut chunk: RingBuffer<i32, 64> = (0..64).collect();
        let out_vec: Vec<&mut i32> = chunk.iter_mut().collect();
        let mut should_vec_p: Vec<i32> = (0..64).collect();
        let should_vec: Vec<&mut i32> = should_vec_p.iter_mut().collect();
        assert_eq!(should_vec, out_vec);
    }

    // `into_iter()` consumes the buffer and yields owned values in order.
    #[test]
    fn consuming_iter() {
        let chunk: RingBuffer<i32, 64> = (0..64).collect();
        let out_vec: Vec<i32> = chunk.into_iter().collect();
        let should_vec: Vec<i32> = (0..64).collect();
        assert_eq!(should_vec, out_vec);
    }

    // `drain()` removes from the front; a partially consumed drain must
    // leave the remaining elements intact, and draining in reverse must
    // remove from the back.
    #[test]
    fn draining_iter() {
        let mut chunk: RingBuffer<i32, 64> = (0..64).collect();
        let mut half: RingBuffer<i32, 64> = chunk.drain().take(16).collect();
        half.extend(chunk.drain().rev().take(16));
        let should: Vec<i32> = (16..48).collect();
        assert_eq!(chunk, should);
        let should: Vec<i32> = (0..16).chain((48..64).rev()).collect();
        assert_eq!(half, should);
    }

    // `Write::write` accepts only as many bytes as fit (32 of 96 here)
    // and appends them after the existing contents.
    #[cfg(feature = "std")]
    #[test]
    fn io_write() {
        use std::io::Write;
        let mut buffer: RingBuffer<u8, 64> = (0..32).collect();
        let to_write: Vec<u8> = (32..128).collect();
        assert_eq!(32, buffer.write(&to_write).unwrap());
        assert_eq!(buffer, (0..64).collect::<Vec<u8>>());
    }

    // `Read::read` pops from the front, fills at most `buf.len()` bytes,
    // and returns 0 once the buffer is exhausted.
    #[cfg(feature = "std")]
    #[test]
    fn io_read() {
        use std::io::Read;
        let mut buffer: RingBuffer<u8, 64> = (16..48).collect();
        let mut read_buf: Vec<u8> = (0..16).collect();
        assert_eq!(16, buffer.read(&mut read_buf).unwrap());
        assert_eq!(read_buf, (16..32).collect::<Vec<u8>>());
        assert_eq!(buffer, (32..48).collect::<Vec<u8>>());
        assert_eq!(16, buffer.read(&mut read_buf).unwrap());
        assert_eq!(read_buf, (32..48).collect::<Vec<u8>>());
        assert_eq!(buffer, vec![]);
        assert_eq!(0, buffer.read(&mut read_buf).unwrap());
    }

    // A clone must compare equal to its source.
    #[test]
    fn clone() {
        let buffer: RingBuffer<u32, 64> = (0..50).collect();
        assert_eq!(buffer, buffer.clone());
    }

    // Regression test: `remove` after a `push_front` + `append`
    // (i.e. with a non-zero origin) must return the removed element and
    // close the gap correctly.
    #[test]
    fn failing() {
        let mut buffer: RingBuffer<u32, 64> = RingBuffer::new();
        buffer.push_front(0);
        let mut add: RingBuffer<u32, 64> = vec![1, 0, 0, 0, 0, 0].into_iter().collect();
        buffer.append(&mut add);
        assert_eq!(1, buffer.remove(1));
        let expected = vec![0, 0, 0, 0, 0, 0];
        assert_eq!(buffer, expected);
    }

    use crate::tests::DropTest;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Each live element must be dropped exactly once: popping drops
    // immediately, and dropping the buffer drops everything left.
    #[test]
    fn dropping() {
        let counter = AtomicUsize::new(0);
        {
            let mut chunk: RingBuffer<DropTest<'_>, 64> = RingBuffer::new();
            for _i in 0..20 {
                chunk.push_back(DropTest::new(&counter))
            }
            for _i in 0..20 {
                chunk.push_front(DropTest::new(&counter))
            }
            assert_eq!(40, counter.load(Ordering::Relaxed));
            for _i in 0..10 {
                chunk.pop_back();
            }
            assert_eq!(30, counter.load(Ordering::Relaxed));
        }
        assert_eq!(0, counter.load(Ordering::Relaxed));
    }

    // `unit` on a zero-capacity buffer must panic with the capacity
    // assertion message rather than corrupt memory.
    #[test]
    #[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")]
    fn unit_on_empty() {
        let _ = RingBuffer::<usize, 0>::unit(1);
    }

    // Likewise `pair` requires capacity for two elements.
    #[test]
    #[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")]
    fn pair_on_empty() {
        let _ = RingBuffer::<usize, 0>::pair(1, 2);
    }
}