use std::borrow::Borrow;
use std::cmp::Ordering;
use std::fmt::{Debug, Error, Formatter};
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::mem::MaybeUninit;
use std::ops::{Bound, Range, RangeBounds};
use std::ops::{Index, IndexMut};
use typenum::U64;
use crate::types::ChunkLength;
mod index;
use index::{IndexIter, RawIndex};
mod iter;
pub use iter::{Drain, Iter, IterMut, OwnedIter};
mod slice;
pub use slice::{Slice, SliceMut};
#[cfg(feature = "refpool")]
mod refpool;
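/// A fixed capacity ring buffer.
///
/// The buffer is backed by an array of `N::USIZE` slots (`U64`, i.e. 64, by
/// default), and the occupied region may start at any slot and wrap around
/// the end of the array. This gives O(1) pushes and pops at both ends, at the
/// cost of the contents not being laid out contiguously in memory.
///
/// Elements are stored inline, so even an empty buffer occupies
/// `CAPACITY * size_of::<A>()` bytes plus a little bookkeeping.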
pub struct RingBuffer<A, N = U64>
where
N: ChunkLength<A>,
{
origin: RawIndex<A, N>,
length: usize,
data: MaybeUninit<N::SizedType>,
}
impl<A, N: ChunkLength<A>> Drop for RingBuffer<A, N> {
#[inline]
fn drop(&mut self) {
if std::mem::needs_drop::<A>() {
for i in self.range() {
unsafe { self.force_drop(i) }
}
}
}
}
impl<A, N> RingBuffer<A, N>
where
N: ChunkLength<A>,
{
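    /// The maximum number of elements this buffer can hold.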
pub const CAPACITY: usize = N::USIZE;
#[inline]
fn raw(&self, index: usize) -> RawIndex<A, N> {
self.origin + index
}
#[inline]
unsafe fn ptr(&self, index: RawIndex<A, N>) -> *const A {
debug_assert!(index.to_usize() < Self::CAPACITY);
(&self.data as *const _ as *const A).add(index.to_usize())
}
#[inline]
unsafe fn mut_ptr(&mut self, index: RawIndex<A, N>) -> *mut A {
debug_assert!(index.to_usize() < Self::CAPACITY);
(&mut self.data as *mut _ as *mut A).add(index.to_usize())
}
#[inline]
unsafe fn force_drop(&mut self, index: RawIndex<A, N>) {
std::ptr::drop_in_place(self.mut_ptr(index))
}
#[inline]
unsafe fn force_read(&self, index: RawIndex<A, N>) -> A {
std::ptr::read(self.ptr(index))
}
#[inline]
unsafe fn force_write(&mut self, index: RawIndex<A, N>, value: A) {
std::ptr::write(self.mut_ptr(index), value)
}
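    /// Copy `count` elements from `source`, starting at raw index `from`, into
    /// `self` starting at raw index `to`. Either region may wrap around the
    /// end of the backing array; a wrapping copy is split into two contiguous
    /// `copy_nonoverlapping` calls.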
unsafe fn copy_from(
&mut self,
source: &mut Self,
from: RawIndex<A, N>,
to: RawIndex<A, N>,
count: usize,
) {
#[inline]
unsafe fn force_copy_to<A, N: ChunkLength<A>>(
source: &mut RingBuffer<A, N>,
from: RawIndex<A, N>,
target: &mut RingBuffer<A, N>,
to: RawIndex<A, N>,
count: usize,
) {
if count > 0 {
debug_assert!(from.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
debug_assert!(to.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
std::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count)
}
}
if from.to_usize() + count > Self::CAPACITY {
let first_length = Self::CAPACITY - from.to_usize();
let last_length = count - first_length;
self.copy_from(source, from, to, first_length);
self.copy_from(source, 0.into(), to + first_length, last_length);
} else if to.to_usize() + count > Self::CAPACITY {
let first_length = Self::CAPACITY - to.to_usize();
let last_length = count - first_length;
force_copy_to(source, from, self, to, first_length);
force_copy_to(source, from + first_length, self, 0.into(), last_length);
} else {
force_copy_to(source, from, self, to, count);
}
}
unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex<A, N>) {
let count = source.len();
        // The destination region may wrap around the end of the buffer (the
        // branch below handles that), so the only hard requirement is that the
        // slice fits in the buffer at all.
        debug_assert!(count <= Self::CAPACITY);
if to.to_usize() + count > Self::CAPACITY {
let first_length = Self::CAPACITY - to.to_usize();
let first_slice = &source[..first_length];
let last_slice = &source[first_length..];
std::ptr::copy_nonoverlapping(
first_slice.as_ptr(),
self.mut_ptr(to),
first_slice.len(),
);
std::ptr::copy_nonoverlapping(
last_slice.as_ptr(),
self.mut_ptr(0.into()),
last_slice.len(),
);
} else {
std::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count)
}
}
#[inline]
fn range(&self) -> IndexIter<A, N> {
IndexIter {
remaining: self.len(),
left_index: self.origin,
right_index: self.origin + self.len(),
}
}
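    /// Construct an empty buffer.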
#[inline]
#[must_use]
pub fn new() -> Self {
Self {
origin: 0.into(),
length: 0,
data: MaybeUninit::uninit(),
}
}
#[inline]
#[must_use]
pub fn unit(value: A) -> Self {
let mut buffer = Self {
origin: 0.into(),
length: 1,
data: MaybeUninit::uninit(),
};
unsafe {
buffer.force_write(0.into(), value);
}
buffer
}
#[inline]
#[must_use]
pub fn pair(value1: A, value2: A) -> Self {
let mut buffer = Self {
origin: 0.into(),
length: 2,
data: MaybeUninit::uninit(),
};
unsafe {
buffer.force_write(0.into(), value1);
buffer.force_write(1.into(), value2);
}
buffer
}
#[inline]
#[must_use]
pub fn drain_from(other: &mut Self) -> Self {
Self::from_front(other, other.len())
}
#[must_use]
pub fn collect_from<I>(iter: &mut I, count: usize) -> Self
where
I: Iterator<Item = A>,
{
let buffer = Self::from_iter(iter.take(count));
if buffer.len() < count {
panic!("RingBuffer::collect_from: underfull iterator");
}
buffer
}
#[must_use]
pub fn from_front(other: &mut Self, count: usize) -> Self {
let mut buffer = Self::new();
buffer.drain_from_front(other, count);
buffer
}
#[must_use]
pub fn from_back(other: &mut Self, count: usize) -> Self {
let mut buffer = Self::new();
buffer.drain_from_back(other, count);
buffer
}
#[inline]
#[must_use]
pub fn len(&self) -> usize {
self.length
}
#[inline]
#[must_use]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
#[inline]
#[must_use]
pub fn is_full(&self) -> bool {
self.len() == Self::CAPACITY
}
#[inline]
#[must_use]
pub fn iter(&self) -> Iter<'_, A, N> {
Iter {
buffer: self,
left_index: self.origin,
right_index: self.origin + self.len(),
remaining: self.len(),
}
}
#[inline]
#[must_use]
pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
IterMut {
left_index: self.origin,
right_index: self.origin + self.len(),
remaining: self.len(),
buffer: self,
}
}
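    /// Resolve a generic `RangeBounds` into a concrete `Range` over valid
    /// indices, panicking if the range is inverted or extends past the end of
    /// the buffer. Excluded start bounds are not supported.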
#[must_use]
fn parse_range<R: RangeBounds<usize>>(&self, range: R) -> Range<usize> {
let new_range = Range {
start: match range.start_bound() {
Bound::Unbounded => 0,
Bound::Included(index) => *index,
Bound::Excluded(_) => unimplemented!(),
},
end: match range.end_bound() {
Bound::Unbounded => self.len(),
Bound::Included(index) => *index + 1,
Bound::Excluded(index) => *index,
},
};
if new_range.end > self.len() || new_range.start > new_range.end {
panic!("Slice::parse_range: index out of bounds");
}
new_range
}
#[must_use]
pub fn slice<R: RangeBounds<usize>>(&self, range: R) -> Slice<'_, A, N> {
Slice {
buffer: self,
range: self.parse_range(range),
}
}
#[must_use]
pub fn slice_mut<R: RangeBounds<usize>>(&mut self, range: R) -> SliceMut<'_, A, N> {
SliceMut {
range: self.parse_range(range),
buffer: self,
}
}
#[must_use]
pub fn get(&self, index: usize) -> Option<&A> {
if index >= self.len() {
None
} else {
Some(unsafe { &*self.ptr(self.raw(index)) })
}
}
#[must_use]
pub fn get_mut(&mut self, index: usize) -> Option<&mut A> {
if index >= self.len() {
None
} else {
Some(unsafe { &mut *self.mut_ptr(self.raw(index)) })
}
}
#[inline]
#[must_use]
pub fn first(&self) -> Option<&A> {
self.get(0)
}
#[inline]
#[must_use]
pub fn first_mut(&mut self) -> Option<&mut A> {
self.get_mut(0)
}
#[inline]
#[must_use]
pub fn last(&self) -> Option<&A> {
if self.is_empty() {
None
} else {
self.get(self.len() - 1)
}
}
#[inline]
#[must_use]
pub fn last_mut(&mut self) -> Option<&mut A> {
if self.is_empty() {
None
} else {
self.get_mut(self.len() - 1)
}
}
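    /// Push a value onto the back of the buffer.
    ///
    /// Panics if the buffer is already full.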
pub fn push_back(&mut self, value: A) {
if self.is_full() {
panic!("RingBuffer::push_back: can't push to a full buffer")
} else {
unsafe { self.force_write(self.raw(self.length), value) }
self.length += 1;
}
}
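    /// Push a value onto the front of the buffer.
    ///
    /// Panics if the buffer is already full.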
pub fn push_front(&mut self, value: A) {
if self.is_full() {
panic!("RingBuffer::push_front: can't push to a full buffer")
} else {
let origin = self.origin.dec();
self.length += 1;
unsafe { self.force_write(origin, value) }
}
}
pub fn pop_back(&mut self) -> Option<A> {
if self.is_empty() {
None
} else {
self.length -= 1;
Some(unsafe { self.force_read(self.raw(self.length)) })
}
}
pub fn pop_front(&mut self) -> Option<A> {
if self.is_empty() {
None
} else {
self.length -= 1;
let index = self.origin.inc();
Some(unsafe { self.force_read(index) })
}
}
pub fn drop_left(&mut self, index: usize) {
if index > 0 {
if index > self.len() {
panic!("RingBuffer::drop_left: index out of bounds");
}
for i in self.range().take(index) {
unsafe { self.force_drop(i) }
}
self.origin += index;
self.length -= index;
}
}
pub fn drop_right(&mut self, index: usize) {
if index > self.len() {
panic!("RingBuffer::drop_right: index out of bounds");
}
if index == self.len() {
return;
}
for i in self.range().skip(index) {
unsafe { self.force_drop(i) }
}
self.length = index;
}
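    /// Split the buffer at `index`, leaving the first `index` elements in
    /// place and returning a new buffer containing the rest.
    ///
    /// Panics if `index` is out of bounds.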
#[must_use]
pub fn split_off(&mut self, index: usize) -> Self {
if index > self.len() {
panic!("RingBuffer::split: index out of bounds");
}
if index == self.len() {
return Self::new();
}
let mut right = Self::new();
let length = self.length - index;
unsafe { right.copy_from(self, self.raw(index), 0.into(), length) };
self.length = index;
right.length = length;
right
}
#[inline]
pub fn append(&mut self, other: &mut Self) {
self.drain_from_front(other, other.len());
}
pub fn drain_from_front(&mut self, other: &mut Self, count: usize) {
let self_len = self.len();
let other_len = other.len();
if self_len + count > Self::CAPACITY {
panic!("RingBuffer::drain_from_front: chunk size overflow");
}
if other_len < count {
panic!("RingBuffer::drain_from_front: index out of bounds");
}
unsafe { self.copy_from(other, other.origin, self.raw(self.len()), count) };
other.origin += count;
other.length -= count;
self.length += count;
}
pub fn drain_from_back(&mut self, other: &mut Self, count: usize) {
let self_len = self.len();
let other_len = other.len();
if self_len + count > Self::CAPACITY {
panic!("RingBuffer::drain_from_back: chunk size overflow");
}
if other_len < count {
panic!("RingBuffer::drain_from_back: index out of bounds");
}
self.origin -= count;
let source_index = other.origin + (other.len() - count);
unsafe { self.copy_from(other, source_index, self.origin, count) };
other.length -= count;
self.length += count;
}
pub fn set(&mut self, index: usize, value: A) -> A {
std::mem::replace(&mut self[index], value)
}
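    /// Insert a value at `index`, shifting whichever side of the buffer holds
    /// fewer elements to make room.
    ///
    /// Panics if the buffer is full or `index` is out of bounds.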
pub fn insert(&mut self, index: usize, value: A) {
if self.is_full() {
panic!("RingBuffer::insert: chunk size overflow");
}
if index > self.len() {
panic!("RingBuffer::insert: index out of bounds");
}
if index == 0 {
return self.push_front(value);
}
if index == self.len() {
return self.push_back(value);
}
let right_count = self.len() - index;
if right_count < index {
let mut i = self.raw(self.len() - 1);
let target = self.raw(index);
while i != target {
unsafe { self.force_write(i + 1, self.force_read(i)) };
i -= 1;
}
unsafe { self.force_write(target + 1, self.force_read(target)) };
self.length += 1;
} else {
self.origin -= 1;
self.length += 1;
for i in self.range().take(index) {
unsafe { self.force_write(i, self.force_read(i + 1)) };
}
}
unsafe { self.force_write(self.raw(index), value) };
}
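    /// Remove the value at `index` and return it, shifting whichever side of
    /// the buffer holds fewer elements to close the gap.
    ///
    /// Panics if `index` is out of bounds.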
pub fn remove(&mut self, index: usize) -> A {
if index >= self.len() {
panic!("RingBuffer::remove: index out of bounds");
}
let value = unsafe { self.force_read(self.raw(index)) };
let right_count = self.len() - index;
if right_count < index {
self.length -= 1;
let mut i = self.raw(index);
let target = self.raw(self.len());
while i != target {
unsafe { self.force_write(i, self.force_read(i + 1)) };
i += 1;
}
} else {
let mut i = self.raw(index);
while i != self.origin {
unsafe { self.force_write(i, self.force_read(i - 1)) };
i -= 1;
}
self.origin += 1;
self.length -= 1;
}
value
}
pub fn drain(&mut self) -> Drain<'_, A, N> {
Drain { buffer: self }
}
pub fn clear(&mut self) {
for i in self.range() {
unsafe { self.force_drop(i) };
}
self.origin = 0.into();
self.length = 0;
}
}
impl<A, N: ChunkLength<A>> Default for RingBuffer<A, N> {
#[inline]
#[must_use]
fn default() -> Self {
Self::new()
}
}
impl<A: Clone, N: ChunkLength<A>> Clone for RingBuffer<A, N> {
fn clone(&self) -> Self {
let mut out = Self::new();
        out.origin = self.origin;
        // Bump the length one element at a time so that, if `A::clone` panics
        // part way through, dropping `out` only touches slots that have
        // actually been written.
        for index in self.range() {
            unsafe { out.force_write(index, (&*self.ptr(index)).clone()) };
            out.length += 1;
        }
out
}
}
impl<A, N> Index<usize> for RingBuffer<A, N>
where
N: ChunkLength<A>,
{
type Output = A;
#[must_use]
fn index(&self, index: usize) -> &Self::Output {
if index >= self.len() {
panic!(
"RingBuffer::index: index out of bounds {} >= {}",
index,
self.len()
);
}
unsafe { &*self.ptr(self.raw(index)) }
}
}
impl<A, N> IndexMut<usize> for RingBuffer<A, N>
where
N: ChunkLength<A>,
{
#[must_use]
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
if index >= self.len() {
panic!(
"RingBuffer::index_mut: index out of bounds {} >= {}",
index,
self.len()
);
}
unsafe { &mut *self.mut_ptr(self.raw(index)) }
}
}
impl<A: PartialEq, N: ChunkLength<A>> PartialEq for RingBuffer<A, N> {
#[inline]
#[must_use]
fn eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.iter().eq(other.iter())
}
}
impl<A, N, Slice> PartialEq<Slice> for RingBuffer<A, N>
where
Slice: Borrow<[A]>,
A: PartialEq,
N: ChunkLength<A>,
{
#[inline]
#[must_use]
fn eq(&self, other: &Slice) -> bool {
let other = other.borrow();
self.len() == other.len() && self.iter().eq(other.iter())
}
}
impl<A: Eq, N: ChunkLength<A>> Eq for RingBuffer<A, N> {}
impl<A: PartialOrd, N: ChunkLength<A>> PartialOrd for RingBuffer<A, N> {
#[inline]
#[must_use]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
impl<A: Ord, N: ChunkLength<A>> Ord for RingBuffer<A, N> {
#[inline]
#[must_use]
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other.iter())
}
}
impl<A, N: ChunkLength<A>> Extend<A> for RingBuffer<A, N> {
#[inline]
fn extend<I: IntoIterator<Item = A>>(&mut self, iter: I) {
for item in iter {
self.push_back(item);
}
}
}
impl<'a, A: Clone + 'a, N: ChunkLength<A>> Extend<&'a A> for RingBuffer<A, N> {
#[inline]
fn extend<I: IntoIterator<Item = &'a A>>(&mut self, iter: I) {
for item in iter {
self.push_back(item.clone());
}
}
}
impl<A: Debug, N: ChunkLength<A>> Debug for RingBuffer<A, N> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
f.write_str("RingBuffer")?;
f.debug_list().entries(self.iter()).finish()
}
}
impl<A: Hash, N: ChunkLength<A>> Hash for RingBuffer<A, N> {
#[inline]
fn hash<H: Hasher>(&self, hasher: &mut H) {
for item in self {
item.hash(hasher)
}
}
}
impl<N: ChunkLength<u8>> std::io::Write for RingBuffer<u8, N> {
fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> {
let max_new = Self::CAPACITY - self.len();
if buf.len() > max_new {
buf = &buf[..max_new];
}
unsafe { self.copy_from_slice(buf, self.origin + self.len()) };
self.length += buf.len();
Ok(buf.len())
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
}
impl<N: ChunkLength<u8>> std::io::Read for RingBuffer<u8, N> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let read_size = buf.len().min(self.len());
if read_size == 0 {
Ok(0)
} else {
for p in buf.iter_mut().take(read_size) {
*p = self.pop_front().unwrap();
}
Ok(read_size)
}
}
}
impl<A, N: ChunkLength<A>> FromIterator<A> for RingBuffer<A, N> {
#[must_use]
fn from_iter<I: IntoIterator<Item = A>>(iter: I) -> Self {
let mut buffer = RingBuffer::new();
buffer.extend(iter);
buffer
}
}
impl<A, N: ChunkLength<A>> IntoIterator for RingBuffer<A, N> {
type Item = A;
type IntoIter = OwnedIter<A, N>;
#[inline]
#[must_use]
fn into_iter(self) -> Self::IntoIter {
OwnedIter { buffer: self }
}
}
impl<'a, A, N: ChunkLength<A>> IntoIterator for &'a RingBuffer<A, N> {
type Item = &'a A;
type IntoIter = Iter<'a, A, N>;
#[inline]
#[must_use]
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, A, N: ChunkLength<A>> IntoIterator for &'a mut RingBuffer<A, N> {
type Item = &'a mut A;
type IntoIter = IterMut<'a, A, N>;
#[inline]
#[must_use]
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn is_full() {
let mut chunk = RingBuffer::<_, U64>::new();
for i in 0..64 {
assert_eq!(false, chunk.is_full());
chunk.push_back(i);
}
assert_eq!(true, chunk.is_full());
}
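    // Exercise a buffer whose occupied region wraps around the end of the
    // backing array: pop from the front and push to the back until the origin
    // has moved halfway around.
    #[test]
    fn push_and_pop_wrapping() {
        let mut chunk = RingBuffer::<_, U64>::new();
        for i in 0..64 {
            chunk.push_back(i);
        }
        for i in 0..32 {
            assert_eq!(Some(i), chunk.pop_front());
            chunk.push_back(i + 64);
        }
        assert!(chunk.is_full());
        assert_eq!(chunk, (32..96).collect::<Vec<i32>>());
    }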
#[test]
fn ref_iter() {
let chunk: RingBuffer<i32> = (0..64).collect();
let out_vec: Vec<&i32> = chunk.iter().collect();
let should_vec_p: Vec<i32> = (0..64).collect();
let should_vec: Vec<&i32> = should_vec_p.iter().collect();
assert_eq!(should_vec, out_vec);
}
#[test]
fn mut_ref_iter() {
let mut chunk: RingBuffer<i32> = (0..64).collect();
let out_vec: Vec<&mut i32> = chunk.iter_mut().collect();
let mut should_vec_p: Vec<i32> = (0..64).collect();
let should_vec: Vec<&mut i32> = should_vec_p.iter_mut().collect();
assert_eq!(should_vec, out_vec);
}
#[test]
fn consuming_iter() {
let chunk: RingBuffer<i32> = (0..64).collect();
let out_vec: Vec<i32> = chunk.into_iter().collect();
let should_vec: Vec<i32> = (0..64).collect();
assert_eq!(should_vec, out_vec);
}
#[test]
fn draining_iter() {
let mut chunk: RingBuffer<i32> = (0..64).collect();
let mut half: RingBuffer<i32> = chunk.drain().take(16).collect();
half.extend(chunk.drain().rev().take(16));
let should: Vec<i32> = (16..48).collect();
assert_eq!(chunk, should);
let should: Vec<i32> = (0..16).chain((48..64).rev()).collect();
assert_eq!(half, should);
}
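    // `split_off` keeps the first `index` elements and moves the rest into a
    // new buffer; `append` then drains them back, restoring the original.
    #[test]
    fn split_and_append() {
        let mut left: RingBuffer<i32> = (0..32).collect();
        let mut right = left.split_off(16);
        assert_eq!(left, (0..16).collect::<Vec<i32>>());
        assert_eq!(right, (16..32).collect::<Vec<i32>>());
        left.append(&mut right);
        assert!(right.is_empty());
        assert_eq!(left, (0..32).collect::<Vec<i32>>());
    }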
#[test]
fn io_write() {
use std::io::Write;
let mut buffer: RingBuffer<u8> = (0..32).collect();
let to_write: Vec<u8> = (32..128).collect();
assert_eq!(32, buffer.write(&to_write).unwrap());
assert_eq!(buffer, (0..64).collect::<Vec<u8>>());
}
#[test]
fn io_read() {
use std::io::Read;
let mut buffer: RingBuffer<u8> = (16..48).collect();
let mut read_buf: Vec<u8> = (0..16).collect();
assert_eq!(16, buffer.read(&mut read_buf).unwrap());
assert_eq!(read_buf, (16..32).collect::<Vec<u8>>());
assert_eq!(buffer, (32..48).collect::<Vec<u8>>());
assert_eq!(16, buffer.read(&mut read_buf).unwrap());
assert_eq!(read_buf, (32..48).collect::<Vec<u8>>());
assert_eq!(buffer, vec![]);
assert_eq!(0, buffer.read(&mut read_buf).unwrap());
}
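    // `from_front` and `from_back` drain from opposite ends of the source
    // buffer while preserving element order in the new buffers. The back case
    // relies on the origin index wrapping below zero, just as `push_front`
    // does on an empty buffer.
    #[test]
    fn drain_from_either_end() {
        let mut source: RingBuffer<i32> = (0..32).collect();
        let front = RingBuffer::from_front(&mut source, 8);
        let back = RingBuffer::from_back(&mut source, 8);
        assert_eq!(front, (0..8).collect::<Vec<i32>>());
        assert_eq!(back, (24..32).collect::<Vec<i32>>());
        assert_eq!(source, (8..24).collect::<Vec<i32>>());
    }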
#[test]
fn clone() {
let buffer: RingBuffer<u32> = (0..50).collect();
assert_eq!(buffer, buffer.clone());
}
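    // Regression-style check: `push_front` on an empty buffer wraps the origin
    // to the end of the backing array, so the subsequent `append` and `remove`
    // have to work across the wrap point.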
#[test]
fn failing() {
let mut buffer: RingBuffer<u32> = RingBuffer::new();
buffer.push_front(0);
let mut add: RingBuffer<u32> = vec![1, 0, 0, 0, 0, 0].into_iter().collect();
buffer.append(&mut add);
assert_eq!(1, buffer.remove(1));
let expected = vec![0, 0, 0, 0, 0, 0];
assert_eq!(buffer, expected);
}
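    // `insert` shifts whichever side of the index holds fewer elements; hit
    // both paths by inserting near the front and near the back, then undo the
    // insertions with `remove`.
    #[test]
    fn insert_and_remove() {
        let mut chunk: RingBuffer<i32> = (0..10).collect();
        chunk.insert(2, 100);
        assert_eq!(chunk, vec![0, 1, 100, 2, 3, 4, 5, 6, 7, 8, 9]);
        chunk.insert(9, 200);
        assert_eq!(chunk, vec![0, 1, 100, 2, 3, 4, 5, 6, 7, 200, 8, 9]);
        assert_eq!(200, chunk.remove(9));
        assert_eq!(100, chunk.remove(2));
        assert_eq!(chunk, (0..10).collect::<Vec<i32>>());
    }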
use crate::tests::DropTest;
use std::sync::atomic::{AtomicUsize, Ordering};
#[test]
fn dropping() {
let counter = AtomicUsize::new(0);
{
let mut chunk: RingBuffer<DropTest<'_>> = RingBuffer::new();
for _i in 0..20 {
chunk.push_back(DropTest::new(&counter))
}
for _i in 0..20 {
chunk.push_front(DropTest::new(&counter))
}
assert_eq!(40, counter.load(Ordering::Relaxed));
for _i in 0..10 {
chunk.pop_back();
}
assert_eq!(30, counter.load(Ordering::Relaxed));
}
assert_eq!(0, counter.load(Ordering::Relaxed));
}
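    // `drop_left` discards a prefix in place and `drop_right` truncates to a
    // length, neither returning the dropped values.
    #[test]
    fn drop_left_and_right() {
        let mut chunk: RingBuffer<i32> = (0..32).collect();
        chunk.drop_left(8);
        assert_eq!(chunk, (8..32).collect::<Vec<i32>>());
        chunk.drop_right(16);
        assert_eq!(chunk, (8..24).collect::<Vec<i32>>());
        chunk.clear();
        assert!(chunk.is_empty());
    }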
}