#[cfg(unix)]
use core::ffi::c_int;
use core::{
alloc::Layout,
ffi::{c_uint, c_void},
marker::PhantomData,
};
#[cfg(feature = "rust-allocator")]
use alloc::alloc::GlobalAlloc;
#[allow(non_camel_case_types)]
type size_t = usize;
#[cfg(unix)]
/// `zalloc` implementation backed by `posix_memalign`, so the returned
/// memory is 64-byte aligned (cache-line / wide-SIMD friendly).
///
/// Returns null on allocation failure, and also when `items * size`
/// overflows `usize` — on 32-bit targets the unchecked product of two
/// `c_uint`s can wrap and silently under-allocate.
unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void {
    let _ = opaque;

    extern "C" {
        // NOTE: `size_t` in C is `usize` here (see the alias at the top of
        // this file); spelled out for the standalone declaration.
        fn posix_memalign(memptr: *mut *mut c_void, align: usize, size: usize) -> c_int;
    }

    // Reject a wrapped byte count instead of under-allocating.
    let Some(len) = (items as usize).checked_mul(size as usize) else {
        return core::ptr::null_mut();
    };

    let mut ptr = core::ptr::null_mut();
    match posix_memalign(&mut ptr, 64, len) {
        0 => ptr,
        _ => core::ptr::null_mut(),
    }
}
#[cfg(not(unix))]
/// Fallback `zalloc` implementation for non-unix targets, backed by plain
/// `malloc`.
///
/// `malloc` makes no 64-byte alignment promise; `Allocator::allocate_layout`
/// re-aligns the pointer itself for allocators like this one.
///
/// Returns null on allocation failure, and also when `items * size`
/// overflows `usize` (possible on 32-bit targets), instead of wrapping and
/// under-allocating.
unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void {
    let _ = opaque;

    extern "C" {
        fn malloc(size: usize) -> *mut c_void;
    }

    match (items as usize).checked_mul(size as usize) {
        Some(len) => malloc(len),
        None => core::ptr::null_mut(),
    }
}
/// Zero-initializing `zalloc` implementation backed by C's `calloc`.
///
/// `calloc` takes the element count and element size separately, so no
/// multiplication happens on our side; the returned memory is guaranteed to
/// be zeroed but carries only `calloc`'s own alignment guarantee.
unsafe extern "C" fn zalloc_c_calloc(
    opaque: *mut c_void,
    items: c_uint,
    size: c_uint,
) -> *mut c_void {
    extern "C" {
        fn calloc(nitems: usize, size: usize) -> *mut c_void;
    }

    // The context pointer is not used by this allocator.
    let _ = opaque;

    let (nitems, item_size) = (items as usize, size as usize);
    calloc(nitems, item_size)
}
/// `zfree` implementation backed by libc `free`.
///
/// The `opaque` context pointer is unused. Passing a null `ptr` is fine:
/// `free(NULL)` is defined to do nothing.
unsafe extern "C" fn zfree_c(opaque: *mut c_void, ptr: *mut c_void) {
    extern "C" {
        fn free(p: *mut c_void);
    }

    let _ = opaque;

    // SAFETY: `ptr` is null or was produced by the matching C allocation
    // callbacks, per the zlib allocator contract.
    unsafe { free(ptr) }
}
#[cfg(feature = "rust-allocator")]
/// `zalloc` implementation backed by Rust's global `System` allocator, with
/// a fixed 64-byte alignment (matched by `zfree_rust`).
///
/// Returns null on any failure instead of panicking: unwinding out of an
/// `extern "C"` function is not allowed, so the previous
/// `Layout::from_size_align(..).unwrap()` and the (debug-mode) overflow
/// panic in `count * size` were both FFI hazards. A zero-byte request also
/// returns null, because calling `GlobalAlloc::alloc` with a zero-size
/// layout violates that trait's safety contract.
unsafe extern "C" fn zalloc_rust(_opaque: *mut c_void, count: c_uint, size: c_uint) -> *mut c_void {
    let align = 64;

    // Reject a wrapped byte count (possible on 32-bit targets).
    let Some(len) = (count as usize).checked_mul(size as usize) else {
        return core::ptr::null_mut();
    };

    // `GlobalAlloc::alloc` requires a non-zero size.
    if len == 0 {
        return core::ptr::null_mut();
    }

    let Ok(layout) = Layout::from_size_align(len, align) else {
        return core::ptr::null_mut();
    };

    let ptr = std::alloc::System.alloc(layout);

    ptr as *mut c_void
}
#[cfg(feature = "rust-allocator")]
/// `zfree` implementation backed by Rust's global `System` allocator.
///
/// Contract: `opaque` must point to a `usize` holding the size in bytes of
/// the allocation being freed — `GlobalAlloc::dealloc` needs the original
/// `Layout`, and `Allocator::deallocate` passes the size this way. The
/// alignment is always 64, matching `zalloc_rust`.
unsafe extern "C" fn zfree_rust(opaque: *mut c_void, ptr: *mut c_void) {
    // Freeing a null pointer is a no-op, mirroring `free(NULL)`.
    if ptr.is_null() {
        return;
    }

    // A null `opaque` means the caller did not supply the allocation size;
    // leak the memory rather than deallocate with a bogus layout.
    debug_assert!(!opaque.is_null());
    if opaque.is_null() {
        return;
    }

    // `opaque` is trusted to point at a valid `usize` (see contract above).
    let size = *(opaque as *mut usize);

    let align = 64;
    let layout = Layout::from_size_align(size, align);
    // NOTE(review): this `unwrap` can only fail when `size` overflows once
    // rounded up to `align`, but a panic here would unwind out of an
    // `extern "C"` fn — worth confirming this is acceptable upstream.
    let layout = layout.unwrap();

    std::alloc::System.dealloc(ptr.cast(), layout);
}
#[cfg(test)]
/// Test-only `zalloc` that always reports allocation failure (returns null).
unsafe extern "C" fn zalloc_fail(_: *mut c_void, _: c_uint, _: c_uint) -> *mut c_void {
    core::ptr::null_mut()
}
#[cfg(test)]
/// Test-only `zfree` paired with `zalloc_fail`: nothing is ever allocated,
/// so there is nothing to free.
unsafe extern "C" fn zfree_fail(_: *mut c_void, _: *mut c_void) {
}
/// A zlib-style allocator: a pair of C-ABI allocate/free callbacks plus an
/// `opaque` context pointer that is forwarded to both.
///
/// `#[repr(C)]` fixes the field order/layout — presumably to line up with
/// the corresponding `z_stream` fields on the C side; confirm against
/// `crate::c_api`. The lifetime `'a` ties the allocator to whatever data
/// `opaque` may borrow; `PhantomData` stores nothing at runtime.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Allocator<'a> {
    // Allocation callback: `zalloc(opaque, items, size)` returns a pointer
    // to at least `items * size` bytes, or null on failure.
    pub zalloc: crate::c_api::alloc_func,
    // Deallocation callback: `zfree(opaque, ptr)`.
    pub zfree: crate::c_api::free_func,
    // User-supplied context pointer handed to both callbacks.
    pub opaque: crate::c_api::voidpf,
    // Zero-sized lifetime marker; see the struct docs.
    pub _marker: PhantomData<&'a ()>,
}
impl Allocator<'static> {
    /// Allocator backed by Rust's global `System` allocator (64-byte
    /// aligned). Its `zfree` expects the allocation size via `opaque`;
    /// `Allocator::deallocate` supplies it.
    #[cfg(feature = "rust-allocator")]
    pub const RUST: Self = Self {
        zalloc: zalloc_rust,
        zfree: zfree_rust,
        opaque: core::ptr::null_mut(),
        _marker: PhantomData,
    };

    /// Allocator backed by the C runtime (`posix_memalign` on unix, plain
    /// `malloc` elsewhere, paired with `free`).
    #[cfg(feature = "c-allocator")]
    pub const C: Self = Self {
        zalloc: zalloc_c,
        zfree: zfree_c,
        opaque: core::ptr::null_mut(),
        _marker: PhantomData,
    };

    /// Test-only allocator whose allocations always fail (return null).
    #[cfg(test)]
    const FAIL: Self = Self {
        zalloc: zalloc_fail,
        zfree: zfree_fail,
        opaque: core::ptr::null_mut(),
        _marker: PhantomData,
    };
}
impl<'a> Allocator<'a> {
    /// Allocates memory for `layout` through the `zalloc` callback,
    /// returning a pointer aligned to `layout.align()`, or null on failure.
    ///
    /// User-provided `zalloc` callbacks make no alignment promise, so this
    /// over-allocates by `size_of::<*mut c_void>() + layout.align()` bytes,
    /// bumps the result up to the next aligned address, and stashes
    /// `zalloc`'s original pointer in the bytes just before the returned
    /// address so `deallocate` can recover and free it.
    pub fn allocate_layout(&self, layout: Layout) -> *mut c_void {
        // Fast path: the Rust system allocator already returns 64-byte
        // aligned memory (see `zalloc_rust`), so no stash/bump is needed —
        // and `deallocate` special-cases this allocator symmetrically.
        // NOTE(review): `layout.size() as _` truncates to `c_uint`; assumed
        // fine for zlib-sized buffers — confirm for >4 GiB requests. Also
        // assumes `layout.align() <= 64` (checked only by the debug_assert).
        #[cfg(feature = "rust-allocator")]
        if self.zalloc == Allocator::RUST.zalloc {
            let ptr = unsafe { (Allocator::RUST.zalloc)(self.opaque, layout.size() as _, 1) };
            debug_assert_eq!(ptr as usize % layout.align(), 0);
            return ptr;
        }

        // Slack for the stashed original pointer plus the worst-case
        // alignment adjustment.
        let extra_space = core::mem::size_of::<*mut c_void>() + layout.align();

        let ptr = unsafe { (self.zalloc)(self.opaque, (layout.size() + extra_space) as _, 1) };
        if ptr.is_null() {
            return ptr;
        }

        // Distance from `ptr` up to the next `layout.align()` boundary
        // (zero when `ptr` happens to be aligned already).
        let align_diff = (ptr as usize).next_multiple_of(layout.align()) - (ptr as usize);

        let mut return_ptr = unsafe { ptr.cast::<u8>().add(align_diff) };

        // If the gap is too small to hold the stashed pointer, skip ahead by
        // max(pointer size, align) — a multiple of the alignment (both are
        // powers of two), so the result stays aligned and still fits within
        // `extra_space`.
        if align_diff < core::mem::size_of::<*mut c_void>() {
            let offset = Ord::max(core::mem::size_of::<*mut c_void>(), layout.align());
            return_ptr = unsafe { return_ptr.add(offset) };
        }

        // Stash the original (possibly unaligned) pointer in the pointer-
        // sized slot right before the address we hand out. The slot itself
        // may be unaligned, hence `write_unaligned`.
        unsafe {
            let original_ptr = return_ptr.sub(core::mem::size_of::<*mut c_void>());
            core::ptr::write_unaligned(original_ptr.cast::<*mut c_void>(), ptr);
        };

        let ptr = return_ptr.cast::<c_void>();
        debug_assert_eq!(ptr as usize % layout.align(), 0);
        ptr
    }

    /// Allocates (uninitialized) memory for a single `T`; `None` when the
    /// underlying `zalloc` fails.
    pub fn allocate_raw<T>(&self) -> Option<*mut T> {
        let ptr = self.allocate_layout(Layout::new::<T>());
        if ptr.is_null() {
            None
        } else {
            Some(ptr as *mut T)
        }
    }

    /// Allocates (uninitialized) memory for `len` elements of `T`; `None`
    /// when the array layout overflows or the underlying `zalloc` fails.
    pub fn allocate_slice_raw<T>(&self, len: usize) -> Option<*mut T> {
        let ptr = self.allocate_layout(Layout::array::<T>(len).ok()?);
        if ptr.is_null() {
            None
        } else {
            Some(ptr.cast())
        }
    }

    /// Allocates `len` bytes of zero-initialized memory; null on failure.
    pub fn allocate_zeroed(&self, len: usize) -> *mut u8 {
        // Rust allocator: `alloc_zeroed` aligns (to 64) and zeroes in one go.
        // NOTE(review): a zero-size layout here would violate the
        // `GlobalAlloc` contract — confirm callers never pass `len == 0`.
        #[cfg(feature = "rust-allocator")]
        if self.zalloc == Allocator::RUST.zalloc {
            let layout = Layout::from_size_align(len, 64).unwrap();
            return unsafe { std::alloc::System.alloc_zeroed(layout) };
        }

        // C allocator: substitute the `calloc`-based callback so the memory
        // comes back already zeroed; `zfree_c` still frees it correctly via
        // the stashed original pointer.
        #[cfg(feature = "c-allocator")]
        if self.zalloc == Allocator::C.zalloc {
            let alloc = Allocator {
                zalloc: zalloc_c_calloc,
                zfree: zfree_c,
                opaque: core::ptr::null_mut(),
                _marker: PhantomData,
            };
            let ptr = alloc.allocate_layout(Layout::array::<u8>(len).ok().unwrap());
            if ptr.is_null() {
                return core::ptr::null_mut();
            }
            return ptr.cast();
        }

        // Unknown caller-provided allocator: allocate, then zero manually.
        let ptr = self.allocate_layout(Layout::array::<u8>(len).ok().unwrap());
        if ptr.is_null() {
            return core::ptr::null_mut();
        }
        unsafe { core::ptr::write_bytes(ptr, 0, len) };
        ptr.cast()
    }

    /// Deallocates a pointer previously returned by one of this allocator's
    /// `allocate_*` methods. Null `ptr` is a no-op.
    ///
    /// # Safety
    ///
    /// `ptr` must come from the `allocate_*` methods of an allocator with
    /// the same callbacks, and `len` must be the element count it was
    /// allocated for.
    #[allow(unused)] pub unsafe fn deallocate<T>(&self, ptr: *mut T, len: usize) {
        if !ptr.is_null() {
            // Rust allocator: `zfree_rust` rebuilds the `Layout` from the
            // allocation size, which we pass via the `opaque` argument
            // (a pointer to a stack-local `usize`).
            #[cfg(feature = "rust-allocator")]
            if self.zfree == Allocator::RUST.zfree {
                assert_ne!(len, 0, "invalid size for {:?}", ptr);
                let mut size = core::mem::size_of::<T>() * len;
                return (Allocator::RUST.zfree)(&mut size as *mut usize as *mut c_void, ptr.cast());
            }

            // Other allocators: recover the original `zalloc` pointer that
            // `allocate_layout` stashed just before `ptr`, and free that.
            let original_ptr = (ptr as *mut u8).sub(core::mem::size_of::<*const c_void>());
            let free_ptr = core::ptr::read_unaligned(original_ptr as *mut *mut c_void);

            (self.zfree)(self.opaque, free_ptr)
        }
    }
}
#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicPtr, Ordering};
    use std::sync::Mutex;

    use super::*;

    // Shared slot holding the pointer the stub allocator hands out. It is a
    // process-wide static, so tests that use it serialize through `MUTEX`.
    static PTR: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
    static MUTEX: Mutex<()> = Mutex::new(());

    /// Stub `zalloc` that ignores its arguments and returns whatever
    /// (possibly deliberately misaligned) pointer is stored in `PTR`.
    unsafe extern "C" fn unaligned_alloc(
        _opaque: *mut c_void,
        _items: c_uint,
        _size: c_uint,
    ) -> *mut c_void {
        PTR.load(Ordering::Relaxed)
    }

    /// Stub `zfree` asserting that the pointer handed back for freeing is
    /// exactly the one `unaligned_alloc` produced — i.e. that `deallocate`
    /// correctly recovered the original pointer from the stash.
    unsafe extern "C" fn unaligned_free(_opaque: *mut c_void, ptr: *mut c_void) {
        let expected = PTR.load(Ordering::Relaxed);
        assert_eq!(expected, ptr)
    }

    /// Drives `allocate_raw`/`allocate_slice_raw` with a base pointer at
    /// every offset 0..64 within a local buffer, checking that the returned
    /// pointer is aligned for `T` and that `deallocate` round-trips back to
    /// the base pointer (checked inside `unaligned_free`).
    fn unaligned_allocator_help<T>() {
        let mut buf = [0u8; 1024];

        // Serialize access to the shared `PTR` static.
        let _guard = MUTEX.lock().unwrap();

        for i in 0..64 {
            // Publish the (mis)aligned base pointer before the allocator
            // can be invoked.
            let ptr = unsafe { buf.as_mut_ptr().add(i).cast() };
            PTR.store(ptr, Ordering::Relaxed);

            let allocator = Allocator {
                zalloc: unaligned_alloc,
                zfree: unaligned_free,
                opaque: core::ptr::null_mut(),
                _marker: PhantomData,
            };

            // Single-element allocation.
            let ptr = allocator.allocate_raw::<T>().unwrap();
            assert_eq!(ptr as usize % core::mem::align_of::<T>(), 0);
            unsafe { allocator.deallocate(ptr, 1) }

            // Slice allocation.
            let ptr = allocator.allocate_slice_raw::<T>(10).unwrap();
            assert_eq!(ptr as usize % core::mem::align_of::<T>(), 0);
            unsafe { allocator.deallocate(ptr, 10) }
        }
    }

    // One test per interesting alignment, from ZSTs up to 64-byte alignment.
    #[test]
    fn unaligned_allocator_0() {
        unaligned_allocator_help::<()>()
    }
    #[test]
    fn unaligned_allocator_1() {
        unaligned_allocator_help::<u8>()
    }
    #[test]
    fn unaligned_allocator_2() {
        unaligned_allocator_help::<u16>()
    }
    #[test]
    fn unaligned_allocator_4() {
        unaligned_allocator_help::<u32>()
    }
    #[test]
    fn unaligned_allocator_8() {
        unaligned_allocator_help::<u64>()
    }
    #[test]
    fn unaligned_allocator_16() {
        unaligned_allocator_help::<u128>()
    }
    #[test]
    fn unaligned_allocator_32() {
        #[repr(C, align(32))]
        struct Align32(u8);

        unaligned_allocator_help::<Align32>()
    }
    #[test]
    fn unaligned_allocator_64() {
        #[repr(C, align(64))]
        struct Align64(u8);

        unaligned_allocator_help::<Align64>()
    }

    /// Checks that `allocate_zeroed` returns all-zero memory (when it
    /// succeeds at all — the FAIL allocator returns null, which is fine).
    fn test_allocate_zeroed_help(allocator: Allocator) {
        let len = 42;
        let buf = allocator.allocate_zeroed(len);

        if !buf.is_null() {
            let slice = unsafe { core::slice::from_raw_parts_mut(buf, len) };
            assert_eq!(slice.iter().sum::<u8>(), 0);
        }

        // `deallocate` is a no-op on null, so this is safe for FAIL too.
        unsafe { allocator.deallocate(buf, len) };
    }

    #[test]
    fn test_allocate_zeroed() {
        #[cfg(feature = "rust-allocator")]
        test_allocate_zeroed_help(Allocator::RUST);

        #[cfg(feature = "c-allocator")]
        test_allocate_zeroed_help(Allocator::C);

        test_allocate_zeroed_help(Allocator::FAIL);
    }

    /// All `zfree` implementations must accept a null pointer (mirroring
    /// `free(NULL)`).
    #[test]
    fn test_deallocate_null() {
        unsafe {
            #[cfg(feature = "rust-allocator")]
            (Allocator::RUST.zfree)(core::ptr::null_mut(), core::ptr::null_mut());

            #[cfg(feature = "c-allocator")]
            (Allocator::C.zfree)(core::ptr::null_mut(), core::ptr::null_mut());

            (Allocator::FAIL.zfree)(core::ptr::null_mut(), core::ptr::null_mut());
        }
    }
}