zlib_rs/allocate.rs

#[cfg(unix)]
use core::ffi::c_int;
use core::{
    alloc::Layout,
    ffi::{c_uint, c_void},
    marker::PhantomData,
    ptr::NonNull,
};

#[cfg(feature = "rust-allocator")]
use alloc::alloc::GlobalAlloc;

#[allow(non_camel_case_types)]
type size_t = usize;

/// # Safety
///
/// This function is safe, but must have this type signature to be used elsewhere in the library
#[cfg(unix)]
unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void {
    let _ = opaque;

    extern "C" {
        fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int;
    }

    let mut ptr = core::ptr::null_mut();
    match posix_memalign(&mut ptr, 64, items as size_t * size as size_t) {
        0 => ptr,
        _ => core::ptr::null_mut(),
    }
}
33
34/// # Safety
35///
36/// This function is safe, but must have this type signature to be used elsewhere in the library
37#[cfg(not(unix))]
38unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void {
39    let _ = opaque;
40
41    extern "C" {
42        fn malloc(size: size_t) -> *mut c_void;
43    }
44
45    malloc(items as size_t * size as size_t)
46}
47
48/// # Safety
49///
50/// This function is safe, but must have this type signature to be used elsewhere in the library
51unsafe extern "C" fn zalloc_c_calloc(
52    opaque: *mut c_void,
53    items: c_uint,
54    size: c_uint,
55) -> *mut c_void {
56    let _ = opaque;
57
58    extern "C" {
59        fn calloc(nitems: size_t, size: size_t) -> *mut c_void;
60    }
61
62    calloc(items as size_t, size as size_t)
63}
64
/// # Safety
///
/// The `ptr` must have been allocated by the C allocator (`malloc`, `calloc`, or
/// `posix_memalign`), because it is passed to C's `free`
unsafe extern "C" fn zfree_c(opaque: *mut c_void, ptr: *mut c_void) {
    let _ = opaque;

    extern "C" {
        fn free(p: *mut c_void);
    }

    unsafe { free(ptr) }
}

/// # Safety
///
/// This function is safe to call.
#[cfg(feature = "rust-allocator")]
unsafe extern "C" fn zalloc_rust(_opaque: *mut c_void, count: c_uint, size: c_uint) -> *mut c_void {
    let align = 64;
    let size = count as usize * size as usize;

    // internally, we want to align allocations to 64 bytes (in part for SIMD reasons)
    let layout = Layout::from_size_align(size, align).unwrap();

    let ptr = std::alloc::System.alloc(layout);

    ptr as *mut c_void
}

/// # Safety
///
/// - `ptr` must be allocated with the rust `alloc::System` allocator
/// - `opaque` is a `&usize` that represents the size of the allocation
#[cfg(feature = "rust-allocator")]
unsafe extern "C" fn zfree_rust(opaque: *mut c_void, ptr: *mut c_void) {
    if ptr.is_null() {
        return;
    }

    // we can't really do much else. Deallocating with an invalid layout is UB.
    debug_assert!(!opaque.is_null());
    if opaque.is_null() {
        return;
    }

    let size = *(opaque as *mut usize);
    let align = 64;

    let layout = Layout::from_size_align(size, align).unwrap();

    std::alloc::System.dealloc(ptr.cast(), layout);
}

#[cfg(test)]
unsafe extern "C" fn zalloc_fail(_: *mut c_void, _: c_uint, _: c_uint) -> *mut c_void {
    core::ptr::null_mut()
}

#[cfg(test)]
unsafe extern "C" fn zfree_fail(_: *mut c_void, _: *mut c_void) {
    // do nothing
}

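/// A zlib-style allocator: an allocation callback, a deallocation callback, and a
/// user-provided `opaque` pointer that is passed through to both callbacks.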
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Allocator<'a> {
    pub zalloc: crate::c_api::alloc_func,
    pub zfree: crate::c_api::free_func,
    pub opaque: crate::c_api::voidpf,
    pub _marker: PhantomData<&'a ()>,
}

impl Allocator<'static> {
    #[cfg(feature = "rust-allocator")]
    pub const RUST: Self = Self {
        zalloc: zalloc_rust,
        zfree: zfree_rust,
        opaque: core::ptr::null_mut(),
        _marker: PhantomData,
    };

    #[cfg(feature = "c-allocator")]
    pub const C: Self = Self {
        zalloc: zalloc_c,
        zfree: zfree_c,
        opaque: core::ptr::null_mut(),
        _marker: PhantomData,
    };

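    /// An allocator whose `zalloc` always fails; used to test out-of-memory handling.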
    #[cfg(test)]
    const FAIL: Self = Self {
        zalloc: zalloc_fail,
        zfree: zfree_fail,
        opaque: core::ptr::null_mut(),
        _marker: PhantomData,
    };
}

impl Allocator<'_> {
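    /// Allocates `layout.size()` bytes with (at least) the alignment of `layout`.
    ///
    /// Returns a null pointer if the underlying `zalloc` fails. A non-null result
    /// must be freed with [`Allocator::deallocate`] on the same allocator.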
    pub fn allocate_layout(&self, layout: Layout) -> *mut c_void {
        // Special case for the Rust `alloc` backed allocator
        #[cfg(feature = "rust-allocator")]
        if self.zalloc == Allocator::RUST.zalloc {
            let ptr = unsafe { (Allocator::RUST.zalloc)(self.opaque, layout.size() as _, 1) };

            debug_assert_eq!(ptr as usize % layout.align(), 0);

            return ptr;
        }

        // General case for c-style allocation

        // We cannot rely on the allocator giving properly aligned allocations and have to fix that ourselves.
        //
        // The general approach is to allocate a bit more than the layout needs, so that we can
        // give the application a properly aligned address and also store the real allocation
        // pointer in the allocation so that `free` can free the real allocation pointer.
        //
        // Example: The layout represents `(u32, u32)`, with an alignment of 4 bytes and a
        // total size of 8 bytes.
        //
        // Assume that the allocator will give us address `0x07`. We need that to be a multiple
        // of the alignment, so that shifts the starting position to `0x08`. Then we also need
        // to store the pointer to the start of the allocation so that `free` can free that
        // pointer, bumping to `0x10`. The `0x10` pointer is then the pointer that the application
        // deals with. When free'ing, the original allocation pointer can be read from `0x10 - size_of::<*const c_void>()`.
        //
        // Of course there does need to be enough space in the allocation such that when we
        // shift the start forwards, the end is still within the allocation. Hence we allocate
        // `extra_space` bytes: enough for a full alignment plus a pointer.

        // we need at least
        //
        // - `align` extra space so that no matter what pointer we get from zalloc, we can shift the start of the
        //      allocation by at most `align - 1` so that `ptr as usize % align == 0`
        // - `size_of::<*mut _>` extra space so that after aligning to `align`,
        //      there is `size_of::<*mut _>` space to store the pointer to the allocation.
        //      This pointer is then retrieved in `free`
        let extra_space = core::mem::size_of::<*mut c_void>() + layout.align();

        // Safety: we assume allocating works correctly in the safety assumptions on
        // `DeflateStream` and `InflateStream`.
        let ptr = unsafe { (self.zalloc)(self.opaque, (layout.size() + extra_space) as _, 1) };

        if ptr.is_null() {
            return ptr;
        }

        // Calculate return pointer address with space enough to store original pointer
        let align_diff = (ptr as usize).next_multiple_of(layout.align()) - (ptr as usize);

        // Safety: `align_diff` is at most `layout.align() - 1`, and we allocated
        // `layout.align()` extra bytes (on top of the pointer space), so `ptr + align_diff`
        // is still within the allocation
        let mut return_ptr = unsafe { ptr.cast::<u8>().add(align_diff) };

        // if there is not enough space before `return_ptr` to store a pointer, shift it forwards
        if align_diff < core::mem::size_of::<*mut c_void>() {
            // # Safety
            //
            // - `offset` is a multiple of `layout.align()` (both candidates are powers of two),
            //      so `return_ptr + offset` is still well-aligned
            // - we reserve `size_of::<*mut _> + align` extra space in the allocation, so
            //      `ptr + align_diff + offset` is still valid for (at least) `layout.size` bytes
            let offset = Ord::max(core::mem::size_of::<*mut c_void>(), layout.align());
            return_ptr = unsafe { return_ptr.add(offset) };
        }

        // Store the original pointer for free()
        //
        // Safety: `return_ptr` is at least `size_of::<*mut _>` bytes past `ptr`, so there is
        // space for a pointer directly before `return_ptr` in the allocation
        unsafe {
            let original_ptr = return_ptr.sub(core::mem::size_of::<*mut c_void>());
            core::ptr::write_unaligned(original_ptr.cast::<*mut c_void>(), ptr);
        };

        // Return properly aligned pointer in allocation
        let ptr = return_ptr.cast::<c_void>();

        debug_assert_eq!(ptr as usize % layout.align(), 0);

        ptr
    }

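    /// Allocates memory for a single `T`, or `None` if the allocation fails.
    /// The memory is uninitialized.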
    pub fn allocate_raw<T>(&self) -> Option<NonNull<T>> {
        NonNull::new(self.allocate_layout(Layout::new::<T>()).cast())
    }

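    /// Allocates memory for `len` values of type `T`, or `None` if the allocation
    /// fails or the layout computation overflows. The memory is uninitialized.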
    pub fn allocate_slice_raw<T>(&self, len: usize) -> Option<NonNull<T>> {
        NonNull::new(self.allocate_layout(Layout::array::<T>(len).ok()?).cast())
    }

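    /// Allocates `len` bytes that are guaranteed to be zero-initialized, or `None`
    /// if the allocation fails.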
    pub fn allocate_zeroed(&self, len: usize) -> Option<NonNull<u8>> {
        #[cfg(feature = "rust-allocator")]
        if self.zalloc == Allocator::RUST.zalloc {
            // internally, we want to align allocations to 64 bytes (in part for SIMD reasons)
            let layout = Layout::from_size_align(len, 64).unwrap();

            return NonNull::new(unsafe { std::alloc::System.alloc_zeroed(layout) });
        }

        #[cfg(feature = "c-allocator")]
        if self.zalloc == Allocator::C.zalloc {
            let alloc = Allocator {
                zalloc: zalloc_c_calloc,
                zfree: zfree_c,
                opaque: core::ptr::null_mut(),
                _marker: PhantomData,
            };

            let ptr = alloc.allocate_layout(Layout::array::<u8>(len).unwrap());

            return NonNull::new(ptr.cast());
        }

        // create the allocation (contents are uninitialized)
        let ptr = self.allocate_layout(Layout::array::<u8>(len).unwrap());

        let ptr = NonNull::new(ptr)?;

        // zero all contents (thus initializing the buffer)
        unsafe { core::ptr::write_bytes(ptr.as_ptr(), 0, len) };

        Some(ptr.cast())
    }

    /// # Panics
    ///
    /// - when `len` is 0
    ///
    /// # Safety
    ///
    /// - `ptr` must be allocated with this allocator
    /// - `len` must be the number of `T`s that are in this allocation
    #[allow(unused)] // `len` is only used by the rust-allocator code path
    pub unsafe fn deallocate<T>(&self, ptr: *mut T, len: usize) {
        if !ptr.is_null() {
            // Special case for the Rust `alloc` backed allocator
            #[cfg(feature = "rust-allocator")]
            if self.zfree == Allocator::RUST.zfree {
                assert_ne!(len, 0, "invalid size for {:?}", ptr);
                let mut size = core::mem::size_of::<T>() * len;
                return (Allocator::RUST.zfree)(&mut size as *mut usize as *mut c_void, ptr.cast());
            }

            // General case for c-style allocation: read back the original allocation
            // pointer that `allocate_layout` stored just before `ptr`, and free that
            let original_ptr = (ptr as *mut u8).sub(core::mem::size_of::<*const c_void>());
            let free_ptr = core::ptr::read_unaligned(original_ptr as *mut *mut c_void);

            (self.zfree)(self.opaque, free_ptr)
        }
    }
}

#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicPtr, Ordering};
    use std::sync::Mutex;

    use super::*;

    static PTR: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
    static MUTEX: Mutex<()> = Mutex::new(());

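    // Hands out whatever pointer is currently stored in `PTR`; the tests below point it
    // at deliberately misaligned addresses inside a stack buffer.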
    unsafe extern "C" fn unaligned_alloc(
        _opaque: *mut c_void,
        _items: c_uint,
        _size: c_uint,
    ) -> *mut c_void {
        PTR.load(Ordering::Relaxed)
    }

    unsafe extern "C" fn unaligned_free(_opaque: *mut c_void, ptr: *mut c_void) {
        let expected = PTR.load(Ordering::Relaxed);
        assert_eq!(expected, ptr)
    }

    fn unaligned_allocator_help<T>() {
        let mut buf = [0u8; 1024];

        // we don't want anyone else messing with the PTR static
        let _guard = MUTEX.lock().unwrap();

        for i in 0..64 {
            let ptr = unsafe { buf.as_mut_ptr().add(i).cast() };
            PTR.store(ptr, Ordering::Relaxed);

            let allocator = Allocator {
                zalloc: unaligned_alloc,
                zfree: unaligned_free,
                opaque: core::ptr::null_mut(),
                _marker: PhantomData,
            };

            let ptr = allocator.allocate_raw::<T>().unwrap().as_ptr();
            assert_eq!(ptr as usize % core::mem::align_of::<T>(), 0);
            unsafe { allocator.deallocate(ptr, 1) }

            let ptr = allocator.allocate_slice_raw::<T>(10).unwrap().as_ptr();
            assert_eq!(ptr as usize % core::mem::align_of::<T>(), 0);
            unsafe { allocator.deallocate(ptr, 10) }
        }
    }

    #[test]
    fn unaligned_allocator_0() {
        unaligned_allocator_help::<()>()
    }

    #[test]
    fn unaligned_allocator_1() {
        unaligned_allocator_help::<u8>()
    }

    #[test]
    fn unaligned_allocator_2() {
        unaligned_allocator_help::<u16>()
    }

    #[test]
    fn unaligned_allocator_4() {
        unaligned_allocator_help::<u32>()
    }

    #[test]
    fn unaligned_allocator_8() {
        unaligned_allocator_help::<u64>()
    }

    #[test]
    fn unaligned_allocator_16() {
        unaligned_allocator_help::<u128>()
    }

    #[test]
    fn unaligned_allocator_32() {
        #[repr(C, align(32))]
        struct Align32(u8);

        unaligned_allocator_help::<Align32>()
    }

    #[test]
    fn unaligned_allocator_64() {
        #[repr(C, align(64))]
        struct Align64(u8);

        unaligned_allocator_help::<Align64>()
    }

    fn test_allocate_zeroed_help(allocator: Allocator) {
        let len = 42;
        let Some(buf) = allocator.allocate_zeroed(len) else {
            return;
        };

        let slice = unsafe { core::slice::from_raw_parts_mut(buf.as_ptr(), len) };

        assert_eq!(slice.iter().sum::<u8>(), 0);

        unsafe { allocator.deallocate(buf.as_ptr(), len) };
    }

    #[test]
    fn test_allocate_zeroed() {
        #[cfg(feature = "rust-allocator")]
        test_allocate_zeroed_help(Allocator::RUST);

        #[cfg(feature = "c-allocator")]
        test_allocate_zeroed_help(Allocator::C);

        test_allocate_zeroed_help(Allocator::FAIL);
    }

    #[test]
    fn test_deallocate_null() {
        unsafe {
            #[cfg(feature = "rust-allocator")]
            (Allocator::RUST.zfree)(core::ptr::null_mut(), core::ptr::null_mut());

            #[cfg(feature = "c-allocator")]
            (Allocator::C.zfree)(core::ptr::null_mut(), core::ptr::null_mut());

            (Allocator::FAIL.zfree)(core::ptr::null_mut(), core::ptr::null_mut());
        }
    }
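
    // A small usage sketch (not part of the original test suite): allocate a slice
    // through the C-style path, write to it, and free it again. The element type and
    // length here are arbitrary.
    #[cfg(feature = "c-allocator")]
    #[test]
    fn c_allocator_roundtrip() {
        let allocator = Allocator::C;

        let len = 37;
        let ptr = allocator.allocate_slice_raw::<u32>(len).unwrap().as_ptr();
        assert_eq!(ptr as usize % core::mem::align_of::<u32>(), 0);

        // the allocation is valid for writes of `len` elements
        unsafe { core::ptr::write_bytes(ptr, 0xAA, len) };
        assert_eq!(unsafe { ptr.add(len - 1).read() }, 0xAAAAAAAA);

        unsafe { allocator.deallocate(ptr, len) };
    }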
}