tantivy_bitpacker/
bitpacker.rs

use std::io;
use std::ops::{Range, RangeInclusive};

use bitpacking::{BitPacker as ExternalBitPackerTrait, BitPacker1x};

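/// Writer that bit-packs `u64` values into a byte stream.
///
/// Bits are accumulated in a 64-bit mini buffer and written to the underlying
/// writer, in little-endian byte order, every time the buffer fills up.
/// Call `close` (or `flush`) when done to emit the remaining bits.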
pub struct BitPacker {
    mini_buffer: u64,
    mini_buffer_written: usize,
}

impl Default for BitPacker {
    fn default() -> Self {
        BitPacker::new()
    }
}

impl BitPacker {
    pub fn new() -> BitPacker {
        BitPacker {
            mini_buffer: 0u64,
            mini_buffer_written: 0,
        }
    }

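    /// Writes `val`, encoded over `num_bits` bits, to `output`.
    ///
    /// Bits are accumulated in the 64-bit mini buffer and emitted in
    /// little-endian byte order as soon as a full 8 bytes are available.
    /// The caller is expected to pass a `val` that fits within `num_bits` bits.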
    #[inline]
    pub fn write<TWrite: io::Write + ?Sized>(
        &mut self,
        val: u64,
        num_bits: u8,
        output: &mut TWrite,
    ) -> io::Result<()> {
        let num_bits = num_bits as usize;
        if self.mini_buffer_written + num_bits > 64 {
            self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
            output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
            self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
            self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
        } else {
            self.mini_buffer |= val << self.mini_buffer_written;
            self.mini_buffer_written += num_bits;
            if self.mini_buffer_written == 64 {
                output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
                self.mini_buffer_written = 0;
                self.mini_buffer = 0u64;
            }
        }
        Ok(())
    }

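    /// Writes out the last, possibly partial, bytes of the mini buffer.
    ///
    /// The unused high bits of the final byte are left at zero. After the
    /// call, the mini buffer is empty.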
    pub fn flush<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
        if self.mini_buffer_written > 0 {
            let num_bytes = (self.mini_buffer_written + 7) / 8;
            let bytes = self.mini_buffer.to_le_bytes();
            output.write_all(&bytes[..num_bytes])?;
            self.mini_buffer_written = 0;
            self.mini_buffer = 0;
        }
        Ok(())
    }

    pub fn close<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
        self.flush(output)?;
        Ok(())
    }
}

#[derive(Clone, Debug, Default, Copy)]
pub struct BitUnpacker {
    num_bits: u32,
    mask: u64,
}

impl BitUnpacker {
    /// Creates a bit unpacker that assumes the same bit width for all values.
    ///
    /// The bit unpacker works by doing an unaligned read of 8 bytes.
    /// For this reason, values of `num_bits` in `57..=63` are forbidden:
    /// only bit widths of at most 56 bits, or exactly 64 bits, are accepted.
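    ///
    /// Illustrative round-trip sketch (not from the original source; it
    /// assumes `BitPacker` and `BitUnpacker` are re-exported at the crate root):
    ///
    /// ```ignore
    /// use tantivy_bitpacker::{BitPacker, BitUnpacker};
    ///
    /// let mut buffer: Vec<u8> = Vec::new();
    /// let mut packer = BitPacker::new();
    /// for val in 0u64..10u64 {
    ///     packer.write(val, 5, &mut buffer).unwrap();
    /// }
    /// packer.close(&mut buffer).unwrap();
    ///
    /// let unpacker = BitUnpacker::new(5);
    /// for idx in 0u32..10u32 {
    ///     assert_eq!(unpacker.get(idx, &buffer), u64::from(idx));
    /// }
    /// ```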
    pub fn new(num_bits: u8) -> BitUnpacker {
        assert!(num_bits <= 7 * 8 || num_bits == 64);
        let mask: u64 = if num_bits == 64 {
            !0u64
        } else {
            (1u64 << num_bits) - 1u64
        };
        BitUnpacker {
            num_bits: u32::from(num_bits),
            mask,
        }
    }

    pub fn bit_width(&self) -> u8 {
        self.num_bits as u8
    }

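    /// Returns the `idx`-th value.
    ///
    /// The value starts at bit `idx * num_bits`. We read the 8 bytes starting
    /// at that bit's byte address in one unaligned little-endian load, shift
    /// right by the bit offset within the first byte, and mask out `num_bits`
    /// bits. Near the end of `data`, where fewer than 8 bytes remain, a slower
    /// path copies the available bytes into a zeroed 8-byte buffer first.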
    #[inline]
    pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
        let addr_in_bits = idx * self.num_bits;
        let addr = (addr_in_bits >> 3) as usize;
        if addr + 8 > data.len() {
            if self.num_bits == 0 {
                return 0;
            }
            let bit_shift = addr_in_bits & 7;
            return self.get_slow_path(addr, bit_shift, data);
        }
        let bit_shift = addr_in_bits & 7;
        let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
        let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
        let val_shifted = val_unshifted_unmasked >> bit_shift;
        val_shifted & self.mask
    }

    #[inline(never)]
    fn get_slow_path(&self, addr: usize, bit_shift: u32, data: &[u8]) -> u64 {
        let mut bytes: [u8; 8] = [0u8; 8];
        let available_bytes = data.len() - addr;
        // This function is only meant to be called when fewer than 8 bytes are left to load.
        debug_assert!(available_bytes < 8);
        bytes[..available_bytes].copy_from_slice(&data[addr..]);
        let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
        let val_shifted = val_unshifted_unmasked >> bit_shift;
        val_shifted & self.mask
    }

    // Decodes the range of bitpacked `u32` values with idx
    // in [start_idx, start_idx + output.len()).
    //
    // # Panics
    //
    // This method panics if `num_bits` is greater than 32, or if the requested
    // range of ids reads past the end of `data`.
    fn get_batch_u32s(&self, start_idx: u32, data: &[u8], output: &mut [u32]) {
        assert!(
            self.bit_width() <= 32,
            "Bitwidth must be <= 32 to use this method."
        );

        let end_idx = start_idx + output.len() as u32;

        let end_bit_read = end_idx * self.num_bits;
        let end_byte_read = (end_bit_read + 7) / 8;
        assert!(
            end_byte_read as usize <= data.len(),
            "Requested index is out of bounds."
        );

        // Simple, slow implementation of get_batch_u32s, used for the ramps.
        let get_batch_ramp = |start_idx: u32, output: &mut [u32]| {
            for (out, idx) in output.iter_mut().zip(start_idx..) {
                *out = self.get(idx, data) as u32;
            }
        };

        // We use an unrolled routine to decode 32 values at once.
        // We therefore decompose our range of values to decode into three ranges:
        // - Entrance ramp: [start_idx, fast_track_start) (at most 8 values)
        // - Highway: [fast_track_start, fast_track_end) (a length that is a multiple of 32)
        // - Exit ramp: [fast_track_end, start_idx + output.len()) (up to 31 values)

        // We want the start of the fast track to be byte-aligned.
        // A sufficient condition is to start at an idx that is a multiple of 8,
        // so highway_start is the closest multiple of 8 that is >= start_idx.
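        //
        // Worked example (illustrative numbers, not from the original source):
        // with start_idx = 13 and output.len() = 100, end_idx = 113. The
        // entrance ramp covers ids 13..16 (3 values), the highway covers
        // 16..112 (three blocks of 32 values), and the exit ramp covers 112..113.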
        let entrance_ramp_len = 8 - (start_idx % 8) % 8;

        let highway_start: u32 = start_idx + entrance_ramp_len;

        if highway_start + BitPacker1x::BLOCK_LEN as u32 > end_idx {
            // We don't have enough values to fill even a single block of highway.
            // Let's just decode all of the values the simple way.
            get_batch_ramp(start_idx, output);
            return;
        }

        let num_blocks: u32 = (end_idx - highway_start) / BitPacker1x::BLOCK_LEN as u32;

        // Entrance ramp
        get_batch_ramp(start_idx, &mut output[..entrance_ramp_len as usize]);

        // Highway
        let mut offset = (highway_start * self.num_bits) as usize / 8;
        let mut output_cursor = (highway_start - start_idx) as usize;
        for _ in 0..num_blocks {
            offset += BitPacker1x.decompress(
                &data[offset..],
                &mut output[output_cursor..],
                self.num_bits as u8,
            );
            output_cursor += 32;
        }

        // Exit ramp
        let highway_end = highway_start + num_blocks * BitPacker1x::BLOCK_LEN as u32;
        get_batch_ramp(highway_end, &mut output[output_cursor..]);
    }

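    /// Collects into `positions` the ids in `id_range` whose value lies in `range`.
    ///
    /// When the bit width is at most 32, the values are decoded in bulk with
    /// `get_batch_u32s` and filtered in place; otherwise each value is decoded
    /// individually via `get`.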
    pub fn get_ids_for_value_range(
        &self,
        range: RangeInclusive<u64>,
        id_range: Range<u32>,
        data: &[u8],
        positions: &mut Vec<u32>,
    ) {
        if self.bit_width() > 32 {
            self.get_ids_for_value_range_slow(range, id_range, data, positions)
        } else {
            if *range.start() > u32::MAX as u64 {
                positions.clear();
                return;
            }
            let range_u32 = (*range.start() as u32)..=(*range.end()).min(u32::MAX as u64) as u32;
            self.get_ids_for_value_range_fast(range_u32, id_range, data, positions)
        }
    }

    fn get_ids_for_value_range_slow(
        &self,
        range: RangeInclusive<u64>,
        id_range: Range<u32>,
        data: &[u8],
        positions: &mut Vec<u32>,
    ) {
        positions.clear();
        for i in id_range {
            // If we cared we could make this branchless, but the slow implementation should rarely
            // kick in.
            let val = self.get(i, data);
            if range.contains(&val) {
                positions.push(i);
            }
        }
    }

    fn get_ids_for_value_range_fast(
        &self,
        value_range: RangeInclusive<u32>,
        id_range: Range<u32>,
        data: &[u8],
        positions: &mut Vec<u32>,
    ) {
        positions.resize(id_range.len(), 0u32);
        self.get_batch_u32s(id_range.start, data, positions);
        crate::filter_vec::filter_vec_in_place(value_range, id_range.start, positions)
    }
}

#[cfg(test)]
mod test {
    use super::{BitPacker, BitUnpacker};

    fn create_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
        let mut data = Vec::new();
        let mut bitpacker = BitPacker::new();
        let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
        let vals: Vec<u64> = (0u64..len as u64)
            .map(|i| if max_val == 0 { 0 } else { i % max_val })
            .collect();
        for &val in &vals {
            bitpacker.write(val, num_bits, &mut data).unwrap();
        }
        bitpacker.close(&mut data).unwrap();
        assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8);
        let bitunpacker = BitUnpacker::new(num_bits);
        (bitunpacker, vals, data)
    }

    fn test_bitpacker_util(len: usize, num_bits: u8) {
        let (bitunpacker, vals, data) = create_bitpacker(len, num_bits);
        for (i, val) in vals.iter().enumerate() {
            assert_eq!(bitunpacker.get(i as u32, &data), *val);
        }
    }

    #[test]
    fn test_bitpacker() {
        test_bitpacker_util(10, 3);
        test_bitpacker_util(10, 0);
        test_bitpacker_util(10, 1);
        test_bitpacker_util(6, 14);
        test_bitpacker_util(1000, 14);
    }

    use proptest::prelude::*;

    fn num_bits_strategy() -> impl Strategy<Value = u8> {
        prop_oneof!(Just(0), Just(1), 2u8..56u8, Just(56), Just(64),)
    }

    fn vals_strategy() -> impl Strategy<Value = (u8, Vec<u64>)> {
        (num_bits_strategy(), 0usize..100usize).prop_flat_map(|(num_bits, len)| {
            let max_val = if num_bits == 64 {
                u64::MAX
            } else {
                (1u64 << num_bits as u32) - 1
            };
            let vals = proptest::collection::vec(0..=max_val, len);
            vals.prop_map(move |vals| (num_bits, vals))
        })
    }

    fn test_bitpacker_aux(num_bits: u8, vals: &[u64]) {
        let mut buffer: Vec<u8> = Vec::new();
        let mut bitpacker = BitPacker::new();
        for &val in vals {
            bitpacker.write(val, num_bits, &mut buffer).unwrap();
        }
        bitpacker.flush(&mut buffer).unwrap();
        assert_eq!(buffer.len(), (vals.len() * num_bits as usize + 7) / 8);
        let bitunpacker = BitUnpacker::new(num_bits);
        let max_val = if num_bits == 64 {
            u64::MAX
        } else {
            (1u64 << num_bits) - 1
        };
        for (i, val) in vals.iter().copied().enumerate() {
            assert!(val <= max_val);
            assert_eq!(bitunpacker.get(i as u32, &buffer), val);
        }
    }

    proptest::proptest! {
        #[test]
        fn test_bitpacker_proptest((num_bits, vals) in vals_strategy()) {
            test_bitpacker_aux(num_bits, &vals);
        }
    }

    #[test]
    #[should_panic]
    fn test_get_batch_panics_over_32_bits() {
        let bitunpacker = BitUnpacker::new(33);
        let mut output: [u32; 1] = [0u32];
        bitunpacker.get_batch_u32s(0, &[0, 0, 0, 0, 0, 0, 0, 0], &mut output[..]);
    }

    #[test]
    fn test_get_batch_limit() {
        let bitunpacker = BitUnpacker::new(1);
        let mut output: [u32; 3] = [0u32, 0u32, 0u32];
        bitunpacker.get_batch_u32s(8 * 4 - 3, &[0u8, 0u8, 0u8, 0u8], &mut output[..]);
    }

    #[test]
    #[should_panic]
    fn test_get_batch_panics_when_off_scope() {
        let bitunpacker = BitUnpacker::new(1);
        let mut output: [u32; 3] = [0u32, 0u32, 0u32];
        // We are missing exactly one bit.
        bitunpacker.get_batch_u32s(8 * 4 - 2, &[0u8, 0u8, 0u8, 0u8], &mut output[..]);
    }

    proptest::proptest! {
        #[test]
        fn test_get_batch_u32s_proptest(num_bits in 0u8..=32u8) {
            let mask =
                if num_bits == 32u8 {
                    u32::MAX
                } else {
                    (1u32 << num_bits) - 1
                };
            let mut buffer: Vec<u8> = Vec::new();
            let mut bitpacker = BitPacker::new();
            for val in 0..100 {
                bitpacker.write(val & mask as u64, num_bits, &mut buffer).unwrap();
            }
            bitpacker.flush(&mut buffer).unwrap();
            let bitunpacker = BitUnpacker::new(num_bits);
            let mut output: Vec<u32> = Vec::new();
            for len in [0, 1, 2, 32, 33, 34, 64] {
                for start_idx in 0u32..32u32 {
                    output.resize(len, 0);
                    bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
                    for i in 0..len {
                        let expected = (start_idx + i as u32) & mask;
                        assert_eq!(output[i], expected);
                    }
                }
            }
        }
    }
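
    // Illustrative addition (not part of the original test suite): a small
    // end-to-end check of `get_ids_for_value_range` on a packed sequence.
    #[test]
    fn test_get_ids_for_value_range_example() {
        let num_bits = 4u8;
        let mut buffer: Vec<u8> = Vec::new();
        let mut bitpacker = BitPacker::new();
        // Pack the values 0, 1, ..., 15 on 4 bits each.
        for val in 0u64..16u64 {
            bitpacker.write(val, num_bits, &mut buffer).unwrap();
        }
        bitpacker.close(&mut buffer).unwrap();
        let bitunpacker = BitUnpacker::new(num_bits);
        let mut positions = Vec::new();
        // Ids 2..10 hold the values 2..=9; of those, ids 4, 5 and 6 hold a
        // value inside the value range 4..=6.
        bitunpacker.get_ids_for_value_range(4..=6, 2..10, &buffer, &mut positions);
        assert_eq!(positions, vec![4, 5, 6]);
    }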
}