//! The decompression algorithm.

use std::error;
use std::fmt;

/// An error representing invalid compressed data.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
    /// Expected another byte, but none found.
    ExpectedAnotherByte,
    /// Deduplication offset out of bounds (not in buffer).
    OffsetOutOfBounds,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Keep the exact messages the former `quick_error!`-generated code produced.
        match *self {
            Error::ExpectedAnotherByte => f.write_str("Expected another byte, found none."),
            Error::OffsetOutOfBounds => {
                f.write_str("The offset to copy is not contained in the decompressed buffer.")
            }
        }
    }
}

impl error::Error for Error {}

/// A LZ4 decoder.
///
/// This will decode in accordance to the LZ4 format. It represents a particular state of the
/// decompressor.
struct Decoder<'a> {
    /// The compressed input.
    input: &'a [u8],
    /// The decompressed output.
    output: &'a mut Vec<u8>,
    /// The current block's "token".
    ///
    /// This token contains two 4-bit "fields", a higher and a lower, representing the literals'
    /// length and the back reference's length, respectively. LSIC is used if either are their
    /// maximal values.
    token: u8,
}

impl<'a> Decoder<'a> {
    /// Internal (partial) function for `take`.
    ///
    /// Pops the first `n` bytes off the front of `input`, advancing the slice past them.
    /// Fails with `Error::ExpectedAnotherByte` if fewer than `n` bytes remain.
    #[inline]
    fn take_imp(input: &mut &'a [u8], n: usize) -> Result<&'a [u8], Error> {
        // Check if we have enough bytes left.
        if input.len() < n {
            // No extra bytes. This is clearly not expected, so we return an error.
            Err(Error::ExpectedAnotherByte)
        } else {
            // Take the first n bytes.
            let res = Ok(&input[..n]);
            // Shift the stream to left, so that it is no longer the first byte.
            *input = &input[n..];
            // Return the former first byte.
            res
        }
    }

    /// Pop n bytes from the start of the input stream.
    fn take(&mut self, n: usize) -> Result<&[u8], Error> {
        Self::take_imp(&mut self.input, n)
    }

    /// Write a buffer to the output stream.
    ///
    /// The reason this doesn't take `&mut self` is that we need partial borrowing due to the rules
    /// of the borrow checker. For this reason, we instead take some number of segregated
    /// references so we can read and write them independently.
    fn output(output: &mut Vec<u8>, buf: &[u8]) {
        // We use simple memcpy to extend the vector.
        output.extend_from_slice(buf);
    }

    /// Write an already decompressed match to the output stream.
    ///
    /// This is used for the essential part of the algorithm: deduplication. We start at some
    /// position `start` and then keep pushing the following element until we've added
    /// `match_length` elements.
    fn duplicate(&mut self, start: usize, match_length: usize) {
        // We cannot simply use memcpy or `extend_from_slice`, because these do not allow
        // self-referential copies: http://ticki.github.io/img/lz4_runs_encoding_diagram.svg
        // Byte-by-byte copying is safe here: the caller guarantees `start < output.len()`,
        // and every push grows the buffer, so index `i` is always in bounds.
        for i in start..start + match_length {
            let b = self.output[i];
            self.output.push(b);
        }
    }

    /// Read an integer LSIC (linear small integer code) encoded.
    ///
    /// In LZ4, we encode small integers in a way that we can have an arbitrary number of bytes. In
    /// particular, we add the bytes repeatedly until we hit a non-0xFF byte. When we do, we add
    /// this byte to our sum and terminate the loop.
    ///
    /// # Example
    ///
    /// ```notest
    /// 255, 255, 255, 4, 2, 3, 4, 6, 7
    /// ```
    ///
    /// is encoded to _255 + 255 + 255 + 4 = 769_. The bytes after the first 4 is ignored, because
    /// 4 is the first non-0xFF byte.
    #[inline]
    fn read_integer(&mut self) -> Result<usize, Error> {
        // We start at zero and count upwards.
        let mut n = 0;
        // If this byte takes value 255 (the maximum value it can take), another byte is read
        // and added to the sum. This repeats until a byte lower than 255 is read.
        while {
            // We add the next byte until we get a byte which we add to the counting variable.
            let extra = self.take(1)?[0];
            n += extra as usize;

            // We continue if we got 255.
            extra == 0xFF
        } {}

        Ok(n)
    }

    /// Read a little-endian 16-bit integer from the input stream.
    #[inline]
    fn read_u16(&mut self) -> Result<u16, Error> {
        // Assemble the two bytes little-endian with std; this replaces the former
        // `byteorder` dependency.
        let bytes = self.take(2)?;
        Ok(u16::from_le_bytes([bytes[0], bytes[1]]))
    }

    /// Read the literals section of a block.
    ///
    /// The literals section encodes some bytes which are to be copied to the output without any
    /// modification.
    ///
    /// It consists of two parts:
    ///
    /// 1. An LSIC integer extension to the literals length as defined by the first part of the
    ///    token, if it takes the highest value (15).
    /// 2. The literals themself.
    fn read_literal_section(&mut self) -> Result<(), Error> {
        // The higher token is the literals part of the token. It takes a value from 0 to 15.
        let mut literal = (self.token >> 4) as usize;
        // If the initial value is 15, it is indicated that another byte will be read and added to
        // it.
        if literal == 15 {
            // The literal length took the maximal value, indicating that there is more than 15
            // literal bytes. We read the extra integer.
            literal += self.read_integer()?;
        }

        // Now we know the literal length. The number will be used to indicate how long the
        // following literal copied to the output buffer is.

        // Read the literals segment and output them without processing.
        Self::output(&mut self.output, Self::take_imp(&mut self.input, literal)?);

        Ok(())
    }

    /// Read the duplicates section of the block.
    ///
    /// The duplicates section serves to reference an already decoded segment. This consists of two
    /// parts:
    ///
    /// 1. A 16-bit little-endian integer defining the "offset", i.e. how long back we need to go
    ///    in the decoded buffer and copy.
    /// 2. An LSIC integer extension to the duplicate length as defined by the first part of the
    ///    token, if it takes the highest value (15).
    fn read_duplicate_section(&mut self) -> Result<(), Error> {
        // Now, we will obtain the offset which we will use to copy from the output. It is an
        // 16-bit integer.
        let offset = self.read_u16()?;

        // Obtain the initial match length. The match length is the length of the duplicate segment
        // which will later be copied from data previously decompressed into the output buffer. The
        // initial length is derived from the second part of the token (the lower nibble), we read
        // earlier. Since having a match length of less than 4 would mean negative compression
        // ratio, we start at 4.
        let mut match_length = (4 + (self.token & 0xF)) as usize;

        // The intial match length can maximally be 19. As with the literal length, this indicates
        // that there are more bytes to read.
        if match_length == 4 + 15 {
            // The match length took the maximal value, indicating that there is more bytes. We
            // read the extra integer.
            match_length += self.read_integer()?;
        }

        // We now copy from the already decompressed buffer. This allows us for storing duplicates
        // by simply referencing the other location.

        // Calculate the start of this duplicate segment. We use wrapping subtraction to avoid
        // overflow checks, which we will catch later.
        let start = self.output.len().wrapping_sub(offset as usize);

        // We'll do a bound check to avoid panicking. Note that an offset of zero wraps to
        // `usize::MAX` and is thus rejected here as well, as the format requires.
        if start < self.output.len() {
            // Write the duplicate segment to the output buffer.
            self.duplicate(start, match_length);

            Ok(())
        } else {
            Err(Error::OffsetOutOfBounds)
        }
    }

    /// Complete the decompression by reading all the blocks.
    ///
    /// # Decompressing a block
    ///
    /// Blocks consists of:
    /// - A 1 byte token
    ///     * A 4 bit integer $t_1$.
    ///     * A 4 bit integer $t_2$.
    /// - A $n$ byte sequence of 0xFF bytes (if $t_1 \neq 15$, then $n = 0$).
    /// - $x$ non-0xFF 8-bit integers, L (if $t_1 = 15$, $x = 1$, else $x = 0$).
    /// - $t_1 + 15n + L$ bytes of uncompressed data (literals).
    /// - 16-bits offset (little endian), $a$.
    /// - A $m$ byte sequence of 0xFF bytes (if $t_2 \neq 15$, then $m = 0$).
    /// - $y$ non-0xFF 8-bit integers, $c$ (if $t_2 = 15$, $y = 1$, else $y = 0$).
    ///
    /// First, the literals are copied directly and unprocessed to the output buffer, then (after
    /// the involved parameters are read) $t_2 + 15m + c$ bytes are copied from the output buffer
    /// at position $a + 4$ and appended to the output buffer. Note that this copy can be
    /// overlapping.
    #[inline]
    fn complete(&mut self) -> Result<(), Error> {
        // Exhaust the decoder by reading and decompressing all blocks until the remaining buffer
        // is empty.
        while !self.input.is_empty() {
            // Read the token. The token is the first byte in a block. It is divided into two 4-bit
            // subtokens, the higher and the lower.
            self.token = self.take(1)?[0];

            // Now, we read the literals section.
            self.read_literal_section()?;

            // If the input stream is emptied, we break out of the loop. This is only the case
            // in the end of the stream, since the block is intact otherwise.
            if self.input.is_empty() {
                break;
            }

            // Now, we read the duplicates section.
            self.read_duplicate_section()?;
        }

        Ok(())
    }
}

/// Decompress all bytes of `input` into `output`.
pub fn decompress_into(input: &[u8], output: &mut Vec<u8>) -> Result<(), Error> {
    // Decode into our vector.
    Decoder {
        input: input,
        output: output,
        token: 0,
    }.complete()?;

    Ok(())
}

/// Decompress all bytes of `input`.
pub fn decompress(input: &[u8]) -> Result<Vec<u8>, Error> {
    // Allocate a vector to contain the decompressed stream.
    let mut vec = Vec::with_capacity(4096);

    decompress_into(input, &mut vec)?;

    Ok(vec)
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn aaaaaaaaaaa_lots_of_aaaaaaaaa() {
        assert_eq!(decompress(&[0x11, b'a', 1, 0]).unwrap(), b"aaaaaa");
    }

    #[test]
    fn multiple_repeated_blocks() {
        assert_eq!(decompress(&[0x11, b'a', 1, 0, 0x22, b'b', b'c', 2, 0]).unwrap(),
                   b"aaaaaabcbcbcbc");
    }

    #[test]
    fn all_literal() {
        assert_eq!(decompress(&[0x30, b'a', b'4', b'9']).unwrap(), b"a49");
    }

    #[test]
    fn offset_oob() {
        decompress(&[0x10, b'a', 2, 0]).unwrap_err();
        decompress(&[0x40, b'a', 1, 0]).unwrap_err();
    }
}