zune_jpeg/decoder.rs
1/*
2 * Copyright (c) 2023.
3 *
4 * This software is free software;
5 *
6 * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license
7 */
8
9//! Main image logic.
10#![allow(clippy::doc_markdown)]
11
12use alloc::string::ToString;
13use alloc::vec::Vec;
14use alloc::{format, vec};
15
16use zune_core::bytestream::{ZByteReader, ZReaderTrait};
17use zune_core::colorspace::ColorSpace;
18use zune_core::log::{error, trace, warn};
19use zune_core::options::DecoderOptions;
20
21use crate::color_convert::choose_ycbcr_to_rgb_convert_func;
22use crate::components::{Components, SampleRatios};
23use crate::errors::{DecodeErrors, UnsupportedSchemes};
24use crate::headers::{
25 parse_app1, parse_app14, parse_app2, parse_dqt, parse_huffman, parse_sos, parse_start_of_frame
26};
27use crate::huffman::HuffmanTable;
28use crate::idct::choose_idct_func;
29use crate::marker::Marker;
30use crate::misc::SOFMarkers;
31use crate::upsampler::{
32 choose_horizontal_samp_function, choose_hv_samp_function, choose_v_samp_function,
33 upsample_no_op
34};
35
36/// Maximum components
37pub(crate) const MAX_COMPONENTS: usize = 4;
38
39/// Maximum image dimensions supported.
40pub(crate) const MAX_DIMENSIONS: usize = 1 << 27;
41
42/// Color conversion function that can convert YCbCr colorspace to RGB(A/X) for
43/// 16 values
44///
45/// The following guarantees are made to functions of this type:
46///
47/// 1. The `&[i16]` slices passed contain 16 items
48///
49/// 2. The slices passed are in the following order:
50///    `y, cb, cr`
51///
52/// 3. The `&mut [u8]` output slice is zero initialized
53///
54/// 4. The `&mut usize` points to the position in the output where new values should
55///    be written
56///
57/// The function should
58/// 1. Carry out color conversion
59/// 2. Update `&mut usize` with the new position
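///
/// # Example
/// A minimal sketch of a function with this shape; the name and body are purely
/// illustrative and are **not** the decoder's real conversion routine:
///
/// ```no_run
/// fn dummy_convert(
///     y: &[i16; 16], _cb: &[i16; 16], _cr: &[i16; 16], output: &mut [u8], position: &mut usize
/// ) {
///     // write 16 "converted" pixels starting at *position
///     for (i, luma) in y.iter().enumerate() {
///         output[*position + i] = (*luma).clamp(0, 255) as u8;
///     }
///     // advance the write position by the number of values written
///     *position += 16;
/// }
/// ```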
60
61pub type ColorConvert16Ptr = fn(&[i16; 16], &[i16; 16], &[i16; 16], &mut [u8], &mut usize);
62
63/// IDCT function prototype
64///
65/// This encapsulates a dequantize and IDCT function which will carry out the
66/// following steps:
67///
68/// 1. Multiply each 64 element block of `&mut [i16]` with `&Aligned32<[i32;64]>`
69/// 2. Carry out the IDCT (type 3 DCT) on each block of 64 i16's
70pub type IDCTPtr = fn(&mut [i32; 64], &mut [i16], usize);
71
72/// An encapsulation of an ICC chunk
73pub(crate) struct ICCChunk {
74 pub(crate) seq_no: u8,
75 pub(crate) num_markers: u8,
76 pub(crate) data: Vec<u8>
77}
78
79/// A JPEG Decoder Instance.
80#[allow(clippy::upper_case_acronyms, clippy::struct_excessive_bools)]
81pub struct JpegDecoder<T: ZReaderTrait> {
82 /// Struct to hold image information from SOI
83 pub(crate) info: ImageInfo,
84 /// Quantization tables; set to `None` once the tables have been
85 /// moved to the `components` field
86 pub(crate) qt_tables: [Option<[i32; 64]>; MAX_COMPONENTS],
87 /// DC Huffman Tables with a maximum of 4 tables for each component
88 pub(crate) dc_huffman_tables: [Option<HuffmanTable>; MAX_COMPONENTS],
89 /// AC Huffman Tables with a maximum of 4 tables for each component
90 pub(crate) ac_huffman_tables: [Option<HuffmanTable>; MAX_COMPONENTS],
91 /// Image components, holds information like DC prediction and quantization
92 /// tables of a component
93 pub(crate) components: Vec<Components>,
94 /// maximum horizontal component of all channels in the image
95 pub(crate) h_max: usize,
96 /// maximum vertical component of all channels in the image
97 pub(crate) v_max: usize,
98 /// MCU width (interleaved scans)
99 pub(crate) mcu_width: usize,
100 /// MCU height (interleaved scans)
101 pub(crate) mcu_height: usize,
102 /// Number of MCU's in the x plane
103 pub(crate) mcu_x: usize,
104 /// Number of MCU's in the y plane
105 pub(crate) mcu_y: usize,
106 /// Is the image interleaved?
107 pub(crate) is_interleaved: bool,
108 pub(crate) sub_sample_ratio: SampleRatios,
109 /// Image input colorspace, should be YCbCr for a sane image, might be
110 /// grayscale too
111 pub(crate) input_colorspace: ColorSpace,
112 // Progressive image details
113 /// Is the image progressive?
114 pub(crate) is_progressive: bool,
115
116 /// Start of spectral scan
117 pub(crate) spec_start: u8,
118 /// End of spectral scan
119 pub(crate) spec_end: u8,
120 /// Successive approximation bit position high
121 pub(crate) succ_high: u8,
122 /// Successive approximation bit position low
123 pub(crate) succ_low: u8,
124 /// Number of components.
125 pub(crate) num_scans: u8,
126 // Function pointers, for pointy stuff.
127 /// Dequantize and idct function
128 // Which function to run is determined at runtime; statically it's
129 // initialized to a platform independent one, and during initialization
130 // of this struct we check if we can switch to a faster one which
131 // depends on certain CPU extensions.
132 pub(crate) idct_func: IDCTPtr,
133 // Color convert function which acts on 16 YCbCr values
134 pub(crate) color_convert_16: ColorConvert16Ptr,
135 pub(crate) z_order: [usize; MAX_COMPONENTS],
136 /// Restart interval (number of MCUs between restart markers)
137 pub(crate) restart_interval: usize,
138 pub(crate) todo: usize,
139 // decoder options
140 pub(crate) options: DecoderOptions,
141 // byte-stream
142 pub(crate) stream: ZByteReader<T>,
143 // Indicate whether headers have been decoded
144 pub(crate) headers_decoded: bool,
145 pub(crate) seen_sof: bool,
146 // exif data, lifted from app1
147 pub(crate) exif_data: Option<Vec<u8>>,
148
149 pub(crate) icc_data: Vec<ICCChunk>,
150 pub(crate) is_mjpeg: bool,
151 pub(crate) coeff: usize // Solves some weird bug :)
152}
153
154impl<T> JpegDecoder<T>
155where
156 T: ZReaderTrait
157{
158 #[allow(clippy::redundant_field_names)]
159 fn default(options: DecoderOptions, buffer: T) -> Self {
160 let color_convert = choose_ycbcr_to_rgb_convert_func(ColorSpace::RGB, &options).unwrap();
161 JpegDecoder {
162 info: ImageInfo::default(),
163 qt_tables: [None, None, None, None],
164 dc_huffman_tables: [None, None, None, None],
165 ac_huffman_tables: [None, None, None, None],
166 components: vec![],
167 // Interleaved information
168 h_max: 1,
169 v_max: 1,
170 mcu_height: 0,
171 mcu_width: 0,
172 mcu_x: 0,
173 mcu_y: 0,
174 is_interleaved: false,
175 sub_sample_ratio: SampleRatios::None,
176 is_progressive: false,
177 spec_start: 0,
178 spec_end: 0,
179 succ_high: 0,
180 succ_low: 0,
181 num_scans: 0,
182 idct_func: choose_idct_func(&options),
183 color_convert_16: color_convert,
184 input_colorspace: ColorSpace::YCbCr,
185 z_order: [0; MAX_COMPONENTS],
186 restart_interval: 0,
187 todo: 0x7fff_ffff,
188 options: options,
189 stream: ZByteReader::new(buffer),
190 headers_decoded: false,
191 seen_sof: false,
192 exif_data: None,
193 icc_data: vec![],
194 is_mjpeg: false,
195 coeff: 1
196 }
197 }
198 /// Decode a buffer already in memory
199 ///
200 /// The buffer should be a valid jpeg file, perhaps read into memory via
201 /// `std::fs::read()`, or a JPEG file downloaded from the internet.
202 ///
203 /// # Errors
204 /// See DecodeErrors for an explanation
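///
/// # Example
/// A minimal sketch of the usual flow; the file path is only illustrative:
///
/// ```no_run
/// use zune_jpeg::JpegDecoder;
///
/// let data = std::fs::read("image.jpg").unwrap();
/// let mut decoder = JpegDecoder::new(&data);
/// // returns interleaved pixels in the configured output colorspace
/// let pixels = decoder.decode().unwrap();
/// ```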
205 pub fn decode(&mut self) -> Result<Vec<u8>, DecodeErrors> {
206 self.decode_headers()?;
207 let size = self.output_buffer_size().unwrap();
208 let mut out = vec![0; size];
209 self.decode_into(&mut out)?;
210 Ok(out)
211 }
212
213 /// Create a new Decoder instance
214 ///
215 /// # Arguments
216 /// - `stream`: The raw bytes of a jpeg file.
217 #[must_use]
218 #[allow(clippy::new_without_default)]
219 pub fn new(stream: T) -> JpegDecoder<T> {
220 JpegDecoder::default(DecoderOptions::default(), stream)
221 }
222
223 /// Returns the image information
224 ///
225 /// This **must** be called after a call to [`decode`] or [`decode_headers`],
226 /// otherwise it will return `None`
227 ///
228 /// # Returns
229 /// - `Some(info)`: Image information; width, height, number of components
230 /// - `None`: Indicates image headers haven't been decoded
231 ///
232 /// [`decode`]: JpegDecoder::decode
233 /// [`decode_headers`]: JpegDecoder::decode_headers
234 #[must_use]
235 pub fn info(&self) -> Option<ImageInfo> {
236 // If the headers haven't been decoded yet we cannot guarantee that the
237 // image information is valid, so return None rather than handing the
238 // caller a default-initialized (and therefore meaningless) ImageInfo
239 // struct.
240 if !self.headers_decoded {
241 return None;
242 }
243
244 return Some(self.info.clone());
245 }
246
247 /// Return the number of bytes required to hold a decoded image frame
248 /// decoded using the configured output transformations
249 ///
250 /// # Returns
251 /// - `Some(usize)`: Minimum size for a buffer needed to decode the image
252 /// - `None`: Indicates the headers were not decoded, or the image dimensions would overflow a `usize`
253 ///
254 #[must_use]
255 pub fn output_buffer_size(&self) -> Option<usize> {
256 return if self.headers_decoded {
257 Some(
258 usize::from(self.width())
259 .checked_mul(usize::from(self.height()))?
260 .checked_mul(self.options.jpeg_get_out_colorspace().num_components())?
261 )
262 } else {
263 None
264 };
265 }
266
267 /// Get a mutable reference to the decoder options
268 /// for the decoder instance
269 ///
270 /// This can be used to modify options before actual decoding
271 /// but after initial creation
272 ///
273 /// # Example
274 /// ```no_run
275 /// use zune_jpeg::JpegDecoder;
276 ///
277 /// let mut decoder = JpegDecoder::new(&[]);
278 /// // get current options
279 /// let mut options = decoder.get_options();
280 /// // modify it
281 /// let new_options = options.set_max_width(10);
282 /// // set it back
283 /// decoder.set_options(new_options);
284 ///
285 /// ```
286 #[must_use]
287 pub const fn get_options(&self) -> &DecoderOptions {
288 &self.options
289 }
290 /// Return the input colorspace of the image
291 ///
292 /// This indicates the colorspace that is present in
293 /// the image, but this may be different to the colorspace that
294 /// the output will be transformed to
295 ///
296 /// # Returns
297 /// - `Some(Colorspace)`: Input colorspace
298 /// - `None`: Indicates the headers weren't decoded
299 #[must_use]
300 pub fn get_input_colorspace(&self) -> Option<ColorSpace> {
301 return if self.headers_decoded { Some(self.input_colorspace) } else { None };
302 }
303 /// Set decoder options
304 ///
305 /// This can be used to set new options even after initialization
306 /// but before decoding.
307 ///
308 /// This does not bear any significance after decoding an image
309 ///
310 /// # Arguments
311 /// - `options`: New decoder options
312 ///
313 /// # Example
314 /// Set maximum jpeg progressive passes to be 4
315 ///
316 /// ```no_run
317 /// use zune_jpeg::JpegDecoder;
318 /// let mut decoder =JpegDecoder::new(&[]);
319 /// // this works also because DecoderOptions implements `Copy`
320 /// let options = decoder.get_options().jpeg_set_max_scans(4);
321 /// // set the new options
322 /// decoder.set_options(options);
323 /// // now decode
324 /// decoder.decode().unwrap();
325 /// ```
326 pub fn set_options(&mut self, options: DecoderOptions) {
327 self.options = options;
328 }
329 /// Decode JPEG headers
330 ///
331 /// This routine takes care of parsing supported headers from a JPEG
332 /// image
333 ///
334 /// # Supported Headers
335 /// - APP(0)
336 /// - SOF(0..=2) -> Baseline and progressive (Huffman coded)
337 /// - DQT -> Quantization tables
338 /// - DHT -> Huffman tables
339 /// - SOS -> Start of Scan
340 /// # Unsupported Headers
341 /// - SOF(n) -> JPEG images which are not baseline/progressive
342 /// - DAC -> Images using Arithmetic tables
343 /// - JPG(n)
344 fn decode_headers_internal(&mut self) -> Result<(), DecodeErrors> {
345 if self.headers_decoded {
346 trace!("Headers decoded!");
347 return Ok(());
348 }
349 // match output colorspace here
350 // we know this will only be called once per image
351 // so makes sense
352 // We only care for ycbcr to rgb/rgba here
353 // in case one is using another colorspace.
354 // May god help you
355 let out_colorspace = self.options.jpeg_get_out_colorspace();
356
357 if matches!(
358 out_colorspace,
359 ColorSpace::BGR | ColorSpace::BGRA | ColorSpace::RGB | ColorSpace::RGBA
360 ) {
361 self.color_convert_16 = choose_ycbcr_to_rgb_convert_func(
362 self.options.jpeg_get_out_colorspace(),
363 &self.options
364 )
365 .unwrap();
366 }
367 // First two bytes should be jpeg soi marker
368 let magic_bytes = self.stream.get_u16_be_err()?;
369
370 let mut last_byte = 0;
371 let mut bytes_before_marker = 0;
372
373 if magic_bytes != 0xffd8 {
374 return Err(DecodeErrors::IllegalMagicBytes(magic_bytes));
375 }
376
377 loop {
378 // read a byte
379 let mut m = self.stream.get_u8_err()?;
380
381 // AND OF COURSE some images will have fill bytes in their marker
382 // bitstreams because why not.
383 //
384 // I am disappointed as a man.
385 if (m == 0xFF || m == 0) && last_byte == 0xFF {
386 // This handles the edge case where
387 // images have markers with fill bytes(0xFF)
388 // or byte stuffing (0)
389 // I.e 0xFF 0xFF 0xDA
390 // and
391 // 0xFF 0 0xDA
392 // It should ignore those fill bytes and take 0xDA
393 // I don't know why such images exist
394 // but they do.
395 // so this is for you (with love)
396 while m == 0xFF || m == 0x0 {
397 last_byte = m;
398 m = self.stream.get_u8_err()?;
399 }
400 }
401 // Last byte should be 0xFF to confirm existence of a marker since markers look
402 // like 0xFF (some marker data)
403 if last_byte == 0xFF {
404 let marker = Marker::from_u8(m);
405 if let Some(n) = marker {
406 if bytes_before_marker > 3 {
407 if self.options.get_strict_mode()
408 /*No reason to use this*/
409 {
410 return Err(DecodeErrors::FormatStatic(
411 "[strict-mode]: Extra bytes between headers"
412 ));
413 }
414
415 error!(
416 "Extra bytes {} before marker 0xFF{:X}",
417 bytes_before_marker - 3,
418 m
419 );
420 }
421
422 bytes_before_marker = 0;
423
424 self.parse_marker_inner(n)?;
425
426 if n == Marker::SOS {
427 self.headers_decoded = true;
428 trace!("Input colorspace {:?}", self.input_colorspace);
429 return Ok(());
430 }
431 } else {
432 bytes_before_marker = 0;
433
434 warn!("Marker 0xFF{:X} not known", m);
435
436 let length = self.stream.get_u16_be_err()?;
437
438 if length < 2 {
439 return Err(DecodeErrors::Format(format!(
440 "Found a marker with invalid length : {length}"
441 )));
442 }
443
444 warn!("Skipping {} bytes", length - 2);
445 self.stream.skip((length - 2) as usize);
446 }
447 }
448 last_byte = m;
449 bytes_before_marker += 1;
450 }
451 }
452 #[allow(clippy::too_many_lines)]
453 pub(crate) fn parse_marker_inner(&mut self, m: Marker) -> Result<(), DecodeErrors> {
454 match m {
455 Marker::SOF(0..=2) => {
456 let marker = {
457 // choose marker
458 if m == Marker::SOF(0) || m == Marker::SOF(1) {
459 SOFMarkers::BaselineDct
460 } else {
461 self.is_progressive = true;
462 SOFMarkers::ProgressiveDctHuffman
463 }
464 };
465
466 trace!("Image encoding scheme =`{:?}`", marker);
467 // get components
468 parse_start_of_frame(marker, self)?;
469 }
470 // Start of Frame Segments not supported
471 Marker::SOF(v) => {
472 let feature = UnsupportedSchemes::from_int(v);
473
474 if let Some(feature) = feature {
475 return Err(DecodeErrors::Unsupported(feature));
476 }
477
478 return Err(DecodeErrors::Format("Unsupported image format".to_string()));
479 }
480 //APP(0) segment
481 Marker::APP(0) => {
482 let mut length = self.stream.get_u16_be_err()?;
483
484 if length < 2 {
485 return Err(DecodeErrors::Format(format!(
486 "Found a marker with invalid length:{length}\n"
487 )));
488 }
489 // skip for now
490 if length > 5 && self.stream.has(5) {
491 let mut buffer = [0u8; 5];
492 self.stream.read_exact(&mut buffer).unwrap();
493 if &buffer == b"AVI1\0" {
494 self.is_mjpeg = true;
495 }
496 length -= 5;
497 }
498 self.stream.skip(length.saturating_sub(2) as usize);
499
500 //parse_app(buf, m, &mut self.info)?;
501 }
502 Marker::APP(1) => {
503 parse_app1(self)?;
504 }
505
506 Marker::APP(2) => {
507 parse_app2(self)?;
508 }
509 // Quantization tables
510 Marker::DQT => {
511 parse_dqt(self)?;
512 }
513 // Huffman tables
514 Marker::DHT => {
515 parse_huffman(self)?;
516 }
517 // Start of Scan Data
518 Marker::SOS => {
519 parse_sos(self)?;
520
521 // break after reading the start of scan.
522 // what follows is the image data
523 return Ok(());
524 }
525 Marker::EOI => return Err(DecodeErrors::FormatStatic("Premature End of image")),
526
527 Marker::DAC | Marker::DNL => {
528 return Err(DecodeErrors::Format(format!(
529 "Parsing of the following header `{m:?}` is not supported, \
530 cannot continue"
531 )));
532 }
533 Marker::DRI => {
534 trace!("DRI marker present");
535
536 if self.stream.get_u16_be_err()? != 4 {
537 return Err(DecodeErrors::Format(
538 "Bad DRI length, Corrupt JPEG".to_string()
539 ));
540 }
541
542 self.restart_interval = usize::from(self.stream.get_u16_be_err()?);
543 self.todo = self.restart_interval;
544 }
545 Marker::APP(14) => {
546 parse_app14(self)?;
547 }
548 _ => {
549 warn!(
550 "Capabilities for processing marker \"{:?}\" not implemented",
551 m
552 );
553
554 let length = self.stream.get_u16_be_err()?;
555
556 if length < 2 {
557 return Err(DecodeErrors::Format(format!(
558 "Found a marker with invalid length:{length}\n"
559 )));
560 }
561 warn!("Skipping {} bytes", length - 2);
562 self.stream.skip((length - 2) as usize);
563 }
564 }
565 Ok(())
566 }
567 /// Get the embedded ICC profile if it exists
568 /// and is correct
569 ///
570 /// One need not decode the whole image to extract this;
571 /// calling [`decode_headers`] for an image with an ICC profile
572 /// is enough to extract it
573 ///
574 /// # Returns
575 /// - `Some(Vec<u8>)`: The raw ICC profile of the image
576 /// - `None`: May indicate an error in the ICC profile, non-existence of
577 /// an ICC profile, or that the headers weren't decoded.
578 ///
579 /// [`decode_headers`]:Self::decode_headers
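///
/// # Example
/// A minimal sketch; the file path is only illustrative:
///
/// ```no_run
/// use zune_jpeg::JpegDecoder;
///
/// let data = std::fs::read("tagged.jpg").unwrap();
/// let mut decoder = JpegDecoder::new(&data);
/// // decoding headers is enough, pixel data is not needed
/// decoder.decode_headers().unwrap();
/// if let Some(icc) = decoder.icc_profile() {
///     println!("ICC profile is {} bytes", icc.len());
/// }
/// ```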
580 #[must_use]
581 pub fn icc_profile(&self) -> Option<Vec<u8>> {
582 let mut marker_present: [Option<&ICCChunk>; 256] = [None; 256];
583
584 if !self.headers_decoded {
585 return None;
586 }
587 let num_markers = self.icc_data.len();
588
589 if num_markers == 0 || num_markers >= 255 {
590 return None;
591 }
592 // check validity
593 for chunk in &self.icc_data {
594 if usize::from(chunk.num_markers) != num_markers {
595 // all the lengths must match
596 return None;
597 }
598 if chunk.seq_no == 0 {
599 warn!("Zero sequence number in ICC, corrupt ICC chunk");
600 return None;
601 }
602 if marker_present[usize::from(chunk.seq_no)].is_some() {
603 // duplicate seq_no
604 warn!("Duplicate sequence number in ICC, corrupt chunk");
605 return None;
606 }
607
608 marker_present[usize::from(chunk.seq_no)] = Some(chunk);
609 }
610 let mut data = Vec::with_capacity(1000);
611 // assemble the data now
612 for chunk in marker_present.get(1..=num_markers).unwrap() {
613 if let Some(ch) = chunk {
614 data.extend_from_slice(&ch.data);
615 } else {
616 warn!("Missing icc sequence number, corrupt ICC chunk ");
617 return None;
618 }
619 }
620
621 Some(data)
622 }
623 /// Return the exif data for the file
624 ///
625 /// This returns the raw exif data starting at the
626 /// TIFF header
627 ///
628 /// # Returns
629 /// - `Some(data)`: The raw exif data, if present in the image
630 /// - `None`: May indicate the following
631 ///
632 /// 1. The image doesn't have exif data
633 /// 2. The image headers haven't been decoded
634 #[must_use]
635 pub fn exif(&self) -> Option<&Vec<u8>> {
636 return self.exif_data.as_ref();
637 }
638 /// Get the output colorspace the image pixels will be decoded into
639 ///
640 ///
641 /// # Note
642 /// This value can only be relied upon after decoding headers,
643 /// as markers such as Adobe APP14 may dictate different colorspaces
644 /// than requested.
645 ///
646 /// Calling `decode_headers` is sufficient to know what colorspace the
647 /// output will be in; if this is called after `decode` it indicates the colorspace
648 /// the output is currently in
649 ///
650 /// Additionally, not all input->output colorspace mappings are supported,
651 /// but all input colorspaces can map to the RGB colorspace, so that's a safe bet
652 /// if one is handling image formats
653 ///
654 /// # Returns
655 /// - `Some(Colorspace)`: If headers have been decoded, the colorspace the
656 ///   output array will be in
657 /// - `None`: Indicates the headers haven't been decoded
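///
/// # Example
/// A minimal sketch; the file path is only illustrative:
///
/// ```no_run
/// use zune_jpeg::JpegDecoder;
///
/// let data = std::fs::read("image.jpg").unwrap();
/// let mut decoder = JpegDecoder::new(&data);
/// // after this call the output colorspace is known, even when markers
/// // such as Adobe APP14 change it from what was requested
/// decoder.decode_headers().unwrap();
/// println!("Output colorspace: {:?}", decoder.get_output_colorspace());
/// ```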
658 #[must_use]
659 pub fn get_output_colorspace(&self) -> Option<ColorSpace> {
660 return if self.headers_decoded {
661 Some(self.options.jpeg_get_out_colorspace())
662 } else {
663 None
664 };
665 }
666
667 /// Decode into a pre-allocated buffer
668 ///
669 /// It is an error if the buffer size is smaller than
670 /// [`output_buffer_size()`](Self::output_buffer_size)
671 ///
672 /// If the buffer is bigger than expected, we ignore the end padding bytes
673 ///
674 /// # Example
675 ///
676 /// - Read headers and then alloc a buffer big enough to hold the image
677 ///
678 /// ```no_run
679 /// use zune_jpeg::JpegDecoder;
680 /// let mut decoder = JpegDecoder::new(&[]);
681 /// // before we get output, we must decode the headers to get width
682 /// // height, and input colorspace
683 /// decoder.decode_headers().unwrap();
684 ///
685 /// let mut out = vec![0;decoder.output_buffer_size().unwrap()];
686 /// // write into out
687 /// decoder.decode_into(&mut out).unwrap();
688 /// ```
689 ///
690 ///
691 pub fn decode_into(&mut self, out: &mut [u8]) -> Result<(), DecodeErrors> {
692 self.decode_headers_internal()?;
693
694 let expected_size = self.output_buffer_size().unwrap();
695
696 if out.len() < expected_size {
697 // too small of a size
698 return Err(DecodeErrors::TooSmallOutput(expected_size, out.len()));
699 }
700
701 // ensure we don't touch anyone else's scratch space
702 let out_len = core::cmp::min(out.len(), expected_size);
703 let out = &mut out[0..out_len];
704
705 if self.is_progressive {
706 self.decode_mcu_ycbcr_progressive(out)
707 } else {
708 self.decode_mcu_ycbcr_baseline(out)
709 }
710 }
711
712 /// Read only headers from a jpeg image buffer
713 ///
714 /// This allows you to extract important information like
715 /// image width and height without decoding the full image
716 ///
717 /// # Examples
718 /// ```no_run
719 /// use zune_jpeg::{JpegDecoder};
720 ///
721 /// let img_data = std::fs::read("a_valid.jpeg").unwrap();
722 /// let mut decoder = JpegDecoder::new(&img_data);
723 /// decoder.decode_headers().unwrap();
724 ///
725 /// println!("Total image dimensions are: {:?} pixels", decoder.dimensions());
726 /// println!("Number of components in the image is {}", decoder.info().unwrap().components);
727 /// ```
728 /// # Errors
729 /// See DecodeErrors enum for list of possible errors during decoding
730 pub fn decode_headers(&mut self) -> Result<(), DecodeErrors> {
731 self.decode_headers_internal()?;
732 Ok(())
733 }
734 /// Create a new decoder with the specified options to be used for decoding
735 /// an image
736 ///
737 /// # Arguments
738 /// - `buf`: The input buffer from where we will pull in compressed jpeg bytes from
739 /// - `options`: Options specific to this decoder instance
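///
/// # Example
/// A minimal sketch; the path and the particular option chosen are only illustrative:
///
/// ```no_run
/// use zune_core::options::DecoderOptions;
/// use zune_jpeg::JpegDecoder;
///
/// let data = std::fs::read("image.jpg").unwrap();
/// // limit the maximum width this decoder instance will accept
/// let options = DecoderOptions::default().set_max_width(1000);
/// let mut decoder = JpegDecoder::new_with_options(&data, options);
/// decoder.decode_headers().unwrap();
/// ```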
740 #[must_use]
741 pub fn new_with_options(buf: T, options: DecoderOptions) -> JpegDecoder<T> {
742 JpegDecoder::default(options, buf)
743 }
744
745 /// Set up-sampling routines in case an image is down sampled
746 pub(crate) fn set_upsampling(&mut self) -> Result<(), DecodeErrors> {
747 // no sampling, return early
748 // check if horizontal max ==1
749 if self.h_max == self.v_max && self.h_max == 1 {
750 return Ok(());
751 }
752 match (self.h_max, self.v_max) {
753 (1, 1) => {
754 self.sub_sample_ratio = SampleRatios::None;
755 }
756 (1, 2) => {
757 self.sub_sample_ratio = SampleRatios::V;
758 }
759 (2, 1) => {
760 self.sub_sample_ratio = SampleRatios::H;
761 }
762 (2, 2) => {
763 self.sub_sample_ratio = SampleRatios::HV;
764 }
765 _ => {
766 return Err(DecodeErrors::Format(
767 "Unknown down-sampling method, cannot continue".to_string()
768 ))
769 }
770 }
771
772 for comp in self.components.iter_mut() {
773 let hs = self.h_max / comp.horizontal_sample;
774 let vs = self.v_max / comp.vertical_sample;
775
776 let samp_factor = match (hs, vs) {
777 (1, 1) => {
778 comp.sample_ratio = SampleRatios::None;
779 upsample_no_op
780 }
781 (2, 1) => {
782 comp.sample_ratio = SampleRatios::H;
783 choose_horizontal_samp_function(self.options.get_use_unsafe())
784 }
785 (1, 2) => {
786 comp.sample_ratio = SampleRatios::V;
787 choose_v_samp_function(self.options.get_use_unsafe())
788 }
789 (2, 2) => {
790 comp.sample_ratio = SampleRatios::HV;
791 choose_hv_samp_function(self.options.get_use_unsafe())
792 }
793 _ => {
794 return Err(DecodeErrors::Format(
795 "Unknown down-sampling method, cannot continue".to_string()
796 ))
797 }
798 };
799 comp.setup_upsample_scanline();
800 comp.up_sampler = samp_factor;
801 }
802
803 return Ok(());
804 }
805 #[must_use]
806 /// Get the width of the image as a u16
807 ///
808 /// The width lies between 1 and 65535
809 pub(crate) fn width(&self) -> u16 {
810 self.info.width
811 }
812
813 /// Get the height of the image as a u16
814 ///
815 /// The height lies between 1 and 65535
816 #[must_use]
817 pub(crate) fn height(&self) -> u16 {
818 self.info.height
819 }
820
821 /// Get image dimensions as a tuple of width and height
822 /// or `None` if the image hasn't been decoded.
823 ///
824 /// # Returns
825 /// - `Some(width,height)`: Image dimensions
826 /// - None : The image headers haven't been decoded
827 #[must_use]
828 pub const fn dimensions(&self) -> Option<(usize, usize)> {
829 return if self.headers_decoded {
830 Some((self.info.width as usize, self.info.height as usize))
831 } else {
832 None
833 };
834 }
835}
836
837/// A struct representing Image Information
838#[derive(Default, Clone, Eq, PartialEq)]
839#[allow(clippy::module_name_repetitions)]
840pub struct ImageInfo {
841 /// Width of the image
842 pub width: u16,
843 /// Height of image
844 pub height: u16,
845 /// Pixel density
846 pub pixel_density: u8,
847 /// Start of frame marker
848 pub sof: SOFMarkers,
849 /// Horizontal sample density
850 pub x_density: u16,
851 /// Vertical sample density
852 pub y_density: u16,
853 /// Number of components
854 pub components: u8
855}
856
857impl ImageInfo {
858 /// Set width of the image
859 ///
860 /// Found in the start of frame
861
862 pub(crate) fn set_width(&mut self, width: u16) {
863 self.width = width;
864 }
865
866 /// Set height of the image
867 ///
868 /// Found in the start of frame
869
870 pub(crate) fn set_height(&mut self, height: u16) {
871 self.height = height;
872 }
873
874 /// Set the image density
875 ///
876 /// Found in the start of frame
877
878 pub(crate) fn set_density(&mut self, density: u8) {
879 self.pixel_density = density;
880 }
881
882 /// Set image Start of frame marker
883 ///
884 /// Found in the Start of frame header
885
886 pub(crate) fn set_sof_marker(&mut self, marker: SOFMarkers) {
887 self.sof = marker;
888 }
889
890 /// Set image x-density (horizontal pixel density)
891 ///
892 /// Found in the APP(0) marker
893 #[allow(dead_code)]
894 pub(crate) fn set_x(&mut self, sample: u16) {
895 self.x_density = sample;
896 }
897
898 /// Set image y-density
899 ///
900 /// Found in the APP(0) marker
901 #[allow(dead_code)]
902 pub(crate) fn set_y(&mut self, sample: u16) {
903 self.y_density = sample;
904 }
905}