1use std::collections::BTreeMap;
20use std::sync::atomic::{AtomicUsize, Ordering};
21use std::sync::Arc;
22
23use jxl_bitstream::Bitstream;
24use jxl_grid::{AllocHandle, AllocTracker};
25use jxl_image::ImageHeader;
26use jxl_oxide_common::Bundle;
27
28pub mod data;
29mod error;
30pub mod filter;
31pub mod header;
32
33pub use error::{Error, Result};
34pub use header::FrameHeader;
35use jxl_modular::Sample;
36use jxl_modular::{image::TransformedModularSubimage, MaConfig};
37use jxl_threadpool::JxlThreadPool;
38
39use crate::data::*;
40
/// A single JPEG XL frame: parsed header, table of contents (TOC), and the
/// (possibly partially loaded) bitstream data of each TOC group.
#[derive(Debug)]
pub struct Frame {
    // Thread pool handed to group parsers.
    pool: JxlThreadPool,
    // Tracks group-buffer allocations when a memory limit is enforced.
    tracker: Option<AllocTracker>,
    image_header: Arc<ImageHeader>,
    header: FrameHeader,
    // Table of contents; maps group kinds to their bitstream order and sizes.
    toc: Toc,
    // Raw bytes for each TOC group, stored in bitstream order.
    data: Vec<GroupData>,
    // Cached section bit offsets for single-group frames (see AllGroupOffsets).
    all_group_offsets: AllGroupOffsets,
    // Index (bitstream order) of the group currently being fed bytes.
    reading_data_index: usize,
    // Maps a pass index to its (minshift, maxshift) range; built in `parse`.
    pass_shifts: BTreeMap<u32, (i32, i32)>,
}
56
/// Bit offsets of the LfGroup / HfGlobal / PassGroup sections inside a
/// single-entry (one group) frame bitstream, filled in lazily as each section
/// is parsed. `has_error` is set to a nonzero stage marker (1 = LfGlobal,
/// 2 = LfGroup, 3 = HfGlobal) once a section fails to parse, so later
/// attempts bail out early with `Error::HadError`.
#[derive(Debug, Default)]
struct AllGroupOffsets {
    lf_group: AtomicUsize,
    hf_global: AtomicUsize,
    pass_group: AtomicUsize,
    has_error: AtomicUsize,
}
64
/// Bitstream data of a single TOC group.
#[derive(Debug)]
struct GroupData {
    // TOC entry describing this group.
    toc_group: TocGroup,
    // Bytes loaded so far; complete once `bytes.len() == toc_group.size`.
    bytes: Vec<u8>,
    // Allocation accounting handle, held while `bytes` is alive (tracker only).
    handle: Option<AllocHandle>,
}
71
72impl From<TocGroup> for GroupData {
73 fn from(value: TocGroup) -> Self {
74 Self {
75 toc_group: value,
76 bytes: Vec::new(),
77 handle: None,
78 }
79 }
80}
81
impl GroupData {
    /// Reserves capacity for the full group (`toc_group.size` bytes).
    ///
    /// With a tracker, the allocation is registered *before* reserving, so a
    /// memory-limit rejection surfaces without touching the buffer; the
    /// returned handle is stored so the accounting lives as long as `bytes`.
    /// Without a tracker, only the missing capacity is reserved.
    ///
    /// # Errors
    /// Fails if the tracker rejects the allocation or the reservation fails.
    fn ensure_allocated(&mut self, tracker: Option<&AllocTracker>) -> Result<()> {
        if let Some(tracker) = tracker {
            // Already registered with the tracker; nothing to do.
            if self.handle.is_some() {
                return Ok(());
            }

            let size = self.toc_group.size as usize;
            let handle = tracker.alloc::<u8>(size)?;
            self.bytes.try_reserve(size)?;
            self.handle = Some(handle);
        } else {
            // Untracked path: top capacity up to the full group size.
            let additional = (self.toc_group.size as usize).saturating_sub(self.bytes.capacity());
            self.bytes.try_reserve(additional)?;
        }

        Ok(())
    }
}
101
/// Context required to parse a [`Frame`] from a bitstream.
#[derive(Debug, Clone)]
pub struct FrameContext<'a> {
    pub image_header: Arc<ImageHeader>,
    // Optional allocation tracker; cloned into the frame during parsing.
    pub tracker: Option<&'a AllocTracker>,
    pub pool: JxlThreadPool,
}
108
109impl Bundle<FrameContext<'_>> for Frame {
110 type Error = crate::Error;
111
112 fn parse(bitstream: &mut Bitstream, ctx: FrameContext) -> Result<Self> {
113 let FrameContext {
114 image_header,
115 tracker,
116 pool,
117 } = ctx;
118 let tracker = tracker.cloned();
119
120 bitstream.zero_pad_to_byte()?;
121 let base_offset = bitstream.num_read_bits() / 8;
122 let header = FrameHeader::parse(bitstream, &image_header)?;
123
124 let width = header.width as u64;
125 let height = header.height as u64;
126 if width > (1 << 30) {
127 tracing::error!(width, "Frame width too large; limit is 2^30");
128 return Err(jxl_bitstream::Error::ProfileConformance("frame width too large").into());
129 }
130 if height > (1 << 30) {
131 tracing::error!(width, "Frame height too large; limit is 2^30");
132 return Err(jxl_bitstream::Error::ProfileConformance("frame height too large").into());
133 }
134 if (width * height) > (1 << 40) {
135 tracing::error!(
136 area = width * height,
137 "Frame area (width * height) too large; limit is 2^40"
138 );
139 return Err(jxl_bitstream::Error::ProfileConformance("frame area too large").into());
140 }
141
142 let has_extra = !header.ec_blending_info.is_empty();
143 for blending_info in std::iter::once(&header.blending_info).chain(&header.ec_blending_info)
144 {
145 if blending_info.mode.use_alpha() && has_extra {
146 let alpha_idx = blending_info.alpha_channel as usize;
147 let Some(alpha_ec_info) = image_header.metadata.ec_info.get(alpha_idx) else {
148 tracing::error!(?blending_info, "blending_info.alpha_channel out of range");
149 return Err(jxl_bitstream::Error::ValidationFailed(
150 "blending_info.alpha_channel out of range",
151 )
152 .into());
153 };
154 if !alpha_ec_info.is_alpha() {
155 tracing::error!(
156 ?blending_info,
157 ?alpha_ec_info,
158 "blending_info.alpha_channel is not the type of Alpha",
159 );
160 return Err(jxl_bitstream::Error::ValidationFailed(
161 "blending_info.alpha_channel is not the type of Alpha",
162 )
163 .into());
164 }
165 }
166 }
167
168 if header.flags.use_lf_frame() && header.lf_level >= 4 {
169 return Err(jxl_bitstream::Error::ValidationFailed("lf_level out of range").into());
170 }
171
172 let color_upsampling_shift = header.upsampling.trailing_zeros();
173 for (ec_upsampling, ec_info) in header
174 .ec_upsampling
175 .iter()
176 .zip(image_header.metadata.ec_info.iter())
177 {
178 let ec_upsampling_shift = ec_upsampling.trailing_zeros();
179 let dim_shift = ec_info.dim_shift;
180
181 if ec_upsampling_shift + dim_shift < color_upsampling_shift {
182 return Err(jxl_bitstream::Error::ValidationFailed(
183 "EC upsampling < color upsampling, which is invalid",
184 )
185 .into());
186 }
187
188 if ec_upsampling_shift + dim_shift > 6 {
189 tracing::error!(
190 ec_upsampling,
191 dim_shift = ec_info.dim_shift,
192 "Cumulative EC upsampling factor is too large"
193 );
194 return Err(jxl_bitstream::Error::ValidationFailed(
195 "cumulative EC upsampling factor is too large",
196 )
197 .into());
198 }
199
200 let actual_dim_shift = ec_upsampling_shift + dim_shift - color_upsampling_shift;
201
202 if actual_dim_shift > 7 + header.group_size_shift {
203 return Err(jxl_bitstream::Error::ValidationFailed("dim_shift too large").into());
204 }
205 }
206
207 if header.width == 0 || header.height == 0 {
208 return Err(jxl_bitstream::Error::ValidationFailed(
209 "Invalid crop dimensions for frame: zero width or height",
210 )
211 .into());
212 }
213
214 let mut toc = Toc::parse(bitstream, &header)?;
215 toc.adjust_offsets(base_offset);
216 let data = toc.iter_bitstream_order().map(GroupData::from).collect();
217
218 let passes = &header.passes;
219 let mut pass_shifts = BTreeMap::new();
220 let mut maxshift = 3i32;
221 for (&downsample, &last_pass) in passes.downsample.iter().zip(&passes.last_pass) {
222 let minshift = downsample.trailing_zeros() as i32;
223 pass_shifts.insert(last_pass, (minshift, maxshift));
224 maxshift = minshift;
225 }
226 pass_shifts.insert(header.passes.num_passes - 1, (0i32, maxshift));
227
228 Ok(Self {
229 pool,
230 tracker,
231 image_header,
232 header,
233 toc,
234 data,
235 all_group_offsets: AllGroupOffsets::default(),
236 reading_data_index: 0,
237 pass_shifts,
238 })
239 }
240}
241
242impl Frame {
243 #[inline]
244 pub fn alloc_tracker(&self) -> Option<&AllocTracker> {
245 self.tracker.as_ref()
246 }
247
248 pub fn image_header(&self) -> &ImageHeader {
249 &self.image_header
250 }
251
252 pub fn clone_image_header(&self) -> Arc<ImageHeader> {
253 Arc::clone(&self.image_header)
254 }
255
256 pub fn header(&self) -> &FrameHeader {
258 &self.header
259 }
260
261 pub fn toc(&self) -> &Toc {
265 &self.toc
266 }
267
268 pub fn pass_shifts(&self) -> &BTreeMap<u32, (i32, i32)> {
269 &self.pass_shifts
270 }
271
272 pub fn data(&self, group: TocGroupKind) -> Option<&[u8]> {
273 let idx = self.toc.group_index_bitstream_order(group);
274 self.data.get(idx).map(|b| &*b.bytes)
275 }
276}
277
278impl Frame {
279 pub fn feed_bytes<'buf>(&mut self, mut buf: &'buf [u8]) -> Result<&'buf [u8]> {
280 while let Some(group_data) = self.data.get_mut(self.reading_data_index) {
281 group_data.ensure_allocated(self.tracker.as_ref())?;
282 let bytes_left = group_data.toc_group.size as usize - group_data.bytes.len();
283 if buf.len() < bytes_left {
284 group_data.bytes.extend_from_slice(buf);
285 return Ok(&[]);
286 }
287 let (l, r) = buf.split_at(bytes_left);
288 group_data.bytes.extend_from_slice(l);
289 buf = r;
290 self.reading_data_index += 1;
291 }
292 Ok(buf)
293 }
294
295 #[inline]
296 pub fn current_loading_group(&self) -> Option<TocGroup> {
297 self.toc.iter_bitstream_order().nth(self.reading_data_index)
298 }
299
300 #[inline]
301 pub fn is_loading_done(&self) -> bool {
302 self.reading_data_index >= self.data.len()
303 }
304}
305
impl Frame {
    /// Parses the LfGlobal section.
    ///
    /// Returns `None` when the backing group is not in the TOC, and
    /// `Some(Err(_))` when parsing fails. For single-entry TOCs, the bit
    /// offset of the following LfGroup section is cached on success.
    pub fn try_parse_lf_global<S: Sample>(&self) -> Option<Result<LfGlobal<S>>> {
        Some(if self.toc.is_single_entry() {
            // Single-group frame: all sections live in group 0; a previously
            // recorded failure short-circuits every later attempt.
            if self.all_group_offsets.has_error.load(Ordering::Relaxed) != 0 {
                return Some(Err(Error::HadError));
            }

            let group = self.data.first()?;
            // `reading_data_index` moves past 0 once group 0 is fully loaded.
            let loaded = self.reading_data_index != 0;
            let mut bitstream = Bitstream::new(&group.bytes);
            let lf_global = LfGlobal::parse(
                &mut bitstream,
                LfGlobalParams::new(
                    &self.image_header,
                    &self.header,
                    self.tracker.as_ref(),
                    false,
                ),
            );
            match lf_global {
                Ok(lf_global) => {
                    tracing::trace!(num_read_bits = bitstream.num_read_bits(), "LfGlobal");
                    // LfGroup parsing resumes right after LfGlobal.
                    self.all_group_offsets
                        .lf_group
                        .store(bitstream.num_read_bits(), Ordering::Relaxed);
                    Ok(lf_global)
                }
                // EOF on partially loaded data is expected; don't latch an error.
                Err(e) if !loaded && e.unexpected_eof() => Err(e),
                Err(e) => {
                    self.all_group_offsets.has_error.store(1, Ordering::Relaxed);
                    Err(e)
                }
            }
        } else {
            let idx = self.toc.group_index_bitstream_order(TocGroupKind::LfGlobal);
            let group = self.data.get(idx)?;
            // Let the parser tolerate a truncated stream while still loading.
            let allow_partial = group.bytes.len() < group.toc_group.size as usize;

            let mut bitstream = Bitstream::new(&group.bytes);
            LfGlobal::parse(
                &mut bitstream,
                LfGlobalParams::new(
                    &self.image_header,
                    &self.header,
                    self.tracker.as_ref(),
                    allow_partial,
                ),
            )
        })
    }

    /// Parses the LfGroup section with index `lf_group_idx`.
    ///
    /// Returns `None` when the group is unavailable (or, in the single-entry
    /// case, when `lf_group_idx != 0`). For single-entry TOCs, the LfGlobal
    /// section is parsed first if its end offset is not cached yet, and the
    /// bit offset of the following HfGlobal section is cached on success.
    pub fn try_parse_lf_group<S: Sample>(
        &self,
        lf_global_vardct: Option<&LfGlobalVarDct>,
        global_ma_config: Option<&MaConfig>,
        mlf_group: Option<TransformedModularSubimage<S>>,
        lf_group_idx: u32,
    ) -> Option<Result<LfGroup<S>>> {
        if self.toc.is_single_entry() {
            if self.all_group_offsets.has_error.load(Ordering::Relaxed) != 0 {
                return Some(Err(Error::HadError));
            }

            // A single-group frame has exactly one LF group.
            if lf_group_idx != 0 {
                return None;
            }

            let group = self.data.first()?;
            let loaded = self.reading_data_index != 0;
            let mut bitstream = Bitstream::new(&group.bytes);
            let offset = self.all_group_offsets.lf_group.load(Ordering::Relaxed);
            if offset == 0 {
                // LfGlobal not parsed yet; parse it to discover where the
                // LfGroup section starts.
                let lf_global = self.try_parse_lf_global::<S>().unwrap();
                if let Err(e) = lf_global {
                    return Some(Err(e));
                }
            }
            // Re-read the offset: try_parse_lf_global stored it on success.
            let offset = self.all_group_offsets.lf_group.load(Ordering::Relaxed);
            bitstream.skip_bits(offset).unwrap();

            let result = LfGroup::parse(
                &mut bitstream,
                LfGroupParams {
                    frame_header: &self.header,
                    quantizer: lf_global_vardct.map(|x| &x.quantizer),
                    global_ma_config,
                    mlf_group,
                    lf_group_idx,
                    allow_partial: !loaded,
                    tracker: self.tracker.as_ref(),
                    pool: &self.pool,
                },
            );

            match result {
                Ok(result) => {
                    tracing::trace!(num_read_bits = bitstream.num_read_bits(), "LfGroup");
                    // HfGlobal parsing resumes right after LfGroup.
                    self.all_group_offsets
                        .hf_global
                        .store(bitstream.num_read_bits(), Ordering::Relaxed);
                    Some(Ok(result))
                }
                // EOF while still loading: report "not available yet".
                Err(e) if !loaded && e.unexpected_eof() => None,
                Err(e) => {
                    self.all_group_offsets.has_error.store(2, Ordering::Relaxed);
                    Some(Err(e))
                }
            }
        } else {
            let idx = self
                .toc
                .group_index_bitstream_order(TocGroupKind::LfGroup(lf_group_idx));
            let group = self.data.get(idx)?;
            let allow_partial = group.bytes.len() < group.toc_group.size as usize;

            let mut bitstream = Bitstream::new(&group.bytes);
            let result = LfGroup::parse(
                &mut bitstream,
                LfGroupParams {
                    frame_header: &self.header,
                    quantizer: lf_global_vardct.map(|x| &x.quantizer),
                    global_ma_config,
                    mlf_group,
                    lf_group_idx,
                    allow_partial,
                    tracker: self.tracker.as_ref(),
                    pool: &self.pool,
                },
            );
            // A failure on incomplete data is treated as "not available yet".
            if allow_partial && result.is_err() {
                return None;
            }
            Some(result)
        }
    }

    /// Parses the HfGlobal section.
    ///
    /// Returns `None` for Modular-encoded frames (which have no HfGlobal) or
    /// when the group is unavailable. `cached_lf_global` avoids re-parsing
    /// LfGlobal when the caller already has it. For single-entry TOCs, the
    /// bit offset of the pass group is cached on success.
    pub fn try_parse_hf_global<S: Sample>(
        &self,
        cached_lf_global: Option<&LfGlobal<S>>,
    ) -> Option<Result<HfGlobal>> {
        let is_modular = self.header.encoding == header::Encoding::Modular;

        if self.toc.is_single_entry() {
            if self.all_group_offsets.has_error.load(Ordering::Relaxed) != 0 {
                return Some(Err(Error::HadError));
            }

            let group = self.data.first()?;
            let loaded = self.reading_data_index != 0;
            let mut bitstream = Bitstream::new(&group.bytes);
            let offset = self.all_group_offsets.hf_global.load(Ordering::Relaxed);
            // Parse LfGlobal ourselves when it wasn't supplied and we either
            // still need the section offsets or need it for HfGlobal params.
            let lf_global = if cached_lf_global.is_none() && (offset == 0 || !is_modular) {
                match self.try_parse_lf_global()? {
                    Ok(lf_global) => Some(lf_global),
                    Err(e) => return Some(Err(e)),
                }
            } else {
                None
            };
            let lf_global = cached_lf_global.or(lf_global.as_ref());

            if offset == 0 {
                // HfGlobal offset unknown: parse the LfGroup section (which
                // records it) using a modular subimage prepared from a clone
                // of the global modular image.
                let lf_global = lf_global.unwrap();
                let mut gmodular = match lf_global.gmodular.try_clone() {
                    Ok(gmodular) => gmodular,
                    Err(e) => return Some(Err(e)),
                };
                let groups = gmodular
                    .modular
                    .image_mut()
                    .map(|x| x.prepare_groups(&self.pass_shifts))
                    .transpose();
                let groups = match groups {
                    Ok(groups) => groups,
                    Err(e) => return Some(Err(e.into())),
                };
                let mlf_group = groups.and_then(|mut x| x.lf_groups.pop());
                let lf_group = self
                    .try_parse_lf_group(
                        lf_global.vardct.as_ref(),
                        lf_global.gmodular.ma_config(),
                        mlf_group,
                        0,
                    )
                    .ok_or(
                        jxl_bitstream::Error::Io(std::io::ErrorKind::UnexpectedEof.into()).into(),
                    )
                    .and_then(|x| x);
                if let Err(e) = lf_group {
                    return Some(Err(e));
                }
            }
            // Re-read: try_parse_lf_group stored the offset on success.
            let offset = self.all_group_offsets.hf_global.load(Ordering::Relaxed);

            if self.header.encoding == header::Encoding::Modular {
                // Modular frames skip HfGlobal; the pass group starts here.
                self.all_group_offsets
                    .pass_group
                    .store(offset, Ordering::Relaxed);
                return None;
            }

            bitstream.skip_bits(offset).unwrap();
            let lf_global = lf_global.unwrap();
            let result = HfGlobal::parse(
                &mut bitstream,
                HfGlobalParams::new(
                    &self.image_header.metadata,
                    &self.header,
                    lf_global,
                    self.tracker.as_ref(),
                    &self.pool,
                ),
            );

            Some(match result {
                Ok(result) => {
                    // Pass group parsing resumes right after HfGlobal.
                    self.all_group_offsets
                        .pass_group
                        .store(bitstream.num_read_bits(), Ordering::Relaxed);
                    Ok(result)
                }
                Err(e) if !loaded && e.unexpected_eof() => Err(e),
                Err(e) => {
                    self.all_group_offsets.has_error.store(3, Ordering::Relaxed);
                    Err(e)
                }
            })
        } else {
            if self.header.encoding == header::Encoding::Modular {
                return None;
            }

            let idx = self.toc.group_index_bitstream_order(TocGroupKind::HfGlobal);
            let group = self.data.get(idx)?;
            // HfGlobal is only parsed from a fully loaded group.
            if group.bytes.len() < group.toc_group.size as usize {
                return None;
            }

            let mut bitstream = Bitstream::new(&group.bytes);
            let lf_global = if cached_lf_global.is_none() {
                match self.try_parse_lf_global()? {
                    Ok(lf_global) => Some(lf_global),
                    Err(e) => return Some(Err(e)),
                }
            } else {
                None
            };
            let lf_global = cached_lf_global.or(lf_global.as_ref()).unwrap();
            let params = HfGlobalParams::new(
                &self.image_header.metadata,
                &self.header,
                lf_global,
                self.tracker.as_ref(),
                &self.pool,
            );
            Some(HfGlobal::parse(&mut bitstream, params))
        }
    }

    /// Returns a bitstream positioned at the start of the given pass group,
    /// or `None` when the group is unavailable (for single-entry TOCs, any
    /// indices other than pass 0 / group 0).
    pub fn pass_group_bitstream(
        &self,
        pass_idx: u32,
        group_idx: u32,
    ) -> Option<Result<PassGroupBitstream>> {
        Some(if self.toc.is_single_entry() {
            if self.all_group_offsets.has_error.load(Ordering::Relaxed) != 0 {
                return Some(Err(Error::HadError));
            }

            // A single-group frame has exactly one pass group.
            if pass_idx != 0 || group_idx != 0 {
                return None;
            }

            let group = self.data.first()?;
            let loaded = self.reading_data_index != 0;
            let mut bitstream = Bitstream::new(&group.bytes);
            let mut offset = self.all_group_offsets.pass_group.load(Ordering::Relaxed);
            if offset == 0 {
                // Offset unknown: parsing HfGlobal records it (also for
                // Modular frames, where try_parse_hf_global returns None and
                // this `?` propagates that as "unavailable").
                let hf_global = self.try_parse_hf_global::<i32>(None)?;
                if let Err(e) = hf_global {
                    return Some(Err(e));
                }
                offset = self.all_group_offsets.pass_group.load(Ordering::Relaxed);
            }
            bitstream.skip_bits(offset).unwrap();

            Ok(PassGroupBitstream {
                bitstream,
                partial: !loaded,
            })
        } else {
            let idx = self
                .toc
                .group_index_bitstream_order(TocGroupKind::GroupPass {
                    pass_idx,
                    group_idx,
                });
            let group = self.data.get(idx)?;
            let partial = group.bytes.len() < group.toc_group.size as usize;

            let bitstream = Bitstream::new(&group.bytes);
            Ok(PassGroupBitstream { bitstream, partial })
        })
    }
}
611
/// A bitstream positioned at the start of a pass group.
#[derive(Debug)]
pub struct PassGroupBitstream<'buf> {
    pub bitstream: Bitstream<'buf>,
    /// True when the backing group data is only partially loaded, so parsing
    /// may hit an unexpected end of stream.
    pub partial: bool,
}
617
618impl Frame {
619 pub fn adjust_region(&self, (left, top, width, height): &mut (u32, u32, u32, u32)) {
629 if self.header.have_crop {
630 *left = left.saturating_add_signed(-self.header.x0);
631 *top = top.saturating_add_signed(-self.header.y0);
632 };
633
634 let mut padding = 0u32;
635 if self.header.restoration_filter.gab.enabled() {
636 tracing::debug!("Gabor-like filter requires padding of 1 pixel");
637 padding = 1;
638 }
639 if self.header.restoration_filter.epf.enabled() {
640 tracing::debug!("Edge-preserving filter requires padding of 3 pixels");
641 padding = 3;
642 }
643 if padding > 0 {
644 let delta_w = (*left).min(padding);
645 let delta_h = (*top).min(padding);
646 *left -= delta_w;
647 *top -= delta_h;
648 *width += delta_w + padding;
649 *height += delta_h + padding;
650 }
651 }
652}