jxl_vardct/hf_pass.rs

use jxl_bitstream::{Bitstream, U};
use jxl_coding::Decoder;
use jxl_oxide_common::Bundle;

/// Parameters for decoding `HfPass`.
#[derive(Debug, Copy, Clone)]
pub struct HfPassParams<'a> {
    hf_block_ctx: &'a crate::HfBlockContext,
    num_hf_presets: u32,
}

impl<'a> HfPassParams<'a> {
    pub fn new(hf_block_ctx: &'a crate::HfBlockContext, num_hf_presets: u32) -> Self {
        Self {
            hf_block_ctx,
            num_hf_presets,
        }
    }
}

/// HF coefficient decoder configuration.
///
/// Includes the distributions for the entropy decoder and the order of HF coefficients. This
/// struct is passed as a parameter when [decoding HF coefficients][crate::write_hf_coeff].
#[derive(Debug)]
pub struct HfPass {
    permutation: [[Vec<(u16, u16)>; 3]; 13],
    hf_dist: Decoder,
}

impl Bundle<HfPassParams<'_>> for HfPass {
    type Error = crate::Error;

    fn parse(bitstream: &mut Bitstream, params: HfPassParams<'_>) -> crate::Result<Self> {
        let HfPassParams {
            hf_block_ctx,
            num_hf_presets,
        } = params;
        // Bitmask of order IDs for which a custom coefficient order is signalled.
        let mut used_orders = bitstream.read_u32(0x5F, 0x13, 0x00, U(13))?;
        let mut decoder = (used_orders != 0)
            .then(|| Decoder::parse(bitstream, 8))
            .transpose()?;

        let mut permutation: [_; 13] =
            std::array::from_fn(|_| [Vec::new(), Vec::new(), Vec::new()]);
        if let Some(decoder) = &mut decoder {
            let it = permutation.iter_mut().zip(BLOCK_SIZES).enumerate();
            for (idx, (permutation, (bw, bh))) in it {
                if used_orders & 1 != 0 {
                    let size = (bw * bh) as u32;
                    // The first `size / 64` entries are the LF positions,
                    // which are skipped in the permutation.
                    let skip = size / 64;
                    for permutation in permutation {
                        // A custom order is coded as a permutation applied to
                        // the natural (zigzag) order of the block.
                        let perm = jxl_coding::read_permutation(bitstream, decoder, size, skip)?;
                        let nat = natural_order_lazy(idx);
                        for idx in perm {
                            permutation.push(nat[idx]);
                        }
                    }
                }

                used_orders >>= 1;
            }
            decoder.finalize()?;
        }

        // 495 distribution contexts per HF preset per block cluster.
        let hf_dist = Decoder::parse(
            bitstream,
            495 * num_hf_presets * hf_block_ctx.num_block_clusters,
        )?;

        Ok(Self {
            permutation,
            hf_dist,
        })
    }
}
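
// A hedged sketch of how a caller might drive the `Bundle` implementation
// above; `parse_hf_pass_sketch` and its arguments are illustrative, not part
// of the decoder.
#[allow(dead_code)]
fn parse_hf_pass_sketch(
    bitstream: &mut Bitstream,
    hf_block_ctx: &crate::HfBlockContext,
    num_hf_presets: u32,
) -> crate::Result<HfPass> {
    let params = HfPassParams::new(hf_block_ctx, num_hf_presets);
    HfPass::parse(bitstream, params)
}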

impl HfPass {
    #[inline]
    pub(crate) fn clone_decoder(&self) -> Decoder {
        self.hf_dist.clone()
    }

    #[inline]
    pub(crate) fn order(&self, order_id: usize, channel: usize) -> &[(u16, u16)] {
        let permutation = &self.permutation[order_id][channel];
        if permutation.is_empty() {
            // No custom order was signalled; fall back to the natural order.
            natural_order_lazy(order_id)
        } else {
            permutation
        }
    }
}
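
// A minimal usage sketch for `order`: gathering an 8x8 coefficient block in
// coding order. `coeffs` is a hypothetical row-major buffer; the real
// consumer is the HF coefficient decoder in this crate.
#[allow(dead_code)]
fn gather_in_coding_order(hf_pass: &HfPass, coeffs: &[i32; 64]) -> Vec<i32> {
    hf_pass
        .order(0, 0) // order ID 0 is the 8x8 block class; channels are 0..3
        .iter()
        .map(|&(x, y)| coeffs[y as usize * 8 + x as usize])
        .collect()
}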

// Block sizes (width, height) for each of the 13 order IDs. The first 9
// have precomputed natural orders; the larger ones are computed lazily.
const BLOCK_SIZES: [(usize, usize); 13] = [
    (8, 8),
    (8, 8),
    (16, 16),
    (32, 32),
    (16, 8),
    (32, 8),
    (32, 16),
    (64, 64),
    (64, 32),
    (128, 128),
    (128, 64),
    (256, 256),
    (256, 128),
];

const NATURAL_ORDER: [&[(u16, u16)]; 9] = [
    &const_compute_natural_order::<{ BLOCK_SIZES[0].0 * BLOCK_SIZES[0].1 }>(BLOCK_SIZES[0]),
    &const_compute_natural_order::<{ BLOCK_SIZES[1].0 * BLOCK_SIZES[1].1 }>(BLOCK_SIZES[1]),
    &const_compute_natural_order::<{ BLOCK_SIZES[2].0 * BLOCK_SIZES[2].1 }>(BLOCK_SIZES[2]),
    &const_compute_natural_order::<{ BLOCK_SIZES[3].0 * BLOCK_SIZES[3].1 }>(BLOCK_SIZES[3]),
    &const_compute_natural_order::<{ BLOCK_SIZES[4].0 * BLOCK_SIZES[4].1 }>(BLOCK_SIZES[4]),
    &const_compute_natural_order::<{ BLOCK_SIZES[5].0 * BLOCK_SIZES[5].1 }>(BLOCK_SIZES[5]),
    &const_compute_natural_order::<{ BLOCK_SIZES[6].0 * BLOCK_SIZES[6].1 }>(BLOCK_SIZES[6]),
    &const_compute_natural_order::<{ BLOCK_SIZES[7].0 * BLOCK_SIZES[7].1 }>(BLOCK_SIZES[7]),
    &const_compute_natural_order::<{ BLOCK_SIZES[8].0 * BLOCK_SIZES[8].1 }>(BLOCK_SIZES[8]),
];

pub const DCT8_NATURAL_ORDER: &[(u16, u16)] = NATURAL_ORDER[0];

fn natural_order_lazy(idx: usize) -> &'static [(u16, u16)] {
    if idx >= 13 {
        panic!("Order ID out of bounds");
    }
    let block_size = BLOCK_SIZES[idx];
    let Some(idx) = idx.checked_sub(NATURAL_ORDER.len()) else {
        return NATURAL_ORDER[idx];
    };

    // Orders for the four largest block sizes are computed on first use and
    // cached; `OnceLock` (stable since Rust 1.70) handles the one-time
    // thread-safe initialization.
    static LARGE_NATURAL_ORDER: [std::sync::OnceLock<Vec<(u16, u16)>>; 4] = [
        std::sync::OnceLock::new(),
        std::sync::OnceLock::new(),
        std::sync::OnceLock::new(),
        std::sync::OnceLock::new(),
    ];

    LARGE_NATURAL_ORDER[idx].get_or_init(|| {
        let mut natural_order = vec![(0, 0); block_size.0 * block_size.1];
        fill_natural_order(block_size, &mut natural_order);
        natural_order
    })
}

const fn const_compute_natural_order<const N: usize>((bw, bh): (usize, usize)) -> [(u16, u16); N] {
    // Wide blocks (bw >= bh) are scanned as a virtual bw x bw square, with
    // the y coordinate scaled down by `y_scale` afterwards.
    let y_scale = bw / bh;

    let mut ret = [(0u16, 0u16); N];
    let mut idx = 0usize;
    let lbw = bw / 8;
    let lbh = bh / 8;

    // The low-frequency (top-left lbw x lbh) coefficients come first, in
    // raster order.
    while idx < lbw * lbh {
        let x = idx % lbw;
        let y = idx / lbw;
        ret[idx] = (x as u16, y as u16);
        idx += 1;
    }

    // The remaining coefficients follow in a zigzag scan over the
    // anti-diagonals of the virtual square, alternating direction.
    let mut dist = 1usize;
    while dist < 2 * bw {
        let margin = dist.saturating_sub(bw);
        let mut order = margin;
        while order < dist - margin {
            let (x, y) = if dist % 2 == 1 {
                (order, dist - 1 - order)
            } else {
                (dist - 1 - order, order)
            };
            order += 1;

            // Skip the low-frequency corner emitted above; in virtual
            // coordinates it spans y < lbh * y_scale == lbw.
            if x < lbw && y < lbw {
                continue;
            }
            // Keep one row out of every `y_scale` rows of the virtual square.
            if y % y_scale != 0 {
                continue;
            }
            ret[idx] = (x as u16, (y / y_scale) as u16);
            idx += 1;
        }
        dist += 1;
    }

    ret
}

/// Runtime counterpart of `const_compute_natural_order`, used for block sizes
/// without a precomputed table; see that function for the scan layout.
fn fill_natural_order((bw, bh): (usize, usize), output: &mut [(u16, u16)]) {
    let y_scale = bw / bh;

    let mut idx = 0usize;
    let lbw = bw / 8;
    let lbh = bh / 8;

    // Low-frequency coefficients first, in raster order.
    while idx < lbw * lbh {
        let x = idx % lbw;
        let y = idx / lbw;
        output[idx] = (x as u16, y as u16);
        idx += 1;
    }

    // Then the zigzag scan over the anti-diagonals of the virtual square.
    for dist in 1..(2 * bw) {
        let margin = dist.saturating_sub(bw);
        for order in margin..(dist - margin) {
            let (x, y) = if dist % 2 == 1 {
                (order, dist - 1 - order)
            } else {
                (dist - 1 - order, order)
            };

            if x < lbw && y < lbw {
                continue;
            }
            if y % y_scale != 0 {
                continue;
            }
            output[idx] = (x as u16, (y / y_scale) as u16);
            idx += 1;
        }
    }
}
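
// A small self-check sketch: it pins the start of the 8x8 natural order to
// the classic zigzag scan and cross-checks the const-evaluated tables and
// the lazily computed ones against `fill_natural_order`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn dct8_order_starts_with_zigzag() {
        // First ten positions of the zigzag scan, derived by hand.
        let expected: [(u16, u16); 10] = [
            (0, 0),
            (1, 0),
            (0, 1),
            (0, 2),
            (1, 1),
            (2, 0),
            (3, 0),
            (2, 1),
            (1, 2),
            (0, 3),
        ];
        assert_eq!(&DCT8_NATURAL_ORDER[..expected.len()], &expected);
    }

    #[test]
    fn runtime_order_matches_const_order() {
        // `fill_natural_order` and `const_compute_natural_order` implement
        // the same scan, so they must agree on the precomputed sizes.
        for (idx, &(bw, bh)) in BLOCK_SIZES.iter().enumerate().take(NATURAL_ORDER.len()) {
            let mut buf = vec![(0u16, 0u16); bw * bh];
            fill_natural_order((bw, bh), &mut buf);
            assert_eq!(&buf[..], NATURAL_ORDER[idx], "order ID {idx}");
        }
    }

    #[test]
    fn lazy_order_matches_runtime_order() {
        // Large block sizes are computed on demand; they should match a
        // fresh run of `fill_natural_order`.
        for idx in NATURAL_ORDER.len()..BLOCK_SIZES.len() {
            let (bw, bh) = BLOCK_SIZES[idx];
            let mut buf = vec![(0u16, 0u16); bw * bh];
            fill_natural_order((bw, bh), &mut buf);
            assert_eq!(&buf[..], natural_order_lazy(idx), "order ID {idx}");
        }
    }
}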