/src/libjxl/lib/jxl/modular/encoding/encoding.cc
Line | Count | Source |
1 | | // Copyright (c) the JPEG XL Project Authors. All rights reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style |
4 | | // license that can be found in the LICENSE file. |
5 | | |
6 | | #include "lib/jxl/modular/encoding/encoding.h" |
7 | | |
8 | | #include <jxl/memory_manager.h> |
9 | | |
10 | | #include <algorithm> |
11 | | #include <array> |
12 | | #include <cstddef> |
13 | | #include <cstdint> |
14 | | #include <cstdlib> |
15 | | #include <queue> |
16 | | #include <utility> |
17 | | #include <vector> |
18 | | |
19 | | #include "lib/jxl/base/printf_macros.h" |
20 | | #include "lib/jxl/base/scope_guard.h" |
21 | | #include "lib/jxl/base/status.h" |
22 | | #include "lib/jxl/dec_ans.h" |
23 | | #include "lib/jxl/dec_bit_reader.h" |
24 | | #include "lib/jxl/frame_dimensions.h" |
25 | | #include "lib/jxl/image_ops.h" |
26 | | #include "lib/jxl/modular/encoding/context_predict.h" |
27 | | #include "lib/jxl/modular/options.h" |
28 | | #include "lib/jxl/pack_signed.h" |
29 | | |
30 | | namespace jxl { |
31 | | |
32 | | // Removes all nodes that use a static property (i.e. channel or group ID) from |
33 | | // the tree and collapses each node on even levels with its two children to |
34 | | // produce a flatter tree. Also computes whether the resulting tree requires |
35 | | // using the weighted predictor. |
36 | | FlatTree FilterTree(const Tree &global_tree, |
37 | | std::array<pixel_type, kNumStaticProperties> &static_props, |
38 | | size_t *num_props, bool *use_wp, bool *wp_only, |
39 | 192k | bool *gradient_only) { |
40 | 192k | *num_props = 0; |
41 | 192k | bool has_wp = false; |
42 | 192k | bool has_non_wp = false; |
43 | 192k | *gradient_only = true; |
44 | 216k | const auto mark_property = [&](int32_t p) { |
45 | 216k | if (p == kWPProp) { |
46 | 10.0k | has_wp = true; |
47 | 206k | } else if (p >= kNumStaticProperties) { |
48 | 131k | has_non_wp = true; |
49 | 131k | } |
50 | 216k | if (p >= kNumStaticProperties && p != kGradientProp) { |
51 | 129k | *gradient_only = false; |
52 | 129k | } |
53 | 216k | }; |
54 | 192k | FlatTree output; |
55 | 192k | std::queue<size_t> nodes; |
56 | 192k | nodes.push(0); |
57 | | // Produces a trimmed and flattened tree by doing a BFS visit of the original |
58 | | // tree, ignoring branches that are known to be false and proceeding two |
59 | | // levels at a time to collapse nodes in a flatter tree; if an inner parent |
60 | | // node has a leaf as a child, the leaf is duplicated and an implicit fake |
61 | | // node is added. This allows reducing the number of branches when traversing |
62 | | // the resulting flat tree. |
63 | 669k | while (!nodes.empty()) { |
64 | 477k | size_t cur = nodes.front(); |
65 | 477k | nodes.pop(); |
66 | | // Skip nodes that we can decide now, by jumping directly to their children. |
67 | 492k | while (global_tree[cur].property < kNumStaticProperties && |
68 | 492k | global_tree[cur].property != -1) { |
69 | 15.2k | if (static_props[global_tree[cur].property] > global_tree[cur].splitval) { |
70 | 7.72k | cur = global_tree[cur].lchild; |
71 | 7.72k | } else { |
72 | 7.56k | cur = global_tree[cur].rchild; |
73 | 7.56k | } |
74 | 15.2k | } |
75 | 477k | FlatDecisionNode flat; |
76 | 477k | if (global_tree[cur].property == -1) { |
77 | 397k | flat.property0 = -1; |
78 | 397k | flat.childID = global_tree[cur].lchild; |
79 | 397k | flat.predictor = global_tree[cur].predictor; |
80 | 397k | flat.predictor_offset = global_tree[cur].predictor_offset; |
81 | 397k | flat.multiplier = global_tree[cur].multiplier; |
82 | 397k | *gradient_only &= flat.predictor == Predictor::Gradient; |
83 | 397k | has_wp |= flat.predictor == Predictor::Weighted; |
84 | 397k | has_non_wp |= flat.predictor != Predictor::Weighted; |
85 | 397k | output.push_back(flat); |
86 | 397k | continue; |
87 | 397k | } |
88 | 79.4k | flat.childID = output.size() + nodes.size() + 1; |
89 | | |
90 | 79.4k | flat.property0 = global_tree[cur].property; |
91 | 79.4k | *num_props = std::max<size_t>(flat.property0 + 1, *num_props); |
92 | 79.4k | flat.splitval0 = global_tree[cur].splitval; |
93 | | |
94 | 222k | for (size_t i = 0; i < 2; i++) { |
95 | 143k | size_t cur_child = |
96 | 143k | i == 0 ? global_tree[cur].lchild : global_tree[cur].rchild; |
97 | | // Skip nodes that we can decide now. |
98 | 158k | while (global_tree[cur_child].property < kNumStaticProperties && |
99 | 158k | global_tree[cur_child].property != -1) { |
100 | 15.4k | if (static_props[global_tree[cur_child].property] > |
101 | 15.4k | global_tree[cur_child].splitval) { |
102 | 9.63k | cur_child = global_tree[cur_child].lchild; |
103 | 9.63k | } else { |
104 | 5.82k | cur_child = global_tree[cur_child].rchild; |
105 | 5.82k | } |
106 | 15.4k | } |
107 | | // We ended up in a leaf, add a placeholder decision and two copies of the |
108 | | // leaf. |
109 | 143k | if (global_tree[cur_child].property == -1) { |
110 | 76.7k | flat.properties[i] = 0; |
111 | 76.7k | flat.splitvals[i] = 0; |
112 | 76.7k | nodes.push(cur_child); |
113 | 76.7k | nodes.push(cur_child); |
114 | 76.7k | } else { |
115 | 66.2k | flat.properties[i] = global_tree[cur_child].property; |
116 | 66.2k | flat.splitvals[i] = global_tree[cur_child].splitval; |
117 | 66.2k | nodes.push(global_tree[cur_child].lchild); |
118 | 66.2k | nodes.push(global_tree[cur_child].rchild); |
119 | 66.2k | *num_props = std::max<size_t>(flat.properties[i] + 1, *num_props); |
120 | 66.2k | } |
121 | 143k | } |
122 | | |
123 | 144k | for (int16_t property : flat.properties) mark_property(property); |
124 | 79.4k | mark_property(flat.property0); |
125 | 79.4k | output.push_back(flat); |
126 | 79.4k | } |
127 | 192k | if (*num_props > kNumNonrefProperties) { |
128 | 0 | *num_props = |
129 | 0 | DivCeil(*num_props - kNumNonrefProperties, kExtraPropsPerChannel) * |
130 | 0 | kExtraPropsPerChannel + |
131 | 0 | kNumNonrefProperties; |
132 | 192k | } else { |
133 | 192k | *num_props = kNumNonrefProperties; |
134 | 192k | } |
135 | 192k | *use_wp = has_wp; |
136 | 192k | *wp_only = has_wp && !has_non_wp; |
137 | | |
138 | 192k | return output; |
139 | 192k | } |
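
The branch decision used above to skip statically decidable nodes is simply: take the left child when static_props[property] > splitval, otherwise the right child. Below is a minimal, self-contained sketch of that rule using a simplified node type; the struct and function names are illustrative only, not the real jxl::PropertyDecisionNode API. In FilterTree itself static_props holds {channel, group ID}, so kNumStaticProperties is 2.

    #include <array>
    #include <cstdint>
    #include <vector>

    namespace sketch {

    // Simplified stand-in for a decision-tree node (illustrative layout).
    struct Node {
      int32_t property;  // -1 for a leaf, otherwise a property index
      int32_t splitval;
      uint32_t lchild;
      uint32_t rchild;
    };

    // While the current node tests a static property (index below
    // kNumStaticProperties), the branch can be decided immediately from the
    // fixed property values, exactly as in FilterTree's skip loops.
    inline uint32_t SkipStaticNodes(const std::vector<Node>& tree,
                                    const std::array<int32_t, 2>& static_props,
                                    uint32_t cur) {
      constexpr int32_t kNumStaticProperties = 2;  // channel, group ID
      while (tree[cur].property != -1 &&
             tree[cur].property < kNumStaticProperties) {
        cur = (static_props[tree[cur].property] > tree[cur].splitval)
                  ? tree[cur].lchild
                  : tree[cur].rchild;
      }
      return cur;
    }

    }  // namespace sketch
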
140 | | |
141 | | namespace detail { |
142 | | template <bool uses_lz77> |
143 | | Status DecodeModularChannelMAANS(BitReader *br, ANSSymbolReader *reader, |
144 | | const std::vector<uint8_t> &context_map, |
145 | | const Tree &global_tree, |
146 | | const weighted::Header &wp_header, |
147 | | pixel_type chan, size_t group_id, |
148 | | TreeLut<uint8_t, false, false> &tree_lut, |
149 | | Image *image, uint32_t &fl_run, |
150 | 192k | uint32_t &fl_v) { |
151 | 192k | JxlMemoryManager *memory_manager = image->memory_manager(); |
152 | 192k | Channel &channel = image->channel[chan]; |
153 | | |
154 | 192k | std::array<pixel_type, kNumStaticProperties> static_props = { |
155 | 192k | {chan, static_cast<int>(group_id)}}; |
156 | | // TODO(veluca): filter the tree according to static_props. |
157 | | |
158 | | // zero pixel channel? could happen |
159 | 192k | if (channel.w == 0 || channel.h == 0) return true; |
160 | | |
161 | 192k | bool tree_has_wp_prop_or_pred = false; |
162 | 192k | bool is_wp_only = false; |
163 | 192k | bool is_gradient_only = false; |
164 | 192k | size_t num_props; |
165 | 192k | FlatTree tree = |
166 | 192k | FilterTree(global_tree, static_props, &num_props, |
167 | 192k | &tree_has_wp_prop_or_pred, &is_wp_only, &is_gradient_only); |
168 | | |
169 | | // From here on, tree lookup returns a *clustered* context ID. |
170 | | // This avoids an extra memory lookup after tree traversal. |
171 | 494k | for (auto &node : tree) { |
172 | 494k | if (node.property0 == -1) { |
173 | 419k | node.childID = context_map[node.childID]; |
174 | 419k | } |
175 | 494k | } |
176 | | |
177 | 192k | JXL_DEBUG_V(3, "Decoded MA tree with %" PRIuS " nodes", tree.size()); |
178 | | |
179 | | // MAANS decode |
180 | 192k | const auto make_pixel = [](uint64_t v, pixel_type multiplier, |
181 | 30.4M | pixel_type_w offset) -> pixel_type { |
182 | 30.4M | JXL_DASSERT((v & 0xFFFFFFFF) == v); |
183 | 30.4M | pixel_type_w val = UnpackSigned(v); |
184 | | // if it overflows, it overflows, and we have a problem anyway |
185 | 30.4M | return val * multiplier + offset; |
186 | 30.4M | };
(Per-instantiation counts for the make_pixel lambda: 2.56k executions in DecodeModularChannelMAANS<true>, 30.4M in DecodeModularChannelMAANS<false>.)
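
make_pixel converts the decoded unsigned symbol back to a signed residual with UnpackSigned (declared in lib/jxl/pack_signed.h) before applying the leaf's multiplier and offset. The mapping is the usual zigzag 0, -1, 1, -2, 2, ...; the sketch below restates it with illustrative helper names, and the exact bit-twiddling in pack_signed.h may differ.

    #include <cstdint>

    namespace sketch {

    // Zigzag mapping: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
    inline uint32_t PackSignedSketch(int32_t value) {
      return value >= 0 ? 2u * static_cast<uint32_t>(value)
                        : 2u * static_cast<uint32_t>(-(value + 1)) + 1u;
    }

    // Inverse: even codes are non-negative, odd codes are negative.
    inline int32_t UnpackSignedSketch(uint32_t code) {
      return (code & 1) ? -static_cast<int32_t>(code >> 1) - 1
                        : static_cast<int32_t>(code >> 1);
    }

    }  // namespace sketch
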
187 | | |
188 | 192k | if (tree.size() == 1) { |
189 | | // special optimized case: no meta-adaptation, so no need |
190 | | // to compute properties. |
191 | 188k | Predictor predictor = tree[0].predictor; |
192 | 188k | int64_t offset = tree[0].predictor_offset; |
193 | 188k | int32_t multiplier = tree[0].multiplier; |
194 | 188k | size_t ctx_id = tree[0].childID; |
195 | 188k | if (predictor == Predictor::Zero) { |
196 | 187k | uint32_t value; |
197 | 187k | if (reader->IsSingleValueAndAdvance(ctx_id, &value, |
198 | 187k | channel.w * channel.h)) { |
199 | | // Special-case: histogram has a single symbol, with no extra bits, and |
200 | | // we use ANS mode. |
201 | 25.9k | JXL_DEBUG_V(8, "Fastest track."); |
202 | 25.9k | pixel_type v = make_pixel(value, multiplier, offset); |
203 | 1.31M | for (size_t y = 0; y < channel.h; y++) { |
204 | 1.28M | pixel_type *JXL_RESTRICT r = channel.Row(y); |
205 | 1.28M | std::fill(r, r + channel.w, v); |
206 | 1.28M | } |
207 | 161k | } else { |
208 | 161k | JXL_DEBUG_V(8, "Fast track."); |
209 | 161k | if (multiplier == 1 && offset == 0) { |
210 | 2.80M | for (size_t y = 0; y < channel.h; y++) { |
211 | 2.64M | pixel_type *JXL_RESTRICT r = channel.Row(y); |
212 | 70.2M | for (size_t x = 0; x < channel.w; x++) { |
213 | 67.6M | uint32_t v = |
214 | 67.6M | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
215 | 67.6M | r[x] = UnpackSigned(v); |
216 | 67.6M | } |
217 | 2.64M | } |
218 | 18.4E | } else { |
219 | 18.4E | for (size_t y = 0; y < channel.h; y++) { |
220 | 1.40k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
221 | 263k | for (size_t x = 0; x < channel.w; x++) { |
222 | 262k | uint32_t v = |
223 | 262k | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, |
224 | 262k | br); |
225 | 262k | r[x] = make_pixel(v, multiplier, offset); |
226 | 262k | } |
227 | 1.40k | } |
228 | 18.4E | } |
229 | 161k | } |
230 | 187k | return true; |
231 | 187k | } else if (uses_lz77 && predictor == Predictor::Gradient && offset == 0 && |
232 | 1.09k | multiplier == 1 && reader->IsHuffRleOnly()) { |
233 | 0 | JXL_DEBUG_V(8, "Gradient RLE (fjxl) very fast track."); |
234 | 0 | pixel_type_w sv = UnpackSigned(fl_v); |
235 | 0 | for (size_t y = 0; y < channel.h; y++) { |
236 | 0 | pixel_type *JXL_RESTRICT r = channel.Row(y); |
237 | 0 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); |
238 | 0 | const pixel_type *JXL_RESTRICT rtopleft = |
239 | 0 | (y ? channel.Row(y - 1) - 1 : r - 1); |
240 | 0 | pixel_type_w guess = (y ? rtop[0] : 0); |
241 | 0 | if (fl_run == 0) { |
242 | 0 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, |
243 | 0 | &fl_run); |
244 | 0 | sv = UnpackSigned(fl_v); |
245 | 0 | } else { |
246 | 0 | fl_run--; |
247 | 0 | } |
248 | 0 | r[0] = sv + guess; |
249 | 0 | for (size_t x = 1; x < channel.w; x++) { |
250 | 0 | pixel_type left = r[x - 1]; |
251 | 0 | pixel_type top = rtop[x]; |
252 | 0 | pixel_type topleft = rtopleft[x]; |
253 | 0 | pixel_type_w guess = ClampedGradient(top, left, topleft); |
254 | 0 | if (!fl_run) { |
255 | 0 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, |
256 | 0 | &fl_run); |
257 | 0 | sv = UnpackSigned(fl_v); |
258 | 0 | } else { |
259 | 0 | fl_run--; |
260 | 0 | } |
261 | 0 | r[x] = sv + guess; |
262 | 0 | } |
263 | 0 | } |
264 | 0 | return true; |
265 | 1.10k | } else if (predictor == Predictor::Gradient && offset == 0 && |
266 | 1.10k | multiplier == 1) { |
267 | 1.10k | JXL_DEBUG_V(8, "Gradient very fast track."); |
268 | 1.10k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
269 | 11.2k | for (size_t y = 0; y < channel.h; y++) { |
270 | 10.1k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
271 | 140k | for (size_t x = 0; x < channel.w; x++) { |
272 | 130k | pixel_type left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); |
273 | 130k | pixel_type top = (y ? *(r + x - onerow) : left); |
274 | 130k | pixel_type topleft = (x && y ? *(r + x - 1 - onerow) : left); |
275 | 130k | pixel_type guess = ClampedGradient(top, left, topleft); |
276 | 130k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( |
277 | 130k | ctx_id, br); |
278 | 130k | r[x] = make_pixel(v, 1, guess); |
279 | 130k | } |
280 | 10.1k | } |
281 | 1.10k | return true; |
282 | 1.10k | } |
283 | 188k | } |
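
The "Gradient very fast track" above calls ClampedGradient(top, left, topleft). It computes the clamped-gradient prediction: left + top - topleft, clamped to the range spanned by left and top. The function below is an illustrative restatement of that formula, not the library's implementation.

    #include <algorithm>
    #include <cstdint>

    namespace sketch {

    // Standard clamped-gradient predictor: predict left + top - topleft,
    // clamped between min(left, top) and max(left, top).
    inline int64_t ClampedGradientSketch(int64_t top, int64_t left,
                                         int64_t topleft) {
      const int64_t lo = std::min(left, top);
      const int64_t hi = std::max(left, top);
      return std::min(std::max(left + top - topleft, lo), hi);
    }

    }  // namespace sketch
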
284 | | |
285 | | // Check if this tree is a WP-only tree with a small enough property value |
286 | | // range. |
287 | 3.90k | if (is_wp_only) { |
288 | 0 | is_wp_only = TreeToLookupTable(tree, tree_lut); |
289 | 0 | } |
290 | 3.90k | if (is_gradient_only) { |
291 | 4 | is_gradient_only = TreeToLookupTable(tree, tree_lut); |
292 | 4 | } |
293 | | |
294 | 3.90k | if (is_gradient_only) { |
295 | 0 | JXL_DEBUG_V(8, "Gradient fast track."); |
296 | 0 | const intptr_t onerow = channel.plane.PixelsPerRow(); |
297 | 0 | for (size_t y = 0; y < channel.h; y++) { |
298 | 0 | pixel_type *JXL_RESTRICT r = channel.Row(y); |
299 | 0 | for (size_t x = 0; x < channel.w; x++) { |
300 | 0 | pixel_type_w left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); |
301 | 0 | pixel_type_w top = (y ? *(r + x - onerow) : left); |
302 | 0 | pixel_type_w topleft = (x && y ? *(r + x - 1 - onerow) : left); |
303 | 0 | int32_t guess = ClampedGradient(top, left, topleft); |
304 | 0 | uint32_t pos = |
305 | 0 | kPropRangeFast + |
306 | 0 | std::min<pixel_type_w>( |
307 | 0 | std::max<pixel_type_w>(-kPropRangeFast, top + left - topleft), |
308 | 0 | kPropRangeFast - 1); |
309 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
310 | 0 | uint64_t v = |
311 | 0 | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, br); |
312 | 0 | r[x] = make_pixel(v, 1, guess); |
313 | 0 | } |
314 | 0 | } |
315 | 3.90k | } else if (!uses_lz77 && is_wp_only && channel.w > 8) { |
316 | 0 | JXL_DEBUG_V(8, "WP fast track."); |
317 | 0 | weighted::State wp_state(wp_header, channel.w, channel.h); |
318 | 0 | Properties properties(1); |
319 | 0 | for (size_t y = 0; y < channel.h; y++) { |
320 | 0 | pixel_type *JXL_RESTRICT r = channel.Row(y); |
321 | 0 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); |
322 | 0 | const pixel_type *JXL_RESTRICT rtoptop = |
323 | 0 | (y > 1 ? channel.Row(y - 2) : rtop); |
324 | 0 | const pixel_type *JXL_RESTRICT rtopleft = |
325 | 0 | (y ? channel.Row(y - 1) - 1 : r - 1); |
326 | 0 | const pixel_type *JXL_RESTRICT rtopright = |
327 | 0 | (y ? channel.Row(y - 1) + 1 : r - 1); |
328 | 0 | size_t x = 0; |
329 | 0 | { |
330 | 0 | size_t offset = 0; |
331 | 0 | pixel_type_w left = y ? rtop[x] : 0; |
332 | 0 | pixel_type_w toptop = y ? rtoptop[x] : 0; |
333 | 0 | pixel_type_w topright = (x + 1 < channel.w && y ? rtop[x + 1] : left); |
334 | 0 | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
335 | 0 | x, y, channel.w, left, left, topright, left, toptop, &properties, |
336 | 0 | offset); |
337 | 0 | uint32_t pos = |
338 | 0 | kPropRangeFast + std::min(std::max(-kPropRangeFast, properties[0]), |
339 | 0 | kPropRangeFast - 1); |
340 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
341 | 0 | uint64_t v = |
342 | 0 | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
343 | 0 | r[x] = make_pixel(v, 1, guess); |
344 | 0 | wp_state.UpdateErrors(r[x], x, y, channel.w); |
345 | 0 | } |
346 | 0 | for (x = 1; x + 1 < channel.w; x++) { |
347 | 0 | size_t offset = 0; |
348 | 0 | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
349 | 0 | x, y, channel.w, rtop[x], r[x - 1], rtopright[x], rtopleft[x], |
350 | 0 | rtoptop[x], &properties, offset); |
351 | 0 | uint32_t pos = |
352 | 0 | kPropRangeFast + std::min(std::max(-kPropRangeFast, properties[0]), |
353 | 0 | kPropRangeFast - 1); |
354 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
355 | 0 | uint64_t v = |
356 | 0 | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
357 | 0 | r[x] = make_pixel(v, 1, guess); |
358 | 0 | wp_state.UpdateErrors(r[x], x, y, channel.w); |
359 | 0 | } |
360 | 0 | { |
361 | 0 | size_t offset = 0; |
362 | 0 | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
363 | 0 | x, y, channel.w, rtop[x], r[x - 1], rtop[x], rtopleft[x], |
364 | 0 | rtoptop[x], &properties, offset); |
365 | 0 | uint32_t pos = |
366 | 0 | kPropRangeFast + std::min(std::max(-kPropRangeFast, properties[0]), |
367 | 0 | kPropRangeFast - 1); |
368 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
369 | 0 | uint64_t v = |
370 | 0 | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
371 | 0 | r[x] = make_pixel(v, 1, guess); |
372 | 0 | wp_state.UpdateErrors(r[x], x, y, channel.w); |
373 | 0 | } |
374 | 0 | } |
375 | 3.90k | } else if (!tree_has_wp_prop_or_pred) { |
376 | | // special optimized case: the weighted predictor and its properties are not |
377 | | // used, so no need to compute weights and properties. |
378 | 2.86k | JXL_DEBUG_V(8, "Slow track."); |
379 | 2.86k | MATreeLookup tree_lookup(tree); |
380 | 2.86k | Properties properties = Properties(num_props); |
381 | 2.86k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
382 | 2.86k | JXL_ASSIGN_OR_RETURN( |
383 | 2.86k | Channel references, |
384 | 2.86k | Channel::Create(memory_manager, |
385 | 2.86k | properties.size() - kNumNonrefProperties, channel.w)); |
386 | 127k | for (size_t y = 0; y < channel.h; y++) { |
387 | 124k | pixel_type *JXL_RESTRICT p = channel.Row(y); |
388 | 124k | PrecomputeReferences(channel, y, *image, chan, &references); |
389 | 124k | InitPropsRow(&properties, static_props, y); |
390 | 124k | if (y > 1 && channel.w > 8 && references.w == 0) { |
391 | 346k | for (size_t x = 0; x < 2; x++) { |
392 | 231k | PredictionResult res = |
393 | 231k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
394 | 231k | tree_lookup, references); |
395 | 231k | uint64_t v = |
396 | 231k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
397 | 231k | p[x] = make_pixel(v, res.multiplier, res.guess); |
398 | 231k | } |
399 | 15.0M | for (size_t x = 2; x < channel.w - 2; x++) { |
400 | 14.8M | PredictionResult res = |
401 | 14.8M | PredictTreeNoWPNEC(&properties, channel.w, p + x, onerow, x, y, |
402 | 14.8M | tree_lookup, references); |
403 | 14.8M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( |
404 | 14.8M | res.context, br); |
405 | 14.8M | p[x] = make_pixel(v, res.multiplier, res.guess); |
406 | 14.8M | } |
407 | 346k | for (size_t x = channel.w - 2; x < channel.w; x++) { |
408 | 231k | PredictionResult res = |
409 | 231k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
410 | 231k | tree_lookup, references); |
411 | 231k | uint64_t v = |
412 | 231k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
413 | 231k | p[x] = make_pixel(v, res.multiplier, res.guess); |
414 | 231k | } |
415 | 115k | } else { |
416 | 408k | for (size_t x = 0; x < channel.w; x++) { |
417 | 399k | PredictionResult res = |
418 | 399k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
419 | 399k | tree_lookup, references); |
420 | 399k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( |
421 | 399k | res.context, br); |
422 | 399k | p[x] = make_pixel(v, res.multiplier, res.guess); |
423 | 399k | } |
424 | 8.84k | } |
425 | 124k | } |
426 | 2.86k | } else { |
427 | 1.04k | JXL_DEBUG_V(8, "Slowest track."); |
428 | 1.04k | MATreeLookup tree_lookup(tree); |
429 | 1.04k | Properties properties = Properties(num_props); |
430 | 1.04k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
431 | 1.04k | JXL_ASSIGN_OR_RETURN( |
432 | 1.04k | Channel references, |
433 | 1.04k | Channel::Create(memory_manager, |
434 | 1.04k | properties.size() - kNumNonrefProperties, channel.w)); |
435 | 1.04k | weighted::State wp_state(wp_header, channel.w, channel.h); |
436 | 98.9k | for (size_t y = 0; y < channel.h; y++) { |
437 | 97.8k | pixel_type *JXL_RESTRICT p = channel.Row(y); |
438 | 97.8k | InitPropsRow(&properties, static_props, y); |
439 | 97.8k | PrecomputeReferences(channel, y, *image, chan, &references); |
440 | 97.8k | if (!uses_lz77 && y > 1 && channel.w > 8 && references.w == 0) { |
441 | 288k | for (size_t x = 0; x < 2; x++) { |
442 | 192k | PredictionResult res = |
443 | 192k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
444 | 192k | tree_lookup, references, &wp_state); |
445 | 192k | uint64_t v = |
446 | 192k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
447 | 192k | p[x] = make_pixel(v, res.multiplier, res.guess); |
448 | 192k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
449 | 192k | } |
450 | 18.3M | for (size_t x = 2; x < channel.w - 2; x++) { |
451 | 18.2M | PredictionResult res = |
452 | 18.2M | PredictTreeWPNEC(&properties, channel.w, p + x, onerow, x, y, |
453 | 18.2M | tree_lookup, references, &wp_state); |
454 | 18.2M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( |
455 | 18.2M | res.context, br); |
456 | 18.2M | p[x] = make_pixel(v, res.multiplier, res.guess); |
457 | 18.2M | wp_state.UpdateErrors(p[x], x, y, channel.w); |
458 | 18.2M | } |
459 | 288k | for (size_t x = channel.w - 2; x < channel.w; x++) { |
460 | 192k | PredictionResult res = |
461 | 192k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
462 | 192k | tree_lookup, references, &wp_state); |
463 | 192k | uint64_t v = |
464 | 192k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
465 | 192k | p[x] = make_pixel(v, res.multiplier, res.guess); |
466 | 192k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
467 | 192k | } |
468 | 96.3k | } else { |
469 | 304k | for (size_t x = 0; x < channel.w; x++) { |
470 | 302k | PredictionResult res = |
471 | 302k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
472 | 302k | tree_lookup, references, &wp_state); |
473 | 302k | uint64_t v = |
474 | 302k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
475 | 302k | p[x] = make_pixel(v, res.multiplier, res.guess); |
476 | 302k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
477 | 302k | } |
478 | 1.59k | } |
479 | 97.8k | } |
480 | 1.04k | } |
481 | 3.90k | return true; |
482 | 3.90k | }
(Per-instantiation counts for DecodeModularChannelMAANS: the uses_lz77=true instantiation was entered 8.94k times, the uses_lz77=false instantiation 183k times; their per-line breakdowns mirror the combined listing above.)
483 | | } // namespace detail |
484 | | |
485 | | Status DecodeModularChannelMAANS(BitReader *br, ANSSymbolReader *reader, |
486 | | const std::vector<uint8_t> &context_map, |
487 | | const Tree &global_tree, |
488 | | const weighted::Header &wp_header, |
489 | | pixel_type chan, size_t group_id, |
490 | | TreeLut<uint8_t, false, false> &tree_lut, |
491 | | Image *image, uint32_t &fl_run, |
492 | 192k | uint32_t &fl_v) { |
493 | 192k | if (reader->UsesLZ77()) { |
494 | 8.94k | return detail::DecodeModularChannelMAANS</*uses_lz77=*/true>( |
495 | 8.94k | br, reader, context_map, global_tree, wp_header, chan, group_id, |
496 | 8.94k | tree_lut, image, fl_run, fl_v); |
497 | 183k | } else { |
498 | 183k | return detail::DecodeModularChannelMAANS</*uses_lz77=*/false>( |
499 | 183k | br, reader, context_map, global_tree, wp_header, chan, group_id, |
500 | 183k | tree_lut, image, fl_run, fl_v); |
501 | 183k | } |
502 | 192k | } |
503 | | |
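Note on the dispatcher above: DecodeModularChannelMAANS tests reader->UsesLZ77() once and then calls a detail:: instantiation templated on uses_lz77, so the flag is a compile-time constant inside the per-symbol loops. The following is a generic sketch of that runtime-to-template dispatch pattern; DecodeLoop and DecodeLoopImpl are made-up names, not the real libjxl signatures.

#include <cstdio>

// Hypothetical decode loop, templated on the flag so the branch resolves at
// compile time and vanishes from the per-symbol path.
template <bool kUsesLz77>
void DecodeLoopImpl(int n) {
  for (int i = 0; i < n; i++) {
    if constexpr (kUsesLz77) {
      std::printf("lz77-aware symbol %d\n", i);  // LZ77 handling would go here
    } else {
      std::printf("plain symbol %d\n", i);       // direct symbol handling
    }
  }
}

// Thin runtime dispatcher, in the spirit of DecodeModularChannelMAANS above:
// the runtime bool is tested once, then the specialized instantiation runs.
void DecodeLoop(bool uses_lz77, int n) {
  if (uses_lz77) {
    DecodeLoopImpl</*kUsesLz77=*/true>(n);
  } else {
    DecodeLoopImpl</*kUsesLz77=*/false>(n);
  }
}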
504 | 95.9k | GroupHeader::GroupHeader() { Bundle::Init(this); } |
505 | | |
506 | | Status ValidateChannelDimensions(const Image &image, |
507 | 25.3k | const ModularOptions &options) { |
508 | 25.3k | size_t nb_channels = image.channel.size(); |
509 | 50.6k | for (bool is_dc : {true, false}) { |
510 | 50.6k | size_t group_dim = options.group_dim * (is_dc ? kBlockDim : 1); |
511 | 50.6k | size_t c = image.nb_meta_channels; |
512 | 440k | for (; c < nb_channels; c++) { |
513 | 392k | const Channel &ch = image.channel[c]; |
514 | 392k | if (ch.w > options.group_dim || ch.h > options.group_dim) break; |
515 | 392k | } |
516 | 77.1k | for (; c < nb_channels; c++) { |
517 | 26.5k | const Channel &ch = image.channel[c]; |
518 | 26.5k | if (ch.w == 0 || ch.h == 0) continue; // skip empty |
519 | 26.5k | bool is_dc_channel = std::min(ch.hshift, ch.vshift) >= 3; |
520 | 26.5k | if (is_dc_channel != is_dc) continue; |
521 | 13.2k | size_t tile_dim = group_dim >> std::max(ch.hshift, ch.vshift); |
522 | 13.2k | if (tile_dim == 0) { |
523 | 0 | return JXL_FAILURE("Inconsistent transforms"); |
524 | 0 | } |
525 | 13.2k | } |
526 | 50.6k | } |
527 | 25.3k | return true; |
528 | 25.3k | } |
529 | | |
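Note on ValidateChannelDimensions above: a channel is rejected when group_dim >> max(hshift, vshift) is zero, i.e. when the transform shifts would collapse the per-group tile to zero pixels. A small sketch of just that check, using hypothetical inputs rather than the real Channel and ModularOptions types:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// A channel whose shifts collapse the per-group tile to zero pixels signals an
// inconsistent transform chain.
bool TileDimIsConsistent(size_t group_dim, int hshift, int vshift) {
  size_t tile_dim = group_dim >> std::max(hshift, vshift);
  return tile_dim != 0;
}

int main() {
  // With 256-pixel groups, a shift of 8 still leaves a 1-pixel tile; a shift
  // of 9 leaves none and would be rejected as "Inconsistent transforms".
  std::printf("%d\n", TileDimIsConsistent(256, 8, 3) ? 1 : 0);  // prints 1
  std::printf("%d\n", TileDimIsConsistent(256, 9, 3) ? 1 : 0);  // prints 0
  return 0;
}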
530 | | Status ModularDecode(BitReader *br, Image &image, GroupHeader &header, |
531 | | size_t group_id, ModularOptions *options, |
532 | | const Tree *global_tree, const ANSCode *global_code, |
533 | | const std::vector<uint8_t> *global_ctx_map, |
534 | 31.4k | const bool allow_truncated_group) { |
535 | 31.4k | if (image.channel.empty()) return true; |
536 | 25.3k | JxlMemoryManager *memory_manager = image.memory_manager(); |
537 | | |
538 | | // decode transforms |
539 | 25.3k | Status status = Bundle::Read(br, &header); |
540 | 25.3k | if (!allow_truncated_group) JXL_RETURN_IF_ERROR(status); |
541 | 25.3k | if (status.IsFatalError()) return status; |
542 | 25.3k | if (!br->AllReadsWithinBounds()) { |
543 | | // Don't do/undo transforms if header is incomplete. |
544 | 0 | header.transforms.clear(); |
545 | 0 | image.transform = header.transforms; |
546 | 0 | for (auto &ch : image.channel) { |
547 | 0 | ZeroFillImage(&ch.plane); |
548 | 0 | } |
549 | 0 | return Status(StatusCode::kNotEnoughBytes); |
550 | 0 | } |
551 | | |
552 | 25.3k | JXL_DEBUG_V(3, "Image data underwent %" PRIuS " transformations: ", |
553 | 25.3k | header.transforms.size()); |
554 | 25.3k | image.transform = header.transforms; |
555 | 25.3k | for (Transform &transform : image.transform) { |
556 | 13.3k | JXL_RETURN_IF_ERROR(transform.MetaApply(image)); |
557 | 13.3k | } |
558 | 25.3k | if (image.error) { |
559 | 0 | return JXL_FAILURE("Corrupt file. Aborting."); |
560 | 0 | } |
561 | 25.3k | JXL_RETURN_IF_ERROR(ValidateChannelDimensions(image, *options)); |
562 | | |
563 | 25.3k | size_t nb_channels = image.channel.size(); |
564 | | |
565 | 25.3k | size_t num_chans = 0; |
566 | 25.3k | size_t distance_multiplier = 0; |
567 | 221k | for (size_t i = 0; i < nb_channels; i++) { |
568 | 197k | Channel &channel = image.channel[i]; |
569 | 197k | if (!channel.w || !channel.h) { |
570 | 1.95k | continue; // skip empty channels |
571 | 1.95k | } |
572 | 195k | if (i >= image.nb_meta_channels && (channel.w > options->max_chan_size || |
573 | 194k | channel.h > options->max_chan_size)) { |
574 | 1.38k | break; |
575 | 1.38k | } |
576 | 194k | if (channel.w > distance_multiplier) { |
577 | 36.9k | distance_multiplier = channel.w; |
578 | 36.9k | } |
579 | 194k | num_chans++; |
580 | 194k | } |
581 | 25.3k | if (num_chans == 0) return true; |
582 | | |
583 | 25.1k | size_t next_channel = 0; |
584 | 25.1k | auto scope_guard = MakeScopeGuard([&]() { |
585 | 2.03k | for (size_t c = next_channel; c < image.channel.size(); c++) { |
586 | 1.97k | ZeroFillImage(&image.channel[c].plane); |
587 | 1.97k | } |
588 | 53 | }); |
589 | | // Do not do anything if truncated groups are not allowed. |
590 | 25.1k | if (allow_truncated_group) scope_guard.Disarm(); |
591 | | |
592 | | // Read tree. |
593 | 25.1k | Tree tree_storage; |
594 | 25.1k | std::vector<uint8_t> context_map_storage; |
595 | 25.1k | ANSCode code_storage; |
596 | 25.1k | const Tree *tree = &tree_storage; |
597 | 25.1k | const ANSCode *code = &code_storage; |
598 | 25.1k | const std::vector<uint8_t> *context_map = &context_map_storage; |
599 | 25.1k | if (!header.use_global_tree) { |
600 | 3.75k | uint64_t max_tree_size = 1024; |
601 | 33.8k | for (size_t i = 0; i < nb_channels; i++) { |
602 | 30.1k | Channel &channel = image.channel[i]; |
603 | 30.1k | if (i >= image.nb_meta_channels && (channel.w > options->max_chan_size || |
604 | 29.5k | channel.h > options->max_chan_size)) { |
605 | 0 | break; |
606 | 0 | } |
607 | 30.1k | uint64_t pixels = channel.w * channel.h; |
608 | 30.1k | max_tree_size += pixels; |
609 | 30.1k | } |
610 | 3.75k | max_tree_size = std::min(static_cast<uint64_t>(1 << 20), max_tree_size); |
611 | 3.75k | JXL_RETURN_IF_ERROR( |
612 | 3.75k | DecodeTree(memory_manager, br, &tree_storage, max_tree_size)); |
613 | 3.73k | JXL_RETURN_IF_ERROR(DecodeHistograms(memory_manager, br, |
614 | 3.73k | (tree_storage.size() + 1) / 2, |
615 | 3.73k | &code_storage, &context_map_storage)); |
616 | 21.4k | } else { |
617 | 21.4k | if (!global_tree || !global_code || !global_ctx_map || |
618 | 21.4k | global_tree->empty()) { |
619 | 1 | return JXL_FAILURE("No global tree available but one was requested"); |
620 | 1 | } |
621 | 21.4k | tree = global_tree; |
622 | 21.4k | code = global_code; |
623 | 21.4k | context_map = global_ctx_map; |
624 | 21.4k | } |
625 | | |
626 | | // Read channels |
627 | 50.3k | JXL_ASSIGN_OR_RETURN(ANSSymbolReader reader, |
628 | 50.3k | ANSSymbolReader::Create(code, br, distance_multiplier)); |
629 | 50.3k | auto tree_lut = jxl::make_unique<TreeLut<uint8_t, false, false>>(); |
630 | 50.3k | uint32_t fl_run = 0; |
631 | 50.3k | uint32_t fl_v = 0; |
632 | 219k | for (; next_channel < nb_channels; next_channel++) { |
633 | 195k | Channel &channel = image.channel[next_channel]; |
634 | 195k | if (!channel.w || !channel.h) { |
635 | 1.95k | continue; // skip empty channels |
636 | 1.95k | } |
637 | 193k | if (next_channel >= image.nb_meta_channels && |
638 | 193k | (channel.w > options->max_chan_size || |
639 | 192k | channel.h > options->max_chan_size)) { |
640 | 1.27k | break; |
641 | 1.27k | } |
642 | 192k | JXL_RETURN_IF_ERROR(DecodeModularChannelMAANS( |
643 | 192k | br, &reader, *context_map, *tree, header.wp_header, next_channel, |
644 | 192k | group_id, *tree_lut, &image, fl_run, fl_v)); |
645 | | |
646 | | // Truncated group. |
647 | 192k | if (!br->AllReadsWithinBounds()) { |
648 | 33 | if (!allow_truncated_group) return JXL_FAILURE("Truncated input"); |
649 | 0 | return Status(StatusCode::kNotEnoughBytes); |
650 | 33 | } |
651 | 192k | } |
652 | | |
653 | | // Make sure no zero-filling happens even if next_channel < nb_channels. |
654 | 25.1k | scope_guard.Disarm(); |
655 | | |
656 | 25.1k | if (!reader.CheckANSFinalState()) { |
657 | 0 | return JXL_FAILURE("ANS decode final state failed"); |
658 | 0 | } |
659 | 25.1k | return true; |
660 | 25.1k | } |
661 | | |
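Note on ModularDecode above: a scope guard is armed that zero-fills every channel from next_channel onward if the function returns before the channel loop finishes, and it is disarmed either up front (when truncated groups are allowed) or once all channels have decoded. A sketch of that pattern follows, with a generic RAII guard standing in for jxl's MakeScopeGuard; the DecodeChannels driver is hypothetical.

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Generic RAII guard: runs the callback on scope exit unless Disarm() was
// called first.
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : f_(std::move(f)) {}
  ~ScopeGuard() {
    if (armed_) f_();
  }
  void Disarm() { armed_ = false; }

 private:
  F f_;
  bool armed_ = true;
};

// Hypothetical channel-decoding driver mirroring the loop above: if decoding
// stops early, every channel from `next` onward is left zero-filled instead
// of holding uninitialized data.
bool DecodeChannels(std::vector<std::vector<int>> &channels,
                    bool allow_truncated) {
  size_t next = 0;
  ScopeGuard guard([&] {
    for (size_t c = next; c < channels.size(); c++) {
      std::fill(channels[c].begin(), channels[c].end(), 0);
    }
  });
  if (allow_truncated) guard.Disarm();  // truncated data is acceptable here
  for (; next < channels.size(); next++) {
    // ... decode channels[next]; an early return lets the guard zero-fill
    // everything from `next` onward ...
  }
  guard.Disarm();  // all channels decoded: nothing left to zero-fill
  return true;
}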
662 | | Status ModularGenericDecompress(BitReader *br, Image &image, |
663 | | GroupHeader *header, size_t group_id, |
664 | | ModularOptions *options, bool undo_transforms, |
665 | | const Tree *tree, const ANSCode *code, |
666 | | const std::vector<uint8_t> *ctx_map, |
667 | 31.4k | bool allow_truncated_group) { |
668 | 31.4k | std::vector<std::pair<uint32_t, uint32_t>> req_sizes; |
669 | 31.4k | req_sizes.reserve(image.channel.size()); |
670 | 100k | for (const auto &c : image.channel) { |
671 | 100k | req_sizes.emplace_back(c.w, c.h); |
672 | 100k | } |
673 | 31.4k | GroupHeader local_header; |
674 | 31.4k | if (header == nullptr) header = &local_header; |
675 | 31.4k | size_t bit_pos = br->TotalBitsConsumed(); |
676 | 31.4k | auto dec_status = ModularDecode(br, image, *header, group_id, options, tree, |
677 | 31.4k | code, ctx_map, allow_truncated_group); |
678 | 31.4k | if (!allow_truncated_group) JXL_RETURN_IF_ERROR(dec_status); |
679 | 31.3k | if (dec_status.IsFatalError()) return dec_status; |
680 | 31.3k | if (undo_transforms) image.undo_transforms(header->wp_header); |
681 | 31.3k | if (image.error) return JXL_FAILURE("Corrupt file. Aborting."); |
682 | 31.3k | JXL_DEBUG_V(4, |
683 | 31.3k | "Modular-decoded a %" PRIuS "x%" PRIuS " nbchans=%" PRIuS |
684 | 31.3k | " image from %" PRIuS " bytes", |
685 | 31.3k | image.w, image.h, image.channel.size(), |
686 | 31.3k | (br->TotalBitsConsumed() - bit_pos) / 8); |
687 | 31.3k | JXL_DEBUG_V(5, "Modular image: %s", image.DebugString().c_str()); |
688 | 31.3k | (void)bit_pos; |
689 | | // Check that after applying all transforms we are back to the requested |
690 | | // image sizes, otherwise there's a programming error with the |
691 | | // transformations. |
692 | 31.3k | if (undo_transforms) { |
693 | 19.3k | JXL_ENSURE(image.channel.size() == req_sizes.size()); |
694 | 97.5k | for (size_t c = 0; c < req_sizes.size(); c++) { |
695 | 78.2k | JXL_ENSURE(req_sizes[c].first == image.channel[c].w); |
696 | 78.2k | JXL_ENSURE(req_sizes[c].second == image.channel[c].h); |
697 | 78.2k | } |
698 | 19.3k | } |
699 | 31.3k | return dec_status; |
700 | 31.3k | } |
701 | | |
702 | | } // namespace jxl |
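Note on ModularGenericDecompress above: each channel's requested (w, h) is recorded in req_sizes before decoding, and when transforms are undone the JXL_ENSURE loop checks that the channels come back at exactly those sizes; a mismatch points at a bug in a transform or its inverse, not at the bitstream. A minimal sketch of that record-then-verify step, with a hypothetical Chan struct in place of the real Channel:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical channel with only the fields needed for the size check.
struct Chan {
  uint32_t w, h;
};

// Record-then-verify: capture the requested dimensions before decoding and
// compare again after the transforms have been undone.
bool SizesRoundTrip(const std::vector<Chan> &requested,
                    const std::vector<Chan> &decoded) {
  if (requested.size() != decoded.size()) return false;
  for (size_t c = 0; c < requested.size(); c++) {
    if (requested[c].w != decoded[c].w || requested[c].h != decoded[c].h) {
      return false;
    }
  }
  return true;
}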