/src/libjxl/lib/jxl/modular/encoding/encoding.cc
Line | Count | Source |
1 | | // Copyright (c) the JPEG XL Project Authors. All rights reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style |
4 | | // license that can be found in the LICENSE file. |
5 | | |
6 | | #include "lib/jxl/modular/encoding/encoding.h" |
7 | | |
8 | | #include <jxl/memory_manager.h> |
9 | | |
10 | | #include <algorithm> |
11 | | #include <array> |
12 | | #include <cstddef> |
13 | | #include <cstdint> |
14 | | #include <cstdlib> |
15 | | #include <queue> |
16 | | #include <utility> |
17 | | #include <vector> |
18 | | |
19 | | #include "lib/jxl/base/common.h" |
20 | | #include "lib/jxl/base/compiler_specific.h" |
21 | | #include "lib/jxl/base/printf_macros.h" |
22 | | #include "lib/jxl/base/scope_guard.h" |
23 | | #include "lib/jxl/base/status.h" |
24 | | #include "lib/jxl/dec_ans.h" |
25 | | #include "lib/jxl/dec_bit_reader.h" |
26 | | #include "lib/jxl/fields.h" |
27 | | #include "lib/jxl/frame_dimensions.h" |
28 | | #include "lib/jxl/image_ops.h" |
29 | | #include "lib/jxl/modular/encoding/context_predict.h" |
30 | | #include "lib/jxl/modular/encoding/dec_ma.h" |
31 | | #include "lib/jxl/modular/modular_image.h" |
32 | | #include "lib/jxl/modular/options.h" |
33 | | #include "lib/jxl/modular/transform/transform.h" |
34 | | #include "lib/jxl/pack_signed.h" |
35 | | |
36 | | namespace jxl { |
37 | | |
38 | | // Removes all nodes that use a static property (i.e. channel or group ID) from |
39 | | // the tree and collapses each node on even levels with its two children to |
40 | | // produce a flatter tree. Also computes whether the resulting tree requires |
41 | | // using the weighted predictor. |
42 | | FlatTree FilterTree(const Tree &global_tree, |
43 | | std::array<pixel_type, kNumStaticProperties> &static_props, |
44 | | size_t *num_props, bool *use_wp, bool *wp_only, |
45 | 225k | bool *gradient_only) { |
46 | 225k | *num_props = 0; |
47 | 225k | bool has_wp = false; |
48 | 225k | bool has_non_wp = false; |
49 | 225k | *gradient_only = true; |
50 | 225k | const auto mark_property = [&](int32_t p) { |
51 | 22.2k | if (p == kWPProp) { |
52 | 5.33k | has_wp = true; |
53 | 16.9k | } else if (p >= kNumStaticProperties) { |
54 | 7.71k | has_non_wp = true; |
55 | 7.71k | } |
56 | 22.2k | if (p >= kNumStaticProperties && p != kGradientProp) { |
57 | 10.7k | *gradient_only = false; |
58 | 10.7k | } |
59 | 22.2k | }; |
60 | 225k | FlatTree output; |
61 | 225k | std::queue<size_t> nodes; |
62 | 225k | nodes.push(0); |
63 | | // Produces a trimmed and flattened tree by doing a BFS visit of the original |
64 | | // tree, ignoring branches that are known to be false and proceeding two |
65 | | // levels at a time to collapse nodes into a flatter tree; if an inner parent |
66 | | // node has a leaf as a child, the leaf is duplicated and an implicit fake |
67 | | // node is added. This reduces the number of branches when traversing |
68 | | // the resulting flat tree. |
69 | 479k | while (!nodes.empty()) { |
70 | 254k | size_t cur = nodes.front(); |
71 | 254k | nodes.pop(); |
72 | | // Skip nodes that we can decide now, by jumping directly to their children. |
73 | 258k | while (global_tree[cur].property < kNumStaticProperties && |
74 | 258k | global_tree[cur].property != -1) { |
75 | 4.03k | if (static_props[global_tree[cur].property] > global_tree[cur].splitval) { |
76 | 2.35k | cur = global_tree[cur].lchild; |
77 | 2.35k | } else { |
78 | 1.68k | cur = global_tree[cur].rchild; |
79 | 1.68k | } |
80 | 4.03k | } |
81 | 254k | FlatDecisionNode flat; |
82 | 254k | if (global_tree[cur].property == -1) { |
83 | 247k | flat.property0 = -1; |
84 | 247k | flat.childID = global_tree[cur].lchild; |
85 | 247k | flat.predictor = global_tree[cur].predictor; |
86 | 247k | flat.predictor_offset = global_tree[cur].predictor_offset; |
87 | 247k | flat.multiplier = global_tree[cur].multiplier; |
88 | 247k | *gradient_only &= flat.predictor == Predictor::Gradient; |
89 | 247k | has_wp |= flat.predictor == Predictor::Weighted; |
90 | 247k | has_non_wp |= flat.predictor != Predictor::Weighted; |
91 | 247k | output.push_back(flat); |
92 | 247k | continue; |
93 | 247k | } |
94 | 7.41k | flat.childID = output.size() + nodes.size() + 1; |
95 | | |
96 | 7.41k | flat.property0 = global_tree[cur].property; |
97 | 7.41k | *num_props = std::max<size_t>(flat.property0 + 1, *num_props); |
98 | 7.41k | flat.splitval0 = global_tree[cur].splitval; |
99 | | |
100 | 22.2k | for (size_t i = 0; i < 2; i++) { |
101 | 14.8k | size_t cur_child = |
102 | 14.8k | i == 0 ? global_tree[cur].lchild : global_tree[cur].rchild; |
103 | | // Skip nodes that we can decide now. |
104 | 16.0k | while (global_tree[cur_child].property < kNumStaticProperties && |
105 | 16.0k | global_tree[cur_child].property != -1) { |
106 | 1.18k | if (static_props[global_tree[cur_child].property] > |
107 | 1.18k | global_tree[cur_child].splitval) { |
108 | 891 | cur_child = global_tree[cur_child].lchild; |
109 | 891 | } else { |
110 | 295 | cur_child = global_tree[cur_child].rchild; |
111 | 295 | } |
112 | 1.18k | } |
113 | | // We ended up in a leaf, add a placeholder decision and two copies of the |
114 | | // leaf. |
115 | 14.8k | if (global_tree[cur_child].property == -1) { |
116 | 9.20k | flat.properties[i] = 0; |
117 | 9.20k | flat.splitvals[i] = 0; |
118 | 9.20k | nodes.push(cur_child); |
119 | 9.20k | nodes.push(cur_child); |
120 | 9.20k | } else { |
121 | 5.63k | flat.properties[i] = global_tree[cur_child].property; |
122 | 5.63k | flat.splitvals[i] = global_tree[cur_child].splitval; |
123 | 5.63k | nodes.push(global_tree[cur_child].lchild); |
124 | 5.63k | nodes.push(global_tree[cur_child].rchild); |
125 | 5.63k | *num_props = std::max<size_t>(flat.properties[i] + 1, *num_props); |
126 | 5.63k | } |
127 | 14.8k | } |
128 | | |
129 | 14.8k | for (int16_t property : flat.properties) mark_property(property); |
130 | 7.41k | mark_property(flat.property0); |
131 | 7.41k | output.push_back(flat); |
132 | 7.41k | } |
133 | 225k | if (*num_props > kNumNonrefProperties) { |
134 | 164 | *num_props = |
135 | 164 | DivCeil(*num_props - kNumNonrefProperties, kExtraPropsPerChannel) * |
136 | 164 | kExtraPropsPerChannel + |
137 | 164 | kNumNonrefProperties; |
138 | 224k | } else { |
139 | 224k | *num_props = kNumNonrefProperties; |
140 | 224k | } |
141 | 225k | *use_wp = has_wp; |
142 | 225k | *wp_only = has_wp && !has_non_wp; |
143 | | |
144 | 225k | return output; |
145 | 225k | } |
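To make the two-level collapse above concrete, here is a minimal sketch of how such a flattened tree can be walked at decode time: one comparison on property0 picks a child slot, and a second comparison on that slot's property/splitval picks one of the four consecutively stored grandchildren (leaves reached early are simply duplicated, as the comment notes). MiniFlatNode and LookupContext are hypothetical, simplified stand-ins for FlatDecisionNode and MATreeLookup, not the actual types.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical, simplified stand-in for FlatDecisionNode (illustration only).
    struct MiniFlatNode {
      int16_t property0;      // -1 marks a leaf
      int32_t splitval0;
      int16_t properties[2];  // properties of the two (possibly duplicated) children
      int32_t splitvals[2];
      uint32_t childID;       // leaf: clustered context id; inner: index of first grandchild
    };

    // Walks the flattened tree, consuming two of the original tree levels per step.
    // Follows the convention used in FilterTree above: "value > splitval" takes the
    // left branch (index 0).
    uint32_t LookupContext(const std::vector<MiniFlatNode> &tree,
                           const std::vector<int32_t> &props) {
      uint32_t pos = 0;
      while (tree[pos].property0 != -1) {
        size_t side = props[tree[pos].property0] > tree[pos].splitval0 ? 0 : 1;
        size_t sub =
            props[tree[pos].properties[side]] > tree[pos].splitvals[side] ? 0 : 1;
        pos = tree[pos].childID + 2 * side + sub;
      }
      return tree[pos].childID;
    }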
146 | | |
147 | | namespace detail { |
148 | | template <bool uses_lz77> |
149 | | Status DecodeModularChannelMAANS(BitReader *br, ANSSymbolReader *reader, |
150 | | const std::vector<uint8_t> &context_map, |
151 | | const Tree &global_tree, |
152 | | const weighted::Header &wp_header, |
153 | | pixel_type chan, size_t group_id, |
154 | | TreeLut<uint8_t, false, false> &tree_lut, |
155 | | Image *image, uint32_t &fl_run, |
156 | 223k | uint32_t &fl_v) { |
157 | 223k | JxlMemoryManager *memory_manager = image->memory_manager(); |
158 | 223k | Channel &channel = image->channel[chan]; |
159 | | |
160 | 223k | std::array<pixel_type, kNumStaticProperties> static_props = { |
161 | 223k | {chan, static_cast<int>(group_id)}}; |
162 | | // TODO(veluca): filter the tree according to static_props. |
163 | | |
164 | | // zero pixel channel? could happen |
165 | 223k | if (channel.w == 0 || channel.h == 0) return true; |
166 | | |
167 | 223k | bool tree_has_wp_prop_or_pred = false; |
168 | 223k | bool is_wp_only = false; |
169 | 223k | bool is_gradient_only = false; |
170 | 223k | size_t num_props; |
171 | 223k | FlatTree tree = |
172 | 223k | FilterTree(global_tree, static_props, &num_props, |
173 | 223k | &tree_has_wp_prop_or_pred, &is_wp_only, &is_gradient_only); |
174 | | |
175 | | // From here on, tree lookup returns a *clustered* context ID. |
176 | | // This avoids an extra memory lookup after tree traversal. |
177 | 233k | for (auto &node : tree) { |
178 | 233k | if (node.property0 == -1) { |
179 | 231k | node.childID = context_map[node.childID]; |
180 | 231k | } |
181 | 233k | } |
182 | | |
183 | 223k | JXL_DEBUG_V(3, "Decoded MA tree with %" PRIuS " nodes", tree.size()); |
184 | | |
185 | | // MAANS decode |
186 | 223k | const auto make_pixel = [](uint64_t v, pixel_type multiplier, |
187 | 27.4M | pixel_type_w offset) -> pixel_type { |
188 | 27.4M | JXL_DASSERT((v & 0xFFFFFFFF) == v); |
189 | 27.4M | pixel_type_w val = static_cast<pixel_type_w>(UnpackSigned(v)); |
190 | | // if it overflows, it overflows, and we have a problem anyway |
191 | 27.4M | return val * multiplier + offset; |
192 | 27.4M | }; |
(Per-template-instantiation coverage for this make_pixel lambda duplicates lines 187-192 above and is omitted here: 20.8M calls in the DecodeModularChannelMAANS<true> instantiation, 6.52M in DecodeModularChannelMAANS<false>.)
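As a readability aid for make_pixel above: UnpackSigned maps the non-negative token v back to a signed residual with the usual zig-zag convention (0, 1, 2, 3, ... -> 0, -1, 1, -2, ...), and the pixel is that residual scaled by the node's multiplier plus its offset. The function below is a hypothetical plain-arithmetic equivalent for illustration, not the bit-twiddling version in pack_signed.h.

    #include <cstdint>

    // Hypothetical illustration of the zig-zag decoding assumed by make_pixel:
    // even tokens decode to non-negative values, odd tokens to negative ones.
    int64_t UnpackSignedSketch(uint32_t v) {
      return (v & 1) ? -static_cast<int64_t>((v + 1) >> 1)
                     : static_cast<int64_t>(v >> 1);
    }

    // The resulting pixel is UnpackSigned(v) * multiplier + offset, as in the
    // lambda above.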
193 | | |
194 | 223k | if (tree.size() == 1) { |
195 | | // special optimized case: no meta-adaptation, so no need |
196 | | // to compute properties. |
197 | 222k | Predictor predictor = tree[0].predictor; |
198 | 222k | int64_t offset = tree[0].predictor_offset; |
199 | 222k | int32_t multiplier = tree[0].multiplier; |
200 | 222k | size_t ctx_id = tree[0].childID; |
201 | 222k | if (predictor == Predictor::Zero) { |
202 | 217k | uint32_t value; |
203 | 217k | if (reader->IsSingleValueAndAdvance(ctx_id, &value, |
204 | 217k | channel.w * channel.h)) { |
205 | | // Special-case: histogram has a single symbol, with no extra bits, and |
206 | | // we use ANS mode. |
207 | 87.9k | JXL_DEBUG_V(8, "Fastest track."); |
208 | 87.9k | pixel_type v = make_pixel(value, multiplier, offset); |
209 | 2.25M | for (size_t y = 0; y < channel.h; y++) { |
210 | 2.16M | pixel_type *JXL_RESTRICT r = channel.Row(y); |
211 | 2.16M | std::fill(r, r + channel.w, v); |
212 | 2.16M | } |
213 | 129k | } else { |
214 | 129k | JXL_DEBUG_V(8, "Fast track."); |
215 | 129k | if (multiplier == 1 && offset == 0) { |
216 | 2.18M | for (size_t y = 0; y < channel.h; y++) { |
217 | 2.05M | pixel_type *JXL_RESTRICT r = channel.Row(y); |
218 | 152M | for (size_t x = 0; x < channel.w; x++) { |
219 | 150M | uint32_t v = |
220 | 150M | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
221 | 150M | r[x] = UnpackSigned(v); |
222 | 150M | } |
223 | 2.05M | } |
224 | 123k | } else { |
225 | 190k | for (size_t y = 0; y < channel.h; y++) { |
226 | 184k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
227 | 14.7M | for (size_t x = 0; x < channel.w; x++) { |
228 | 14.5M | uint32_t v = |
229 | 14.5M | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, |
230 | 14.5M | br); |
231 | 14.5M | r[x] = make_pixel(v, multiplier, offset); |
232 | 14.5M | } |
233 | 184k | } |
234 | 6.05k | } |
235 | 129k | } |
236 | 217k | return true; |
237 | 217k | } else if (uses_lz77 && predictor == Predictor::Gradient && offset == 0 && |
238 | 4.92k | multiplier == 1 && reader->IsHuffRleOnly()) { |
239 | 4 | JXL_DEBUG_V(8, "Gradient RLE (fjxl) very fast track."); |
240 | 4 | pixel_type_w sv = UnpackSigned(fl_v); |
241 | 16 | for (size_t y = 0; y < channel.h; y++) { |
242 | 12 | pixel_type *JXL_RESTRICT r = channel.Row(y); |
243 | 12 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); |
244 | 12 | const pixel_type *JXL_RESTRICT rtopleft = |
245 | 12 | (y ? channel.Row(y - 1) - 1 : r - 1); |
246 | 12 | pixel_type_w guess_0 = (y ? rtop[0] : 0); |
247 | 12 | if (fl_run == 0) { |
248 | 12 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, |
249 | 12 | &fl_run); |
250 | 12 | sv = UnpackSigned(fl_v); |
251 | 12 | } else { |
252 | 0 | fl_run--; |
253 | 0 | } |
254 | 12 | r[0] = sv + guess_0; |
255 | 48 | for (size_t x = 1; x < channel.w; x++) { |
256 | 36 | pixel_type left = r[x - 1]; |
257 | 36 | pixel_type top = rtop[x]; |
258 | 36 | pixel_type topleft = rtopleft[x]; |
259 | 36 | pixel_type_w guess = ClampedGradient(top, left, topleft); |
260 | 36 | if (!fl_run) { |
261 | 36 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, |
262 | 36 | &fl_run); |
263 | 36 | sv = UnpackSigned(fl_v); |
264 | 36 | } else { |
265 | 0 | fl_run--; |
266 | 0 | } |
267 | 36 | r[x] = sv + guess; |
268 | 36 | } |
269 | 12 | } |
270 | 4 | return true; |
271 | 4.91k | } else if (predictor == Predictor::Gradient && offset == 0 && |
272 | 4.91k | multiplier == 1) { |
273 | 3 | JXL_DEBUG_V(8, "Gradient very fast track."); |
274 | 3 | const intptr_t onerow = channel.plane.PixelsPerRow(); |
275 | 37 | for (size_t y = 0; y < channel.h; y++) { |
276 | 34 | pixel_type *JXL_RESTRICT r = channel.Row(y); |
277 | 1.06k | for (size_t x = 0; x < channel.w; x++) { |
278 | 1.02k | pixel_type left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); |
279 | 1.02k | pixel_type top = (y ? *(r + x - onerow) : left); |
280 | 1.02k | pixel_type topleft = (x && y ? *(r + x - 1 - onerow) : left); |
281 | 1.02k | pixel_type guess = ClampedGradient(top, left, topleft); |
282 | 1.02k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( |
283 | 1.02k | ctx_id, br); |
284 | 1.02k | r[x] = make_pixel(v, 1, guess); |
285 | 1.02k | } |
286 | 34 | } |
287 | 3 | return true; |
288 | 3 | } |
289 | 222k | } |
290 | | |
291 | | // Check if this tree is a WP-only tree with a small enough property value |
292 | | // range. |
293 | 5.98k | if (is_wp_only) { |
294 | 2.37k | is_wp_only = TreeToLookupTable(tree, tree_lut); |
295 | 2.37k | } |
296 | 5.98k | if (is_gradient_only) { |
297 | 151 | is_gradient_only = TreeToLookupTable(tree, tree_lut); |
298 | 151 | } |
299 | | |
300 | 5.98k | if (is_gradient_only) { |
301 | 98 | JXL_DEBUG_V(8, "Gradient fast track."); |
302 | 98 | const intptr_t onerow = channel.plane.PixelsPerRow(); |
303 | 2.55k | for (size_t y = 0; y < channel.h; y++) { |
304 | 2.46k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
305 | 57.9k | for (size_t x = 0; x < channel.w; x++) { |
306 | 55.5k | pixel_type_w left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); |
307 | 55.5k | pixel_type_w top = (y ? *(r + x - onerow) : left); |
308 | 55.5k | pixel_type_w topleft = (x && y ? *(r + x - 1 - onerow) : left); |
309 | 55.5k | int32_t guess = ClampedGradient(top, left, topleft); |
310 | 55.5k | uint32_t pos = |
311 | 55.5k | kPropRangeFast + |
312 | 55.5k | std::min<pixel_type_w>( |
313 | 55.5k | std::max<pixel_type_w>(-kPropRangeFast, top + left - topleft), |
314 | 55.5k | kPropRangeFast - 1); |
315 | 55.5k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
316 | 55.5k | uint64_t v = |
317 | 55.5k | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, br); |
318 | 55.5k | r[x] = make_pixel(v, 1, guess); |
319 | 55.5k | } |
320 | 2.46k | } |
321 | 5.88k | } else if (!uses_lz77 && is_wp_only && channel.w > 8) { |
322 | 376 | JXL_DEBUG_V(8, "WP fast track."); |
323 | 376 | weighted::State wp_state(wp_header, channel.w, channel.h); |
324 | 376 | Properties properties(1); |
325 | 3.23k | for (size_t y = 0; y < channel.h; y++) { |
326 | 2.85k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
327 | 2.85k | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); |
328 | 2.85k | const pixel_type *JXL_RESTRICT rtoptop = |
329 | 2.85k | (y > 1 ? channel.Row(y - 2) : rtop); |
330 | 2.85k | const pixel_type *JXL_RESTRICT rtopleft = |
331 | 2.85k | (y ? channel.Row(y - 1) - 1 : r - 1); |
332 | 2.85k | const pixel_type *JXL_RESTRICT rtopright = |
333 | 2.85k | (y ? channel.Row(y - 1) + 1 : r - 1); |
334 | 2.85k | size_t x = 0; |
335 | 2.85k | { |
336 | 2.85k | size_t offset = 0; |
337 | 2.85k | pixel_type_w left = y ? rtop[x] : 0; |
338 | 2.85k | pixel_type_w toptop = y ? rtoptop[x] : 0; |
339 | 2.85k | pixel_type_w topright = (x + 1 < channel.w && y ? rtop[x + 1] : left); |
340 | 2.85k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
341 | 2.85k | x, y, channel.w, left, left, topright, left, toptop, &properties, |
342 | 2.85k | offset); |
343 | 2.85k | uint32_t pos = |
344 | 2.85k | kPropRangeFast + |
345 | 2.85k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); |
346 | 2.85k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
347 | 2.85k | uint64_t v = |
348 | 2.85k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
349 | 2.85k | r[x] = make_pixel(v, 1, guess); |
350 | 2.85k | wp_state.UpdateErrors(r[x], x, y, channel.w); |
351 | 2.85k | } |
352 | 73.4k | for (x = 1; x + 1 < channel.w; x++) { |
353 | 70.6k | size_t offset = 0; |
354 | 70.6k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
355 | 70.6k | x, y, channel.w, rtop[x], r[x - 1], rtopright[x], rtopleft[x], |
356 | 70.6k | rtoptop[x], &properties, offset); |
357 | 70.6k | uint32_t pos = |
358 | 70.6k | kPropRangeFast + |
359 | 70.6k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); |
360 | 70.6k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
361 | 70.6k | uint64_t v = |
362 | 70.6k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
363 | 70.6k | r[x] = make_pixel(v, 1, guess); |
364 | 70.6k | wp_state.UpdateErrors(r[x], x, y, channel.w); |
365 | 70.6k | } |
366 | 2.85k | { |
367 | 2.85k | size_t offset = 0; |
368 | 2.85k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
369 | 2.85k | x, y, channel.w, rtop[x], r[x - 1], rtop[x], rtopleft[x], |
370 | 2.85k | rtoptop[x], &properties, offset); |
371 | 2.85k | uint32_t pos = |
372 | 2.85k | kPropRangeFast + |
373 | 2.85k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); |
374 | 2.85k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
375 | 2.85k | uint64_t v = |
376 | 2.85k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
377 | 2.85k | r[x] = make_pixel(v, 1, guess); |
378 | 2.85k | wp_state.UpdateErrors(r[x], x, y, channel.w); |
379 | 2.85k | } |
380 | 2.85k | } |
381 | 5.50k | } else if (!tree_has_wp_prop_or_pred) { |
382 | | // special optimized case: the weighted predictor and its properties are not |
383 | | // used, so no need to compute weights and properties. |
384 | 3.22k | JXL_DEBUG_V(8, "Slow track."); |
385 | 3.22k | MATreeLookup tree_lookup(tree); |
386 | 3.22k | Properties properties = Properties(num_props); |
387 | 3.22k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
388 | 3.22k | JXL_ASSIGN_OR_RETURN( |
389 | 3.22k | Channel references, |
390 | 3.22k | Channel::Create(memory_manager, |
391 | 3.22k | properties.size() - kNumNonrefProperties, channel.w)); |
392 | 101k | for (size_t y = 0; y < channel.h; y++) { |
393 | 97.9k | pixel_type *JXL_RESTRICT p = channel.Row(y); |
394 | 97.9k | PrecomputeReferences(channel, y, *image, chan, &references); |
395 | 97.9k | InitPropsRow(&properties, static_props, y); |
396 | 97.9k | if (y > 1 && channel.w > 8 && references.w == 0) { |
397 | 199k | for (size_t x = 0; x < 2; x++) { |
398 | 133k | PredictionResult res = |
399 | 133k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
400 | 133k | tree_lookup, references); |
401 | 133k | uint64_t v = |
402 | 133k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
403 | 133k | p[x] = make_pixel(v, res.multiplier, res.guess); |
404 | 133k | } |
405 | 9.21M | for (size_t x = 2; x < channel.w - 2; x++) { |
406 | 9.15M | PredictionResult res = |
407 | 9.15M | PredictTreeNoWPNEC(&properties, channel.w, p + x, onerow, x, y, |
408 | 9.15M | tree_lookup, references); |
409 | 9.15M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( |
410 | 9.15M | res.context, br); |
411 | 9.15M | p[x] = make_pixel(v, res.multiplier, res.guess); |
412 | 9.15M | } |
413 | 199k | for (size_t x = channel.w - 2; x < channel.w; x++) { |
414 | 133k | PredictionResult res = |
415 | 133k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
416 | 133k | tree_lookup, references); |
417 | 133k | uint64_t v = |
418 | 133k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
419 | 133k | p[x] = make_pixel(v, res.multiplier, res.guess); |
420 | 133k | } |
421 | 66.5k | } else { |
422 | 313k | for (size_t x = 0; x < channel.w; x++) { |
423 | 281k | PredictionResult res = |
424 | 281k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
425 | 281k | tree_lookup, references); |
426 | 281k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( |
427 | 281k | res.context, br); |
428 | 281k | p[x] = make_pixel(v, res.multiplier, res.guess); |
429 | 281k | } |
430 | 31.3k | } |
431 | 97.9k | } |
432 | 3.22k | } else { |
433 | 2.28k | JXL_DEBUG_V(8, "Slowest track."); |
434 | 2.28k | MATreeLookup tree_lookup(tree); |
435 | 2.28k | Properties properties = Properties(num_props); |
436 | 2.28k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
437 | 2.28k | JXL_ASSIGN_OR_RETURN( |
438 | 2.28k | Channel references, |
439 | 2.28k | Channel::Create(memory_manager, |
440 | 2.28k | properties.size() - kNumNonrefProperties, channel.w)); |
441 | 2.28k | weighted::State wp_state(wp_header, channel.w, channel.h); |
442 | 59.4k | for (size_t y = 0; y < channel.h; y++) { |
443 | 57.1k | pixel_type *JXL_RESTRICT p = channel.Row(y); |
444 | 57.1k | InitPropsRow(&properties, static_props, y); |
445 | 57.1k | PrecomputeReferences(channel, y, *image, chan, &references); |
446 | 57.1k | if (!uses_lz77 && y > 1 && channel.w > 8 && references.w == 0) { |
447 | 131k | for (size_t x = 0; x < 2; x++) { |
448 | 87.5k | PredictionResult res = |
449 | 87.5k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
450 | 87.5k | tree_lookup, references, &wp_state); |
451 | 87.5k | uint64_t v = |
452 | 87.5k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
453 | 87.5k | p[x] = make_pixel(v, res.multiplier, res.guess); |
454 | 87.5k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
455 | 87.5k | } |
456 | 2.62M | for (size_t x = 2; x < channel.w - 2; x++) { |
457 | 2.58M | PredictionResult res = |
458 | 2.58M | PredictTreeWPNEC(&properties, channel.w, p + x, onerow, x, y, |
459 | 2.58M | tree_lookup, references, &wp_state); |
460 | 2.58M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( |
461 | 2.58M | res.context, br); |
462 | 2.58M | p[x] = make_pixel(v, res.multiplier, res.guess); |
463 | 2.58M | wp_state.UpdateErrors(p[x], x, y, channel.w); |
464 | 2.58M | } |
465 | 131k | for (size_t x = channel.w - 2; x < channel.w; x++) { |
466 | 87.5k | PredictionResult res = |
467 | 87.5k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
468 | 87.5k | tree_lookup, references, &wp_state); |
469 | 87.5k | uint64_t v = |
470 | 87.5k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
471 | 87.5k | p[x] = make_pixel(v, res.multiplier, res.guess); |
472 | 87.5k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
473 | 87.5k | } |
474 | 43.7k | } else { |
475 | 209k | for (size_t x = 0; x < channel.w; x++) { |
476 | 195k | PredictionResult res = |
477 | 195k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
478 | 195k | tree_lookup, references, &wp_state); |
479 | 195k | uint64_t v = |
480 | 195k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
481 | 195k | p[x] = make_pixel(v, res.multiplier, res.guess); |
482 | 195k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
483 | 195k | } |
484 | 13.3k | } |
485 | 57.1k | } |
486 | 2.28k | } |
487 | 5.98k | return true; |
488 | 5.98k | } jxl::Status jxl::detail::DecodeModularChannelMAANS<true>(jxl::BitReader*, jxl::ANSSymbolReader*, std::__1::vector<unsigned char, std::__1::allocator<unsigned char> > const&, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> > const&, jxl::weighted::Header const&, int, unsigned long, jxl::TreeLut<unsigned char, false, false>&, jxl::Image*, unsigned int&, unsigned int&) Line | Count | Source | 156 | 17.5k | uint32_t &fl_v) { | 157 | 17.5k | JxlMemoryManager *memory_manager = image->memory_manager(); | 158 | 17.5k | Channel &channel = image->channel[chan]; | 159 | | | 160 | 17.5k | std::array<pixel_type, kNumStaticProperties> static_props = { | 161 | 17.5k | {chan, static_cast<int>(group_id)}}; | 162 | | // TODO(veluca): filter the tree according to static_props. | 163 | | | 164 | | // zero pixel channel? could happen | 165 | 17.5k | if (channel.w == 0 || channel.h == 0) return true; | 166 | | | 167 | 17.5k | bool tree_has_wp_prop_or_pred = false; | 168 | 17.5k | bool is_wp_only = false; | 169 | 17.5k | bool is_gradient_only = false; | 170 | 17.5k | size_t num_props; | 171 | 17.5k | FlatTree tree = | 172 | 17.5k | FilterTree(global_tree, static_props, &num_props, | 173 | 17.5k | &tree_has_wp_prop_or_pred, &is_wp_only, &is_gradient_only); | 174 | | | 175 | | // From here on, tree lookup returns a *clustered* context ID. | 176 | | // This avoids an extra memory lookup after tree traversal. | 177 | 17.8k | for (auto &node : tree) { | 178 | 17.8k | if (node.property0 == -1) { | 179 | 17.7k | node.childID = context_map[node.childID]; | 180 | 17.7k | } | 181 | 17.8k | } | 182 | | | 183 | 17.5k | JXL_DEBUG_V(3, "Decoded MA tree with %" PRIuS " nodes", tree.size()); | 184 | | | 185 | | // MAANS decode | 186 | 17.5k | const auto make_pixel = [](uint64_t v, pixel_type multiplier, | 187 | 17.5k | pixel_type_w offset) -> pixel_type { | 188 | 17.5k | JXL_DASSERT((v & 0xFFFFFFFF) == v); | 189 | 17.5k | pixel_type_w val = static_cast<pixel_type_w>(UnpackSigned(v)); | 190 | | // if it overflows, it overflows, and we have a problem anyway | 191 | 17.5k | return val * multiplier + offset; | 192 | 17.5k | }; | 193 | | | 194 | 17.5k | if (tree.size() == 1) { | 195 | | // special optimized case: no meta-adaptation, so no need | 196 | | // to compute properties. | 197 | 17.4k | Predictor predictor = tree[0].predictor; | 198 | 17.4k | int64_t offset = tree[0].predictor_offset; | 199 | 17.4k | int32_t multiplier = tree[0].multiplier; | 200 | 17.4k | size_t ctx_id = tree[0].childID; | 201 | 17.4k | if (predictor == Predictor::Zero) { | 202 | 16.6k | uint32_t value; | 203 | 16.6k | if (reader->IsSingleValueAndAdvance(ctx_id, &value, | 204 | 16.6k | channel.w * channel.h)) { | 205 | | // Special-case: histogram has a single symbol, with no extra bits, and | 206 | | // we use ANS mode. 
| 207 | 8.38k | JXL_DEBUG_V(8, "Fastest track."); | 208 | 8.38k | pixel_type v = make_pixel(value, multiplier, offset); | 209 | 270k | for (size_t y = 0; y < channel.h; y++) { | 210 | 262k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 211 | 262k | std::fill(r, r + channel.w, v); | 212 | 262k | } | 213 | 8.38k | } else { | 214 | 8.27k | JXL_DEBUG_V(8, "Fast track."); | 215 | 8.27k | if (multiplier == 1 && offset == 0) { | 216 | 291k | for (size_t y = 0; y < channel.h; y++) { | 217 | 288k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 218 | 55.5M | for (size_t x = 0; x < channel.w; x++) { | 219 | 55.2M | uint32_t v = | 220 | 55.2M | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 221 | 55.2M | r[x] = UnpackSigned(v); | 222 | 55.2M | } | 223 | 288k | } | 224 | 5.12k | } else { | 225 | 175k | for (size_t y = 0; y < channel.h; y++) { | 226 | 170k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 227 | 13.7M | for (size_t x = 0; x < channel.w; x++) { | 228 | 13.5M | uint32_t v = | 229 | 13.5M | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, | 230 | 13.5M | br); | 231 | 13.5M | r[x] = make_pixel(v, multiplier, offset); | 232 | 13.5M | } | 233 | 170k | } | 234 | 5.12k | } | 235 | 8.27k | } | 236 | 16.6k | return true; | 237 | 16.6k | } else if (uses_lz77 && predictor == Predictor::Gradient && offset == 0 && | 238 | 797 | multiplier == 1 && reader->IsHuffRleOnly()) { | 239 | 4 | JXL_DEBUG_V(8, "Gradient RLE (fjxl) very fast track."); | 240 | 4 | pixel_type_w sv = UnpackSigned(fl_v); | 241 | 16 | for (size_t y = 0; y < channel.h; y++) { | 242 | 12 | pixel_type *JXL_RESTRICT r = channel.Row(y); | 243 | 12 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); | 244 | 12 | const pixel_type *JXL_RESTRICT rtopleft = | 245 | 12 | (y ? channel.Row(y - 1) - 1 : r - 1); | 246 | 12 | pixel_type_w guess_0 = (y ? rtop[0] : 0); | 247 | 12 | if (fl_run == 0) { | 248 | 12 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, | 249 | 12 | &fl_run); | 250 | 12 | sv = UnpackSigned(fl_v); | 251 | 12 | } else { | 252 | 0 | fl_run--; | 253 | 0 | } | 254 | 12 | r[0] = sv + guess_0; | 255 | 48 | for (size_t x = 1; x < channel.w; x++) { | 256 | 36 | pixel_type left = r[x - 1]; | 257 | 36 | pixel_type top = rtop[x]; | 258 | 36 | pixel_type topleft = rtopleft[x]; | 259 | 36 | pixel_type_w guess = ClampedGradient(top, left, topleft); | 260 | 36 | if (!fl_run) { | 261 | 36 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, | 262 | 36 | &fl_run); | 263 | 36 | sv = UnpackSigned(fl_v); | 264 | 36 | } else { | 265 | 0 | fl_run--; | 266 | 0 | } | 267 | 36 | r[x] = sv + guess; | 268 | 36 | } | 269 | 12 | } | 270 | 4 | return true; | 271 | 793 | } else if (predictor == Predictor::Gradient && offset == 0 && | 272 | 793 | multiplier == 1) { | 273 | 0 | JXL_DEBUG_V(8, "Gradient very fast track."); | 274 | 0 | const intptr_t onerow = channel.plane.PixelsPerRow(); | 275 | 0 | for (size_t y = 0; y < channel.h; y++) { | 276 | 0 | pixel_type *JXL_RESTRICT r = channel.Row(y); | 277 | 0 | for (size_t x = 0; x < channel.w; x++) { | 278 | 0 | pixel_type left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); | 279 | 0 | pixel_type top = (y ? *(r + x - onerow) : left); | 280 | 0 | pixel_type topleft = (x && y ? 
*(r + x - 1 - onerow) : left); | 281 | 0 | pixel_type guess = ClampedGradient(top, left, topleft); | 282 | 0 | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( | 283 | 0 | ctx_id, br); | 284 | 0 | r[x] = make_pixel(v, 1, guess); | 285 | 0 | } | 286 | 0 | } | 287 | 0 | return true; | 288 | 0 | } | 289 | 17.4k | } | 290 | | | 291 | | // Check if this tree is a WP-only tree with a small enough property value | 292 | | // range. | 293 | 870 | if (is_wp_only) { | 294 | 183 | is_wp_only = TreeToLookupTable(tree, tree_lut); | 295 | 183 | } | 296 | 870 | if (is_gradient_only) { | 297 | 51 | is_gradient_only = TreeToLookupTable(tree, tree_lut); | 298 | 51 | } | 299 | | | 300 | 870 | if (is_gradient_only) { | 301 | 28 | JXL_DEBUG_V(8, "Gradient fast track."); | 302 | 28 | const intptr_t onerow = channel.plane.PixelsPerRow(); | 303 | 1.05k | for (size_t y = 0; y < channel.h; y++) { | 304 | 1.02k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 305 | 15.2k | for (size_t x = 0; x < channel.w; x++) { | 306 | 14.2k | pixel_type_w left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); | 307 | 14.2k | pixel_type_w top = (y ? *(r + x - onerow) : left); | 308 | 14.2k | pixel_type_w topleft = (x && y ? *(r + x - 1 - onerow) : left); | 309 | 14.2k | int32_t guess = ClampedGradient(top, left, topleft); | 310 | 14.2k | uint32_t pos = | 311 | 14.2k | kPropRangeFast + | 312 | 14.2k | std::min<pixel_type_w>( | 313 | 14.2k | std::max<pixel_type_w>(-kPropRangeFast, top + left - topleft), | 314 | 14.2k | kPropRangeFast - 1); | 315 | 14.2k | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 316 | 14.2k | uint64_t v = | 317 | 14.2k | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, br); | 318 | 14.2k | r[x] = make_pixel(v, 1, guess); | 319 | 14.2k | } | 320 | 1.02k | } | 321 | 842 | } else if (!uses_lz77 && is_wp_only && channel.w > 8) { | 322 | 0 | JXL_DEBUG_V(8, "WP fast track."); | 323 | 0 | weighted::State wp_state(wp_header, channel.w, channel.h); | 324 | 0 | Properties properties(1); | 325 | 0 | for (size_t y = 0; y < channel.h; y++) { | 326 | 0 | pixel_type *JXL_RESTRICT r = channel.Row(y); | 327 | 0 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); | 328 | 0 | const pixel_type *JXL_RESTRICT rtoptop = | 329 | 0 | (y > 1 ? channel.Row(y - 2) : rtop); | 330 | 0 | const pixel_type *JXL_RESTRICT rtopleft = | 331 | 0 | (y ? channel.Row(y - 1) - 1 : r - 1); | 332 | 0 | const pixel_type *JXL_RESTRICT rtopright = | 333 | 0 | (y ? channel.Row(y - 1) + 1 : r - 1); | 334 | 0 | size_t x = 0; | 335 | 0 | { | 336 | 0 | size_t offset = 0; | 337 | 0 | pixel_type_w left = y ? rtop[x] : 0; | 338 | 0 | pixel_type_w toptop = y ? rtoptop[x] : 0; | 339 | 0 | pixel_type_w topright = (x + 1 < channel.w && y ? 
rtop[x + 1] : left); | 340 | 0 | int32_t guess = wp_state.Predict</*compute_properties=*/true>( | 341 | 0 | x, y, channel.w, left, left, topright, left, toptop, &properties, | 342 | 0 | offset); | 343 | 0 | uint32_t pos = | 344 | 0 | kPropRangeFast + | 345 | 0 | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); | 346 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 347 | 0 | uint64_t v = | 348 | 0 | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 349 | 0 | r[x] = make_pixel(v, 1, guess); | 350 | 0 | wp_state.UpdateErrors(r[x], x, y, channel.w); | 351 | 0 | } | 352 | 0 | for (x = 1; x + 1 < channel.w; x++) { | 353 | 0 | size_t offset = 0; | 354 | 0 | int32_t guess = wp_state.Predict</*compute_properties=*/true>( | 355 | 0 | x, y, channel.w, rtop[x], r[x - 1], rtopright[x], rtopleft[x], | 356 | 0 | rtoptop[x], &properties, offset); | 357 | 0 | uint32_t pos = | 358 | 0 | kPropRangeFast + | 359 | 0 | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); | 360 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 361 | 0 | uint64_t v = | 362 | 0 | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 363 | 0 | r[x] = make_pixel(v, 1, guess); | 364 | 0 | wp_state.UpdateErrors(r[x], x, y, channel.w); | 365 | 0 | } | 366 | 0 | { | 367 | 0 | size_t offset = 0; | 368 | 0 | int32_t guess = wp_state.Predict</*compute_properties=*/true>( | 369 | 0 | x, y, channel.w, rtop[x], r[x - 1], rtop[x], rtopleft[x], | 370 | 0 | rtoptop[x], &properties, offset); | 371 | 0 | uint32_t pos = | 372 | 0 | kPropRangeFast + | 373 | 0 | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); | 374 | 0 | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 375 | 0 | uint64_t v = | 376 | 0 | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 377 | 0 | r[x] = make_pixel(v, 1, guess); | 378 | 0 | wp_state.UpdateErrors(r[x], x, y, channel.w); | 379 | 0 | } | 380 | 0 | } | 381 | 842 | } else if (!tree_has_wp_prop_or_pred) { | 382 | | // special optimized case: the weighted predictor and its properties are not | 383 | | // used, so no need to compute weights and properties. 
| 384 | 658 | JXL_DEBUG_V(8, "Slow track."); | 385 | 658 | MATreeLookup tree_lookup(tree); | 386 | 658 | Properties properties = Properties(num_props); | 387 | 658 | const intptr_t onerow = channel.plane.PixelsPerRow(); | 388 | 658 | JXL_ASSIGN_OR_RETURN( | 389 | 658 | Channel references, | 390 | 658 | Channel::Create(memory_manager, | 391 | 658 | properties.size() - kNumNonrefProperties, channel.w)); | 392 | 42.3k | for (size_t y = 0; y < channel.h; y++) { | 393 | 41.6k | pixel_type *JXL_RESTRICT p = channel.Row(y); | 394 | 41.6k | PrecomputeReferences(channel, y, *image, chan, &references); | 395 | 41.6k | InitPropsRow(&properties, static_props, y); | 396 | 41.6k | if (y > 1 && channel.w > 8 && references.w == 0) { | 397 | 114k | for (size_t x = 0; x < 2; x++) { | 398 | 76.0k | PredictionResult res = | 399 | 76.0k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, | 400 | 76.0k | tree_lookup, references); | 401 | 76.0k | uint64_t v = | 402 | 76.0k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 403 | 76.0k | p[x] = make_pixel(v, res.multiplier, res.guess); | 404 | 76.0k | } | 405 | 7.04M | for (size_t x = 2; x < channel.w - 2; x++) { | 406 | 7.00M | PredictionResult res = | 407 | 7.00M | PredictTreeNoWPNEC(&properties, channel.w, p + x, onerow, x, y, | 408 | 7.00M | tree_lookup, references); | 409 | 7.00M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( | 410 | 7.00M | res.context, br); | 411 | 7.00M | p[x] = make_pixel(v, res.multiplier, res.guess); | 412 | 7.00M | } | 413 | 114k | for (size_t x = channel.w - 2; x < channel.w; x++) { | 414 | 76.0k | PredictionResult res = | 415 | 76.0k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, | 416 | 76.0k | tree_lookup, references); | 417 | 76.0k | uint64_t v = | 418 | 76.0k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 419 | 76.0k | p[x] = make_pixel(v, res.multiplier, res.guess); | 420 | 76.0k | } | 421 | 38.0k | } else { | 422 | 110k | for (size_t x = 0; x < channel.w; x++) { | 423 | 107k | PredictionResult res = | 424 | 107k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, | 425 | 107k | tree_lookup, references); | 426 | 107k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( | 427 | 107k | res.context, br); | 428 | 107k | p[x] = make_pixel(v, res.multiplier, res.guess); | 429 | 107k | } | 430 | 3.62k | } | 431 | 41.6k | } | 432 | 658 | } else { | 433 | 184 | JXL_DEBUG_V(8, "Slowest track."); | 434 | 184 | MATreeLookup tree_lookup(tree); | 435 | 184 | Properties properties = Properties(num_props); | 436 | 184 | const intptr_t onerow = channel.plane.PixelsPerRow(); | 437 | 184 | JXL_ASSIGN_OR_RETURN( | 438 | 184 | Channel references, | 439 | 184 | Channel::Create(memory_manager, | 440 | 184 | properties.size() - kNumNonrefProperties, channel.w)); | 441 | 184 | weighted::State wp_state(wp_header, channel.w, channel.h); | 442 | 3.79k | for (size_t y = 0; y < channel.h; y++) { | 443 | 3.60k | pixel_type *JXL_RESTRICT p = channel.Row(y); | 444 | 3.60k | InitPropsRow(&properties, static_props, y); | 445 | 3.60k | PrecomputeReferences(channel, y, *image, chan, &references); | 446 | 3.60k | if (!uses_lz77 && y > 1 && channel.w > 8 && references.w == 0) { | 447 | 0 | for (size_t x = 0; x < 2; x++) { | 448 | 0 | PredictionResult res = | 449 | 0 | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, | 450 | 0 | tree_lookup, references, &wp_state); | 451 | 0 | uint64_t v = | 452 | 0 | 
reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 453 | 0 | p[x] = make_pixel(v, res.multiplier, res.guess); | 454 | 0 | wp_state.UpdateErrors(p[x], x, y, channel.w); | 455 | 0 | } | 456 | 0 | for (size_t x = 2; x < channel.w - 2; x++) { | 457 | 0 | PredictionResult res = | 458 | 0 | PredictTreeWPNEC(&properties, channel.w, p + x, onerow, x, y, | 459 | 0 | tree_lookup, references, &wp_state); | 460 | 0 | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( | 461 | 0 | res.context, br); | 462 | 0 | p[x] = make_pixel(v, res.multiplier, res.guess); | 463 | 0 | wp_state.UpdateErrors(p[x], x, y, channel.w); | 464 | 0 | } | 465 | 0 | for (size_t x = channel.w - 2; x < channel.w; x++) { | 466 | 0 | PredictionResult res = | 467 | 0 | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, | 468 | 0 | tree_lookup, references, &wp_state); | 469 | 0 | uint64_t v = | 470 | 0 | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 471 | 0 | p[x] = make_pixel(v, res.multiplier, res.guess); | 472 | 0 | wp_state.UpdateErrors(p[x], x, y, channel.w); | 473 | 0 | } | 474 | 3.60k | } else { | 475 | 46.3k | for (size_t x = 0; x < channel.w; x++) { | 476 | 42.7k | PredictionResult res = | 477 | 42.7k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, | 478 | 42.7k | tree_lookup, references, &wp_state); | 479 | 42.7k | uint64_t v = | 480 | 42.7k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 481 | 42.7k | p[x] = make_pixel(v, res.multiplier, res.guess); | 482 | 42.7k | wp_state.UpdateErrors(p[x], x, y, channel.w); | 483 | 42.7k | } | 484 | 3.60k | } | 485 | 3.60k | } | 486 | 184 | } | 487 | 870 | return true; | 488 | 870 | } |
jxl::Status jxl::detail::DecodeModularChannelMAANS<false>(jxl::BitReader*, jxl::ANSSymbolReader*, std::__1::vector<unsigned char, std::__1::allocator<unsigned char> > const&, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> > const&, jxl::weighted::Header const&, int, unsigned long, jxl::TreeLut<unsigned char, false, false>&, jxl::Image*, unsigned int&, unsigned int&) Line | Count | Source | 156 | 206k | uint32_t &fl_v) { | 157 | 206k | JxlMemoryManager *memory_manager = image->memory_manager(); | 158 | 206k | Channel &channel = image->channel[chan]; | 159 | | | 160 | 206k | std::array<pixel_type, kNumStaticProperties> static_props = { | 161 | 206k | {chan, static_cast<int>(group_id)}}; | 162 | | // TODO(veluca): filter the tree according to static_props. | 163 | | | 164 | | // zero pixel channel? could happen | 165 | 206k | if (channel.w == 0 || channel.h == 0) return true; | 166 | | | 167 | 206k | bool tree_has_wp_prop_or_pred = false; | 168 | 206k | bool is_wp_only = false; | 169 | 206k | bool is_gradient_only = false; | 170 | 206k | size_t num_props; | 171 | 206k | FlatTree tree = | 172 | 206k | FilterTree(global_tree, static_props, &num_props, | 173 | 206k | &tree_has_wp_prop_or_pred, &is_wp_only, &is_gradient_only); | 174 | | | 175 | | // From here on, tree lookup returns a *clustered* context ID. | 176 | | // This avoids an extra memory lookup after tree traversal. | 177 | 215k | for (auto &node : tree) { | 178 | 215k | if (node.property0 == -1) { | 179 | 213k | node.childID = context_map[node.childID]; | 180 | 213k | } | 181 | 215k | } | 182 | | | 183 | 206k | JXL_DEBUG_V(3, "Decoded MA tree with %" PRIuS " nodes", tree.size()); | 184 | | | 185 | | // MAANS decode | 186 | 206k | const auto make_pixel = [](uint64_t v, pixel_type multiplier, | 187 | 206k | pixel_type_w offset) -> pixel_type { | 188 | 206k | JXL_DASSERT((v & 0xFFFFFFFF) == v); | 189 | 206k | pixel_type_w val = static_cast<pixel_type_w>(UnpackSigned(v)); | 190 | | // if it overflows, it overflows, and we have a problem anyway | 191 | 206k | return val * multiplier + offset; | 192 | 206k | }; | 193 | | | 194 | 206k | if (tree.size() == 1) { | 195 | | // special optimized case: no meta-adaptation, so no need | 196 | | // to compute properties. | 197 | 205k | Predictor predictor = tree[0].predictor; | 198 | 205k | int64_t offset = tree[0].predictor_offset; | 199 | 205k | int32_t multiplier = tree[0].multiplier; | 200 | 205k | size_t ctx_id = tree[0].childID; | 201 | 205k | if (predictor == Predictor::Zero) { | 202 | 201k | uint32_t value; | 203 | 201k | if (reader->IsSingleValueAndAdvance(ctx_id, &value, | 204 | 201k | channel.w * channel.h)) { | 205 | | // Special-case: histogram has a single symbol, with no extra bits, and | 206 | | // we use ANS mode. 
| 207 | 79.5k | JXL_DEBUG_V(8, "Fastest track."); | 208 | 79.5k | pixel_type v = make_pixel(value, multiplier, offset); | 209 | 1.98M | for (size_t y = 0; y < channel.h; y++) { | 210 | 1.90M | pixel_type *JXL_RESTRICT r = channel.Row(y); | 211 | 1.90M | std::fill(r, r + channel.w, v); | 212 | 1.90M | } | 213 | 121k | } else { | 214 | 121k | JXL_DEBUG_V(8, "Fast track."); | 215 | 121k | if (multiplier == 1 && offset == 0) { | 216 | 1.89M | for (size_t y = 0; y < channel.h; y++) { | 217 | 1.77M | pixel_type *JXL_RESTRICT r = channel.Row(y); | 218 | 96.5M | for (size_t x = 0; x < channel.w; x++) { | 219 | 94.7M | uint32_t v = | 220 | 94.7M | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 221 | 94.7M | r[x] = UnpackSigned(v); | 222 | 94.7M | } | 223 | 1.77M | } | 224 | 120k | } else { | 225 | 15.0k | for (size_t y = 0; y < channel.h; y++) { | 226 | 14.0k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 227 | 991k | for (size_t x = 0; x < channel.w; x++) { | 228 | 977k | uint32_t v = | 229 | 977k | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, | 230 | 977k | br); | 231 | 977k | r[x] = make_pixel(v, multiplier, offset); | 232 | 977k | } | 233 | 14.0k | } | 234 | 937 | } | 235 | 121k | } | 236 | 201k | return true; | 237 | 201k | } else if (uses_lz77 && predictor == Predictor::Gradient && offset == 0 && | 238 | 4.12k | multiplier == 1 && reader->IsHuffRleOnly()) { | 239 | 0 | JXL_DEBUG_V(8, "Gradient RLE (fjxl) very fast track."); | 240 | 0 | pixel_type_w sv = UnpackSigned(fl_v); | 241 | 0 | for (size_t y = 0; y < channel.h; y++) { | 242 | 0 | pixel_type *JXL_RESTRICT r = channel.Row(y); | 243 | 0 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); | 244 | 0 | const pixel_type *JXL_RESTRICT rtopleft = | 245 | 0 | (y ? channel.Row(y - 1) - 1 : r - 1); | 246 | 0 | pixel_type_w guess_0 = (y ? rtop[0] : 0); | 247 | 0 | if (fl_run == 0) { | 248 | 0 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, | 249 | 0 | &fl_run); | 250 | 0 | sv = UnpackSigned(fl_v); | 251 | 0 | } else { | 252 | 0 | fl_run--; | 253 | 0 | } | 254 | 0 | r[0] = sv + guess_0; | 255 | 0 | for (size_t x = 1; x < channel.w; x++) { | 256 | 0 | pixel_type left = r[x - 1]; | 257 | 0 | pixel_type top = rtop[x]; | 258 | 0 | pixel_type topleft = rtopleft[x]; | 259 | 0 | pixel_type_w guess = ClampedGradient(top, left, topleft); | 260 | 0 | if (!fl_run) { | 261 | 0 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, | 262 | 0 | &fl_run); | 263 | 0 | sv = UnpackSigned(fl_v); | 264 | 0 | } else { | 265 | 0 | fl_run--; | 266 | 0 | } | 267 | 0 | r[x] = sv + guess; | 268 | 0 | } | 269 | 0 | } | 270 | 0 | return true; | 271 | 4.12k | } else if (predictor == Predictor::Gradient && offset == 0 && | 272 | 4.12k | multiplier == 1) { | 273 | 3 | JXL_DEBUG_V(8, "Gradient very fast track."); | 274 | 3 | const intptr_t onerow = channel.plane.PixelsPerRow(); | 275 | 37 | for (size_t y = 0; y < channel.h; y++) { | 276 | 34 | pixel_type *JXL_RESTRICT r = channel.Row(y); | 277 | 1.06k | for (size_t x = 0; x < channel.w; x++) { | 278 | 1.02k | pixel_type left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); | 279 | 1.02k | pixel_type top = (y ? *(r + x - onerow) : left); | 280 | 1.02k | pixel_type topleft = (x && y ? 
*(r + x - 1 - onerow) : left); | 281 | 1.02k | pixel_type guess = ClampedGradient(top, left, topleft); | 282 | 1.02k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( | 283 | 1.02k | ctx_id, br); | 284 | 1.02k | r[x] = make_pixel(v, 1, guess); | 285 | 1.02k | } | 286 | 34 | } | 287 | 3 | return true; | 288 | 3 | } | 289 | 205k | } | 290 | | | 291 | | // Check if this tree is a WP-only tree with a small enough property value | 292 | | // range. | 293 | 5.11k | if (is_wp_only) { | 294 | 2.18k | is_wp_only = TreeToLookupTable(tree, tree_lut); | 295 | 2.18k | } | 296 | 5.11k | if (is_gradient_only) { | 297 | 100 | is_gradient_only = TreeToLookupTable(tree, tree_lut); | 298 | 100 | } | 299 | | | 300 | 5.11k | if (is_gradient_only) { | 301 | 70 | JXL_DEBUG_V(8, "Gradient fast track."); | 302 | 70 | const intptr_t onerow = channel.plane.PixelsPerRow(); | 303 | 1.50k | for (size_t y = 0; y < channel.h; y++) { | 304 | 1.43k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 305 | 42.7k | for (size_t x = 0; x < channel.w; x++) { | 306 | 41.2k | pixel_type_w left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); | 307 | 41.2k | pixel_type_w top = (y ? *(r + x - onerow) : left); | 308 | 41.2k | pixel_type_w topleft = (x && y ? *(r + x - 1 - onerow) : left); | 309 | 41.2k | int32_t guess = ClampedGradient(top, left, topleft); | 310 | 41.2k | uint32_t pos = | 311 | 41.2k | kPropRangeFast + | 312 | 41.2k | std::min<pixel_type_w>( | 313 | 41.2k | std::max<pixel_type_w>(-kPropRangeFast, top + left - topleft), | 314 | 41.2k | kPropRangeFast - 1); | 315 | 41.2k | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 316 | 41.2k | uint64_t v = | 317 | 41.2k | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, br); | 318 | 41.2k | r[x] = make_pixel(v, 1, guess); | 319 | 41.2k | } | 320 | 1.43k | } | 321 | 5.04k | } else if (!uses_lz77 && is_wp_only && channel.w > 8) { | 322 | 376 | JXL_DEBUG_V(8, "WP fast track."); | 323 | 376 | weighted::State wp_state(wp_header, channel.w, channel.h); | 324 | 376 | Properties properties(1); | 325 | 3.23k | for (size_t y = 0; y < channel.h; y++) { | 326 | 2.85k | pixel_type *JXL_RESTRICT r = channel.Row(y); | 327 | 2.85k | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); | 328 | 2.85k | const pixel_type *JXL_RESTRICT rtoptop = | 329 | 2.85k | (y > 1 ? channel.Row(y - 2) : rtop); | 330 | 2.85k | const pixel_type *JXL_RESTRICT rtopleft = | 331 | 2.85k | (y ? channel.Row(y - 1) - 1 : r - 1); | 332 | 2.85k | const pixel_type *JXL_RESTRICT rtopright = | 333 | 2.85k | (y ? channel.Row(y - 1) + 1 : r - 1); | 334 | 2.85k | size_t x = 0; | 335 | 2.85k | { | 336 | 2.85k | size_t offset = 0; | 337 | 2.85k | pixel_type_w left = y ? rtop[x] : 0; | 338 | 2.85k | pixel_type_w toptop = y ? rtoptop[x] : 0; | 339 | 2.85k | pixel_type_w topright = (x + 1 < channel.w && y ? 
rtop[x + 1] : left); | 340 | 2.85k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( | 341 | 2.85k | x, y, channel.w, left, left, topright, left, toptop, &properties, | 342 | 2.85k | offset); | 343 | 2.85k | uint32_t pos = | 344 | 2.85k | kPropRangeFast + | 345 | 2.85k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); | 346 | 2.85k | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 347 | 2.85k | uint64_t v = | 348 | 2.85k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 349 | 2.85k | r[x] = make_pixel(v, 1, guess); | 350 | 2.85k | wp_state.UpdateErrors(r[x], x, y, channel.w); | 351 | 2.85k | } | 352 | 73.4k | for (x = 1; x + 1 < channel.w; x++) { | 353 | 70.6k | size_t offset = 0; | 354 | 70.6k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( | 355 | 70.6k | x, y, channel.w, rtop[x], r[x - 1], rtopright[x], rtopleft[x], | 356 | 70.6k | rtoptop[x], &properties, offset); | 357 | 70.6k | uint32_t pos = | 358 | 70.6k | kPropRangeFast + | 359 | 70.6k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); | 360 | 70.6k | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 361 | 70.6k | uint64_t v = | 362 | 70.6k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 363 | 70.6k | r[x] = make_pixel(v, 1, guess); | 364 | 70.6k | wp_state.UpdateErrors(r[x], x, y, channel.w); | 365 | 70.6k | } | 366 | 2.85k | { | 367 | 2.85k | size_t offset = 0; | 368 | 2.85k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( | 369 | 2.85k | x, y, channel.w, rtop[x], r[x - 1], rtop[x], rtopleft[x], | 370 | 2.85k | rtoptop[x], &properties, offset); | 371 | 2.85k | uint32_t pos = | 372 | 2.85k | kPropRangeFast + | 373 | 2.85k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); | 374 | 2.85k | uint32_t ctx_id = tree_lut.context_lookup[pos]; | 375 | 2.85k | uint64_t v = | 376 | 2.85k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); | 377 | 2.85k | r[x] = make_pixel(v, 1, guess); | 378 | 2.85k | wp_state.UpdateErrors(r[x], x, y, channel.w); | 379 | 2.85k | } | 380 | 2.85k | } | 381 | 4.66k | } else if (!tree_has_wp_prop_or_pred) { | 382 | | // special optimized case: the weighted predictor and its properties are not | 383 | | // used, so no need to compute weights and properties. 
| 384 | 2.57k | JXL_DEBUG_V(8, "Slow track."); | 385 | 2.57k | MATreeLookup tree_lookup(tree); | 386 | 2.57k | Properties properties = Properties(num_props); | 387 | 2.57k | const intptr_t onerow = channel.plane.PixelsPerRow(); | 388 | 2.57k | JXL_ASSIGN_OR_RETURN( | 389 | 2.57k | Channel references, | 390 | 2.57k | Channel::Create(memory_manager, | 391 | 2.57k | properties.size() - kNumNonrefProperties, channel.w)); | 392 | 58.8k | for (size_t y = 0; y < channel.h; y++) { | 393 | 56.2k | pixel_type *JXL_RESTRICT p = channel.Row(y); | 394 | 56.2k | PrecomputeReferences(channel, y, *image, chan, &references); | 395 | 56.2k | InitPropsRow(&properties, static_props, y); | 396 | 56.2k | if (y > 1 && channel.w > 8 && references.w == 0) { | 397 | 85.6k | for (size_t x = 0; x < 2; x++) { | 398 | 57.0k | PredictionResult res = | 399 | 57.0k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, | 400 | 57.0k | tree_lookup, references); | 401 | 57.0k | uint64_t v = | 402 | 57.0k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 403 | 57.0k | p[x] = make_pixel(v, res.multiplier, res.guess); | 404 | 57.0k | } | 405 | 2.17M | for (size_t x = 2; x < channel.w - 2; x++) { | 406 | 2.15M | PredictionResult res = | 407 | 2.15M | PredictTreeNoWPNEC(&properties, channel.w, p + x, onerow, x, y, | 408 | 2.15M | tree_lookup, references); | 409 | 2.15M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( | 410 | 2.15M | res.context, br); | 411 | 2.15M | p[x] = make_pixel(v, res.multiplier, res.guess); | 412 | 2.15M | } | 413 | 85.6k | for (size_t x = channel.w - 2; x < channel.w; x++) { | 414 | 57.0k | PredictionResult res = | 415 | 57.0k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, | 416 | 57.0k | tree_lookup, references); | 417 | 57.0k | uint64_t v = | 418 | 57.0k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 419 | 57.0k | p[x] = make_pixel(v, res.multiplier, res.guess); | 420 | 57.0k | } | 421 | 28.5k | } else { | 422 | 202k | for (size_t x = 0; x < channel.w; x++) { | 423 | 174k | PredictionResult res = | 424 | 174k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, | 425 | 174k | tree_lookup, references); | 426 | 174k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( | 427 | 174k | res.context, br); | 428 | 174k | p[x] = make_pixel(v, res.multiplier, res.guess); | 429 | 174k | } | 430 | 27.7k | } | 431 | 56.2k | } | 432 | 2.57k | } else { | 433 | 2.09k | JXL_DEBUG_V(8, "Slowest track."); | 434 | 2.09k | MATreeLookup tree_lookup(tree); | 435 | 2.09k | Properties properties = Properties(num_props); | 436 | 2.09k | const intptr_t onerow = channel.plane.PixelsPerRow(); | 437 | 2.09k | JXL_ASSIGN_OR_RETURN( | 438 | 2.09k | Channel references, | 439 | 2.09k | Channel::Create(memory_manager, | 440 | 2.09k | properties.size() - kNumNonrefProperties, channel.w)); | 441 | 2.09k | weighted::State wp_state(wp_header, channel.w, channel.h); | 442 | 55.6k | for (size_t y = 0; y < channel.h; y++) { | 443 | 53.5k | pixel_type *JXL_RESTRICT p = channel.Row(y); | 444 | 53.5k | InitPropsRow(&properties, static_props, y); | 445 | 53.5k | PrecomputeReferences(channel, y, *image, chan, &references); | 446 | 53.5k | if (!uses_lz77 && y > 1 && channel.w > 8 && references.w == 0) { | 447 | 131k | for (size_t x = 0; x < 2; x++) { | 448 | 87.5k | PredictionResult res = | 449 | 87.5k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, | 450 | 87.5k | tree_lookup, references, &wp_state); | 451 | 87.5k | uint64_t v = | 452 
| 87.5k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 453 | 87.5k | p[x] = make_pixel(v, res.multiplier, res.guess); | 454 | 87.5k | wp_state.UpdateErrors(p[x], x, y, channel.w); | 455 | 87.5k | } | 456 | 2.62M | for (size_t x = 2; x < channel.w - 2; x++) { | 457 | 2.58M | PredictionResult res = | 458 | 2.58M | PredictTreeWPNEC(&properties, channel.w, p + x, onerow, x, y, | 459 | 2.58M | tree_lookup, references, &wp_state); | 460 | 2.58M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( | 461 | 2.58M | res.context, br); | 462 | 2.58M | p[x] = make_pixel(v, res.multiplier, res.guess); | 463 | 2.58M | wp_state.UpdateErrors(p[x], x, y, channel.w); | 464 | 2.58M | } | 465 | 131k | for (size_t x = channel.w - 2; x < channel.w; x++) { | 466 | 87.5k | PredictionResult res = | 467 | 87.5k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, | 468 | 87.5k | tree_lookup, references, &wp_state); | 469 | 87.5k | uint64_t v = | 470 | 87.5k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 471 | 87.5k | p[x] = make_pixel(v, res.multiplier, res.guess); | 472 | 87.5k | wp_state.UpdateErrors(p[x], x, y, channel.w); | 473 | 87.5k | } | 474 | 43.7k | } else { | 475 | 162k | for (size_t x = 0; x < channel.w; x++) { | 476 | 152k | PredictionResult res = | 477 | 152k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, | 478 | 152k | tree_lookup, references, &wp_state); | 479 | 152k | uint64_t v = | 480 | 152k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); | 481 | 152k | p[x] = make_pixel(v, res.multiplier, res.guess); | 482 | 152k | wp_state.UpdateErrors(p[x], x, y, channel.w); | 483 | 152k | } | 484 | 9.76k | } | 485 | 53.5k | } | 486 | 2.09k | } | 487 | 5.11k | return true; | 488 | 5.11k | } |
489 | | } // namespace detail |
490 | | |
491 | | Status DecodeModularChannelMAANS(BitReader *br, ANSSymbolReader *reader, |
492 | | const std::vector<uint8_t> &context_map, |
493 | | const Tree &global_tree, |
494 | | const weighted::Header &wp_header, |
495 | | pixel_type chan, size_t group_id, |
496 | | TreeLut<uint8_t, false, false> &tree_lut, |
497 | | Image *image, uint32_t &fl_run, |
498 | 223k | uint32_t &fl_v) { |
499 | 223k | if (reader->UsesLZ77()) { |
500 | 17.5k | return detail::DecodeModularChannelMAANS</*uses_lz77=*/true>( |
501 | 17.5k | br, reader, context_map, global_tree, wp_header, chan, group_id, |
502 | 17.5k | tree_lut, image, fl_run, fl_v); |
503 | 206k | } else { |
504 | 206k | return detail::DecodeModularChannelMAANS</*uses_lz77=*/false>( |
505 | 206k | br, reader, context_map, global_tree, wp_header, chan, group_id, |
506 | 206k | tree_lut, image, fl_run, fl_v); |
507 | 206k | } |
508 | 223k | } |
509 | | |
510 | 102k | GroupHeader::GroupHeader() { Bundle::Init(this); } |
511 | | |
512 | | Status ValidateChannelDimensions(const Image &image, |
513 | 30.4k | const ModularOptions &options) { |
514 | 30.4k | size_t nb_channels = image.channel.size(); |
515 | 60.9k | for (bool is_dc : {true, false}) { |
516 | 60.9k | size_t group_dim = options.group_dim * (is_dc ? kBlockDim : 1); |
517 | 60.9k | size_t c = image.nb_meta_channels; |
518 | 510k | for (; c < nb_channels; c++) { |
519 | 451k | const Channel &ch = image.channel[c]; |
520 | 451k | if (ch.w > options.group_dim || ch.h > options.group_dim) break; |
521 | 451k | } |
522 | 77.6k | for (; c < nb_channels; c++) { |
523 | 16.7k | const Channel &ch = image.channel[c]; |
524 | 16.7k | if (ch.w == 0 || ch.h == 0) continue; // skip empty |
525 | 16.4k | bool is_dc_channel = std::min(ch.hshift, ch.vshift) >= 3; |
526 | 16.4k | if (is_dc_channel != is_dc) continue; |
527 | 8.24k | size_t tile_dim = group_dim >> std::max(ch.hshift, ch.vshift); |
528 | 8.24k | if (tile_dim == 0) { |
529 | 0 | return JXL_FAILURE("Inconsistent transforms"); |
530 | 0 | } |
531 | 8.24k | } |
532 | 60.9k | } |
533 | 30.4k | return true; |
534 | 30.4k | } |
535 | | |
536 | | Status ModularDecode(BitReader *br, Image &image, GroupHeader &header, |
537 | | size_t group_id, ModularOptions *options, |
538 | | const Tree *global_tree, const ANSCode *global_code, |
539 | | const std::vector<uint8_t> *global_ctx_map, |
540 | 34.6k | const bool allow_truncated_group) { |
541 | 34.6k | if (image.channel.empty()) return true; |
542 | 30.5k | JxlMemoryManager *memory_manager = image.memory_manager(); |
543 | | |
544 | | // decode transforms |
545 | 30.5k | Status status = Bundle::Read(br, &header); |
546 | 30.5k | if (!allow_truncated_group) JXL_RETURN_IF_ERROR(status); |
547 | 30.4k | if (status.IsFatalError()) return status; |
548 | 30.4k | if (!br->AllReadsWithinBounds()) { |
549 | | // Don't do/undo transforms if header is incomplete. |
550 | 0 | header.transforms.clear(); |
551 | 0 | image.transform = header.transforms; |
552 | 0 | for (auto &ch : image.channel) { |
553 | 0 | ZeroFillImage(&ch.plane); |
554 | 0 | } |
555 | 0 | return JXL_NOT_ENOUGH_BYTES("Read overrun before ModularDecode"); |
556 | 0 | } |
557 | | |
558 | 30.4k | JXL_DEBUG_V(3, "Image data underwent %" PRIuS " transformations: ", |
559 | 30.4k | header.transforms.size()); |
560 | 30.4k | image.transform = header.transforms; |
561 | 30.4k | for (Transform &transform : image.transform) { |
562 | 16.7k | JXL_RETURN_IF_ERROR(transform.MetaApply(image)); |
563 | 16.7k | } |
564 | 30.4k | if (image.error) { |
565 | 0 | return JXL_FAILURE("Corrupt file. Aborting."); |
566 | 0 | } |
567 | 30.4k | JXL_RETURN_IF_ERROR(ValidateChannelDimensions(image, *options)); |
568 | | |
569 | 30.4k | size_t nb_channels = image.channel.size(); |
570 | | |
571 | 30.4k | size_t num_chans = 0; |
572 | 30.4k | size_t distance_multiplier = 0; |
573 | 257k | for (size_t i = 0; i < nb_channels; i++) { |
574 | 227k | Channel &channel = image.channel[i]; |
575 | 227k | if (i >= image.nb_meta_channels && (channel.w > options->max_chan_size || |
576 | 225k | channel.h > options->max_chan_size)) { |
577 | 906 | break; |
578 | 906 | } |
579 | 226k | if (!channel.w || !channel.h) { |
580 | 2.52k | continue; // skip empty channels |
581 | 2.52k | } |
582 | 224k | if (channel.w > distance_multiplier) { |
583 | 43.8k | distance_multiplier = channel.w; |
584 | 43.8k | } |
585 | 224k | num_chans++; |
586 | 224k | } |
587 | 30.4k | if (num_chans == 0) return true; |
588 | | |
589 | 30.1k | size_t next_channel = 0; |
590 | 30.1k | auto scope_guard = MakeScopeGuard([&]() { |
591 | 911 | for (size_t c = next_channel; c < image.channel.size(); c++) { |
592 | 798 | ZeroFillImage(&image.channel[c].plane); |
593 | 798 | } |
594 | 113 | }); |
595 | | // Do not do anything if truncated groups are not allowed. |
596 | 30.1k | if (allow_truncated_group) scope_guard.Disarm(); |
597 | | |
598 | | // Read tree. |
599 | 30.1k | Tree tree_storage; |
600 | 30.1k | std::vector<uint8_t> context_map_storage; |
601 | 30.1k | ANSCode code_storage; |
602 | 30.1k | const Tree *tree = &tree_storage; |
603 | 30.1k | const ANSCode *code = &code_storage; |
604 | 30.1k | const std::vector<uint8_t> *context_map = &context_map_storage; |
605 | 30.1k | if (!header.use_global_tree) { |
606 | 17.2k | uint64_t max_tree_size = 1024; |
607 | 153k | for (size_t i = 0; i < nb_channels; i++) { |
608 | 136k | Channel &channel = image.channel[i]; |
609 | 136k | if (i >= image.nb_meta_channels && (channel.w > options->max_chan_size || |
610 | 136k | channel.h > options->max_chan_size)) { |
611 | 15 | break; |
612 | 15 | } |
613 | 136k | uint64_t pixels = channel.w * channel.h; |
614 | 136k | max_tree_size += pixels; |
615 | 136k | } |
616 | 17.2k | max_tree_size = std::min(static_cast<uint64_t>(1 << 20), max_tree_size); |
617 | 17.2k | JXL_RETURN_IF_ERROR( |
618 | 17.2k | DecodeTree(memory_manager, br, &tree_storage, max_tree_size)); |
619 | 17.1k | JXL_RETURN_IF_ERROR(DecodeHistograms(memory_manager, br, |
620 | 17.1k | (tree_storage.size() + 1) / 2, |
621 | 17.1k | &code_storage, &context_map_storage)); |
622 | 17.1k | } else { |
623 | 12.9k | if (!global_tree || !global_code || !global_ctx_map || |
624 | 12.9k | global_tree->empty()) { |
625 | 9 | return JXL_FAILURE("No global tree available but one was requested"); |
626 | 9 | } |
627 | 12.9k | tree = global_tree; |
628 | 12.9k | code = global_code; |
629 | 12.9k | context_map = global_ctx_map; |
630 | 12.9k | } |
631 | | |
632 | | // Read channels |
633 | 60.1k | JXL_ASSIGN_OR_RETURN(ANSSymbolReader reader, |
634 | 60.1k | ANSSymbolReader::Create(code, br, distance_multiplier)); |
635 | 60.1k | auto tree_lut = jxl::make_unique<TreeLut<uint8_t, false, false>>(); |
636 | 60.1k | uint32_t fl_run = 0; |
637 | 60.1k | uint32_t fl_v = 0; |
638 | 256k | for (; next_channel < nb_channels; next_channel++) { |
639 | 226k | Channel &channel = image.channel[next_channel]; |
640 | 226k | if (next_channel >= image.nb_meta_channels && |
641 | 226k | (channel.w > options->max_chan_size || |
642 | 224k | channel.h > options->max_chan_size)) { |
643 | 636 | break; |
644 | 636 | } |
645 | 226k | if (!channel.w || !channel.h) { |
646 | 2.52k | continue; // skip empty channels |
647 | 2.52k | } |
648 | 223k | JXL_RETURN_IF_ERROR(DecodeModularChannelMAANS( |
649 | 223k | br, &reader, *context_map, *tree, header.wp_header, next_channel, |
650 | 223k | group_id, *tree_lut, &image, fl_run, fl_v)); |
651 | | |
652 | | // Truncated group. |
653 | 223k | if (!br->AllReadsWithinBounds()) { |
654 | 38 | if (!allow_truncated_group) return JXL_FAILURE("Truncated input"); |
655 | 0 | return JXL_NOT_ENOUGH_BYTES("Read overrun in ModularDecode"); |
656 | 38 | } |
657 | 223k | } |
658 | | |
659 | | // Make sure no zero-filling happens even if next_channel < nb_channels. |
660 | 30.0k | scope_guard.Disarm(); |
661 | | |
662 | 30.0k | if (!reader.CheckANSFinalState()) { |
663 | 0 | return JXL_FAILURE("ANS decode final state failed"); |
664 | 0 | } |
665 | 30.0k | return true; |
666 | 30.0k | } |
667 | | |
668 | | Status ModularGenericDecompress(BitReader *br, Image &image, |
669 | | GroupHeader *header, size_t group_id, |
670 | | ModularOptions *options, bool undo_transforms, |
671 | | const Tree *tree, const ANSCode *code, |
672 | | const std::vector<uint8_t> *ctx_map, |
673 | 34.6k | bool allow_truncated_group) { |
674 | 34.6k | std::vector<std::pair<uint32_t, uint32_t>> req_sizes; |
675 | 34.6k | req_sizes.reserve(image.channel.size()); |
676 | 111k | for (const auto &c : image.channel) { |
677 | 111k | req_sizes.emplace_back(c.w, c.h); |
678 | 111k | } |
679 | 34.6k | GroupHeader local_header; |
680 | 34.6k | if (header == nullptr) header = &local_header; |
681 | 34.6k | size_t bit_pos = br->TotalBitsConsumed(); |
682 | 34.6k | auto dec_status = ModularDecode(br, image, *header, group_id, options, tree, |
683 | 34.6k | code, ctx_map, allow_truncated_group); |
684 | 34.6k | if (!allow_truncated_group) JXL_RETURN_IF_ERROR(dec_status); |
685 | 34.4k | if (dec_status.IsFatalError()) return dec_status; |
686 | 34.4k | if (undo_transforms) image.undo_transforms(header->wp_header); |
687 | 34.4k | if (image.error) return JXL_FAILURE("Corrupt file. Aborting."); |
688 | 34.4k | JXL_DEBUG_V(4, |
689 | 34.4k | "Modular-decoded a %" PRIuS "x%" PRIuS " nbchans=%" PRIuS |
690 | 34.4k | " image from %" PRIuS " bytes", |
691 | 34.4k | image.w, image.h, image.channel.size(), |
692 | 34.4k | (br->TotalBitsConsumed() - bit_pos) / 8); |
693 | 34.4k | JXL_DEBUG_V(5, "Modular image: %s", image.DebugString().c_str()); |
694 | 34.4k | (void)bit_pos; |
695 | | // Check that after applying all transforms we are back to the requested |
696 | | // image sizes, otherwise there's a programming error with the |
697 | | // transformations. |
698 | 34.4k | if (undo_transforms) { |
699 | 10.4k | JXL_ENSURE(image.channel.size() == req_sizes.size()); |
700 | 53.4k | for (size_t c = 0; c < req_sizes.size(); c++) { |
701 | 42.9k | JXL_ENSURE(req_sizes[c].first == image.channel[c].w); |
702 | 42.9k | JXL_ENSURE(req_sizes[c].second == image.channel[c].h); |
703 | 42.9k | } |
704 | 10.4k | } |
705 | 34.4k | return dec_status; |
706 | 34.4k | } |
707 | | |
708 | | } // namespace jxl |
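
Note on the per-pixel decode loops above: each decoded token v is turned back into a sample by make_pixel, which undoes the signed/unsigned (zigzag) mapping of pack_signed.h, scales by res.multiplier, and adds the predictor's res.guess. The sketch below is an illustrative, self-contained approximation of that reconstruction, not a copy of the library code; UnpackSignedSketch and ReconstructPixelSketch are hypothetical names, and the zigzag convention (non-negative n -> 2n, negative n -> -2n - 1) is an assumption based on the usual pack_signed.h behavior.

// Standalone sketch of the residual-to-pixel reconstruction (assumptions noted above).
#include <cstdint>
#include <cstdio>

// Zigzag decode: tokens 0, 1, 2, 3, 4, ... map to residuals 0, -1, 1, -2, 2, ...
static int64_t UnpackSignedSketch(uint64_t value) {
  // Even tokens are non-negative residuals; odd tokens are negative ones.
  return static_cast<int64_t>((value >> 1) ^ (0 - (value & 1)));
}

// Conceptually what make_pixel does: scale the signed residual and add the
// predictor's guess (multiplier/guess mirror the PredictionResult fields).
static int64_t ReconstructPixelSketch(uint64_t token, int64_t multiplier,
                                      int64_t guess) {
  return UnpackSignedSketch(token) * multiplier + guess;
}

int main() {
  // Example: predictor guessed 100, decoded token 5 (residual -3 after
  // unpacking), multiplier 1 -> reconstructed sample 97.
  std::printf("%lld\n",
              static_cast<long long>(ReconstructPixelSketch(5, 1, 100)));
  return 0;
}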