/src/libjxl/lib/jxl/modular/encoding/encoding.cc
Line | Count | Source |
1 | | // Copyright (c) the JPEG XL Project Authors. All rights reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style |
4 | | // license that can be found in the LICENSE file. |
5 | | |
6 | | #include "lib/jxl/modular/encoding/encoding.h" |
7 | | |
8 | | #include <jxl/memory_manager.h> |
9 | | |
10 | | #include <algorithm> |
11 | | #include <array> |
12 | | #include <cstddef> |
13 | | #include <cstdint> |
14 | | #include <cstdlib> |
15 | | #include <queue> |
16 | | #include <utility> |
17 | | #include <vector> |
18 | | |
19 | | #include "lib/jxl/base/common.h" |
20 | | #include "lib/jxl/base/compiler_specific.h" |
21 | | #include "lib/jxl/base/printf_macros.h" |
22 | | #include "lib/jxl/base/scope_guard.h" |
23 | | #include "lib/jxl/base/status.h" |
24 | | #include "lib/jxl/dec_ans.h" |
25 | | #include "lib/jxl/dec_bit_reader.h" |
26 | | #include "lib/jxl/fields.h" |
27 | | #include "lib/jxl/frame_dimensions.h" |
28 | | #include "lib/jxl/image_ops.h" |
29 | | #include "lib/jxl/modular/encoding/context_predict.h" |
30 | | #include "lib/jxl/modular/encoding/dec_ma.h" |
31 | | #include "lib/jxl/modular/modular_image.h" |
32 | | #include "lib/jxl/modular/options.h" |
33 | | #include "lib/jxl/modular/transform/transform.h" |
34 | | #include "lib/jxl/pack_signed.h" |
35 | | |
36 | | namespace jxl { |
37 | | |
38 | | // Removes all nodes that use a static property (i.e. channel or group ID) from |
39 | | // the tree and collapses each node on even levels with its two children to |
40 | | // produce a flatter tree. Also computes whether the resulting tree requires |
41 | | // using the weighted predictor. |
42 | | FlatTree FilterTree(const Tree &global_tree, |
43 | | std::array<pixel_type, kNumStaticProperties> &static_props, |
44 | | size_t *num_props, bool *use_wp, bool *wp_only, |
45 | 160k | bool *gradient_only) { |
46 | 160k | *num_props = 0; |
47 | 160k | bool has_wp = false; |
48 | 160k | bool has_non_wp = false; |
49 | 160k | *gradient_only = true; |
50 | 160k | const auto mark_property = [&](int32_t p) { |
51 | 46.8k | if (p == kWPProp) { |
52 | 8.86k | has_wp = true; |
53 | 37.9k | } else if (p >= kNumStaticProperties) { |
54 | 16.2k | has_non_wp = true; |
55 | 16.2k | } |
56 | 46.8k | if (p >= kNumStaticProperties && p != kGradientProp) { |
57 | 21.7k | *gradient_only = false; |
58 | 21.7k | } |
59 | 46.8k | }; |
60 | 160k | FlatTree output; |
61 | 160k | std::queue<size_t> nodes; |
62 | 160k | nodes.push(0); |
63 | | // Produces a trimmed and flattened tree by doing a BFS visit of the original |
64 | | // tree, ignoring branches that are known to be false and proceeding two |
65 | | // levels at a time to collapse nodes into a flatter tree; if an inner parent |
66 | | // node has a leaf as a child, the leaf is duplicated and an implicit fake |
67 | | // node is added. This allows reducing the number of branches when traversing |
68 | | // the resulting flat tree. |
69 | 383k | while (!nodes.empty()) { |
70 | 222k | size_t cur = nodes.front(); |
71 | 222k | nodes.pop(); |
72 | | // Skip nodes that we can decide now, by jumping directly to their children. |
73 | 228k | while (global_tree[cur].property < kNumStaticProperties && |
74 | 228k | global_tree[cur].property != -1) { |
75 | 5.61k | if (static_props[global_tree[cur].property] > global_tree[cur].splitval) { |
76 | 3.27k | cur = global_tree[cur].lchild; |
77 | 3.27k | } else { |
78 | 2.34k | cur = global_tree[cur].rchild; |
79 | 2.34k | } |
80 | 5.61k | } |
81 | 222k | FlatDecisionNode flat; |
82 | 222k | if (global_tree[cur].property == -1) { |
83 | 207k | flat.property0 = -1; |
84 | 207k | flat.childID = global_tree[cur].lchild; |
85 | 207k | flat.predictor = global_tree[cur].predictor; |
86 | 207k | flat.predictor_offset = global_tree[cur].predictor_offset; |
87 | 207k | flat.multiplier = global_tree[cur].multiplier; |
88 | 207k | *gradient_only &= flat.predictor == Predictor::Gradient; |
89 | 207k | has_wp |= flat.predictor == Predictor::Weighted; |
90 | 207k | has_non_wp |= flat.predictor != Predictor::Weighted; |
91 | 207k | output.push_back(flat); |
92 | 207k | continue; |
93 | 207k | } |
94 | 15.6k | flat.childID = output.size() + nodes.size() + 1; |
95 | | |
96 | 15.6k | flat.property0 = global_tree[cur].property; |
97 | 15.6k | *num_props = std::max<size_t>(flat.property0 + 1, *num_props); |
98 | 15.6k | flat.splitval0 = global_tree[cur].splitval; |
99 | | |
100 | 46.8k | for (size_t i = 0; i < 2; i++) { |
101 | 31.2k | size_t cur_child = |
102 | 31.2k | i == 0 ? global_tree[cur].lchild : global_tree[cur].rchild; |
103 | | // Skip nodes that we can decide now. |
104 | 33.4k | while (global_tree[cur_child].property < kNumStaticProperties && |
105 | 33.4k | global_tree[cur_child].property != -1) { |
106 | 2.21k | if (static_props[global_tree[cur_child].property] > |
107 | 2.21k | global_tree[cur_child].splitval) { |
108 | 1.66k | cur_child = global_tree[cur_child].lchild; |
109 | 1.66k | } else { |
110 | 556 | cur_child = global_tree[cur_child].rchild; |
111 | 556 | } |
112 | 2.21k | } |
113 | | // We ended up in a leaf, add a placeholder decision and two copies of the |
114 | | // leaf. |
115 | 31.2k | if (global_tree[cur_child].property == -1) { |
116 | 21.6k | flat.properties[i] = 0; |
117 | 21.6k | flat.splitvals[i] = 0; |
118 | 21.6k | nodes.push(cur_child); |
119 | 21.6k | nodes.push(cur_child); |
120 | 21.6k | } else { |
121 | 9.54k | flat.properties[i] = global_tree[cur_child].property; |
122 | 9.54k | flat.splitvals[i] = global_tree[cur_child].splitval; |
123 | 9.54k | nodes.push(global_tree[cur_child].lchild); |
124 | 9.54k | nodes.push(global_tree[cur_child].rchild); |
125 | 9.54k | *num_props = std::max<size_t>(flat.properties[i] + 1, *num_props); |
126 | 9.54k | } |
127 | 31.2k | } |
128 | | |
129 | 31.2k | for (int16_t property : flat.properties) mark_property(property); |
130 | 15.6k | mark_property(flat.property0); |
131 | 15.6k | output.push_back(flat); |
132 | 15.6k | } |
133 | 160k | if (*num_props > kNumNonrefProperties) { |
134 | 1.30k | *num_props = |
135 | 1.30k | DivCeil(*num_props - kNumNonrefProperties, kExtraPropsPerChannel) * |
136 | 1.30k | kExtraPropsPerChannel + |
137 | 1.30k | kNumNonrefProperties; |
138 | 159k | } else { |
139 | 159k | *num_props = kNumNonrefProperties; |
140 | 159k | } |
141 | 160k | *use_wp = has_wp; |
142 | 160k | *wp_only = has_wp && !has_non_wp; |
143 | | |
144 | 160k | return output; |
145 | 160k | } |
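
The collapsed layout built above can be walked two levels at a time. The following is a minimal, illustrative lookup consistent with the BFS push order in FilterTree (the four grandchildren of a collapsed node are emitted contiguously starting at childID, in the order left.left, left.right, right.left, right.right); the decoder's actual lookup is MATreeLookup from context_predict.h, and the FlatLookupResult/FlatLookupSketch names here are hypothetical.

// Illustrative sketch only; uses the FlatDecisionNode fields shown above.
struct FlatLookupResult {  // hypothetical helper, not part of libjxl
  size_t context;
  Predictor predictor;
  int64_t offset;
  int32_t multiplier;
};

FlatLookupResult FlatLookupSketch(const FlatTree &nodes,
                                  const Properties &props) {
  size_t pos = 0;
  while (true) {
    const FlatDecisionNode &n = nodes[pos];
    if (n.property0 == -1) {
      // Leaf (possibly a duplicated leaf): childID holds the context id,
      // which DecodeModularChannelMAANS below remaps to a clustered context.
      return {n.childID, n.predictor, n.predictor_offset, n.multiplier};
    }
    // Level 1: "property value > splitval" selects the left subtree,
    // matching the Tree convention used in FilterTree above.
    const bool left = props[n.property0] > n.splitval0;
    // Level 2: each subtree carries its own (property, splitval) pair;
    // leaves were duplicated, so all four child slots are always valid.
    const size_t idx = left ? (props[n.properties[0]] > n.splitvals[0] ? 0 : 1)
                            : (props[n.properties[1]] > n.splitvals[1] ? 2 : 3);
    pos = n.childID + idx;
  }
}
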
146 | | |
147 | | namespace detail { |
148 | | template <bool uses_lz77> |
149 | | Status DecodeModularChannelMAANS(BitReader *br, ANSSymbolReader *reader, |
150 | | const std::vector<uint8_t> &context_map, |
151 | | const Tree &global_tree, |
152 | | const weighted::Header &wp_header, |
153 | | pixel_type chan, size_t group_id, |
154 | | TreeLut<uint8_t, false, false> &tree_lut, |
155 | | Image *image, uint32_t &fl_run, |
156 | 158k | uint32_t &fl_v) { |
157 | 158k | JxlMemoryManager *memory_manager = image->memory_manager(); |
158 | 158k | Channel &channel = image->channel[chan]; |
159 | | |
160 | 158k | std::array<pixel_type, kNumStaticProperties> static_props = { |
161 | 158k | {chan, static_cast<int>(group_id)}}; |
162 | | // TODO(veluca): filter the tree according to static_props. |
163 | | |
164 | | // zero pixel channel? could happen |
165 | 158k | if (channel.w == 0 || channel.h == 0) return true; |
166 | | |
167 | 158k | bool tree_has_wp_prop_or_pred = false; |
168 | 158k | bool is_wp_only = false; |
169 | 158k | bool is_gradient_only = false; |
170 | 158k | size_t num_props; |
171 | 158k | FlatTree tree = |
172 | 158k | FilterTree(global_tree, static_props, &num_props, |
173 | 158k | &tree_has_wp_prop_or_pred, &is_wp_only, &is_gradient_only); |
174 | | |
175 | | // From here on, tree lookup returns a *clustered* context ID. |
176 | | // This avoids an extra memory lookup after tree traversal. |
177 | 195k | for (auto &node : tree) { |
178 | 195k | if (node.property0 == -1) { |
179 | 186k | node.childID = context_map[node.childID]; |
180 | 186k | } |
181 | 195k | } |
182 | | |
183 | 158k | JXL_DEBUG_V(3, "Decoded MA tree with %" PRIuS " nodes", tree.size()); |
184 | | |
185 | | // MAANS decode |
186 | 158k | const auto make_pixel = [](uint64_t v, pixel_type multiplier, |
187 | 28.9M | pixel_type_w offset) -> pixel_type { |
188 | 28.9M | JXL_DASSERT((v & 0xFFFFFFFF) == v); |
189 | 28.9M | pixel_type_w val = static_cast<pixel_type_w>(UnpackSigned(v)); |
190 | | // if it overflows, it overflows, and we have a problem anyway |
191 | 28.9M | return val * multiplier + offset; |
192 | 28.9M | }; |
193 | | |
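
For reference, make_pixel undoes the encoder's sign packing and then applies the leaf's multiplier and offset. A minimal sketch of the zigzag mapping, assuming the usual convention (even codes map to non-negative values, odd codes to negative values) implemented by UnpackSigned in lib/jxl/pack_signed.h:

// Sketch of the zigzag decode: 0,1,2,3,4,... -> 0,-1,1,-2,2,...
int64_t UnpackSignedSketch(uint64_t v) {
  return (v & 1) ? -static_cast<int64_t>((v + 1) >> 1)
                 : static_cast<int64_t>(v >> 1);
}
// make_pixel then computes UnpackSigned(v) * multiplier + offset,
// e.g. v = 5, multiplier = 2, offset = 100 gives -3 * 2 + 100 = 94.
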
194 | 158k | if (tree.size() == 1) { |
195 | | // special optimized case: no meta-adaptation, so no need |
196 | | // to compute properties. |
197 | 153k | Predictor predictor = tree[0].predictor; |
198 | 153k | int64_t offset = tree[0].predictor_offset; |
199 | 153k | int32_t multiplier = tree[0].multiplier; |
200 | 153k | size_t ctx_id = tree[0].childID; |
201 | 153k | if (predictor == Predictor::Zero) { |
202 | 141k | uint32_t value; |
203 | 141k | if (reader->IsSingleValueAndAdvance(ctx_id, &value, |
204 | 141k | channel.w * channel.h)) { |
205 | | // Special-case: histogram has a single symbol, with no extra bits, and |
206 | | // we use ANS mode. |
207 | 59.2k | JXL_DEBUG_V(8, "Fastest track."); |
208 | 59.2k | pixel_type v = make_pixel(value, multiplier, offset); |
209 | 1.49M | for (size_t y = 0; y < channel.h; y++) { |
210 | 1.43M | pixel_type *JXL_RESTRICT r = channel.Row(y); |
211 | 1.43M | std::fill(r, r + channel.w, v); |
212 | 1.43M | } |
213 | 82.6k | } else { |
214 | 82.6k | JXL_DEBUG_V(8, "Fast track."); |
215 | 82.6k | if (multiplier == 1 && offset == 0) { |
216 | 1.09M | for (size_t y = 0; y < channel.h; y++) { |
217 | 1.02M | pixel_type *JXL_RESTRICT r = channel.Row(y); |
218 | 63.4M | for (size_t x = 0; x < channel.w; x++) { |
219 | 62.4M | uint32_t v = |
220 | 62.4M | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
221 | 62.4M | r[x] = UnpackSigned(v); |
222 | 62.4M | } |
223 | 1.02M | } |
224 | 76.8k | } else { |
225 | 134k | for (size_t y = 0; y < channel.h; y++) { |
226 | 128k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
227 | 6.82M | for (size_t x = 0; x < channel.w; x++) { |
228 | 6.69M | uint32_t v = |
229 | 6.69M | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, |
230 | 6.69M | br); |
231 | 6.69M | r[x] = make_pixel(v, multiplier, offset); |
232 | 6.69M | } |
233 | 128k | } |
234 | 5.75k | } |
235 | 82.6k | } |
236 | 141k | return true; |
237 | 141k | } else if (uses_lz77 && predictor == Predictor::Gradient && offset == 0 && |
238 | 11.6k | multiplier == 1 && reader->IsHuffRleOnly()) { |
239 | 3 | JXL_DEBUG_V(8, "Gradient RLE (fjxl) very fast track."); |
240 | 3 | pixel_type_w sv = UnpackSigned(fl_v); |
241 | 9 | for (size_t y = 0; y < channel.h; y++) { |
242 | 6 | pixel_type *JXL_RESTRICT r = channel.Row(y); |
243 | 6 | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); |
244 | 6 | const pixel_type *JXL_RESTRICT rtopleft = |
245 | 6 | (y ? channel.Row(y - 1) - 1 : r - 1); |
246 | 6 | pixel_type_w guess_0 = (y ? rtop[0] : 0); |
247 | 6 | if (fl_run == 0) { |
248 | 6 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, |
249 | 6 | &fl_run); |
250 | 6 | sv = UnpackSigned(fl_v); |
251 | 6 | } else { |
252 | 0 | fl_run--; |
253 | 0 | } |
254 | 6 | r[0] = sv + guess_0; |
255 | 174 | for (size_t x = 1; x < channel.w; x++) { |
256 | 168 | pixel_type left = r[x - 1]; |
257 | 168 | pixel_type top = rtop[x]; |
258 | 168 | pixel_type topleft = rtopleft[x]; |
259 | 168 | pixel_type_w guess = ClampedGradient(top, left, topleft); |
260 | 168 | if (!fl_run) { |
261 | 168 | reader->ReadHybridUintClusteredHuffRleOnly(ctx_id, br, &fl_v, |
262 | 168 | &fl_run); |
263 | 168 | sv = UnpackSigned(fl_v); |
264 | 168 | } else { |
265 | 0 | fl_run--; |
266 | 0 | } |
267 | 168 | r[x] = sv + guess; |
268 | 168 | } |
269 | 6 | } |
270 | 3 | return true; |
271 | 11.5k | } else if (predictor == Predictor::Gradient && offset == 0 && |
272 | 11.5k | multiplier == 1) { |
273 | 423 | JXL_DEBUG_V(8, "Gradient very fast track."); |
274 | 423 | const intptr_t onerow = channel.plane.PixelsPerRow(); |
275 | 7.67k | for (size_t y = 0; y < channel.h; y++) { |
276 | 7.25k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
277 | 274k | for (size_t x = 0; x < channel.w; x++) { |
278 | 267k | pixel_type left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); |
279 | 267k | pixel_type top = (y ? *(r + x - onerow) : left); |
280 | 267k | pixel_type topleft = (x && y ? *(r + x - 1 - onerow) : left); |
281 | 267k | pixel_type guess = ClampedGradient(top, left, topleft); |
282 | 267k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( |
283 | 267k | ctx_id, br); |
284 | 267k | r[x] = make_pixel(v, 1, guess); |
285 | 267k | } |
286 | 7.25k | } |
287 | 423 | return true; |
288 | 423 | } |
289 | 153k | } |
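
Both gradient fast paths rely on ClampedGradient, the classic median (LOCO-I/MED) predictor. The sketch below is illustrative only and not the library's own implementation; the function name is hypothetical.

// Predict a pixel from its causal neighbours by clamping the plane
// gradient (top + left - topleft) to [min(top, left), max(top, left)].
int64_t ClampedGradientSketch(int64_t top, int64_t left, int64_t topleft) {
  const int64_t lo = std::min(top, left);
  const int64_t hi = std::max(top, left);
  const int64_t grad = top + left - topleft;
  // Equivalent formulation: if topleft >= hi return lo;
  // if topleft <= lo return hi; otherwise return grad.
  return std::min(std::max(grad, lo), hi);
}
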
290 | | |
291 | | // Check if this tree is a WP-only tree with a small enough property value |
292 | | // range. |
293 | 16.4k | if (is_wp_only) { |
294 | 4.28k | is_wp_only = TreeToLookupTable(tree, tree_lut); |
295 | 4.28k | } |
296 | 16.4k | if (is_gradient_only) { |
297 | 743 | is_gradient_only = TreeToLookupTable(tree, tree_lut); |
298 | 743 | } |
299 | | |
300 | 16.4k | if (is_gradient_only) { |
301 | 293 | JXL_DEBUG_V(8, "Gradient fast track."); |
302 | 293 | const intptr_t onerow = channel.plane.PixelsPerRow(); |
303 | 5.47k | for (size_t y = 0; y < channel.h; y++) { |
304 | 5.17k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
305 | 354k | for (size_t x = 0; x < channel.w; x++) { |
306 | 349k | pixel_type_w left = (x ? r[x - 1] : y ? *(r + x - onerow) : 0); |
307 | 349k | pixel_type_w top = (y ? *(r + x - onerow) : left); |
308 | 349k | pixel_type_w topleft = (x && y ? *(r + x - 1 - onerow) : left); |
309 | 349k | int32_t guess = ClampedGradient(top, left, topleft); |
310 | 349k | uint32_t pos = |
311 | 349k | kPropRangeFast + |
312 | 349k | std::min<pixel_type_w>( |
313 | 349k | std::max<pixel_type_w>(-kPropRangeFast, top + left - topleft), |
314 | 349k | kPropRangeFast - 1); |
315 | 349k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
316 | 349k | uint64_t v = |
317 | 349k | reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>(ctx_id, br); |
318 | 349k | r[x] = make_pixel(v, 1, guess); |
319 | 349k | } |
320 | 5.17k | } |
321 | 16.1k | } else if (!uses_lz77 && is_wp_only && channel.w > 8) { |
322 | 553 | JXL_DEBUG_V(8, "WP fast track."); |
323 | 553 | weighted::State wp_state(wp_header, channel.w, channel.h); |
324 | 553 | Properties properties(1); |
325 | 16.3k | for (size_t y = 0; y < channel.h; y++) { |
326 | 15.8k | pixel_type *JXL_RESTRICT r = channel.Row(y); |
327 | 15.8k | const pixel_type *JXL_RESTRICT rtop = (y ? channel.Row(y - 1) : r - 1); |
328 | 15.8k | const pixel_type *JXL_RESTRICT rtoptop = |
329 | 15.8k | (y > 1 ? channel.Row(y - 2) : rtop); |
330 | 15.8k | const pixel_type *JXL_RESTRICT rtopleft = |
331 | 15.8k | (y ? channel.Row(y - 1) - 1 : r - 1); |
332 | 15.8k | const pixel_type *JXL_RESTRICT rtopright = |
333 | 15.8k | (y ? channel.Row(y - 1) + 1 : r - 1); |
334 | 15.8k | size_t x = 0; |
335 | 15.8k | { |
336 | 15.8k | size_t offset = 0; |
337 | 15.8k | pixel_type_w left = y ? rtop[x] : 0; |
338 | 15.8k | pixel_type_w toptop = y ? rtoptop[x] : 0; |
339 | 15.8k | pixel_type_w topright = (x + 1 < channel.w && y ? rtop[x + 1] : left); |
340 | 15.8k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
341 | 15.8k | x, y, channel.w, left, left, topright, left, toptop, &properties, |
342 | 15.8k | offset); |
343 | 15.8k | uint32_t pos = |
344 | 15.8k | kPropRangeFast + |
345 | 15.8k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); |
346 | 15.8k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
347 | 15.8k | uint64_t v = |
348 | 15.8k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
349 | 15.8k | r[x] = make_pixel(v, 1, guess); |
350 | 15.8k | wp_state.UpdateErrors(r[x], x, y, channel.w); |
351 | 15.8k | } |
352 | 1.76M | for (x = 1; x + 1 < channel.w; x++) { |
353 | 1.74M | size_t offset = 0; |
354 | 1.74M | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
355 | 1.74M | x, y, channel.w, rtop[x], r[x - 1], rtopright[x], rtopleft[x], |
356 | 1.74M | rtoptop[x], &properties, offset); |
357 | 1.74M | uint32_t pos = |
358 | 1.74M | kPropRangeFast + |
359 | 1.74M | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); |
360 | 1.74M | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
361 | 1.74M | uint64_t v = |
362 | 1.74M | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
363 | 1.74M | r[x] = make_pixel(v, 1, guess); |
364 | 1.74M | wp_state.UpdateErrors(r[x], x, y, channel.w); |
365 | 1.74M | } |
366 | 15.8k | { |
367 | 15.8k | size_t offset = 0; |
368 | 15.8k | int32_t guess = wp_state.Predict</*compute_properties=*/true>( |
369 | 15.8k | x, y, channel.w, rtop[x], r[x - 1], rtop[x], rtopleft[x], |
370 | 15.8k | rtoptop[x], &properties, offset); |
371 | 15.8k | uint32_t pos = |
372 | 15.8k | kPropRangeFast + |
373 | 15.8k | jxl::Clamp1(properties[0], -kPropRangeFast, kPropRangeFast - 1); |
374 | 15.8k | uint32_t ctx_id = tree_lut.context_lookup[pos]; |
375 | 15.8k | uint64_t v = |
376 | 15.8k | reader->ReadHybridUintClusteredInlined<uses_lz77>(ctx_id, br); |
377 | 15.8k | r[x] = make_pixel(v, 1, guess); |
378 | 15.8k | wp_state.UpdateErrors(r[x], x, y, channel.w); |
379 | 15.8k | } |
380 | 15.8k | } |
381 | 15.6k | } else if (!tree_has_wp_prop_or_pred) { |
382 | | // special optimized case: the weighted predictor and its properties are not |
383 | | // used, so no need to compute weights and properties. |
384 | 9.66k | JXL_DEBUG_V(8, "Slow track."); |
385 | 9.66k | MATreeLookup tree_lookup(tree); |
386 | 9.66k | Properties properties = Properties(num_props); |
387 | 9.66k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
388 | 9.66k | JXL_ASSIGN_OR_RETURN( |
389 | 9.66k | Channel references, |
390 | 9.66k | Channel::Create(memory_manager, |
391 | 9.66k | properties.size() - kNumNonrefProperties, channel.w)); |
392 | 177k | for (size_t y = 0; y < channel.h; y++) { |
393 | 167k | pixel_type *JXL_RESTRICT p = channel.Row(y); |
394 | 167k | PrecomputeReferences(channel, y, *image, chan, &references); |
395 | 167k | InitPropsRow(&properties, static_props, y); |
396 | 167k | if (y > 1 && channel.w > 8 && references.w == 0) { |
397 | 317k | for (size_t x = 0; x < 2; x++) { |
398 | 211k | PredictionResult res = |
399 | 211k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
400 | 211k | tree_lookup, references); |
401 | 211k | uint64_t v = |
402 | 211k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
403 | 211k | p[x] = make_pixel(v, res.multiplier, res.guess); |
404 | 211k | } |
405 | 8.98M | for (size_t x = 2; x < channel.w - 2; x++) { |
406 | 8.87M | PredictionResult res = |
407 | 8.87M | PredictTreeNoWPNEC(&properties, channel.w, p + x, onerow, x, y, |
408 | 8.87M | tree_lookup, references); |
409 | 8.87M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( |
410 | 8.87M | res.context, br); |
411 | 8.87M | p[x] = make_pixel(v, res.multiplier, res.guess); |
412 | 8.87M | } |
413 | 317k | for (size_t x = channel.w - 2; x < channel.w; x++) { |
414 | 211k | PredictionResult res = |
415 | 211k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
416 | 211k | tree_lookup, references); |
417 | 211k | uint64_t v = |
418 | 211k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
419 | 211k | p[x] = make_pixel(v, res.multiplier, res.guess); |
420 | 211k | } |
421 | 105k | } else { |
422 | 1.01M | for (size_t x = 0; x < channel.w; x++) { |
423 | 952k | PredictionResult res = |
424 | 952k | PredictTreeNoWP(&properties, channel.w, p + x, onerow, x, y, |
425 | 952k | tree_lookup, references); |
426 | 952k | uint64_t v = reader->ReadHybridUintClusteredMaybeInlined<uses_lz77>( |
427 | 952k | res.context, br); |
428 | 952k | p[x] = make_pixel(v, res.multiplier, res.guess); |
429 | 952k | } |
430 | 61.5k | } |
431 | 167k | } |
432 | 9.66k | } else { |
433 | 5.96k | JXL_DEBUG_V(8, "Slowest track."); |
434 | 5.96k | MATreeLookup tree_lookup(tree); |
435 | 5.96k | Properties properties = Properties(num_props); |
436 | 5.96k | const intptr_t onerow = channel.plane.PixelsPerRow(); |
437 | 5.96k | JXL_ASSIGN_OR_RETURN( |
438 | 5.96k | Channel references, |
439 | 5.96k | Channel::Create(memory_manager, |
440 | 5.96k | properties.size() - kNumNonrefProperties, channel.w)); |
441 | 5.96k | weighted::State wp_state(wp_header, channel.w, channel.h); |
442 | 151k | for (size_t y = 0; y < channel.h; y++) { |
443 | 145k | pixel_type *JXL_RESTRICT p = channel.Row(y); |
444 | 145k | InitPropsRow(&properties, static_props, y); |
445 | 145k | PrecomputeReferences(channel, y, *image, chan, &references); |
446 | 145k | if (!uses_lz77 && y > 1 && channel.w > 8 && references.w == 0) { |
447 | 284k | for (size_t x = 0; x < 2; x++) { |
448 | 189k | PredictionResult res = |
449 | 189k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
450 | 189k | tree_lookup, references, &wp_state); |
451 | 189k | uint64_t v = |
452 | 189k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
453 | 189k | p[x] = make_pixel(v, res.multiplier, res.guess); |
454 | 189k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
455 | 189k | } |
456 | 7.93M | for (size_t x = 2; x < channel.w - 2; x++) { |
457 | 7.84M | PredictionResult res = |
458 | 7.84M | PredictTreeWPNEC(&properties, channel.w, p + x, onerow, x, y, |
459 | 7.84M | tree_lookup, references, &wp_state); |
460 | 7.84M | uint64_t v = reader->ReadHybridUintClusteredInlined<uses_lz77>( |
461 | 7.84M | res.context, br); |
462 | 7.84M | p[x] = make_pixel(v, res.multiplier, res.guess); |
463 | 7.84M | wp_state.UpdateErrors(p[x], x, y, channel.w); |
464 | 7.84M | } |
465 | 284k | for (size_t x = channel.w - 2; x < channel.w; x++) { |
466 | 189k | PredictionResult res = |
467 | 189k | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
468 | 189k | tree_lookup, references, &wp_state); |
469 | 189k | uint64_t v = |
470 | 189k | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
471 | 189k | p[x] = make_pixel(v, res.multiplier, res.guess); |
472 | 189k | wp_state.UpdateErrors(p[x], x, y, channel.w); |
473 | 189k | } |
474 | 94.7k | } else { |
475 | 1.38M | for (size_t x = 0; x < channel.w; x++) { |
476 | 1.32M | PredictionResult res = |
477 | 1.32M | PredictTreeWP(&properties, channel.w, p + x, onerow, x, y, |
478 | 1.32M | tree_lookup, references, &wp_state); |
479 | 1.32M | uint64_t v = |
480 | 1.32M | reader->ReadHybridUintClustered<uses_lz77>(res.context, br); |
481 | 1.32M | p[x] = make_pixel(v, res.multiplier, res.guess); |
482 | 1.32M | wp_state.UpdateErrors(p[x], x, y, channel.w); |
483 | 1.32M | } |
484 | 50.9k | } |
485 | 145k | } |
486 | 5.96k | } |
487 | 16.4k | return true; |
488 | 16.4k | }
489 | | } // namespace detail |
490 | | |
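The row loops in the function above are deliberately split into a two-pixel prologue, a bounds-check-free interior (the PredictTree*NEC variants), and a two-pixel epilogue, so the hot interior loop never tests for row edges. A minimal sketch of that split, assuming hypothetical PredictEdge/PredictInterior predictors that stand in for the tree-driven ones:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: PredictEdge/PredictInterior are hypothetical stand-ins; the
// point is the prologue/interior/epilogue structure that keeps edge handling
// out of the hot loop.
static int32_t PredictEdge(const std::vector<int32_t>& prev,
                           const std::vector<int32_t>& cur, size_t x) {
  int32_t left = x > 0 ? cur[x - 1] : 0;
  int32_t top = prev.empty() ? left : prev[x];
  return (left + top) / 2;  // every neighbor access is guarded or clamped
}

static int32_t PredictInterior(const std::vector<int32_t>& prev,
                               const std::vector<int32_t>& cur, size_t x) {
  // Reading x-1 and x on the previous row needs no checks: the caller
  // guarantees 2 <= x < w - 2 and that prev is a full row.
  return cur[x - 1] + prev[x] - prev[x - 1];
}

void DecodeRow(const std::vector<int32_t>& prev,  // empty for the first rows
               const std::vector<int32_t>& residual,
               std::vector<int32_t>* cur) {
  const size_t w = cur->size();
  if (prev.empty() || w <= 8) {  // narrow rows or no history: one general loop
    for (size_t x = 0; x < w; x++)
      (*cur)[x] = PredictEdge(prev, *cur, x) + residual[x];
    return;
  }
  for (size_t x = 0; x < 2; x++)      // prologue: edge-aware predictor
    (*cur)[x] = PredictEdge(prev, *cur, x) + residual[x];
  for (size_t x = 2; x < w - 2; x++)  // hot interior: no edge cases
    (*cur)[x] = PredictInterior(prev, *cur, x) + residual[x];
  for (size_t x = w - 2; x < w; x++)  // epilogue: edge-aware predictor
    (*cur)[x] = PredictEdge(prev, *cur, x) + residual[x];
}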
491 | | Status DecodeModularChannelMAANS(BitReader *br, ANSSymbolReader *reader, |
492 | | const std::vector<uint8_t> &context_map, |
493 | | const Tree &global_tree, |
494 | | const weighted::Header &wp_header, |
495 | | pixel_type chan, size_t group_id, |
496 | | TreeLut<uint8_t, false, false> &tree_lut, |
497 | | Image *image, uint32_t &fl_run, |
498 | 158k | uint32_t &fl_v) { |
499 | 158k | if (reader->UsesLZ77()) { |
500 | 27.9k | return detail::DecodeModularChannelMAANS</*uses_lz77=*/true>( |
501 | 27.9k | br, reader, context_map, global_tree, wp_header, chan, group_id, |
502 | 27.9k | tree_lut, image, fl_run, fl_v); |
503 | 130k | } else { |
504 | 130k | return detail::DecodeModularChannelMAANS</*uses_lz77=*/false>( |
505 | 130k | br, reader, context_map, global_tree, wp_header, chan, group_id, |
506 | 130k | tree_lut, image, fl_run, fl_v); |
507 | 130k | } |
508 | 158k | } |
509 | | |
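DecodeModularChannelMAANS checks reader->UsesLZ77() exactly once and forwards to a detail:: instantiation templated on uses_lz77, so every per-pixel ReadHybridUintClustered<uses_lz77> call is specialized at compile time. A minimal sketch of that dispatch-once pattern, with hypothetical SymbolSource/ReadSymbol/DecodeSymbols names (not libjxl API):

#include <cstddef>
#include <cstdint>

// Sketch only: the runtime flag is read once and baked into a template
// parameter, so the per-symbol hot path carries no LZ77 branch.
struct SymbolSource {
  bool uses_lz77;
  uint64_t next = 0;
};

template <bool kUsesLz77>
uint64_t ReadSymbol(SymbolSource* src) {
  if (kUsesLz77) {
    // An LZ77 copy/run path would live here; it is dead code (and can be
    // optimized out) in the <false> instantiation.
  }
  return src->next++;
}

template <bool kUsesLz77>
uint64_t DecodeSymbols(SymbolSource* src, size_t n) {
  uint64_t sum = 0;
  for (size_t i = 0; i < n; i++) sum += ReadSymbol<kUsesLz77>(src);
  return sum;
}

uint64_t DecodeSymbolsDispatch(SymbolSource* src, size_t n) {
  // One runtime branch here replaces one branch per decoded symbol.
  return src->uses_lz77 ? DecodeSymbols<true>(src, n)
                        : DecodeSymbols<false>(src, n);
}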
510 | 85.3k | GroupHeader::GroupHeader() { Bundle::Init(this); } |
511 | | |
512 | | Status ValidateChannelDimensions(const Image &image, |
513 | 27.5k | const ModularOptions &options) { |
514 | 27.5k | size_t nb_channels = image.channel.size(); |
515 | 55.1k | for (bool is_dc : {true, false}) { |
516 | 55.1k | size_t group_dim = options.group_dim * (is_dc ? kBlockDim : 1); |
517 | 55.1k | size_t c = image.nb_meta_channels; |
518 | 384k | for (; c < nb_channels; c++) { |
519 | 330k | const Channel &ch = image.channel[c]; |
520 | 330k | if (ch.w > options.group_dim || ch.h > options.group_dim) break; |
521 | 330k | } |
522 | 59.6k | for (; c < nb_channels; c++) { |
523 | 4.48k | const Channel &ch = image.channel[c]; |
524 | 4.48k | if (ch.w == 0 || ch.h == 0) continue; // skip empty |
525 | 4.29k | bool is_dc_channel = std::min(ch.hshift, ch.vshift) >= 3; |
526 | 4.29k | if (is_dc_channel != is_dc) continue; |
527 | 2.14k | size_t tile_dim = group_dim >> std::max(ch.hshift, ch.vshift); |
528 | 2.14k | if (tile_dim == 0) { |
529 | 0 | return JXL_FAILURE("Inconsistent transforms"); |
530 | 0 | } |
531 | 2.14k | } |
532 | 55.1k | } |
533 | 27.5k | return true; |
534 | 27.5k | } |
535 | | |
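In ValidateChannelDimensions, channels that fit within a group are skipped, and each remaining non-empty channel of the matching DC/non-DC class must still yield a non-zero tile size once the group dimension is shifted down by its hshift/vshift; a zero tile_dim means the declared transforms are inconsistent. A small standalone sketch of that arithmetic (kBlockDim = 8 as in libjxl; the helper name is illustrative, not upstream API):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Sketch only: a channel whose downsampling shift exceeds the group
// dimension's log2 yields tile_dim == 0 and would be rejected.
bool TileDimIsValid(size_t group_dim, bool is_dc, int hshift, int vshift) {
  constexpr size_t kBlockDim = 8;
  size_t dim = group_dim * (is_dc ? kBlockDim : 1);
  size_t tile_dim = dim >> std::max(hshift, vshift);
  return tile_dim != 0;  // 0 would trigger "Inconsistent transforms" above
}

int main() {
  // A 256-pixel group with a channel downsampled by 2^3 still has 32-pixel tiles.
  std::printf("%d\n", TileDimIsValid(256, /*is_dc=*/false, 3, 3));  // prints 1
  // A shift of 9 pushes a 256-pixel (2^8) dimension to zero.
  std::printf("%d\n", TileDimIsValid(256, /*is_dc=*/false, 9, 9));  // prints 0
  return 0;
}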
536 | | Status ModularDecode(BitReader *br, Image &image, GroupHeader &header, |
537 | | size_t group_id, ModularOptions *options, |
538 | | const Tree *global_tree, const ANSCode *global_code, |
539 | | const std::vector<uint8_t> *global_ctx_map, |
540 | 33.3k | const bool allow_truncated_group) { |
541 | 33.3k | if (image.channel.empty()) return true; |
542 | 28.6k | JxlMemoryManager *memory_manager = image.memory_manager(); |
543 | | |
544 | | // decode transforms |
545 | 28.6k | Status status = Bundle::Read(br, &header); |
546 | 28.6k | if (!allow_truncated_group) JXL_RETURN_IF_ERROR(status); |
547 | 27.6k | if (status.IsFatalError()) return status; |
548 | 27.6k | if (!br->AllReadsWithinBounds()) { |
549 | | // Don't do/undo transforms if header is incomplete. |
550 | 0 | header.transforms.clear(); |
551 | 0 | image.transform = header.transforms; |
552 | 0 | for (auto &ch : image.channel) { |
553 | 0 | ZeroFillImage(&ch.plane); |
554 | 0 | } |
555 | 0 | return JXL_NOT_ENOUGH_BYTES("Read overrun before ModularDecode"); |
556 | 0 | } |
557 | | |
558 | 27.6k | JXL_DEBUG_V(3, "Image data underwent %" PRIuS " transformations: ", |
559 | 27.6k | header.transforms.size()); |
560 | 27.6k | image.transform = header.transforms; |
561 | 27.6k | for (Transform &transform : image.transform) { |
562 | 13.9k | JXL_RETURN_IF_ERROR(transform.MetaApply(image)); |
563 | 13.9k | } |
564 | 27.4k | if (image.error) { |
565 | 0 | return JXL_FAILURE("Corrupt file. Aborting."); |
566 | 0 | } |
567 | 27.4k | JXL_RETURN_IF_ERROR(ValidateChannelDimensions(image, *options)); |
568 | | |
569 | 27.4k | size_t nb_channels = image.channel.size(); |
570 | | |
571 | 27.4k | size_t num_chans = 0; |
572 | 27.4k | size_t distance_multiplier = 0; |
573 | 194k | for (size_t i = 0; i < nb_channels; i++) { |
574 | 166k | Channel &channel = image.channel[i]; |
575 | 166k | if (i >= image.nb_meta_channels && (channel.w > options->max_chan_size || |
576 | 164k | channel.h > options->max_chan_size)) { |
577 | 362 | break; |
578 | 362 | } |
579 | 166k | if (!channel.w || !channel.h) { |
580 | 1.11k | continue; // skip empty channels |
581 | 1.11k | } |
582 | 165k | if (channel.w > distance_multiplier) { |
583 | 36.0k | distance_multiplier = channel.w; |
584 | 36.0k | } |
585 | 165k | num_chans++; |
586 | 165k | } |
587 | 27.4k | if (num_chans == 0) return true; |
588 | | |
589 | 27.1k | size_t next_channel = 0; |
590 | 27.1k | auto scope_guard = MakeScopeGuard([&]() { |
591 | 10.7k | for (size_t c = next_channel; c < image.channel.size(); c++) { |
592 | 8.74k | ZeroFillImage(&image.channel[c].plane); |
593 | 8.74k | } |
594 | 1.95k | }); |
595 | | // Do not do anything if truncated groups are not allowed. |
596 | 27.1k | if (allow_truncated_group) scope_guard.Disarm(); |
597 | | |
598 | | // Read tree. |
599 | 27.1k | Tree tree_storage; |
600 | 27.1k | std::vector<uint8_t> context_map_storage; |
601 | 27.1k | ANSCode code_storage; |
602 | 27.1k | const Tree *tree = &tree_storage; |
603 | 27.1k | const ANSCode *code = &code_storage; |
604 | 27.1k | const std::vector<uint8_t> *context_map = &context_map_storage; |
605 | 27.1k | if (!header.use_global_tree) { |
606 | 22.0k | uint64_t max_tree_size = 1024; |
607 | 165k | for (size_t i = 0; i < nb_channels; i++) { |
608 | 143k | Channel &channel = image.channel[i]; |
609 | 143k | if (i >= image.nb_meta_channels && (channel.w > options->max_chan_size || |
610 | 142k | channel.h > options->max_chan_size)) { |
611 | 35 | break; |
612 | 35 | } |
613 | 143k | uint64_t pixels = channel.w * channel.h; |
614 | 143k | max_tree_size += pixels; |
615 | 143k | } |
616 | 22.0k | max_tree_size = std::min(static_cast<uint64_t>(1 << 20), max_tree_size); |
617 | 22.0k | JXL_RETURN_IF_ERROR( |
618 | 22.0k | DecodeTree(memory_manager, br, &tree_storage, max_tree_size)); |
619 | 21.8k | JXL_RETURN_IF_ERROR(DecodeHistograms(memory_manager, br, |
620 | 21.8k | (tree_storage.size() + 1) / 2, |
621 | 21.8k | &code_storage, &context_map_storage)); |
622 | 21.8k | } else { |
623 | 5.06k | if (!global_tree || !global_code || !global_ctx_map || |
624 | 5.06k | global_tree->empty()) { |
625 | 63 | return JXL_FAILURE("No global tree available but one was requested"); |
626 | 63 | } |
627 | 4.99k | tree = global_tree; |
628 | 4.99k | code = global_code; |
629 | 4.99k | context_map = global_ctx_map; |
630 | 4.99k | } |
631 | | |
632 | | // Read channels |
633 | 53.5k | JXL_ASSIGN_OR_RETURN(ANSSymbolReader reader, |
634 | 53.5k | ANSSymbolReader::Create(code, br, distance_multiplier)); |
635 | 53.5k | auto tree_lut = jxl::make_unique<TreeLut<uint8_t, false, false>>(); |
636 | 53.5k | uint32_t fl_run = 0; |
637 | 53.5k | uint32_t fl_v = 0; |
638 | 184k | for (; next_channel < nb_channels; next_channel++) { |
639 | 159k | Channel &channel = image.channel[next_channel]; |
640 | 159k | if (next_channel >= image.nb_meta_channels && |
641 | 159k | (channel.w > options->max_chan_size || |
642 | 157k | channel.h > options->max_chan_size)) { |
643 | 44 | break; |
644 | 44 | } |
645 | 159k | if (!channel.w || !channel.h) { |
646 | 834 | continue; // skip empty channels |
647 | 834 | } |
648 | 158k | JXL_RETURN_IF_ERROR(DecodeModularChannelMAANS( |
649 | 158k | br, &reader, *context_map, *tree, header.wp_header, next_channel, |
650 | 158k | group_id, *tree_lut, &image, fl_run, fl_v)); |
651 | | |
652 | | // Truncated group. |
653 | 158k | if (!br->AllReadsWithinBounds()) { |
654 | 1.57k | if (!allow_truncated_group) return JXL_FAILURE("Truncated input"); |
655 | 0 | return JXL_NOT_ENOUGH_BYTES("Read overrun in ModularDecode"); |
656 | 1.57k | } |
657 | 158k | } |
658 | | |
659 | | // Make sure no zero-filling happens even if next_channel < nb_channels. |
660 | 25.1k | scope_guard.Disarm(); |
661 | | |
662 | 25.1k | if (!reader.CheckANSFinalState()) { |
663 | 0 | return JXL_FAILURE("ANS decode final state failed"); |
664 | 0 | } |
665 | 25.1k | return true; |
666 | 25.1k | } |
667 | | |
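The scope guard created near the top of ModularDecode zero-fills every channel that has not been decoded yet whenever the function returns early, and is disarmed either up front (when truncated groups are tolerated) or once all channels have been read. A self-contained sketch of that idiom, assuming placeholder ScopeGuard/Plane/DecodeOne types rather than the libjxl ones:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Sketch only: the idiom is what matters; any early return leaves undecoded
// planes zeroed (like ZeroFillImage) instead of uninitialized.
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : f_(std::move(f)) {}
  ~ScopeGuard() {
    if (armed_) f_();
  }
  void Disarm() { armed_ = false; }

 private:
  F f_;
  bool armed_ = true;
};

using Plane = std::vector<int>;

// Placeholder "decode": fails on empty planes so the guard has work to do.
bool DecodeOne(Plane& p) {
  for (auto& v : p) v = 1;
  return !p.empty();
}

bool DecodeAll(std::vector<Plane>& planes, bool allow_truncated) {
  size_t next = 0;
  ScopeGuard guard([&] {
    for (size_t c = next; c < planes.size(); c++) {
      std::fill(planes[c].begin(), planes[c].end(), 0);
    }
  });
  if (allow_truncated) guard.Disarm();  // mirrors the allow_truncated_group case
  for (; next < planes.size(); next++) {
    if (!DecodeOne(planes[next])) return false;  // guard fills the remainder
  }
  guard.Disarm();  // full success: nothing left to fill
  return true;
}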
668 | | Status ModularGenericDecompress(BitReader *br, Image &image, |
669 | | GroupHeader *header, size_t group_id, |
670 | | ModularOptions *options, bool undo_transforms, |
671 | | const Tree *tree, const ANSCode *code, |
672 | | const std::vector<uint8_t> *ctx_map, |
673 | 33.3k | bool allow_truncated_group) { |
674 | 33.3k | std::vector<std::pair<uint32_t, uint32_t>> req_sizes; |
675 | 33.3k | req_sizes.reserve(image.channel.size()); |
676 | 97.0k | for (const auto &c : image.channel) { |
677 | 97.0k | req_sizes.emplace_back(c.w, c.h); |
678 | 97.0k | } |
679 | 33.3k | GroupHeader local_header; |
680 | 33.3k | if (header == nullptr) header = &local_header; |
681 | 33.3k | size_t bit_pos = br->TotalBitsConsumed(); |
682 | 33.3k | auto dec_status = ModularDecode(br, image, *header, group_id, options, tree, |
683 | 33.3k | code, ctx_map, allow_truncated_group); |
684 | 33.3k | if (!allow_truncated_group) JXL_RETURN_IF_ERROR(dec_status); |
685 | 30.1k | if (dec_status.IsFatalError()) return dec_status; |
686 | 30.1k | if (undo_transforms) image.undo_transforms(header->wp_header); |
687 | 30.1k | if (image.error) return JXL_FAILURE("Corrupt file. Aborting."); |
688 | 30.1k | JXL_DEBUG_V(4, |
689 | 30.1k | "Modular-decoded a %" PRIuS "x%" PRIuS " nbchans=%" PRIuS |
690 | 30.1k | " image from %" PRIuS " bytes", |
691 | 30.1k | image.w, image.h, image.channel.size(), |
692 | 30.1k | (br->TotalBitsConsumed() - bit_pos) / 8); |
693 | 30.1k | JXL_DEBUG_V(5, "Modular image: %s", image.DebugString().c_str()); |
694 | 30.1k | (void)bit_pos; |
695 | | // Check that after applying all transforms we are back to the requested |
696 | | // image sizes, otherwise there's a programming error with the |
697 | | // transformations. |
698 | 30.1k | if (undo_transforms) { |
699 | 5.71k | JXL_ENSURE(image.channel.size() == req_sizes.size()); |
700 | 26.2k | for (size_t c = 0; c < req_sizes.size(); c++) { |
701 | 20.4k | JXL_ENSURE(req_sizes[c].first == image.channel[c].w); |
702 | 20.4k | JXL_ENSURE(req_sizes[c].second == image.channel[c].h); |
703 | 20.4k | } |
704 | 5.71k | } |
705 | 30.1k | return dec_status; |
706 | 30.1k | } |
707 | | |
708 | | } // namespace jxl |
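ModularGenericDecompress records each channel's (w, h) before decoding and, when undo_transforms is set, re-checks them afterwards; a mismatch indicates a bug in the transform chain rather than bad input, hence JXL_ENSURE. A hedged sketch of that round-trip check, with an illustrative ChannelDims stand-in for the (w, h) fields of jxl::Channel:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: SizesRoundTrip mirrors the req_sizes verification loop above.
struct ChannelDims {
  uint32_t w;
  uint32_t h;
};

bool SizesRoundTrip(const std::vector<ChannelDims>& before_decode,
                    const std::vector<ChannelDims>& after_undo) {
  if (before_decode.size() != after_undo.size()) return false;
  for (size_t c = 0; c < before_decode.size(); c++) {
    if (before_decode[c].w != after_undo[c].w) return false;
    if (before_decode[c].h != after_undo[c].h) return false;
  }
  return true;  // a false result signals a transform bug, not bad input
}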