/src/libjxl/lib/jxl/dec_group.cc
Line | Count | Source |
1 | | // Copyright (c) the JPEG XL Project Authors. All rights reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style |
4 | | // license that can be found in the LICENSE file. |
5 | | |
6 | | #include "lib/jxl/dec_group.h" |
7 | | |
8 | | #include <stdint.h> |
9 | | #include <string.h> |
10 | | |
11 | | #include <algorithm> |
12 | | #include <memory> |
13 | | #include <utility> |
14 | | |
15 | | #include "lib/jxl/frame_header.h" |
16 | | |
17 | | #undef HWY_TARGET_INCLUDE |
18 | | #define HWY_TARGET_INCLUDE "lib/jxl/dec_group.cc" |
19 | | #include <hwy/foreach_target.h> |
20 | | #include <hwy/highway.h> |
21 | | |
22 | | #include "lib/jxl/ac_context.h" |
23 | | #include "lib/jxl/ac_strategy.h" |
24 | | #include "lib/jxl/aux_out.h" |
25 | | #include "lib/jxl/base/bits.h" |
26 | | #include "lib/jxl/base/printf_macros.h" |
27 | | #include "lib/jxl/base/profiler.h" |
28 | | #include "lib/jxl/base/status.h" |
29 | | #include "lib/jxl/coeff_order.h" |
30 | | #include "lib/jxl/common.h" |
31 | | #include "lib/jxl/convolve.h" |
32 | | #include "lib/jxl/dct_scales.h" |
33 | | #include "lib/jxl/dec_cache.h" |
34 | | #include "lib/jxl/dec_transforms-inl.h" |
35 | | #include "lib/jxl/dec_xyb.h" |
36 | | #include "lib/jxl/entropy_coder.h" |
37 | | #include "lib/jxl/epf.h" |
38 | | #include "lib/jxl/opsin_params.h" |
39 | | #include "lib/jxl/quant_weights.h" |
40 | | #include "lib/jxl/quantizer-inl.h" |
41 | | #include "lib/jxl/quantizer.h" |
42 | | |
43 | | #ifndef LIB_JXL_DEC_GROUP_CC |
44 | | #define LIB_JXL_DEC_GROUP_CC |
45 | | namespace jxl { |
46 | | |
47 | | // Interface used by DecodeGroupImpl to read the blocks of a group. |
48 | | class GetBlock { |
49 | | public: |
50 | | virtual void StartRow(size_t by) = 0; |
51 | | virtual Status LoadBlock(size_t bx, size_t by, const AcStrategy& acs, |
52 | | size_t size, size_t log2_covered_blocks, |
53 | | ACPtr block[3], ACType ac_type) = 0; |
54 | 0 | virtual ~GetBlock() {} |
55 | | }; |
56 | | |
57 | | // Controls whether DecodeGroupImpl renders to pixels or not. |
58 | | enum DrawMode { |
59 | | // Render to pixels. |
60 | | kDraw = 0, |
61 | | // Don't render to pixels. |
62 | | kDontDraw = 1, |
63 | | }; |
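| | // kDontDraw is used for intermediate passes of progressive decoding: the |
| | // quantized coefficients are read and accumulated into dec_state->coefficients, |
| | // while rendering is deferred until the final pass (see DecodeGroup below). |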
64 | | |
65 | | } // namespace jxl |
66 | | #endif // LIB_JXL_DEC_GROUP_CC |
67 | | |
68 | | HWY_BEFORE_NAMESPACE(); |
69 | | namespace jxl { |
70 | | namespace HWY_NAMESPACE { |
71 | | |
72 | | // These templates are not found via ADL. |
73 | | using hwy::HWY_NAMESPACE::Rebind; |
74 | | using hwy::HWY_NAMESPACE::ShiftRight; |
75 | | |
76 | | using D = HWY_FULL(float); |
77 | | using DU = HWY_FULL(uint32_t); |
78 | | using DI = HWY_FULL(int32_t); |
79 | | using DI16 = Rebind<int16_t, DI>; |
80 | | constexpr D d; |
81 | | constexpr DI di; |
82 | | constexpr DI16 di16; |
83 | | |
84 | | // TODO(veluca): consider SIMDfying. |
85 | 0 | void Transpose8x8InPlace(int32_t* JXL_RESTRICT block) { |
86 | 0 | for (size_t x = 0; x < 8; x++) { |
87 | 0 | for (size_t y = x + 1; y < 8; y++) { |
88 | 0 | std::swap(block[y * 8 + x], block[x * 8 + y]); |
89 | 0 | } |
90 | 0 | } |
91 | 0 | } Unexecuted instantiations: Transpose8x8InPlace for N_SSE4, N_AVX2, N_AVX3, N_EMU128 |
92 | | |
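| | // Per lane k, DequantLane computes |
| | //   c[k] = AdjustQuantBias(q[k]) * dequant_matrix[k] * (inv_global_scale / quant) |
| | // (with additional x_dm_multiplier / b_dm_multiplier factors for X and B), then |
| | // applies chroma-from-luma restoration: X += x_cc_mul * Y; B += b_cc_mul * Y. |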
93 | | template <ACType ac_type> |
94 | | void DequantLane(Vec<D> scaled_dequant_x, Vec<D> scaled_dequant_y, |
95 | | Vec<D> scaled_dequant_b, |
96 | | const float* JXL_RESTRICT dequant_matrices, size_t size, |
97 | | size_t k, Vec<D> x_cc_mul, Vec<D> b_cc_mul, |
98 | | const float* JXL_RESTRICT biases, ACPtr qblock[3], |
99 | 0 | float* JXL_RESTRICT block) { |
100 | 0 | const auto x_mul = Mul(Load(d, dequant_matrices + k), scaled_dequant_x); |
101 | 0 | const auto y_mul = |
102 | 0 | Mul(Load(d, dequant_matrices + size + k), scaled_dequant_y); |
103 | 0 | const auto b_mul = |
104 | 0 | Mul(Load(d, dequant_matrices + 2 * size + k), scaled_dequant_b); |
105 | |
106 | 0 | Vec<DI> quantized_x_int; |
107 | 0 | Vec<DI> quantized_y_int; |
108 | 0 | Vec<DI> quantized_b_int; |
109 | 0 | if (ac_type == ACType::k16) { |
110 | 0 | Rebind<int16_t, DI> di16; |
111 | 0 | quantized_x_int = PromoteTo(di, Load(di16, qblock[0].ptr16 + k)); |
112 | 0 | quantized_y_int = PromoteTo(di, Load(di16, qblock[1].ptr16 + k)); |
113 | 0 | quantized_b_int = PromoteTo(di, Load(di16, qblock[2].ptr16 + k)); |
114 | 0 | } else { |
115 | 0 | quantized_x_int = Load(di, qblock[0].ptr32 + k); |
116 | 0 | quantized_y_int = Load(di, qblock[1].ptr32 + k); |
117 | 0 | quantized_b_int = Load(di, qblock[2].ptr32 + k); |
118 | 0 | } |
119 | |
120 | 0 | const auto dequant_x_cc = |
121 | 0 | Mul(AdjustQuantBias(di, 0, quantized_x_int, biases), x_mul); |
122 | 0 | const auto dequant_y = |
123 | 0 | Mul(AdjustQuantBias(di, 1, quantized_y_int, biases), y_mul); |
124 | 0 | const auto dequant_b_cc = |
125 | 0 | Mul(AdjustQuantBias(di, 2, quantized_b_int, biases), b_mul); |
126 | |
127 | 0 | const auto dequant_x = MulAdd(x_cc_mul, dequant_y, dequant_x_cc); |
128 | 0 | const auto dequant_b = MulAdd(b_cc_mul, dequant_y, dequant_b_cc); |
129 | 0 | Store(dequant_x, d, block + k); |
130 | 0 | Store(dequant_y, d, block + size + k); |
131 | 0 | Store(dequant_b, d, block + 2 * size + k); |
132 | 0 | } Unexecuted instantiations: DequantLane&lt;ACType::k16&gt; and DequantLane&lt;ACType::k32&gt; for N_SSE4, N_AVX2, N_AVX3, N_EMU128 |
133 | | |
134 | | template <ACType ac_type> |
135 | | void DequantBlock(const AcStrategy& acs, float inv_global_scale, int quant, |
136 | | float x_dm_multiplier, float b_dm_multiplier, Vec<D> x_cc_mul, |
137 | | Vec<D> b_cc_mul, size_t kind, size_t size, |
138 | | const Quantizer& quantizer, size_t covered_blocks, |
139 | | const size_t* sbx, |
140 | | const float* JXL_RESTRICT* JXL_RESTRICT dc_row, |
141 | | size_t dc_stride, const float* JXL_RESTRICT biases, |
142 | 0 | ACPtr qblock[3], float* JXL_RESTRICT block) { |
143 | 0 | PROFILER_FUNC; |
144 | |
145 | 0 | const auto scaled_dequant_s = inv_global_scale / quant; |
146 | |
147 | 0 | const auto scaled_dequant_x = Set(d, scaled_dequant_s * x_dm_multiplier); |
148 | 0 | const auto scaled_dequant_y = Set(d, scaled_dequant_s); |
149 | 0 | const auto scaled_dequant_b = Set(d, scaled_dequant_s * b_dm_multiplier); |
150 | |
151 | 0 | const float* dequant_matrices = quantizer.DequantMatrix(kind, 0); |
152 | |
153 | 0 | for (size_t k = 0; k < covered_blocks * kDCTBlockSize; k += Lanes(d)) { |
154 | 0 | DequantLane<ac_type>(scaled_dequant_x, scaled_dequant_y, scaled_dequant_b, |
155 | 0 | dequant_matrices, size, k, x_cc_mul, b_cc_mul, biases, |
156 | 0 | qblock, block); |
157 | 0 | } |
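| | // The lowest-frequency (LLF) coefficients are not coded in the AC stream; |
| | // synthesize them from the decoded DC image for each channel. |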
158 | 0 | for (size_t c = 0; c < 3; c++) { |
159 | 0 | LowestFrequenciesFromDC(acs.Strategy(), dc_row[c] + sbx[c], dc_stride, |
160 | 0 | block + c * size); |
161 | 0 | } |
162 | 0 | } Unexecuted instantiations: DequantBlock&lt;ACType::k16&gt; and DequantBlock&lt;ACType::k32&gt; for N_SSE4, N_AVX2, N_AVX3, N_EMU128 |
163 | | |
164 | | Status DecodeGroupImpl(GetBlock* JXL_RESTRICT get_block, |
165 | | GroupDecCache* JXL_RESTRICT group_dec_cache, |
166 | | PassesDecoderState* JXL_RESTRICT dec_state, |
167 | | size_t thread, size_t group_idx, |
168 | | RenderPipelineInput& render_pipeline_input, |
169 | 0 | ImageBundle* decoded, DrawMode draw) { |
170 | | // TODO(veluca): investigate cache usage in this function. |
171 | 0 | PROFILER_FUNC; |
172 | 0 | const Rect block_rect = dec_state->shared->BlockGroupRect(group_idx); |
173 | 0 | const AcStrategyImage& ac_strategy = dec_state->shared->ac_strategy; |
174 | |
175 | 0 | const size_t xsize_blocks = block_rect.xsize(); |
176 | 0 | const size_t ysize_blocks = block_rect.ysize(); |
177 | |
178 | 0 | const size_t dc_stride = dec_state->shared->dc->PixelsPerRow(); |
179 | |
180 | 0 | const float inv_global_scale = dec_state->shared->quantizer.InvGlobalScale(); |
181 | |
182 | 0 | const YCbCrChromaSubsampling& cs = |
183 | 0 | dec_state->shared->frame_header.chroma_subsampling; |
184 | |
185 | 0 | size_t idct_stride[3]; |
186 | 0 | for (size_t c = 0; c < 3; c++) { |
187 | 0 | idct_stride[c] = render_pipeline_input.GetBuffer(c).first->PixelsPerRow(); |
188 | 0 | } |
189 | |
190 | 0 | HWY_ALIGN int32_t scaled_qtable[64 * 3]; |
191 | |
192 | 0 | ACType ac_type = dec_state->coefficients->Type(); |
193 | 0 | auto dequant_block = ac_type == ACType::k16 ? DequantBlock<ACType::k16> |
194 | 0 | : DequantBlock<ACType::k32>; |
195 | | // Whether coefficients should be stored for use by later passes and/or |
196 | | // read from earlier passes. |
197 | 0 | bool accumulate = !dec_state->coefficients->IsEmpty(); |
198 | | // Offset of the current block in the group. |
199 | 0 | size_t offset = 0; |
200 | |
201 | 0 | std::array<int, 3> jpeg_c_map; |
202 | 0 | bool jpeg_is_gray = false; |
203 | 0 | std::array<int, 3> dcoff = {}; |
204 | | |
205 | | // TODO(veluca): all of this should be done only once per image. |
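| | // For JPEG reconstruction, the block below derives jpeg_c_map (the JPEG |
| | // component order), dcoff (a DC offset of 1024 / qtable[0] when no color |
| | // transform is used) and scaled_qtable, the transposed fixed-point ratio of |
| | // the luma and chroma quantization steps used by the chroma-from-luma path |
| | // further down. |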
206 | 0 | if (decoded->IsJPEG()) { |
207 | 0 | if (!dec_state->shared->cmap.IsJPEGCompatible()) { |
208 | 0 | return JXL_FAILURE("The CfL map is not JPEG-compatible"); |
209 | 0 | } |
210 | 0 | jpeg_is_gray = (decoded->jpeg_data->components.size() == 1); |
211 | 0 | jpeg_c_map = JpegOrder(dec_state->shared->frame_header.color_transform, |
212 | 0 | jpeg_is_gray); |
213 | 0 | const std::vector<QuantEncoding>& qe = |
214 | 0 | dec_state->shared->matrices.encodings(); |
215 | 0 | if (qe.empty() || qe[0].mode != QuantEncoding::Mode::kQuantModeRAW || |
216 | 0 | std::abs(qe[0].qraw.qtable_den - 1.f / (8 * 255)) > 1e-8f) { |
217 | 0 | return JXL_FAILURE( |
218 | 0 | "Quantization table is not a JPEG quantization table."); |
219 | 0 | } |
220 | 0 | for (size_t c = 0; c < 3; c++) { |
221 | 0 | if (dec_state->shared->frame_header.color_transform == |
222 | 0 | ColorTransform::kNone) { |
223 | 0 | dcoff[c] = 1024 / (*qe[0].qraw.qtable)[64 * c]; |
224 | 0 | } |
225 | 0 | for (size_t i = 0; i < 64; i++) { |
226 | | // Transpose the matrix, as it will be used on the transposed block. |
227 | 0 | int n = qe[0].qraw.qtable->at(64 + i); |
228 | 0 | int d = qe[0].qraw.qtable->at(64 * c + i); |
229 | 0 | if (n <= 0 || d <= 0 || n >= 65536 || d >= 65536) { |
230 | 0 | return JXL_FAILURE("Invalid JPEG quantization table"); |
231 | 0 | } |
232 | 0 | scaled_qtable[64 * c + (i % 8) * 8 + (i / 8)] = |
233 | 0 | (1 << kCFLFixedPointPrecision) * n / d; |
234 | 0 | } |
235 | 0 | } |
236 | 0 | } |
237 | | |
238 | 0 | size_t hshift[3] = {cs.HShift(0), cs.HShift(1), cs.HShift(2)}; |
239 | 0 | size_t vshift[3] = {cs.VShift(0), cs.VShift(1), cs.VShift(2)}; |
240 | 0 | Rect r[3]; |
241 | 0 | for (size_t i = 0; i < 3; i++) { |
242 | 0 | r[i] = |
243 | 0 | Rect(block_rect.x0() >> hshift[i], block_rect.y0() >> vshift[i], |
244 | 0 | block_rect.xsize() >> hshift[i], block_rect.ysize() >> vshift[i]); |
245 | 0 | if (!r[i].IsInside({0, 0, dec_state->shared->dc->Plane(i).xsize(), |
246 | 0 | dec_state->shared->dc->Plane(i).ysize()})) { |
247 | 0 | return JXL_FAILURE("Frame dimensions are too big for the image."); |
248 | 0 | } |
249 | 0 | } |
250 | | |
251 | 0 | for (size_t by = 0; by < ysize_blocks; ++by) { |
252 | 0 | get_block->StartRow(by); |
253 | 0 | size_t sby[3] = {by >> vshift[0], by >> vshift[1], by >> vshift[2]}; |
254 | |
255 | 0 | const int32_t* JXL_RESTRICT row_quant = |
256 | 0 | block_rect.ConstRow(dec_state->shared->raw_quant_field, by); |
257 | |
258 | 0 | const float* JXL_RESTRICT dc_rows[3] = { |
259 | 0 | r[0].ConstPlaneRow(*dec_state->shared->dc, 0, sby[0]), |
260 | 0 | r[1].ConstPlaneRow(*dec_state->shared->dc, 1, sby[1]), |
261 | 0 | r[2].ConstPlaneRow(*dec_state->shared->dc, 2, sby[2]), |
262 | 0 | }; |
263 | |
264 | 0 | const size_t ty = (block_rect.y0() + by) / kColorTileDimInBlocks; |
265 | 0 | AcStrategyRow acs_row = ac_strategy.ConstRow(block_rect, by); |
266 | |
267 | 0 | const int8_t* JXL_RESTRICT row_cmap[3] = { |
268 | 0 | dec_state->shared->cmap.ytox_map.ConstRow(ty), |
269 | 0 | nullptr, |
270 | 0 | dec_state->shared->cmap.ytob_map.ConstRow(ty), |
271 | 0 | }; |
272 | |
273 | 0 | float* JXL_RESTRICT idct_row[3]; |
274 | 0 | int16_t* JXL_RESTRICT jpeg_row[3]; |
275 | 0 | for (size_t c = 0; c < 3; c++) { |
276 | 0 | idct_row[c] = render_pipeline_input.GetBuffer(c).second.Row( |
277 | 0 | render_pipeline_input.GetBuffer(c).first, sby[c] * kBlockDim); |
278 | 0 | if (decoded->IsJPEG()) { |
279 | 0 | auto& component = decoded->jpeg_data->components[jpeg_c_map[c]]; |
280 | 0 | jpeg_row[c] = |
281 | 0 | component.coeffs.data() + |
282 | 0 | (component.width_in_blocks * (r[c].y0() + sby[c]) + r[c].x0()) * |
283 | 0 | kDCTBlockSize; |
284 | 0 | } |
285 | 0 | } |
286 | |
287 | 0 | size_t bx = 0; |
288 | 0 | for (size_t tx = 0; tx < DivCeil(xsize_blocks, kColorTileDimInBlocks); |
289 | 0 | tx++) { |
290 | 0 | size_t abs_tx = tx + block_rect.x0() / kColorTileDimInBlocks; |
291 | 0 | auto x_cc_mul = |
292 | 0 | Set(d, dec_state->shared->cmap.YtoXRatio(row_cmap[0][abs_tx])); |
293 | 0 | auto b_cc_mul = |
294 | 0 | Set(d, dec_state->shared->cmap.YtoBRatio(row_cmap[2][abs_tx])); |
295 | | // Increment bx by llf_x because those iterations would otherwise |
296 | | // immediately continue (!IsFirstBlock). Reduces mispredictions. |
297 | 0 | for (; bx < xsize_blocks && bx < (tx + 1) * kColorTileDimInBlocks;) { |
298 | 0 | size_t sbx[3] = {bx >> hshift[0], bx >> hshift[1], bx >> hshift[2]}; |
299 | 0 | AcStrategy acs = acs_row[bx]; |
300 | 0 | const size_t llf_x = acs.covered_blocks_x(); |
301 | | |
302 | | // Can only happen in the second or lower rows of a varblock. |
303 | 0 | if (JXL_UNLIKELY(!acs.IsFirstBlock())) { |
304 | 0 | bx += llf_x; |
305 | 0 | continue; |
306 | 0 | } |
307 | 0 | PROFILER_ZONE("DecodeGroupImpl inner"); |
308 | 0 | const size_t log2_covered_blocks = acs.log2_covered_blocks(); |
309 | |
310 | 0 | const size_t covered_blocks = 1 << log2_covered_blocks; |
311 | 0 | const size_t size = covered_blocks * kDCTBlockSize; |
312 | |
313 | 0 | ACPtr qblock[3]; |
314 | 0 | if (accumulate) { |
315 | 0 | for (size_t c = 0; c < 3; c++) { |
316 | 0 | qblock[c] = dec_state->coefficients->PlaneRow(c, group_idx, offset); |
317 | 0 | } |
318 | 0 | } else { |
319 | | // Reading from the bitstream without either accumulating coefficients |
320 | | // or drawing would be pointless; hence the assert below. |
321 | 0 | JXL_ASSERT(draw == kDraw); |
322 | 0 | if (ac_type == ACType::k16) { |
323 | 0 | memset(group_dec_cache->dec_group_qblock16, 0, |
324 | 0 | size * 3 * sizeof(int16_t)); |
325 | 0 | for (size_t c = 0; c < 3; c++) { |
326 | 0 | qblock[c].ptr16 = group_dec_cache->dec_group_qblock16 + c * size; |
327 | 0 | } |
328 | 0 | } else { |
329 | 0 | memset(group_dec_cache->dec_group_qblock, 0, |
330 | 0 | size * 3 * sizeof(int32_t)); |
331 | 0 | for (size_t c = 0; c < 3; c++) { |
332 | 0 | qblock[c].ptr32 = group_dec_cache->dec_group_qblock + c * size; |
333 | 0 | } |
334 | 0 | } |
335 | 0 | } |
336 | 0 | JXL_RETURN_IF_ERROR(get_block->LoadBlock( |
337 | 0 | bx, by, acs, size, log2_covered_blocks, qblock, ac_type)); |
338 | 0 | offset += size; |
339 | 0 | if (draw == kDontDraw) { |
340 | 0 | bx += llf_x; |
341 | 0 | continue; |
342 | 0 | } |
343 | | |
344 | 0 | if (JXL_UNLIKELY(decoded->IsJPEG())) { |
345 | 0 | if (acs.Strategy() != AcStrategy::Type::DCT) { |
346 | 0 | return JXL_FAILURE( |
347 | 0 | "Can only decode to JPEG if only DCT-8 is used."); |
348 | 0 | } |
349 | | |
350 | 0 | HWY_ALIGN int32_t transposed_dct_y[64]; |
351 | 0 | for (size_t c : {1, 0, 2}) { |
352 | | // Propagate only Y for grayscale. |
353 | 0 | if (jpeg_is_gray && c != 1) { |
354 | 0 | continue; |
355 | 0 | } |
356 | 0 | if ((sbx[c] << hshift[c] != bx) || (sby[c] << vshift[c] != by)) { |
357 | 0 | continue; |
358 | 0 | } |
359 | 0 | int16_t* JXL_RESTRICT jpeg_pos = |
360 | 0 | jpeg_row[c] + sbx[c] * kDCTBlockSize; |
361 | | // JPEG XL stores DCT coefficients transposed; JPEG does not. |
362 | 0 | auto transposed_dct = qblock[c].ptr32; |
363 | 0 | Transpose8x8InPlace(transposed_dct); |
364 | | // No CfL - no need to store the y block converted to integers. |
365 | 0 | if (!cs.Is444() || |
366 | 0 | (row_cmap[0][abs_tx] == 0 && row_cmap[2][abs_tx] == 0)) { |
367 | 0 | for (size_t i = 0; i < 64; i += Lanes(d)) { |
368 | 0 | const auto ini = Load(di, transposed_dct + i); |
369 | 0 | const auto ini16 = DemoteTo(di16, ini); |
370 | 0 | StoreU(ini16, di16, jpeg_pos + i); |
371 | 0 | } |
372 | 0 | } else if (c == 1) { |
373 | | // Y channel: save for restoring X/B, but nothing else to do. |
374 | 0 | for (size_t i = 0; i < 64; i += Lanes(d)) { |
375 | 0 | const auto ini = Load(di, transposed_dct + i); |
376 | 0 | Store(ini, di, transposed_dct_y + i); |
377 | 0 | const auto ini16 = DemoteTo(di16, ini); |
378 | 0 | StoreU(ini16, di16, jpeg_pos + i); |
379 | 0 | } |
380 | 0 | } else { |
381 | | // transposed_dct_y contains the y channel block, transposed. |
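| | // Fixed-point chroma-from-luma restoration, roughly: |
| | //   coeff_scale ~ (qt_Y / qt_C) * cfl_ratio |
| | //   jpeg_coeff  = in + round(in_y * coeff_scale) |
| | // where each product keeps kCFLFixedPointPrecision fractional bits. |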
382 | 0 | const auto scale = Set( |
383 | 0 | di, dec_state->shared->cmap.RatioJPEG(row_cmap[c][abs_tx])); |
384 | 0 | const auto round = Set(di, 1 << (kCFLFixedPointPrecision - 1)); |
385 | 0 | for (int i = 0; i < 64; i += Lanes(d)) { |
386 | 0 | auto in = Load(di, transposed_dct + i); |
387 | 0 | auto in_y = Load(di, transposed_dct_y + i); |
388 | 0 | auto qt = Load(di, scaled_qtable + c * size + i); |
389 | 0 | auto coeff_scale = ShiftRight<kCFLFixedPointPrecision>( |
390 | 0 | Add(Mul(qt, scale), round)); |
391 | 0 | auto cfl_factor = ShiftRight<kCFLFixedPointPrecision>( |
392 | 0 | Add(Mul(in_y, coeff_scale), round)); |
393 | 0 | StoreU(DemoteTo(di16, Add(in, cfl_factor)), di16, jpeg_pos + i); |
394 | 0 | } |
395 | 0 | } |
396 | 0 | jpeg_pos[0] = |
397 | 0 | Clamp1<float>(dc_rows[c][sbx[c]] - dcoff[c], -2047, 2047); |
398 | 0 | } |
399 | 0 | } else { |
400 | 0 | HWY_ALIGN float* const block = group_dec_cache->dec_group_block; |
401 | | // Dequantize and add predictions. |
402 | 0 | dequant_block( |
403 | 0 | acs, inv_global_scale, row_quant[bx], dec_state->x_dm_multiplier, |
404 | 0 | dec_state->b_dm_multiplier, x_cc_mul, b_cc_mul, acs.RawStrategy(), |
405 | 0 | size, dec_state->shared->quantizer, |
406 | 0 | acs.covered_blocks_y() * acs.covered_blocks_x(), sbx, dc_rows, |
407 | 0 | dc_stride, |
408 | 0 | dec_state->output_encoding_info.opsin_params.quant_biases, qblock, |
409 | 0 | block); |
410 | |
411 | 0 | for (size_t c : {1, 0, 2}) { |
412 | 0 | if ((sbx[c] << hshift[c] != bx) || (sby[c] << vshift[c] != by)) { |
413 | 0 | continue; |
414 | 0 | } |
415 | | // IDCT |
416 | 0 | float* JXL_RESTRICT idct_pos = idct_row[c] + sbx[c] * kBlockDim; |
417 | 0 | TransformToPixels(acs.Strategy(), block + c * size, idct_pos, |
418 | 0 | idct_stride[c], group_dec_cache->scratch_space); |
419 | 0 | } |
420 | 0 | } |
421 | 0 | bx += llf_x; |
422 | 0 | } |
423 | 0 | } |
424 | 0 | } |
425 | 0 | if (draw == kDontDraw) { |
426 | 0 | return true; |
427 | 0 | } |
428 | 0 | return true; |
429 | 0 | } Unexecuted instantiations: DecodeGroupImpl for N_SSE4, N_AVX2, N_AVX3, N_EMU128 |
430 | | |
431 | | // NOLINTNEXTLINE(google-readability-namespace-comments) |
432 | | } // namespace HWY_NAMESPACE |
433 | | } // namespace jxl |
434 | | HWY_AFTER_NAMESPACE(); |
435 | | |
436 | | #if HWY_ONCE |
437 | | namespace jxl { |
438 | | namespace { |
439 | | // Decode quantized AC coefficients of DCT blocks. |
440 | | // LLF components in the output block will not be modified. |
441 | | template <ACType ac_type> |
442 | | Status DecodeACVarBlock(size_t ctx_offset, size_t log2_covered_blocks, |
443 | | int32_t* JXL_RESTRICT row_nzeros, |
444 | | const int32_t* JXL_RESTRICT row_nzeros_top, |
445 | | size_t nzeros_stride, size_t c, size_t bx, size_t by, |
446 | | size_t lbx, AcStrategy acs, |
447 | | const coeff_order_t* JXL_RESTRICT coeff_order, |
448 | | BitReader* JXL_RESTRICT br, |
449 | | ANSSymbolReader* JXL_RESTRICT decoder, |
450 | | const std::vector<uint8_t>& context_map, |
451 | | const uint8_t* qdc_row, const int32_t* qf_row, |
452 | | const BlockCtxMap& block_ctx_map, ACPtr block, |
453 | 0 | size_t shift = 0) { |
454 | 0 | PROFILER_FUNC; |
455 | | // Equal to number of LLF coefficients. |
456 | 0 | const size_t covered_blocks = 1 << log2_covered_blocks; |
457 | 0 | const size_t size = covered_blocks * kDCTBlockSize; |
458 | 0 | int32_t predicted_nzeros = |
459 | 0 | PredictFromTopAndLeft(row_nzeros_top, row_nzeros, bx, 32); |
460 | |
461 | 0 | size_t ord = kStrategyOrder[acs.RawStrategy()]; |
462 | 0 | const coeff_order_t* JXL_RESTRICT order = |
463 | 0 | &coeff_order[CoeffOrderOffset(ord, c)]; |
464 | |
465 | 0 | size_t block_ctx = block_ctx_map.Context(qdc_row[lbx], qf_row[bx], ord, c); |
466 | 0 | const int32_t nzero_ctx = |
467 | 0 | block_ctx_map.NonZeroContext(predicted_nzeros, block_ctx) + ctx_offset; |
468 | |
469 | 0 | size_t nzeros = decoder->ReadHybridUint(nzero_ctx, br, context_map); |
470 | 0 | if (nzeros + covered_blocks > size) { |
471 | 0 | return JXL_FAILURE("Invalid AC: nzeros too large"); |
472 | 0 | } |
473 | 0 | for (size_t y = 0; y < acs.covered_blocks_y(); y++) { |
474 | 0 | for (size_t x = 0; x < acs.covered_blocks_x(); x++) { |
475 | 0 | row_nzeros[bx + x + y * nzeros_stride] = |
476 | 0 | (nzeros + covered_blocks - 1) >> log2_covered_blocks; |
477 | 0 | } |
478 | 0 | } |
479 | |
480 | 0 | const size_t histo_offset = |
481 | 0 | ctx_offset + block_ctx_map.ZeroDensityContextsOffset(block_ctx); |
482 | | |
483 | | // Skip LLF |
484 | 0 | { |
485 | 0 | PROFILER_ZONE("AcDecSkipLLF, reader"); |
486 | 0 | size_t prev = (nzeros > size / 16 ? 0 : 1); |
487 | 0 | for (size_t k = covered_blocks; k < size && nzeros != 0; ++k) { |
488 | 0 | const size_t ctx = |
489 | 0 | histo_offset + ZeroDensityContext(nzeros, k, covered_blocks, |
490 | 0 | log2_covered_blocks, prev); |
491 | 0 | const size_t u_coeff = decoder->ReadHybridUint(ctx, br, context_map); |
492 | | // Hand-rolled version of UnpackSigned, shifting before the conversion to |
493 | | // signed integer to avoid undefined behavior of shifting negative |
494 | | // numbers. |
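| | // i.e. coeff = UnpackSigned(u_coeff) * (1 << shift), where UnpackSigned maps |
| | // 0, 1, 2, 3, 4, ... to 0, -1, +1, -2, +2, ... |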
495 | 0 | const size_t magnitude = u_coeff >> 1; |
496 | 0 | const size_t neg_sign = (~u_coeff) & 1; |
497 | 0 | const intptr_t coeff = |
498 | 0 | static_cast<intptr_t>((magnitude ^ (neg_sign - 1)) << shift); |
499 | 0 | if (ac_type == ACType::k16) { |
500 | 0 | block.ptr16[order[k]] += coeff; |
501 | 0 | } else { |
502 | 0 | block.ptr32[order[k]] += coeff; |
503 | 0 | } |
504 | 0 | prev = static_cast<size_t>(u_coeff != 0); |
505 | 0 | nzeros -= prev; |
506 | 0 | } |
507 | 0 | if (JXL_UNLIKELY(nzeros != 0)) { |
508 | 0 | return JXL_FAILURE("Invalid AC: nzeros not 0. Block (%" PRIuS ", %" PRIuS |
509 | 0 | "), channel %" PRIuS, |
510 | 0 | bx, by, c); |
511 | 0 | } |
512 | 0 | } |
513 | 0 | return true; |
514 | 0 | } Unexecuted instantiations: DecodeACVarBlock&lt;ACType::k16&gt; and DecodeACVarBlock&lt;ACType::k32&gt; |
515 | | |
516 | | // Structs used by DecodeGroupImpl to get a quantized block. |
517 | | // GetBlockFromBitstream uses ANS decoding (and thus keeps track of row |
518 | | // pointers in row_nzeros), GetBlockFromEncoder simply reads the coefficient |
519 | | // image provided by the encoder. |
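| | // DecodeGroupImpl drives either implementation through GetBlock: StartRow() |
| | // once per row of blocks, then LoadBlock() once per varblock (top-left block |
| | // only); LoadBlock() accumulates the coefficients of all passes it covers. |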
520 | | |
521 | | struct GetBlockFromBitstream : public GetBlock { |
522 | 0 | void StartRow(size_t by) override { |
523 | 0 | qf_row = rect.ConstRow(*qf, by); |
524 | 0 | for (size_t c = 0; c < 3; c++) { |
525 | 0 | size_t sby = by >> vshift[c]; |
526 | 0 | quant_dc_row = quant_dc->ConstRow(rect.y0() + by) + rect.x0(); |
527 | 0 | for (size_t i = 0; i < num_passes; i++) { |
528 | 0 | row_nzeros[i][c] = group_dec_cache->num_nzeroes[i].PlaneRow(c, sby); |
529 | 0 | row_nzeros_top[i][c] = |
530 | 0 | sby == 0 |
531 | 0 | ? nullptr |
532 | 0 | : group_dec_cache->num_nzeroes[i].ConstPlaneRow(c, sby - 1); |
533 | 0 | } |
534 | 0 | } |
535 | 0 | } |
536 | | |
537 | | Status LoadBlock(size_t bx, size_t by, const AcStrategy& acs, size_t size, |
538 | | size_t log2_covered_blocks, ACPtr block[3], |
539 | 0 | ACType ac_type) override { |
540 | 0 | auto decode_ac_varblock = ac_type == ACType::k16 |
541 | 0 | ? DecodeACVarBlock<ACType::k16> |
542 | 0 | : DecodeACVarBlock<ACType::k32>; |
543 | 0 | for (size_t c : {1, 0, 2}) { |
544 | 0 | size_t sbx = bx >> hshift[c]; |
545 | 0 | size_t sby = by >> vshift[c]; |
546 | 0 | if (JXL_UNLIKELY((sbx << hshift[c] != bx) || (sby << vshift[c] != by))) { |
547 | 0 | continue; |
548 | 0 | } |
549 | | |
550 | 0 | for (size_t pass = 0; JXL_UNLIKELY(pass < num_passes); pass++) { |
551 | 0 | JXL_RETURN_IF_ERROR(decode_ac_varblock( |
552 | 0 | ctx_offset[pass], log2_covered_blocks, row_nzeros[pass][c], |
553 | 0 | row_nzeros_top[pass][c], nzeros_stride, c, sbx, sby, bx, acs, |
554 | 0 | &coeff_orders[pass * coeff_order_size], readers[pass], |
555 | 0 | &decoders[pass], context_map[pass], quant_dc_row, qf_row, |
556 | 0 | *block_ctx_map, block[c], shift_for_pass[pass])); |
557 | 0 | } |
558 | 0 | } |
559 | 0 | return true; |
560 | 0 | } |
561 | | |
562 | | Status Init(BitReader* JXL_RESTRICT* JXL_RESTRICT readers, size_t num_passes, |
563 | | size_t group_idx, size_t histo_selector_bits, const Rect& rect, |
564 | | GroupDecCache* JXL_RESTRICT group_dec_cache, |
565 | 0 | PassesDecoderState* dec_state, size_t first_pass) { |
566 | 0 | for (size_t i = 0; i < 3; i++) { |
567 | 0 | hshift[i] = dec_state->shared->frame_header.chroma_subsampling.HShift(i); |
568 | 0 | vshift[i] = dec_state->shared->frame_header.chroma_subsampling.VShift(i); |
569 | 0 | } |
570 | 0 | this->coeff_order_size = dec_state->shared->coeff_order_size; |
571 | 0 | this->coeff_orders = |
572 | 0 | dec_state->shared->coeff_orders.data() + first_pass * coeff_order_size; |
573 | 0 | this->context_map = dec_state->context_map.data() + first_pass; |
574 | 0 | this->readers = readers; |
575 | 0 | this->num_passes = num_passes; |
576 | 0 | this->shift_for_pass = |
577 | 0 | dec_state->shared->frame_header.passes.shift + first_pass; |
578 | 0 | this->group_dec_cache = group_dec_cache; |
579 | 0 | this->rect = rect; |
580 | 0 | block_ctx_map = &dec_state->shared->block_ctx_map; |
581 | 0 | qf = &dec_state->shared->raw_quant_field; |
582 | 0 | quant_dc = &dec_state->shared->quant_dc; |
583 | |
584 | 0 | for (size_t pass = 0; pass < num_passes; pass++) { |
585 | | // Select which histogram set to use among those of the current pass. |
586 | 0 | size_t cur_histogram = 0; |
587 | 0 | if (histo_selector_bits != 0) { |
588 | 0 | cur_histogram = readers[pass]->ReadBits(histo_selector_bits); |
589 | 0 | } |
590 | 0 | if (cur_histogram >= dec_state->shared->num_histograms) { |
591 | 0 | return JXL_FAILURE("Invalid histogram selector"); |
592 | 0 | } |
593 | 0 | ctx_offset[pass] = cur_histogram * block_ctx_map->NumACContexts(); |
594 | |
595 | 0 | decoders[pass] = |
596 | 0 | ANSSymbolReader(&dec_state->code[pass + first_pass], readers[pass]); |
597 | 0 | } |
598 | 0 | nzeros_stride = group_dec_cache->num_nzeroes[0].PixelsPerRow(); |
599 | 0 | for (size_t i = 0; i < num_passes; i++) { |
600 | 0 | JXL_ASSERT( |
601 | 0 | nzeros_stride == |
602 | 0 | static_cast<size_t>(group_dec_cache->num_nzeroes[i].PixelsPerRow())); |
603 | 0 | } |
604 | 0 | return true; |
605 | 0 | } |
606 | | |
607 | | const uint32_t* shift_for_pass = nullptr; // not owned |
608 | | const coeff_order_t* JXL_RESTRICT coeff_orders; |
609 | | size_t coeff_order_size; |
610 | | const std::vector<uint8_t>* JXL_RESTRICT context_map; |
611 | | ANSSymbolReader decoders[kMaxNumPasses]; |
612 | | BitReader* JXL_RESTRICT* JXL_RESTRICT readers; |
613 | | size_t num_passes; |
614 | | size_t ctx_offset[kMaxNumPasses]; |
615 | | size_t nzeros_stride; |
616 | | int32_t* JXL_RESTRICT row_nzeros[kMaxNumPasses][3]; |
617 | | const int32_t* JXL_RESTRICT row_nzeros_top[kMaxNumPasses][3]; |
618 | | GroupDecCache* JXL_RESTRICT group_dec_cache; |
619 | | const BlockCtxMap* block_ctx_map; |
620 | | const ImageI* qf; |
621 | | const ImageB* quant_dc; |
622 | | const int32_t* qf_row; |
623 | | const uint8_t* quant_dc_row; |
624 | | Rect rect; |
625 | | size_t hshift[3], vshift[3]; |
626 | | }; |
627 | | |
628 | | struct GetBlockFromEncoder : public GetBlock { |
629 | 0 | void StartRow(size_t by) override {} |
630 | | |
631 | | Status LoadBlock(size_t bx, size_t by, const AcStrategy& acs, size_t size, |
632 | | size_t log2_covered_blocks, ACPtr block[3], |
633 | 0 | ACType ac_type) override { |
634 | 0 | JXL_DASSERT(ac_type == ACType::k32); |
635 | 0 | for (size_t c = 0; c < 3; c++) { |
636 | | // for each pass |
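| | // (coefficients from pass i are pre-scaled by 1 << shift_for_pass[i], |
| | // matching the shift applied by DecodeACVarBlock in the bitstream path) |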
637 | 0 | for (size_t i = 0; i < quantized_ac->size(); i++) { |
638 | 0 | for (size_t k = 0; k < size; k++) { |
639 | | // TODO(veluca): SIMD. |
640 | 0 | block[c].ptr32[k] += |
641 | 0 | rows[i][c][offset + k] * (1 << shift_for_pass[i]); |
642 | 0 | } |
643 | 0 | } |
644 | 0 | } |
645 | 0 | offset += size; |
646 | 0 | return true; |
647 | 0 | } |
648 | | |
649 | | GetBlockFromEncoder(const std::vector<std::unique_ptr<ACImage>>& ac, |
650 | | size_t group_idx, const uint32_t* shift_for_pass) |
651 | 0 | : quantized_ac(&ac), shift_for_pass(shift_for_pass) { |
652 | | // TODO(veluca): not supported with chroma subsampling. |
653 | 0 | for (size_t i = 0; i < quantized_ac->size(); i++) { |
654 | 0 | JXL_CHECK((*quantized_ac)[i]->Type() == ACType::k32); |
655 | 0 | for (size_t c = 0; c < 3; c++) { |
656 | 0 | rows[i][c] = (*quantized_ac)[i]->PlaneRow(c, group_idx, 0).ptr32; |
657 | 0 | } |
658 | 0 | } |
659 | 0 | } |
660 | | |
661 | | const std::vector<std::unique_ptr<ACImage>>* JXL_RESTRICT quantized_ac; |
662 | | size_t offset = 0; |
663 | | const int32_t* JXL_RESTRICT rows[kMaxNumPasses][3]; |
664 | | const uint32_t* shift_for_pass = nullptr; // not owned |
665 | | }; |
666 | | |
667 | | HWY_EXPORT(DecodeGroupImpl); |
668 | | |
669 | | } // namespace |
670 | | |
671 | | Status DecodeGroup(BitReader* JXL_RESTRICT* JXL_RESTRICT readers, |
672 | | size_t num_passes, size_t group_idx, |
673 | | PassesDecoderState* JXL_RESTRICT dec_state, |
674 | | GroupDecCache* JXL_RESTRICT group_dec_cache, size_t thread, |
675 | | RenderPipelineInput& render_pipeline_input, |
676 | | ImageBundle* JXL_RESTRICT decoded, size_t first_pass, |
677 | 0 | bool force_draw, bool dc_only, bool* should_run_pipeline) { |
678 | 0 | PROFILER_FUNC; |
679 | |
680 | 0 | DrawMode draw = (num_passes + first_pass == |
681 | 0 | dec_state->shared->frame_header.passes.num_passes) || |
682 | 0 | force_draw |
683 | 0 | ? kDraw |
684 | 0 | : kDontDraw; |
685 | |
686 | 0 | if (should_run_pipeline) { |
687 | 0 | *should_run_pipeline = draw != kDontDraw; |
688 | 0 | } |
689 | |
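| | // DC-only path: when this group is drawn before any AC pass has been decoded, |
| | // mirror-pad its DC and upsample it 8x with upsampler8x directly into the |
| | // render pipeline input instead of running DecodeGroupImpl. |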
690 | 0 | if (draw == kDraw && num_passes == 0 && first_pass == 0) { |
691 | 0 | group_dec_cache->InitDCBufferOnce(); |
692 | 0 | const YCbCrChromaSubsampling& cs = |
693 | 0 | dec_state->shared->frame_header.chroma_subsampling; |
694 | 0 | for (size_t c : {0, 1, 2}) { |
695 | 0 | size_t hs = cs.HShift(c); |
696 | 0 | size_t vs = cs.VShift(c); |
697 | | // Reuse group_dec_cache->dc_buffer as temporary storage for the padded DC. |
698 | 0 | const Rect src_rect_precs = dec_state->shared->BlockGroupRect(group_idx); |
699 | 0 | const Rect src_rect = |
700 | 0 | Rect(src_rect_precs.x0() >> hs, src_rect_precs.y0() >> vs, |
701 | 0 | src_rect_precs.xsize() >> hs, src_rect_precs.ysize() >> vs); |
702 | 0 | const Rect copy_rect(kRenderPipelineXOffset, 2, src_rect.xsize(), |
703 | 0 | src_rect.ysize()); |
704 | 0 | CopyImageToWithPadding(src_rect, dec_state->shared->dc->Plane(c), 2, |
705 | 0 | copy_rect, &group_dec_cache->dc_buffer); |
706 | | // Mirrorpad. Interleaving left and right padding ensures that padding |
707 | | // works out correctly even for images with DC size of 1. |
708 | 0 | for (size_t y = 0; y < src_rect.ysize() + 4; y++) { |
709 | 0 | size_t xend = kRenderPipelineXOffset + |
710 | 0 | (dec_state->shared->dc->Plane(c).xsize() >> hs) - |
711 | 0 | src_rect.x0(); |
712 | 0 | for (size_t ix = 0; ix < 2; ix++) { |
713 | 0 | if (src_rect.x0() == 0) { |
714 | 0 | group_dec_cache->dc_buffer.Row(y)[kRenderPipelineXOffset - ix - 1] = |
715 | 0 | group_dec_cache->dc_buffer.Row(y)[kRenderPipelineXOffset + ix]; |
716 | 0 | } |
717 | 0 | if (src_rect.x0() + src_rect.xsize() + 2 >= |
718 | 0 | (dec_state->shared->dc->xsize() >> hs)) { |
719 | 0 | group_dec_cache->dc_buffer.Row(y)[xend + ix] = |
720 | 0 | group_dec_cache->dc_buffer.Row(y)[xend - ix - 1]; |
721 | 0 | } |
722 | 0 | } |
723 | 0 | } |
724 | 0 | Rect dst_rect = render_pipeline_input.GetBuffer(c).second; |
725 | 0 | ImageF* upsampling_dst = render_pipeline_input.GetBuffer(c).first; |
726 | 0 | JXL_ASSERT(dst_rect.IsInside(*upsampling_dst)); |
727 | | |
728 | 0 | RenderPipelineStage::RowInfo input_rows(1, std::vector<float*>(5)); |
729 | 0 | RenderPipelineStage::RowInfo output_rows(1, std::vector<float*>(8)); |
730 | 0 | for (size_t y = src_rect.y0(); y < src_rect.y0() + src_rect.ysize(); |
731 | 0 | y++) { |
732 | 0 | for (ssize_t iy = 0; iy < 5; iy++) { |
733 | 0 | input_rows[0][iy] = group_dec_cache->dc_buffer.Row( |
734 | 0 | Mirror(ssize_t(y) + iy - 2, |
735 | 0 | dec_state->shared->dc->Plane(c).ysize() >> vs) + |
736 | 0 | 2 - src_rect.y0()); |
737 | 0 | } |
738 | 0 | for (size_t iy = 0; iy < 8; iy++) { |
739 | 0 | output_rows[0][iy] = |
740 | 0 | dst_rect.Row(upsampling_dst, ((y - src_rect.y0()) << 3) + iy) - |
741 | 0 | kRenderPipelineXOffset; |
742 | 0 | } |
743 | | // Arguments set to 0/nullptr are not used. |
744 | 0 | dec_state->upsampler8x->ProcessRow(input_rows, output_rows, |
745 | 0 | /*xextra=*/0, src_rect.xsize(), 0, 0, |
746 | 0 | thread); |
747 | 0 | } |
748 | 0 | } |
749 | 0 | return true; |
750 | 0 | } |
751 | | |
752 | 0 | size_t histo_selector_bits = 0; |
753 | 0 | if (dc_only) { |
754 | 0 | JXL_ASSERT(num_passes == 0); |
755 | 0 | } else { |
756 | 0 | JXL_ASSERT(dec_state->shared->num_histograms > 0); |
757 | 0 | histo_selector_bits = CeilLog2Nonzero(dec_state->shared->num_histograms); |
758 | 0 | } |
759 | | |
760 | 0 | GetBlockFromBitstream get_block; |
761 | 0 | JXL_RETURN_IF_ERROR( |
762 | 0 | get_block.Init(readers, num_passes, group_idx, histo_selector_bits, |
763 | 0 | dec_state->shared->BlockGroupRect(group_idx), |
764 | 0 | group_dec_cache, dec_state, first_pass)); |
765 | | |
766 | 0 | JXL_RETURN_IF_ERROR(HWY_DYNAMIC_DISPATCH(DecodeGroupImpl)( |
767 | 0 | &get_block, group_dec_cache, dec_state, thread, group_idx, |
768 | 0 | render_pipeline_input, decoded, draw)); |
769 | | |
770 | 0 | for (size_t pass = 0; pass < num_passes; pass++) { |
771 | 0 | if (!get_block.decoders[pass].CheckANSFinalState()) { |
772 | 0 | return JXL_FAILURE("ANS checksum failure."); |
773 | 0 | } |
774 | 0 | } |
775 | 0 | return true; |
776 | 0 | } |
777 | | |
778 | | Status DecodeGroupForRoundtrip(const std::vector<std::unique_ptr<ACImage>>& ac, |
779 | | size_t group_idx, |
780 | | PassesDecoderState* JXL_RESTRICT dec_state, |
781 | | GroupDecCache* JXL_RESTRICT group_dec_cache, |
782 | | size_t thread, |
783 | | RenderPipelineInput& render_pipeline_input, |
784 | | ImageBundle* JXL_RESTRICT decoded, |
785 | 0 | AuxOut* aux_out) { |
786 | 0 | PROFILER_FUNC; |
787 | |
|
788 | 0 | GetBlockFromEncoder get_block(ac, group_idx, |
789 | 0 | dec_state->shared->frame_header.passes.shift); |
790 | 0 | group_dec_cache->InitOnce( |
791 | 0 | /*num_passes=*/0, |
792 | 0 | /*used_acs=*/(1u << AcStrategy::kNumValidStrategies) - 1); |
793 | |
|
794 | 0 | return HWY_DYNAMIC_DISPATCH(DecodeGroupImpl)( |
795 | 0 | &get_block, group_dec_cache, dec_state, thread, group_idx, |
796 | 0 | render_pipeline_input, decoded, kDraw); |
797 | 0 | } |
798 | | |
799 | | } // namespace jxl |
800 | | #endif // HWY_ONCE |