/src/mozilla-central/third_party/aom/av1/decoder/decodeframe.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <assert.h> |
13 | | #include <stddef.h> |
14 | | |
15 | | #include "config/aom_config.h" |
16 | | #include "config/aom_dsp_rtcd.h" |
17 | | #include "config/aom_scale_rtcd.h" |
18 | | #include "config/av1_rtcd.h" |
19 | | |
20 | | #include "aom/aom_codec.h" |
21 | | #include "aom_dsp/aom_dsp_common.h" |
22 | | #include "aom_dsp/binary_codes_reader.h" |
23 | | #include "aom_dsp/bitreader.h" |
24 | | #include "aom_dsp/bitreader_buffer.h" |
25 | | #include "aom_mem/aom_mem.h" |
26 | | #include "aom_ports/aom_timer.h" |
27 | | #include "aom_ports/mem.h" |
28 | | #include "aom_ports/mem_ops.h" |
29 | | #include "aom_scale/aom_scale.h" |
30 | | #include "aom_util/aom_thread.h" |
31 | | |
32 | | #if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG |
33 | | #include "aom_util/debug_util.h" |
34 | | #endif // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG |
35 | | |
36 | | #include "av1/common/alloccommon.h" |
37 | | #include "av1/common/cdef.h" |
38 | | #include "av1/common/cfl.h" |
39 | | #if CONFIG_INSPECTION |
40 | | #include "av1/decoder/inspection.h" |
41 | | #endif |
42 | | #include "av1/common/common.h" |
43 | | #include "av1/common/entropy.h" |
44 | | #include "av1/common/entropymode.h" |
45 | | #include "av1/common/entropymv.h" |
46 | | #include "av1/common/frame_buffers.h" |
47 | | #include "av1/common/idct.h" |
48 | | #include "av1/common/mvref_common.h" |
49 | | #include "av1/common/pred_common.h" |
50 | | #include "av1/common/quant_common.h" |
51 | | #include "av1/common/reconinter.h" |
52 | | #include "av1/common/reconintra.h" |
53 | | #include "av1/common/resize.h" |
54 | | #include "av1/common/seg_common.h" |
55 | | #include "av1/common/thread_common.h" |
56 | | #include "av1/common/tile_common.h" |
57 | | #include "av1/common/warped_motion.h" |
58 | | #include "av1/common/obmc.h" |
59 | | #include "av1/decoder/decodeframe.h" |
60 | | #include "av1/decoder/decodemv.h" |
61 | | #include "av1/decoder/decoder.h" |
62 | | #include "av1/decoder/decodetxb.h" |
63 | | #include "av1/decoder/detokenize.h" |
64 | | |
// Accounting label: expands to the enclosing function's name.
#define ACCT_STR __func__

// This is needed by ext_tile related unit tests.
#define EXT_TILE_DEBUG 1
// Pixel count of the scratch buffer used for motion-compensation border
// extension: a (2 * MAX_SB_SIZE) region grown by the interpolation-filter
// margin on each side, squared.
#define MC_TEMP_BUF_PELS \
  (((MAX_SB_SIZE)*2 + (AOM_INTERP_EXTEND)*2) * \
   ((MAX_SB_SIZE)*2 + (AOM_INTERP_EXTEND)*2))
72 | | |
73 | | // Checks that the remaining bits start with a 1 and ends with 0s. |
74 | | // It consumes an additional byte, if already byte aligned before the check. |
75 | 0 | int av1_check_trailing_bits(AV1Decoder *pbi, struct aom_read_bit_buffer *rb) { |
76 | 0 | AV1_COMMON *const cm = &pbi->common; |
77 | 0 | // bit_offset is set to 0 (mod 8) when the reader is already byte aligned |
78 | 0 | int bits_before_alignment = 8 - rb->bit_offset % 8; |
79 | 0 | int trailing = aom_rb_read_literal(rb, bits_before_alignment); |
80 | 0 | if (trailing != (1 << (bits_before_alignment - 1))) { |
81 | 0 | cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; |
82 | 0 | return -1; |
83 | 0 | } |
84 | 0 | return 0; |
85 | 0 | } |
86 | | |
87 | | // Use only_chroma = 1 to only set the chroma planes |
88 | | static void set_planes_to_neutral_grey(const SequenceHeader *const seq_params, |
89 | | const YV12_BUFFER_CONFIG *const buf, |
90 | 0 | int only_chroma) { |
91 | 0 | if (seq_params->use_highbitdepth) { |
92 | 0 | const int val = 1 << (seq_params->bit_depth - 1); |
93 | 0 | for (int plane = only_chroma; plane < MAX_MB_PLANE; plane++) { |
94 | 0 | const int is_uv = plane > 0; |
95 | 0 | uint16_t *const base = CONVERT_TO_SHORTPTR(buf->buffers[plane]); |
96 | 0 | // Set the first row to neutral grey. Then copy the first row to all |
97 | 0 | // subsequent rows. |
98 | 0 | if (buf->crop_heights[is_uv] > 0) { |
99 | 0 | aom_memset16(base, val, buf->crop_widths[is_uv]); |
100 | 0 | for (int row_idx = 1; row_idx < buf->crop_heights[is_uv]; row_idx++) { |
101 | 0 | memcpy(&base[row_idx * buf->strides[is_uv]], base, |
102 | 0 | sizeof(*base) * buf->crop_widths[is_uv]); |
103 | 0 | } |
104 | 0 | } |
105 | 0 | } |
106 | 0 | } else { |
107 | 0 | for (int plane = only_chroma; plane < MAX_MB_PLANE; plane++) { |
108 | 0 | const int is_uv = plane > 0; |
109 | 0 | for (int row_idx = 0; row_idx < buf->crop_heights[is_uv]; row_idx++) { |
110 | 0 | memset(&buf->buffers[plane][row_idx * buf->uv_stride], 1 << 7, |
111 | 0 | buf->crop_widths[is_uv]); |
112 | 0 | } |
113 | 0 | } |
114 | 0 | } |
115 | 0 | } |
116 | | |
117 | | static void loop_restoration_read_sb_coeffs(const AV1_COMMON *const cm, |
118 | | MACROBLOCKD *xd, |
119 | | aom_reader *const r, int plane, |
120 | | int runit_idx); |
121 | | |
// Initializes the fixed mapping from compound-reference index to reference
// frame: four forward references and three backward references.
static void setup_compound_reference_mode(AV1_COMMON *cm) {
  cm->comp_fwd_ref[0] = LAST_FRAME;
  cm->comp_fwd_ref[1] = LAST2_FRAME;
  cm->comp_fwd_ref[2] = LAST3_FRAME;
  cm->comp_fwd_ref[3] = GOLDEN_FRAME;

  cm->comp_bwd_ref[0] = BWDREF_FRAME;
  cm->comp_bwd_ref[1] = ALTREF2_FRAME;
  cm->comp_bwd_ref[2] = ALTREF_FRAME;
}
132 | | |
// Returns nonzero iff [start, start + len) is a non-empty range that lies
// entirely within the buffer ending at `end`.
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  if (len == 0) return 0;
  return (len <= (size_t)(end - start)) ? 1 : 0;
}
136 | | |
137 | 0 | static TX_MODE read_tx_mode(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { |
138 | 0 | if (cm->coded_lossless) return ONLY_4X4; |
139 | 0 | return aom_rb_read_bit(rb) ? TX_MODE_SELECT : TX_MODE_LARGEST; |
140 | 0 | } |
141 | | |
142 | | static REFERENCE_MODE read_frame_reference_mode( |
143 | 0 | const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { |
144 | 0 | if (frame_is_intra_only(cm)) { |
145 | 0 | return SINGLE_REFERENCE; |
146 | 0 | } else { |
147 | 0 | return aom_rb_read_bit(rb) ? REFERENCE_MODE_SELECT : SINGLE_REFERENCE; |
148 | 0 | } |
149 | 0 | } |
150 | | |
// Copies the dequantized coefficients of one transform block into the
// scratch buffer, applies the inverse transform into dst, and zeroes the
// scratch buffer again for the next block.
static void inverse_transform_block(MACROBLOCKD *xd, int plane,
                                    const TX_TYPE tx_type,
                                    const TX_SIZE tx_size, uint8_t *dst,
                                    int stride, int reduced_tx_set) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = pd->dqcoeff;
  eob_info *eob_data = pd->eob_data + xd->txb_offset[plane];
  uint16_t scan_line = eob_data->max_scan_line;
  uint16_t eob = eob_data->eob;

  // Only the first (max_scan_line + 1) coefficients can be non-zero, so copy
  // and later clear just that prefix instead of the whole block.
  memcpy(dqcoeff, pd->dqcoeff_block + xd->cb_offset[plane],
         (scan_line + 1) * sizeof(dqcoeff[0]));
  av1_inverse_transform_block(xd, dqcoeff, plane, tx_type, tx_size, dst, stride,
                              eob, reduced_tx_set);
  // Leave the scratch buffer zeroed so the next transform block starts clean.
  memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
}
167 | | |
// Reads the transform coefficients for one intra transform block from the
// bitstream. Skipped blocks carry no coefficients, so nothing is read.
static void read_coeffs_tx_intra_block(const AV1_COMMON *const cm,
                                       MACROBLOCKD *const xd,
                                       aom_reader *const r, const int plane,
                                       const int row, const int col,
                                       const TX_SIZE tx_size) {
  MB_MODE_INFO *mbmi = xd->mi[0];
  if (!mbmi->skip) {
#if TXCOEFF_TIMER
    // Optional instrumentation: time spent reading coefficients is
    // accumulated into cm->txcoeff_timer.
    struct aom_usec_timer timer;
    aom_usec_timer_start(&timer);
#endif
    av1_read_coeffs_txb_facade(cm, xd, r, plane, row, col, tx_size);
#if TXCOEFF_TIMER
    aom_usec_timer_mark(&timer);
    const int64_t elapsed_time = aom_usec_timer_elapsed(&timer);
    cm->txcoeff_timer += elapsed_time;
    ++cm->txb_count;
#endif
  }
}
188 | | |
// Visitor that intentionally does nothing; all arguments are ignored.
static void decode_block_void(const AV1_COMMON *const cm, MACROBLOCKD *const xd,
                              aom_reader *const r, const int plane,
                              const int row, const int col,
                              const TX_SIZE tx_size) {
  (void)cm;
  (void)xd;
  (void)r;
  (void)plane;
  (void)row;
  (void)col;
  (void)tx_size;
}
201 | | |
// Inter-prediction visitor that intentionally does nothing; all arguments
// are ignored.
static void predict_inter_block_void(AV1_COMMON *const cm,
                                     MACROBLOCKD *const xd, int mi_row,
                                     int mi_col, BLOCK_SIZE bsize) {
  (void)cm;
  (void)xd;
  (void)mi_row;
  (void)mi_col;
  (void)bsize;
}
211 | | |
// CfL-store visitor that intentionally does nothing; all arguments are
// ignored.
static void cfl_store_inter_block_void(AV1_COMMON *const cm,
                                       MACROBLOCKD *const xd) {
  (void)cm;
  (void)xd;
}
217 | | |
// Forms the intra prediction for one transform block and, when the block is
// not skipped and has non-zero coefficients, applies the inverse transform
// to reconstruct it in the destination buffer.
static void predict_and_reconstruct_intra_block(
    const AV1_COMMON *const cm, MACROBLOCKD *const xd, aom_reader *const r,
    const int plane, const int row, const int col, const TX_SIZE tx_size) {
  (void)r;
  MB_MODE_INFO *mbmi = xd->mi[0];
  PLANE_TYPE plane_type = get_plane_type(plane);

  av1_predict_intra_block_facade(cm, xd, plane, col, row, tx_size);

  if (!mbmi->skip) {
    struct macroblockd_plane *const pd = &xd->plane[plane];

    // tx_type will be read out in av1_read_coeffs_txb_facade
    const TX_TYPE tx_type = av1_get_tx_type(plane_type, xd, row, col, tx_size,
                                            cm->reduced_tx_set_used);
    eob_info *eob_data = pd->eob_data + xd->txb_offset[plane];
    // Only run the inverse transform when there is at least one coefficient.
    if (eob_data->eob) {
      uint8_t *dst =
          &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
      inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
                              cm->reduced_tx_set_used);
    }
  }
  // Keep the reconstructed luma around when chroma-from-luma prediction will
  // need it later.
  if (plane == AOM_PLANE_Y && store_cfl_required(cm, xd)) {
    cfl_store_tx(xd, row, col, tx_size, mbmi->sb_type);
  }
}
245 | | |
// Applies the inverse transform for one inter transform block, reconstructing
// it on top of the inter prediction already present in the destination
// buffer.
static void inverse_transform_inter_block(const AV1_COMMON *const cm,
                                          MACROBLOCKD *const xd,
                                          aom_reader *const r, const int plane,
                                          const int blk_row, const int blk_col,
                                          const TX_SIZE tx_size) {
  (void)r;
  PLANE_TYPE plane_type = get_plane_type(plane);
  const struct macroblockd_plane *const pd = &xd->plane[plane];

  // tx_type will be read out in av1_read_coeffs_txb_facade
  const TX_TYPE tx_type = av1_get_tx_type(plane_type, xd, blk_row, blk_col,
                                          tx_size, cm->reduced_tx_set_used);

  uint8_t *dst =
      &pd->dst
           .buf[(blk_row * pd->dst.stride + blk_col) << tx_size_wide_log2[0]];
  inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
                          cm->reduced_tx_set_used);
#if CONFIG_MISMATCH_DEBUG
  // NOTE(review): mi_row/mi_col are not parameters of this function as shown
  // here -- confirm this debug path compiles when CONFIG_MISMATCH_DEBUG is
  // enabled (the signature may differ under that config).
  int pixel_c, pixel_r;
  BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
  int blk_w = block_size_wide[bsize];
  int blk_h = block_size_high[bsize];
  mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, blk_col, blk_row,
                  pd->subsampling_x, pd->subsampling_y);
  mismatch_check_block_tx(dst, pd->dst.stride, cm->frame_offset, plane, pixel_c,
                          pixel_r, blk_w, blk_h,
                          xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
#endif
}
276 | | |
277 | | static void set_cb_buffer_offsets(MACROBLOCKD *const xd, TX_SIZE tx_size, |
278 | 0 | int plane) { |
279 | 0 | xd->cb_offset[plane] += tx_size_wide[tx_size] * tx_size_high[tx_size]; |
280 | 0 | xd->txb_offset[plane] = |
281 | 0 | xd->cb_offset[plane] / (TX_SIZE_W_MIN * TX_SIZE_H_MIN); |
282 | 0 | } |
283 | | |
// Recursively walks the transform-size partition tree for one plane block.
// At each leaf (chroma never splits), reads the coefficients and applies the
// inverse transform through the thread's visitor functions, accumulating the
// end-of-block counts into *eob_total.
static void decode_reconstruct_tx(AV1_COMMON *cm, ThreadData *const td,
                                  aom_reader *r, MB_MODE_INFO *const mbmi,
                                  int plane, BLOCK_SIZE plane_bsize,
                                  int blk_row, int blk_col, int block,
                                  TX_SIZE tx_size, int *eob_total) {
  MACROBLOCKD *const xd = &td->xd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  // Chroma uses a single max-size transform; luma uses the coded per-block
  // inter transform size.
  const TX_SIZE plane_tx_size =
      plane ? av1_get_max_uv_txsize(mbmi->sb_type, pd->subsampling_x,
                                    pd->subsampling_y)
            : mbmi->inter_tx_size[av1_get_txb_size_index(plane_bsize, blk_row,
                                                         blk_col)];
  // Scale to match transform block unit.
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);

  // Blocks starting outside the visible frame are not coded.
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  if (tx_size == plane_tx_size || plane) {
    // Leaf: read coefficients and reconstruct this transform block.
    td->read_coeffs_tx_inter_block_visit(cm, xd, r, plane, blk_row, blk_col,
                                         tx_size);

    td->inverse_tx_inter_block_visit(cm, xd, r, plane, blk_row, blk_col,
                                     tx_size);
    eob_info *eob_data = pd->eob_data + xd->txb_offset[plane];
    *eob_total += eob_data->eob;
    set_cb_buffer_offsets(xd, tx_size, plane);
  } else {
    // Interior node: recurse into the sub-transforms.
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
    assert(IMPLIES(tx_size <= TX_4X4, sub_txs == tx_size));
    assert(IMPLIES(tx_size > TX_4X4, sub_txs < tx_size));
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];
    const int sub_step = bsw * bsh;

    assert(bsw > 0 && bsh > 0);

    for (int row = 0; row < tx_size_high_unit[tx_size]; row += bsh) {
      for (int col = 0; col < tx_size_wide_unit[tx_size]; col += bsw) {
        const int offsetr = blk_row + row;
        const int offsetc = blk_col + col;

        if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;

        decode_reconstruct_tx(cm, td, r, mbmi, plane, plane_bsize, offsetr,
                              offsetc, block, sub_txs, eob_total);
        block += sub_step;
      }
    }
  }
}
335 | | |
// Points xd at the mode-info entry for the block at (mi_row, mi_col),
// replicates that pointer across the block's mi-grid footprint, and sets up
// the per-plane destination buffers, skip contexts and edge distances.
static void set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                        BLOCK_SIZE bsize, int mi_row, int mi_col, int bw,
                        int bh, int x_mis, int y_mis) {
  const int num_planes = av1_num_planes(cm);

  const int offset = mi_row * cm->mi_stride + mi_col;
  const TileInfo *const tile = &xd->tile;

  xd->mi = cm->mi_grid_visible + offset;
  xd->mi[0] = &cm->mi[offset];
  // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of
  // passing bsize from decode_partition().
  xd->mi[0]->sb_type = bsize;
#if CONFIG_RD_DEBUG
  xd->mi[0]->mi_row = mi_row;
  xd->mi[0]->mi_col = mi_col;
#endif
  xd->cfl.mi_row = mi_row;
  xd->cfl.mi_col = mi_col;

  assert(x_mis && y_mis);
  // All x_mis * y_mis grid entries share the top-left MB_MODE_INFO pointer.
  for (int x = 1; x < x_mis; ++x) xd->mi[x] = xd->mi[0];
  int idx = cm->mi_stride;
  for (int y = 1; y < y_mis; ++y) {
    memcpy(&xd->mi[idx], &xd->mi[0], x_mis * sizeof(xd->mi[0]));
    idx += cm->mi_stride;
  }

  set_plane_n4(xd, bw, bh, num_planes);
  set_skip_context(xd, mi_row, mi_col, num_planes);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
                       mi_col, 0, num_planes);
}
374 | | |
// Decodes the mode info (modes, motion vectors, ...) for one block, then
// validates that the block size is legal for the frame's chroma subsampling
// and merges any reader error into xd->corrupted.
static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                              int mi_row, int mi_col, aom_reader *r,
                              PARTITION_TYPE partition, BLOCK_SIZE bsize) {
  AV1_COMMON *const cm = &pbi->common;
  const SequenceHeader *const seq_params = &cm->seq_params;
  const int bw = mi_size_wide[bsize];
  const int bh = mi_size_high[bsize];
  // Clamp the mi extent so blocks straddling the frame edge only touch
  // entries inside the frame.
  const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
  const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);

#if CONFIG_ACCOUNTING
  aom_accounting_set_context(&pbi->accounting, mi_col, mi_row);
#endif
  set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
  xd->mi[0]->partition = partition;
  av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
  if (bsize >= BLOCK_8X8 &&
      (seq_params->subsampling_x || seq_params->subsampling_y)) {
    // A block size with no valid subsampled chroma size means the stream is
    // corrupt.
    const BLOCK_SIZE uv_subsize =
        ss_size_lookup[bsize][seq_params->subsampling_x]
                      [seq_params->subsampling_y];
    if (uv_subsize == BLOCK_INVALID)
      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid block size.");
  }

  int reader_corrupted_flag = aom_reader_has_error(r);
  aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag);
}
404 | | |
// Rectangle describing a reference block to fetch: [x0, x1) x [y0, y1).
// x1/y1 are exclusive -- widths/heights are computed as x1 - x0 / y1 - y0.
typedef struct PadBlock {
  int x0;  // left edge (inclusive)
  int x1;  // right edge (exclusive)
  int y0;  // top edge (inclusive)
  int y1;  // bottom edge (exclusive)
} PadBlock;
411 | | |
// High-bit-depth counterpart of build_mc_border: copies the b_w x b_h region
// at (x, y) of a w x h 16-bit reference frame into dst, replicating edge
// pixels for the parts of the region that fall outside the frame.
static void highbd_build_mc_border(const uint8_t *src8, int src_stride,
                                   uint8_t *dst8, int dst_stride, int x, int y,
                                   int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  const uint16_t *ref_row = src - x - y * src_stride;

  // Clamp the starting source row into the frame.
  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    // Pixels to replicate from the left frame edge.
    int left = x < 0 ? -x : 0;

    if (left > b_w) left = b_w;

    // Pixels to replicate from the right frame edge.
    if (x + b_w > w) right = x + b_w - w;

    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left) aom_memset16(dst, ref_row[0], left);

    if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));

    if (right) aom_memset16(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    // Advance the source row only while it remains inside the frame.
    if (y > 0 && y < h) ref_row += src_stride;
  } while (--b_h);
}
449 | | |
// Copies the b_w x b_h region at (x, y) of a w x h reference frame into dst,
// replicating edge pixels for any part of the region outside the frame.
// `src` points at the region's nominal top-left inside the frame layout.
static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
                            int dst_stride, int x, int y, int b_w, int b_h,
                            int w, int h) {
  // Recover the frame-row pointer for the first output row, clamping rows
  // above/below the frame to the nearest valid row.
  const uint8_t *ref_row = src - x - y * src_stride;
  if (y >= h) {
    ref_row += (h - 1) * src_stride;
  } else if (y > 0) {
    ref_row += y * src_stride;
  }

  do {
    // Split the b_w output pixels into a left-replicated run, a copied run,
    // and a right-replicated run.
    int left = (x < 0) ? -x : 0;
    if (left > b_w) left = b_w;
    int right = (x + b_w > w) ? (x + b_w - w) : 0;
    if (right > b_w) right = b_w;
    const int copy = b_w - left - right;

    if (left > 0) memset(dst, ref_row[0], left);
    if (copy > 0) memcpy(dst + left, ref_row + x + left, copy);
    if (right > 0) memset(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;
    // Advance the source row only while it stays inside the frame.
    if (y > 0 && y < h) ref_row += src_stride;
  } while (--b_h);
}
485 | | |
// Decides whether motion compensation for this block needs the reference
// border extended. Grows *block by the interpolation-filter margin on each
// axis that uses sub-pel filtering (recording the growth in *x_pad/*y_pad)
// and returns 1 iff the grown block reaches outside the reference frame,
// i.e. border extension is required.
static INLINE int update_extend_mc_border_params(
    const struct scale_factors *const sf, struct buf_2d *const pre_buf,
    MV32 scaled_mv, PadBlock *block, int subpel_x_mv, int subpel_y_mv,
    int do_warp, int is_intrabc, int *x_pad, int *y_pad) {
  const int is_scaled = av1_is_scaled(sf);
  // Get reference width and height.
  int frame_width = pre_buf->width;
  int frame_height = pre_buf->height;

  // Do border extension if there is motion or
  // width/height is not a multiple of 8 pixels.
  if ((!is_intrabc) && (!do_warp) &&
      (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
       (frame_height & 0x7))) {
    if (subpel_x_mv || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
      // Horizontal filtering needs AOM_INTERP_EXTEND - 1 extra columns on
      // the left and AOM_INTERP_EXTEND on the right.
      block->x0 -= AOM_INTERP_EXTEND - 1;
      block->x1 += AOM_INTERP_EXTEND;
      *x_pad = 1;
    }

    if (subpel_y_mv || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
      block->y0 -= AOM_INTERP_EXTEND - 1;
      block->y1 += AOM_INTERP_EXTEND;
      *y_pad = 1;
    }

    // Extension is needed only when the (grown) block reaches beyond the
    // reference frame; blocks fully inside need no work.
    if (block->x0 < 0 || block->x1 > frame_width - 1 || block->y0 < 0 ||
        block->y1 > frame_height - 1) {
      return 1;
    }
  }
  return 0;
}
520 | | |
// If the prediction block (plus filter margins) reaches outside the
// reference frame, builds an edge-extended copy of it in mc_buf and redirects
// *pre / *src_stride to that copy; otherwise leaves them untouched.
static INLINE void extend_mc_border(const struct scale_factors *const sf,
                                    struct buf_2d *const pre_buf,
                                    MV32 scaled_mv, PadBlock block,
                                    int subpel_x_mv, int subpel_y_mv,
                                    int do_warp, int is_intrabc, int highbd,
                                    uint8_t *mc_buf, uint8_t **pre,
                                    int *src_stride) {
  int x_pad = 0, y_pad = 0;
  if (update_extend_mc_border_params(sf, pre_buf, scaled_mv, &block,
                                     subpel_x_mv, subpel_y_mv, do_warp,
                                     is_intrabc, &x_pad, &y_pad)) {
    // Get reference block pointer.
    const uint8_t *const buf_ptr =
        pre_buf->buf0 + block.y0 * pre_buf->stride + block.x0;
    int buf_stride = pre_buf->stride;
    const int b_w = block.x1 - block.x0;
    const int b_h = block.y1 - block.y0;

    // Extend the border.
    if (highbd) {
      highbd_build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0,
                             block.y0, b_w, b_h, pre_buf->width,
                             pre_buf->height);
    } else {
      build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0, block.y0, b_w,
                      b_h, pre_buf->width, pre_buf->height);
    }
    *src_stride = b_w;
    // Skip past the filter margin so *pre addresses the block proper inside
    // the extended copy.
    *pre = mc_buf + y_pad * (AOM_INTERP_EXTEND - 1) * b_w +
           x_pad * (AOM_INTERP_EXTEND - 1);
  }
}
553 | | |
// Computes the sub-pel interpolation parameters and the reference-block
// rectangle for one motion-compensated prediction, covering both the
// scaled-reference and unscaled paths. Outputs:
//   subpel_params - filter phase (subpel_x/y) and step (xs/ys)
//   block         - reference rectangle [x0, x1) x [y0, y1)
//   scaled_mv     - the (possibly scaled) motion vector actually used
//   subpel_x_mv / subpel_y_mv - sub-pel fraction of that vector
static INLINE void dec_calc_subpel_params(
    MACROBLOCKD *xd, const struct scale_factors *const sf, const MV mv,
    int plane, const int pre_x, const int pre_y, int x, int y,
    struct buf_2d *const pre_buf, SubpelParams *subpel_params, int bw, int bh,
    PadBlock *block, int mi_x, int mi_y, MV32 *scaled_mv, int *subpel_x_mv,
    int *subpel_y_mv) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int is_scaled = av1_is_scaled(sf);
  if (is_scaled) {
    int ssx = pd->subsampling_x;
    int ssy = pd->subsampling_y;
    // Position in the reference, in sub-pel units (1 << SUBPEL_BITS per
    // pixel), before applying the scale factors.
    int orig_pos_y = (pre_y + y) << SUBPEL_BITS;
    orig_pos_y += mv.row * (1 << (1 - ssy));
    int orig_pos_x = (pre_x + x) << SUBPEL_BITS;
    orig_pos_x += mv.col * (1 << (1 - ssx));
    int pos_y = sf->scale_value_y(orig_pos_y, sf);
    int pos_x = sf->scale_value_x(orig_pos_x, sf);
    pos_x += SCALE_EXTRA_OFF;
    pos_y += SCALE_EXTRA_OFF;

    // Clamp the scaled position to the valid margin around the frame.
    const int top = -AOM_LEFT_TOP_MARGIN_SCALED(ssy);
    const int left = -AOM_LEFT_TOP_MARGIN_SCALED(ssx);
    const int bottom = (pre_buf->height + AOM_INTERP_EXTEND)
                       << SCALE_SUBPEL_BITS;
    const int right = (pre_buf->width + AOM_INTERP_EXTEND) << SCALE_SUBPEL_BITS;
    pos_y = clamp(pos_y, top, bottom);
    pos_x = clamp(pos_x, left, right);

    subpel_params->subpel_x = pos_x & SCALE_SUBPEL_MASK;
    subpel_params->subpel_y = pos_y & SCALE_SUBPEL_MASK;
    subpel_params->xs = sf->x_step_q4;
    subpel_params->ys = sf->y_step_q4;

    // Get reference block top left coordinate.
    block->x0 = pos_x >> SCALE_SUBPEL_BITS;
    block->y0 = pos_y >> SCALE_SUBPEL_BITS;

    // Get reference block bottom right coordinate.
    block->x1 =
        ((pos_x + (bw - 1) * subpel_params->xs) >> SCALE_SUBPEL_BITS) + 1;
    block->y1 =
        ((pos_y + (bh - 1) * subpel_params->ys) >> SCALE_SUBPEL_BITS) + 1;

    MV temp_mv;
    temp_mv = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, pd->subsampling_x,
                                        pd->subsampling_y);
    *scaled_mv = av1_scale_mv(&temp_mv, (mi_x + x), (mi_y + y), sf);
    scaled_mv->row += SCALE_EXTRA_OFF;
    scaled_mv->col += SCALE_EXTRA_OFF;

    *subpel_x_mv = scaled_mv->col & SCALE_SUBPEL_MASK;
    *subpel_y_mv = scaled_mv->row & SCALE_SUBPEL_MASK;
  } else {
    // Get block position in current frame.
    int pos_x = (pre_x + x) << SUBPEL_BITS;
    int pos_y = (pre_y + y) << SUBPEL_BITS;

    const MV mv_q4 = clamp_mv_to_umv_border_sb(
        xd, &mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
    // Unscaled reference: unit step; the sub-pel phase comes straight from
    // the clamped motion vector.
    subpel_params->xs = subpel_params->ys = SCALE_SUBPEL_SHIFTS;
    subpel_params->subpel_x = (mv_q4.col & SUBPEL_MASK) << SCALE_EXTRA_BITS;
    subpel_params->subpel_y = (mv_q4.row & SUBPEL_MASK) << SCALE_EXTRA_BITS;

    // Get reference block top left coordinate.
    pos_x += mv_q4.col;
    pos_y += mv_q4.row;
    block->x0 = pos_x >> SUBPEL_BITS;
    block->y0 = pos_y >> SUBPEL_BITS;

    // Get reference block bottom right coordinate.
    block->x1 = (pos_x >> SUBPEL_BITS) + (bw - 1) + 1;
    block->y1 = (pos_y >> SUBPEL_BITS) + (bh - 1) + 1;

    scaled_mv->row = mv_q4.row;
    scaled_mv->col = mv_q4.col;
    *subpel_x_mv = scaled_mv->col & SUBPEL_MASK;
    *subpel_y_mv = scaled_mv->row & SUBPEL_MASK;
  }
}
633 | | |
634 | | static INLINE void dec_build_inter_predictors(const AV1_COMMON *cm, |
635 | | MACROBLOCKD *xd, int plane, |
636 | | const MB_MODE_INFO *mi, |
637 | | int build_for_obmc, int bw, |
638 | 0 | int bh, int mi_x, int mi_y) { |
639 | 0 | struct macroblockd_plane *const pd = &xd->plane[plane]; |
640 | 0 | int is_compound = has_second_ref(mi); |
641 | 0 | int ref; |
642 | 0 | const int is_intrabc = is_intrabc_block(mi); |
643 | 0 | assert(IMPLIES(is_intrabc, !is_compound)); |
644 | 0 | int is_global[2] = { 0, 0 }; |
645 | 0 | for (ref = 0; ref < 1 + is_compound; ++ref) { |
646 | 0 | const WarpedMotionParams *const wm = &xd->global_motion[mi->ref_frame[ref]]; |
647 | 0 | is_global[ref] = is_global_mv_block(mi, wm->wmtype); |
648 | 0 | } |
649 | 0 |
|
650 | 0 | const BLOCK_SIZE bsize = mi->sb_type; |
651 | 0 | const int ss_x = pd->subsampling_x; |
652 | 0 | const int ss_y = pd->subsampling_y; |
653 | 0 | int sub8x8_inter = (block_size_wide[bsize] < 8 && ss_x) || |
654 | 0 | (block_size_high[bsize] < 8 && ss_y); |
655 | 0 |
|
656 | 0 | if (is_intrabc) sub8x8_inter = 0; |
657 | 0 |
|
658 | 0 | // For sub8x8 chroma blocks, we may be covering more than one luma block's |
659 | 0 | // worth of pixels. Thus (mi_x, mi_y) may not be the correct coordinates for |
660 | 0 | // the top-left corner of the prediction source - the correct top-left corner |
661 | 0 | // is at (pre_x, pre_y). |
662 | 0 | const int row_start = |
663 | 0 | (block_size_high[bsize] == 4) && ss_y && !build_for_obmc ? -1 : 0; |
664 | 0 | const int col_start = |
665 | 0 | (block_size_wide[bsize] == 4) && ss_x && !build_for_obmc ? -1 : 0; |
666 | 0 | const int pre_x = (mi_x + MI_SIZE * col_start) >> ss_x; |
667 | 0 | const int pre_y = (mi_y + MI_SIZE * row_start) >> ss_y; |
668 | 0 |
|
669 | 0 | sub8x8_inter = sub8x8_inter && !build_for_obmc; |
670 | 0 | if (sub8x8_inter) { |
671 | 0 | for (int row = row_start; row <= 0 && sub8x8_inter; ++row) { |
672 | 0 | for (int col = col_start; col <= 0; ++col) { |
673 | 0 | const MB_MODE_INFO *this_mbmi = xd->mi[row * xd->mi_stride + col]; |
674 | 0 | if (!is_inter_block(this_mbmi)) sub8x8_inter = 0; |
675 | 0 | if (is_intrabc_block(this_mbmi)) sub8x8_inter = 0; |
676 | 0 | } |
677 | 0 | } |
678 | 0 | } |
679 | 0 |
|
680 | 0 | if (sub8x8_inter) { |
681 | 0 | // block size |
682 | 0 | const int b4_w = block_size_wide[bsize] >> ss_x; |
683 | 0 | const int b4_h = block_size_high[bsize] >> ss_y; |
684 | 0 | const BLOCK_SIZE plane_bsize = scale_chroma_bsize(bsize, ss_x, ss_y); |
685 | 0 | const int b8_w = block_size_wide[plane_bsize] >> ss_x; |
686 | 0 | const int b8_h = block_size_high[plane_bsize] >> ss_y; |
687 | 0 | assert(!is_compound); |
688 | 0 |
|
689 | 0 | const struct buf_2d orig_pred_buf[2] = { pd->pre[0], pd->pre[1] }; |
690 | 0 |
|
691 | 0 | int row = row_start; |
692 | 0 | int src_stride; |
693 | 0 | for (int y = 0; y < b8_h; y += b4_h) { |
694 | 0 | int col = col_start; |
695 | 0 | for (int x = 0; x < b8_w; x += b4_w) { |
696 | 0 | MB_MODE_INFO *this_mbmi = xd->mi[row * xd->mi_stride + col]; |
697 | 0 | is_compound = has_second_ref(this_mbmi); |
698 | 0 | int tmp_dst_stride = 8; |
699 | 0 | assert(bw < 8 || bh < 8); |
700 | 0 | ConvolveParams conv_params = get_conv_params_no_round( |
701 | 0 | 0, plane, xd->tmp_conv_dst, tmp_dst_stride, is_compound, xd->bd); |
702 | 0 | conv_params.use_jnt_comp_avg = 0; |
703 | 0 | struct buf_2d *const dst_buf = &pd->dst; |
704 | 0 | uint8_t *dst = dst_buf->buf + dst_buf->stride * y + x; |
705 | 0 |
|
706 | 0 | ref = 0; |
707 | 0 | const RefBuffer *ref_buf = |
708 | 0 | &cm->frame_refs[this_mbmi->ref_frame[ref] - LAST_FRAME]; |
709 | 0 |
|
710 | 0 | pd->pre[ref].buf0 = |
711 | 0 | (plane == 1) ? ref_buf->buf->u_buffer : ref_buf->buf->v_buffer; |
712 | 0 | pd->pre[ref].buf = |
713 | 0 | pd->pre[ref].buf0 + scaled_buffer_offset(pre_x, pre_y, |
714 | 0 | ref_buf->buf->uv_stride, |
715 | 0 | &ref_buf->sf); |
716 | 0 | pd->pre[ref].width = ref_buf->buf->uv_crop_width; |
717 | 0 | pd->pre[ref].height = ref_buf->buf->uv_crop_height; |
718 | 0 | pd->pre[ref].stride = ref_buf->buf->uv_stride; |
719 | 0 |
|
720 | 0 | const struct scale_factors *const sf = |
721 | 0 | is_intrabc ? &cm->sf_identity : &ref_buf->sf; |
722 | 0 | struct buf_2d *const pre_buf = is_intrabc ? dst_buf : &pd->pre[ref]; |
723 | 0 |
|
724 | 0 | const MV mv = this_mbmi->mv[ref].as_mv; |
725 | 0 |
|
726 | 0 | uint8_t *pre; |
727 | 0 | SubpelParams subpel_params; |
728 | 0 | PadBlock block; |
729 | 0 | MV32 scaled_mv; |
730 | 0 | int subpel_x_mv, subpel_y_mv; |
731 | 0 | int highbd; |
732 | 0 | WarpTypesAllowed warp_types; |
733 | 0 | warp_types.global_warp_allowed = is_global[ref]; |
734 | 0 | warp_types.local_warp_allowed = this_mbmi->motion_mode == WARPED_CAUSAL; |
735 | 0 |
|
736 | 0 | dec_calc_subpel_params(xd, sf, mv, plane, pre_x, pre_y, x, y, pre_buf, |
737 | 0 | &subpel_params, bw, bh, &block, mi_x, mi_y, |
738 | 0 | &scaled_mv, &subpel_x_mv, &subpel_y_mv); |
739 | 0 | pre = pre_buf->buf0 + block.y0 * pre_buf->stride + block.x0; |
740 | 0 | src_stride = pre_buf->stride; |
741 | 0 | highbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH; |
742 | 0 | extend_mc_border(sf, pre_buf, scaled_mv, block, subpel_x_mv, |
743 | 0 | subpel_y_mv, 0, is_intrabc, highbd, xd->mc_buf[ref], |
744 | 0 | &pre, &src_stride); |
745 | 0 | conv_params.do_average = ref; |
746 | 0 | if (is_masked_compound_type(mi->interinter_comp.type)) { |
747 | 0 | // masked compound type has its own average mechanism |
748 | 0 | conv_params.do_average = 0; |
749 | 0 | } |
750 | 0 |
|
751 | 0 | av1_make_inter_predictor( |
752 | 0 | pre, src_stride, dst, dst_buf->stride, &subpel_params, sf, b4_w, |
753 | 0 | b4_h, &conv_params, this_mbmi->interp_filters, &warp_types, |
754 | 0 | (mi_x >> pd->subsampling_x) + x, (mi_y >> pd->subsampling_y) + y, |
755 | 0 | plane, ref, mi, build_for_obmc, xd, cm->allow_warped_motion); |
756 | 0 |
|
757 | 0 | ++col; |
758 | 0 | } |
759 | 0 | ++row; |
760 | 0 | } |
761 | 0 |
|
762 | 0 | for (ref = 0; ref < 2; ++ref) pd->pre[ref] = orig_pred_buf[ref]; |
763 | 0 | return; |
764 | 0 | } |
765 | 0 |
|
766 | 0 | { |
767 | 0 | struct buf_2d *const dst_buf = &pd->dst; |
768 | 0 | uint8_t *const dst = dst_buf->buf; |
769 | 0 | uint8_t *pre[2]; |
770 | 0 | SubpelParams subpel_params[2]; |
771 | 0 | int src_stride[2]; |
772 | 0 | for (ref = 0; ref < 1 + is_compound; ++ref) { |
773 | 0 | const struct scale_factors *const sf = |
774 | 0 | is_intrabc ? &cm->sf_identity : &xd->block_refs[ref]->sf; |
775 | 0 | struct buf_2d *const pre_buf = is_intrabc ? dst_buf : &pd->pre[ref]; |
776 | 0 | const MV mv = mi->mv[ref].as_mv; |
777 | 0 | PadBlock block; |
778 | 0 | MV32 scaled_mv; |
779 | 0 | int subpel_x_mv, subpel_y_mv; |
780 | 0 | int highbd; |
781 | 0 |
|
782 | 0 | dec_calc_subpel_params(xd, sf, mv, plane, pre_x, pre_y, 0, 0, pre_buf, |
783 | 0 | &subpel_params[ref], bw, bh, &block, mi_x, mi_y, |
784 | 0 | &scaled_mv, &subpel_x_mv, &subpel_y_mv); |
785 | 0 | pre[ref] = pre_buf->buf0 + block.y0 * pre_buf->stride + block.x0; |
786 | 0 | src_stride[ref] = pre_buf->stride; |
787 | 0 | highbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH; |
788 | 0 |
|
789 | 0 | WarpTypesAllowed warp_types; |
790 | 0 | warp_types.global_warp_allowed = is_global[ref]; |
791 | 0 | warp_types.local_warp_allowed = mi->motion_mode == WARPED_CAUSAL; |
792 | 0 | int do_warp = (bw >= 8 && bh >= 8 && |
793 | 0 | av1_allow_warp(mi, &warp_types, |
794 | 0 | &xd->global_motion[mi->ref_frame[ref]], |
795 | 0 | build_for_obmc, subpel_params[ref].xs, |
796 | 0 | subpel_params[ref].ys, NULL)); |
797 | 0 | do_warp = (do_warp && xd->cur_frame_force_integer_mv == 0); |
798 | 0 |
|
799 | 0 | extend_mc_border(sf, pre_buf, scaled_mv, block, subpel_x_mv, subpel_y_mv, |
800 | 0 | do_warp, is_intrabc, highbd, xd->mc_buf[ref], &pre[ref], |
801 | 0 | &src_stride[ref]); |
802 | 0 | } |
803 | 0 |
|
804 | 0 | ConvolveParams conv_params = get_conv_params_no_round( |
805 | 0 | 0, plane, xd->tmp_conv_dst, MAX_SB_SIZE, is_compound, xd->bd); |
806 | 0 | av1_jnt_comp_weight_assign(cm, mi, 0, &conv_params.fwd_offset, |
807 | 0 | &conv_params.bck_offset, |
808 | 0 | &conv_params.use_jnt_comp_avg, is_compound); |
809 | 0 |
|
810 | 0 | for (ref = 0; ref < 1 + is_compound; ++ref) { |
811 | 0 | const struct scale_factors *const sf = |
812 | 0 | is_intrabc ? &cm->sf_identity : &xd->block_refs[ref]->sf; |
813 | 0 | WarpTypesAllowed warp_types; |
814 | 0 | warp_types.global_warp_allowed = is_global[ref]; |
815 | 0 | warp_types.local_warp_allowed = mi->motion_mode == WARPED_CAUSAL; |
816 | 0 | conv_params.do_average = ref; |
817 | 0 | if (is_masked_compound_type(mi->interinter_comp.type)) { |
818 | 0 | // masked compound type has its own average mechanism |
819 | 0 | conv_params.do_average = 0; |
820 | 0 | } |
821 | 0 |
|
822 | 0 | if (ref && is_masked_compound_type(mi->interinter_comp.type)) |
823 | 0 | av1_make_masked_inter_predictor( |
824 | 0 | pre[ref], src_stride[ref], dst, dst_buf->stride, |
825 | 0 | &subpel_params[ref], sf, bw, bh, &conv_params, mi->interp_filters, |
826 | 0 | plane, &warp_types, mi_x >> pd->subsampling_x, |
827 | 0 | mi_y >> pd->subsampling_y, ref, xd, cm->allow_warped_motion); |
828 | 0 | else |
829 | 0 | av1_make_inter_predictor( |
830 | 0 | pre[ref], src_stride[ref], dst, dst_buf->stride, |
831 | 0 | &subpel_params[ref], sf, bw, bh, &conv_params, mi->interp_filters, |
832 | 0 | &warp_types, mi_x >> pd->subsampling_x, mi_y >> pd->subsampling_y, |
833 | 0 | plane, ref, mi, build_for_obmc, xd, cm->allow_warped_motion); |
834 | 0 | } |
835 | 0 | } |
836 | 0 | } |
837 | | |
838 | | static void dec_build_inter_predictors_for_planes(const AV1_COMMON *cm, |
839 | | MACROBLOCKD *xd, |
840 | | BLOCK_SIZE bsize, int mi_row, |
841 | | int mi_col, int plane_from, |
842 | 0 | int plane_to) { |
843 | 0 | int plane; |
844 | 0 | const int mi_x = mi_col * MI_SIZE; |
845 | 0 | const int mi_y = mi_row * MI_SIZE; |
846 | 0 | for (plane = plane_from; plane <= plane_to; ++plane) { |
847 | 0 | const struct macroblockd_plane *pd = &xd->plane[plane]; |
848 | 0 | const int bw = pd->width; |
849 | 0 | const int bh = pd->height; |
850 | 0 |
|
851 | 0 | if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x, |
852 | 0 | pd->subsampling_y)) |
853 | 0 | continue; |
854 | 0 | |
855 | 0 | dec_build_inter_predictors(cm, xd, plane, xd->mi[0], 0, bw, bh, mi_x, mi_y); |
856 | 0 | } |
857 | 0 | } |
858 | | |
859 | | static void dec_build_inter_predictors_sby(const AV1_COMMON *cm, |
860 | | MACROBLOCKD *xd, int mi_row, |
861 | | int mi_col, BUFFER_SET *ctx, |
862 | 0 | BLOCK_SIZE bsize) { |
863 | 0 | dec_build_inter_predictors_for_planes(cm, xd, bsize, mi_row, mi_col, 0, 0); |
864 | 0 |
|
865 | 0 | if (is_interintra_pred(xd->mi[0])) { |
866 | 0 | BUFFER_SET default_ctx = { { xd->plane[0].dst.buf, NULL, NULL }, |
867 | 0 | { xd->plane[0].dst.stride, 0, 0 } }; |
868 | 0 | if (!ctx) ctx = &default_ctx; |
869 | 0 | av1_build_interintra_predictors_sbp(cm, xd, xd->plane[0].dst.buf, |
870 | 0 | xd->plane[0].dst.stride, ctx, 0, bsize); |
871 | 0 | } |
872 | 0 | } |
873 | | |
874 | | static void dec_build_inter_predictors_sbuv(const AV1_COMMON *cm, |
875 | | MACROBLOCKD *xd, int mi_row, |
876 | | int mi_col, BUFFER_SET *ctx, |
877 | 0 | BLOCK_SIZE bsize) { |
878 | 0 | dec_build_inter_predictors_for_planes(cm, xd, bsize, mi_row, mi_col, 1, |
879 | 0 | MAX_MB_PLANE - 1); |
880 | 0 |
|
881 | 0 | if (is_interintra_pred(xd->mi[0])) { |
882 | 0 | BUFFER_SET default_ctx = { |
883 | 0 | { NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf }, |
884 | 0 | { 0, xd->plane[1].dst.stride, xd->plane[2].dst.stride } |
885 | 0 | }; |
886 | 0 | if (!ctx) ctx = &default_ctx; |
887 | 0 | av1_build_interintra_predictors_sbuv( |
888 | 0 | cm, xd, xd->plane[1].dst.buf, xd->plane[2].dst.buf, |
889 | 0 | xd->plane[1].dst.stride, xd->plane[2].dst.stride, ctx, bsize); |
890 | 0 | } |
891 | 0 | } |
892 | | |
893 | | static void dec_build_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd, |
894 | | int mi_row, int mi_col, |
895 | 0 | BUFFER_SET *ctx, BLOCK_SIZE bsize) { |
896 | 0 | const int num_planes = av1_num_planes(cm); |
897 | 0 | dec_build_inter_predictors_sby(cm, xd, mi_row, mi_col, ctx, bsize); |
898 | 0 | if (num_planes > 1) |
899 | 0 | dec_build_inter_predictors_sbuv(cm, xd, mi_row, mi_col, ctx, bsize); |
900 | 0 | } |
901 | | |
902 | | static INLINE void dec_build_prediction_by_above_pred( |
903 | | MACROBLOCKD *xd, int rel_mi_col, uint8_t above_mi_width, |
904 | 0 | MB_MODE_INFO *above_mbmi, void *fun_ctxt, const int num_planes) { |
905 | 0 | struct build_prediction_ctxt *ctxt = (struct build_prediction_ctxt *)fun_ctxt; |
906 | 0 | const int above_mi_col = ctxt->mi_col + rel_mi_col; |
907 | 0 | int mi_x, mi_y; |
908 | 0 | MB_MODE_INFO backup_mbmi = *above_mbmi; |
909 | 0 |
|
910 | 0 | av1_setup_build_prediction_by_above_pred(xd, rel_mi_col, above_mi_width, |
911 | 0 | &backup_mbmi, ctxt, num_planes); |
912 | 0 | mi_x = above_mi_col << MI_SIZE_LOG2; |
913 | 0 | mi_y = ctxt->mi_row << MI_SIZE_LOG2; |
914 | 0 |
|
915 | 0 | const BLOCK_SIZE bsize = xd->mi[0]->sb_type; |
916 | 0 |
|
917 | 0 | for (int j = 0; j < num_planes; ++j) { |
918 | 0 | const struct macroblockd_plane *pd = &xd->plane[j]; |
919 | 0 | int bw = (above_mi_width * MI_SIZE) >> pd->subsampling_x; |
920 | 0 | int bh = clamp(block_size_high[bsize] >> (pd->subsampling_y + 1), 4, |
921 | 0 | block_size_high[BLOCK_64X64] >> (pd->subsampling_y + 1)); |
922 | 0 |
|
923 | 0 | if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 0)) continue; |
924 | 0 | dec_build_inter_predictors(ctxt->cm, xd, j, &backup_mbmi, 1, bw, bh, mi_x, |
925 | 0 | mi_y); |
926 | 0 | } |
927 | 0 | } |
928 | | |
929 | | static void dec_build_prediction_by_above_preds( |
930 | | const AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col, |
931 | | uint8_t *tmp_buf[MAX_MB_PLANE], int tmp_width[MAX_MB_PLANE], |
932 | 0 | int tmp_height[MAX_MB_PLANE], int tmp_stride[MAX_MB_PLANE]) { |
933 | 0 | if (!xd->up_available) return; |
934 | 0 | |
935 | 0 | // Adjust mb_to_bottom_edge to have the correct value for the OBMC |
936 | 0 | // prediction block. This is half the height of the original block, |
937 | 0 | // except for 128-wide blocks, where we only use a height of 32. |
938 | 0 | int this_height = xd->n4_h * MI_SIZE; |
939 | 0 | int pred_height = AOMMIN(this_height / 2, 32); |
940 | 0 | xd->mb_to_bottom_edge += (this_height - pred_height) * 8; |
941 | 0 |
|
942 | 0 | struct build_prediction_ctxt ctxt = { cm, mi_row, |
943 | 0 | mi_col, tmp_buf, |
944 | 0 | tmp_width, tmp_height, |
945 | 0 | tmp_stride, xd->mb_to_right_edge }; |
946 | 0 | BLOCK_SIZE bsize = xd->mi[0]->sb_type; |
947 | 0 | foreach_overlappable_nb_above(cm, xd, mi_col, |
948 | 0 | max_neighbor_obmc[mi_size_wide_log2[bsize]], |
949 | 0 | dec_build_prediction_by_above_pred, &ctxt); |
950 | 0 |
|
951 | 0 | xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8); |
952 | 0 | xd->mb_to_right_edge = ctxt.mb_to_far_edge; |
953 | 0 | xd->mb_to_bottom_edge -= (this_height - pred_height) * 8; |
954 | 0 | } |
955 | | |
956 | | static INLINE void dec_build_prediction_by_left_pred( |
957 | | MACROBLOCKD *xd, int rel_mi_row, uint8_t left_mi_height, |
958 | 0 | MB_MODE_INFO *left_mbmi, void *fun_ctxt, const int num_planes) { |
959 | 0 | struct build_prediction_ctxt *ctxt = (struct build_prediction_ctxt *)fun_ctxt; |
960 | 0 | const int left_mi_row = ctxt->mi_row + rel_mi_row; |
961 | 0 | int mi_x, mi_y; |
962 | 0 | MB_MODE_INFO backup_mbmi = *left_mbmi; |
963 | 0 |
|
964 | 0 | av1_setup_build_prediction_by_left_pred(xd, rel_mi_row, left_mi_height, |
965 | 0 | &backup_mbmi, ctxt, num_planes); |
966 | 0 | mi_x = ctxt->mi_col << MI_SIZE_LOG2; |
967 | 0 | mi_y = left_mi_row << MI_SIZE_LOG2; |
968 | 0 | const BLOCK_SIZE bsize = xd->mi[0]->sb_type; |
969 | 0 |
|
970 | 0 | for (int j = 0; j < num_planes; ++j) { |
971 | 0 | const struct macroblockd_plane *pd = &xd->plane[j]; |
972 | 0 | int bw = clamp(block_size_wide[bsize] >> (pd->subsampling_x + 1), 4, |
973 | 0 | block_size_wide[BLOCK_64X64] >> (pd->subsampling_x + 1)); |
974 | 0 | int bh = (left_mi_height << MI_SIZE_LOG2) >> pd->subsampling_y; |
975 | 0 |
|
976 | 0 | if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 1)) continue; |
977 | 0 | dec_build_inter_predictors(ctxt->cm, xd, j, &backup_mbmi, 1, bw, bh, mi_x, |
978 | 0 | mi_y); |
979 | 0 | } |
980 | 0 | } |
981 | | |
982 | | static void dec_build_prediction_by_left_preds( |
983 | | const AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col, |
984 | | uint8_t *tmp_buf[MAX_MB_PLANE], int tmp_width[MAX_MB_PLANE], |
985 | 0 | int tmp_height[MAX_MB_PLANE], int tmp_stride[MAX_MB_PLANE]) { |
986 | 0 | if (!xd->left_available) return; |
987 | 0 | |
988 | 0 | // Adjust mb_to_right_edge to have the correct value for the OBMC |
989 | 0 | // prediction block. This is half the width of the original block, |
990 | 0 | // except for 128-wide blocks, where we only use a width of 32. |
991 | 0 | int this_width = xd->n4_w * MI_SIZE; |
992 | 0 | int pred_width = AOMMIN(this_width / 2, 32); |
993 | 0 | xd->mb_to_right_edge += (this_width - pred_width) * 8; |
994 | 0 |
|
995 | 0 | struct build_prediction_ctxt ctxt = { cm, mi_row, |
996 | 0 | mi_col, tmp_buf, |
997 | 0 | tmp_width, tmp_height, |
998 | 0 | tmp_stride, xd->mb_to_bottom_edge }; |
999 | 0 | BLOCK_SIZE bsize = xd->mi[0]->sb_type; |
1000 | 0 | foreach_overlappable_nb_left(cm, xd, mi_row, |
1001 | 0 | max_neighbor_obmc[mi_size_high_log2[bsize]], |
1002 | 0 | dec_build_prediction_by_left_pred, &ctxt); |
1003 | 0 |
|
1004 | 0 | xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8); |
1005 | 0 | xd->mb_to_right_edge -= (this_width - pred_width) * 8; |
1006 | 0 | xd->mb_to_bottom_edge = ctxt.mb_to_far_edge; |
1007 | 0 | } |
1008 | | |
1009 | | static void dec_build_obmc_inter_predictors_sb(const AV1_COMMON *cm, |
1010 | | MACROBLOCKD *xd, int mi_row, |
1011 | 0 | int mi_col) { |
1012 | 0 | const int num_planes = av1_num_planes(cm); |
1013 | 0 | uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE]; |
1014 | 0 | int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE }; |
1015 | 0 | int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE }; |
1016 | 0 | int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE }; |
1017 | 0 | int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE }; |
1018 | 0 | int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE }; |
1019 | 0 | int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE }; |
1020 | 0 |
|
1021 | 0 | if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
1022 | 0 | int len = sizeof(uint16_t); |
1023 | 0 | dst_buf1[0] = CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[0]); |
1024 | 0 | dst_buf1[1] = |
1025 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE * len); |
1026 | 0 | dst_buf1[2] = |
1027 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE * 2 * len); |
1028 | 0 | dst_buf2[0] = CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[1]); |
1029 | 0 | dst_buf2[1] = |
1030 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE * len); |
1031 | 0 | dst_buf2[2] = |
1032 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE * 2 * len); |
1033 | 0 | } else { |
1034 | 0 | dst_buf1[0] = xd->tmp_obmc_bufs[0]; |
1035 | 0 | dst_buf1[1] = xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE; |
1036 | 0 | dst_buf1[2] = xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE * 2; |
1037 | 0 | dst_buf2[0] = xd->tmp_obmc_bufs[1]; |
1038 | 0 | dst_buf2[1] = xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE; |
1039 | 0 | dst_buf2[2] = xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE * 2; |
1040 | 0 | } |
1041 | 0 | dec_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1, |
1042 | 0 | dst_width1, dst_height1, dst_stride1); |
1043 | 0 | dec_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2, |
1044 | 0 | dst_width2, dst_height2, dst_stride2); |
1045 | 0 | av1_setup_dst_planes(xd->plane, xd->mi[0]->sb_type, get_frame_new_buffer(cm), |
1046 | 0 | mi_row, mi_col, 0, num_planes); |
1047 | 0 | av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1, dst_stride1, |
1048 | 0 | dst_buf2, dst_stride2); |
1049 | 0 | } |
1050 | | |
1051 | 0 | static void cfl_store_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd) { |
1052 | 0 | MB_MODE_INFO *mbmi = xd->mi[0]; |
1053 | 0 | if (store_cfl_required(cm, xd)) { |
1054 | 0 | cfl_store_block(xd, mbmi->sb_type, mbmi->tx_size); |
1055 | 0 | } |
1056 | 0 | } |
1057 | | |
// Sets up the reference planes for every reference frame used by the block
// at (mi_row, mi_col), builds its inter prediction into the destination
// planes, and applies OBMC blending when the block's motion mode requires it.
static void predict_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                int mi_row, int mi_col, BLOCK_SIZE bsize) {
  MB_MODE_INFO *mbmi = xd->mi[0];
  const int num_planes = av1_num_planes(cm);
  // Single-reference blocks iterate once; compound blocks set up both refs.
  for (int ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
    if (frame < LAST_FRAME) {
      // Only intra block copy references the current frame (INTRA_FRAME);
      // it is always single-reference and needs no pre-plane setup here.
      assert(is_intrabc_block(mbmi));
      assert(frame == INTRA_FRAME);
      assert(ref == 0);
    } else {
      RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];

      xd->block_refs[ref] = ref_buf;
      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf,
                           num_planes);
    }
  }

  dec_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, bsize);
  if (mbmi->motion_mode == OBMC_CAUSAL) {
    // Overlapped block motion compensation: blend with predictions built
    // from the above and left neighbors.
    dec_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
  }
#if CONFIG_MISMATCH_DEBUG
  // Encoder/decoder mismatch debugging: record each predicted plane before
  // residual reconstruction so it can be compared against the encoder side.
  for (int plane = 0; plane < num_planes; ++plane) {
    const struct macroblockd_plane *pd = &xd->plane[plane];
    int pixel_c, pixel_r;
    mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, 0, 0, pd->subsampling_x,
                    pd->subsampling_y);
    if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
                             pd->subsampling_y))
      continue;
    mismatch_check_block_pre(pd->dst.buf, pd->dst.stride, cm->frame_offset,
                             plane, pixel_c, pixel_r, pd->width, pd->height,
                             xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
  }
#endif
}
1096 | | |
1097 | | static void set_color_index_map_offset(MACROBLOCKD *const xd, int plane, |
1098 | 0 | aom_reader *r) { |
1099 | 0 | (void)r; |
1100 | 0 | Av1ColorMapParam params; |
1101 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
1102 | 0 | av1_get_block_dimensions(mbmi->sb_type, plane, xd, ¶ms.plane_width, |
1103 | 0 | ¶ms.plane_height, NULL, NULL); |
1104 | 0 | xd->color_index_map_offset[plane] += params.plane_width * params.plane_height; |
1105 | 0 | } |
1106 | | |
// Decodes the residual tokens for one coded block and reconstructs it.
// Intra blocks are processed in 64x64 processing units, one transform block
// at a time (read coefficients, then predict+reconstruct). Inter blocks
// first build the motion-compensated prediction for the whole block, then
// (unless skipped) reconstruct the variable-size transform blocks on top of
// it. Finally the palette visitor advances per-plane color index offsets.
static void decode_token_recon_block(AV1Decoder *const pbi,
                                     ThreadData *const td, int mi_row,
                                     int mi_col, aom_reader *r,
                                     BLOCK_SIZE bsize) {
  AV1_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &td->xd;
  const int num_planes = av1_num_planes(cm);

  MB_MODE_INFO *mbmi = xd->mi[0];
  CFL_CTX *const cfl = &xd->cfl;
  // Record whether this mi position carries chroma, so CfL knows whether a
  // chroma block will follow.
  cfl->is_chroma_reference = is_chroma_reference(
      mi_row, mi_col, bsize, cfl->subsampling_x, cfl->subsampling_y);

  if (!is_inter_block(mbmi)) {
    int row, col;
    assert(bsize == get_plane_block_size(bsize, xd->plane[0].subsampling_x,
                                         xd->plane[0].subsampling_y));
    const int max_blocks_wide = max_block_wide(xd, bsize, 0);
    const int max_blocks_high = max_block_high(xd, bsize, 0);
    // Blocks larger than 64x64 are processed in 64x64 units so that the
    // coefficient read order matches the bitstream.
    const BLOCK_SIZE max_unit_bsize = BLOCK_64X64;
    int mu_blocks_wide =
        block_size_wide[max_unit_bsize] >> tx_size_wide_log2[0];
    int mu_blocks_high =
        block_size_high[max_unit_bsize] >> tx_size_high_log2[0];
    mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
    mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);

    for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
      for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
        for (int plane = 0; plane < num_planes; ++plane) {
          const struct macroblockd_plane *const pd = &xd->plane[plane];
          if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
                                   pd->subsampling_y))
            continue;

          const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
          const int stepr = tx_size_high_unit[tx_size];
          const int stepc = tx_size_wide_unit[tx_size];

          // Extent of this 64x64 unit, clamped to the block and expressed
          // in this plane's (subsampled) 4x4 units.
          const int unit_height = ROUND_POWER_OF_TWO(
              AOMMIN(mu_blocks_high + row, max_blocks_high), pd->subsampling_y);
          const int unit_width = ROUND_POWER_OF_TWO(
              AOMMIN(mu_blocks_wide + col, max_blocks_wide), pd->subsampling_x);

          for (int blk_row = row >> pd->subsampling_y; blk_row < unit_height;
               blk_row += stepr) {
            for (int blk_col = col >> pd->subsampling_x; blk_col < unit_width;
                 blk_col += stepc) {
              // Read order matters: coefficients first, then reconstruct.
              td->read_coeffs_tx_intra_block_visit(cm, xd, r, plane, blk_row,
                                                   blk_col, tx_size);
              td->predict_and_recon_intra_block_visit(cm, xd, r, plane, blk_row,
                                                      blk_col, tx_size);
              set_cb_buffer_offsets(xd, tx_size, plane);
            }
          }
        }
      }
    }
  } else {
    td->predict_inter_block_visit(cm, xd, mi_row, mi_col, bsize);
    // Reconstruction
    if (!mbmi->skip) {
      int eobtotal = 0;

      const int max_blocks_wide = max_block_wide(xd, bsize, 0);
      const int max_blocks_high = max_block_high(xd, bsize, 0);
      int row, col;

      const BLOCK_SIZE max_unit_bsize = BLOCK_64X64;
      assert(max_unit_bsize ==
             get_plane_block_size(BLOCK_64X64, xd->plane[0].subsampling_x,
                                  xd->plane[0].subsampling_y));
      int mu_blocks_wide =
          block_size_wide[max_unit_bsize] >> tx_size_wide_log2[0];
      int mu_blocks_high =
          block_size_high[max_unit_bsize] >> tx_size_high_log2[0];

      mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
      mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);

      for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
        for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
          for (int plane = 0; plane < num_planes; ++plane) {
            const struct macroblockd_plane *const pd = &xd->plane[plane];
            if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
                                     pd->subsampling_y))
              continue;
            const BLOCK_SIZE bsizec =
                scale_chroma_bsize(bsize, pd->subsampling_x, pd->subsampling_y);
            const BLOCK_SIZE plane_bsize = get_plane_block_size(
                bsizec, pd->subsampling_x, pd->subsampling_y);

            // Inter blocks use recursive (variable) transform partitioning;
            // decode_reconstruct_tx() recurses below this maximum size.
            const TX_SIZE max_tx_size =
                get_vartx_max_txsize(xd, plane_bsize, plane);
            const int bh_var_tx = tx_size_high_unit[max_tx_size];
            const int bw_var_tx = tx_size_wide_unit[max_tx_size];
            int block = 0;
            int step =
                tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
            int blk_row, blk_col;
            const int unit_height = ROUND_POWER_OF_TWO(
                AOMMIN(mu_blocks_high + row, max_blocks_high),
                pd->subsampling_y);
            const int unit_width = ROUND_POWER_OF_TWO(
                AOMMIN(mu_blocks_wide + col, max_blocks_wide),
                pd->subsampling_x);

            for (blk_row = row >> pd->subsampling_y; blk_row < unit_height;
                 blk_row += bh_var_tx) {
              for (blk_col = col >> pd->subsampling_x; blk_col < unit_width;
                   blk_col += bw_var_tx) {
                decode_reconstruct_tx(cm, td, r, mbmi, plane, plane_bsize,
                                      blk_row, blk_col, block, max_tx_size,
                                      &eobtotal);
                block += step;
              }
            }
          }
        }
      }
    }
    td->cfl_store_inter_block_visit(cm, xd);
  }

  av1_visit_palette(pbi, xd, mi_row, mi_col, r, bsize,
                    set_color_index_map_offset);
}
1234 | | |
1235 | | #if LOOP_FILTER_BITMASK |
1236 | | static void store_bitmask_vartx(AV1_COMMON *cm, int mi_row, int mi_col, |
1237 | | BLOCK_SIZE bsize, TX_SIZE tx_size, |
1238 | | MB_MODE_INFO *mbmi); |
1239 | | #endif |
1240 | | |
// Recursively reads the variable transform-size partition tree for an inter
// block from the bitstream. At each node a split flag is decoded; leaves
// write the chosen transform size into mbmi->inter_tx_size (for every 4x4
// unit they cover) and update the above/left txfm partition contexts. With
// LOOP_FILTER_BITMASK the loop-filter bitmask is populated at the leaves.
static void read_tx_size_vartx(MACROBLOCKD *xd, MB_MODE_INFO *mbmi,
                               TX_SIZE tx_size, int depth,
#if LOOP_FILTER_BITMASK
                               AV1_COMMON *cm, int mi_row, int mi_col,
#endif
                               int blk_row, int blk_col, aom_reader *r) {
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  int is_split = 0;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int max_blocks_high = max_block_high(xd, bsize, 0);
  const int max_blocks_wide = max_block_wide(xd, bsize, 0);
  // Sub-blocks outside the visible frame are not coded.
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
  assert(tx_size > TX_4X4);

  // At maximum depth no split flag is coded: force a leaf at tx_size.
  if (depth == MAX_VARTX_DEPTH) {
    for (int idy = 0; idy < tx_size_high_unit[tx_size]; ++idy) {
      for (int idx = 0; idx < tx_size_wide_unit[tx_size]; ++idx) {
        const int index =
            av1_get_txb_size_index(bsize, blk_row + idy, blk_col + idx);
        mbmi->inter_tx_size[index] = tx_size;
      }
    }
    mbmi->tx_size = tx_size;
    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
    return;
  }

  const int ctx = txfm_partition_context(xd->above_txfm_context + blk_col,
                                         xd->left_txfm_context + blk_row,
                                         mbmi->sb_type, tx_size);
  is_split = aom_read_symbol(r, ec_ctx->txfm_partition_cdf[ctx], 2, ACCT_STR);

  if (is_split) {
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];

    // Splitting to TX_4X4 cannot recurse further; treat it as a leaf.
    if (sub_txs == TX_4X4) {
      for (int idy = 0; idy < tx_size_high_unit[tx_size]; ++idy) {
        for (int idx = 0; idx < tx_size_wide_unit[tx_size]; ++idx) {
          const int index =
              av1_get_txb_size_index(bsize, blk_row + idy, blk_col + idx);
          mbmi->inter_tx_size[index] = sub_txs;
        }
      }
      mbmi->tx_size = sub_txs;
      txfm_partition_update(xd->above_txfm_context + blk_col,
                            xd->left_txfm_context + blk_row, sub_txs, tx_size);
#if LOOP_FILTER_BITMASK
      store_bitmask_vartx(cm, mi_row + blk_row, mi_col + blk_col, BLOCK_8X8,
                          TX_4X4, mbmi);
#endif
      return;
    }
#if LOOP_FILTER_BITMASK
    // The children will hit the forced-leaf path above (which does not store
    // a bitmask), so record the sub transform size for this region here.
    if (depth + 1 == MAX_VARTX_DEPTH) {
      store_bitmask_vartx(cm, mi_row + blk_row, mi_col + blk_col,
                          txsize_to_bsize[tx_size], sub_txs, mbmi);
    }
#endif

    assert(bsw > 0 && bsh > 0);
    for (int row = 0; row < tx_size_high_unit[tx_size]; row += bsh) {
      for (int col = 0; col < tx_size_wide_unit[tx_size]; col += bsw) {
        int offsetr = blk_row + row;
        int offsetc = blk_col + col;
        read_tx_size_vartx(xd, mbmi, sub_txs, depth + 1,
#if LOOP_FILTER_BITMASK
                           cm, mi_row, mi_col,
#endif
                           offsetr, offsetc, r);
      }
    }
  } else {
    // No split: this node is a leaf coded at tx_size.
    for (int idy = 0; idy < tx_size_high_unit[tx_size]; ++idy) {
      for (int idx = 0; idx < tx_size_wide_unit[tx_size]; ++idx) {
        const int index =
            av1_get_txb_size_index(bsize, blk_row + idy, blk_col + idx);
        mbmi->inter_tx_size[index] = tx_size;
      }
    }
    mbmi->tx_size = tx_size;
    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
#if LOOP_FILTER_BITMASK
    store_bitmask_vartx(cm, mi_row + blk_row, mi_col + blk_col,
                        txsize_to_bsize[tx_size], tx_size, mbmi);
#endif
  }
}
1332 | | |
1333 | 0 | static TX_SIZE read_selected_tx_size(MACROBLOCKD *xd, aom_reader *r) { |
1334 | 0 | // TODO(debargha): Clean up the logic here. This function should only |
1335 | 0 | // be called for intra. |
1336 | 0 | const BLOCK_SIZE bsize = xd->mi[0]->sb_type; |
1337 | 0 | const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize); |
1338 | 0 | const int max_depths = bsize_to_max_depth(bsize); |
1339 | 0 | const int ctx = get_tx_size_context(xd); |
1340 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1341 | 0 | const int depth = aom_read_symbol(r, ec_ctx->tx_size_cdf[tx_size_cat][ctx], |
1342 | 0 | max_depths + 1, ACCT_STR); |
1343 | 0 | assert(depth >= 0 && depth <= max_depths); |
1344 | 0 | const TX_SIZE tx_size = depth_to_tx_size(depth, bsize); |
1345 | 0 | return tx_size; |
1346 | 0 | } |
1347 | | |
1348 | | static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int is_inter, |
1349 | 0 | int allow_select_inter, aom_reader *r) { |
1350 | 0 | const TX_MODE tx_mode = cm->tx_mode; |
1351 | 0 | const BLOCK_SIZE bsize = xd->mi[0]->sb_type; |
1352 | 0 | if (xd->lossless[xd->mi[0]->segment_id]) return TX_4X4; |
1353 | 0 | |
1354 | 0 | if (block_signals_txsize(bsize)) { |
1355 | 0 | if ((!is_inter || allow_select_inter) && tx_mode == TX_MODE_SELECT) { |
1356 | 0 | const TX_SIZE coded_tx_size = read_selected_tx_size(xd, r); |
1357 | 0 | return coded_tx_size; |
1358 | 0 | } else { |
1359 | 0 | return tx_size_from_tx_mode(bsize, tx_mode); |
1360 | 0 | } |
1361 | 0 | } else { |
1362 | 0 | assert(IMPLIES(tx_mode == ONLY_4X4, bsize == BLOCK_4X4)); |
1363 | 0 | return max_txsize_rect_lookup[bsize]; |
1364 | 0 | } |
1365 | 0 | } |
1366 | | |
1367 | | #if LOOP_FILTER_BITMASK |
// Records the transform-size bitmasks used by the bitmask-based loop filter
// for the region covered by (bsize, tx_size) at (mi_row, mi_col). A mask id
// is looked up from the block-size/tx-size combination, then the
// corresponding precomputed left/above masks are OR-ed into the 64x64
// LoopFilterMask for both luma and chroma, at the block's offset within
// that 64x64 unit.
static void store_bitmask_vartx(AV1_COMMON *cm, int mi_row, int mi_col,
                                BLOCK_SIZE bsize, TX_SIZE tx_size,
                                MB_MODE_INFO *mbmi) {
  LoopFilterMask *lfm = get_loop_filter_mask(cm, mi_row, mi_col);
  const TX_SIZE tx_size_y_vert = txsize_vert_map[tx_size];
  const TX_SIZE tx_size_y_horz = txsize_horz_map[tx_size];
  // Chroma uses the block's maximum UV transform size rather than tx_size.
  const TX_SIZE tx_size_uv_vert = txsize_vert_map[av1_get_max_uv_txsize(
      mbmi->sb_type, cm->seq_params.subsampling_x,
      cm->seq_params.subsampling_y)];
  const TX_SIZE tx_size_uv_horz = txsize_horz_map[av1_get_max_uv_txsize(
      mbmi->sb_type, cm->seq_params.subsampling_x,
      cm->seq_params.subsampling_y)];
  const int is_square_transform_size = tx_size <= TX_64X64;
  int mask_id = 0;
  int offset = 0;
  const int half_ratio_tx_size_max32 =
      (tx_size > TX_64X64) & (tx_size <= TX_32X16);
  // Select the mask table index. The offsets (19/33/42) are the starting
  // positions of each square tx size's group in the reordered mask tables.
  if (is_square_transform_size) {
    switch (tx_size) {
      case TX_4X4: mask_id = mask_id_table_tx_4x4[bsize]; break;
      case TX_8X8:
        mask_id = mask_id_table_tx_8x8[bsize];
        offset = 19;
        break;
      case TX_16X16:
        mask_id = mask_id_table_tx_16x16[bsize];
        offset = 33;
        break;
      case TX_32X32:
        mask_id = mask_id_table_tx_32x32[bsize];
        offset = 42;
        break;
      case TX_64X64: mask_id = 46; break;
      default: assert(!is_square_transform_size); return;
    }
    mask_id += offset;
  } else if (half_ratio_tx_size_max32) {
    int tx_size_equal_block_size = bsize == txsize_to_bsize[tx_size];
    mask_id = 47 + 2 * (tx_size - TX_4X8) + (tx_size_equal_block_size ? 0 : 1);
  } else if (tx_size == TX_32X64) {
    mask_id = 59;
  } else if (tx_size == TX_64X32) {
    mask_id = 60;
  } else {  // quarter ratio tx size
    mask_id = 61 + (tx_size - TX_4X16);
  }
  // Position of this block inside its enclosing 64x64 loop-filter unit.
  int index = 0;
  const int row = mi_row % MI_SIZE_64X64;
  const int col = mi_col % MI_SIZE_64X64;
  const int shift = get_index_shift(col, row, &index);
  const int vert_shift = tx_size_y_vert <= TX_8X8 ? shift : col;
  for (int i = 0; i + index < 4; ++i) {
    // y vertical.
    lfm->tx_size_ver[0][tx_size_y_horz].bits[i + index] |=
        (left_mask_univariant_reordered[mask_id].bits[i] << vert_shift);
    // y horizontal.
    lfm->tx_size_hor[0][tx_size_y_vert].bits[i + index] |=
        (above_mask_univariant_reordered[mask_id].bits[i] << shift);
    // u/v vertical.
    lfm->tx_size_ver[1][tx_size_uv_horz].bits[i + index] |=
        (left_mask_univariant_reordered[mask_id].bits[i] << vert_shift);
    // u/v horizontal.
    lfm->tx_size_hor[1][tx_size_uv_vert].bits[i + index] |=
        (above_mask_univariant_reordered[mask_id].bits[i] << shift);
  }
}
1434 | | |
1435 | | static void store_bitmask_univariant_tx(AV1_COMMON *cm, int mi_row, int mi_col, |
1436 | | BLOCK_SIZE bsize, MB_MODE_INFO *mbmi) { |
1437 | | // Use a lookup table that provides one bitmask for a given block size and |
1438 | | // a univariant transform size. |
1439 | | int index; |
1440 | | int shift; |
1441 | | int row; |
1442 | | int col; |
1443 | | LoopFilterMask *lfm = get_loop_filter_mask(cm, mi_row, mi_col); |
1444 | | const TX_SIZE tx_size_y_vert = txsize_vert_map[mbmi->tx_size]; |
1445 | | const TX_SIZE tx_size_y_horz = txsize_horz_map[mbmi->tx_size]; |
1446 | | const TX_SIZE tx_size_uv_vert = txsize_vert_map[av1_get_max_uv_txsize( |
1447 | | mbmi->sb_type, cm->seq_params.subsampling_x, |
1448 | | cm->seq_params.subsampling_y)]; |
1449 | | const TX_SIZE tx_size_uv_horz = txsize_horz_map[av1_get_max_uv_txsize( |
1450 | | mbmi->sb_type, cm->seq_params.subsampling_x, |
1451 | | cm->seq_params.subsampling_y)]; |
1452 | | const int is_square_transform_size = mbmi->tx_size <= TX_64X64; |
1453 | | int mask_id = 0; |
1454 | | int offset = 0; |
1455 | | const int half_ratio_tx_size_max32 = |
1456 | | (mbmi->tx_size > TX_64X64) & (mbmi->tx_size <= TX_32X16); |
1457 | | if (is_square_transform_size) { |
1458 | | switch (mbmi->tx_size) { |
1459 | | case TX_4X4: mask_id = mask_id_table_tx_4x4[bsize]; break; |
1460 | | case TX_8X8: |
1461 | | mask_id = mask_id_table_tx_8x8[bsize]; |
1462 | | offset = 19; |
1463 | | break; |
1464 | | case TX_16X16: |
1465 | | mask_id = mask_id_table_tx_16x16[bsize]; |
1466 | | offset = 33; |
1467 | | break; |
1468 | | case TX_32X32: |
1469 | | mask_id = mask_id_table_tx_32x32[bsize]; |
1470 | | offset = 42; |
1471 | | break; |
1472 | | case TX_64X64: mask_id = 46; break; |
1473 | | default: assert(!is_square_transform_size); return; |
1474 | | } |
1475 | | mask_id += offset; |
1476 | | } else if (half_ratio_tx_size_max32) { |
1477 | | int tx_size_equal_block_size = bsize == txsize_to_bsize[mbmi->tx_size]; |
1478 | | mask_id = |
1479 | | 47 + 2 * (mbmi->tx_size - TX_4X8) + (tx_size_equal_block_size ? 0 : 1); |
1480 | | } else if (mbmi->tx_size == TX_32X64) { |
1481 | | mask_id = 59; |
1482 | | } else if (mbmi->tx_size == TX_64X32) { |
1483 | | mask_id = 60; |
1484 | | } else { // quarter ratio tx size |
1485 | | mask_id = 61 + (mbmi->tx_size - TX_4X16); |
1486 | | } |
1487 | | row = mi_row % MI_SIZE_64X64; |
1488 | | col = mi_col % MI_SIZE_64X64; |
1489 | | shift = get_index_shift(col, row, &index); |
1490 | | const int vert_shift = tx_size_y_vert <= TX_8X8 ? shift : col; |
1491 | | for (int i = 0; i + index < 4; ++i) { |
1492 | | // y vertical. |
1493 | | lfm->tx_size_ver[0][tx_size_y_horz].bits[i + index] |= |
1494 | | (left_mask_univariant_reordered[mask_id].bits[i] << vert_shift); |
1495 | | // y horizontal. |
1496 | | lfm->tx_size_hor[0][tx_size_y_vert].bits[i + index] |= |
1497 | | (above_mask_univariant_reordered[mask_id].bits[i] << shift); |
1498 | | // u/v vertical. |
1499 | | lfm->tx_size_ver[1][tx_size_uv_horz].bits[i + index] |= |
1500 | | (left_mask_univariant_reordered[mask_id].bits[i] << vert_shift); |
1501 | | // u/v horizontal. |
1502 | | lfm->tx_size_hor[1][tx_size_uv_vert].bits[i + index] |= |
1503 | | (above_mask_univariant_reordered[mask_id].bits[i] << shift); |
1504 | | } |
1505 | | } |
1506 | | |
1507 | | static void store_bitmask_other_info(AV1_COMMON *cm, int mi_row, int mi_col, |
1508 | | BLOCK_SIZE bsize, MB_MODE_INFO *mbmi) { |
1509 | | int index; |
1510 | | int shift; |
1511 | | int row; |
1512 | | LoopFilterMask *lfm = get_loop_filter_mask(cm, mi_row, mi_col); |
1513 | | const int row_start = mi_row % MI_SIZE_64X64; |
1514 | | const int col_start = mi_col % MI_SIZE_64X64; |
1515 | | shift = get_index_shift(col_start, row_start, &index); |
1516 | | const uint64_t top_edge_mask = |
1517 | | ((uint64_t)1 << (shift + mi_size_wide[bsize])) - ((uint64_t)1 << shift); |
1518 | | lfm->is_horz_border.bits[index] |= top_edge_mask; |
1519 | | const int is_vert_border = mask_id_table_vert_border[bsize]; |
1520 | | const int vert_shift = block_size_high[bsize] <= 8 ? shift : col_start; |
1521 | | for (int i = 0; i + index < 4; ++i) { |
1522 | | lfm->is_vert_border.bits[i + index] |= |
1523 | | (left_mask_univariant_reordered[is_vert_border].bits[i] << vert_shift); |
1524 | | } |
1525 | | const int is_skip = mbmi->skip && is_inter_block(mbmi); |
1526 | | if (is_skip) { |
1527 | | const int is_skip_mask = mask_id_table_tx_4x4[bsize]; |
1528 | | for (int i = 0; i + index < 4; ++i) { |
1529 | | lfm->skip.bits[i + index] |= |
1530 | | (above_mask_univariant_reordered[is_skip_mask].bits[i] << shift); |
1531 | | } |
1532 | | } |
1533 | | const uint8_t level_vert_y = get_filter_level(cm, &cm->lf_info, 0, 0, mbmi); |
1534 | | const uint8_t level_horz_y = get_filter_level(cm, &cm->lf_info, 1, 0, mbmi); |
1535 | | const uint8_t level_u = get_filter_level(cm, &cm->lf_info, 0, 1, mbmi); |
1536 | | const uint8_t level_v = get_filter_level(cm, &cm->lf_info, 0, 2, mbmi); |
1537 | | for (int r = mi_row; r < mi_row + mi_size_high[bsize]; r++) { |
1538 | | index = 0; |
1539 | | row = r % MI_SIZE_64X64; |
1540 | | memset(&lfm->lfl_y_ver[row][col_start], level_vert_y, |
1541 | | sizeof(uint8_t) * mi_size_wide[bsize]); |
1542 | | memset(&lfm->lfl_y_hor[row][col_start], level_horz_y, |
1543 | | sizeof(uint8_t) * mi_size_wide[bsize]); |
1544 | | memset(&lfm->lfl_u[row][col_start], level_u, |
1545 | | sizeof(uint8_t) * mi_size_wide[bsize]); |
1546 | | memset(&lfm->lfl_v[row][col_start], level_v, |
1547 | | sizeof(uint8_t) * mi_size_wide[bsize]); |
1548 | | } |
1549 | | } |
1550 | | #endif |
1551 | | |
static void parse_decode_block(AV1Decoder *const pbi, ThreadData *const td,
                               int mi_row, int mi_col, aom_reader *r,
                               PARTITION_TYPE partition, BLOCK_SIZE bsize) {
  // Parses one coded block (mode info, palette tokens, tx sizes, delta-q
  // dequant tables) from the bitstream and then reconstructs it.
  MACROBLOCKD *const xd = &td->xd;
  decode_mbmi_block(pbi, xd, mi_row, mi_col, r, partition, bsize);

  av1_visit_palette(pbi, xd, mi_row, mi_col, r, bsize,
                    av1_decode_palette_tokens);

  AV1_COMMON *cm = &pbi->common;
  const int num_planes = av1_num_planes(cm);
  MB_MODE_INFO *mbmi = xd->mi[0];
  int inter_block_tx = is_inter_block(mbmi) || is_intrabc_block(mbmi);
  // Variable tx-size coding applies only to non-skip inter/intrabc blocks
  // when the frame selects tx sizes per block and the segment is lossy.
  if (cm->tx_mode == TX_MODE_SELECT && block_signals_txsize(bsize) &&
      !mbmi->skip && inter_block_tx && !xd->lossless[mbmi->segment_id]) {
    const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
    const int bh = tx_size_high_unit[max_tx_size];
    const int bw = tx_size_wide_unit[max_tx_size];
    const int width = block_size_wide[bsize] >> tx_size_wide_log2[0];
    const int height = block_size_high[bsize] >> tx_size_high_log2[0];

    // Read a (possibly recursive) tx-size partition for each max-size unit.
    for (int idy = 0; idy < height; idy += bh)
      for (int idx = 0; idx < width; idx += bw)
        read_tx_size_vartx(xd, mbmi, max_tx_size, 0,
#if LOOP_FILTER_BITMASK
                           cm, mi_row, mi_col,
#endif
                           idy, idx, r);
  } else {
    // A single tx size covers the whole block.
    mbmi->tx_size = read_tx_size(cm, xd, inter_block_tx, !mbmi->skip, r);
    if (inter_block_tx)
      memset(mbmi->inter_tx_size, mbmi->tx_size, sizeof(mbmi->inter_tx_size));
    set_txfm_ctxs(mbmi->tx_size, xd->n4_w, xd->n4_h,
                  mbmi->skip && is_inter_block(mbmi), xd);
#if LOOP_FILTER_BITMASK
    // Loop-filter masks are kept per 64x64 unit; blocks larger than 64x64
    // are stored one 64x64 unit at a time.
    const int w = mi_size_wide[bsize];
    const int h = mi_size_high[bsize];
    if (w <= mi_size_wide[BLOCK_64X64] && h <= mi_size_high[BLOCK_64X64]) {
      store_bitmask_univariant_tx(cm, mi_row, mi_col, bsize, mbmi);
    } else {
      for (int row = 0; row < h; row += mi_size_high[BLOCK_64X64]) {
        for (int col = 0; col < w; col += mi_size_wide[BLOCK_64X64]) {
          store_bitmask_univariant_tx(cm, mi_row + row, mi_col + col,
                                      BLOCK_64X64, mbmi);
        }
      }
    }
#endif
  }
#if LOOP_FILTER_BITMASK
  // Same per-64x64 tiling for the border/skip/filter-level masks.
  const int w = mi_size_wide[bsize];
  const int h = mi_size_high[bsize];
  if (w <= mi_size_wide[BLOCK_64X64] && h <= mi_size_high[BLOCK_64X64]) {
    store_bitmask_other_info(cm, mi_row, mi_col, bsize, mbmi);
  } else {
    for (int row = 0; row < h; row += mi_size_high[BLOCK_64X64]) {
      for (int col = 0; col < w; col += mi_size_wide[BLOCK_64X64]) {
        store_bitmask_other_info(cm, mi_row + row, mi_col + col, BLOCK_64X64,
                                 mbmi);
      }
    }
  }
#endif

  if (cm->delta_q_present_flag) {
    // Delta-q changed the block-level qindex: rebuild the dequant tables for
    // every segment and plane before reconstruction.
    for (int i = 0; i < MAX_SEGMENTS; i++) {
      const int current_qindex =
          av1_get_qindex(&cm->seg, i, xd->current_qindex);
      for (int j = 0; j < num_planes; ++j) {
        const int dc_delta_q =
            j == 0 ? cm->y_dc_delta_q
                   : (j == 1 ? cm->u_dc_delta_q : cm->v_dc_delta_q);
        // Luma AC has no delta; chroma planes have their own AC deltas.
        const int ac_delta_q =
            j == 0 ? 0 : (j == 1 ? cm->u_ac_delta_q : cm->v_ac_delta_q);
        xd->plane[j].seg_dequant_QTX[i][0] = av1_dc_quant_QTX(
            current_qindex, dc_delta_q, cm->seq_params.bit_depth);
        xd->plane[j].seg_dequant_QTX[i][1] = av1_ac_quant_QTX(
            current_qindex, ac_delta_q, cm->seq_params.bit_depth);
      }
    }
  }
  if (mbmi->skip) av1_reset_skip_context(xd, mi_row, mi_col, bsize, num_planes);

  decode_token_recon_block(pbi, td, mi_row, mi_col, r, bsize);

  // Propagate any entropy-decoder error into the per-thread corrupted flag.
  int reader_corrupted_flag = aom_reader_has_error(r);
  aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag);
}
1640 | | |
1641 | | static void set_offsets_for_pred_and_recon(AV1Decoder *const pbi, |
1642 | | ThreadData *const td, int mi_row, |
1643 | 0 | int mi_col, BLOCK_SIZE bsize) { |
1644 | 0 | AV1_COMMON *const cm = &pbi->common; |
1645 | 0 | MACROBLOCKD *const xd = &td->xd; |
1646 | 0 | const int bw = mi_size_wide[bsize]; |
1647 | 0 | const int bh = mi_size_high[bsize]; |
1648 | 0 | const int num_planes = av1_num_planes(cm); |
1649 | 0 |
|
1650 | 0 | const int offset = mi_row * cm->mi_stride + mi_col; |
1651 | 0 | const TileInfo *const tile = &xd->tile; |
1652 | 0 |
|
1653 | 0 | xd->mi = cm->mi_grid_visible + offset; |
1654 | 0 | xd->cfl.mi_row = mi_row; |
1655 | 0 | xd->cfl.mi_col = mi_col; |
1656 | 0 |
|
1657 | 0 | set_plane_n4(xd, bw, bh, num_planes); |
1658 | 0 |
|
1659 | 0 | // Distance of Mb to the various image edges. These are specified to 8th pel |
1660 | 0 | // as they are always compared to values that are in 1/8th pel units |
1661 | 0 | set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); |
1662 | 0 |
|
1663 | 0 | av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row, |
1664 | 0 | mi_col, 0, num_planes); |
1665 | 0 | } |
1666 | | |
static void decode_block(AV1Decoder *const pbi, ThreadData *const td,
                         int mi_row, int mi_col, aom_reader *r,
                         PARTITION_TYPE partition, BLOCK_SIZE bsize) {
  // Decode-only visitor: the mode info was parsed in an earlier pass, so
  // only refresh the prediction/reconstruction offsets and reconstruct.
  (void)partition;  // Unused; kept to match the block_visitor_fn_t signature.
  set_offsets_for_pred_and_recon(pbi, td, mi_row, mi_col, bsize);
  decode_token_recon_block(pbi, td, mi_row, mi_col, r, bsize);
}
1674 | | |
1675 | | static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col, |
1676 | | aom_reader *r, int has_rows, int has_cols, |
1677 | 0 | BLOCK_SIZE bsize) { |
1678 | 0 | const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
1679 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1680 | 0 |
|
1681 | 0 | if (!has_rows && !has_cols) return PARTITION_SPLIT; |
1682 | 0 | |
1683 | 0 | assert(ctx >= 0); |
1684 | 0 | aom_cdf_prob *partition_cdf = ec_ctx->partition_cdf[ctx]; |
1685 | 0 | if (has_rows && has_cols) { |
1686 | 0 | return (PARTITION_TYPE)aom_read_symbol( |
1687 | 0 | r, partition_cdf, partition_cdf_length(bsize), ACCT_STR); |
1688 | 0 | } else if (!has_rows && has_cols) { |
1689 | 0 | assert(bsize > BLOCK_8X8); |
1690 | 0 | aom_cdf_prob cdf[2]; |
1691 | 0 | partition_gather_vert_alike(cdf, partition_cdf, bsize); |
1692 | 0 | assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP)); |
1693 | 0 | return aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ; |
1694 | 0 | } else { |
1695 | 0 | assert(has_rows && !has_cols); |
1696 | 0 | assert(bsize > BLOCK_8X8); |
1697 | 0 | aom_cdf_prob cdf[2]; |
1698 | 0 | partition_gather_horz_alike(cdf, partition_cdf, bsize); |
1699 | 0 | assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP)); |
1700 | 0 | return aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_VERT; |
1701 | 0 | } |
1702 | 0 | } |
1703 | | |
// TODO(slavarnway): eliminate bsize and subsize in future commits
// Recursively parses/decodes the partition tree rooted at (mi_row, mi_col).
// parse_decode_flag selects which pass(es) run (see the table below); the
// parse pass also reads loop-restoration coefficients and the partition
// symbols, while the decode pass replays the recorded partitions.
static void decode_partition(AV1Decoder *const pbi, ThreadData *const td,
                             int mi_row, int mi_col, aom_reader *r,
                             BLOCK_SIZE bsize, int parse_decode_flag) {
  AV1_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &td->xd;
  const int bw = mi_size_wide[bsize];
  const int hbs = bw >> 1;  // half block size, in mi units
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const int quarter_step = bw / 4;  // sub-block step for HORZ_4 / VERT_4
  BLOCK_SIZE bsize2 = get_partition_subsize(bsize, PARTITION_SPLIT);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  // parse_decode_flag takes the following values :
  // 01 - do parse only
  // 10 - do decode only
  // 11 - do parse and decode
  static const block_visitor_fn_t block_visit[4] = {
    NULL, parse_decode_block, decode_block, parse_decode_block
  };

  if (parse_decode_flag & 1) {
    // Read loop-restoration unit coefficients for every restoration unit
    // whose top-left corner falls in this superblock region.
    const int num_planes = av1_num_planes(cm);
    for (int plane = 0; plane < num_planes; ++plane) {
      int rcol0, rcol1, rrow0, rrow1;
      if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize,
                                             &rcol0, &rcol1, &rrow0, &rrow1)) {
        const int rstride = cm->rst_info[plane].horz_units_per_tile;
        for (int rrow = rrow0; rrow < rrow1; ++rrow) {
          for (int rcol = rcol0; rcol < rcol1; ++rcol) {
            const int runit_idx = rcol + rrow * rstride;
            loop_restoration_read_sb_coeffs(cm, xd, r, plane, runit_idx);
          }
        }
      }
    }

    // Below 8x8 no partition symbol is coded.
    partition = (bsize < BLOCK_8X8) ? PARTITION_NONE
                                    : read_partition(xd, mi_row, mi_col, r,
                                                     has_rows, has_cols, bsize);
  } else {
    // Decode-only pass: replay the partition recorded during parsing.
    partition = get_partition(cm, mi_row, mi_col, bsize);
  }
  subsize = get_partition_subsize(bsize, partition);

  // Check the bitstream is conformant: if there is subsampling on the
  // chroma planes, subsize must subsample to a valid block size.
  const struct macroblockd_plane *const pd_u = &xd->plane[1];
  if (get_plane_block_size(subsize, pd_u->subsampling_x, pd_u->subsampling_y) ==
      BLOCK_INVALID) {
    aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                       "Block size %dx%d invalid with this subsampling mode",
                       block_size_wide[subsize], block_size_high[subsize]);
  }

#define DEC_BLOCK_STX_ARG
#define DEC_BLOCK_EPT_ARG partition,
#define DEC_BLOCK(db_r, db_c, db_subsize)                                  \
  block_visit[parse_decode_flag](pbi, td, DEC_BLOCK_STX_ARG(db_r), (db_c), r, \
                                 DEC_BLOCK_EPT_ARG(db_subsize))
#define DEC_PARTITION(db_r, db_c, db_subsize)                                 \
  decode_partition(pbi, td, DEC_BLOCK_STX_ARG(db_r), (db_c), r, (db_subsize), \
                   parse_decode_flag)

  // Visit the sub-blocks in raster order; sub-blocks falling outside the
  // frame are skipped (HORZ/VERT second halves, HORZ_4/VERT_4 tails).
  switch (partition) {
    case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
    case PARTITION_HORZ:
      DEC_BLOCK(mi_row, mi_col, subsize);
      if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
      break;
    case PARTITION_VERT:
      DEC_BLOCK(mi_row, mi_col, subsize);
      if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
      break;
    case PARTITION_SPLIT:
      DEC_PARTITION(mi_row, mi_col, subsize);
      DEC_PARTITION(mi_row, mi_col + hbs, subsize);
      DEC_PARTITION(mi_row + hbs, mi_col, subsize);
      DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
      break;
    case PARTITION_HORZ_A:
      DEC_BLOCK(mi_row, mi_col, bsize2);
      DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
      DEC_BLOCK(mi_row + hbs, mi_col, subsize);
      break;
    case PARTITION_HORZ_B:
      DEC_BLOCK(mi_row, mi_col, subsize);
      DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
      DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
      break;
    case PARTITION_VERT_A:
      DEC_BLOCK(mi_row, mi_col, bsize2);
      DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
      DEC_BLOCK(mi_row, mi_col + hbs, subsize);
      break;
    case PARTITION_VERT_B:
      DEC_BLOCK(mi_row, mi_col, subsize);
      DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
      DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
      break;
    case PARTITION_HORZ_4:
      for (int i = 0; i < 4; ++i) {
        int this_mi_row = mi_row + i * quarter_step;
        if (i > 0 && this_mi_row >= cm->mi_rows) break;
        DEC_BLOCK(this_mi_row, mi_col, subsize);
      }
      break;
    case PARTITION_VERT_4:
      for (int i = 0; i < 4; ++i) {
        int this_mi_col = mi_col + i * quarter_step;
        if (i > 0 && this_mi_col >= cm->mi_cols) break;
        DEC_BLOCK(mi_row, this_mi_col, subsize);
      }
      break;
    default: assert(0 && "Invalid partition type");
  }

#undef DEC_PARTITION
#undef DEC_BLOCK
#undef DEC_BLOCK_EPT_ARG
#undef DEC_BLOCK_STX_ARG

  if (parse_decode_flag & 1)
    update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
}
1833 | | |
1834 | | static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end, |
1835 | | const size_t read_size, |
1836 | | struct aom_internal_error_info *error_info, |
1837 | 0 | aom_reader *r, uint8_t allow_update_cdf) { |
1838 | 0 | // Validate the calculated partition length. If the buffer |
1839 | 0 | // described by the partition can't be fully read, then restrict |
1840 | 0 | // it to the portion that can be (for EC mode) or throw an error. |
1841 | 0 | if (!read_is_valid(data, read_size, data_end)) |
1842 | 0 | aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME, |
1843 | 0 | "Truncated packet or corrupt tile length"); |
1844 | 0 |
|
1845 | 0 | if (aom_reader_init(r, data, read_size)) |
1846 | 0 | aom_internal_error(error_info, AOM_CODEC_MEM_ERROR, |
1847 | 0 | "Failed to allocate bool decoder %d", 1); |
1848 | 0 |
|
1849 | 0 | r->allow_update_cdf = allow_update_cdf; |
1850 | 0 | } |
1851 | | |
static void setup_segmentation(AV1_COMMON *const cm,
                               struct aom_read_bit_buffer *rb) {
  // Parses the frame-level segmentation syntax: the enabled flag, the
  // map/data update flags, and (when signaled) the per-segment feature data.
  struct segmentation *const seg = &cm->seg;

  seg->update_map = 0;
  seg->update_data = 0;
  seg->temporal_update = 0;

  seg->enabled = aom_rb_read_bit(rb);
  if (!seg->enabled) {
    // Segmentation disabled: clear the current frame's map and all state.
    if (cm->cur_frame->seg_map)
      memset(cm->cur_frame->seg_map, 0, (cm->mi_rows * cm->mi_cols));

    memset(seg, 0, sizeof(*seg));
    segfeatures_copy(&cm->cur_frame->seg, seg);
    return;
  }
  // The previous frame's segment map is only reusable when the mi grid
  // dimensions match.
  if (cm->seg.enabled && cm->prev_frame &&
      (cm->mi_rows == cm->prev_frame->mi_rows) &&
      (cm->mi_cols == cm->prev_frame->mi_cols)) {
    cm->last_frame_seg_map = cm->prev_frame->seg_map;
  } else {
    cm->last_frame_seg_map = NULL;
  }
  // Read update flags
  if (cm->primary_ref_frame == PRIMARY_REF_NONE) {
    // These frames can't use previous frames, so must signal map + features
    seg->update_map = 1;
    seg->temporal_update = 0;
    seg->update_data = 1;
  } else {
    seg->update_map = aom_rb_read_bit(rb);
    if (seg->update_map) {
      seg->temporal_update = aom_rb_read_bit(rb);
    } else {
      seg->temporal_update = 0;
    }
    seg->update_data = aom_rb_read_bit(rb);
  }

  // Segmentation data update
  if (seg->update_data) {
    av1_clearall_segfeatures(seg);

    // One enabled-bit per (segment, feature); enabled features carry a
    // (possibly signed) literal value, clamped to the feature's legal range.
    for (int i = 0; i < MAX_SEGMENTS; i++) {
      for (int j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = aom_rb_read_bit(rb);
        if (feature_enabled) {
          av1_enable_segfeature(seg, i, j);

          const int data_max = av1_seg_feature_data_max(j);
          const int data_min = -data_max;
          const int ubits = get_unsigned_bits(data_max);

          if (av1_is_segfeature_signed(j)) {
            data = aom_rb_read_inv_signed_literal(rb, ubits);
          } else {
            data = aom_rb_read_literal(rb, ubits);
          }

          data = clamp(data, data_min, data_max);
        }
        av1_set_segdata(seg, i, j, data);
      }
    }
    calculate_segdata(seg);
  } else if (cm->prev_frame) {
    // No data update: inherit the feature set from the previous frame.
    segfeatures_copy(seg, &cm->prev_frame->seg);
  }
  segfeatures_copy(&cm->cur_frame->seg, seg);
}
1924 | | |
static void decode_restoration_mode(AV1_COMMON *cm,
                                    struct aom_read_bit_buffer *rb) {
  // Parses the frame-level loop-restoration syntax: each plane's restoration
  // type and the restoration unit sizes for luma and chroma.
  assert(!cm->all_lossless);
  const int num_planes = av1_num_planes(cm);
  // No restoration syntax is coded when intra block copy is allowed.
  if (cm->allow_intrabc) return;
  int all_none = 1, chroma_none = 1;
  for (int p = 0; p < num_planes; ++p) {
    RestorationInfo *rsi = &cm->rst_info[p];
    // Two bits select among {WIENER, SGRPROJ, SWITCHABLE, NONE}.
    if (aom_rb_read_bit(rb)) {
      rsi->frame_restoration_type =
          aom_rb_read_bit(rb) ? RESTORE_SGRPROJ : RESTORE_WIENER;
    } else {
      rsi->frame_restoration_type =
          aom_rb_read_bit(rb) ? RESTORE_SWITCHABLE : RESTORE_NONE;
    }
    if (rsi->frame_restoration_type != RESTORE_NONE) {
      all_none = 0;
      chroma_none &= p == 0;  // stays 1 only while all non-NONE planes are luma
    }
  }
  if (!all_none) {
    assert(cm->seq_params.sb_size == BLOCK_64X64 ||
           cm->seq_params.sb_size == BLOCK_128X128);
    const int sb_size = cm->seq_params.sb_size == BLOCK_128X128 ? 128 : 64;

    for (int p = 0; p < num_planes; ++p)
      cm->rst_info[p].restoration_unit_size = sb_size;

    RestorationInfo *rsi = &cm->rst_info[0];

    // The luma unit size starts at the superblock size and may be doubled by
    // up to two coded bits (e.g. 64 -> 128 -> 256).
    if (sb_size == 64) {
      rsi->restoration_unit_size <<= aom_rb_read_bit(rb);
    }
    if (rsi->restoration_unit_size > 64) {
      rsi->restoration_unit_size <<= aom_rb_read_bit(rb);
    }
  } else {
    const int size = RESTORATION_UNITSIZE_MAX;
    for (int p = 0; p < num_planes; ++p)
      cm->rst_info[p].restoration_unit_size = size;
  }

  if (num_planes > 1) {
    // With chroma subsampling and chroma restoration in use, one bit decides
    // whether chroma units are downscaled by the subsampling factor.
    int s = AOMMIN(cm->seq_params.subsampling_x, cm->seq_params.subsampling_y);
    if (s && !chroma_none) {
      cm->rst_info[1].restoration_unit_size =
          cm->rst_info[0].restoration_unit_size >> (aom_rb_read_bit(rb) * s);
    } else {
      cm->rst_info[1].restoration_unit_size =
          cm->rst_info[0].restoration_unit_size;
    }
    cm->rst_info[2].restoration_unit_size =
        cm->rst_info[1].restoration_unit_size;
  }
}
1980 | | |
1981 | | static void read_wiener_filter(int wiener_win, WienerInfo *wiener_info, |
1982 | 0 | WienerInfo *ref_wiener_info, aom_reader *rb) { |
1983 | 0 | memset(wiener_info->vfilter, 0, sizeof(wiener_info->vfilter)); |
1984 | 0 | memset(wiener_info->hfilter, 0, sizeof(wiener_info->hfilter)); |
1985 | 0 |
|
1986 | 0 | if (wiener_win == WIENER_WIN) |
1987 | 0 | wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] = |
1988 | 0 | aom_read_primitive_refsubexpfin( |
1989 | 0 | rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1, |
1990 | 0 | WIENER_FILT_TAP0_SUBEXP_K, |
1991 | 0 | ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) + |
1992 | 0 | WIENER_FILT_TAP0_MINV; |
1993 | 0 | else |
1994 | 0 | wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] = 0; |
1995 | 0 | wiener_info->vfilter[1] = wiener_info->vfilter[WIENER_WIN - 2] = |
1996 | 0 | aom_read_primitive_refsubexpfin( |
1997 | 0 | rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1, |
1998 | 0 | WIENER_FILT_TAP1_SUBEXP_K, |
1999 | 0 | ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) + |
2000 | 0 | WIENER_FILT_TAP1_MINV; |
2001 | 0 | wiener_info->vfilter[2] = wiener_info->vfilter[WIENER_WIN - 3] = |
2002 | 0 | aom_read_primitive_refsubexpfin( |
2003 | 0 | rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1, |
2004 | 0 | WIENER_FILT_TAP2_SUBEXP_K, |
2005 | 0 | ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) + |
2006 | 0 | WIENER_FILT_TAP2_MINV; |
2007 | 0 | // The central element has an implicit +WIENER_FILT_STEP |
2008 | 0 | wiener_info->vfilter[WIENER_HALFWIN] = |
2009 | 0 | -2 * (wiener_info->vfilter[0] + wiener_info->vfilter[1] + |
2010 | 0 | wiener_info->vfilter[2]); |
2011 | 0 |
|
2012 | 0 | if (wiener_win == WIENER_WIN) |
2013 | 0 | wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] = |
2014 | 0 | aom_read_primitive_refsubexpfin( |
2015 | 0 | rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1, |
2016 | 0 | WIENER_FILT_TAP0_SUBEXP_K, |
2017 | 0 | ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) + |
2018 | 0 | WIENER_FILT_TAP0_MINV; |
2019 | 0 | else |
2020 | 0 | wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] = 0; |
2021 | 0 | wiener_info->hfilter[1] = wiener_info->hfilter[WIENER_WIN - 2] = |
2022 | 0 | aom_read_primitive_refsubexpfin( |
2023 | 0 | rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1, |
2024 | 0 | WIENER_FILT_TAP1_SUBEXP_K, |
2025 | 0 | ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) + |
2026 | 0 | WIENER_FILT_TAP1_MINV; |
2027 | 0 | wiener_info->hfilter[2] = wiener_info->hfilter[WIENER_WIN - 3] = |
2028 | 0 | aom_read_primitive_refsubexpfin( |
2029 | 0 | rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1, |
2030 | 0 | WIENER_FILT_TAP2_SUBEXP_K, |
2031 | 0 | ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) + |
2032 | 0 | WIENER_FILT_TAP2_MINV; |
2033 | 0 | // The central element has an implicit +WIENER_FILT_STEP |
2034 | 0 | wiener_info->hfilter[WIENER_HALFWIN] = |
2035 | 0 | -2 * (wiener_info->hfilter[0] + wiener_info->hfilter[1] + |
2036 | 0 | wiener_info->hfilter[2]); |
2037 | 0 | memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info)); |
2038 | 0 | } |
2039 | | |
2040 | | static void read_sgrproj_filter(SgrprojInfo *sgrproj_info, |
2041 | 0 | SgrprojInfo *ref_sgrproj_info, aom_reader *rb) { |
2042 | 0 | sgrproj_info->ep = aom_read_literal(rb, SGRPROJ_PARAMS_BITS, ACCT_STR); |
2043 | 0 | const sgr_params_type *params = &sgr_params[sgrproj_info->ep]; |
2044 | 0 |
|
2045 | 0 | if (params->r[0] == 0) { |
2046 | 0 | sgrproj_info->xqd[0] = 0; |
2047 | 0 | sgrproj_info->xqd[1] = |
2048 | 0 | aom_read_primitive_refsubexpfin( |
2049 | 0 | rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K, |
2050 | 0 | ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) + |
2051 | 0 | SGRPROJ_PRJ_MIN1; |
2052 | 0 | } else if (params->r[1] == 0) { |
2053 | 0 | sgrproj_info->xqd[0] = |
2054 | 0 | aom_read_primitive_refsubexpfin( |
2055 | 0 | rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K, |
2056 | 0 | ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) + |
2057 | 0 | SGRPROJ_PRJ_MIN0; |
2058 | 0 | sgrproj_info->xqd[1] = clamp((1 << SGRPROJ_PRJ_BITS) - sgrproj_info->xqd[0], |
2059 | 0 | SGRPROJ_PRJ_MIN1, SGRPROJ_PRJ_MAX1); |
2060 | 0 | } else { |
2061 | 0 | sgrproj_info->xqd[0] = |
2062 | 0 | aom_read_primitive_refsubexpfin( |
2063 | 0 | rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K, |
2064 | 0 | ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) + |
2065 | 0 | SGRPROJ_PRJ_MIN0; |
2066 | 0 | sgrproj_info->xqd[1] = |
2067 | 0 | aom_read_primitive_refsubexpfin( |
2068 | 0 | rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K, |
2069 | 0 | ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) + |
2070 | 0 | SGRPROJ_PRJ_MIN1; |
2071 | 0 | } |
2072 | 0 |
|
2073 | 0 | memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info)); |
2074 | 0 | } |
2075 | | |
// Reads the loop-restoration parameters for one restoration unit of `plane`
// from the symbol reader `r`, storing them in
// cm->rst_info[plane].unit_info[runit_idx]. The per-plane wiener/sgrproj
// state held in `xd` serves as the prediction reference for the filter
// coefficients and is updated by the read_* helpers.
static void loop_restoration_read_sb_coeffs(const AV1_COMMON *const cm,
                                            MACROBLOCKD *xd,
                                            aom_reader *const r, int plane,
                                            int runit_idx) {
  const RestorationInfo *rsi = &cm->rst_info[plane];
  RestorationUnitInfo *rui = &rsi->unit_info[runit_idx];
  // Nothing is signalled when restoration is disabled for this plane.
  if (rsi->frame_restoration_type == RESTORE_NONE) return;
  assert(!cm->all_lossless);

  // Chroma planes use the smaller Wiener filter window.
  const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN;
  WienerInfo *wiener_info = xd->wiener_info + plane;
  SgrprojInfo *sgrproj_info = xd->sgrproj_info + plane;

  if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
    // Per-unit restoration type is coded, followed by its coefficients.
    rui->restoration_type =
        aom_read_symbol(r, xd->tile_ctx->switchable_restore_cdf,
                        RESTORE_SWITCHABLE_TYPES, ACCT_STR);
    switch (rui->restoration_type) {
      case RESTORE_WIENER:
        read_wiener_filter(wiener_win, &rui->wiener_info, wiener_info, r);
        break;
      case RESTORE_SGRPROJ:
        read_sgrproj_filter(&rui->sgrproj_info, sgrproj_info, r);
        break;
      default: assert(rui->restoration_type == RESTORE_NONE); break;
    }
  } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
    // One binary symbol selects Wiener vs. no restoration for this unit.
    if (aom_read_symbol(r, xd->tile_ctx->wiener_restore_cdf, 2, ACCT_STR)) {
      rui->restoration_type = RESTORE_WIENER;
      read_wiener_filter(wiener_win, &rui->wiener_info, wiener_info, r);
    } else {
      rui->restoration_type = RESTORE_NONE;
    }
  } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
    // One binary symbol selects self-guided projection vs. no restoration.
    if (aom_read_symbol(r, xd->tile_ctx->sgrproj_restore_cdf, 2, ACCT_STR)) {
      rui->restoration_type = RESTORE_SGRPROJ;
      read_sgrproj_filter(&rui->sgrproj_info, sgrproj_info, r);
    } else {
      rui->restoration_type = RESTORE_NONE;
    }
  }
}
2119 | | |
// Parses the loop-filter parameters from the uncompressed frame header and
// propagates the resulting ref/mode deltas to the current frame buffer so
// later frames that reference this one see valid values.
static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
  const int num_planes = av1_num_planes(cm);
  struct loopfilter *lf = &cm->lf;
  // No filter parameters are coded when the filter is implicitly off.
  if (cm->allow_intrabc || cm->coded_lossless) {
    // write default deltas to frame buffer
    av1_set_default_ref_deltas(cm->cur_frame->ref_deltas);
    av1_set_default_mode_deltas(cm->cur_frame->mode_deltas);
    return;
  }
  assert(!cm->coded_lossless);
  // Seed deltas from the previous frame when available, else defaults.
  if (cm->prev_frame) {
    // write deltas to frame buffer
    memcpy(lf->ref_deltas, cm->prev_frame->ref_deltas, REF_FRAMES);
    memcpy(lf->mode_deltas, cm->prev_frame->mode_deltas, MAX_MODE_LF_DELTAS);
  } else {
    av1_set_default_ref_deltas(lf->ref_deltas);
    av1_set_default_mode_deltas(lf->mode_deltas);
  }
  lf->filter_level[0] = aom_rb_read_literal(rb, 6);
  lf->filter_level[1] = aom_rb_read_literal(rb, 6);
  // Chroma levels are only coded when some luma filtering is enabled.
  if (num_planes > 1) {
    if (lf->filter_level[0] || lf->filter_level[1]) {
      lf->filter_level_u = aom_rb_read_literal(rb, 6);
      lf->filter_level_v = aom_rb_read_literal(rb, 6);
    }
  }
  lf->sharpness_level = aom_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = aom_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      // Each delta: presence bit, then a 6-bit signed value.
      for (int i = 0; i < REF_FRAMES; i++)
        if (aom_rb_read_bit(rb))
          lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);

      for (int i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (aom_rb_read_bit(rb))
          lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
    }
  }

  // write deltas to frame buffer
  memcpy(cm->cur_frame->ref_deltas, lf->ref_deltas, REF_FRAMES);
  memcpy(cm->cur_frame->mode_deltas, lf->mode_deltas, MAX_MODE_LF_DELTAS);
}
2170 | | |
2171 | 0 | static void setup_cdef(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { |
2172 | 0 | const int num_planes = av1_num_planes(cm); |
2173 | 0 | if (cm->allow_intrabc) return; |
2174 | 0 | cm->cdef_pri_damping = cm->cdef_sec_damping = aom_rb_read_literal(rb, 2) + 3; |
2175 | 0 | cm->cdef_bits = aom_rb_read_literal(rb, 2); |
2176 | 0 | cm->nb_cdef_strengths = 1 << cm->cdef_bits; |
2177 | 0 | for (int i = 0; i < cm->nb_cdef_strengths; i++) { |
2178 | 0 | cm->cdef_strengths[i] = aom_rb_read_literal(rb, CDEF_STRENGTH_BITS); |
2179 | 0 | cm->cdef_uv_strengths[i] = |
2180 | 0 | num_planes > 1 ? aom_rb_read_literal(rb, CDEF_STRENGTH_BITS) : 0; |
2181 | 0 | } |
2182 | 0 | } |
2183 | | |
2184 | 0 | static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) { |
2185 | 0 | return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0; |
2186 | 0 | } |
2187 | | |
// Parses the quantization parameters from the uncompressed frame header:
// base q index, per-plane DC/AC deltas, and (optionally) the quantizer
// matrix levels.
static void setup_quantization(AV1_COMMON *const cm,
                               struct aom_read_bit_buffer *rb) {
  const SequenceHeader *const seq_params = &cm->seq_params;
  const int num_planes = av1_num_planes(cm);
  cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
  cm->y_dc_delta_q = read_delta_q(rb);
  if (num_planes > 1) {
    // diff_uv_delta selects whether V gets its own deltas or shares U's.
    int diff_uv_delta = 0;
    if (seq_params->separate_uv_delta_q) diff_uv_delta = aom_rb_read_bit(rb);
    cm->u_dc_delta_q = read_delta_q(rb);
    cm->u_ac_delta_q = read_delta_q(rb);
    if (diff_uv_delta) {
      cm->v_dc_delta_q = read_delta_q(rb);
      cm->v_ac_delta_q = read_delta_q(rb);
    } else {
      cm->v_dc_delta_q = cm->u_dc_delta_q;
      cm->v_ac_delta_q = cm->u_ac_delta_q;
    }
  } else {
    // Monochrome: chroma deltas are not coded and default to zero.
    cm->u_dc_delta_q = 0;
    cm->u_ac_delta_q = 0;
    cm->v_dc_delta_q = 0;
    cm->v_ac_delta_q = 0;
  }
  cm->dequant_bit_depth = seq_params->bit_depth;
  cm->using_qmatrix = aom_rb_read_bit(rb);
  if (cm->using_qmatrix) {
    cm->qm_y = aom_rb_read_literal(rb, QM_LEVEL_BITS);
    cm->qm_u = aom_rb_read_literal(rb, QM_LEVEL_BITS);
    // V shares U's matrix level unless separate UV deltas were enabled.
    if (!seq_params->separate_uv_delta_q)
      cm->qm_v = cm->qm_u;
    else
      cm->qm_v = aom_rb_read_literal(rb, QM_LEVEL_BITS);
  } else {
    cm->qm_y = 0;
    cm->qm_u = 0;
    cm->qm_v = 0;
  }
}
2227 | | |
2228 | | // Build y/uv dequant values based on segmentation. |
2229 | 0 | static void setup_segmentation_dequant(AV1_COMMON *const cm) { |
2230 | 0 | const int bit_depth = cm->seq_params.bit_depth; |
2231 | 0 | const int using_qm = cm->using_qmatrix; |
2232 | 0 | // When segmentation is disabled, only the first value is used. The |
2233 | 0 | // remaining are don't cares. |
2234 | 0 | const int max_segments = cm->seg.enabled ? MAX_SEGMENTS : 1; |
2235 | 0 | for (int i = 0; i < max_segments; ++i) { |
2236 | 0 | const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex); |
2237 | 0 | cm->y_dequant_QTX[i][0] = |
2238 | 0 | av1_dc_quant_QTX(qindex, cm->y_dc_delta_q, bit_depth); |
2239 | 0 | cm->y_dequant_QTX[i][1] = av1_ac_quant_QTX(qindex, 0, bit_depth); |
2240 | 0 | cm->u_dequant_QTX[i][0] = |
2241 | 0 | av1_dc_quant_QTX(qindex, cm->u_dc_delta_q, bit_depth); |
2242 | 0 | cm->u_dequant_QTX[i][1] = |
2243 | 0 | av1_ac_quant_QTX(qindex, cm->u_ac_delta_q, bit_depth); |
2244 | 0 | cm->v_dequant_QTX[i][0] = |
2245 | 0 | av1_dc_quant_QTX(qindex, cm->v_dc_delta_q, bit_depth); |
2246 | 0 | cm->v_dequant_QTX[i][1] = |
2247 | 0 | av1_ac_quant_QTX(qindex, cm->v_ac_delta_q, bit_depth); |
2248 | 0 | const int lossless = qindex == 0 && cm->y_dc_delta_q == 0 && |
2249 | 0 | cm->u_dc_delta_q == 0 && cm->u_ac_delta_q == 0 && |
2250 | 0 | cm->v_dc_delta_q == 0 && cm->v_ac_delta_q == 0; |
2251 | 0 | // NB: depends on base index so there is only 1 set per frame |
2252 | 0 | // No quant weighting when lossless or signalled not using QM |
2253 | 0 | int qmlevel = (lossless || using_qm == 0) ? NUM_QM_LEVELS - 1 : cm->qm_y; |
2254 | 0 | for (int j = 0; j < TX_SIZES_ALL; ++j) { |
2255 | 0 | cm->y_iqmatrix[i][j] = av1_iqmatrix(cm, qmlevel, AOM_PLANE_Y, j); |
2256 | 0 | } |
2257 | 0 | qmlevel = (lossless || using_qm == 0) ? NUM_QM_LEVELS - 1 : cm->qm_u; |
2258 | 0 | for (int j = 0; j < TX_SIZES_ALL; ++j) { |
2259 | 0 | cm->u_iqmatrix[i][j] = av1_iqmatrix(cm, qmlevel, AOM_PLANE_U, j); |
2260 | 0 | } |
2261 | 0 | qmlevel = (lossless || using_qm == 0) ? NUM_QM_LEVELS - 1 : cm->qm_v; |
2262 | 0 | for (int j = 0; j < TX_SIZES_ALL; ++j) { |
2263 | 0 | cm->v_iqmatrix[i][j] = av1_iqmatrix(cm, qmlevel, AOM_PLANE_V, j); |
2264 | 0 | } |
2265 | 0 | } |
2266 | 0 | } |
2267 | | |
2268 | 0 | static InterpFilter read_frame_interp_filter(struct aom_read_bit_buffer *rb) { |
2269 | 0 | return aom_rb_read_bit(rb) ? SWITCHABLE |
2270 | 0 | : aom_rb_read_literal(rb, LOG_SWITCHABLE_FILTERS); |
2271 | 0 | } |
2272 | | |
2273 | 0 | static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { |
2274 | 0 | cm->render_width = cm->superres_upscaled_width; |
2275 | 0 | cm->render_height = cm->superres_upscaled_height; |
2276 | 0 | if (aom_rb_read_bit(rb)) |
2277 | 0 | av1_read_frame_size(rb, 16, 16, &cm->render_width, &cm->render_height); |
2278 | 0 | } |
2279 | | |
2280 | | // TODO(afergs): make "struct aom_read_bit_buffer *const rb"? |
2281 | | static void setup_superres(AV1_COMMON *const cm, struct aom_read_bit_buffer *rb, |
2282 | 0 | int *width, int *height) { |
2283 | 0 | cm->superres_upscaled_width = *width; |
2284 | 0 | cm->superres_upscaled_height = *height; |
2285 | 0 |
|
2286 | 0 | const SequenceHeader *const seq_params = &cm->seq_params; |
2287 | 0 | if (!seq_params->enable_superres) return; |
2288 | 0 | |
2289 | 0 | if (aom_rb_read_bit(rb)) { |
2290 | 0 | cm->superres_scale_denominator = |
2291 | 0 | (uint8_t)aom_rb_read_literal(rb, SUPERRES_SCALE_BITS); |
2292 | 0 | cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN; |
2293 | 0 | // Don't edit cm->width or cm->height directly, or the buffers won't get |
2294 | 0 | // resized correctly |
2295 | 0 | av1_calculate_scaled_superres_size(width, height, |
2296 | 0 | cm->superres_scale_denominator); |
2297 | 0 | } else { |
2298 | 0 | // 1:1 scaling - ie. no scaling, scale not provided |
2299 | 0 | cm->superres_scale_denominator = SCALE_NUMERATOR; |
2300 | 0 | } |
2301 | 0 | } |
2302 | | |
// Resizes the mode-info/context buffers (and the current frame's MV buffer)
// to match a new frame size. On allocation failure cm->width/height are
// zeroed and a decoder error is raised via aom_internal_error.
static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
  if (cm->width != width || cm->height != height) {
    const int new_mi_rows =
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
    const int new_mi_cols =
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;

    // Allocations in av1_alloc_context_buffers() depend on individual
    // dimensions as well as the overall size.
    if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
      if (av1_alloc_context_buffers(cm, width, height)) {
        // The cm->mi_* values have been cleared and any existing context
        // buffers have been freed. Clear cm->width and cm->height to be
        // consistent and to force a realloc next time.
        cm->width = 0;
        cm->height = 0;
        aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                           "Failed to allocate context buffers");
      }
    } else {
      // Shrinking (or same mi size): reuse the existing allocations.
      av1_set_mb_mi(cm, width, height);
    }
    av1_init_context_buffers(cm);
    cm->width = width;
    cm->height = height;
  }

  // The MV buffer is sized per-frame, so (re)check it even when the frame
  // dimensions are unchanged.
  ensure_mv_buffer(cm->cur_frame, cm);
  cm->cur_frame->width = cm->width;
  cm->cur_frame->height = cm->height;
}
2340 | | |
2341 | 0 | static void setup_buffer_pool(AV1_COMMON *cm) { |
2342 | 0 | BufferPool *const pool = cm->buffer_pool; |
2343 | 0 | const SequenceHeader *const seq_params = &cm->seq_params; |
2344 | 0 |
|
2345 | 0 | lock_buffer_pool(pool); |
2346 | 0 | if (aom_realloc_frame_buffer( |
2347 | 0 | get_frame_new_buffer(cm), cm->width, cm->height, |
2348 | 0 | seq_params->subsampling_x, seq_params->subsampling_y, |
2349 | 0 | seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS, |
2350 | 0 | cm->byte_alignment, |
2351 | 0 | &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, |
2352 | 0 | pool->cb_priv)) { |
2353 | 0 | unlock_buffer_pool(pool); |
2354 | 0 | aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, |
2355 | 0 | "Failed to allocate frame buffer"); |
2356 | 0 | } |
2357 | 0 | unlock_buffer_pool(pool); |
2358 | 0 |
|
2359 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = |
2360 | 0 | seq_params->subsampling_x; |
2361 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = |
2362 | 0 | seq_params->subsampling_y; |
2363 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = |
2364 | 0 | (unsigned int)seq_params->bit_depth; |
2365 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.color_primaries = |
2366 | 0 | seq_params->color_primaries; |
2367 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.transfer_characteristics = |
2368 | 0 | seq_params->transfer_characteristics; |
2369 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.matrix_coefficients = |
2370 | 0 | seq_params->matrix_coefficients; |
2371 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.monochrome = seq_params->monochrome; |
2372 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.chroma_sample_position = |
2373 | 0 | seq_params->chroma_sample_position; |
2374 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.color_range = seq_params->color_range; |
2375 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; |
2376 | 0 | pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height; |
2377 | 0 | } |
2378 | | |
2379 | | static void setup_frame_size(AV1_COMMON *cm, int frame_size_override_flag, |
2380 | 0 | struct aom_read_bit_buffer *rb) { |
2381 | 0 | const SequenceHeader *const seq_params = &cm->seq_params; |
2382 | 0 | int width, height; |
2383 | 0 |
|
2384 | 0 | if (frame_size_override_flag) { |
2385 | 0 | int num_bits_width = seq_params->num_bits_width; |
2386 | 0 | int num_bits_height = seq_params->num_bits_height; |
2387 | 0 | av1_read_frame_size(rb, num_bits_width, num_bits_height, &width, &height); |
2388 | 0 | if (width > seq_params->max_frame_width || |
2389 | 0 | height > seq_params->max_frame_height) { |
2390 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
2391 | 0 | "Frame dimensions are larger than the maximum values"); |
2392 | 0 | } |
2393 | 0 | } else { |
2394 | 0 | width = seq_params->max_frame_width; |
2395 | 0 | height = seq_params->max_frame_height; |
2396 | 0 | } |
2397 | 0 |
|
2398 | 0 | setup_superres(cm, rb, &width, &height); |
2399 | 0 | resize_context_buffers(cm, width, height); |
2400 | 0 | setup_render_size(cm, rb); |
2401 | 0 | setup_buffer_pool(cm); |
2402 | 0 | } |
2403 | | |
2404 | | static void setup_sb_size(SequenceHeader *seq_params, |
2405 | 0 | struct aom_read_bit_buffer *rb) { |
2406 | 0 | set_sb_size(seq_params, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64); |
2407 | 0 | } |
2408 | | |
2409 | | static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth, |
2410 | | int ref_xss, int ref_yss, |
2411 | | aom_bit_depth_t this_bit_depth, |
2412 | 0 | int this_xss, int this_yss) { |
2413 | 0 | return ref_bit_depth == this_bit_depth && ref_xss == this_xss && |
2414 | 0 | ref_yss == this_yss; |
2415 | 0 | } |
2416 | | |
// Determines the frame size either by copying it from one of the reference
// frames (signalled with a per-reference bit) or by reading it explicitly,
// then validates that the references are usable against that size/format.
static void setup_frame_size_with_refs(AV1_COMMON *cm,
                                       struct aom_read_bit_buffer *rb) {
  int width, height;
  int found = 0;
  int has_valid_ref_frame = 0;
  // The first set bit selects which reference supplies the frame size.
  for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
    if (aom_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
      width = buf->y_crop_width;
      height = buf->y_crop_height;
      cm->render_width = buf->render_width;
      cm->render_height = buf->render_height;
      setup_superres(cm, rb, &width, &height);
      resize_context_buffers(cm, width, height);
      found = 1;
      break;
    }
  }

  const SequenceHeader *const seq_params = &cm->seq_params;
  // No reference selected: the size (and render size) are coded explicitly.
  if (!found) {
    int num_bits_width = seq_params->num_bits_width;
    int num_bits_height = seq_params->num_bits_height;

    av1_read_frame_size(rb, num_bits_width, num_bits_height, &width, &height);
    setup_superres(cm, rb, &width, &height);
    resize_context_buffers(cm, width, height);
    setup_render_size(cm, rb);
  }

  if (width <= 0 || height <= 0)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Invalid frame size");

  // Check to make sure at least one of frames that this frame references
  // has valid dimensions.
  for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    has_valid_ref_frame |=
        valid_ref_frame_size(ref_frame->buf->y_crop_width,
                             ref_frame->buf->y_crop_height, width, height);
  }
  if (!has_valid_ref_frame)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Referenced frame has invalid size");
  // All references must also match the current bit depth and subsampling.
  for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    if (!valid_ref_frame_img_fmt(
            ref_frame->buf->bit_depth, ref_frame->buf->subsampling_x,
            ref_frame->buf->subsampling_y, seq_params->bit_depth,
            seq_params->subsampling_x, seq_params->subsampling_y))
      aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                         "Referenced frame has incompatible color format");
  }
  setup_buffer_pool(cm);
}
2473 | | |
2474 | | // Same function as av1_read_uniform but reading from the uncompressed header bit buffer (rb) |
static int rb_read_uniform(struct aom_read_bit_buffer *const rb, int n) {
  const int l = get_unsigned_bits(n);
  const int m = (1 << l) - n;
  // Check the precondition BEFORE `l - 1` is used: the original asserted
  // l != 0 only after the first read had already consumed `l - 1` bits,
  // so the guard could never fire before the invalid use.
  assert(l != 0);
  const int v = aom_rb_read_literal(rb, l - 1);
  // Values below m need l-1 bits; the rest need one extra bit.
  if (v < m)
    return v;
  else
    return (v << 1) - m + aom_rb_read_bit(rb);
}
2485 | | |
// Parses the tile partitioning from the uncompressed header. In uniform
// mode the log2 tile counts are coded as a unary ramp of bits; otherwise
// explicit per-column/row sizes (in superblocks) are coded.
static void read_tile_info_max_tile(AV1_COMMON *const cm,
                                    struct aom_read_bit_buffer *const rb) {
  // Frame dimensions in superblocks (mi units rounded up to a superblock).
  int width_mi = ALIGN_POWER_OF_TWO(cm->mi_cols, cm->seq_params.mib_size_log2);
  int height_mi = ALIGN_POWER_OF_TWO(cm->mi_rows, cm->seq_params.mib_size_log2);
  int width_sb = width_mi >> cm->seq_params.mib_size_log2;
  int height_sb = height_mi >> cm->seq_params.mib_size_log2;

  av1_get_tile_limits(cm);
  cm->uniform_tile_spacing_flag = aom_rb_read_bit(rb);

  // Read tile columns
  if (cm->uniform_tile_spacing_flag) {
    // Unary: each set bit increments log2_tile_cols up to the maximum.
    cm->log2_tile_cols = cm->min_log2_tile_cols;
    while (cm->log2_tile_cols < cm->max_log2_tile_cols) {
      if (!aom_rb_read_bit(rb)) {
        break;
      }
      cm->log2_tile_cols++;
    }
  } else {
    // Explicit column widths in superblocks; the final entry marks the end.
    int i;
    int start_sb;
    for (i = 0, start_sb = 0; width_sb > 0 && i < MAX_TILE_COLS; i++) {
      const int size_sb =
          1 + rb_read_uniform(rb, AOMMIN(width_sb, cm->max_tile_width_sb));
      cm->tile_col_start_sb[i] = start_sb;
      start_sb += size_sb;
      width_sb -= size_sb;
    }
    cm->tile_cols = i;
    cm->tile_col_start_sb[i] = start_sb + width_sb;
  }
  av1_calculate_tile_cols(cm);

  // Read tile rows
  if (cm->uniform_tile_spacing_flag) {
    cm->log2_tile_rows = cm->min_log2_tile_rows;
    while (cm->log2_tile_rows < cm->max_log2_tile_rows) {
      if (!aom_rb_read_bit(rb)) {
        break;
      }
      cm->log2_tile_rows++;
    }
  } else {
    // Explicit row heights, mirroring the column parsing above.
    int i;
    int start_sb;
    for (i = 0, start_sb = 0; height_sb > 0 && i < MAX_TILE_ROWS; i++) {
      const int size_sb =
          1 + rb_read_uniform(rb, AOMMIN(height_sb, cm->max_tile_height_sb));
      cm->tile_row_start_sb[i] = start_sb;
      start_sb += size_sb;
      height_sb -= size_sb;
    }
    cm->tile_rows = i;
    cm->tile_row_start_sb[i] = start_sb + height_sb;
  }
  av1_calculate_tile_rows(cm);
}
2544 | | |
2545 | 0 | void av1_set_single_tile_decoding_mode(AV1_COMMON *const cm) { |
2546 | 0 | cm->single_tile_decoding = 0; |
2547 | 0 | if (cm->large_scale_tile) { |
2548 | 0 | struct loopfilter *lf = &cm->lf; |
2549 | 0 |
|
2550 | 0 | // Figure out single_tile_decoding by loopfilter_level. |
2551 | 0 | const int no_loopfilter = !(lf->filter_level[0] || lf->filter_level[1]); |
2552 | 0 | const int no_cdef = cm->cdef_bits == 0 && cm->cdef_strengths[0] == 0 && |
2553 | 0 | cm->cdef_uv_strengths[0] == 0; |
2554 | 0 | const int no_restoration = |
2555 | 0 | cm->rst_info[0].frame_restoration_type == RESTORE_NONE && |
2556 | 0 | cm->rst_info[1].frame_restoration_type == RESTORE_NONE && |
2557 | 0 | cm->rst_info[2].frame_restoration_type == RESTORE_NONE; |
2558 | 0 | assert(IMPLIES(cm->coded_lossless, no_loopfilter && no_cdef)); |
2559 | 0 | assert(IMPLIES(cm->all_lossless, no_restoration)); |
2560 | 0 | cm->single_tile_decoding = no_loopfilter && no_cdef && no_restoration; |
2561 | 0 | } |
2562 | 0 | } |
2563 | | |
2564 | | static void read_tile_info(AV1Decoder *const pbi, |
2565 | 0 | struct aom_read_bit_buffer *const rb) { |
2566 | 0 | AV1_COMMON *const cm = &pbi->common; |
2567 | 0 |
|
2568 | 0 | read_tile_info_max_tile(cm, rb); |
2569 | 0 |
|
2570 | 0 | cm->context_update_tile_id = 0; |
2571 | 0 | if (cm->tile_rows * cm->tile_cols > 1) { |
2572 | 0 | // tile to use for cdf update |
2573 | 0 | cm->context_update_tile_id = |
2574 | 0 | aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols); |
2575 | 0 | if (cm->context_update_tile_id >= cm->tile_rows * cm->tile_cols) { |
2576 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
2577 | 0 | "Invalid context_update_tile_id"); |
2578 | 0 | } |
2579 | 0 | // tile size magnitude |
2580 | 0 | pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1; |
2581 | 0 | } |
2582 | 0 | } |
2583 | | |
2584 | | #if EXT_TILE_DEBUG |
2585 | | static void read_ext_tile_info(AV1Decoder *const pbi, |
2586 | 0 | struct aom_read_bit_buffer *const rb) { |
2587 | 0 | AV1_COMMON *const cm = &pbi->common; |
2588 | 0 |
|
2589 | 0 | // This information is stored as a separate byte. |
2590 | 0 | int mod = rb->bit_offset % CHAR_BIT; |
2591 | 0 | if (mod > 0) aom_rb_read_literal(rb, CHAR_BIT - mod); |
2592 | 0 | assert(rb->bit_offset % CHAR_BIT == 0); |
2593 | 0 |
|
2594 | 0 | if (cm->tile_cols * cm->tile_rows > 1) { |
2595 | 0 | // Read the number of bytes used to store tile size |
2596 | 0 | pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1; |
2597 | 0 | pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1; |
2598 | 0 | } |
2599 | 0 | } |
2600 | | #endif // EXT_TILE_DEBUG |
2601 | | |
// Reads a little-endian unsigned value of 1..4 bytes from `src`.
// NOTE: with asserts disabled, an invalid `sz` yields (size_t)-1.
static size_t mem_get_varsize(const uint8_t *src, int sz) {
  if (sz == 1) return src[0];
  if (sz == 2) return mem_get_le16(src);
  if (sz == 3) return mem_get_le24(src);
  if (sz == 4) return mem_get_le32(src);
  assert(0 && "Invalid size");
  return -1;
}
2611 | | |
2612 | | #if EXT_TILE_DEBUG |
2613 | | // Reads the next tile, returning its size and adjusting '*data' accordingly |
2614 | | // (copy-mode tiles alias an earlier tile's buffer). On return, '*data' points |
2615 | | // to the end of the raw tile buffer in the bit stream. |
static void get_ls_tile_buffer(
    const uint8_t *const data_end, struct aom_internal_error_info *error_info,
    const uint8_t **data, TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
    int tile_size_bytes, int col, int row, int tile_copy_mode) {
  size_t size;

  // When this tile is a copy of an earlier one, these point at the source.
  size_t copy_size = 0;
  const uint8_t *copy_data = NULL;

  if (!read_is_valid(*data, tile_size_bytes, data_end))
    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");
  size = mem_get_varsize(*data, tile_size_bytes);

  // If tile_copy_mode = 1, then the top bit of the tile header indicates copy
  // mode.
  if (tile_copy_mode && (size >> (tile_size_bytes * 8 - 1)) == 1) {
    // The remaining bits in the top byte signal the row offset
    int offset = (size >> (tile_size_bytes - 1) * 8) & 0x7f;

    // Currently, only use tiles in same column as reference tiles.
    copy_data = tile_buffers[row - offset][col].data;
    copy_size = tile_buffers[row - offset][col].size;
    // size = 0 marks "copy" for the buffer assignment below.
    size = 0;
  } else {
    // The coded size field omits the guaranteed minimum tile size.
    size += AV1_MIN_TILE_SIZE_BYTES;
  }

  // Advance past the tile-size field itself.
  *data += tile_size_bytes;

  if (size > (size_t)(data_end - *data))
    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile size");

  if (size > 0) {
    tile_buffers[row][col].data = *data;
    tile_buffers[row][col].size = size;
  } else {
    // Copy mode: alias the referenced tile's buffer.
    tile_buffers[row][col].data = copy_data;
    tile_buffers[row][col].size = copy_size;
  }

  *data += size;
}
2660 | | |
2661 | | // Returns the end of the last tile buffer |
2662 | | // (tile_buffers[cm->tile_rows - 1][cm->tile_cols - 1]). |
static const uint8_t *get_ls_tile_buffers(
    AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
    TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
  AV1_COMMON *const cm = &pbi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  const int have_tiles = tile_cols * tile_rows > 1;
  const uint8_t *raw_data_end;  // The end of the last tile buffer

  if (!have_tiles) {
    // Single tile: the whole payload is one tile buffer.
    const size_t tile_size = data_end - data;
    tile_buffers[0][0].data = data;
    tile_buffers[0][0].size = tile_size;
    raw_data_end = NULL;
  } else {
    // We locate only the tile buffers that are required, which are the ones
    // specified by pbi->dec_tile_col and pbi->dec_tile_row. Also, we always
    // need the last (bottom right) tile buffer, as we need to know where the
    // end of the compressed frame buffer is for proper superframe decoding.

    const uint8_t *tile_col_data_end[MAX_TILE_COLS] = { NULL };
    const uint8_t *const data_start = data;

    // A negative dec_tile_row/col means "decode all rows/cols".
    const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
    const int single_row = pbi->dec_tile_row >= 0;
    const int tile_rows_start = single_row ? dec_tile_row : 0;
    const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
    const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
    const int single_col = pbi->dec_tile_col >= 0;
    const int tile_cols_start = single_col ? dec_tile_col : 0;
    const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;

    const int tile_col_size_bytes = pbi->tile_col_size_bytes;
    const int tile_size_bytes = pbi->tile_size_bytes;
    // Copy mode is only usable for small tiles (<= 256 pixels per side).
    const int tile_copy_mode =
        ((AOMMAX(cm->tile_width, cm->tile_height) << MI_SIZE_LOG2) <= 256) ? 1
                                                                           : 0;
    // Read tile column sizes for all columns (we need the last tile buffer)
    for (int c = 0; c < tile_cols; ++c) {
      const int is_last = c == tile_cols - 1;
      size_t tile_col_size;

      if (!is_last) {
        tile_col_size = mem_get_varsize(data, tile_col_size_bytes);
        data += tile_col_size_bytes;
        tile_col_data_end[c] = data + tile_col_size;
      } else {
        // The last column has no size field; it runs to the end of the data.
        tile_col_size = data_end - data;
        tile_col_data_end[c] = data_end;
      }
      data += tile_col_size;
    }

    data = data_start;

    // Read the required tile sizes.
    for (int c = tile_cols_start; c < tile_cols_end; ++c) {
      const int is_last = c == tile_cols - 1;

      if (c > 0) data = tile_col_data_end[c - 1];

      if (!is_last) data += tile_col_size_bytes;

      // Get the whole of the last column, otherwise stop at the required tile.
      for (int r = 0; r < (is_last ? tile_rows : tile_rows_end); ++r) {
        get_ls_tile_buffer(tile_col_data_end[c], &pbi->common.error, &data,
                           tile_buffers, tile_size_bytes, c, r, tile_copy_mode);
      }
    }

    // If we have not read the last column, then read it to get the last tile.
    if (tile_cols_end != tile_cols) {
      const int c = tile_cols - 1;

      data = tile_col_data_end[c - 1];

      for (int r = 0; r < tile_rows; ++r) {
        get_ls_tile_buffer(tile_col_data_end[c], &pbi->common.error, &data,
                           tile_buffers, tile_size_bytes, c, r, tile_copy_mode);
      }
    }
    raw_data_end = data;
  }
  return raw_data_end;
}
2748 | | #endif // EXT_TILE_DEBUG |
2749 | | |
2750 | | static const uint8_t *get_ls_single_tile_buffer( |
2751 | | AV1Decoder *pbi, const uint8_t *data, |
2752 | 0 | TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) { |
2753 | 0 | assert(pbi->dec_tile_row >= 0 && pbi->dec_tile_col >= 0); |
2754 | 0 | tile_buffers[pbi->dec_tile_row][pbi->dec_tile_col].data = data; |
2755 | 0 | tile_buffers[pbi->dec_tile_row][pbi->dec_tile_col].size = |
2756 | 0 | (size_t)pbi->coded_tile_data_size; |
2757 | 0 | return data + pbi->coded_tile_data_size; |
2758 | 0 | } |
2759 | | |
2760 | | // Reads the next tile returning its size and adjusting '*data' accordingly |
2761 | | // based on 'is_last'. |
2762 | | static void get_tile_buffer(const uint8_t *const data_end, |
2763 | | const int tile_size_bytes, int is_last, |
2764 | | struct aom_internal_error_info *error_info, |
2765 | 0 | const uint8_t **data, TileBufferDec *const buf) { |
2766 | 0 | size_t size; |
2767 | 0 |
|
2768 | 0 | if (!is_last) { |
2769 | 0 | if (!read_is_valid(*data, tile_size_bytes, data_end)) |
2770 | 0 | aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME, |
2771 | 0 | "Truncated packet or corrupt tile length"); |
2772 | 0 |
|
2773 | 0 | size = mem_get_varsize(*data, tile_size_bytes) + AV1_MIN_TILE_SIZE_BYTES; |
2774 | 0 | *data += tile_size_bytes; |
2775 | 0 |
|
2776 | 0 | if (size > (size_t)(data_end - *data)) |
2777 | 0 | aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME, |
2778 | 0 | "Truncated packet or corrupt tile size"); |
2779 | 0 | } else { |
2780 | 0 | size = data_end - *data; |
2781 | 0 | } |
2782 | 0 |
|
2783 | 0 | buf->data = *data; |
2784 | 0 | buf->size = size; |
2785 | 0 |
|
2786 | 0 | *data += size; |
2787 | 0 | } |
2788 | | |
// Walks all tiles of the frame in raster order and records the buffer
// location/size of each tile in [start_tile, end_tile] into
// tile_buffers[row][col], advancing 'data' through the tile group payload.
static void get_tile_buffers(AV1Decoder *pbi, const uint8_t *data,
                             const uint8_t *data_end,
                             TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
                             int start_tile, int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  int tc = 0;  // Raster-order tile index.
  int first_tile_in_tg = 0;

  for (int r = 0; r < tile_rows; ++r) {
    for (int c = 0; c < tile_cols; ++c, ++tc) {
      TileBufferDec *const buf = &tile_buffers[r][c];

      // The last requested tile has no size field (see get_tile_buffer).
      const int is_last = (tc == end_tile);
      // Always 0 here: tile-group headers are consumed before this call.
      const size_t hdr_offset = 0;

      if (tc < start_tile || tc > end_tile) continue;

      if (data + hdr_offset >= data_end)
        aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                           "Data ended before all tiles were read.");
      // Advance the tile-group boundary tracker by tg_size tiles whenever we
      // reach the first tile of a group (no other effect with hdr_offset==0).
      first_tile_in_tg += tc == first_tile_in_tg ? pbi->tg_size : 0;
      data += hdr_offset;
      get_tile_buffer(data_end, pbi->tile_size_bytes, is_last,
                      &pbi->common.error, &data, buf);
    }
  }
}
2818 | | |
2819 | | static void set_cb_buffer(AV1Decoder *pbi, MACROBLOCKD *const xd, |
2820 | | CB_BUFFER *cb_buffer_base, const int num_planes, |
2821 | 0 | int mi_row, int mi_col) { |
2822 | 0 | AV1_COMMON *const cm = &pbi->common; |
2823 | 0 | int mib_size_log2 = cm->seq_params.mib_size_log2; |
2824 | 0 | int stride = (cm->mi_cols >> mib_size_log2) + 1; |
2825 | 0 | int offset = (mi_row >> mib_size_log2) * stride + (mi_col >> mib_size_log2); |
2826 | 0 | CB_BUFFER *cb_buffer = cb_buffer_base + offset; |
2827 | 0 |
|
2828 | 0 | for (int plane = 0; plane < num_planes; ++plane) { |
2829 | 0 | xd->plane[plane].dqcoeff_block = cb_buffer->dqcoeff[plane]; |
2830 | 0 | xd->plane[plane].eob_data = cb_buffer->eob_data[plane]; |
2831 | 0 | xd->cb_offset[plane] = 0; |
2832 | 0 | xd->txb_offset[plane] = 0; |
2833 | 0 | } |
2834 | 0 | xd->plane[0].color_index_map = cb_buffer->color_index_map[0]; |
2835 | 0 | xd->plane[1].color_index_map = cb_buffer->color_index_map[1]; |
2836 | 0 | xd->color_index_map_offset[0] = 0; |
2837 | 0 | xd->color_index_map_offset[1] = 0; |
2838 | 0 | } |
2839 | | |
2840 | 0 | static void decoder_alloc_tile_data(AV1Decoder *pbi, const int n_tiles) { |
2841 | 0 | AV1_COMMON *const cm = &pbi->common; |
2842 | 0 | aom_free(pbi->tile_data); |
2843 | 0 | CHECK_MEM_ERROR(cm, pbi->tile_data, |
2844 | 0 | aom_memalign(32, n_tiles * sizeof(*pbi->tile_data))); |
2845 | 0 | pbi->allocated_tiles = n_tiles; |
2846 | 0 | for (int i = 0; i < n_tiles; i++) { |
2847 | 0 | TileDataDec *const tile_data = pbi->tile_data + i; |
2848 | 0 | av1_zero(tile_data->dec_row_mt_sync); |
2849 | 0 | } |
2850 | 0 | pbi->allocated_row_mt_sync_rows = 0; |
2851 | 0 | } |
2852 | | |
2853 | | // Set up nsync by width. |
2854 | 0 | static INLINE int get_sync_range(int width) { |
2855 | 0 | // nsync numbers are picked by testing. |
2856 | | #if 0 |
2857 | | if (width < 640) |
2858 | | return 1; |
2859 | | else if (width <= 1280) |
2860 | | return 2; |
2861 | | else if (width <= 4096) |
2862 | | return 4; |
2863 | | else |
2864 | | return 8; |
2865 | | #else |
2866 | | (void)width; |
2867 | 0 | #endif |
2868 | 0 | return 1; |
2869 | 0 | } |
2870 | | |
// Allocate memory for decoder row synchronization
// One mutex, one condition variable and one progress counter are allocated
// per superblock row so that rows can wait on the row above them.
// CHECK_MEM_ERROR aborts the decode on allocation failure; the NULL guards
// below are defensive in case it returns with a NULL pointer.
static void dec_row_mt_alloc(AV1DecRowMTSync *dec_row_mt_sync, AV1_COMMON *cm,
                             int rows) {
  dec_row_mt_sync->allocated_sb_rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(cm, dec_row_mt_sync->mutex_,
                    aom_malloc(sizeof(*(dec_row_mt_sync->mutex_)) * rows));
    if (dec_row_mt_sync->mutex_) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&dec_row_mt_sync->mutex_[i], NULL);
      }
    }

    CHECK_MEM_ERROR(cm, dec_row_mt_sync->cond_,
                    aom_malloc(sizeof(*(dec_row_mt_sync->cond_)) * rows));
    if (dec_row_mt_sync->cond_) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&dec_row_mt_sync->cond_[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  // Per-row progress counters (last decoded superblock column per row).
  CHECK_MEM_ERROR(cm, dec_row_mt_sync->cur_sb_col,
                  aom_malloc(sizeof(*(dec_row_mt_sync->cur_sb_col)) * rows));

  // Set up nsync.
  dec_row_mt_sync->sync_range = get_sync_range(cm->width);
}
2903 | | |
// Deallocate decoder row synchronization related mutex and data
// Safe to call on an already-cleared structure; every pointer is checked
// before its elements are destroyed and the memory freed.
void av1_dec_row_mt_dealloc(AV1DecRowMTSync *dec_row_mt_sync) {
  if (dec_row_mt_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;
    if (dec_row_mt_sync->mutex_ != NULL) {
      for (i = 0; i < dec_row_mt_sync->allocated_sb_rows; ++i) {
        pthread_mutex_destroy(&dec_row_mt_sync->mutex_[i]);
      }
      aom_free(dec_row_mt_sync->mutex_);
    }
    if (dec_row_mt_sync->cond_ != NULL) {
      for (i = 0; i < dec_row_mt_sync->allocated_sb_rows; ++i) {
        pthread_cond_destroy(&dec_row_mt_sync->cond_[i]);
      }
      aom_free(dec_row_mt_sync->cond_);
    }
#endif  // CONFIG_MULTITHREAD
    aom_free(dec_row_mt_sync->cur_sb_col);

    // clear the structure as the source of this call may be a resize in which
    // case this call will be followed by an _alloc() which may fail.
    av1_zero(*dec_row_mt_sync);
  }
}
2929 | | |
// Row-MT reader side: before decoding superblock column 'c' of row 'r',
// block until the row above has decoded at least 'sync_range' columns past
// 'c'. Row 0 never waits. No-op in single-threaded builds.
static INLINE void sync_read(AV1DecRowMTSync *const dec_row_mt_sync, int r,
                             int c) {
#if CONFIG_MULTITHREAD
  const int nsync = dec_row_mt_sync->sync_range;

  // Only check every 'nsync' columns. The bitmask assumes nsync is a power
  // of two (get_sync_range() currently always returns 1).
  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &dec_row_mt_sync->mutex_[r - 1];
    pthread_mutex_lock(mutex);

    // Wait on the row above's progress counter; woken by sync_write().
    while (c > dec_row_mt_sync->cur_sb_col[r - 1] - nsync) {
      pthread_cond_wait(&dec_row_mt_sync->cond_[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)dec_row_mt_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}
2950 | | |
// Row-MT writer side: publish that superblock column 'c' of row 'r' has been
// decoded and wake any reader waiting on this row. Signals only every
// 'sync_range' columns; the final column publishes sb_cols + nsync so the
// next row can never block on a finished row. No-op in single-threaded
// builds.
static INLINE void sync_write(AV1DecRowMTSync *const dec_row_mt_sync, int r,
                              int c, const int sb_cols) {
#if CONFIG_MULTITHREAD
  const int nsync = dec_row_mt_sync->sync_range;
  int cur;
  int sig = 1;

  if (c < sb_cols - 1) {
    cur = c;
    // Skip signaling between sync points to reduce lock traffic.
    if (c % nsync) sig = 0;
  } else {
    // Last column: advance the counter past the end so waiters always pass.
    cur = sb_cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&dec_row_mt_sync->mutex_[r]);

    dec_row_mt_sync->cur_sb_col[r] = cur;

    pthread_cond_signal(&dec_row_mt_sync->cond_[r]);
    pthread_mutex_unlock(&dec_row_mt_sync->mutex_[r]);
  }
#else
  (void)dec_row_mt_sync;
  (void)r;
  (void)c;
  (void)sb_cols;
#endif  // CONFIG_MULTITHREAD
}
2980 | | |
// Decodes (reconstructs) one superblock row of a tile at 'mi_row', for the
// row-MT path where parsing has already been done. Each superblock waits on
// the row above via sync_read() and publishes its own progress via
// sync_write(). The 0x2 flag selects the decode-only pass of
// decode_partition().
static void decode_tile_sb_row(AV1Decoder *pbi, ThreadData *const td,
                               TileInfo tile_info, const int mi_row) {
  AV1_COMMON *const cm = &pbi->common;
  const int num_planes = av1_num_planes(cm);
  TileDataDec *const tile_data =
      pbi->tile_data + tile_info.tile_row * cm->tile_cols + tile_info.tile_col;
  const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_info);
  const int sb_row_in_tile =
      (mi_row - tile_info.mi_row_start) >> cm->seq_params.mib_size_log2;
  int sb_col_in_tile = 0;

  for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
       mi_col += cm->seq_params.mib_size, sb_col_in_tile++) {
    // Point xd at the coefficient buffer the parse pass filled for this SB.
    set_cb_buffer(pbi, &td->xd, pbi->cb_buffer_base, num_planes, mi_row,
                  mi_col);

    sync_read(&tile_data->dec_row_mt_sync, sb_row_in_tile, sb_col_in_tile);

    // Decoding of the super-block
    decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
                     cm->seq_params.sb_size, 0x2);

    sync_write(&tile_data->dec_row_mt_sync, sb_row_in_tile, sb_col_in_tile,
               sb_cols_in_tile);
  }
}
3007 | | |
3008 | 0 | static int check_trailing_bits_after_symbol_coder(aom_reader *r) { |
3009 | 0 | if (aom_reader_has_overflowed(r)) return -1; |
3010 | 0 | |
3011 | 0 | uint32_t nb_bits = aom_reader_tell(r); |
3012 | 0 | uint32_t nb_bytes = (nb_bits + 7) >> 3; |
3013 | 0 | const uint8_t *p = aom_reader_find_begin(r) + nb_bytes; |
3014 | 0 |
|
3015 | 0 | // aom_reader_tell() returns 1 for a newly initialized decoder, and the |
3016 | 0 | // return value only increases as values are decoded. So nb_bits > 0, and |
3017 | 0 | // thus p > p_begin. Therefore accessing p[-1] is safe. |
3018 | 0 | uint8_t last_byte = p[-1]; |
3019 | 0 | uint8_t pattern = 128 >> ((nb_bits - 1) & 7); |
3020 | 0 | if ((last_byte & (2 * pattern - 1)) != pattern) return -1; |
3021 | 0 | |
3022 | 0 | // Make sure that all padding bytes are zero as required by the spec. |
3023 | 0 | const uint8_t *p_end = aom_reader_find_end(r); |
3024 | 0 | while (p < p_end) { |
3025 | 0 | if (*p != 0) return -1; |
3026 | 0 | p++; |
3027 | 0 | } |
3028 | 0 | return 0; |
3029 | 0 | } |
3030 | | |
3031 | 0 | static void set_decode_func_pointers(ThreadData *td, int parse_decode_flag) { |
3032 | 0 | td->read_coeffs_tx_intra_block_visit = decode_block_void; |
3033 | 0 | td->predict_and_recon_intra_block_visit = decode_block_void; |
3034 | 0 | td->read_coeffs_tx_inter_block_visit = decode_block_void; |
3035 | 0 | td->inverse_tx_inter_block_visit = decode_block_void; |
3036 | 0 | td->predict_inter_block_visit = predict_inter_block_void; |
3037 | 0 | td->cfl_store_inter_block_visit = cfl_store_inter_block_void; |
3038 | 0 |
|
3039 | 0 | if (parse_decode_flag & 0x1) { |
3040 | 0 | td->read_coeffs_tx_intra_block_visit = read_coeffs_tx_intra_block; |
3041 | 0 | td->read_coeffs_tx_inter_block_visit = av1_read_coeffs_txb_facade; |
3042 | 0 | } |
3043 | 0 | if (parse_decode_flag & 0x2) { |
3044 | 0 | td->predict_and_recon_intra_block_visit = |
3045 | 0 | predict_and_reconstruct_intra_block; |
3046 | 0 | td->inverse_tx_inter_block_visit = inverse_transform_inter_block; |
3047 | 0 | td->predict_inter_block_visit = predict_inter_block; |
3048 | 0 | td->cfl_store_inter_block_visit = cfl_store_inter_block; |
3049 | 0 | } |
3050 | 0 | } |
3051 | | |
// Parses and reconstructs one whole tile (single-pass, flag 0x3) using the
// bit reader already attached to 'td'. Corruption — reader overflow or bad
// trailing bits — is merged into td->xd.corrupted rather than reported here.
static void decode_tile(AV1Decoder *pbi, ThreadData *const td, int tile_row,
                        int tile_col) {
  TileInfo tile_info;

  AV1_COMMON *const cm = &pbi->common;
  const int num_planes = av1_num_planes(cm);

  av1_tile_set_row(&tile_info, cm, tile_row);
  av1_tile_set_col(&tile_info, cm, tile_col);
  // Tiles are decoded independently: clear above/left context and filter
  // deltas at the tile boundary.
  av1_zero_above_context(cm, &td->xd, tile_info.mi_col_start,
                         tile_info.mi_col_end, tile_row);
  av1_reset_loop_filter_delta(&td->xd, num_planes);
  av1_reset_loop_restoration(&td->xd, num_planes);

  for (int mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
       mi_row += cm->seq_params.mib_size) {
    av1_zero_left_context(&td->xd);

    for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
         mi_col += cm->seq_params.mib_size) {
      // Single-pass decode reuses one per-thread CB_BUFFER, so the buffer
      // index is pinned to (0, 0).
      set_cb_buffer(pbi, &td->xd, &td->cb_buffer_base, num_planes, 0, 0);

      // Bit-stream parsing and decoding of the superblock
      decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
                       cm->seq_params.sb_size, 0x3);

      // Stop immediately if the reader ran past the tile's data.
      if (aom_reader_has_overflowed(td->bit_reader)) {
        aom_merge_corrupted_flag(&td->xd.corrupted, 1);
        return;
      }
    }
  }

  int corrupted =
      (check_trailing_bits_after_symbol_coder(td->bit_reader)) ? 1 : 0;
  aom_merge_corrupted_flag(&td->xd.corrupted, corrupted);
}
3089 | | |
// Serially decodes tiles [start_tile, end_tile] on the calling thread.
// Handles both the normal path and the large-scale-tile path (where only a
// selected row/column may be decoded and CDF updates are disabled).
// Returns a pointer just past the last byte consumed, or 'data' unchanged
// when the requested range contains no tiles.
static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
                                   const uint8_t *data_end, int start_tile,
                                   int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  ThreadData *const td = &pbi->td;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  const int n_tiles = tile_cols * tile_rows;
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
  // dec_tile_row/col < 0 means "decode all rows/cols".
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
  const int single_row = pbi->dec_tile_row >= 0;
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
  const int single_col = pbi->dec_tile_col >= 0;
  int tile_rows_start;
  int tile_rows_end;
  int tile_cols_start;
  int tile_cols_end;
  int inv_col_order;
  int inv_row_order;
  int tile_row, tile_col;
  uint8_t allow_update_cdf;
  const uint8_t *raw_data_end = NULL;

  if (cm->large_scale_tile) {
    // Restrict the loop range to the requested single row/column if set.
    tile_rows_start = single_row ? dec_tile_row : 0;
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
    tile_cols_start = single_col ? dec_tile_col : 0;
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
    inv_col_order = pbi->inv_tile_order && !single_col;
    inv_row_order = pbi->inv_tile_order && !single_row;
    allow_update_cdf = 0;
  } else {
    tile_rows_start = 0;
    tile_rows_end = tile_rows;
    tile_cols_start = 0;
    tile_cols_end = tile_cols;
    inv_col_order = pbi->inv_tile_order;
    inv_row_order = pbi->inv_tile_order;
    allow_update_cdf = 1;
  }

  // No tiles to decode.
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
      // First tile is larger than end_tile.
      tile_rows_start * cm->tile_cols + tile_cols_start > end_tile ||
      // Last tile is smaller than start_tile.
      (tile_rows_end - 1) * cm->tile_cols + tile_cols_end - 1 < start_tile)
    return data;

  allow_update_cdf = allow_update_cdf && !cm->disable_cdf_update;

  assert(tile_rows <= MAX_TILE_ROWS);
  assert(tile_cols <= MAX_TILE_COLS);

#if EXT_TILE_DEBUG
  if (cm->large_scale_tile && !pbi->ext_tile_debug)
    raw_data_end = get_ls_single_tile_buffer(pbi, data, tile_buffers);
  else if (cm->large_scale_tile && pbi->ext_tile_debug)
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
  else
#endif  // EXT_TILE_DEBUG
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);

  // (Re)allocate per-tile state whenever the tile count changed.
  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
    decoder_alloc_tile_data(pbi, n_tiles);
  }
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    aom_accounting_reset(&pbi->accounting);
  }
#endif

  // 0x3 = single-pass: parse and reconstruct together.
  set_decode_func_pointers(&pbi->td, 0x3);

  // Load all tile information into thread_data.
  td->xd = pbi->mb;
  td->xd.corrupted = 0;
  td->xd.mc_buf[0] = td->mc_buf[0];
  td->xd.mc_buf[1] = td->mc_buf[1];
  td->xd.tmp_conv_dst = td->tmp_conv_dst;
  for (int j = 0; j < 2; ++j) {
    td->xd.tmp_obmc_bufs[j] = td->tmp_obmc_bufs[j];
  }

  for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
    const int row = inv_row_order ? tile_rows - 1 - tile_row : tile_row;

    for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
      const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
      TileDataDec *const tile_data = pbi->tile_data + row * cm->tile_cols + col;
      const TileBufferDec *const tile_bs_buf = &tile_buffers[row][col];

      if (row * cm->tile_cols + col < start_tile ||
          row * cm->tile_cols + col > end_tile)
        continue;

      td->bit_reader = &tile_data->bit_reader;
      av1_zero(td->dqcoeff);
      av1_tile_init(&td->xd.tile, cm, row, col);
      td->xd.current_qindex = cm->base_qindex;
      setup_bool_decoder(tile_bs_buf->data, data_end, tile_bs_buf->size,
                         &cm->error, td->bit_reader, allow_update_cdf);
#if CONFIG_ACCOUNTING
      if (pbi->acct_enabled) {
        td->bit_reader->accounting = &pbi->accounting;
        td->bit_reader->accounting->last_tell_frac =
            aom_reader_tell_frac(td->bit_reader);
      } else {
        td->bit_reader->accounting = NULL;
      }
#endif
      av1_init_macroblockd(cm, &td->xd, td->dqcoeff);
      av1_init_above_context(cm, &td->xd, row);

      // Initialise the tile context from the frame context
      tile_data->tctx = *cm->fc;
      td->xd.tile_ctx = &tile_data->tctx;

      // decode tile
      decode_tile(pbi, td, row, col);
      aom_merge_corrupted_flag(&pbi->mb.corrupted, td->xd.corrupted);
      if (pbi->mb.corrupted)
        aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                           "Failed to decode tile data");
    }
  }

  if (cm->large_scale_tile) {
    if (n_tiles == 1) {
      // Find the end of the single tile buffer
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
    }
    // Return the end of the last tile buffer
    return raw_data_end;
  }
  TileDataDec *const tile_data = pbi->tile_data + end_tile;

  return aom_reader_find_end(&tile_data->bit_reader);
}
3229 | | |
// Dequeues the next tile-decode job under the job mutex. Returns NULL when
// the queue is exhausted (or always NULL in single-threaded builds, where
// this worker path is never used).
static TileJobsDec *get_dec_job_info(AV1DecTileMT *tile_mt_info) {
  TileJobsDec *cur_job_info = NULL;
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(tile_mt_info->job_mutex);

  if (tile_mt_info->jobs_dequeued < tile_mt_info->jobs_enqueued) {
    cur_job_info = tile_mt_info->job_queue + tile_mt_info->jobs_dequeued;
    tile_mt_info->jobs_dequeued++;
  }

  pthread_mutex_unlock(tile_mt_info->job_mutex);
#else
  (void)tile_mt_info;
#endif
  return cur_job_info;
}
3246 | | |
// Prepares a worker's ThreadData to decode one tile: attaches the tile's bit
// reader, initializes the bool decoder over the tile buffer, resets the
// macroblockd state, and copies the frame context into the tile context.
// Errors are routed to the worker's own error_info (setjmp target).
static void tile_worker_hook_init(AV1Decoder *const pbi,
                                  DecWorkerData *const thread_data,
                                  const TileBufferDec *const tile_buffer,
                                  TileDataDec *const tile_data,
                                  uint8_t allow_update_cdf) {
  AV1_COMMON *cm = &pbi->common;
  ThreadData *const td = thread_data->td;
  int tile_row = tile_data->tile_info.tile_row;
  int tile_col = tile_data->tile_info.tile_col;

  td->bit_reader = &tile_data->bit_reader;
  av1_zero(td->dqcoeff);
  av1_tile_init(&td->xd.tile, cm, tile_row, tile_col);
  td->xd.current_qindex = cm->base_qindex;
  setup_bool_decoder(tile_buffer->data, thread_data->data_end,
                     tile_buffer->size, &thread_data->error_info,
                     td->bit_reader, allow_update_cdf);
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    td->bit_reader->accounting = &pbi->accounting;
    td->bit_reader->accounting->last_tell_frac =
        aom_reader_tell_frac(td->bit_reader);
  } else {
    td->bit_reader->accounting = NULL;
  }
#endif
  av1_init_macroblockd(cm, &td->xd, td->dqcoeff);
  // Worker errors must longjmp into this worker's handler, not the decoder's.
  td->xd.error_info = &thread_data->error_info;
  av1_init_above_context(cm, &td->xd, tile_row);

  // Initialise the tile context from the frame context
  tile_data->tctx = *cm->fc;
  td->xd.tile_ctx = &tile_data->tctx;
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    tile_data->bit_reader.accounting->last_tell_frac =
        aom_reader_tell_frac(&tile_data->bit_reader);
  }
#endif
}
3287 | | |
// Worker-thread entry point for tile-parallel decoding: repeatedly pulls a
// tile job from the shared queue and fully decodes it (parse + reconstruct,
// flag 0x3). Returns 1 on success, 0 if any tile was corrupted or a fatal
// error longjmp'ed into the handler below.
static int tile_worker_hook(void *arg1, void *arg2) {
  DecWorkerData *const thread_data = (DecWorkerData *)arg1;
  AV1Decoder *const pbi = (AV1Decoder *)arg2;
  AV1_COMMON *cm = &pbi->common;
  ThreadData *const td = thread_data->td;
  uint8_t allow_update_cdf;

  // The jmp_buf is valid only for the duration of the function that calls
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
  // before it returns.
  if (setjmp(thread_data->error_info.jmp)) {
    thread_data->error_info.setjmp = 0;
    thread_data->td->xd.corrupted = 1;
    return 0;
  }
  thread_data->error_info.setjmp = 1;

  // CDF updates are disabled for large-scale tiles and when the frame
  // header turned them off.
  allow_update_cdf = cm->large_scale_tile ? 0 : 1;
  allow_update_cdf = allow_update_cdf && !cm->disable_cdf_update;

  set_decode_func_pointers(td, 0x3);

  assert(cm->tile_cols > 0);
  while (1) {
    TileJobsDec *cur_job_info = get_dec_job_info(&pbi->tile_mt_info);

    // Stop dequeuing once a tile is corrupt; remaining jobs are abandoned.
    if (cur_job_info != NULL && !td->xd.corrupted) {
      const TileBufferDec *const tile_buffer = cur_job_info->tile_buffer;
      TileDataDec *const tile_data = cur_job_info->tile_data;
      tile_worker_hook_init(pbi, thread_data, tile_buffer, tile_data,
                            allow_update_cdf);
      // decode tile
      int tile_row = tile_data->tile_info.tile_row;
      int tile_col = tile_data->tile_info.tile_col;
      decode_tile(pbi, td, tile_row, tile_col);
    } else {
      break;
    }
  }
  thread_data->error_info.setjmp = 0;
  return !td->xd.corrupted;
}
3330 | | |
// Row-MT scheduler: picks the next superblock row to decode. Must be called
// with pbi->row_mt_mutex_ held (callers lock around it). Sets *end_of_frame
// when all rows have been handed out or a worker signalled row_mt_exit.
// Returns 1 when *next_job_info is valid or the frame is done, 0 when the
// caller should wait (parsing has not produced a decodable row yet).
static int get_next_job_info(AV1Decoder *const pbi,
                             AV1DecRowMTJobInfo *next_job_info,
                             int *end_of_frame) {
  AV1_COMMON *cm = &pbi->common;
  TileDataDec *tile_data;
  AV1DecRowMTSync *dec_row_mt_sync;
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
  TileInfo tile_info;
  const int tile_rows_start = frame_row_mt_info->tile_rows_start;
  const int tile_rows_end = frame_row_mt_info->tile_rows_end;
  const int tile_cols_start = frame_row_mt_info->tile_cols_start;
  const int tile_cols_end = frame_row_mt_info->tile_cols_end;
  const int start_tile = frame_row_mt_info->start_tile;
  const int end_tile = frame_row_mt_info->end_tile;
  const int sb_mi_size = mi_size_wide[cm->seq_params.sb_size];
  int num_mis_to_decode, num_threads_working;
  int num_mis_waiting_for_decode;
  int min_threads_working = INT_MAX;
  int max_mis_to_decode = 0;
  int tile_row_idx, tile_col_idx;
  int tile_row = 0;
  int tile_col = 0;

  memset(next_job_info, 0, sizeof(*next_job_info));

  // Frame decode is completed or error is encountered.
  *end_of_frame = (frame_row_mt_info->mi_rows_decode_started ==
                   frame_row_mt_info->mi_rows_to_decode) ||
                  (frame_row_mt_info->row_mt_exit == 1);
  if (*end_of_frame) {
    return 1;
  }

  // Decoding cannot start as bit-stream parsing is not complete.
  if (frame_row_mt_info->mi_rows_parse_done -
          frame_row_mt_info->mi_rows_decode_started ==
      0)
    return 0;

  // Choose the tile to decode: among tiles with parsed-but-undecoded rows,
  // prefer the tile with the fewest workers; break ties by the most
  // remaining work, to balance load across tiles.
  for (tile_row_idx = tile_rows_start; tile_row_idx < tile_rows_end;
       ++tile_row_idx) {
    for (tile_col_idx = tile_cols_start; tile_col_idx < tile_cols_end;
         ++tile_col_idx) {
      if (tile_row_idx * cm->tile_cols + tile_col_idx < start_tile ||
          tile_row_idx * cm->tile_cols + tile_col_idx > end_tile)
        continue;

      tile_data = pbi->tile_data + tile_row_idx * cm->tile_cols + tile_col_idx;
      dec_row_mt_sync = &tile_data->dec_row_mt_sync;

      num_threads_working = dec_row_mt_sync->num_threads_working;
      num_mis_waiting_for_decode = (dec_row_mt_sync->mi_rows_parse_done -
                                    dec_row_mt_sync->mi_rows_decode_started) *
                                   dec_row_mt_sync->mi_cols;
      num_mis_to_decode =
          (dec_row_mt_sync->mi_rows - dec_row_mt_sync->mi_rows_decode_started) *
          dec_row_mt_sync->mi_cols;

      assert(num_mis_to_decode >= num_mis_waiting_for_decode);

      // Pick the tile which has minimum number of threads working on it.
      if (num_mis_waiting_for_decode > 0) {
        if (num_threads_working < min_threads_working) {
          min_threads_working = num_threads_working;
          max_mis_to_decode = 0;
        }
        if (num_threads_working == min_threads_working &&
            num_mis_to_decode > max_mis_to_decode) {
          max_mis_to_decode = num_mis_to_decode;
          tile_row = tile_row_idx;
          tile_col = tile_col_idx;
        }
      }
    }
  }

  tile_data = pbi->tile_data + tile_row * cm->tile_cols + tile_col;
  tile_info = tile_data->tile_info;
  dec_row_mt_sync = &tile_data->dec_row_mt_sync;

  next_job_info->tile_row = tile_row;
  next_job_info->tile_col = tile_col;
  next_job_info->mi_row =
      dec_row_mt_sync->mi_rows_decode_started + tile_info.mi_row_start;

  // Claim one superblock row of this tile for the caller.
  dec_row_mt_sync->num_threads_working++;
  dec_row_mt_sync->mi_rows_decode_started += sb_mi_size;
  frame_row_mt_info->mi_rows_decode_started += sb_mi_size;

  return 1;
}
3423 | | |
// Marks one superblock row of 'tile_data' as fully parsed (tile and frame
// counters advance by sb_mi_size) and wakes every decode worker blocked in
// get_next_job_info()'s wait loop.
static INLINE void signal_parse_sb_row_done(AV1Decoder *const pbi,
                                            TileDataDec *const tile_data,
                                            const int sb_mi_size) {
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
  tile_data->dec_row_mt_sync.mi_rows_parse_done += sb_mi_size;
  frame_row_mt_info->mi_rows_parse_done += sb_mi_size;
#if CONFIG_MULTITHREAD
  // Broadcast: any waiting decode worker may now have a runnable row.
  pthread_cond_broadcast(pbi->row_mt_cond_);
  pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
}
3438 | | |
// Worker-thread entry point for row-based multi-threaded decoding. Runs in
// two phases: (1) pull whole tiles from the job queue and parse them (flag
// 0x1), publishing each parsed superblock row; (2) decode superblock rows
// (flag 0x2) handed out by get_next_job_info() until the frame completes.
// Returns 1 on success, 0 on corruption or a fatal error (which also sets
// row_mt_exit so other workers drain and stop).
static int row_mt_worker_hook(void *arg1, void *arg2) {
  DecWorkerData *const thread_data = (DecWorkerData *)arg1;
  AV1Decoder *const pbi = (AV1Decoder *)arg2;
  AV1_COMMON *cm = &pbi->common;
  ThreadData *const td = thread_data->td;
  uint8_t allow_update_cdf;
  const int sb_mi_size = mi_size_wide[cm->seq_params.sb_size];
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
  td->xd.corrupted = 0;

  // The jmp_buf is valid only for the duration of the function that calls
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
  // before it returns.
  if (setjmp(thread_data->error_info.jmp)) {
    thread_data->error_info.setjmp = 0;
    thread_data->td->xd.corrupted = 1;
#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    // Tell every other worker to exit its wait loop.
    frame_row_mt_info->row_mt_exit = 1;
#if CONFIG_MULTITHREAD
    pthread_cond_broadcast(pbi->row_mt_cond_);
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
    return 0;
  }
  thread_data->error_info.setjmp = 1;

  const int num_planes = av1_num_planes(cm);
  allow_update_cdf = cm->large_scale_tile ? 0 : 1;
  allow_update_cdf = allow_update_cdf && !cm->disable_cdf_update;

  assert(cm->tile_cols > 0);
  // Phase 1: parse whole tiles from the shared job queue.
  while (1) {
    TileJobsDec *cur_job_info = get_dec_job_info(&pbi->tile_mt_info);

    if (cur_job_info != NULL && !td->xd.corrupted) {
      const TileBufferDec *const tile_buffer = cur_job_info->tile_buffer;
      TileDataDec *const tile_data = cur_job_info->tile_data;
      tile_worker_hook_init(pbi, thread_data, tile_buffer, tile_data,
                            allow_update_cdf);

      set_decode_func_pointers(td, 0x1);

      // decode tile
      TileInfo tile_info = tile_data->tile_info;
      int tile_row = tile_info.tile_row;

      av1_zero_above_context(cm, &td->xd, tile_info.mi_col_start,
                             tile_info.mi_col_end, tile_row);
      av1_reset_loop_filter_delta(&td->xd, num_planes);
      av1_reset_loop_restoration(&td->xd, num_planes);

      for (int mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
           mi_row += cm->seq_params.mib_size) {
        av1_zero_left_context(&td->xd);

        for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
             mi_col += cm->seq_params.mib_size) {
          // Coefficients are parsed into the shared frame-wide buffer so the
          // decode phase (possibly another thread) can pick them up.
          set_cb_buffer(pbi, &td->xd, pbi->cb_buffer_base, num_planes, mi_row,
                        mi_col);

          // Bit-stream parsing of the superblock
          decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
                           cm->seq_params.sb_size, 0x1);
        }
        signal_parse_sb_row_done(pbi, tile_data, sb_mi_size);
      }

      int corrupted =
          (check_trailing_bits_after_symbol_coder(td->bit_reader)) ? 1 : 0;
      aom_merge_corrupted_flag(&td->xd.corrupted, corrupted);
    } else {
      break;
    }
  }

  // Phase 2: reconstruct superblock rows as they become available.
  set_decode_func_pointers(td, 0x2);

  while (1) {
    AV1DecRowMTJobInfo next_job_info;
    int end_of_frame = 0;

#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    // Wait until a row is decodable or the frame is done/aborted.
    while (!get_next_job_info(pbi, &next_job_info, &end_of_frame)) {
#if CONFIG_MULTITHREAD
      pthread_cond_wait(pbi->row_mt_cond_, pbi->row_mt_mutex_);
#endif
    }
#if CONFIG_MULTITHREAD
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif

    if (end_of_frame) break;

    int tile_row = next_job_info.tile_row;
    int tile_col = next_job_info.tile_col;
    int mi_row = next_job_info.mi_row;

    TileDataDec *tile_data =
        pbi->tile_data + tile_row * cm->tile_cols + tile_col;
    AV1DecRowMTSync *dec_row_mt_sync = &tile_data->dec_row_mt_sync;
    TileInfo tile_info = tile_data->tile_info;

    av1_tile_init(&td->xd.tile, cm, tile_row, tile_col);
    av1_init_macroblockd(cm, &td->xd, td->dqcoeff);
    td->xd.error_info = &thread_data->error_info;

    decode_tile_sb_row(pbi, td, tile_info, mi_row);

#if CONFIG_MULTITHREAD
    pthread_mutex_lock(pbi->row_mt_mutex_);
#endif
    dec_row_mt_sync->num_threads_working--;
#if CONFIG_MULTITHREAD
    pthread_mutex_unlock(pbi->row_mt_mutex_);
#endif
  }
  thread_data->error_info.setjmp = 0;
  return !td->xd.corrupted;
}
3562 | | |
3563 | | // sorts in descending order |
3564 | 0 | static int compare_tile_buffers(const void *a, const void *b) { |
3565 | 0 | const TileJobsDec *const buf1 = (const TileJobsDec *)a; |
3566 | 0 | const TileJobsDec *const buf2 = (const TileJobsDec *)b; |
3567 | 0 | return (((int)buf2->tile_buffer->size) - ((int)buf1->tile_buffer->size)); |
3568 | 0 | } |
3569 | | |
3570 | | static void enqueue_tile_jobs(AV1Decoder *pbi, AV1_COMMON *cm, |
3571 | | int tile_rows_start, int tile_rows_end, |
3572 | | int tile_cols_start, int tile_cols_end, |
3573 | 0 | int startTile, int endTile) { |
3574 | 0 | AV1DecTileMT *tile_mt_info = &pbi->tile_mt_info; |
3575 | 0 | TileJobsDec *tile_job_queue = tile_mt_info->job_queue; |
3576 | 0 | tile_mt_info->jobs_enqueued = 0; |
3577 | 0 | tile_mt_info->jobs_dequeued = 0; |
3578 | 0 |
|
3579 | 0 | for (int row = tile_rows_start; row < tile_rows_end; row++) { |
3580 | 0 | for (int col = tile_cols_start; col < tile_cols_end; col++) { |
3581 | 0 | if (row * cm->tile_cols + col < startTile || |
3582 | 0 | row * cm->tile_cols + col > endTile) |
3583 | 0 | continue; |
3584 | 0 | tile_job_queue->tile_buffer = &pbi->tile_buffers[row][col]; |
3585 | 0 | tile_job_queue->tile_data = pbi->tile_data + row * cm->tile_cols + col; |
3586 | 0 | tile_job_queue++; |
3587 | 0 | tile_mt_info->jobs_enqueued++; |
3588 | 0 | } |
3589 | 0 | } |
3590 | 0 | } |
3591 | | |
3592 | | static void alloc_dec_jobs(AV1DecTileMT *tile_mt_info, AV1_COMMON *cm, |
3593 | 0 | int tile_rows, int tile_cols) { |
3594 | 0 | tile_mt_info->alloc_tile_rows = tile_rows; |
3595 | 0 | tile_mt_info->alloc_tile_cols = tile_cols; |
3596 | 0 | int num_tiles = tile_rows * tile_cols; |
3597 | 0 | #if CONFIG_MULTITHREAD |
3598 | 0 | { |
3599 | 0 | CHECK_MEM_ERROR(cm, tile_mt_info->job_mutex, |
3600 | 0 | aom_malloc(sizeof(*tile_mt_info->job_mutex) * num_tiles)); |
3601 | 0 |
|
3602 | 0 | for (int i = 0; i < num_tiles; i++) { |
3603 | 0 | pthread_mutex_init(&tile_mt_info->job_mutex[i], NULL); |
3604 | 0 | } |
3605 | 0 | } |
3606 | 0 | #endif |
3607 | 0 | CHECK_MEM_ERROR(cm, tile_mt_info->job_queue, |
3608 | 0 | aom_malloc(sizeof(*tile_mt_info->job_queue) * num_tiles)); |
3609 | 0 | } |
3610 | | |
3611 | 0 | void av1_free_mc_tmp_buf(ThreadData *thread_data) { |
3612 | 0 | int ref; |
3613 | 0 | for (ref = 0; ref < 2; ref++) { |
3614 | 0 | if (thread_data->mc_buf_use_highbd) |
3615 | 0 | aom_free(CONVERT_TO_SHORTPTR(thread_data->mc_buf[ref])); |
3616 | 0 | else |
3617 | 0 | aom_free(thread_data->mc_buf[ref]); |
3618 | 0 | thread_data->mc_buf[ref] = NULL; |
3619 | 0 | } |
3620 | 0 | thread_data->mc_buf_size = 0; |
3621 | 0 | thread_data->mc_buf_use_highbd = 0; |
3622 | 0 |
|
3623 | 0 | aom_free(thread_data->tmp_conv_dst); |
3624 | 0 | thread_data->tmp_conv_dst = NULL; |
3625 | 0 | for (int i = 0; i < 2; ++i) { |
3626 | 0 | aom_free(thread_data->tmp_obmc_bufs[i]); |
3627 | 0 | thread_data->tmp_obmc_bufs[i] = NULL; |
3628 | 0 | } |
3629 | 0 | } |
3630 | | |
3631 | | static void allocate_mc_tmp_buf(AV1_COMMON *const cm, ThreadData *thread_data, |
3632 | 0 | int buf_size, int use_highbd) { |
3633 | 0 | for (int ref = 0; ref < 2; ref++) { |
3634 | 0 | if (use_highbd) { |
3635 | 0 | uint16_t *hbd_mc_buf; |
3636 | 0 | CHECK_MEM_ERROR(cm, hbd_mc_buf, (uint16_t *)aom_memalign(16, buf_size)); |
3637 | 0 | thread_data->mc_buf[ref] = CONVERT_TO_BYTEPTR(hbd_mc_buf); |
3638 | 0 | } else { |
3639 | 0 | CHECK_MEM_ERROR(cm, thread_data->mc_buf[ref], |
3640 | 0 | (uint8_t *)aom_memalign(16, buf_size)); |
3641 | 0 | } |
3642 | 0 | } |
3643 | 0 | thread_data->mc_buf_size = buf_size; |
3644 | 0 | thread_data->mc_buf_use_highbd = use_highbd; |
3645 | 0 |
|
3646 | 0 | CHECK_MEM_ERROR(cm, thread_data->tmp_conv_dst, |
3647 | 0 | aom_memalign(32, MAX_SB_SIZE * MAX_SB_SIZE * |
3648 | 0 | sizeof(*thread_data->tmp_conv_dst))); |
3649 | 0 | for (int i = 0; i < 2; ++i) { |
3650 | 0 | CHECK_MEM_ERROR( |
3651 | 0 | cm, thread_data->tmp_obmc_bufs[i], |
3652 | 0 | aom_memalign(16, 2 * MAX_MB_PLANE * MAX_SB_SQUARE * |
3653 | 0 | sizeof(*thread_data->tmp_obmc_bufs[i]))); |
3654 | 0 | } |
3655 | 0 | } |
3656 | | |
// Re-arms every decode worker for a new frame: copies the frame-level
// macroblockd (pbi->mb) into each worker's thread-local state, points that
// state at the worker's own scratch buffers, and installs worker_hook as
// the job to run.
static void reset_dec_workers(AV1Decoder *pbi, AVxWorkerHook worker_hook,
                              int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();

  // Reset tile decoding hook
  for (int worker_idx = 0; worker_idx < num_workers; ++worker_idx) {
    AVxWorker *const worker = &pbi->tile_workers[worker_idx];
    DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
    // Start from a copy of the shared macroblockd, then rewire the scratch
    // pointers to this thread's private allocations.
    thread_data->td->xd = pbi->mb;
    thread_data->td->xd.corrupted = 0;
    thread_data->td->xd.mc_buf[0] = thread_data->td->mc_buf[0];
    thread_data->td->xd.mc_buf[1] = thread_data->td->mc_buf[1];
    thread_data->td->xd.tmp_conv_dst = thread_data->td->tmp_conv_dst;
    for (int j = 0; j < 2; ++j) {
      thread_data->td->xd.tmp_obmc_bufs[j] = thread_data->td->tmp_obmc_bufs[j];
    }
    // Wait for any job still running on this worker before re-arming it.
    winterface->sync(worker);

    worker->hook = worker_hook;
    worker->data1 = thread_data;
    worker->data2 = pbi;
  }
#if CONFIG_ACCOUNTING
  if (pbi->acct_enabled) {
    aom_accounting_reset(&pbi->accounting);
  }
#endif
}
3685 | | |
3686 | | static void launch_dec_workers(AV1Decoder *pbi, const uint8_t *data_end, |
3687 | 0 | int num_workers) { |
3688 | 0 | const AVxWorkerInterface *const winterface = aom_get_worker_interface(); |
3689 | 0 |
|
3690 | 0 | for (int worker_idx = 0; worker_idx < num_workers; ++worker_idx) { |
3691 | 0 | AVxWorker *const worker = &pbi->tile_workers[worker_idx]; |
3692 | 0 | DecWorkerData *const thread_data = (DecWorkerData *)worker->data1; |
3693 | 0 |
|
3694 | 0 | thread_data->data_end = data_end; |
3695 | 0 |
|
3696 | 0 | worker->had_error = 0; |
3697 | 0 | if (worker_idx == num_workers - 1) { |
3698 | 0 | winterface->execute(worker); |
3699 | 0 | } else { |
3700 | 0 | winterface->launch(worker); |
3701 | 0 | } |
3702 | 0 | } |
3703 | 0 | } |
3704 | | |
3705 | 0 | static void sync_dec_workers(AV1Decoder *pbi, int num_workers) { |
3706 | 0 | const AVxWorkerInterface *const winterface = aom_get_worker_interface(); |
3707 | 0 | int corrupted = 0; |
3708 | 0 |
|
3709 | 0 | for (int worker_idx = num_workers; worker_idx > 0; --worker_idx) { |
3710 | 0 | AVxWorker *const worker = &pbi->tile_workers[worker_idx - 1]; |
3711 | 0 | aom_merge_corrupted_flag(&corrupted, !winterface->sync(worker)); |
3712 | 0 | } |
3713 | 0 |
|
3714 | 0 | pbi->mb.corrupted = corrupted; |
3715 | 0 | } |
3716 | | |
// One-time (and per-frame) setup for multithreaded decoding: lazily creates
// the worker pool and its per-worker thread data, then makes sure every
// worker thread owns motion-compensation scratch buffers of the size
// required by the current bit depth.
static void decode_mt_init(AV1Decoder *pbi) {
  AV1_COMMON *const cm = &pbi->common;
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  int worker_idx;

  // Create workers and thread_data
  if (pbi->num_workers == 0) {
    const int num_threads = pbi->max_threads;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
    CHECK_MEM_ERROR(cm, pbi->thread_data,
                    aom_malloc(num_threads * sizeof(*pbi->thread_data)));

    for (worker_idx = 0; worker_idx < num_threads; ++worker_idx) {
      AVxWorker *const worker = &pbi->tile_workers[worker_idx];
      DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
      ++pbi->num_workers;

      winterface->init(worker);
      // Only the first num_threads-1 workers get real OS threads; the last
      // slot is serviced by the main thread, so no reset() is needed there.
      if (worker_idx < num_threads - 1 && !winterface->reset(worker)) {
        aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }

      if (worker_idx < num_threads - 1) {
        // Allocate thread data.
        CHECK_MEM_ERROR(cm, thread_data->td,
                        aom_memalign(32, sizeof(*thread_data->td)));
        av1_zero(*thread_data->td);
      } else {
        // Main thread acts as a worker and uses the thread data in pbi
        thread_data->td = &pbi->td;
      }
      thread_data->error_info.error_code = AOM_CODEC_OK;
      thread_data->error_info.setjmp = 0;
    }
  }
  // (Re)allocate MC scratch buffers whenever the required size changed,
  // e.g. after a bit-depth switch.
  const int use_highbd = cm->seq_params.use_highbitdepth ? 1 : 0;
  const int buf_size = MC_TEMP_BUF_PELS << use_highbd;
  // NOTE(review): this loop stops at max_threads - 1, so the main thread's
  // td (pbi->td) is not covered here — presumably its buffers are managed
  // elsewhere; confirm against the rest of the decoder.
  for (worker_idx = 0; worker_idx < pbi->max_threads - 1; ++worker_idx) {
    DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
    if (thread_data->td->mc_buf_size != buf_size) {
      av1_free_mc_tmp_buf(thread_data->td);
      allocate_mc_tmp_buf(cm, thread_data->td, buf_size, use_highbd);
    }
  }
}
3764 | | |
3765 | | static void tile_mt_queue(AV1Decoder *pbi, int tile_cols, int tile_rows, |
3766 | | int tile_rows_start, int tile_rows_end, |
3767 | | int tile_cols_start, int tile_cols_end, |
3768 | 0 | int start_tile, int end_tile) { |
3769 | 0 | AV1_COMMON *const cm = &pbi->common; |
3770 | 0 | if (pbi->tile_mt_info.alloc_tile_cols != tile_cols || |
3771 | 0 | pbi->tile_mt_info.alloc_tile_rows != tile_rows) { |
3772 | 0 | av1_dealloc_dec_jobs(&pbi->tile_mt_info); |
3773 | 0 | alloc_dec_jobs(&pbi->tile_mt_info, cm, tile_rows, tile_cols); |
3774 | 0 | } |
3775 | 0 | enqueue_tile_jobs(pbi, cm, tile_rows_start, tile_rows_end, tile_cols_start, |
3776 | 0 | tile_cols_end, start_tile, end_tile); |
3777 | 0 | qsort(pbi->tile_mt_info.job_queue, pbi->tile_mt_info.jobs_enqueued, |
3778 | 0 | sizeof(pbi->tile_mt_info.job_queue[0]), compare_tile_buffers); |
3779 | 0 | } |
3780 | | |
// Decodes the tiles in [start_tile, end_tile] using tile-level
// multithreading: one worker per tile job. Returns a pointer one past the
// end of the last decoded tile's data (or `data` unchanged when the decode
// window contains no tiles). Raises AOM_CODEC_CORRUPT_FRAME on failure.
static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
                                      const uint8_t *data_end, int start_tile,
                                      int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  const int n_tiles = tile_cols * tile_rows;
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
  // Large-scale-tile mode may restrict decoding to a single row/column.
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
  const int single_row = pbi->dec_tile_row >= 0;
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
  const int single_col = pbi->dec_tile_col >= 0;
  int tile_rows_start;
  int tile_rows_end;
  int tile_cols_start;
  int tile_cols_end;
  int tile_count_tg;
  int num_workers;
  const uint8_t *raw_data_end = NULL;

  if (cm->large_scale_tile) {
    tile_rows_start = single_row ? dec_tile_row : 0;
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
    tile_cols_start = single_col ? dec_tile_col : 0;
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
  } else {
    tile_rows_start = 0;
    tile_rows_end = tile_rows;
    tile_cols_start = 0;
    tile_cols_end = tile_cols;
  }
  tile_count_tg = end_tile - start_tile + 1;
  // No point in spawning more workers than there are tiles in this group.
  num_workers = AOMMIN(pbi->max_threads, tile_count_tg);

  // No tiles to decode.
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
      // First tile is larger than end_tile.
      tile_rows_start * tile_cols + tile_cols_start > end_tile ||
      // Last tile is smaller than start_tile.
      (tile_rows_end - 1) * tile_cols + tile_cols_end - 1 < start_tile)
    return data;

  assert(tile_rows <= MAX_TILE_ROWS);
  assert(tile_cols <= MAX_TILE_COLS);
  assert(tile_count_tg > 0);
  assert(num_workers > 0);
  assert(start_tile <= end_tile);
  assert(start_tile >= 0 && end_tile < n_tiles);

  decode_mt_init(pbi);

  // get tile size in tile group
#if EXT_TILE_DEBUG
  if (cm->large_scale_tile) assert(pbi->ext_tile_debug == 1);
  if (cm->large_scale_tile)
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
  else
#endif  // EXT_TILE_DEBUG
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);

  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
    decoder_alloc_tile_data(pbi, n_tiles);
  }

  // (Re)initialize per-tile info for every tile in the frame.
  for (int row = 0; row < tile_rows; row++) {
    for (int col = 0; col < tile_cols; col++) {
      TileDataDec *tile_data = pbi->tile_data + row * cm->tile_cols + col;
      av1_tile_init(&tile_data->tile_info, cm, row, col);
    }
  }

  tile_mt_queue(pbi, tile_cols, tile_rows, tile_rows_start, tile_rows_end,
                tile_cols_start, tile_cols_end, start_tile, end_tile);

  // Run the tile jobs on the worker pool and wait for completion.
  reset_dec_workers(pbi, tile_worker_hook, num_workers);
  launch_dec_workers(pbi, data_end, num_workers);
  sync_dec_workers(pbi, num_workers);

  if (pbi->mb.corrupted)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Failed to decode tile data");

  if (cm->large_scale_tile) {
    if (n_tiles == 1) {
      // Find the end of the single tile buffer
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
    }
    // Return the end of the last tile buffer
    return raw_data_end;
  }
  TileDataDec *const tile_data = pbi->tile_data + end_tile;

  return aom_reader_find_end(&tile_data->bit_reader);
}
3875 | | |
3876 | 0 | static void dec_alloc_cb_buf(AV1Decoder *pbi) { |
3877 | 0 | AV1_COMMON *const cm = &pbi->common; |
3878 | 0 | int size = ((cm->mi_rows >> cm->seq_params.mib_size_log2) + 1) * |
3879 | 0 | ((cm->mi_cols >> cm->seq_params.mib_size_log2) + 1); |
3880 | 0 |
|
3881 | 0 | if (pbi->cb_buffer_alloc_size < size) { |
3882 | 0 | av1_dec_free_cb_buf(pbi); |
3883 | 0 | CHECK_MEM_ERROR(cm, pbi->cb_buffer_base, |
3884 | 0 | aom_memalign(32, sizeof(*pbi->cb_buffer_base) * size)); |
3885 | 0 | pbi->cb_buffer_alloc_size = size; |
3886 | 0 | } |
3887 | 0 | } |
3888 | | |
// Initializes the shared row-multithreading state for a frame: records the
// decode window in frame_row_mt_info, resets each tile's row-sync counters,
// and lazily creates the global row-MT mutex/condvar used by the workers.
static void row_mt_frame_init(AV1Decoder *pbi, int tile_rows_start,
                              int tile_rows_end, int tile_cols_start,
                              int tile_cols_end, int start_tile, int end_tile,
                              int max_sb_rows) {
  AV1_COMMON *const cm = &pbi->common;
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;

  frame_row_mt_info->tile_rows_start = tile_rows_start;
  frame_row_mt_info->tile_rows_end = tile_rows_end;
  frame_row_mt_info->tile_cols_start = tile_cols_start;
  frame_row_mt_info->tile_cols_end = tile_cols_end;
  frame_row_mt_info->start_tile = start_tile;
  frame_row_mt_info->end_tile = end_tile;
  frame_row_mt_info->mi_rows_to_decode = 0;
  frame_row_mt_info->mi_rows_parse_done = 0;
  frame_row_mt_info->mi_rows_decode_started = 0;
  frame_row_mt_info->row_mt_exit = 0;

  for (int tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
    for (int tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
      // Only tiles within the current tile group participate.
      if (tile_row * cm->tile_cols + tile_col < start_tile ||
          tile_row * cm->tile_cols + tile_col > end_tile)
        continue;

      TileDataDec *const tile_data =
          pbi->tile_data + tile_row * cm->tile_cols + tile_col;
      TileInfo tile_info = tile_data->tile_info;

      tile_data->dec_row_mt_sync.mi_rows_parse_done = 0;
      tile_data->dec_row_mt_sync.mi_rows_decode_started = 0;
      tile_data->dec_row_mt_sync.num_threads_working = 0;
      // Round the tile dimensions up to a whole number of superblocks.
      tile_data->dec_row_mt_sync.mi_rows =
          ALIGN_POWER_OF_TWO(tile_info.mi_row_end - tile_info.mi_row_start,
                             cm->seq_params.mib_size_log2);
      tile_data->dec_row_mt_sync.mi_cols =
          ALIGN_POWER_OF_TWO(tile_info.mi_col_end - tile_info.mi_col_start,
                             cm->seq_params.mib_size_log2);

      frame_row_mt_info->mi_rows_to_decode +=
          tile_data->dec_row_mt_sync.mi_rows;

      // Initialize cur_sb_col to -1 for all SB rows.
      memset(tile_data->dec_row_mt_sync.cur_sb_col, -1,
             sizeof(*tile_data->dec_row_mt_sync.cur_sb_col) * max_sb_rows);
    }
  }

#if CONFIG_MULTITHREAD
  // Lazily create the process-wide row-MT mutex and condition variable;
  // they persist across frames once allocated.
  if (pbi->row_mt_mutex_ == NULL) {
    CHECK_MEM_ERROR(cm, pbi->row_mt_mutex_,
                    aom_malloc(sizeof(*(pbi->row_mt_mutex_))));
    if (pbi->row_mt_mutex_) {
      pthread_mutex_init(pbi->row_mt_mutex_, NULL);
    }
  }

  if (pbi->row_mt_cond_ == NULL) {
    CHECK_MEM_ERROR(cm, pbi->row_mt_cond_,
                    aom_malloc(sizeof(*(pbi->row_mt_cond_))));
    if (pbi->row_mt_cond_) {
      pthread_cond_init(pbi->row_mt_cond_, NULL);
    }
  }
#endif
}
3954 | | |
// Decodes the tiles in [start_tile, end_tile] using row-level
// multithreading: workers pick up superblock-row jobs across tiles rather
// than whole tiles. Returns a pointer one past the end of the last decoded
// tile's data (or `data` unchanged when the decode window contains no
// tiles). Raises AOM_CODEC_CORRUPT_FRAME on failure.
static const uint8_t *decode_tiles_row_mt(AV1Decoder *pbi, const uint8_t *data,
                                          const uint8_t *data_end,
                                          int start_tile, int end_tile) {
  AV1_COMMON *const cm = &pbi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  const int n_tiles = tile_cols * tile_rows;
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
  // Large-scale-tile mode may restrict decoding to a single row/column.
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
  const int single_row = pbi->dec_tile_row >= 0;
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
  const int single_col = pbi->dec_tile_col >= 0;
  int tile_rows_start;
  int tile_rows_end;
  int tile_cols_start;
  int tile_cols_end;
  int tile_count_tg;
  int num_workers;
  const uint8_t *raw_data_end = NULL;
  int max_sb_rows = 0;

  if (cm->large_scale_tile) {
    tile_rows_start = single_row ? dec_tile_row : 0;
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
    tile_cols_start = single_col ? dec_tile_col : 0;
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
  } else {
    tile_rows_start = 0;
    tile_rows_end = tile_rows;
    tile_cols_start = 0;
    tile_cols_end = tile_cols;
  }
  tile_count_tg = end_tile - start_tile + 1;
  // Row-MT can use all threads regardless of tile count.
  num_workers = pbi->max_threads;

  // No tiles to decode.
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
      // First tile is larger than end_tile.
      tile_rows_start * tile_cols + tile_cols_start > end_tile ||
      // Last tile is smaller than start_tile.
      (tile_rows_end - 1) * tile_cols + tile_cols_end - 1 < start_tile)
    return data;

  assert(tile_rows <= MAX_TILE_ROWS);
  assert(tile_cols <= MAX_TILE_COLS);
  assert(tile_count_tg > 0);
  assert(num_workers > 0);
  assert(start_tile <= end_tile);
  assert(start_tile >= 0 && end_tile < n_tiles);

  (void)tile_count_tg;

  decode_mt_init(pbi);

  // get tile size in tile group
#if EXT_TILE_DEBUG
  if (cm->large_scale_tile) assert(pbi->ext_tile_debug == 1);
  if (cm->large_scale_tile)
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
  else
#endif  // EXT_TILE_DEBUG
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);

  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
    // Tear down existing row-sync state before re-allocating tile data.
    for (int i = 0; i < pbi->allocated_tiles; i++) {
      TileDataDec *const tile_data = pbi->tile_data + i;
      av1_dec_row_mt_dealloc(&tile_data->dec_row_mt_sync);
    }
    decoder_alloc_tile_data(pbi, n_tiles);
  }

  // Initialize per-tile info and find the tallest tile in superblock rows;
  // the row-sync arrays are sized to that maximum.
  for (int row = 0; row < tile_rows; row++) {
    for (int col = 0; col < tile_cols; col++) {
      TileDataDec *tile_data = pbi->tile_data + row * cm->tile_cols + col;
      av1_tile_init(&tile_data->tile_info, cm, row, col);

      max_sb_rows = AOMMAX(max_sb_rows,
                           av1_get_sb_rows_in_tile(cm, tile_data->tile_info));
    }
  }

  if (pbi->allocated_row_mt_sync_rows != max_sb_rows) {
    for (int i = 0; i < n_tiles; ++i) {
      TileDataDec *const tile_data = pbi->tile_data + i;
      av1_dec_row_mt_dealloc(&tile_data->dec_row_mt_sync);
      dec_row_mt_alloc(&tile_data->dec_row_mt_sync, cm, max_sb_rows);
    }
    pbi->allocated_row_mt_sync_rows = max_sb_rows;
  }

  tile_mt_queue(pbi, tile_cols, tile_rows, tile_rows_start, tile_rows_end,
                tile_cols_start, tile_cols_end, start_tile, end_tile);

  dec_alloc_cb_buf(pbi);

  row_mt_frame_init(pbi, tile_rows_start, tile_rows_end, tile_cols_start,
                    tile_cols_end, start_tile, end_tile, max_sb_rows);

  // Run the row-MT jobs on the worker pool and wait for completion.
  reset_dec_workers(pbi, row_mt_worker_hook, num_workers);
  launch_dec_workers(pbi, data_end, num_workers);
  sync_dec_workers(pbi, num_workers);

  if (pbi->mb.corrupted)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Failed to decode tile data");

  if (cm->large_scale_tile) {
    if (n_tiles == 1) {
      // Find the end of the single tile buffer
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
    }
    // Return the end of the last tile buffer
    return raw_data_end;
  }
  TileDataDec *const tile_data = pbi->tile_data + end_tile;

  return aom_reader_find_end(&tile_data->bit_reader);
}
4073 | | |
// Bit-buffer error callback: invoked when a read runs past the end of the
// packet. Reports a corrupt-frame error through the decoder's error info.
static void error_handler(void *data) {
  AV1_COMMON *const cm = (AV1_COMMON *)data;
  aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
}
4078 | | |
4079 | | // Reads the high_bitdepth and twelve_bit fields in color_config() and sets |
4080 | | // seq_params->bit_depth based on the values of those fields and |
4081 | | // seq_params->profile. Reports errors by calling rb->error_handler() or |
4082 | | // aom_internal_error(). |
4083 | | static void read_bitdepth(struct aom_read_bit_buffer *rb, |
4084 | | SequenceHeader *seq_params, |
4085 | 0 | struct aom_internal_error_info *error_info) { |
4086 | 0 | const int high_bitdepth = aom_rb_read_bit(rb); |
4087 | 0 | if (seq_params->profile == PROFILE_2 && high_bitdepth) { |
4088 | 0 | const int twelve_bit = aom_rb_read_bit(rb); |
4089 | 0 | seq_params->bit_depth = twelve_bit ? AOM_BITS_12 : AOM_BITS_10; |
4090 | 0 | } else if (seq_params->profile <= PROFILE_2) { |
4091 | 0 | seq_params->bit_depth = high_bitdepth ? AOM_BITS_10 : AOM_BITS_8; |
4092 | 0 | } else { |
4093 | 0 | aom_internal_error(error_info, AOM_CODEC_UNSUP_BITSTREAM, |
4094 | 0 | "Unsupported profile/bit-depth combination"); |
4095 | 0 | } |
4096 | 0 | } |
4097 | | |
// Parses the film_grain_params() syntax element into cm->film_grain_params.
// The sequence of bit reads mirrors the bitstream syntax exactly, so the
// statement order here is part of the decoding contract. Errors are
// reported via aom_internal_error().
void av1_read_film_grain_params(AV1_COMMON *cm,
                                struct aom_read_bit_buffer *rb) {
  aom_film_grain_t *pars = &cm->film_grain_params;
  const SequenceHeader *const seq_params = &cm->seq_params;

  pars->apply_grain = aom_rb_read_bit(rb);
  if (!pars->apply_grain) {
    // Grain disabled: clear all parameters and stop parsing.
    memset(pars, 0, sizeof(*pars));
    return;
  }

  pars->random_seed = aom_rb_read_literal(rb, 16);
  // Only inter frames may inherit parameters from a reference; all other
  // frame types always code a fresh parameter set.
  if (cm->frame_type == INTER_FRAME)
    pars->update_parameters = aom_rb_read_bit(rb);
  else
    pars->update_parameters = 1;

  pars->bit_depth = seq_params->bit_depth;

  if (!pars->update_parameters) {
    // inherit parameters from a previous reference frame
    RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
    int film_grain_params_ref_idx = aom_rb_read_literal(rb, 3);
    int buf_idx = cm->ref_frame_map[film_grain_params_ref_idx];
    if (buf_idx == INVALID_IDX) {
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Invalid Film grain reference idx");
    }
    if (!frame_bufs[buf_idx].film_grain_params_present) {
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Film grain reference parameters not available");
    }
    uint16_t random_seed = pars->random_seed;
    *pars = frame_bufs[buf_idx].film_grain_params;  // inherit parameters
    pars->random_seed = random_seed;                // with new random seed
    return;
  }

  // Scaling functions parameters
  pars->num_y_points = aom_rb_read_literal(rb, 4);  // max 14
  if (pars->num_y_points > 14)
    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                       "Number of points for film grain luma scaling function "
                       "exceeds the maximum value.");
  for (int i = 0; i < pars->num_y_points; i++) {
    pars->scaling_points_y[i][0] = aom_rb_read_literal(rb, 8);
    // x-coordinates must be strictly increasing.
    if (i && pars->scaling_points_y[i - 1][0] >= pars->scaling_points_y[i][0])
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "First coordinate of the scaling function points "
                         "shall be increasing.");
    pars->scaling_points_y[i][1] = aom_rb_read_literal(rb, 8);
  }

  if (!seq_params->monochrome)
    pars->chroma_scaling_from_luma = aom_rb_read_bit(rb);
  else
    pars->chroma_scaling_from_luma = 0;

  // Chroma scaling points are absent for monochrome, when chroma reuses the
  // luma scaling, or for 4:2:0 frames with no luma points.
  if (seq_params->monochrome || pars->chroma_scaling_from_luma ||
      ((seq_params->subsampling_x == 1) && (seq_params->subsampling_y == 1) &&
       (pars->num_y_points == 0))) {
    pars->num_cb_points = 0;
    pars->num_cr_points = 0;
  } else {
    pars->num_cb_points = aom_rb_read_literal(rb, 4);  // max 10
    if (pars->num_cb_points > 10)
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Number of points for film grain cb scaling function "
                         "exceeds the maximum value.");
    for (int i = 0; i < pars->num_cb_points; i++) {
      pars->scaling_points_cb[i][0] = aom_rb_read_literal(rb, 8);
      if (i &&
          pars->scaling_points_cb[i - 1][0] >= pars->scaling_points_cb[i][0])
        aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "First coordinate of the scaling function points "
                           "shall be increasing.");
      pars->scaling_points_cb[i][1] = aom_rb_read_literal(rb, 8);
    }

    pars->num_cr_points = aom_rb_read_literal(rb, 4);  // max 10
    if (pars->num_cr_points > 10)
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Number of points for film grain cr scaling function "
                         "exceeds the maximum value.");
    for (int i = 0; i < pars->num_cr_points; i++) {
      pars->scaling_points_cr[i][0] = aom_rb_read_literal(rb, 8);
      if (i &&
          pars->scaling_points_cr[i - 1][0] >= pars->scaling_points_cr[i][0])
        aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                           "First coordinate of the scaling function points "
                           "shall be increasing.");
      pars->scaling_points_cr[i][1] = aom_rb_read_literal(rb, 8);
    }

    // 4:2:0 requires grain on both chroma planes or on neither.
    if ((seq_params->subsampling_x == 1) && (seq_params->subsampling_y == 1) &&
        (((pars->num_cb_points == 0) && (pars->num_cr_points != 0)) ||
         ((pars->num_cb_points != 0) && (pars->num_cr_points == 0))))
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "In YCbCr 4:2:0, film grain shall be applied "
                         "to both chroma components or neither.");
  }

  pars->scaling_shift = aom_rb_read_literal(rb, 2) + 8;  // 8 + value

  // AR coefficients
  // Only sent if the corresponding scaling function has
  // more than 0 points

  pars->ar_coeff_lag = aom_rb_read_literal(rb, 2);

  int num_pos_luma = 2 * pars->ar_coeff_lag * (pars->ar_coeff_lag + 1);
  int num_pos_chroma = num_pos_luma;
  // Chroma AR filters get one extra tap (on luma) when luma grain exists.
  if (pars->num_y_points > 0) ++num_pos_chroma;

  if (pars->num_y_points)
    for (int i = 0; i < num_pos_luma; i++)
      pars->ar_coeffs_y[i] = aom_rb_read_literal(rb, 8) - 128;

  if (pars->num_cb_points || pars->chroma_scaling_from_luma)
    for (int i = 0; i < num_pos_chroma; i++)
      pars->ar_coeffs_cb[i] = aom_rb_read_literal(rb, 8) - 128;

  if (pars->num_cr_points || pars->chroma_scaling_from_luma)
    for (int i = 0; i < num_pos_chroma; i++)
      pars->ar_coeffs_cr[i] = aom_rb_read_literal(rb, 8) - 128;

  pars->ar_coeff_shift = aom_rb_read_literal(rb, 2) + 6;  // 6 + value

  pars->grain_scale_shift = aom_rb_read_literal(rb, 2);

  if (pars->num_cb_points) {
    pars->cb_mult = aom_rb_read_literal(rb, 8);
    pars->cb_luma_mult = aom_rb_read_literal(rb, 8);
    pars->cb_offset = aom_rb_read_literal(rb, 9);
  }

  if (pars->num_cr_points) {
    pars->cr_mult = aom_rb_read_literal(rb, 8);
    pars->cr_luma_mult = aom_rb_read_literal(rb, 8);
    pars->cr_offset = aom_rb_read_literal(rb, 9);
  }

  pars->overlap_flag = aom_rb_read_bit(rb);

  pars->clip_to_restricted_range = aom_rb_read_bit(rb);
}
4244 | | |
4245 | 0 | static void read_film_grain(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { |
4246 | 0 | if (cm->seq_params.film_grain_params_present && |
4247 | 0 | (cm->show_frame || cm->showable_frame)) { |
4248 | 0 | av1_read_film_grain_params(cm, rb); |
4249 | 0 | } else { |
4250 | 0 | memset(&cm->film_grain_params, 0, sizeof(cm->film_grain_params)); |
4251 | 0 | } |
4252 | 0 | cm->film_grain_params.bit_depth = cm->seq_params.bit_depth; |
4253 | 0 | memcpy(&cm->cur_frame->film_grain_params, &cm->film_grain_params, |
4254 | 0 | sizeof(aom_film_grain_t)); |
4255 | 0 | } |
4256 | | |
// Parses the color_config() syntax of the sequence header: bit depth,
// monochrome flag, CICP color description, color range, chroma subsampling
// and chroma sample position. Unsupported profile/colorspace combinations
// are reported via aom_internal_error(), which does not return.
void av1_read_color_config(struct aom_read_bit_buffer *rb,
                           int allow_lowbitdepth, SequenceHeader *seq_params,
                           struct aom_internal_error_info *error_info) {
  read_bitdepth(rb, seq_params, error_info);

  // High bit-depth buffers are required above 8 bits, and may also be forced
  // by the caller via !allow_lowbitdepth.
  seq_params->use_highbitdepth =
      seq_params->bit_depth > AOM_BITS_8 || !allow_lowbitdepth;
  // monochrome bit (not needed for PROFILE_1)
  const int is_monochrome =
      seq_params->profile != PROFILE_1 ? aom_rb_read_bit(rb) : 0;
  seq_params->monochrome = is_monochrome;
  int color_description_present_flag = aom_rb_read_bit(rb);
  if (color_description_present_flag) {
    seq_params->color_primaries = aom_rb_read_literal(rb, 8);
    seq_params->transfer_characteristics = aom_rb_read_literal(rb, 8);
    seq_params->matrix_coefficients = aom_rb_read_literal(rb, 8);
  } else {
    seq_params->color_primaries = AOM_CICP_CP_UNSPECIFIED;
    seq_params->transfer_characteristics = AOM_CICP_TC_UNSPECIFIED;
    seq_params->matrix_coefficients = AOM_CICP_MC_UNSPECIFIED;
  }
  if (is_monochrome) {
    // [16,235] (including xvycc) vs [0,255] range
    seq_params->color_range = aom_rb_read_bit(rb);
    // Monochrome carries no further chroma syntax; fixed defaults follow.
    seq_params->subsampling_y = seq_params->subsampling_x = 1;
    seq_params->chroma_sample_position = AOM_CSP_UNKNOWN;
    seq_params->separate_uv_delta_q = 0;
    return;
  }
  if (seq_params->color_primaries == AOM_CICP_CP_BT_709 &&
      seq_params->transfer_characteristics == AOM_CICP_TC_SRGB &&
      seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY) {
    // sRGB implies 4:4:4 full range; no subsampling bits are coded.
    // It would be good to remove this dependency.
    seq_params->subsampling_y = seq_params->subsampling_x = 0;
    seq_params->color_range = 1;  // assume full color-range
    // sRGB is only permitted in PROFILE_1, or PROFILE_2 at 12 bits.
    if (!(seq_params->profile == PROFILE_1 ||
          (seq_params->profile == PROFILE_2 &&
           seq_params->bit_depth == AOM_BITS_12))) {
      aom_internal_error(
          error_info, AOM_CODEC_UNSUP_BITSTREAM,
          "sRGB colorspace not compatible with specified profile");
    }
  } else {
    // [16,235] (including xvycc) vs [0,255] range
    seq_params->color_range = aom_rb_read_bit(rb);
    if (seq_params->profile == PROFILE_0) {
      // 420 only
      seq_params->subsampling_x = seq_params->subsampling_y = 1;
    } else if (seq_params->profile == PROFILE_1) {
      // 444 only
      seq_params->subsampling_x = seq_params->subsampling_y = 0;
    } else {
      assert(seq_params->profile == PROFILE_2);
      if (seq_params->bit_depth == AOM_BITS_12) {
        // Subsampling is explicitly coded only for 12-bit PROFILE_2.
        seq_params->subsampling_x = aom_rb_read_bit(rb);
        if (seq_params->subsampling_x)
          seq_params->subsampling_y = aom_rb_read_bit(rb);  // 422 or 420
        else
          seq_params->subsampling_y = 0;  // 444
      } else {
        // 422
        seq_params->subsampling_x = 1;
        seq_params->subsampling_y = 0;
      }
    }
    if (seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY &&
        (seq_params->subsampling_x || seq_params->subsampling_y)) {
      aom_internal_error(
          error_info, AOM_CODEC_UNSUP_BITSTREAM,
          "Identity CICP Matrix incompatible with non 4:4:4 color sampling");
    }
    if (seq_params->subsampling_x && seq_params->subsampling_y) {
      // Chroma sample position is only signalled for 4:2:0 streams.
      seq_params->chroma_sample_position = aom_rb_read_literal(rb, 2);
    }
  }
  seq_params->separate_uv_delta_q = aom_rb_read_bit(rb);
}
4334 | | |
// Parses timing_info(): the display tick rate and, when
// equal_picture_interval is set, the fixed tick count per picture.
// Zero-valued rate fields are rejected (aom_internal_error does not return).
void av1_read_timing_info_header(AV1_COMMON *cm,
                                 struct aom_read_bit_buffer *rb) {
  cm->timing_info.num_units_in_display_tick = aom_rb_read_unsigned_literal(
      rb, 32);  // Number of units in a display tick
  cm->timing_info.time_scale =
      aom_rb_read_unsigned_literal(rb, 32);  // Time scale
  if (cm->timing_info.num_units_in_display_tick == 0 ||
      cm->timing_info.time_scale == 0) {
    aom_internal_error(
        &cm->error, AOM_CODEC_UNSUP_BITSTREAM,
        "num_units_in_display_tick and time_scale must be greater than 0.");
  }
  cm->timing_info.equal_picture_interval =
      aom_rb_read_bit(rb);  // Equal picture interval bit
  if (cm->timing_info.equal_picture_interval) {
    cm->timing_info.num_ticks_per_picture =
        aom_rb_read_uvlc(rb) + 1;  // ticks per picture
    // The "+ 1" wraps to 0 when the coded minus-1 value is all-ones, so a
    // zero result marks the one disallowed encoding.
    if (cm->timing_info.num_ticks_per_picture == 0) {
      aom_internal_error(
          &cm->error, AOM_CODEC_UNSUP_BITSTREAM,
          "num_ticks_per_picture_minus_1 cannot be (1 << 32) − 1.");
    }
  }
}
4359 | | |
// Parses decoder_model_info(): the bit widths (each coded minus 1) used by
// later operating-point and frame-timing syntax, plus the decoding tick
// rate. Must run before av1_read_op_parameters_info(), which consumes
// encoder_decoder_buffer_delay_length.
void av1_read_decoder_model_info(AV1_COMMON *cm,
                                 struct aom_read_bit_buffer *rb) {
  cm->buffer_model.encoder_decoder_buffer_delay_length =
      aom_rb_read_literal(rb, 5) + 1;
  cm->buffer_model.num_units_in_decoding_tick = aom_rb_read_unsigned_literal(
      rb, 32);  // Number of units in a decoding tick
  cm->buffer_model.buffer_removal_time_length = aom_rb_read_literal(rb, 5) + 1;
  cm->buffer_model.frame_presentation_time_length =
      aom_rb_read_literal(rb, 5) + 1;
}
4370 | | |
// Parses operating_parameters_info() for operating point op_num: decoder
// and encoder buffer delays (field width taken from the previously parsed
// decoder model info) and the low-delay-mode flag. An out-of-range op_num
// is rejected via aom_internal_error(), which does not return, so the
// array accesses below are in bounds.
void av1_read_op_parameters_info(AV1_COMMON *const cm,
                                 struct aom_read_bit_buffer *rb, int op_num) {
  // The cm->op_params array has MAX_NUM_OPERATING_POINTS + 1 elements.
  if (op_num > MAX_NUM_OPERATING_POINTS) {
    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                       "AV1 does not support %d decoder model operating points",
                       op_num + 1);
  }

  cm->op_params[op_num].decoder_buffer_delay = aom_rb_read_unsigned_literal(
      rb, cm->buffer_model.encoder_decoder_buffer_delay_length);

  cm->op_params[op_num].encoder_buffer_delay = aom_rb_read_unsigned_literal(
      rb, cm->buffer_model.encoder_decoder_buffer_delay_length);

  cm->op_params[op_num].low_delay_mode_flag = aom_rb_read_bit(rb);
}
4388 | | |
4389 | | static void av1_read_temporal_point_info(AV1_COMMON *const cm, |
4390 | 0 | struct aom_read_bit_buffer *rb) { |
4391 | 0 | cm->frame_presentation_time = aom_rb_read_unsigned_literal( |
4392 | 0 | rb, cm->buffer_model.frame_presentation_time_length); |
4393 | 0 | } |
4394 | | |
// Parses the tool-enable portion of the sequence header: max frame size,
// frame-id numbering, superblock size, and the per-sequence feature flags.
// In reduced_still_picture_hdr mode most inter tools are forced off instead
// of being read from the bitstream.
void av1_read_sequence_header(AV1_COMMON *cm, struct aom_read_bit_buffer *rb,
                              SequenceHeader *seq_params) {
  const int num_bits_width = aom_rb_read_literal(rb, 4) + 1;
  const int num_bits_height = aom_rb_read_literal(rb, 4) + 1;
  const int max_frame_width = aom_rb_read_literal(rb, num_bits_width) + 1;
  const int max_frame_height = aom_rb_read_literal(rb, num_bits_height) + 1;

  seq_params->num_bits_width = num_bits_width;
  seq_params->num_bits_height = num_bits_height;
  seq_params->max_frame_width = max_frame_width;
  seq_params->max_frame_height = max_frame_height;

  if (seq_params->reduced_still_picture_hdr) {
    seq_params->frame_id_numbers_present_flag = 0;
  } else {
    seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
  }
  if (seq_params->frame_id_numbers_present_flag) {
    // We must always have delta_frame_id_length < frame_id_length,
    // in order for a frame to be referenced with a unique delta.
    // Avoid wasting bits by using a coding that enforces this restriction.
    seq_params->delta_frame_id_length = aom_rb_read_literal(rb, 4) + 2;
    seq_params->frame_id_length =
        aom_rb_read_literal(rb, 3) + seq_params->delta_frame_id_length + 1;
    // frame_id values are stored in ints elsewhere; cap the length at 16.
    if (seq_params->frame_id_length > 16)
      aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid frame_id_length");
  }

  setup_sb_size(seq_params, rb);

  seq_params->enable_filter_intra = aom_rb_read_bit(rb);
  seq_params->enable_intra_edge_filter = aom_rb_read_bit(rb);

  if (seq_params->reduced_still_picture_hdr) {
    // Still-picture streams carry no inter-coding tools: hard-wire the
    // defaults instead of reading them.
    seq_params->enable_interintra_compound = 0;
    seq_params->enable_masked_compound = 0;
    seq_params->enable_warped_motion = 0;
    seq_params->enable_dual_filter = 0;
    seq_params->enable_order_hint = 0;
    seq_params->enable_jnt_comp = 0;
    seq_params->enable_ref_frame_mvs = 0;
    seq_params->force_screen_content_tools = 2;  // SELECT_SCREEN_CONTENT_TOOLS
    seq_params->force_integer_mv = 2;            // SELECT_INTEGER_MV
    seq_params->order_hint_bits_minus_1 = -1;
  } else {
    seq_params->enable_interintra_compound = aom_rb_read_bit(rb);
    seq_params->enable_masked_compound = aom_rb_read_bit(rb);
    seq_params->enable_warped_motion = aom_rb_read_bit(rb);
    seq_params->enable_dual_filter = aom_rb_read_bit(rb);

    seq_params->enable_order_hint = aom_rb_read_bit(rb);
    // jnt_comp and ref_frame_mvs both require order hints to be usable.
    seq_params->enable_jnt_comp =
        seq_params->enable_order_hint ? aom_rb_read_bit(rb) : 0;
    seq_params->enable_ref_frame_mvs =
        seq_params->enable_order_hint ? aom_rb_read_bit(rb) : 0;

    // First bit selects "per-frame choice" (2); otherwise a second bit
    // gives the fixed 0/1 setting.
    if (aom_rb_read_bit(rb)) {
      seq_params->force_screen_content_tools =
          2;  // SELECT_SCREEN_CONTENT_TOOLS
    } else {
      seq_params->force_screen_content_tools = aom_rb_read_bit(rb);
    }

    // Integer-MV forcing is only coded when screen content tools may be on.
    if (seq_params->force_screen_content_tools > 0) {
      if (aom_rb_read_bit(rb)) {
        seq_params->force_integer_mv = 2;  // SELECT_INTEGER_MV
      } else {
        seq_params->force_integer_mv = aom_rb_read_bit(rb);
      }
    } else {
      seq_params->force_integer_mv = 2;  // SELECT_INTEGER_MV
    }
    seq_params->order_hint_bits_minus_1 =
        seq_params->enable_order_hint ? aom_rb_read_literal(rb, 3) : -1;
  }

  seq_params->enable_superres = aom_rb_read_bit(rb);
  seq_params->enable_cdef = aom_rb_read_bit(rb);
  seq_params->enable_restoration = aom_rb_read_bit(rb);
}
4476 | | |
// Decodes the global motion model for one reference frame. The type is
// coded as IDENTITY / ROTZOOM / TRANSLATION / AFFINE; each parameter is
// coded as a subexponential refinement against ref_params (the previous
// frame's model) and rescaled into WARPEDMODEL precision. Returns 0 when
// the decoded affine model fails get_shear_params() (caller marks the
// model invalid), 1 otherwise.
static int read_global_motion_params(WarpedMotionParams *params,
                                     const WarpedMotionParams *ref_params,
                                     struct aom_read_bit_buffer *rb,
                                     int allow_hp) {
  TransformationType type = aom_rb_read_bit(rb);
  if (type != IDENTITY) {
    if (aom_rb_read_bit(rb))
      type = ROTZOOM;
    else
      type = aom_rb_read_bit(rb) ? TRANSLATION : AFFINE;
  }

  *params = default_warp_params;
  params->wmtype = type;

  // wmmat[2] and wmmat[3]: the rotation/zoom terms. wmmat[2] is coded
  // relative to the identity diagonal, hence the (1 << GM_ALPHA_PREC_BITS)
  // offset and the added (1 << WARPEDMODEL_PREC_BITS).
  if (type >= ROTZOOM) {
    params->wmmat[2] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
                               (1 << GM_ALPHA_PREC_BITS)) *
                           GM_ALPHA_DECODE_FACTOR +
                       (1 << WARPEDMODEL_PREC_BITS);
    params->wmmat[3] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF)) *
                       GM_ALPHA_DECODE_FACTOR;
  }

  // wmmat[4] and wmmat[5]: only coded for AFFINE; for ROTZOOM they are
  // derived so the matrix stays a rotation/scale.
  if (type >= AFFINE) {
    params->wmmat[4] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF)) *
                       GM_ALPHA_DECODE_FACTOR;
    params->wmmat[5] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
                               (1 << GM_ALPHA_PREC_BITS)) *
                           GM_ALPHA_DECODE_FACTOR +
                       (1 << WARPEDMODEL_PREC_BITS);
  } else {
    params->wmmat[4] = -params->wmmat[3];
    params->wmmat[5] = params->wmmat[2];
  }

  // wmmat[0] and wmmat[1]: translation. Pure-translation models use a
  // reduced precision that additionally depends on allow_hp.
  if (type >= TRANSLATION) {
    const int trans_bits = (type == TRANSLATION)
                               ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
                               : GM_ABS_TRANS_BITS;
    const int trans_dec_factor =
        (type == TRANSLATION) ? GM_TRANS_ONLY_DECODE_FACTOR * (1 << !allow_hp)
                              : GM_TRANS_DECODE_FACTOR;
    const int trans_prec_diff = (type == TRANSLATION)
                                    ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
                                    : GM_TRANS_PREC_DIFF;
    params->wmmat[0] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[0] >> trans_prec_diff)) *
                       trans_dec_factor;
    params->wmmat[1] = aom_rb_read_signed_primitive_refsubexpfin(
                           rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
                           (ref_params->wmmat[1] >> trans_prec_diff)) *
                       trans_dec_factor;
  }

  if (params->wmtype <= AFFINE) {
    int good_shear_params = get_shear_params(params);
    if (!good_shear_params) return 0;
  }

  return 1;
}
4548 | | |
// Reads the global motion models for all inter reference frames, using the
// previous frame's models (when available) as prediction context, then
// copies the result onto the current frame buffer for future reference.
static void read_global_motion(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
  for (int frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
    const WarpedMotionParams *ref_params =
        cm->prev_frame ? &cm->prev_frame->global_motion[frame]
                       : &default_warp_params;
    int good_params = read_global_motion_params(
        &cm->global_motion[frame], ref_params, rb, cm->allow_high_precision_mv);
    // A model that fails the shear-parameter check is kept but flagged so
    // the warp is never applied.
    if (!good_params) {
#if WARPED_MOTION_DEBUG
      printf("Warning: unexpected global motion shear params from aomenc\n");
#endif
      cm->global_motion[frame].invalid = 1;
    }

    // TODO(sarahparker, debargha): The logic in the commented out code below
    // does not work currently and causes mismatches when resize is on. Fix it
    // before turning the optimization back on.
    /*
    YV12_BUFFER_CONFIG *ref_buf = get_ref_frame(cm, frame);
    if (cm->width == ref_buf->y_crop_width &&
        cm->height == ref_buf->y_crop_height) {
      read_global_motion_params(&cm->global_motion[frame],
                                &cm->prev_frame->global_motion[frame], rb,
                                cm->allow_high_precision_mv);
    } else {
      cm->global_motion[frame] = default_warp_params;
    }
    */
    /*
    printf("Dec Ref %d [%d/%d]: %d %d %d %d\n",
           frame, cm->current_video_frame, cm->show_frame,
           cm->global_motion[frame].wmmat[0],
           cm->global_motion[frame].wmmat[1],
           cm->global_motion[frame].wmmat[2],
           cm->global_motion[frame].wmmat[3]);
    */
  }
  // Persist the decoded models on the frame buffer so a later frame can use
  // them as its prediction context.
  memcpy(cm->cur_frame->global_motion, cm->global_motion,
         REF_FRAMES * sizeof(WarpedMotionParams));
}
4589 | | |
// Handles the decoder-state reset that a show_existing_frame of a keyframe
// triggers: the displayed frame behaves like a new shown keyframe, so all
// reference slots are refreshed, frame-id bookkeeping is updated, and the
// CDFs saved when the keyframe was originally coded are reloaded.
static void show_existing_frame_reset(AV1Decoder *const pbi,
                                      int existing_frame_idx) {
  AV1_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = pool->frame_bufs;

  assert(cm->show_existing_frame);

  cm->frame_type = KEY_FRAME;

  // Refresh every reference slot, as a shown keyframe would.
  pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;

  for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
    cm->frame_refs[i].idx = INVALID_IDX;
    cm->frame_refs[i].buf = NULL;
  }

  if (pbi->need_resync) {
    memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
    pbi->need_resync = 0;
  }

  cm->cur_frame->intra_only = 1;

  if (cm->seq_params.frame_id_numbers_present_flag) {
    /* If bitmask is set, update reference frame id values and
       mark frames as valid for reference.
       Note that the displayed frame must be valid for referencing
       in order to have been selected.
    */
    int refresh_frame_flags = pbi->refresh_frame_flags;
    int display_frame_id = cm->ref_frame_id[existing_frame_idx];
    for (int i = 0; i < REF_FRAMES; i++) {
      if ((refresh_frame_flags >> i) & 1) {
        cm->ref_frame_id[i] = display_frame_id;
        cm->valid_for_referencing[i] = 1;
      }
    }
  }

  cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED;

  // Generate next_ref_frame_map.
  lock_buffer_pool(pool);
  int ref_index = 0;
  for (int mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    if (mask & 1) {
      // Slot is refreshed: point it at the new buffer and take a reference.
      cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
      ++frame_bufs[cm->new_fb_idx].ref_count;
    } else {
      cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
    }
    // Current thread holds the reference frame.
    if (cm->ref_frame_map[ref_index] >= 0)
      ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
    ++ref_index;
  }

  // Remaining slots (beyond the highest refresh bit) keep their mapping.
  for (; ref_index < REF_FRAMES; ++ref_index) {
    cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];

    // Current thread holds the reference frame.
    if (cm->ref_frame_map[ref_index] >= 0)
      ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
  }
  unlock_buffer_pool(pool);
  pbi->hold_ref_buf = 1;

  // Reload the adapted CDFs from when we originally coded this keyframe
  *cm->fc = cm->frame_contexts[existing_frame_idx];
}
4661 | | |
4662 | 0 | static INLINE void reset_frame_buffers(AV1_COMMON *cm) { |
4663 | 0 | RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; |
4664 | 0 | int i; |
4665 | 0 |
|
4666 | 0 | memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); |
4667 | 0 | memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map)); |
4668 | 0 |
|
4669 | 0 | lock_buffer_pool(cm->buffer_pool); |
4670 | 0 | for (i = 0; i < FRAME_BUFFERS; ++i) { |
4671 | 0 | if (i != cm->new_fb_idx) { |
4672 | 0 | frame_bufs[i].ref_count = 0; |
4673 | 0 | cm->buffer_pool->release_fb_cb(cm->buffer_pool->cb_priv, |
4674 | 0 | &frame_bufs[i].raw_frame_buffer); |
4675 | 0 | } else { |
4676 | 0 | assert(frame_bufs[i].ref_count == 1); |
4677 | 0 | } |
4678 | 0 | frame_bufs[i].cur_frame_offset = 0; |
4679 | 0 | av1_zero(frame_bufs[i].ref_frame_offset); |
4680 | 0 | } |
4681 | 0 | av1_zero_unused_internal_frame_buffers(&cm->buffer_pool->int_frame_buffers); |
4682 | 0 | unlock_buffer_pool(cm->buffer_pool); |
4683 | 0 | } |
4684 | | |
4685 | | // On success, returns 0. On failure, calls aom_internal_error and does not |
4686 | | // return. |
4687 | | static int read_uncompressed_header(AV1Decoder *pbi, |
4688 | 0 | struct aom_read_bit_buffer *rb) { |
4689 | 0 | AV1_COMMON *const cm = &pbi->common; |
4690 | 0 | const SequenceHeader *const seq_params = &cm->seq_params; |
4691 | 0 | MACROBLOCKD *const xd = &pbi->mb; |
4692 | 0 | BufferPool *const pool = cm->buffer_pool; |
4693 | 0 | RefCntBuffer *const frame_bufs = pool->frame_bufs; |
4694 | 0 |
|
4695 | 0 | if (!pbi->sequence_header_ready) { |
4696 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
4697 | 0 | "No sequence header"); |
4698 | 0 | } |
4699 | 0 |
|
4700 | 0 | cm->last_frame_type = cm->frame_type; |
4701 | 0 | cm->last_intra_only = cm->intra_only; |
4702 | 0 |
|
4703 | 0 | // NOTE: By default all coded frames to be used as a reference |
4704 | 0 | cm->is_reference_frame = 1; |
4705 | 0 |
|
4706 | 0 | if (seq_params->reduced_still_picture_hdr) { |
4707 | 0 | cm->show_existing_frame = 0; |
4708 | 0 | cm->show_frame = 1; |
4709 | 0 | cm->frame_type = KEY_FRAME; |
4710 | 0 | cm->error_resilient_mode = 1; |
4711 | 0 | } else { |
4712 | 0 | cm->show_existing_frame = aom_rb_read_bit(rb); |
4713 | 0 | cm->reset_decoder_state = 0; |
4714 | 0 |
|
4715 | 0 | if (cm->show_existing_frame) { |
4716 | 0 | if (pbi->sequence_header_changed) { |
4717 | 0 | aom_internal_error( |
4718 | 0 | &cm->error, AOM_CODEC_CORRUPT_FRAME, |
4719 | 0 | "New sequence header starts with a show_existing_frame."); |
4720 | 0 | } |
4721 | 0 | // Show an existing frame directly. |
4722 | 0 | const int existing_frame_idx = aom_rb_read_literal(rb, 3); |
4723 | 0 | const int frame_to_show = cm->ref_frame_map[existing_frame_idx]; |
4724 | 0 | if (seq_params->decoder_model_info_present_flag && |
4725 | 0 | cm->timing_info.equal_picture_interval == 0) { |
4726 | 0 | av1_read_temporal_point_info(cm, rb); |
4727 | 0 | } |
4728 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
4729 | 0 | int frame_id_length = seq_params->frame_id_length; |
4730 | 0 | int display_frame_id = aom_rb_read_literal(rb, frame_id_length); |
4731 | 0 | /* Compare display_frame_id with ref_frame_id and check valid for |
4732 | 0 | * referencing */ |
4733 | 0 | if (display_frame_id != cm->ref_frame_id[existing_frame_idx] || |
4734 | 0 | cm->valid_for_referencing[existing_frame_idx] == 0) |
4735 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
4736 | 0 | "Reference buffer frame ID mismatch"); |
4737 | 0 | } |
4738 | 0 | lock_buffer_pool(pool); |
4739 | 0 | if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) { |
4740 | 0 | unlock_buffer_pool(pool); |
4741 | 0 | aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, |
4742 | 0 | "Buffer %d does not contain a decoded frame", |
4743 | 0 | frame_to_show); |
4744 | 0 | } |
4745 | 0 | ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show); |
4746 | 0 | cm->reset_decoder_state = |
4747 | 0 | frame_bufs[frame_to_show].frame_type == KEY_FRAME; |
4748 | 0 | unlock_buffer_pool(pool); |
4749 | 0 |
|
4750 | 0 | cm->lf.filter_level[0] = 0; |
4751 | 0 | cm->lf.filter_level[1] = 0; |
4752 | 0 | cm->show_frame = 1; |
4753 | 0 |
|
4754 | 0 | if (!frame_bufs[frame_to_show].showable_frame) { |
4755 | 0 | aom_merge_corrupted_flag(&xd->corrupted, 1); |
4756 | 0 | } |
4757 | 0 | if (cm->reset_decoder_state) frame_bufs[frame_to_show].showable_frame = 0; |
4758 | 0 |
|
4759 | 0 | cm->film_grain_params = frame_bufs[frame_to_show].film_grain_params; |
4760 | 0 |
|
4761 | 0 | if (cm->reset_decoder_state) { |
4762 | 0 | show_existing_frame_reset(pbi, existing_frame_idx); |
4763 | 0 | } else { |
4764 | 0 | pbi->refresh_frame_flags = 0; |
4765 | 0 | } |
4766 | 0 |
|
4767 | 0 | return 0; |
4768 | 0 | } |
4769 | 0 |
|
4770 | 0 | cm->frame_type = (FRAME_TYPE)aom_rb_read_literal(rb, 2); // 2 bits |
4771 | 0 | if (pbi->sequence_header_changed) { |
4772 | 0 | if (pbi->common.frame_type == KEY_FRAME) { |
4773 | 0 | // This is the start of a new coded video sequence. |
4774 | 0 | pbi->sequence_header_changed = 0; |
4775 | 0 | pbi->decoding_first_frame = 1; |
4776 | 0 | reset_frame_buffers(&pbi->common); |
4777 | 0 | } else { |
4778 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
4779 | 0 | "Sequence header has changed without a keyframe."); |
4780 | 0 | } |
4781 | 0 | } |
4782 | 0 |
|
4783 | 0 | cm->show_frame = aom_rb_read_bit(rb); |
4784 | 0 | if (seq_params->still_picture && |
4785 | 0 | (cm->frame_type != KEY_FRAME || !cm->show_frame)) { |
4786 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
4787 | 0 | "Still pictures must be coded as shown keyframes"); |
4788 | 0 | } |
4789 | 0 | cm->showable_frame = cm->frame_type != KEY_FRAME; |
4790 | 0 | if (cm->show_frame) { |
4791 | 0 | if (seq_params->decoder_model_info_present_flag && |
4792 | 0 | cm->timing_info.equal_picture_interval == 0) |
4793 | 0 | av1_read_temporal_point_info(cm, rb); |
4794 | 0 | } else { |
4795 | 0 | // See if this frame can be used as show_existing_frame in future |
4796 | 0 | cm->showable_frame = aom_rb_read_bit(rb); |
4797 | 0 | } |
4798 | 0 | cm->cur_frame->showable_frame = cm->showable_frame; |
4799 | 0 | cm->intra_only = cm->frame_type == INTRA_ONLY_FRAME; |
4800 | 0 | cm->error_resilient_mode = |
4801 | 0 | frame_is_sframe(cm) || (cm->frame_type == KEY_FRAME && cm->show_frame) |
4802 | 0 | ? 1 |
4803 | 0 | : aom_rb_read_bit(rb); |
4804 | 0 | } |
4805 | 0 |
|
4806 | 0 | cm->disable_cdf_update = aom_rb_read_bit(rb); |
4807 | 0 | if (seq_params->force_screen_content_tools == 2) { |
4808 | 0 | cm->allow_screen_content_tools = aom_rb_read_bit(rb); |
4809 | 0 | } else { |
4810 | 0 | cm->allow_screen_content_tools = seq_params->force_screen_content_tools; |
4811 | 0 | } |
4812 | 0 |
|
4813 | 0 | if (cm->allow_screen_content_tools) { |
4814 | 0 | if (seq_params->force_integer_mv == 2) { |
4815 | 0 | cm->cur_frame_force_integer_mv = aom_rb_read_bit(rb); |
4816 | 0 | } else { |
4817 | 0 | cm->cur_frame_force_integer_mv = seq_params->force_integer_mv; |
4818 | 0 | } |
4819 | 0 | } else { |
4820 | 0 | cm->cur_frame_force_integer_mv = 0; |
4821 | 0 | } |
4822 | 0 |
|
4823 | 0 | cm->frame_refs_short_signaling = 0; |
4824 | 0 | int frame_size_override_flag = 0; |
4825 | 0 | cm->allow_intrabc = 0; |
4826 | 0 | cm->primary_ref_frame = PRIMARY_REF_NONE; |
4827 | 0 |
|
4828 | 0 | if (!seq_params->reduced_still_picture_hdr) { |
4829 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
4830 | 0 | int frame_id_length = seq_params->frame_id_length; |
4831 | 0 | int diff_len = seq_params->delta_frame_id_length; |
4832 | 0 | int prev_frame_id = 0; |
4833 | 0 | int have_prev_frame_id = !pbi->decoding_first_frame && |
4834 | 0 | !(cm->frame_type == KEY_FRAME && cm->show_frame); |
4835 | 0 | if (have_prev_frame_id) { |
4836 | 0 | prev_frame_id = cm->current_frame_id; |
4837 | 0 | } |
4838 | 0 | cm->current_frame_id = aom_rb_read_literal(rb, frame_id_length); |
4839 | 0 |
|
4840 | 0 | if (have_prev_frame_id) { |
4841 | 0 | int diff_frame_id; |
4842 | 0 | if (cm->current_frame_id > prev_frame_id) { |
4843 | 0 | diff_frame_id = cm->current_frame_id - prev_frame_id; |
4844 | 0 | } else { |
4845 | 0 | diff_frame_id = |
4846 | 0 | (1 << frame_id_length) + cm->current_frame_id - prev_frame_id; |
4847 | 0 | } |
4848 | 0 | /* Check current_frame_id for conformance */ |
4849 | 0 | if (prev_frame_id == cm->current_frame_id || |
4850 | 0 | diff_frame_id >= (1 << (frame_id_length - 1))) { |
4851 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
4852 | 0 | "Invalid value of current_frame_id"); |
4853 | 0 | } |
4854 | 0 | } |
4855 | 0 | /* Check if some frames need to be marked as not valid for referencing */ |
4856 | 0 | for (int i = 0; i < REF_FRAMES; i++) { |
4857 | 0 | if (cm->frame_type == KEY_FRAME && cm->show_frame) { |
4858 | 0 | cm->valid_for_referencing[i] = 0; |
4859 | 0 | } else if (cm->current_frame_id - (1 << diff_len) > 0) { |
4860 | 0 | if (cm->ref_frame_id[i] > cm->current_frame_id || |
4861 | 0 | cm->ref_frame_id[i] < cm->current_frame_id - (1 << diff_len)) |
4862 | 0 | cm->valid_for_referencing[i] = 0; |
4863 | 0 | } else { |
4864 | 0 | if (cm->ref_frame_id[i] > cm->current_frame_id && |
4865 | 0 | cm->ref_frame_id[i] < (1 << frame_id_length) + |
4866 | 0 | cm->current_frame_id - (1 << diff_len)) |
4867 | 0 | cm->valid_for_referencing[i] = 0; |
4868 | 0 | } |
4869 | 0 | } |
4870 | 0 | } |
4871 | 0 |
|
4872 | 0 | frame_size_override_flag = frame_is_sframe(cm) ? 1 : aom_rb_read_bit(rb); |
4873 | 0 |
|
4874 | 0 | cm->frame_offset = |
4875 | 0 | aom_rb_read_literal(rb, seq_params->order_hint_bits_minus_1 + 1); |
4876 | 0 | cm->current_video_frame = cm->frame_offset; |
4877 | 0 |
|
4878 | 0 | if (!cm->error_resilient_mode && !frame_is_intra_only(cm)) { |
4879 | 0 | cm->primary_ref_frame = aom_rb_read_literal(rb, PRIMARY_REF_BITS); |
4880 | 0 | } |
4881 | 0 | } |
4882 | 0 |
|
4883 | 0 | if (seq_params->decoder_model_info_present_flag) { |
4884 | 0 | cm->buffer_removal_time_present = aom_rb_read_bit(rb); |
4885 | 0 | if (cm->buffer_removal_time_present) { |
4886 | 0 | for (int op_num = 0; |
4887 | 0 | op_num < seq_params->operating_points_cnt_minus_1 + 1; op_num++) { |
4888 | 0 | if (cm->op_params[op_num].decoder_model_param_present_flag) { |
4889 | 0 | if ((((seq_params->operating_point_idc[op_num] >> |
4890 | 0 | cm->temporal_layer_id) & |
4891 | 0 | 0x1) && |
4892 | 0 | ((seq_params->operating_point_idc[op_num] >> |
4893 | 0 | (cm->spatial_layer_id + 8)) & |
4894 | 0 | 0x1)) || |
4895 | 0 | seq_params->operating_point_idc[op_num] == 0) { |
4896 | 0 | cm->op_frame_timing[op_num].buffer_removal_time = |
4897 | 0 | aom_rb_read_unsigned_literal( |
4898 | 0 | rb, cm->buffer_model.buffer_removal_time_length); |
4899 | 0 | } else { |
4900 | 0 | cm->op_frame_timing[op_num].buffer_removal_time = 0; |
4901 | 0 | } |
4902 | 0 | } else { |
4903 | 0 | cm->op_frame_timing[op_num].buffer_removal_time = 0; |
4904 | 0 | } |
4905 | 0 | } |
4906 | 0 | } |
4907 | 0 | } |
4908 | 0 | if (cm->frame_type == KEY_FRAME) { |
4909 | 0 | if (!cm->show_frame) // unshown keyframe (forward keyframe) |
4910 | 0 | pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES); |
4911 | 0 | else // shown keyframe |
4912 | 0 | pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; |
4913 | 0 |
|
4914 | 0 | for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) { |
4915 | 0 | cm->frame_refs[i].idx = INVALID_IDX; |
4916 | 0 | cm->frame_refs[i].buf = NULL; |
4917 | 0 | } |
4918 | 0 | if (pbi->need_resync) { |
4919 | 0 | memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); |
4920 | 0 | pbi->need_resync = 0; |
4921 | 0 | } |
4922 | 0 | } else { |
4923 | 0 | if (cm->intra_only) { |
4924 | 0 | pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES); |
4925 | 0 | if (pbi->refresh_frame_flags == 0xFF) { |
4926 | 0 | aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, |
4927 | 0 | "Intra only frames cannot have refresh flags 0xFF"); |
4928 | 0 | } |
4929 | 0 | if (pbi->need_resync) { |
4930 | 0 | memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); |
4931 | 0 | pbi->need_resync = 0; |
4932 | 0 | } |
4933 | 0 | } else if (pbi->need_resync != 1) { /* Skip if need resync */ |
4934 | 0 | pbi->refresh_frame_flags = |
4935 | 0 | frame_is_sframe(cm) ? 0xFF : aom_rb_read_literal(rb, REF_FRAMES); |
4936 | 0 | if (!pbi->refresh_frame_flags) { |
4937 | 0 | // NOTE: "pbi->refresh_frame_flags == 0" indicates that the coded frame |
4938 | 0 | // will not be used as a reference |
4939 | 0 | cm->is_reference_frame = 0; |
4940 | 0 | } |
4941 | 0 | } |
4942 | 0 | } |
4943 | 0 |
|
4944 | 0 | if (!frame_is_intra_only(cm) || pbi->refresh_frame_flags != 0xFF) { |
4945 | 0 | // Read all ref frame order hints if error_resilient_mode == 1 |
4946 | 0 | if (cm->error_resilient_mode && seq_params->enable_order_hint) { |
4947 | 0 | for (int ref_idx = 0; ref_idx < REF_FRAMES; ref_idx++) { |
4948 | 0 | // Read order hint from bit stream |
4949 | 0 | unsigned int frame_offset = |
4950 | 0 | aom_rb_read_literal(rb, seq_params->order_hint_bits_minus_1 + 1); |
4951 | 0 | // Get buffer index |
4952 | 0 | int buf_idx = cm->ref_frame_map[ref_idx]; |
4953 | 0 | assert(buf_idx < FRAME_BUFFERS); |
4954 | 0 | if (buf_idx == -1 || |
4955 | 0 | frame_offset != frame_bufs[buf_idx].cur_frame_offset) { |
4956 | 0 | if (buf_idx >= 0) { |
4957 | 0 | lock_buffer_pool(pool); |
4958 | 0 | decrease_ref_count(buf_idx, frame_bufs, pool); |
4959 | 0 | unlock_buffer_pool(pool); |
4960 | 0 | } |
4961 | 0 | // If no corresponding buffer exists, allocate a new buffer with all |
4962 | 0 | // pixels set to neutral grey. |
4963 | 0 | buf_idx = get_free_fb(cm); |
4964 | 0 | if (buf_idx == INVALID_IDX) { |
4965 | 0 | aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, |
4966 | 0 | "Unable to find free frame buffer"); |
4967 | 0 | } |
4968 | 0 | lock_buffer_pool(pool); |
4969 | 0 | if (aom_realloc_frame_buffer( |
4970 | 0 | &frame_bufs[buf_idx].buf, seq_params->max_frame_width, |
4971 | 0 | seq_params->max_frame_height, seq_params->subsampling_x, |
4972 | 0 | seq_params->subsampling_y, seq_params->use_highbitdepth, |
4973 | 0 | AOM_BORDER_IN_PIXELS, cm->byte_alignment, |
4974 | 0 | &pool->frame_bufs[buf_idx].raw_frame_buffer, pool->get_fb_cb, |
4975 | 0 | pool->cb_priv)) { |
4976 | 0 | unlock_buffer_pool(pool); |
4977 | 0 | aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, |
4978 | 0 | "Failed to allocate frame buffer"); |
4979 | 0 | } |
4980 | 0 | unlock_buffer_pool(pool); |
4981 | 0 | set_planes_to_neutral_grey(seq_params, &frame_bufs[buf_idx].buf, 0); |
4982 | 0 |
|
4983 | 0 | cm->ref_frame_map[ref_idx] = buf_idx; |
4984 | 0 | frame_bufs[buf_idx].cur_frame_offset = frame_offset; |
4985 | 0 | } |
4986 | 0 | } |
4987 | 0 | } |
4988 | 0 | } |
4989 | 0 |
|
4990 | 0 | if (cm->frame_type == KEY_FRAME) { |
4991 | 0 | setup_frame_size(cm, frame_size_override_flag, rb); |
4992 | 0 |
|
4993 | 0 | if (cm->allow_screen_content_tools && !av1_superres_scaled(cm)) |
4994 | 0 | cm->allow_intrabc = aom_rb_read_bit(rb); |
4995 | 0 | cm->allow_ref_frame_mvs = 0; |
4996 | 0 | cm->prev_frame = NULL; |
4997 | 0 | } else { |
4998 | 0 | cm->allow_ref_frame_mvs = 0; |
4999 | 0 |
|
5000 | 0 | if (cm->intra_only) { |
5001 | 0 | cm->cur_frame->film_grain_params_present = |
5002 | 0 | seq_params->film_grain_params_present; |
5003 | 0 | setup_frame_size(cm, frame_size_override_flag, rb); |
5004 | 0 | if (cm->allow_screen_content_tools && !av1_superres_scaled(cm)) |
5005 | 0 | cm->allow_intrabc = aom_rb_read_bit(rb); |
5006 | 0 |
|
5007 | 0 | } else if (pbi->need_resync != 1) { /* Skip if need resync */ |
5008 | 0 |
|
5009 | 0 | // Frame refs short signaling is off when error resilient mode is on. |
5010 | 0 | if (seq_params->enable_order_hint) |
5011 | 0 | cm->frame_refs_short_signaling = aom_rb_read_bit(rb); |
5012 | 0 |
|
5013 | 0 | if (cm->frame_refs_short_signaling) { |
5014 | 0 | // == LAST_FRAME == |
5015 | 0 | const int lst_ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2); |
5016 | 0 | const int lst_idx = cm->ref_frame_map[lst_ref]; |
5017 | 0 |
|
5018 | 0 | // == GOLDEN_FRAME == |
5019 | 0 | const int gld_ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2); |
5020 | 0 | const int gld_idx = cm->ref_frame_map[gld_ref]; |
5021 | 0 |
|
5022 | 0 | // Most of the time, streams start with a keyframe. In that case, |
5023 | 0 | // ref_frame_map will have been filled in at that point and will not |
5024 | 0 | // contain any -1's. However, streams are explicitly allowed to start |
5025 | 0 | // with an intra-only frame, so long as they don't then signal a |
5026 | 0 | // reference to a slot that hasn't been set yet. That's what we are |
5027 | 0 | // checking here. |
5028 | 0 | if (lst_idx == -1) |
5029 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5030 | 0 | "Inter frame requests nonexistent reference"); |
5031 | 0 | if (gld_idx == -1) |
5032 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5033 | 0 | "Inter frame requests nonexistent reference"); |
5034 | 0 |
|
5035 | 0 | av1_set_frame_refs(cm, lst_ref, gld_ref); |
5036 | 0 | } |
5037 | 0 |
|
5038 | 0 | for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) { |
5039 | 0 | int ref = 0; |
5040 | 0 | if (!cm->frame_refs_short_signaling) { |
5041 | 0 | ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2); |
5042 | 0 | const int idx = cm->ref_frame_map[ref]; |
5043 | 0 |
|
5044 | 0 | // Most of the time, streams start with a keyframe. In that case, |
5045 | 0 | // ref_frame_map will have been filled in at that point and will not |
5046 | 0 | // contain any -1's. However, streams are explicitly allowed to start |
5047 | 0 | // with an intra-only frame, so long as they don't then signal a |
5048 | 0 | // reference to a slot that hasn't been set yet. That's what we are |
5049 | 0 | // checking here. |
5050 | 0 | if (idx == -1) |
5051 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5052 | 0 | "Inter frame requests nonexistent reference"); |
5053 | 0 |
|
5054 | 0 | RefBuffer *const ref_frame = &cm->frame_refs[i]; |
5055 | 0 | ref_frame->idx = idx; |
5056 | 0 | ref_frame->buf = &frame_bufs[idx].buf; |
5057 | 0 | ref_frame->map_idx = ref; |
5058 | 0 | } else { |
5059 | 0 | ref = cm->frame_refs[i].map_idx; |
5060 | 0 | } |
5061 | 0 |
|
5062 | 0 | cm->ref_frame_sign_bias[LAST_FRAME + i] = 0; |
5063 | 0 |
|
5064 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
5065 | 0 | int frame_id_length = seq_params->frame_id_length; |
5066 | 0 | int diff_len = seq_params->delta_frame_id_length; |
5067 | 0 | int delta_frame_id_minus_1 = aom_rb_read_literal(rb, diff_len); |
5068 | 0 | int ref_frame_id = |
5069 | 0 | ((cm->current_frame_id - (delta_frame_id_minus_1 + 1) + |
5070 | 0 | (1 << frame_id_length)) % |
5071 | 0 | (1 << frame_id_length)); |
5072 | 0 | // Compare values derived from delta_frame_id_minus_1 and |
5073 | 0 | // refresh_frame_flags. Also, check valid for referencing |
5074 | 0 | if (ref_frame_id != cm->ref_frame_id[ref] || |
5075 | 0 | cm->valid_for_referencing[ref] == 0) |
5076 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5077 | 0 | "Reference buffer frame ID mismatch"); |
5078 | 0 | } |
5079 | 0 | } |
5080 | 0 |
|
5081 | 0 | if (!cm->error_resilient_mode && frame_size_override_flag) { |
5082 | 0 | setup_frame_size_with_refs(cm, rb); |
5083 | 0 | } else { |
5084 | 0 | setup_frame_size(cm, frame_size_override_flag, rb); |
5085 | 0 | } |
5086 | 0 |
|
5087 | 0 | if (cm->cur_frame_force_integer_mv) { |
5088 | 0 | cm->allow_high_precision_mv = 0; |
5089 | 0 | } else { |
5090 | 0 | cm->allow_high_precision_mv = aom_rb_read_bit(rb); |
5091 | 0 | } |
5092 | 0 | cm->interp_filter = read_frame_interp_filter(rb); |
5093 | 0 | cm->switchable_motion_mode = aom_rb_read_bit(rb); |
5094 | 0 | } |
5095 | 0 |
|
5096 | 0 | cm->prev_frame = get_prev_frame(cm); |
5097 | 0 | if (cm->primary_ref_frame != PRIMARY_REF_NONE && |
5098 | 0 | cm->frame_refs[cm->primary_ref_frame].idx < 0) { |
5099 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5100 | 0 | "Reference frame containing this frame's initial " |
5101 | 0 | "frame context is unavailable."); |
5102 | 0 | } |
5103 | 0 |
|
5104 | 0 | if (!cm->intra_only && pbi->need_resync != 1) { |
5105 | 0 | if (frame_might_allow_ref_frame_mvs(cm)) |
5106 | 0 | cm->allow_ref_frame_mvs = aom_rb_read_bit(rb); |
5107 | 0 | else |
5108 | 0 | cm->allow_ref_frame_mvs = 0; |
5109 | 0 |
|
5110 | 0 | for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) { |
5111 | 0 | RefBuffer *const ref_buf = &cm->frame_refs[i]; |
5112 | 0 | av1_setup_scale_factors_for_frame( |
5113 | 0 | &ref_buf->sf, ref_buf->buf->y_crop_width, |
5114 | 0 | ref_buf->buf->y_crop_height, cm->width, cm->height); |
5115 | 0 | if ((!av1_is_valid_scale(&ref_buf->sf))) |
5116 | 0 | aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, |
5117 | 0 | "Reference frame has invalid dimensions"); |
5118 | 0 | } |
5119 | 0 | } |
5120 | 0 | } |
5121 | 0 |
|
5122 | 0 | av1_setup_frame_buf_refs(cm); |
5123 | 0 |
|
5124 | 0 | av1_setup_frame_sign_bias(cm); |
5125 | 0 |
|
5126 | 0 | cm->cur_frame->intra_only = cm->frame_type == KEY_FRAME || cm->intra_only; |
5127 | 0 | cm->cur_frame->frame_type = cm->frame_type; |
5128 | 0 |
|
5129 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
5130 | 0 | /* If bitmask is set, update reference frame id values and |
5131 | 0 | mark frames as valid for reference */ |
5132 | 0 | int refresh_frame_flags = pbi->refresh_frame_flags; |
5133 | 0 | for (int i = 0; i < REF_FRAMES; i++) { |
5134 | 0 | if ((refresh_frame_flags >> i) & 1) { |
5135 | 0 | cm->ref_frame_id[i] = cm->current_frame_id; |
5136 | 0 | cm->valid_for_referencing[i] = 1; |
5137 | 0 | } |
5138 | 0 | } |
5139 | 0 | } |
5140 | 0 |
|
5141 | 0 | const int might_bwd_adapt = |
5142 | 0 | !(seq_params->reduced_still_picture_hdr) && !(cm->disable_cdf_update); |
5143 | 0 | if (might_bwd_adapt) { |
5144 | 0 | cm->refresh_frame_context = aom_rb_read_bit(rb) |
5145 | 0 | ? REFRESH_FRAME_CONTEXT_DISABLED |
5146 | 0 | : REFRESH_FRAME_CONTEXT_BACKWARD; |
5147 | 0 | } else { |
5148 | 0 | cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED; |
5149 | 0 | } |
5150 | 0 |
|
5151 | 0 | get_frame_new_buffer(cm)->bit_depth = seq_params->bit_depth; |
5152 | 0 | get_frame_new_buffer(cm)->color_primaries = seq_params->color_primaries; |
5153 | 0 | get_frame_new_buffer(cm)->transfer_characteristics = |
5154 | 0 | seq_params->transfer_characteristics; |
5155 | 0 | get_frame_new_buffer(cm)->matrix_coefficients = |
5156 | 0 | seq_params->matrix_coefficients; |
5157 | 0 | get_frame_new_buffer(cm)->monochrome = seq_params->monochrome; |
5158 | 0 | get_frame_new_buffer(cm)->chroma_sample_position = |
5159 | 0 | seq_params->chroma_sample_position; |
5160 | 0 | get_frame_new_buffer(cm)->color_range = seq_params->color_range; |
5161 | 0 | get_frame_new_buffer(cm)->render_width = cm->render_width; |
5162 | 0 | get_frame_new_buffer(cm)->render_height = cm->render_height; |
5163 | 0 |
|
5164 | 0 | if (pbi->need_resync) { |
5165 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5166 | 0 | "Keyframe / intra-only frame required to reset decoder" |
5167 | 0 | " state"); |
5168 | 0 | } |
5169 | 0 |
|
5170 | 0 | // Generate next_ref_frame_map. |
5171 | 0 | lock_buffer_pool(pool); |
5172 | 0 | int ref_index = 0; |
5173 | 0 | for (int mask = pbi->refresh_frame_flags; mask; mask >>= 1) { |
5174 | 0 | if (mask & 1) { |
5175 | 0 | cm->next_ref_frame_map[ref_index] = cm->new_fb_idx; |
5176 | 0 | ++frame_bufs[cm->new_fb_idx].ref_count; |
5177 | 0 | } else { |
5178 | 0 | cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; |
5179 | 0 | } |
5180 | 0 | // Current thread holds the reference frame. |
5181 | 0 | if (cm->ref_frame_map[ref_index] >= 0) |
5182 | 0 | ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; |
5183 | 0 | ++ref_index; |
5184 | 0 | } |
5185 | 0 |
|
5186 | 0 | for (; ref_index < REF_FRAMES; ++ref_index) { |
5187 | 0 | cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; |
5188 | 0 |
|
5189 | 0 | // Current thread holds the reference frame. |
5190 | 0 | if (cm->ref_frame_map[ref_index] >= 0) |
5191 | 0 | ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; |
5192 | 0 | } |
5193 | 0 | unlock_buffer_pool(pool); |
5194 | 0 | pbi->hold_ref_buf = 1; |
5195 | 0 |
|
5196 | 0 | if (cm->allow_intrabc) { |
5197 | 0 | // Set parameters corresponding to no filtering. |
5198 | 0 | struct loopfilter *lf = &cm->lf; |
5199 | 0 | lf->filter_level[0] = 0; |
5200 | 0 | lf->filter_level[1] = 0; |
5201 | 0 | cm->cdef_bits = 0; |
5202 | 0 | cm->cdef_strengths[0] = 0; |
5203 | 0 | cm->nb_cdef_strengths = 1; |
5204 | 0 | cm->cdef_uv_strengths[0] = 0; |
5205 | 0 | cm->rst_info[0].frame_restoration_type = RESTORE_NONE; |
5206 | 0 | cm->rst_info[1].frame_restoration_type = RESTORE_NONE; |
5207 | 0 | cm->rst_info[2].frame_restoration_type = RESTORE_NONE; |
5208 | 0 | } |
5209 | 0 |
|
5210 | 0 | read_tile_info(pbi, rb); |
5211 | 0 | setup_quantization(cm, rb); |
5212 | 0 | xd->bd = (int)seq_params->bit_depth; |
5213 | 0 |
|
5214 | 0 | if (cm->num_allocated_above_context_planes < av1_num_planes(cm) || |
5215 | 0 | cm->num_allocated_above_context_mi_col < cm->mi_cols || |
5216 | 0 | cm->num_allocated_above_contexts < cm->tile_rows) { |
5217 | 0 | av1_free_above_context_buffers(cm, cm->num_allocated_above_contexts); |
5218 | 0 | if (av1_alloc_above_context_buffers(cm, cm->tile_rows)) |
5219 | 0 | aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, |
5220 | 0 | "Failed to allocate context buffers"); |
5221 | 0 | } |
5222 | 0 |
|
5223 | 0 | if (cm->primary_ref_frame == PRIMARY_REF_NONE) { |
5224 | 0 | av1_setup_past_independence(cm); |
5225 | 0 | } |
5226 | 0 |
|
5227 | 0 | setup_segmentation(cm, rb); |
5228 | 0 |
|
5229 | 0 | cm->delta_q_res = 1; |
5230 | 0 | cm->delta_lf_res = 1; |
5231 | 0 | cm->delta_lf_present_flag = 0; |
5232 | 0 | cm->delta_lf_multi = 0; |
5233 | 0 | cm->delta_q_present_flag = cm->base_qindex > 0 ? aom_rb_read_bit(rb) : 0; |
5234 | 0 | if (cm->delta_q_present_flag) { |
5235 | 0 | xd->current_qindex = cm->base_qindex; |
5236 | 0 | cm->delta_q_res = 1 << aom_rb_read_literal(rb, 2); |
5237 | 0 | if (!cm->allow_intrabc) cm->delta_lf_present_flag = aom_rb_read_bit(rb); |
5238 | 0 | if (cm->delta_lf_present_flag) { |
5239 | 0 | cm->delta_lf_res = 1 << aom_rb_read_literal(rb, 2); |
5240 | 0 | cm->delta_lf_multi = aom_rb_read_bit(rb); |
5241 | 0 | av1_reset_loop_filter_delta(xd, av1_num_planes(cm)); |
5242 | 0 | } |
5243 | 0 | } |
5244 | 0 |
|
5245 | 0 | xd->cur_frame_force_integer_mv = cm->cur_frame_force_integer_mv; |
5246 | 0 |
|
5247 | 0 | for (int i = 0; i < MAX_SEGMENTS; ++i) { |
5248 | 0 | const int qindex = cm->seg.enabled |
5249 | 0 | ? av1_get_qindex(&cm->seg, i, cm->base_qindex) |
5250 | 0 | : cm->base_qindex; |
5251 | 0 | xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 && |
5252 | 0 | cm->u_dc_delta_q == 0 && cm->u_ac_delta_q == 0 && |
5253 | 0 | cm->v_dc_delta_q == 0 && cm->v_ac_delta_q == 0; |
5254 | 0 | xd->qindex[i] = qindex; |
5255 | 0 | } |
5256 | 0 | cm->coded_lossless = is_coded_lossless(cm, xd); |
5257 | 0 | cm->all_lossless = cm->coded_lossless && !av1_superres_scaled(cm); |
5258 | 0 | setup_segmentation_dequant(cm); |
5259 | 0 | if (cm->coded_lossless) { |
5260 | 0 | cm->lf.filter_level[0] = 0; |
5261 | 0 | cm->lf.filter_level[1] = 0; |
5262 | 0 | } |
5263 | 0 | if (cm->coded_lossless || !seq_params->enable_cdef) { |
5264 | 0 | cm->cdef_bits = 0; |
5265 | 0 | cm->cdef_strengths[0] = 0; |
5266 | 0 | cm->cdef_uv_strengths[0] = 0; |
5267 | 0 | } |
5268 | 0 | if (cm->all_lossless || !seq_params->enable_restoration) { |
5269 | 0 | cm->rst_info[0].frame_restoration_type = RESTORE_NONE; |
5270 | 0 | cm->rst_info[1].frame_restoration_type = RESTORE_NONE; |
5271 | 0 | cm->rst_info[2].frame_restoration_type = RESTORE_NONE; |
5272 | 0 | } |
5273 | 0 | setup_loopfilter(cm, rb); |
5274 | 0 |
|
5275 | 0 | if (!cm->coded_lossless && seq_params->enable_cdef) { |
5276 | 0 | setup_cdef(cm, rb); |
5277 | 0 | } |
5278 | 0 | if (!cm->all_lossless && seq_params->enable_restoration) { |
5279 | 0 | decode_restoration_mode(cm, rb); |
5280 | 0 | } |
5281 | 0 |
|
5282 | 0 | cm->tx_mode = read_tx_mode(cm, rb); |
5283 | 0 | cm->reference_mode = read_frame_reference_mode(cm, rb); |
5284 | 0 | if (cm->reference_mode != SINGLE_REFERENCE) setup_compound_reference_mode(cm); |
5285 | 0 |
|
5286 | 0 | av1_setup_skip_mode_allowed(cm); |
5287 | 0 | cm->skip_mode_flag = cm->is_skip_mode_allowed ? aom_rb_read_bit(rb) : 0; |
5288 | 0 |
|
5289 | 0 | if (frame_might_allow_warped_motion(cm)) |
5290 | 0 | cm->allow_warped_motion = aom_rb_read_bit(rb); |
5291 | 0 | else |
5292 | 0 | cm->allow_warped_motion = 0; |
5293 | 0 |
|
5294 | 0 | cm->reduced_tx_set_used = aom_rb_read_bit(rb); |
5295 | 0 |
|
5296 | 0 | if (cm->allow_ref_frame_mvs && !frame_might_allow_ref_frame_mvs(cm)) { |
5297 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5298 | 0 | "Frame wrongly requests reference frame MVs"); |
5299 | 0 | } |
5300 | 0 |
|
5301 | 0 | if (!frame_is_intra_only(cm)) read_global_motion(cm, rb); |
5302 | 0 |
|
5303 | 0 | cm->cur_frame->film_grain_params_present = |
5304 | 0 | seq_params->film_grain_params_present; |
5305 | 0 | read_film_grain(cm, rb); |
5306 | 0 |
|
5307 | 0 | #if EXT_TILE_DEBUG |
5308 | 0 | if (pbi->ext_tile_debug && cm->large_scale_tile) { |
5309 | 0 | read_ext_tile_info(pbi, rb); |
5310 | 0 | av1_set_single_tile_decoding_mode(cm); |
5311 | 0 | } |
5312 | 0 | #endif // EXT_TILE_DEBUG |
5313 | 0 | return 0; |
5314 | 0 | } |
5315 | | |
5316 | | struct aom_read_bit_buffer *av1_init_read_bit_buffer( |
5317 | | AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data, |
5318 | 0 | const uint8_t *data_end) { |
5319 | 0 | rb->bit_offset = 0; |
5320 | 0 | rb->error_handler = error_handler; |
5321 | 0 | rb->error_handler_data = &pbi->common; |
5322 | 0 | rb->bit_buffer = data; |
5323 | 0 | rb->bit_buffer_end = data_end; |
5324 | 0 | return rb; |
5325 | 0 | } |
5326 | | |
// Reads a frame size from the bitstream.  Width and height are coded as
// (value - 1), in that order, using the given bit widths.
void av1_read_frame_size(struct aom_read_bit_buffer *rb, int num_bits_width,
                         int num_bits_height, int *width, int *height) {
  // Bitstream order is width first, then height; both are coded minus one.
  const int coded_width = aom_rb_read_literal(rb, num_bits_width);
  const int coded_height = aom_rb_read_literal(rb, num_bits_height);
  *width = coded_width + 1;
  *height = coded_height + 1;
}
5332 | | |
5333 | 0 | BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) { |
5334 | 0 | int profile = aom_rb_read_literal(rb, PROFILE_BITS); |
5335 | 0 | return (BITSTREAM_PROFILE)profile; |
5336 | 0 | } |
5337 | | |
5338 | 0 | void superres_post_decode(AV1Decoder *pbi) { |
5339 | 0 | AV1_COMMON *const cm = &pbi->common; |
5340 | 0 | BufferPool *const pool = cm->buffer_pool; |
5341 | 0 |
|
5342 | 0 | if (!av1_superres_scaled(cm)) return; |
5343 | 0 | assert(!cm->all_lossless); |
5344 | 0 |
|
5345 | 0 | lock_buffer_pool(pool); |
5346 | 0 | av1_superres_upscale(cm, pool); |
5347 | 0 | unlock_buffer_pool(pool); |
5348 | 0 | } |
5349 | | |
// Parses the uncompressed frame header from |rb| and performs the per-frame
// setup needed before any tile data is read: global-motion reset, frame
// buffer / scale-factor setup, motion field and entropy-context
// initialization.
//
// For show_existing_frame, *p_data_end is set to the end of the header and
// the function returns early (there is no tile data to decode); otherwise
// *p_data_end is left untouched.
// Returns the size of the uncompressed header in bytes.
uint32_t av1_decode_frame_headers_and_setup(AV1Decoder *pbi,
                                            struct aom_read_bit_buffer *rb,
                                            const uint8_t *data,
                                            const uint8_t **p_data_end,
                                            int trailing_bits_present) {
  AV1_COMMON *const cm = &pbi->common;
  const int num_planes = av1_num_planes(cm);
  MACROBLOCKD *const xd = &pbi->mb;

#if CONFIG_BITSTREAM_DEBUG
  bitstream_queue_set_frame_read(cm->current_video_frame * 2 + cm->show_frame);
#endif
#if CONFIG_MISMATCH_DEBUG
  mismatch_move_frame_idx_r();
#endif

  // Reset global motion to identity for every inter reference; the header
  // may overwrite these via read_global_motion().
  for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
    cm->global_motion[i] = default_warp_params;
    cm->cur_frame->global_motion[i] = default_warp_params;
  }
  xd->global_motion = cm->global_motion;

  read_uncompressed_header(pbi, rb);

  if (trailing_bits_present) av1_check_trailing_bits(pbi, rb);

  // If cm->single_tile_decoding = 0, the independent decoding of a single tile
  // or a section of a frame is not allowed.
  if (!cm->single_tile_decoding &&
      (pbi->dec_tile_row >= 0 || pbi->dec_tile_col >= 0)) {
    pbi->dec_tile_row = -1;
    pbi->dec_tile_col = -1;
  }

  const uint32_t uncomp_hdr_size =
      (uint32_t)aom_rb_bytes_read(rb);  // Size of the uncompressed header
  YV12_BUFFER_CONFIG *new_fb = get_frame_new_buffer(cm);
  xd->cur_buf = new_fb;
  if (av1_allow_intrabc(cm)) {
    // Intra block copy references the current frame itself, so set up an
    // identity scale factor (source and destination dimensions identical).
    av1_setup_scale_factors_for_frame(
        &cm->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
        xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height);
  }

  if (cm->show_existing_frame) {
    // showing a frame directly: only the header was coded, no tile data.
    *p_data_end = data + uncomp_hdr_size;
    if (cm->reset_decoder_state) {
      // Use the default frame context values.
      *cm->fc = cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
      if (!cm->fc->initialized)
        aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                           "Uninitialized entropy context.");
    }
    return uncomp_hdr_size;
  }

  cm->setup_mi(cm);

  cm->current_frame_seg_map = cm->cur_frame->seg_map;

  av1_setup_motion_field(cm);

  av1_setup_block_planes(xd, cm->seq_params.subsampling_x,
                         cm->seq_params.subsampling_y, num_planes);
  if (cm->primary_ref_frame == PRIMARY_REF_NONE) {
    // use the default frame context values
    *cm->fc = cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
  } else {
    // Inherit the entropy context from the primary reference frame.
    *cm->fc = cm->frame_contexts[cm->frame_refs[cm->primary_ref_frame].idx];
  }
  if (!cm->fc->initialized)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Uninitialized entropy context.");

  xd->corrupted = 0;
  return uncomp_hdr_size;
}
5428 | | |
5429 | | // Once-per-frame initialization |
5430 | 0 | static void setup_frame_info(AV1Decoder *pbi) { |
5431 | 0 | AV1_COMMON *const cm = &pbi->common; |
5432 | 0 |
|
5433 | 0 | if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE || |
5434 | 0 | cm->rst_info[1].frame_restoration_type != RESTORE_NONE || |
5435 | 0 | cm->rst_info[2].frame_restoration_type != RESTORE_NONE) { |
5436 | 0 | av1_alloc_restoration_buffers(cm); |
5437 | 0 | } |
5438 | 0 | const int use_highbd = cm->seq_params.use_highbitdepth ? 1 : 0; |
5439 | 0 | const int buf_size = MC_TEMP_BUF_PELS << use_highbd; |
5440 | 0 | if (pbi->td.mc_buf_size != buf_size) { |
5441 | 0 | av1_free_mc_tmp_buf(&pbi->td); |
5442 | 0 | allocate_mc_tmp_buf(cm, &pbi->td, buf_size, use_highbd); |
5443 | 0 | } |
5444 | 0 | } |
5445 | | |
5446 | | void av1_decode_tg_tiles_and_wrapup(AV1Decoder *pbi, const uint8_t *data, |
5447 | | const uint8_t *data_end, |
5448 | | const uint8_t **p_data_end, int start_tile, |
5449 | 0 | int end_tile, int initialize_flag) { |
5450 | 0 | AV1_COMMON *const cm = &pbi->common; |
5451 | 0 | MACROBLOCKD *const xd = &pbi->mb; |
5452 | 0 | const int tile_count_tg = end_tile - start_tile + 1; |
5453 | 0 |
|
5454 | 0 | if (initialize_flag) setup_frame_info(pbi); |
5455 | 0 | const int num_planes = av1_num_planes(cm); |
5456 | | #if LOOP_FILTER_BITMASK |
5457 | | av1_loop_filter_frame_init(cm, 0, num_planes); |
5458 | | av1_zero_array(cm->lf.lfm, cm->lf.lfm_num); |
5459 | | #endif |
5460 | |
|
5461 | 0 | if (pbi->max_threads > 1 && !(cm->large_scale_tile && !pbi->ext_tile_debug) && |
5462 | 0 | pbi->row_mt) |
5463 | 0 | *p_data_end = |
5464 | 0 | decode_tiles_row_mt(pbi, data, data_end, start_tile, end_tile); |
5465 | 0 | else if (pbi->max_threads > 1 && tile_count_tg > 1 && |
5466 | 0 | !(cm->large_scale_tile && !pbi->ext_tile_debug)) |
5467 | 0 | *p_data_end = decode_tiles_mt(pbi, data, data_end, start_tile, end_tile); |
5468 | 0 | else |
5469 | 0 | *p_data_end = decode_tiles(pbi, data, data_end, start_tile, end_tile); |
5470 | 0 |
|
5471 | 0 | // If the bit stream is monochrome, set the U and V buffers to a constant. |
5472 | 0 | if (num_planes < 3) { |
5473 | 0 | set_planes_to_neutral_grey(&cm->seq_params, xd->cur_buf, 1); |
5474 | 0 | } |
5475 | 0 |
|
5476 | 0 | if (end_tile != cm->tile_rows * cm->tile_cols - 1) { |
5477 | 0 | return; |
5478 | 0 | } |
5479 | 0 | |
5480 | 0 | if (!cm->allow_intrabc && !cm->single_tile_decoding) { |
5481 | 0 | if (cm->lf.filter_level[0] || cm->lf.filter_level[1]) { |
5482 | | #if LOOP_FILTER_BITMASK |
5483 | | av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb, 1, 0, |
5484 | | num_planes, 0); |
5485 | | #else |
5486 | 0 | if (pbi->num_workers > 1) { |
5487 | 0 | av1_loop_filter_frame_mt(get_frame_new_buffer(cm), cm, &pbi->mb, 0, |
5488 | 0 | num_planes, 0, pbi->tile_workers, |
5489 | 0 | pbi->num_workers, &pbi->lf_row_sync); |
5490 | 0 | } else { |
5491 | 0 | av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb, 0, |
5492 | 0 | num_planes, 0); |
5493 | 0 | } |
5494 | 0 | #endif |
5495 | 0 | } |
5496 | 0 |
|
5497 | 0 | const int do_loop_restoration = |
5498 | 0 | cm->rst_info[0].frame_restoration_type != RESTORE_NONE || |
5499 | 0 | cm->rst_info[1].frame_restoration_type != RESTORE_NONE || |
5500 | 0 | cm->rst_info[2].frame_restoration_type != RESTORE_NONE; |
5501 | 0 | const int do_cdef = |
5502 | 0 | !cm->skip_loop_filter && !cm->coded_lossless && |
5503 | 0 | (cm->cdef_bits || cm->cdef_strengths[0] || cm->cdef_uv_strengths[0]); |
5504 | 0 | const int do_superres = av1_superres_scaled(cm); |
5505 | 0 | const int optimized_loop_restoration = !do_cdef && !do_superres; |
5506 | 0 |
|
5507 | 0 | if (!optimized_loop_restoration) { |
5508 | 0 | if (do_loop_restoration) |
5509 | 0 | av1_loop_restoration_save_boundary_lines(&pbi->cur_buf->buf, cm, 0); |
5510 | 0 |
|
5511 | 0 | if (do_cdef) av1_cdef_frame(&pbi->cur_buf->buf, cm, &pbi->mb); |
5512 | 0 |
|
5513 | 0 | superres_post_decode(pbi); |
5514 | 0 |
|
5515 | 0 | if (do_loop_restoration) { |
5516 | 0 | av1_loop_restoration_save_boundary_lines(&pbi->cur_buf->buf, cm, 1); |
5517 | 0 | if (pbi->num_workers > 1) { |
5518 | 0 | av1_loop_restoration_filter_frame_mt( |
5519 | 0 | (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, optimized_loop_restoration, |
5520 | 0 | pbi->tile_workers, pbi->num_workers, &pbi->lr_row_sync, |
5521 | 0 | &pbi->lr_ctxt); |
5522 | 0 | } else { |
5523 | 0 | av1_loop_restoration_filter_frame((YV12_BUFFER_CONFIG *)xd->cur_buf, |
5524 | 0 | cm, optimized_loop_restoration, |
5525 | 0 | &pbi->lr_ctxt); |
5526 | 0 | } |
5527 | 0 | } |
5528 | 0 | } else { |
5529 | 0 | // In no cdef and no superres case. Provide an optimized version of |
5530 | 0 | // loop_restoration_filter. |
5531 | 0 | if (do_loop_restoration) { |
5532 | 0 | if (pbi->num_workers > 1) { |
5533 | 0 | av1_loop_restoration_filter_frame_mt( |
5534 | 0 | (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, optimized_loop_restoration, |
5535 | 0 | pbi->tile_workers, pbi->num_workers, &pbi->lr_row_sync, |
5536 | 0 | &pbi->lr_ctxt); |
5537 | 0 | } else { |
5538 | 0 | av1_loop_restoration_filter_frame((YV12_BUFFER_CONFIG *)xd->cur_buf, |
5539 | 0 | cm, optimized_loop_restoration, |
5540 | 0 | &pbi->lr_ctxt); |
5541 | 0 | } |
5542 | 0 | } |
5543 | 0 | } |
5544 | 0 | } |
5545 | 0 |
|
5546 | 0 | if (!xd->corrupted) { |
5547 | 0 | if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { |
5548 | 0 | assert(cm->context_update_tile_id < pbi->allocated_tiles); |
5549 | 0 | *cm->fc = pbi->tile_data[cm->context_update_tile_id].tctx; |
5550 | 0 | av1_reset_cdf_symbol_counters(cm->fc); |
5551 | 0 | } |
5552 | 0 | } else { |
5553 | 0 | aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, |
5554 | 0 | "Decode failed. Frame data is corrupted."); |
5555 | 0 | } |
5556 | 0 |
|
5557 | | #if CONFIG_INSPECTION |
5558 | | if (pbi->inspect_cb != NULL) { |
5559 | | (*pbi->inspect_cb)(pbi, pbi->inspect_ctx); |
5560 | | } |
5561 | | #endif |
5562 | |
|
5563 | 0 | // Non frame parallel update frame context here. |
5564 | 0 | if (!cm->large_scale_tile) { |
5565 | 0 | cm->frame_contexts[cm->new_fb_idx] = *cm->fc; |
5566 | 0 | } |
5567 | 0 | } |